Merge pull request #112 from ajcwebdev/refactor
Remove Ollama from Docker Image and Add RSS Retry Logic
ajcwebdev authored Jan 25, 2025
2 parents 333e63f + cfbc7f4 commit da448a2
Showing 35 changed files with 1,360 additions and 1,354 deletions.
54 changes: 27 additions & 27 deletions .github/Dockerfile
@@ -78,39 +78,39 @@ RUN chmod +x /usr/src/app/docker-entrypoint.sh
# 2) Setup Ollama with models
# ---------------------------------------------------

# 1. Use the Ollama image as a base
FROM ollama/ollama:latest AS ollama

# 2. Set working directory to Ollama config directory
WORKDIR /root/.ollama

# 3. Start Ollama server and pull models
RUN ollama serve & \
sleep 10 && \
ollama pull qwen2.5:0.5b && \
ollama ls && \
echo "Listing /root/.ollama after qwen2.5:0.5b pull:" && \
ls -lh /root/.ollama || true && \
echo "Listing /root/.ollama/models after qwen2.5:0.5b pull:" && \
ls -lh /root/.ollama/models || true && \
pkill ollama
# # 1. Use the Ollama image as a base
# FROM ollama/ollama:latest AS ollama

# # 2. Set working directory to Ollama config directory
# WORKDIR /root/.ollama

# # 3. Start Ollama server and pull models
# RUN ollama serve & \
# sleep 10 && \
# ollama pull qwen2.5:0.5b && \
# ollama ls && \
# echo "Listing /root/.ollama after qwen2.5:0.5b pull:" && \
# ls -lh /root/.ollama || true && \
# echo "Listing /root/.ollama/models after qwen2.5:0.5b pull:" && \
# ls -lh /root/.ollama/models || true && \
# pkill ollama

# ---------------------------------------------------
# 3) Final stage combining everything
# ---------------------------------------------------

FROM base

# 1. Copy Ollama binary
COPY --from=ollama /bin/ollama /usr/local/bin/ollama
# # 1. Copy Ollama binary
# COPY --from=ollama /bin/ollama /usr/local/bin/ollama

# 2. Copy pre-downloaded models
COPY --from=ollama /root/.ollama /root/.ollama
# # 2. Copy pre-downloaded models
# COPY --from=ollama /root/.ollama /root/.ollama

RUN echo "Listing /root/.ollama in final stage:" && \
ls -lh /root/.ollama || true && \
echo "Listing /root/.ollama/models in final stage:" && \
ls -lh /root/.ollama/models || true
# RUN echo "Listing /root/.ollama in final stage:" && \
# ls -lh /root/.ollama || true && \
# echo "Listing /root/.ollama/models in final stage:" && \
# ls -lh /root/.ollama/models || true

# Set environment variables for Whisper
ENV WHISPER_FORCE_CPU=1
@@ -122,9 +122,9 @@ RUN mkdir -p /usr/src/app/content
# 4. Set proper permissions for the entire app directory including content
RUN chown -R node:node /usr/src/app \
&& chmod -R 755 /usr/src/app \
&& chmod 777 /usr/src/app/content \
&& chown -R node:node /root/.ollama \
&& chmod -R 755 /root/.ollama
&& chmod 777 /usr/src/app/content
# && chown -R node:node /root/.ollama \
# && chmod -R 755 /root/.ollama

# Switch to non-root user
USER node
14 changes: 7 additions & 7 deletions .github/docker-entrypoint.sh
@@ -17,15 +17,15 @@ log_error() {
exit 1
}

echo "Debug: (docker-entrypoint.sh) Checking /root/.ollama before starting Ollama..."
ls -lR /root/.ollama || true
# echo "Debug: (docker-entrypoint.sh) Checking /root/.ollama before starting Ollama..."
# ls -lR /root/.ollama || true

# Start Ollama server in the background
echo "Starting Ollama server..."
ollama serve &
# # Start Ollama server in the background
# echo "Starting Ollama server..."
# ollama serve &

# Wait for Ollama server to start
sleep 5
# # Wait for Ollama server to start
# sleep 5

# If first argument is "serve", then start the server.
if [ "$1" = "serve" ]; then
39 changes: 7 additions & 32 deletions src/commander.ts
@@ -15,9 +15,10 @@ import { argv, exit } from 'node:process'
import { fileURLToPath } from 'node:url'
import { Command } from 'commander'
import { selectPrompts } from './process-steps/04-select-prompt'
import { validateProcessAction, validateLLM, validateTranscription, processAction } from './utils/validate-option'
import { l, err, logCompletionSeparator } from './utils/logging'
import { processAction, validateCLIOptions } from './utils/validate-option'
import { l, err, logSeparator } from './utils/logging'
import { envVarsMap } from './utils/globals/llms'

import type { ProcessingOptions } from './utils/types/process'

// Initialize the command-line interface using Commander.js
@@ -67,12 +68,7 @@ program
.option('--printPrompt <sections...>', 'Print the prompt sections without processing')
.option('--customPrompt <filePath>', 'Use a custom prompt from a markdown file')
.option('--saveAudio', 'Do not delete intermediary files after processing')
// Added options to override environment variables from CLI
/**
* Additional CLI options to allow passing API keys from the command line,
* overriding .env values if they exist. This way, if the .env is missing
* a key, the user can supply it via the CLI.
*/
// Options to override environment variables from CLI
.option('--openaiApiKey <key>', 'Specify OpenAI API key (overrides .env variable)')
.option('--anthropicApiKey <key>', 'Specify Anthropic API key (overrides .env variable)')
.option('--deepgramApiKey <key>', 'Specify Deepgram API key (overrides .env variable)')
@@ -84,21 +80,6 @@
.option('--togetherApiKey <key>', 'Specify Together API key (overrides .env variable)')
.option('--fireworksApiKey <key>', 'Specify Fireworks API key (overrides .env variable)')
.option('--groqApiKey <key>', 'Specify Groq API key (overrides .env variable)')
// Add examples and additional help text
.addHelpText(
'after',
`
Examples:
$ autoshow --video "https://www.youtube.com/watch?v=..."
$ autoshow --playlist "https://www.youtube.com/playlist?list=..."
$ autoshow --channel "https://www.youtube.com/channel/..."
$ autoshow --file "content/audio.mp3"
$ autoshow --rss "https://feeds.transistor.fm/fsjam-podcast/"
Documentation: https://github.com/ajcwebdev/autoshow#readme
Report Issues: https://github.com/ajcwebdev/autoshow/issues
`
)

/**
* Main action for the program.
@@ -125,19 +106,13 @@ program.action(async (options: ProcessingOptions) => {
exit(0)
}

// 1) Validate which action was chosen
const action = validateProcessAction(options, "action")

// 2) Validate LLM
const llmServices = validateLLM(options)

// 3) Validate transcription
const transcriptServices = validateTranscription(options)
// Validate action, LLM, and transcription inputs
const { action, llmServices, transcriptServices } = validateCLIOptions(options)

try {
// Helper to handle all action processing logic. If successful, log and exit.
await processAction(action, options, llmServices, transcriptServices)
logCompletionSeparator(action)
logSeparator({ type: 'completion', descriptor: action })
exit(0)
} catch (error) {
err(`Error processing ${action}:`, (error as Error).message)
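The commander.ts changes above fold three separate validation calls into a single validateCLIOptions helper that lives in src/utils/validate-option and is not included in this diff. A hedged sketch of what that helper might look like, assuming it simply composes the three validators the old code imported directly (validateProcessAction, validateLLM, validateTranscription); names, paths, and return types here are inferred from the call site, not taken from the commit:

```typescript
// Hypothetical sketch only -- the real validateCLIOptions in
// src/utils/validate-option.ts is not shown in this diff.
import type { ProcessingOptions } from './types/process'

export function validateCLIOptions(options: ProcessingOptions) {
  // Which processing action was requested (--video, --playlist, --rss, --file, ...)
  const action = validateProcessAction(options, 'action')
  // Which LLM service to use, if any
  const llmServices = validateLLM(options)
  // Which transcription service to use
  const transcriptServices = validateTranscription(options)
  return { action, llmServices, transcriptServices }
}
```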
4 changes: 2 additions & 2 deletions src/llms/chatgpt.ts
@@ -3,7 +3,7 @@
import { env } from 'node:process'
import { OpenAI } from 'openai'
import { GPT_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { ChatGPTModelType } from '../utils/types/llms'

/**
Expand Down Expand Up @@ -43,7 +43,7 @@ export const callChatGPT = async (

const content = firstChoice.message.content

logAPIResults({
logLLMCost({
modelName: actualModel,
stopReason: firstChoice.finish_reason ?? 'unknown',
tokenUsage: {
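Every LLM adapter in this commit swaps logAPIResults for logLLMCost; the helper itself (in src/utils/logging.ts) is not part of the diff. Judging only from call sites like callChatGPT above, its argument shape is roughly the following — the tokenUsage field names are assumptions, since the diff truncates each call before they appear:

```typescript
// Hypothetical shape only, inferred from the call sites in this commit.
// The real logLLMCost may also look up per-model pricing; that is not shown here.
export interface LogLLMCostParams {
  modelName: string        // model identifier reported by the provider
  stopReason: string       // finish/stop reason, 'unknown' when the API omits it
  tokenUsage: {
    input?: number         // prompt tokens
    output?: number        // completion tokens
    total?: number         // total tokens
  }
}

export function logLLMCost({ modelName, stopReason, tokenUsage }: LogLLMCostParams): void {
  const { input = 0, output = 0, total = input + output } = tokenUsage
  console.log(`${modelName} (${stopReason}): ${input} in / ${output} out / ${total} total tokens`)
}
```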
25 changes: 15 additions & 10 deletions src/llms/claude.ts
@@ -3,9 +3,19 @@
import { env } from 'node:process'
import { Anthropic } from '@anthropic-ai/sdk'
import { CLAUDE_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { ClaudeModelType } from '../utils/types/llms'

/**
* Extracts text content from the API response
* @param content - The content returned by the API
* @returns The extracted text content, or null if no text content is found
*/
interface ContentBlock {
type: string;
text?: string;
}

/**
* Main function to call Claude API.
* @param {string} prompt - The prompt or instructions to process.
@@ -42,7 +52,7 @@
throw new Error('No text content generated from the API')
}

logAPIResults({
logLLMCost({
modelName: actualModel,
stopReason: response.stop_reason ?? 'unknown',
tokenUsage: {
@@ -59,18 +69,13 @@
}
}

/**
* Extracts text content from the API response
* @param content - The content returned by the API
* @returns The extracted text content, or null if no text content is found
*/
function extractTextContent(content: any[]): string | null {
function extractTextContent(content: ContentBlock[]): string | null {
for (const block of content) {
if (typeof block === 'object' && block !== null && 'type' in block) {
if (block.type === 'text' && 'text' in block) {
return block.text
return block.text ?? null;
}
}
}
return null
return null;
}
4 changes: 2 additions & 2 deletions src/llms/cohere.ts
@@ -3,7 +3,7 @@
import { env } from 'node:process'
import { CohereClient } from 'cohere-ai'
import { COHERE_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { CohereModelType } from '../utils/types/llms'

/**
Expand Down Expand Up @@ -43,7 +43,7 @@ export const callCohere = async (

const { inputTokens, outputTokens } = meta?.tokens ?? {}

logAPIResults({
logLLMCost({
modelName: actualModel,
stopReason: finishReason ?? 'unknown',
tokenUsage: {
4 changes: 2 additions & 2 deletions src/llms/fireworks.ts
@@ -2,7 +2,7 @@

import { env } from 'node:process'
import { FIREWORKS_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { FireworksModelType, FireworksResponse } from '../utils/types/llms'

/**
Expand Down Expand Up @@ -59,7 +59,7 @@ export const callFireworks = async (
throw new Error('No content generated from the Fireworks API')
}

logAPIResults({
logLLMCost({
modelName: modelKey,
stopReason: data.choices[0]?.finish_reason ?? 'unknown',
tokenUsage: {
4 changes: 2 additions & 2 deletions src/llms/gemini.ts
@@ -3,7 +3,7 @@
import { env } from 'node:process'
import { GoogleGenerativeAI } from "@google/generative-ai"
import { GEMINI_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { GeminiModelType } from '../utils/types/llms'

/**
Expand Down Expand Up @@ -46,7 +46,7 @@ export const callGemini = async (
const { usageMetadata } = response
const { promptTokenCount, candidatesTokenCount, totalTokenCount } = usageMetadata ?? {}

logAPIResults({
logLLMCost({
modelName: actualModel,
stopReason: 'complete',
tokenUsage: {
4 changes: 2 additions & 2 deletions src/llms/groq.ts
@@ -2,7 +2,7 @@

import { env } from 'node:process'
import { GROQ_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { GroqModelType, GroqChatCompletionResponse } from '../utils/types/llms'

/**
Expand Down Expand Up @@ -58,7 +58,7 @@ export const callGroq = async (
throw new Error('No content generated from the Groq API')
}

logAPIResults({
logLLMCost({
modelName: modelKey,
stopReason: data.choices[0]?.finish_reason ?? 'unknown',
tokenUsage: {
4 changes: 2 additions & 2 deletions src/llms/mistral.ts
@@ -3,7 +3,7 @@
import { env } from 'node:process'
import { Mistral } from '@mistralai/mistralai'
import { MISTRAL_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { MistralModelType } from '../utils/types/llms'

/**
Expand Down Expand Up @@ -46,7 +46,7 @@ export const callMistral = async (
const content = firstChoice.message.content
const contentString = Array.isArray(content) ? content.join('') : content

logAPIResults({
logLLMCost({
modelName: actualModel,
stopReason: firstChoice.finishReason ?? 'unknown',
tokenUsage: {
4 changes: 2 additions & 2 deletions src/llms/ollama.ts
@@ -2,7 +2,7 @@

import { env } from 'node:process'
import { OLLAMA_MODELS } from '../utils/globals/llms'
import { l, err, logAPIResults } from '../utils/logging'
import { l, err, logLLMCost } from '../utils/logging'
import { checkOllamaServerAndModel } from '../utils/validate-option'
import type { OllamaModelType, OllamaResponse } from '../utils/types/llms'

Expand Down Expand Up @@ -68,7 +68,7 @@ export const callOllama = async (
const totalPromptTokens = data.prompt_eval_count ?? 0
const totalCompletionTokens = data.eval_count ?? 0

logAPIResults({
logLLMCost({
modelName: modelKey,
stopReason: 'stop',
tokenUsage: {
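With Ollama no longer baked into the Docker image, callOllama depends on the checkOllamaServerAndModel helper imported above from ../utils/validate-option to confirm a server is reachable and the model is present before making a request; its implementation is not part of this diff. A rough sketch of what such a preflight check could look like — the endpoint paths, env var names, defaults, and pull-on-miss behavior are assumptions rather than code from this repo:

```typescript
// Hypothetical sketch of an Ollama preflight check (Node 18+ global fetch).
// Host/port defaults, env var names, and the pull-on-miss step are assumptions.
export async function checkOllamaServerAndModel(
  host = process.env['OLLAMA_HOST'] ?? 'localhost',
  port = process.env['OLLAMA_PORT'] ?? '11434',
  model = 'qwen2.5:0.5b'
): Promise<void> {
  const baseUrl = `http://${host}:${port}`

  // 1) Is an Ollama server reachable at all?
  const tags = await fetch(`${baseUrl}/api/tags`)
  if (!tags.ok) throw new Error(`Ollama server not reachable at ${baseUrl}`)

  // 2) Is the requested model already available locally?
  const { models } = (await tags.json()) as { models: Array<{ name: string }> }
  if (models.some((m) => m.name === model)) return

  // 3) Otherwise, pull it before the chat request is made.
  const pull = await fetch(`${baseUrl}/api/pull`, {
    method: 'POST',
    body: JSON.stringify({ model, stream: false }),
  })
  if (!pull.ok) throw new Error(`Failed to pull Ollama model ${model}`)
}
```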
4 changes: 2 additions & 2 deletions src/llms/together.ts
@@ -2,7 +2,7 @@

import { env } from 'node:process'
import { TOGETHER_MODELS } from '../utils/globals/llms'
import { err, logAPIResults } from '../utils/logging'
import { err, logLLMCost } from '../utils/logging'
import type { TogetherModelType, TogetherResponse } from '../utils/types/llms'

/**
Expand Down Expand Up @@ -59,7 +59,7 @@ export const callTogether = async (
throw new Error('No content generated from the Together AI API')
}

logAPIResults({
logLLMCost({
modelName: modelKey,
stopReason: data.choices[0]?.finish_reason ?? 'unknown',
tokenUsage: {
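The RSS retry logic named in the commit title is not among the files reproduced above; the remaining changed files, including the RSS processing code, are omitted from this listing. As a rough illustration only, retry-with-backoff around a feed fetch tends to look something like the sketch below — the function name, attempt count, and delay policy are assumptions, not code from this commit:

```typescript
// Illustrative sketch only -- not the implementation added in this commit.
// Retries a feed fetch a few times, waiting a little longer between attempts.
export async function fetchFeedWithRetry(
  url: string,
  maxRetries = 3,
  baseDelayMs = 1000
): Promise<string> {
  let lastError: Error | undefined

  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      const response = await fetch(url)
      if (!response.ok) throw new Error(`HTTP ${response.status} fetching ${url}`)
      return await response.text() // raw feed XML, parsed elsewhere
    } catch (error) {
      lastError = error as Error
      if (attempt < maxRetries) {
        // Linear backoff: wait 1s, then 2s, then 3s before retrying
        await new Promise((resolve) => setTimeout(resolve, baseDelayMs * attempt))
      }
    }
  }

  throw new Error(`Failed to fetch RSS feed after ${maxRetries} attempts: ${lastError?.message}`)
}
```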
