From 060f725e11ea98191ff7107f33bff80f6651728c Mon Sep 17 00:00:00 2001 From: Anthony Campolo <12433465+ajcwebdev@users.noreply.github.com> Date: Wed, 1 Jan 2025 02:43:15 -0600 Subject: [PATCH] calculate and log cost based on llm model --- package.json | 10 +- src/llms/chatgpt.ts | 29 +-- src/llms/claude.ts | 26 +- src/llms/cohere.ts | 15 +- src/llms/fireworks.ts | 29 ++- src/llms/gemini.ts | 18 +- src/llms/groq.ts | 39 +-- src/llms/mistral.ts | 19 +- src/llms/ollama.ts | 41 +++- src/llms/together.ts | 33 ++- src/types/llms.ts | 33 ++- src/types/logging.ts | 32 +++ src/types/transcription.ts | 6 + src/utils/globals.ts | 435 ++++++++++++++++++++++++++-------- src/utils/logging.ts | 164 ++++++++++++- test/all.test.ts | 39 ++- test/docker.test.ts | 82 ++++--- test/local.test.ts | 8 +- test/models/chatgpt.test.ts | 60 +++++ test/models/claude.test.ts | 73 ++++++ test/models/cohere.test.ts | 55 +++++ test/models/fireworks.test.ts | 79 ++++++ test/models/gemini.test.ts | 61 +++++ test/models/groq.test.ts | 73 ++++++ test/models/mistral.test.ts | 91 +++++++ test/models/together.test.ts | 91 +++++++ 26 files changed, 1370 insertions(+), 271 deletions(-) create mode 100644 src/types/logging.ts create mode 100644 test/models/chatgpt.test.ts create mode 100644 test/models/claude.test.ts create mode 100644 test/models/cohere.test.ts create mode 100644 test/models/fireworks.test.ts create mode 100644 test/models/gemini.test.ts create mode 100644 test/models/groq.test.ts create mode 100644 test/models/mistral.test.ts create mode 100644 test/models/together.test.ts diff --git a/package.json b/package.json index e3864c4..ec8e9bd 100644 --- a/package.json +++ b/package.json @@ -37,13 +37,21 @@ "bench-medium": "tsx --test test/bench/medium.test.ts", "bench-large": "tsx --test test/bench/large.test.ts", "bench-turbo": "tsx --test test/bench/turbo.test.ts", + "test-models-chatgpt": "tsx --test test/models/chatgpt.test.ts", + "test-models-claude": "tsx --test test/models/claude.test.ts", + "test-models-cohere": "tsx --test test/models/cohere.test.ts", + "test-models-gemini": "tsx --test test/models/gemini.test.ts", + "test-models-mistral": "tsx --test test/models/mistral.test.ts", + "test-models-fireworks": "tsx --test test/models/fireworks.test.ts", + "test-models-together": "tsx --test test/models/together.test.ts", + "test-models-groq": "tsx --test test/models/groq.test.ts", "test-local": "tsx --test test/local.test.ts", "test-docker": "tsx --test test/docker.test.ts", "test-services": "tsx --test test/services.test.ts", "test-all": "tsx --test test/all.test.ts", "ta": "tsx --test test/all.test.ts", "clean": "tsx scripts/cleanContent.ts", - "docker-cli": "docker run --rm -v $PWD/content:/usr/src/app/content autoshow", + "docker-cli": "docker run --rm --env-file .env -v $PWD/content:/usr/src/app/content autoshow", "docker-serve": "docker run -d -p 3000:3000 -v $PWD/content:/usr/src/app/content autoshow serve", "prune": "docker system prune -af --volumes && docker image prune -af && docker container prune -f && docker volume prune -af", "bun": "bun --env-file=.env --no-warnings src/cli/commander.ts", diff --git a/src/llms/chatgpt.ts b/src/llms/chatgpt.ts index d297a1d..db9a748 100644 --- a/src/llms/chatgpt.ts +++ b/src/llms/chatgpt.ts @@ -4,7 +4,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { OpenAI } from 'openai' import { GPT_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { err, logAPIResults } from '../utils/logging' import 
type { LLMFunction, ChatGPTModelType } from '../types/llms' /** @@ -35,8 +35,8 @@ export const callChatGPT: LLMFunction = async ( // Call the OpenAI chat completions API const response = await openai.chat.completions.create({ model: actualModel, - max_tokens: 4000, // Maximum number of tokens in the response - messages: [{ role: 'user', content: promptAndTranscript }], // The input message (transcript content) + max_completion_tokens: 4000, + messages: [{ role: 'user', content: promptAndTranscript }], }) // Check if we have a valid response @@ -45,20 +45,21 @@ export const callChatGPT: LLMFunction = async ( throw new Error('No valid response received from the API') } - // Get the content and other details safely - const content = firstChoice.message.content - const finish_reason = firstChoice.finish_reason ?? 'unknown' - const usedModel = response.model - const usage = response.usage - const { prompt_tokens, completion_tokens, total_tokens } = usage ?? {} - // Write the generated content to the output file - await writeFile(tempPath, content) + await writeFile(tempPath, firstChoice.message.content) - l.wait(` - Finish Reason: ${finish_reason}\n - ChatGPT Model: ${usedModel}`) - l.wait(` - Token Usage:\n - ${prompt_tokens} prompt tokens\n - ${completion_tokens} completion tokens\n - ${total_tokens} total tokens`) + // Log API results using the standardized logging function + logAPIResults({ + modelName: actualModel, + stopReason: firstChoice.finish_reason ?? 'unknown', + tokenUsage: { + input: response.usage?.prompt_tokens, + output: response.usage?.completion_tokens, + total: response.usage?.total_tokens + } + }) } catch (error) { err(`Error in callChatGPT: ${(error as Error).message}`) - throw error // Re-throw the error for handling in the calling function + throw error } } \ No newline at end of file diff --git a/src/llms/claude.ts b/src/llms/claude.ts index c1b90e5..7ffbb58 100644 --- a/src/llms/claude.ts +++ b/src/llms/claude.ts @@ -4,7 +4,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { Anthropic } from '@anthropic-ai/sdk' import { CLAUDE_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { err, logAPIResults } from '../utils/logging' import type { LLMFunction, ClaudeModelType } from '../types/llms' /** @@ -39,18 +39,8 @@ export const callClaude: LLMFunction = async ( messages: [{ role: 'user', content: promptAndTranscript }] // The input message (transcript content) }) - // Destructure the response to get relevant information - const { - content, - model: usedModel, // The actual model used - usage, // Token usage information - stop_reason // Reason why the generation stopped - } = response - - const { input_tokens, output_tokens } = usage - // Extract text content from the response - const textContent = extractTextContent(content) + const textContent = extractTextContent(response.content) // Write the generated text to the output file if (textContent) { @@ -59,8 +49,16 @@ export const callClaude: LLMFunction = async ( throw new Error('No text content generated from the API') } - l.wait(` - Stop Reason: ${stop_reason}\n - Model: ${usedModel}`) - l.wait(` - Token Usage:\n - ${input_tokens} input tokens\n - ${output_tokens} output tokens`) + // Log API results using the standardized logging function + logAPIResults({ + modelName: actualModel, + stopReason: response.stop_reason ?? 
'unknown', + tokenUsage: { + input: response.usage.input_tokens, + output: response.usage.output_tokens, + total: response.usage.input_tokens + response.usage.output_tokens + } + }) } catch (error) { err(`Error in callClaude: ${(error as Error).message}`) throw error // Re-throw the error for handling in the calling function diff --git a/src/llms/cohere.ts b/src/llms/cohere.ts index 7b2e43b..1ac416a 100644 --- a/src/llms/cohere.ts +++ b/src/llms/cohere.ts @@ -4,7 +4,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { CohereClient } from 'cohere-ai' import { COHERE_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { err, logAPIResults } from '../utils/logging' import type { LLMFunction, CohereModelType } from '../types/llms' /** @@ -35,7 +35,6 @@ export const callCohere: LLMFunction = async ( // Call the Cohere chat API const response = await cohere.chat({ model: actualModel, - // max_tokens: ?, // Cohere doesn't seem to have a max_tokens parameter for chat message: promptAndTranscript // The input message (prompt and transcript content) }) @@ -51,8 +50,16 @@ export const callCohere: LLMFunction = async ( // Write the generated text to the output file await writeFile(tempPath, text) - l.wait(`\n Finish Reason: ${finishReason}\n Model: ${actualModel}`) - l.wait(` Token Usage:\n - ${inputTokens} input tokens\n - ${outputTokens} output tokens`) + // Log API results using the standardized logging function + logAPIResults({ + modelName: actualModel, + stopReason: finishReason ?? 'unknown', + tokenUsage: { + input: inputTokens, + output: outputTokens, + total: inputTokens && outputTokens ? inputTokens + outputTokens : undefined + } + }) } catch (error) { err(`Error in callCohere: ${(error as Error).message}`) throw error // Re-throw the error for handling in the calling function diff --git a/src/llms/fireworks.ts b/src/llms/fireworks.ts index 18b78e2..3a8e226 100644 --- a/src/llms/fireworks.ts +++ b/src/llms/fireworks.ts @@ -3,7 +3,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { FIREWORKS_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { err, logAPIResults } from '../utils/logging' import type { LLMFunction, FireworksModelType, FireworksResponse } from '../types/llms' /** @@ -17,7 +17,7 @@ import type { LLMFunction, FireworksModelType, FireworksResponse } from '../type export const callFireworks: LLMFunction = async ( promptAndTranscript: string, tempPath: string, - model: string = 'LLAMA_3_2_3B' + model: string | FireworksModelType = 'LLAMA_3_2_3B' ): Promise => { // Check if the FIREWORKS_API_KEY environment variable is set if (!env['FIREWORKS_API_KEY']) { @@ -25,11 +25,14 @@ export const callFireworks: LLMFunction = async ( } try { - const actualModel = (FIREWORKS_MODELS[model as FireworksModelType] || FIREWORKS_MODELS.LLAMA_3_2_3B).modelId + // Get the model configuration and ID, defaulting to LLAMA_3_2_3B if not found + const modelKey = typeof model === 'string' ? 
model : 'LLAMA_3_2_3B' + const modelConfig = FIREWORKS_MODELS[modelKey as FireworksModelType] || FIREWORKS_MODELS.LLAMA_3_2_3B + const modelId = modelConfig.modelId // Prepare the request body const requestBody = { - model: actualModel, + model: modelId, messages: [ { role: 'user', @@ -58,10 +61,6 @@ export const callFireworks: LLMFunction = async ( // Extract the generated content const content = data.choices[0]?.message?.content - const finishReason = data.choices[0]?.finish_reason - const usedModel = data.model - const usage = data.usage - const { prompt_tokens, completion_tokens, total_tokens } = usage if (!content) { throw new Error('No content generated from the Fireworks API') @@ -69,11 +68,17 @@ export const callFireworks: LLMFunction = async ( // Write the generated content to the specified output file await writeFile(tempPath, content) - l.wait(`\n Fireworks response saved to ${tempPath}`) - // Log finish reason, used model, and token usage - l.wait(`\n Finish Reason: ${finishReason}\n Model Used: ${usedModel}`) - l.wait(` Token Usage:\n - ${prompt_tokens} prompt tokens\n - ${completion_tokens} completion tokens\n - ${total_tokens} total tokens`) + // Log API results using the model key + logAPIResults({ + modelName: modelKey, + stopReason: data.choices[0]?.finish_reason ?? 'unknown', + tokenUsage: { + input: data.usage.prompt_tokens, + output: data.usage.completion_tokens, + total: data.usage.total_tokens + } + }) } catch (error) { // Log any errors that occur during the process err(`Error in callFireworks: ${(error as Error).message}`) diff --git a/src/llms/gemini.ts b/src/llms/gemini.ts index a1bce94..a5ae5ce 100644 --- a/src/llms/gemini.ts +++ b/src/llms/gemini.ts @@ -4,7 +4,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { GoogleGenerativeAI } from "@google/generative-ai" import { GEMINI_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { err, logAPIResults } from '../utils/logging' import type { LLMFunction, GeminiModelType } from '../types/llms' /** @@ -57,7 +57,21 @@ export const callGemini: LLMFunction = async ( // Write the generated text to the output file await writeFile(tempPath, text) - l.wait(`\nModel: ${actualModel}`) + + // Get token usage from the response metadata + const { usageMetadata } = response + const { promptTokenCount, candidatesTokenCount, totalTokenCount } = usageMetadata ?? {} + + // Log API results using the standardized logging function + logAPIResults({ + modelName: actualModel, + stopReason: 'complete', + tokenUsage: { + input: promptTokenCount, + output: candidatesTokenCount, + total: totalTokenCount + } + }) return } catch (error) { diff --git a/src/llms/groq.ts b/src/llms/groq.ts index cd67df6..6a43185 100644 --- a/src/llms/groq.ts +++ b/src/llms/groq.ts @@ -3,8 +3,8 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { GROQ_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' -import type { GroqChatCompletionResponse, GroqModelType } from '../types/llms' +import { err, logAPIResults } from '../utils/logging' +import type { LLMFunction, GroqModelType, GroqChatCompletionResponse } from '../types/llms' // Define the Groq API URL const GROQ_API_URL = 'https://api.groq.com/openai/v1/chat/completions' @@ -13,20 +13,27 @@ const GROQ_API_URL = 'https://api.groq.com/openai/v1/chat/completions' * Function to call the Groq chat completion API. 
* @param {string} promptAndTranscript - The combined prompt and transcript text to process. * @param {string} tempPath - The temporary file path to write the LLM output. - * @param {string} model - The model to use, e.g., 'MIXTRAL_8X7B_32768'. + * @param {string} model - The model to use, e.g., 'LLAMA_3_2_1B_PREVIEW'. */ -export const callGroq = async (promptAndTranscript: string, tempPath: string, model: string = 'MIXTRAL_8X7B_32768'): Promise => { +export const callGroq: LLMFunction = async ( + promptAndTranscript: string, + tempPath: string, + model: string | GroqModelType = 'LLAMA_3_2_1B_PREVIEW' +): Promise => { // Ensure that the API key is set if (!env['GROQ_API_KEY']) { throw new Error('GROQ_API_KEY environment variable is not set. Please set it to your Groq API key.') } try { - const actualModel = (GROQ_MODELS[model as GroqModelType] || GROQ_MODELS.MIXTRAL_8X7B_32768).modelId + // Get the model configuration and ID, defaulting to LLAMA_3_2_1B_PREVIEW if not found + const modelKey = typeof model === 'string' ? model : 'LLAMA_3_2_1B_PREVIEW' + const modelConfig = GROQ_MODELS[modelKey as GroqModelType] || GROQ_MODELS.LLAMA_3_2_1B_PREVIEW + const modelId = modelConfig.modelId // Prepare the request body const requestBody = { - model: actualModel, + model: modelId, messages: [ { role: 'user', @@ -53,15 +60,10 @@ export const callGroq = async (promptAndTranscript: string, tempPath: string, mo } // Parse the JSON response - const data = (await response.json()) as GroqChatCompletionResponse + const data = await response.json() as GroqChatCompletionResponse // Extract the generated content const content = data.choices[0]?.message?.content - const finishReason = data.choices[0]?.finish_reason - const usedModel = data.model - const usage = data.usage - const { prompt_tokens, completion_tokens, total_tokens } = usage ?? {} - if (!content) { throw new Error('No content generated from the Groq API') } @@ -69,9 +71,16 @@ export const callGroq = async (promptAndTranscript: string, tempPath: string, mo // Write the generated content to the specified output file await writeFile(tempPath, content) - // Log finish reason, used model, and token usage - l.wait(`\n Finish Reason: ${finishReason}\n Model Used: ${usedModel}`) - l.wait(` Token Usage:\n - ${prompt_tokens} prompt tokens\n - ${completion_tokens} completion tokens\n - ${total_tokens} total tokens`) + // Log API results using the standardized logging function + logAPIResults({ + modelName: modelKey, + stopReason: data.choices[0]?.finish_reason ?? 
'unknown', + tokenUsage: { + input: data.usage?.prompt_tokens, + output: data.usage?.completion_tokens, + total: data.usage?.total_tokens + } + }) } catch (error) { // Log any errors that occur during the process err(`Error in callGroq: ${(error as Error).message}`) diff --git a/src/llms/mistral.ts b/src/llms/mistral.ts index c785e10..e86228d 100644 --- a/src/llms/mistral.ts +++ b/src/llms/mistral.ts @@ -4,7 +4,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { Mistral } from '@mistralai/mistralai' import { MISTRAL_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { err, logAPIResults } from '../utils/logging' import type { LLMFunction, MistralModelType } from '../types/llms' /** @@ -31,12 +31,10 @@ export const callMistral: LLMFunction = async ( try { // Select the actual model to use, defaulting to MISTRAL_NEMO if the specified model is not found const actualModel = (MISTRAL_MODELS[model as MistralModelType] || MISTRAL_MODELS.MISTRAL_NEMO).modelId - l.wait(`\n Using Mistral model:\n - ${actualModel}`) // Make API call to Mistral AI for chat completion const response = await mistral.chat.complete({ model: actualModel, - // max_tokens: ?, // Uncomment and set if you want to limit the response length messages: [{ role: 'user', content: promptAndTranscript }], }) @@ -51,16 +49,21 @@ export const callMistral: LLMFunction = async ( } const content = firstChoice.message.content - const finishReason = firstChoice.finishReason ?? 'unknown' - const usage = response.usage ?? { promptTokens: 0, completionTokens: 0, totalTokens: 0 } const contentString = Array.isArray(content) ? content.join('') : content // Write the generated content to the specified output file await writeFile(tempPath, contentString) - // Log finish reason, used model, and token usage - l.wait(`\n Finish Reason: ${finishReason}\n Model Used: ${actualModel}`) - l.wait(` Token Usage:\n - ${usage.promptTokens} prompt tokens\n - ${usage.completionTokens} completion tokens\n - ${usage.totalTokens} total tokens`) + // Log API results using the standardized logging function + logAPIResults({ + modelName: actualModel, + stopReason: firstChoice.finishReason ?? 'unknown', + tokenUsage: { + input: response.usage?.promptTokens, + output: response.usage?.completionTokens, + total: response.usage?.totalTokens + } + }) } catch (error) { // Log any errors that occur during the process diff --git a/src/llms/ollama.ts b/src/llms/ollama.ts index ee9343c..450616f 100644 --- a/src/llms/ollama.ts +++ b/src/llms/ollama.ts @@ -4,7 +4,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { spawn } from 'node:child_process' import { OLLAMA_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { l, err, logAPIResults } from '../utils/logging' import type { LLMFunction, OllamaModelType, OllamaResponse, OllamaTagsResponse } from '../types/llms' /** @@ -20,12 +20,15 @@ import type { LLMFunction, OllamaModelType, OllamaResponse, OllamaTagsResponse } export const callOllama: LLMFunction = async ( promptAndTranscript: string, tempPath: string, - modelName: string = 'LLAMA_3_2_3B' + model: string | OllamaModelType = 'LLAMA_3_2_3B' ) => { try { - // Map the user-friendly name (e.g. 
'LLAMA_3_2_3B') to the actual model ID - const ollamaModelName = OLLAMA_MODELS[modelName as OllamaModelType]?.modelId || 'llama3.2:3b' - l.wait(` - modelName: ${modelName}\n - ollamaModelName: ${ollamaModelName}`) + // Get the model configuration and ID + const modelKey = typeof model === 'string' ? model : 'LLAMA_3_2_3B' + const modelConfig = OLLAMA_MODELS[modelKey as OllamaModelType] || OLLAMA_MODELS.LLAMA_3_2_3B + const ollamaModelName = modelConfig.modelId + + l.wait(` - modelName: ${modelKey}\n - ollamaModelName: ${ollamaModelName}`) // Host & port for Ollama const ollamaHost = env['OLLAMA_HOST'] || 'localhost' @@ -44,9 +47,7 @@ export const callOllama: LLMFunction = async ( if (await checkServer()) { l.wait('\n Ollama server is already running...') } else { - // If not running, attempt to start locally (unless you're in Docker with a different approach) if (ollamaHost === 'ollama') { - // In older multi-container approach, the server might be on a different host. throw new Error('Ollama server is not running. Please ensure the Ollama server is running and accessible.') } else { l.wait('\n Ollama server is not running. Attempting to start...') @@ -56,7 +57,7 @@ export const callOllama: LLMFunction = async ( }) ollamaProcess.unref() - // Wait a few seconds for server to start + // Wait for server to start let attempts = 0 while (attempts < 30) { if (await checkServer()) { @@ -72,7 +73,7 @@ export const callOllama: LLMFunction = async ( } } - // Check if the model is available, pull it if not + // Check and pull model if needed try { const tagsResponse = await fetch(`http://${ollamaHost}:${ollamaPort}/api/tags`) if (!tagsResponse.ok) { @@ -96,6 +97,7 @@ export const callOllama: LLMFunction = async ( if (!pullResponse.body) { throw new Error('Response body is null') } + const reader = pullResponse.body.getReader() const decoder = new TextDecoder() while (true) { @@ -150,6 +152,8 @@ export const callOllama: LLMFunction = async ( const decoder = new TextDecoder() let fullContent = '' let isFirstChunk = true + let totalPromptTokens = 0 + let totalCompletionTokens = 0 while (true) { const { done, value } = await reader.read() @@ -171,8 +175,25 @@ export const callOllama: LLMFunction = async ( fullContent += parsedResponse.message.content } + // Accumulate token counts if available + if (parsedResponse.prompt_eval_count) { + totalPromptTokens = parsedResponse.prompt_eval_count + } + if (parsedResponse.eval_count) { + totalCompletionTokens = parsedResponse.eval_count + } + if (parsedResponse.done) { - l.wait(` - Completed receiving response from Ollama.`) + // Log final results using standardized logging function + logAPIResults({ + modelName: modelKey, + stopReason: 'stop', + tokenUsage: { + input: totalPromptTokens || undefined, + output: totalCompletionTokens || undefined, + total: totalPromptTokens + totalCompletionTokens || undefined + } + }) } } catch (parseError) { err(`Error parsing JSON: ${parseError}`) diff --git a/src/llms/together.ts b/src/llms/together.ts index 6ff4cf2..c779e1a 100644 --- a/src/llms/together.ts +++ b/src/llms/together.ts @@ -3,7 +3,7 @@ import { writeFile } from 'node:fs/promises' import { env } from 'node:process' import { TOGETHER_MODELS } from '../utils/globals' -import { l, err } from '../utils/logging' +import { err, logAPIResults } from '../utils/logging' import type { LLMFunction, TogetherModelType, TogetherResponse } from '../types/llms' /** @@ -17,7 +17,7 @@ import type { LLMFunction, TogetherModelType, TogetherResponse } from '../types/ export const 
callTogether: LLMFunction = async ( promptAndTranscript: string, tempPath: string, - model: string = 'LLAMA_3_2_3B' + model: string | TogetherModelType = 'LLAMA_3_2_3B' ): Promise => { // Check if the TOGETHER_API_KEY environment variable is set if (!env['TOGETHER_API_KEY']) { @@ -25,11 +25,14 @@ export const callTogether: LLMFunction = async ( } try { - const actualModel = (TOGETHER_MODELS[model as TogetherModelType] || TOGETHER_MODELS.LLAMA_3_2_3B).modelId + // Get the model configuration and ID, defaulting to LLAMA_3_2_3B if not found + const modelKey = typeof model === 'string' ? model : 'LLAMA_3_2_3B' + const modelConfig = TOGETHER_MODELS[modelKey as TogetherModelType] || TOGETHER_MODELS.LLAMA_3_2_3B + const modelId = modelConfig.modelId // Prepare the request body const requestBody = { - model: actualModel, + model: modelId, messages: [ { role: 'user', @@ -60,18 +63,24 @@ export const callTogether: LLMFunction = async ( const data = await response.json() as TogetherResponse // Extract the generated content - const content = data.choices[0]?.message?.content ?? '' - const finishReason = data.choices[0]?.finish_reason - const usedModel = data.model - const usage = data.usage - const { prompt_tokens, completion_tokens, total_tokens } = usage + const content = data.choices[0]?.message?.content + if (!content) { + throw new Error('No content generated from the Together AI API') + } // Write the generated content to the specified output file await writeFile(tempPath, content) - // Log finish reason, used model, and token usage - l.wait(`\n Finish Reason: ${finishReason}\n Model Used: ${usedModel}`) - l.wait(` Token Usage:\n - ${prompt_tokens} prompt tokens\n - ${completion_tokens} completion tokens\n - ${total_tokens} total tokens`) + // Log API results using the standardized logging function + logAPIResults({ + modelName: modelKey, + stopReason: data.choices[0]?.finish_reason ?? 'unknown', + tokenUsage: { + input: data.usage.prompt_tokens, + output: data.usage.completion_tokens, + total: data.usage.total_tokens + } + }) } catch (error) { // Log any errors that occur during the process err(`Error in callTogether: ${(error as Error).message}`) diff --git a/src/types/llms.ts b/src/types/llms.ts index e8967e2..e419e49 100644 --- a/src/types/llms.ts +++ b/src/types/llms.ts @@ -1,10 +1,30 @@ // src/types/llms.ts +/** + * Generic type for model configurations that maps model types to their configurations + * @template T The specific model type (e.g., ChatGPTModelType, ClaudeModelType, etc.) + */ +export type ModelConfig = { + [K in T]: ModelConfigValue +} + +export type ModelConfigValue = { + name: string; + modelId: string; + inputCostPer1M: number; // Cost per 1M input tokens + outputCostPer1M: number; // Cost per 1M output tokens +} + /** * Options for Language Models (LLMs) that can be used in the application. */ export type LLMServices = 'chatgpt' | 'claude' | 'cohere' | 'mistral' | 'ollama' | 'gemini' | 'fireworks' | 'together' | 'groq' +export type LLMServiceConfig = { + name: string + value: LLMServices | null +} + /** * Options for LLM processing. */ @@ -46,12 +66,13 @@ export type LLMFunctions = { /** * Available GPT models. */ -export type ChatGPTModelType = 'GPT_4o_MINI' | 'GPT_4o' | 'GPT_4_TURBO' | 'GPT_4' +export type ChatGPTModelType = 'GPT_4o_MINI' | 'GPT_4o' | 'GPT_o1_MINI' +// export type ChatGPTModelType = 'GPT_4o_MINI' | 'GPT_4o' | 'GPT_o1' | 'GPT_o1_MINI' /** * Available Claude models. 
*/ -export type ClaudeModelType = 'CLAUDE_3_5_SONNET' | 'CLAUDE_3_OPUS' | 'CLAUDE_3_SONNET' | 'CLAUDE_3_HAIKU' +export type ClaudeModelType = 'CLAUDE_3_5_SONNET' | 'CLAUDE_3_5_HAIKU' | 'CLAUDE_3_OPUS' | 'CLAUDE_3_SONNET' | 'CLAUDE_3_HAIKU' /** * Available Cohere models. @@ -61,17 +82,17 @@ export type CohereModelType = 'COMMAND_R' | 'COMMAND_R_PLUS' /** * Available Gemini models. */ -export type GeminiModelType = 'GEMINI_1_5_FLASH' | 'GEMINI_1_5_PRO' +export type GeminiModelType = 'GEMINI_1_5_FLASH' | 'GEMINI_1_5_FLASH_8B' | 'GEMINI_1_5_PRO' /** * Available Mistral AI models. */ -export type MistralModelType = 'MIXTRAL_8x7b' | 'MIXTRAL_8x22b' | 'MISTRAL_LARGE' | 'MISTRAL_NEMO' +export type MistralModelType = 'MIXTRAL_8x7B' | 'MIXTRAL_8x22B' | 'MISTRAL_LARGE' | 'MISTRAL_SMALL' | 'MINISTRAL_8B' | 'MINISTRAL_3B' | 'MISTRAL_NEMO' | 'MISTRAL_7B' /** * Available Fireworks models. */ -export type FireworksModelType = 'LLAMA_3_1_405B' | 'LLAMA_3_1_70B' | 'LLAMA_3_1_8B' | 'LLAMA_3_2_3B' | 'LLAMA_3_2_1B' | 'QWEN_2_5_72B' +export type FireworksModelType = 'LLAMA_3_1_405B' | 'LLAMA_3_1_70B' | 'LLAMA_3_1_8B' | 'LLAMA_3_2_3B' | 'QWEN_2_5_72B' /** * Available Together models. @@ -81,7 +102,7 @@ export type TogetherModelType = 'LLAMA_3_2_3B' | 'LLAMA_3_1_405B' | 'LLAMA_3_1_7 /** * Available Groq models. */ -export type GroqModelType = 'LLAMA_3_1_70B_VERSATILE' | 'LLAMA_3_1_8B_INSTANT' | 'LLAMA_3_2_1B_PREVIEW' | 'LLAMA_3_2_3B_PREVIEW' | 'MIXTRAL_8X7B_32768' +export type GroqModelType = 'LLAMA_3_2_1B_PREVIEW' | 'LLAMA_3_2_3B_PREVIEW' | 'LLAMA_3_3_70B_VERSATILE' | 'LLAMA_3_1_8B_INSTANT' | 'MIXTRAL_8X7B_INSTRUCT' /** * Local model with Ollama. diff --git a/src/types/logging.ts b/src/types/logging.ts new file mode 100644 index 0000000..4b24c7a --- /dev/null +++ b/src/types/logging.ts @@ -0,0 +1,32 @@ +// src/types/logging.ts + +export interface TokenUsage { + input: number | undefined + output: number | undefined + total: number | undefined +} + +export interface CostCalculation { + inputCost: number | undefined + outputCost: number | undefined + totalCost: number | undefined +} + +export interface APILogInfo { + modelName: string + stopReason: string + tokenUsage: TokenUsage +} + +/** + * Interface for chainable logger with style methods. + */ +export interface ChainableLogger { + (...args: any[]): void + step: (...args: any[]) => void + dim: (...args: any[]) => void + success: (...args: any[]) => void + opts: (...args: any[]) => void + wait: (...args: any[]) => void + final: (...args: any[]) => void + } \ No newline at end of file diff --git a/src/types/transcription.ts b/src/types/transcription.ts index dfb6c1d..7ef5daf 100644 --- a/src/types/transcription.ts +++ b/src/types/transcription.ts @@ -6,6 +6,12 @@ */ export type TranscriptServices = 'whisper' | 'whisperDocker' | 'deepgram' | 'assembly' +export type TranscriptServiceConfig = { + name: string + value: TranscriptServices + isWhisper?: boolean +} + /** * Available Whisper model types with varying sizes and capabilities. 
*/ diff --git a/src/utils/globals.ts b/src/utils/globals.ts index f519e92..a938708 100644 --- a/src/utils/globals.ts +++ b/src/utils/globals.ts @@ -8,9 +8,8 @@ import { XMLParser } from 'fast-xml-parser' import { exec, execFile } from 'node:child_process' import { promisify } from 'node:util' -import type { LLMServices } from '../types/llms' -import type { TranscriptServices, WhisperModelType } from '../types/transcription' -import type { ChatGPTModelType, ClaudeModelType, CohereModelType, GeminiModelType, MistralModelType, OllamaModelType, TogetherModelType, FireworksModelType, GroqModelType } from '../types/llms' +import type { WhisperModelType, TranscriptServiceConfig } from '../types/transcription' +import type { ModelConfig, ChatGPTModelType, ClaudeModelType, CohereModelType, GeminiModelType, MistralModelType, OllamaModelType, TogetherModelType, FireworksModelType, GroqModelType, LLMServiceConfig, LLMServices } from '../types/llms' export const execPromise = promisify(exec) export const execFilePromise = promisify(execFile) @@ -95,31 +94,6 @@ export const PROCESS_CHOICES = [ { name: 'Podcast RSS Feed', value: 'rss' }, ] -type LLMServiceConfig = { - name: string - value: LLMServices | null -} - -export const LLM_SERVICES: Record = { - SKIP: { name: 'Skip LLM Processing', value: null }, - OLLAMA: { name: 'Ollama (local inference)', value: 'ollama' }, - CHATGPT: { name: 'OpenAI ChatGPT', value: 'chatgpt' }, - CLAUDE: { name: 'Anthropic Claude', value: 'claude' }, - GEMINI: { name: 'Google Gemini', value: 'gemini' }, - COHERE: { name: 'Cohere', value: 'cohere' }, - MISTRAL: { name: 'Mistral', value: 'mistral' }, - FIREWORKS: { name: 'Fireworks AI', value: 'fireworks' }, - TOGETHER: { name: 'Together AI', value: 'together' }, - GROQ: { name: 'Groq', value: 'groq' }, -} as const - -// Modify the type definition for TRANSCRIPT_SERVICES -type TranscriptServiceConfig = { - name: string - value: TranscriptServices - isWhisper?: boolean -} - export const TRANSCRIPT_SERVICES: Record = { WHISPER: { name: 'Whisper.cpp', value: 'whisper', isWhisper: true }, WHISPER_DOCKER: { name: 'Whisper.cpp (Docker)', value: 'whisperDocker', isWhisper: true }, @@ -127,10 +101,6 @@ export const TRANSCRIPT_SERVICES: Record = { ASSEMBLY: { name: 'AssemblyAI', value: 'assembly' }, } as const -export const LLM_OPTIONS = Object.values(LLM_SERVICES) - .map(service => service.value) - .filter((value): value is LLMServices => value !== null) - export const TRANSCRIPT_OPTIONS = Object.values(TRANSCRIPT_SERVICES) .map(service => service.value) @@ -159,106 +129,361 @@ export const WHISPER_MODELS: Record = { 'turbo': 'ggml-large-v3-turbo.bin' } +export const LLM_SERVICES: Record = { + SKIP: { name: 'Skip LLM Processing', value: null }, + OLLAMA: { name: 'Ollama (local inference)', value: 'ollama' }, + CHATGPT: { name: 'OpenAI ChatGPT', value: 'chatgpt' }, + CLAUDE: { name: 'Anthropic Claude', value: 'claude' }, + GEMINI: { name: 'Google Gemini', value: 'gemini' }, + COHERE: { name: 'Cohere', value: 'cohere' }, + MISTRAL: { name: 'Mistral', value: 'mistral' }, + FIREWORKS: { name: 'Fireworks AI', value: 'fireworks' }, + TOGETHER: { name: 'Together AI', value: 'together' }, + GROQ: { name: 'Groq', value: 'groq' }, +} as const + +export const LLM_OPTIONS = Object.values(LLM_SERVICES) + .map(service => service.value) + .filter((value): value is LLMServices => value !== null) + /** - * Ollama model configuration with both display names and model identifiers - * @type {Record} + * Configuration for Ollama models, mapping model 
types to their display names and identifiers. + * Each model has a human-readable name and a corresponding model identifier used for API calls. + * @type {ModelConfig} */ -export const OLLAMA_MODELS: Record = { - LLAMA_3_2_1B: { name: 'LLAMA 3 2 1B', modelId: 'llama3.2:1b' }, - LLAMA_3_2_3B: { name: 'LLAMA 3 2 3B', modelId: 'llama3.2:3b' }, - GEMMA_2_2B: { name: 'GEMMA 2 2B', modelId: 'gemma2:2b' }, - PHI_3_5: { name: 'PHI 3 5', modelId: 'phi3.5:3.8b' }, - QWEN_2_5_1B: { name: 'QWEN 2 5 1B', modelId: 'qwen2.5:1.5b' }, - QWEN_2_5_3B: { name: 'QWEN 2 5 3B', modelId: 'qwen2.5:3b' }, +export const OLLAMA_MODELS: ModelConfig = { + LLAMA_3_2_1B: { + name: 'LLAMA 3 2 1B', + modelId: 'llama3.2:1b', + inputCostPer1M: 0.00, + outputCostPer1M: 0.00 + }, + LLAMA_3_2_3B: { + name: 'LLAMA 3 2 3B', + modelId: 'llama3.2:3b', + inputCostPer1M: 0.00, + outputCostPer1M: 0.00 + }, + GEMMA_2_2B: { + name: 'GEMMA 2 2B', + modelId: 'gemma2:2b', + inputCostPer1M: 0.00, + outputCostPer1M: 0.00 + }, + PHI_3_5: { + name: 'PHI 3 5', + modelId: 'phi3.5:3.8b', + inputCostPer1M: 0.00, + outputCostPer1M: 0.00 + }, + QWEN_2_5_1B: { + name: 'QWEN 2 5 1B', + modelId: 'qwen2.5:1.5b', + inputCostPer1M: 0.00, + outputCostPer1M: 0.00 + }, + QWEN_2_5_3B: { + name: 'QWEN 2 5 3B', + modelId: 'qwen2.5:3b', + inputCostPer1M: 0.00, + outputCostPer1M: 0.00 + }, } /** - * Unified ChatGPT model configuration with both display names and model identifiers - * @type {Record} + * Configuration for ChatGPT models, mapping model types to their display names and identifiers. + * Includes various GPT-4 models with different capabilities and performance characteristics. + * @type {ModelConfig} */ -export const GPT_MODELS: Record = { - GPT_4o_MINI: { name: 'GPT 4 o MINI', modelId: 'gpt-4o-mini' }, - GPT_4o: { name: 'GPT 4 o', modelId: 'gpt-4o' }, - GPT_4_TURBO: { name: 'GPT 4 TURBO', modelId: 'gpt-4-turbo' }, - GPT_4: { name: 'GPT 4', modelId: 'gpt-4' }, +export const GPT_MODELS: ModelConfig = { + GPT_4o_MINI: { + name: 'GPT 4 o MINI', + modelId: 'gpt-4o-mini', + inputCostPer1M: 0.15, + outputCostPer1M: 0.60 + }, + GPT_4o: { + name: 'GPT 4 o', + modelId: 'gpt-4o', + inputCostPer1M: 2.50, + outputCostPer1M: 10.00 + }, + GPT_o1_MINI: { + name: 'GPT o1 MINI', + modelId: 'o1-mini', + inputCostPer1M: 3.00, + outputCostPer1M: 12.00 + } } /** - * Unified Claude model configuration with both display names and model identifiers - * @type {Record} + * Configuration for Claude models, mapping model types to their display names and identifiers. + * Includes Anthropic's Claude 3 family of models with varying capabilities and performance profiles. 
+ * @type {ModelConfig} */ -export const CLAUDE_MODELS: Record = { - CLAUDE_3_5_SONNET: { name: 'Claude 3.5 Sonnet', modelId: 'claude-3-5-sonnet-20240620' }, - CLAUDE_3_OPUS: { name: 'Claude 3 Opus', modelId: 'claude-3-opus-20240229' }, - CLAUDE_3_SONNET: { name: 'Claude 3 Sonnet', modelId: 'claude-3-sonnet-20240229' }, - CLAUDE_3_HAIKU: { name: 'Claude 3 Haiku', modelId: 'claude-3-haiku-20240307' }, +export const CLAUDE_MODELS: ModelConfig = { + CLAUDE_3_5_SONNET: { + name: 'Claude 3.5 Sonnet', + modelId: 'claude-3-5-sonnet-latest', + inputCostPer1M: 3.00, + outputCostPer1M: 15.00 + }, + CLAUDE_3_5_HAIKU: { + name: 'Claude 3.5 Haiku', + modelId: 'claude-3-5-haiku-latest', + inputCostPer1M: 0.80, + outputCostPer1M: 4.00 + }, + CLAUDE_3_OPUS: { + name: 'Claude 3 Opus', + modelId: 'claude-3-opus-latest', + inputCostPer1M: 15.00, + outputCostPer1M: 75.00 + }, + CLAUDE_3_SONNET: { + name: 'Claude 3 Sonnet', + modelId: 'claude-3-sonnet-20240229', + inputCostPer1M: 3.00, + outputCostPer1M: 15.00 + }, + CLAUDE_3_HAIKU: { + name: 'Claude 3 Haiku', + modelId: 'claude-3-haiku-20240307', + inputCostPer1M: 0.25, + outputCostPer1M: 1.25 + }, } /** - * Unified Gemini model configuration with both display names and model identifiers - * @type {Record} + * Configuration for Google Gemini models, mapping model types to their display names and identifiers. + * Includes Gemini 1.0 and 1.5 models optimized for different use cases. + * @type {ModelConfig} */ -export const GEMINI_MODELS: Record = { - GEMINI_1_5_FLASH: { name: 'Gemini 1.5 Flash', modelId: 'gemini-1.5-flash' }, - GEMINI_1_5_PRO: { name: 'Gemini 1.5 Pro', modelId: 'gemini-1.5-pro-exp-0827' }, +export const GEMINI_MODELS: ModelConfig = { + GEMINI_1_5_FLASH_8B: { + name: 'Gemini 1.5 Flash-8B', + modelId: 'gemini-1.5-flash-8b', + inputCostPer1M: 0.075, + outputCostPer1M: 0.30 + }, + GEMINI_1_5_FLASH: { + name: 'Gemini 1.5 Flash', + modelId: 'gemini-1.5-flash', + inputCostPer1M: 0.15, + outputCostPer1M: 0.60 + }, + GEMINI_1_5_PRO: { + name: 'Gemini 1.5 Pro', + modelId: 'gemini-1.5-pro', + inputCostPer1M: 2.50, + outputCostPer1M: 10.00 + }, } /** -* Unified Cohere model configuration with both display names and model identifiers -* @type {Record} -*/ -export const COHERE_MODELS: Record = { - COMMAND_R: { name: 'Command R', modelId: 'command-r' }, - COMMAND_R_PLUS: { name: 'Command R Plus', modelId: 'command-r-plus' }, + * Configuration for Cohere models, mapping model types to their display names and identifiers. + * Features Command models specialized for different tasks and performance levels. + * @type {ModelConfig} + */ +export const COHERE_MODELS: ModelConfig = { + COMMAND_R: { + name: 'Command R', + modelId: 'command-r', + inputCostPer1M: 0.15, + outputCostPer1M: 0.60 + }, + COMMAND_R_PLUS: { + name: 'Command R Plus', + modelId: 'command-r-plus', + inputCostPer1M: 2.50, + outputCostPer1M: 10.00 + }, } /** -* Unified Mistral model configuration with both display names and model identifiers -* @type {Record} -*/ -export const MISTRAL_MODELS: Record = { - MIXTRAL_8x7b: { name: 'Mixtral 8x7b', modelId: 'open-mixtral-8x7b' }, - MIXTRAL_8x22b: { name: 'Mixtral 8x22b', modelId: 'open-mixtral-8x22b' }, - MISTRAL_LARGE: { name: 'Mistral Large', modelId: 'mistral-large-latest' }, - MISTRAL_NEMO: { name: 'Mistral Nemo', modelId: 'open-mistral-nemo' }, + * Configuration for Mistral AI models, mapping model types to their display names and identifiers. + * Includes Mixtral, Mistral, and Ministral models with various parameter sizes and capabilities. 
+ * @type {ModelConfig} + */ +export const MISTRAL_MODELS: ModelConfig = { + MIXTRAL_8x7B: { + name: 'Mixtral 8x7B', + modelId: 'open-mixtral-8x7b', + inputCostPer1M: 0.70, + outputCostPer1M: 0.70 + }, + MIXTRAL_8x22B: { + name: 'Mixtral 8x22B', + modelId: 'open-mixtral-8x22b', + inputCostPer1M: 2.00, + outputCostPer1M: 6.00 + }, + MISTRAL_LARGE: { + name: 'Mistral Large', + modelId: 'mistral-large-latest', + inputCostPer1M: 2.00, + outputCostPer1M: 6.00 + }, + MISTRAL_SMALL: { + name: 'Mistral Small', + modelId: 'mistral-small-latest', + inputCostPer1M: 0.20, + outputCostPer1M: 0.60 + }, + MINISTRAL_8B: { + name: 'Ministral 8B', + modelId: 'ministral-8b-latest', + inputCostPer1M: 0.10, + outputCostPer1M: 0.10 + }, + MINISTRAL_3B: { + name: 'Ministral 3B', + modelId: 'ministral-3b-latest', + inputCostPer1M: 0.04, + outputCostPer1M: 0.04 + }, + MISTRAL_NEMO: { + name: 'Mistral NeMo', + modelId: 'open-mistral-nemo', + inputCostPer1M: 0.15, + outputCostPer1M: 0.15 + }, + MISTRAL_7B: { + name: 'Mistral 7B', + modelId: 'open-mistral-7b', + inputCostPer1M: 0.25, + outputCostPer1M: 0.25 + }, } /** -* Unified Fireworks model configuration with both display names and model identifiers -* @type {Record} -*/ -export const FIREWORKS_MODELS: Record = { - LLAMA_3_1_405B: { name: 'LLAMA 3 1 405B', modelId: 'accounts/fireworks/models/llama-v3p1-405b-instruct' }, - LLAMA_3_1_70B: { name: 'LLAMA 3 1 70B', modelId: 'accounts/fireworks/models/llama-v3p1-70b-instruct' }, - LLAMA_3_1_8B: { name: 'LLAMA 3 1 8B', modelId: 'accounts/fireworks/models/llama-v3p1-8b-instruct' }, - LLAMA_3_2_3B: { name: 'LLAMA 3 2 3B', modelId: 'accounts/fireworks/models/llama-v3p2-3b-instruct' }, - LLAMA_3_2_1B: { name: 'LLAMA 3 2 1B', modelId: 'accounts/fireworks/models/llama-v3p2-1b-instruct' }, - QWEN_2_5_72B: { name: 'QWEN 2 5 72B', modelId: 'accounts/fireworks/models/qwen2p5-72b-instruct' }, + * Configuration for Fireworks AI models, mapping model types to their display names and identifiers. + * Features various LLaMA and Qwen models optimized for different use cases. 
+ * @type {ModelConfig} + */ +export const FIREWORKS_MODELS: ModelConfig = { + LLAMA_3_1_405B: { + name: 'LLAMA 3 1 405B', + modelId: 'accounts/fireworks/models/llama-v3p1-405b-instruct', + inputCostPer1M: 3.00, + outputCostPer1M: 3.00 + }, + LLAMA_3_1_70B: { + name: 'LLAMA 3 1 70B', + modelId: 'accounts/fireworks/models/llama-v3p1-70b-instruct', + inputCostPer1M: 0.90, + outputCostPer1M: 0.90 + }, + LLAMA_3_1_8B: { + name: 'LLAMA 3 1 8B', + modelId: 'accounts/fireworks/models/llama-v3p1-8b-instruct', + inputCostPer1M: 0.20, + outputCostPer1M: 0.20 + }, + LLAMA_3_2_3B: { + name: 'LLAMA 3 2 3B', + modelId: 'accounts/fireworks/models/llama-v3p2-3b-instruct', + inputCostPer1M: 0.10, + outputCostPer1M: 0.10 + }, + QWEN_2_5_72B: { + name: 'QWEN 2 5 72B', + modelId: 'accounts/fireworks/models/qwen2p5-72b-instruct', + inputCostPer1M: 0.90, + outputCostPer1M: 0.90 + }, } /** -* Unified Together model configuration with both display names and model identifiers -* @type {Record} -*/ -export const TOGETHER_MODELS: Record = { - LLAMA_3_2_3B: { name: 'LLAMA 3 2 3B', modelId: 'meta-llama/Llama-3.2-3B-Instruct-Turbo' }, - LLAMA_3_1_405B: { name: 'LLAMA 3 1 405B', modelId: 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' }, - LLAMA_3_1_70B: { name: 'LLAMA 3 1 70B', modelId: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' }, - LLAMA_3_1_8B: { name: 'LLAMA 3 1 8B', modelId: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' }, - GEMMA_2_27B: { name: 'Gemma 2 27B', modelId: 'google/gemma-2-27b-it' }, - GEMMA_2_9B: { name: 'Gemma 2 9B', modelId: 'google/gemma-2-9b-it' }, - QWEN_2_5_72B: { name: 'QWEN 2 5 72B', modelId: 'Qwen/Qwen2.5-72B-Instruct-Turbo' }, - QWEN_2_5_7B: { name: 'QWEN 2 5 7B', modelId: 'Qwen/Qwen2.5-7B-Instruct-Turbo' }, + * Configuration for Together AI models, mapping model types to their display names and identifiers. + * Includes a diverse range of LLaMA, Gemma, and Qwen models with different parameter counts. 
+ * @type {ModelConfig} + */ +export const TOGETHER_MODELS: ModelConfig = { + LLAMA_3_2_3B: { + name: 'LLAMA 3 2 3B', + modelId: 'meta-llama/Llama-3.2-3B-Instruct-Turbo', + inputCostPer1M: 0.06, + outputCostPer1M: 0.06 + }, + LLAMA_3_1_405B: { + name: 'LLAMA 3 1 405B', + modelId: 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo', + inputCostPer1M: 3.50, + outputCostPer1M: 3.50 + }, + LLAMA_3_1_70B: { + name: 'LLAMA 3 1 70B', + modelId: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', + inputCostPer1M: 0.88, + outputCostPer1M: 0.88 + }, + LLAMA_3_1_8B: { + name: 'LLAMA 3 1 8B', + modelId: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo', + inputCostPer1M: 0.18, + outputCostPer1M: 0.18 + }, + GEMMA_2_27B: { + name: 'Gemma 2 27B', + modelId: 'google/gemma-2-27b-it', + inputCostPer1M: 0.80, + outputCostPer1M: 0.80 + }, + GEMMA_2_9B: { + name: 'Gemma 2 9B', + modelId: 'google/gemma-2-9b-it', + inputCostPer1M: 0.30, + outputCostPer1M: 0.30 + }, + QWEN_2_5_72B: { + name: 'QWEN 2 5 72B', + modelId: 'Qwen/Qwen2.5-72B-Instruct-Turbo', + inputCostPer1M: 1.20, + outputCostPer1M: 1.20 + }, + QWEN_2_5_7B: { + name: 'QWEN 2 5 7B', + modelId: 'Qwen/Qwen2.5-7B-Instruct-Turbo', + inputCostPer1M: 0.30, + outputCostPer1M: 0.30 + }, } /** -* Unified Groq model configuration with both display names and model identifiers -* @type {Record} -*/ -export const GROQ_MODELS: Record = { - LLAMA_3_1_70B_VERSATILE: { name: 'LLAMA 3 1 70B Versatile', modelId: 'llama-3.1-70b-versatile' }, - LLAMA_3_1_8B_INSTANT: { name: 'LLAMA 3 1 8B Instant', modelId: 'llama-3.1-8b-instant' }, - LLAMA_3_2_1B_PREVIEW: { name: 'LLAMA 3 2 1B Preview', modelId: 'llama-3.2-1b-preview' }, - LLAMA_3_2_3B_PREVIEW: { name: 'LLAMA 3 2 3B Preview', modelId: 'llama-3.2-3b-preview' }, - MIXTRAL_8X7B_32768: { name: 'Mixtral 8x7b 32768', modelId: 'mixtral-8x7b-32768' }, + * Configuration for Groq models, mapping model types to their display names and identifiers. + * Features optimized versions of LLaMA, Mixtral, and Gemma models for high-performance inference. + * @type {ModelConfig} + */ +export const GROQ_MODELS: ModelConfig = { + LLAMA_3_2_1B_PREVIEW: { + name: 'Llama 3.2 1B (Preview) 8k', + modelId: 'llama-3.2-1b-preview', + inputCostPer1M: 0.04, + outputCostPer1M: 0.04 + }, + LLAMA_3_2_3B_PREVIEW: { + name: 'Llama 3.2 3B (Preview) 8k', + modelId: 'llama-3.2-3b-preview', + inputCostPer1M: 0.06, + outputCostPer1M: 0.06 + }, + LLAMA_3_3_70B_VERSATILE: { + name: 'Llama 3.3 70B Versatile 128k', + modelId: 'llama-3.3-70b-versatile', + inputCostPer1M: 0.59, + outputCostPer1M: 0.79 + }, + LLAMA_3_1_8B_INSTANT: { + name: 'Llama 3.1 8B Instant 128k', + modelId: 'llama-3.1-8b-instant', + inputCostPer1M: 0.05, + outputCostPer1M: 0.08 + }, + MIXTRAL_8X7B_INSTRUCT: { + name: 'Mixtral 8x7B Instruct 32k', + modelId: 'mixtral-8x7b-32768', + inputCostPer1M: 0.24, + outputCostPer1M: 0.24 + }, } \ No newline at end of file diff --git a/src/utils/logging.ts b/src/utils/logging.ts index 5ab92e5..f20c42f 100644 --- a/src/utils/logging.ts +++ b/src/utils/logging.ts @@ -1,19 +1,165 @@ // src/utils/logging.ts import type { ProcessingOptions } from '../types/process' +import type { ModelConfigValue } from '../types/llms' +import type { TokenUsage, CostCalculation, APILogInfo, ChainableLogger } from '../types/logging' +import { + GPT_MODELS, CLAUDE_MODELS, GEMINI_MODELS, COHERE_MODELS, MISTRAL_MODELS, OLLAMA_MODELS, FIREWORKS_MODELS, TOGETHER_MODELS, GROQ_MODELS +} from './globals' import chalk from 'chalk' /** - * Interface for chainable logger with style methods. 
+ * All available model configurations combined */ -export interface ChainableLogger { - (...args: any[]): void - step: (...args: any[]) => void - dim: (...args: any[]) => void - success: (...args: any[]) => void - opts: (...args: any[]) => void - wait: (...args: any[]) => void - final: (...args: any[]) => void +const ALL_MODELS: { [key: string]: ModelConfigValue } = { + ...GPT_MODELS, + ...CLAUDE_MODELS, + ...GEMINI_MODELS, + ...COHERE_MODELS, + ...MISTRAL_MODELS, + ...OLLAMA_MODELS, + ...FIREWORKS_MODELS, + ...TOGETHER_MODELS, + ...GROQ_MODELS +} + +/** + * Finds the model configuration based on the model key + * @param modelKey - The key/name of the model (e.g., 'LLAMA_3_2_3B') + * @returns The model configuration if found, undefined otherwise + */ +function findModelConfig(modelKey: string) { + // First try to find the model directly in our combined models + const model = ALL_MODELS[modelKey] + if (model) return model + + // If not found by key, try matching by model ID as a fallback + return Object.values(ALL_MODELS).find(model => + model.modelId.toLowerCase() === modelKey.toLowerCase() + ) +} + +/** + * Determines if a cost is effectively zero + * @param cost - The cost to check + * @returns true if the cost is zero or very close to zero + */ +function isEffectivelyZero(cost: number): boolean { + return Math.abs(cost) < 0.00001 +} + +/** + * Calculates the cost for token usage based on the model's pricing + * @param modelKey - The key/name of the model + * @param tokenUsage - Object containing token usage information + * @returns Object containing calculated costs + */ +function calculateCosts(modelKey: string, tokenUsage: TokenUsage): CostCalculation { + const modelConfig = findModelConfig(modelKey) + + if (!modelConfig) { + console.warn(`Warning: Could not find cost configuration for model: ${modelKey}`) + return { + inputCost: undefined, + outputCost: undefined, + totalCost: undefined + } + } + + // If both costs per million are zero, return all zeros + if (modelConfig.inputCostPer1M === 0 && modelConfig.outputCostPer1M === 0) { + return { + inputCost: 0, + outputCost: 0, + totalCost: 0 + } + } + + // Calculate costs if token usage is available + const inputCost = tokenUsage.input + ? (tokenUsage.input / 1_000_000) * modelConfig.inputCostPer1M + : undefined + + const outputCost = tokenUsage.output + ? (tokenUsage.output / 1_000_000) * modelConfig.outputCostPer1M + : undefined + + // Calculate total cost only if both input and output costs are available + const totalCost = inputCost !== undefined && outputCost !== undefined + ? inputCost + outputCost + : undefined + + // Check if costs are effectively zero + if (inputCost !== undefined && isEffectivelyZero(inputCost)) { + return { + inputCost: 0, + outputCost: 0, + totalCost: 0 + } + } + + return { + inputCost, + outputCost, + totalCost + } +} + +/** + * Formats a cost value to a standardized string representation + * @param cost - The cost value to format + * @returns Formatted cost string + */ +function formatCost(cost: number | undefined): string { + if (cost === undefined) return 'N/A' + if (cost === 0) return '$0.0000' + return `$${cost.toFixed(4)}` +} + +/** + * Logs API call results in a standardized format across different LLM providers. + * Includes token usage and cost calculations. 
+ * @param info - Object containing model info, stop reason, and token usage + */ +export function logAPIResults(info: APILogInfo): void { + const { modelName, stopReason, tokenUsage } = info + + // Get model display name if available, otherwise use the provided name + const modelConfig = findModelConfig(modelName) + const displayName = modelConfig?.name ?? modelName + + // Log stop/finish reason and model + l.wait(` - ${stopReason ? `${stopReason} Reason` : 'Status'}: ${stopReason}\n - Model: ${displayName}`) + + // Format token usage string based on available data + const tokenLines = [] + if (tokenUsage.input) tokenLines.push(`${tokenUsage.input} input tokens`) + if (tokenUsage.output) tokenLines.push(`${tokenUsage.output} output tokens`) + if (tokenUsage.total) tokenLines.push(`${tokenUsage.total} total tokens`) + + // Log token usage if any data is available + if (tokenLines.length > 0) { + l.wait(` - Token Usage:\n - ${tokenLines.join('\n - ')}`) + } + + // Calculate and log costs + const costs = calculateCosts(modelName, tokenUsage) + const costLines = [] + + if (costs.inputCost !== undefined) { + costLines.push(`Input cost: ${formatCost(costs.inputCost)}`) + } + if (costs.outputCost !== undefined) { + costLines.push(`Output cost: ${formatCost(costs.outputCost)}`) + } + if (costs.totalCost !== undefined) { + costLines.push(`Total cost: ${chalk.bold(formatCost(costs.totalCost))}`) + } + + // Log costs if any calculations were successful + if (costLines.length > 0) { + l.wait(` - Cost Breakdown:\n - ${costLines.join('\n - ')}`) + } } /** diff --git a/test/all.test.ts b/test/all.test.ts index 219c9d1..72e8ab6 100644 --- a/test/all.test.ts +++ b/test/all.test.ts @@ -55,13 +55,13 @@ const commands = [ }, { // Process a local audio file with multiple prompt sections, Whisper 'tiny' model, and Ollama. - cmd: 'npm run as -- --file "content/audio.mp3" --prompt titles summary shortChapters takeaways questions --whisper tiny --ollama', + cmd: 'npm run as -- --file "content/audio.mp3" --prompt titles summary shortChapters takeaways questions --whisper tiny --ollama LLAMA_3_2_1B', expectedFile: 'audio-ollama-shownotes.md', newName: '10-all-prompts-ollama-shownotes.md' }, { // Process playlist videos with titles and longChapters prompts, tiny Whisper model, and Ollama for LLM processing. - cmd: 'npm run as -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --prompt titles longChapters --whisper tiny --ollama', + cmd: 'npm run as -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --prompt titles longChapters --whisper tiny --ollama LLAMA_3_2_1B', expectedFiles: [ { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '11-prompt-whisper-ollama-shownotes.md' }, { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '12-prompt-whisper-ollama-shownotes.md' } @@ -69,7 +69,7 @@ const commands = [ }, { // Process multiple YouTube videos from URLs with title prompts, Whisper 'tiny' model, and Ollama. 
- cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --ollama', + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --ollama LLAMA_3_2_1B', expectedFiles: [ { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '13-prompt-whisper-ollama-shownotes.md' }, { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '14-prompt-whisper-ollama-shownotes.md' } @@ -88,26 +88,26 @@ const commands = [ newName: '16-ajcwebdev-rss-info.json', }, { - cmd: 'npm run docker-cli -- --video "https://www.youtube.com/watch?v=MORMZXEaONk"', + cmd: 'npm run docker-cli -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --whisper base', expectedFile: '2024-09-24-ep0-fsjam-podcast-prompt.md', newName: '17-docker-video-default.md' }, { - cmd: 'npm run docker-cli -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr"', + cmd: 'npm run docker-cli -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --whisper base', expectedFiles: [ { file: '2024-09-24-ep1-fsjam-podcast-prompt.md', newName: '18-docker-playlist-default.md' }, { file: '2024-09-24-ep0-fsjam-podcast-prompt.md', newName: '19-docker-playlist-default.md' } ] }, { - cmd: 'npm run docker-cli -- --urls "content/example-urls.md"', + cmd: 'npm run docker-cli -- --urls "content/example-urls.md" --whisper base', expectedFiles: [ { file: '2024-09-24-ep1-fsjam-podcast-prompt.md', newName: '20-docker-urls-default.md' }, { file: '2024-09-24-ep0-fsjam-podcast-prompt.md', newName: '21-docker-urls-default.md' } ] }, { - cmd: 'npm run docker-cli -- --file "content/audio.mp3"', + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --whisper base', expectedFile: 'audio-prompt.md', newName: '22-docker-file-default.md' }, @@ -122,31 +122,26 @@ const commands = [ newName: '24-docker-whisper-tiny.md' }, { - cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary mediumChapters takeaways questions', + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --whisper base --prompt titles summary mediumChapters takeaways questions', expectedFile: 'audio-prompt.md', newName: '25-docker-all-prompts.md' }, { - cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters takeaways questions --whisper tiny --ollama LLAMA_3_2_1B', - expectedFile: 'audio-ollama-shownotes.md', - newName: '26-docker-all-prompts-ollama-shownotes.md' - }, - { - cmd: 'npm run docker-cli -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --prompt titles --whisper tiny --ollama LLAMA_3_2_1B', + cmd: 'npm run docker-cli -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --prompt titles --whisper tiny --chatgpt', expectedFiles: [ - { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '27-docker-prompt-whisper-ollama-shownotes.md' }, - { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '28-docker-prompt-whisper-ollama-shownotes.md' } + { file: '2024-09-24-ep1-fsjam-podcast-chatgpt-shownotes.md', newName: '27-docker-prompt-whisper-chatgpt-shownotes.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-chatgpt-shownotes.md', newName: '28-docker-prompt-whisper-chatgpt-shownotes.md' } ] }, { - cmd: 'npm run docker-cli -- --urls "content/example-urls.md" --prompt titles --whisper tiny --ollama LLAMA_3_2_1B', + cmd: 'npm run docker-cli -- --urls "content/example-urls.md" --prompt titles --whisper tiny 
--claude', expectedFiles: [ - { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '29-docker-prompt-whisper-ollama-shownotes.md' }, - { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '30-docker-prompt-whisper-ollama-shownotes.md' } + { file: '2024-09-24-ep1-fsjam-podcast-claude-shownotes.md', newName: '29-docker-prompt-whisper-claude-shownotes.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-claude-shownotes.md', newName: '30-docker-prompt-whisper-claude-shownotes.md' } ] }, { - cmd: 'npm run docker-cli -- --rss "https://ajcwebdev.substack.com/feed"', + cmd: 'npm run docker-cli -- --rss "https://ajcwebdev.substack.com/feed" --whisper base', expectedFile: '2021-05-10-thoughts-on-lambda-school-layoffs-prompt.md', newName: '31-docker-rss-default.md' }, @@ -255,7 +250,7 @@ const commands = [ }, { // Process video using Deepgram and Llama. - cmd: 'npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --deepgram --ollama', + cmd: 'npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --deepgram --ollama LLAMA_3_2_1B', expectedFile: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '50-deepgram-ollama-shownotes.md' }, @@ -267,7 +262,7 @@ const commands = [ }, { // Process video using AssemblyAI and Llama. - cmd: 'npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --assembly --ollama', + cmd: 'npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --assembly --ollama LLAMA_3_2_1B', expectedFile: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '52-assembly-ollama-shownotes.md' }, diff --git a/test/docker.test.ts b/test/docker.test.ts index 9a0f759..532d66f 100644 --- a/test/docker.test.ts +++ b/test/docker.test.ts @@ -7,73 +7,89 @@ import { existsSync, renameSync } from 'node:fs' import { join } from 'node:path' const commands = [ + { + cmd: 'npm run docker-cli -- --rss "https://ajcwebdev.substack.com/feed" --info', + expectedFile: 'ajcwebdev_info.json', + newName: '01-docker-ajcwebdev-rss-info.json', + }, + { + cmd: 'npm run docker-cli -- --rss "https://ajcwebdev.substack.com/feed" --whisper tiny', + expectedFile: '2021-05-10-thoughts-on-lambda-school-layoffs-prompt.md', + newName: '02-docker-rss-default.md' + }, { cmd: 'npm run docker-cli -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --whisper base', expectedFile: '2024-09-24-ep0-fsjam-podcast-prompt.md', - newName: '01-docker-video-default.md' + newName: '03-docker-video-default.md' }, { - cmd: 'npm run docker-cli -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --whisper base', + cmd: 'npm run docker-cli -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --whisper tiny', expectedFiles: [ - { file: '2024-09-24-ep1-fsjam-podcast-prompt.md', newName: '02-docker-playlist-default.md' }, - { file: '2024-09-24-ep0-fsjam-podcast-prompt.md', newName: '03-docker-playlist-default.md' } + { file: '2024-09-24-ep1-fsjam-podcast-prompt.md', newName: '04-docker-playlist-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-prompt.md', newName: '05-docker-playlist-default.md' } ] }, { - cmd: 'npm run docker-cli -- --urls "content/example-urls.md" --whisper base', + cmd: 'npm run docker-cli -- --urls "content/example-urls.md" --whisper tiny', expectedFiles: [ - { file: '2024-09-24-ep1-fsjam-podcast-prompt.md', newName: '04-docker-urls-default.md' }, - { file: '2024-09-24-ep0-fsjam-podcast-prompt.md', newName: '05-docker-urls-default.md' } + { file: 
'2024-09-24-ep1-fsjam-podcast-prompt.md', newName: '06-docker-urls-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-prompt.md', newName: '07-docker-urls-default.md' } ] }, { cmd: 'npm run docker-cli -- --file "content/audio.mp3" --whisper base', expectedFile: 'audio-prompt.md', - newName: '06-docker-file-default.md' + newName: '08-docker-file-default.md' }, { cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles --whisper base --ollama LLAMA_3_2_1B', expectedFile: 'audio-ollama-shownotes.md', - newName: '07-docker-titles-prompt-whisper-tiny-ollama-shownotes.md' + newName: '09-docker-titles-prompt-whisper-tiny-ollama-shownotes.md' }, { - cmd: 'npm run docker-cli -- --file "content/audio.mp3" --whisper base', + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --whisper base --prompt titles summary mediumChapters takeaways questions', expectedFile: 'audio-prompt.md', - newName: '08-docker-whisper-tiny.md' + newName: '10-docker-all-prompts.md' }, { - cmd: 'npm run docker-cli -- --file "content/audio.mp3" --whisper base --prompt titles summary mediumChapters takeaways questions', - expectedFile: 'audio-prompt.md', - newName: '09-docker-all-prompts.md' + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --chatgpt', + expectedFile: 'audio-chatgpt-shownotes.md', + newName: '11-docker-three-prompts-chatgpt-shownotes.md' }, { - cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters takeaways questions --whisper base --ollama LLAMA_3_2_1B', - expectedFile: 'audio-ollama-shownotes.md', - newName: '10-docker-all-prompts-ollama-shownotes.md' + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --claude', + expectedFile: 'audio-claude-shownotes.md', + newName: '12-docker-three-prompts-claude-shownotes.md' }, { - cmd: 'npm run docker-cli -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --prompt titles --whisper base --ollama LLAMA_3_2_1B', - expectedFiles: [ - { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '11-docker-prompt-whisper-ollama-shownotes.md' }, - { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '12-docker-prompt-whisper-ollama-shownotes.md' } - ] + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --cohere', + expectedFile: 'audio-cohere-shownotes.md', + newName: '13-docker-three-prompts-cohere-shownotes.md' }, { - cmd: 'npm run docker-cli -- --urls "content/example-urls.md" --prompt titles --whisper base --ollama LLAMA_3_2_1B', - expectedFiles: [ - { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '13-docker-prompt-whisper-ollama-shownotes.md' }, - { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '14-docker-prompt-whisper-ollama-shownotes.md' } - ] + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --gemini', + expectedFile: 'audio-gemini-shownotes.md', + newName: '14-docker-three-prompts-gemini-shownotes.md' }, { - cmd: 'npm run docker-cli -- --rss "https://ajcwebdev.substack.com/feed" --whisper base', - expectedFile: '2021-05-10-thoughts-on-lambda-school-layoffs-prompt.md', - newName: '15-docker-rss-default.md' + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --mistral', + expectedFile: 'audio-mistral-shownotes.md', + 
newName: '15-docker-three-prompts-mistral-shownotes.md' }, { - cmd: 'npm run docker-cli -- --rss "https://ajcwebdev.substack.com/feed" --info', - expectedFile: 'ajcwebdev_info.json', - newName: '16-docker-ajcwebdev-rss-info.json', + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --fireworks', + expectedFile: 'audio-fireworks-shownotes.md', + newName: '16-docker-three-prompts-fireworks-shownotes.md' + }, + { + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --together', + expectedFile: 'audio-together-shownotes.md', + newName: '17-docker-three-prompts-together-shownotes.md' + }, + { + cmd: 'npm run docker-cli -- --file "content/audio.mp3" --prompt titles summary shortChapters --whisper base --groq', + expectedFile: 'audio-groq-shownotes.md', + newName: '18-docker-three-prompts-groq-shownotes.md' }, ] diff --git a/test/local.test.ts b/test/local.test.ts index 497bcf1..a54a331 100644 --- a/test/local.test.ts +++ b/test/local.test.ts @@ -37,7 +37,7 @@ const commands = [ }, { // Process local audio file with title prompts, Whisper 'tiny' model, and Ollama. - cmd: 'npm run as -- --file "content/audio.mp3" --prompt titles --whisper tiny --ollama LLAMA_3_2_1B', + cmd: 'npm run as -- --file "content/audio.mp3" --prompt titles --whisper tiny --ollama', expectedFile: 'audio-ollama-shownotes.md', newName: '07-titles-prompt-whisper-tiny-ollama-shownotes.md' }, @@ -55,13 +55,13 @@ const commands = [ }, { // Process a local audio file with multiple prompt sections, Whisper 'tiny' model, and Ollama. - cmd: 'npm run as -- --file "content/audio.mp3" --prompt titles summary shortChapters takeaways questions --whisper tiny --ollama', + cmd: 'npm run as -- --file "content/audio.mp3" --prompt titles summary shortChapters takeaways questions --whisper tiny --ollama LLAMA_3_2_1B', expectedFile: 'audio-ollama-shownotes.md', newName: '10-all-prompts-ollama-shownotes.md' }, { // Process playlist videos with titles and longChapters prompts, tiny Whisper model, and Ollama for LLM processing. - cmd: 'npm run as -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --prompt titles longChapters --whisper tiny --ollama', + cmd: 'npm run as -- --playlist "https://www.youtube.com/playlist?list=PLCVnrVv4KhXPz0SoAVu8Rc1emAdGPbSbr" --prompt titles longChapters --whisper tiny --ollama LLAMA_3_2_1B', expectedFiles: [ { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '11-prompt-whisper-ollama-shownotes.md' }, { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '12-prompt-whisper-ollama-shownotes.md' } @@ -69,7 +69,7 @@ const commands = [ }, { // Process multiple YouTube videos from URLs with title prompts, Whisper 'tiny' model, and Ollama. 
- cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --ollama', + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --ollama LLAMA_3_2_1B', expectedFiles: [ { file: '2024-09-24-ep1-fsjam-podcast-ollama-shownotes.md', newName: '13-prompt-whisper-ollama-shownotes.md' }, { file: '2024-09-24-ep0-fsjam-podcast-ollama-shownotes.md', newName: '14-prompt-whisper-ollama-shownotes.md' } diff --git a/test/models/chatgpt.test.ts b/test/models/chatgpt.test.ts new file mode 100644 index 0000000..8debb69 --- /dev/null +++ b/test/models/chatgpt.test.ts @@ -0,0 +1,60 @@ +// test/models/chatgpt.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts, Whisper 'tiny' model, and ChatGPT default model. + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --chatgpt', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-chatgpt-shownotes.md', newName: '01-chatgpt-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-chatgpt-shownotes.md', newName: '02-chatgpt-default.md' } + ] + }, + { + // Process video with ChatGPT using GPT_4o_MINI model. + cmd: 'npm run as -- --file "content/audio.mp3" --chatgpt GPT_4o_MINI', + expectedFile: 'audio-chatgpt-shownotes.md', + newName: '03-chatgpt-gpt-4o-mini.md' + }, + { + // Process video with ChatGPT using GPT_4o model. + cmd: 'npm run as -- --file "content/audio.mp3" --chatgpt GPT_4o', + expectedFile: 'audio-chatgpt-shownotes.md', + newName: '04-chatgpt-gpt-4o.md' + }, + { + // Process video with ChatGPT using GPT_o1_MINI model. 
+ cmd: 'npm run as -- --file "content/audio.mp3" --chatgpt GPT_o1_MINI', + expectedFile: 'audio-chatgpt-shownotes.md', + newName: '05-chatgpt-gpt-o1-mini.md' + }, +] + +test('Autoshow Command Tests', async (t) => { + for (const [index, command] of commands.entries()) { + await t.test(`should run command ${index + 1} successfully`, async () => { + // Run the command + execSync(command.cmd, { stdio: 'inherit' }) + if (Array.isArray(command.expectedFiles)) { + for (const { file, newName } of command.expectedFiles) { + const filePath = join('content', file) + strictEqual(existsSync(filePath), true, `Expected file ${file} was not created`) + const newPath = join('content', newName) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`) + } + } else { + const filePath = join('content', command.expectedFile as string) + strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`) + const newPath = join('content', command.newName as string) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`) + } + }) + } +}) \ No newline at end of file diff --git a/test/models/claude.test.ts b/test/models/claude.test.ts new file mode 100644 index 0000000..8e26306 --- /dev/null +++ b/test/models/claude.test.ts @@ -0,0 +1,73 @@ +// test/models/claude.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts using default Claude model + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --claude', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-claude-shownotes.md', newName: '01-claude-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-claude-shownotes.md', newName: '02-claude-default.md' } + ] + }, + { + // Process video with Claude 3.5 Sonnet model + cmd: 'npm run as -- --file "content/audio.mp3" --claude CLAUDE_3_5_SONNET', + expectedFile: 'audio-claude-shownotes.md', + newName: '03-claude-3-5-sonnet.md' + }, + { + // Process video with Claude 3.5 Haiku model + cmd: 'npm run as -- --file "content/audio.mp3" --claude CLAUDE_3_5_HAIKU', + expectedFile: 'audio-claude-shownotes.md', + newName: '04-claude-3-5-haiku.md' + }, + { + // Process video with Claude 3 Opus model + cmd: 'npm run as -- --file "content/audio.mp3" --claude CLAUDE_3_OPUS', + expectedFile: 'audio-claude-shownotes.md', + newName: '05-claude-3-opus.md' + }, + { + // Process video with Claude 3 Sonnet model + cmd: 'npm run as -- --file "content/audio.mp3" --claude CLAUDE_3_SONNET', + expectedFile: 'audio-claude-shownotes.md', + newName: '06-claude-3-sonnet.md' + }, + { + // Process video with Claude 3 Haiku model + cmd: 'npm run as -- --file "content/audio.mp3" --claude CLAUDE_3_HAIKU', + expectedFile: 'audio-claude-shownotes.md', + newName: '07-claude-3-haiku.md' + }, +] + +test('Autoshow Command Tests', async (t) => { + for (const [index, command] of commands.entries()) { + await t.test(`should run command ${index + 1} successfully`, async () => { + // Run the command + execSync(command.cmd, { stdio: 'inherit' }) + + if (Array.isArray(command.expectedFiles)) { + for (const { file, newName } of command.expectedFiles) { + const filePath = join('content', file) + strictEqual(existsSync(filePath), 
true, `Expected file ${file} was not created`) + const newPath = join('content', newName) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`) + } + } else { + const filePath = join('content', command.expectedFile as string) + strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`) + const newPath = join('content', command.newName as string) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`) + } + }) + } +}) \ No newline at end of file diff --git a/test/models/cohere.test.ts b/test/models/cohere.test.ts new file mode 100644 index 0000000..c62f6ab --- /dev/null +++ b/test/models/cohere.test.ts @@ -0,0 +1,55 @@ +// test/models/cohere.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts using default Cohere model + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --cohere', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-cohere-shownotes.md', newName: '01-cohere-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-cohere-shownotes.md', newName: '02-cohere-default.md' } + ] + }, + { + // Process video with Cohere Command R model + cmd: 'npm run as -- --file "content/audio.mp3" --cohere COMMAND_R', + expectedFile: 'audio-cohere-shownotes.md', + newName: '03-cohere-command-r.md' + }, + { + // Process video with Cohere Command R Plus model + cmd: 'npm run as -- --file "content/audio.mp3" --cohere COMMAND_R_PLUS', + expectedFile: 'audio-cohere-shownotes.md', + newName: '04-cohere-command-r-plus.md' + } +] + +test('Autoshow Cohere Command Tests', async (t) => { + for (const [index, command] of commands.entries()) { + await t.test(`should run command ${index + 1} successfully`, async () => { + // Run the command + execSync(command.cmd, { stdio: 'inherit' }) + + if (Array.isArray(command.expectedFiles)) { + for (const { file, newName } of command.expectedFiles) { + const filePath = join('content', file) + strictEqual(existsSync(filePath), true, `Expected file ${file} was not created`) + const newPath = join('content', newName) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`) + } + } else { + const filePath = join('content', command.expectedFile as string) + strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`) + const newPath = join('content', command.newName as string) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`) + } + }) + } +}) \ No newline at end of file diff --git a/test/models/fireworks.test.ts b/test/models/fireworks.test.ts new file mode 100644 index 0000000..8396949 --- /dev/null +++ b/test/models/fireworks.test.ts @@ -0,0 +1,79 @@ +// test/models/fireworks.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts using default Fireworks model + cmd: 'npm run as -- --urls "content/example-urls.md" 
--prompt titles --whisper tiny --fireworks', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-fireworks-shownotes.md', newName: '01-fireworks-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-fireworks-shownotes.md', newName: '02-fireworks-default.md' } + ] + }, + { + // Process video with LLAMA 3 1 405B model + cmd: 'npm run as -- --file "content/audio.mp3" --fireworks LLAMA_3_1_405B', + expectedFile: 'audio-fireworks-shownotes.md', + newName: '03-fireworks-llama-3-1-405b.md' + }, + { + // Process video with LLAMA 3 1 70B model + cmd: 'npm run as -- --file "content/audio.mp3" --fireworks LLAMA_3_1_70B', + expectedFile: 'audio-fireworks-shownotes.md', + newName: '04-fireworks-llama-3-1-70b.md' + }, + { + // Process video with LLAMA 3 1 8B model + cmd: 'npm run as -- --file "content/audio.mp3" --fireworks LLAMA_3_1_8B', + expectedFile: 'audio-fireworks-shownotes.md', + newName: '05-fireworks-llama-3-1-8b.md' + }, + { + // Process video with LLAMA 3 2 3B model + cmd: 'npm run as -- --file "content/audio.mp3" --fireworks LLAMA_3_2_3B', + expectedFile: 'audio-fireworks-shownotes.md', + newName: '06-fireworks-llama-3-2-3b.md' + }, + { + // Process video with LLAMA 3 2 1B model + cmd: 'npm run as -- --file "content/audio.mp3" --fireworks LLAMA_3_2_1B', + expectedFile: 'audio-fireworks-shownotes.md', + newName: '07-fireworks-llama-3-2-1b.md' + }, + { + // Process video with QWEN 2 5 72B model + cmd: 'npm run as -- --file "content/audio.mp3" --fireworks QWEN_2_5_72B', + expectedFile: 'audio-fireworks-shownotes.md', + newName: '08-fireworks-qwen-2-5-72b.md' + } +] + +test('Autoshow Fireworks Command Tests', async (t) => { + for (const [index, command] of commands.entries()) { + await t.test(`should run command ${index + 1} successfully`, async () => { + // Run the command + execSync(command.cmd, { stdio: 'inherit' }) + + if (Array.isArray(command.expectedFiles)) { + for (const { file, newName } of command.expectedFiles) { + const filePath = join('content', file) + strictEqual(existsSync(filePath), true, `Expected file ${file} was not created`) + const newPath = join('content', newName) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`) + } + } else { + const filePath = join('content', command.expectedFile as string) + strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`) + const newPath = join('content', command.newName as string) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`) + } + }) + } +}) \ No newline at end of file diff --git a/test/models/gemini.test.ts b/test/models/gemini.test.ts new file mode 100644 index 0000000..db73dd0 --- /dev/null +++ b/test/models/gemini.test.ts @@ -0,0 +1,61 @@ +// test/models/gemini.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts using default Gemini model + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --gemini', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-gemini-shownotes.md', newName: '01-gemini-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-gemini-shownotes.md', newName: '02-gemini-default.md' } + ] + }, + { + // Process video with Gemini 1.5 Flash-8B 
model + cmd: 'npm run as -- --file "content/audio.mp3" --gemini GEMINI_1_5_FLASH_8B', + expectedFile: 'audio-gemini-shownotes.md', + newName: '03-gemini-1-5-flash-8b.md' + }, + { + // Process video with Gemini 1.5 Flash model + cmd: 'npm run as -- --file "content/audio.mp3" --gemini GEMINI_1_5_FLASH', + expectedFile: 'audio-gemini-shownotes.md', + newName: '04-gemini-1-5-flash.md' + }, + { + // Process video with Gemini 1.5 Pro model + cmd: 'npm run as -- --file "content/audio.mp3" --gemini GEMINI_1_5_PRO', + expectedFile: 'audio-gemini-shownotes.md', + newName: '05-gemini-1-5-pro.md' + } +] + +test('Autoshow Gemini Command Tests', async (t) => { + for (const [index, command] of commands.entries()) { + await t.test(`should run command ${index + 1} successfully`, async () => { + // Run the command + execSync(command.cmd, { stdio: 'inherit' }) + + if (Array.isArray(command.expectedFiles)) { + for (const { file, newName } of command.expectedFiles) { + const filePath = join('content', file) + strictEqual(existsSync(filePath), true, `Expected file ${file} was not created`) + const newPath = join('content', newName) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`) + } + } else { + const filePath = join('content', command.expectedFile as string) + strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`) + const newPath = join('content', command.newName as string) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`) + } + }) + } +}) \ No newline at end of file diff --git a/test/models/groq.test.ts b/test/models/groq.test.ts new file mode 100644 index 0000000..84ebb0f --- /dev/null +++ b/test/models/groq.test.ts @@ -0,0 +1,73 @@ +// test/models/groq.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts using default Groq model + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --groq', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-groq-shownotes.md', newName: '01-groq-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-groq-shownotes.md', newName: '02-groq-default.md' } + ] + }, + { + // Process video with Llama 3.2 1B Preview model + cmd: 'npm run as -- --file "content/audio.mp3" --groq LLAMA_3_2_1B_PREVIEW', + expectedFile: 'audio-groq-shownotes.md', + newName: '03-groq-llama-3-2-1b-preview.md' + }, + { + // Process video with Llama 3.2 3B Preview model + cmd: 'npm run as -- --file "content/audio.mp3" --groq LLAMA_3_2_3B_PREVIEW', + expectedFile: 'audio-groq-shownotes.md', + newName: '04-groq-llama-3-2-3b-preview.md' + }, + { + // Process video with Llama 3.3 70B Versatile model + cmd: 'npm run as -- --file "content/audio.mp3" --groq LLAMA_3_3_70B_VERSATILE', + expectedFile: 'audio-groq-shownotes.md', + newName: '05-groq-llama-3-3-70b-versatile.md' + }, + { + // Process video with Llama 3.1 8B Instant model + cmd: 'npm run as -- --file "content/audio.mp3" --groq LLAMA_3_1_8B_INSTANT', + expectedFile: 'audio-groq-shownotes.md', + newName: '06-groq-llama-3-1-8b-instant.md' + }, + { + // Process video with Mixtral 8x7B Instruct model + cmd: 'npm run as -- --file "content/audio.mp3" --groq MIXTRAL_8X7B_INSTRUCT', + expectedFile: 
'audio-groq-shownotes.md', + newName: '07-groq-mixtral-8x7b-instruct.md' + }, +] + +test('Autoshow Groq Command Tests', async (t) => { + for (const [index, command] of commands.entries()) { + await t.test(`should run command ${index + 1} successfully`, async () => { + // Run the command + execSync(command.cmd, { stdio: 'inherit' }) + + if (Array.isArray(command.expectedFiles)) { + for (const { file, newName } of command.expectedFiles) { + const filePath = join('content', file) + strictEqual(existsSync(filePath), true, `Expected file ${file} was not created`) + const newPath = join('content', newName) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`) + } + } else { + const filePath = join('content', command.expectedFile as string) + strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`) + const newPath = join('content', command.newName as string) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`) + } + }) + } +}) \ No newline at end of file diff --git a/test/models/mistral.test.ts b/test/models/mistral.test.ts new file mode 100644 index 0000000..f9afd78 --- /dev/null +++ b/test/models/mistral.test.ts @@ -0,0 +1,91 @@ +// test/models/mistral.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts using default Mistral model + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --mistral', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-mistral-shownotes.md', newName: '01-mistral-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-mistral-shownotes.md', newName: '02-mistral-default.md' } + ] + }, + { + // Process video with Mixtral 8x7B model + cmd: 'npm run as -- --file "content/audio.mp3" --mistral MIXTRAL_8x7B', + expectedFile: 'audio-mistral-shownotes.md', + newName: '03-mistral-mixtral-8x7b.md' + }, + { + // Process video with Mixtral 8x22B model + cmd: 'npm run as -- --file "content/audio.mp3" --mistral MIXTRAL_8x22B', + expectedFile: 'audio-mistral-shownotes.md', + newName: '04-mistral-mixtral-8x22b.md' + }, + { + // Process video with Mistral Large model + cmd: 'npm run as -- --file "content/audio.mp3" --mistral MISTRAL_LARGE', + expectedFile: 'audio-mistral-shownotes.md', + newName: '05-mistral-large.md' + }, + { + // Process video with Mistral Small model + cmd: 'npm run as -- --file "content/audio.mp3" --mistral MISTRAL_SMALL', + expectedFile: 'audio-mistral-shownotes.md', + newName: '06-mistral-small.md' + }, + { + // Process video with Ministral 8B model + cmd: 'npm run as -- --file "content/audio.mp3" --mistral MINISTRAL_8B', + expectedFile: 'audio-mistral-shownotes.md', + newName: '07-mistral-ministral-8b.md' + }, + { + // Process video with Ministral 3B model + cmd: 'npm run as -- --file "content/audio.mp3" --mistral MINISTRAL_3B', + expectedFile: 'audio-mistral-shownotes.md', + newName: '08-mistral-ministral-3b.md' + }, + { + // Process video with Mistral NeMo model + cmd: 'npm run as -- --file "content/audio.mp3" --mistral MISTRAL_NEMO', + expectedFile: 'audio-mistral-shownotes.md', + newName: '09-mistral-nemo.md' + }, + { + // Process video with Mistral 7B model + cmd: 'npm run as -- --file 
"content/audio.mp3" --mistral MISTRAL_7B', + expectedFile: 'audio-mistral-shownotes.md', + newName: '10-mistral-7b.md' + } +] + +test('Autoshow Mistral Command Tests', async (t) => { + for (const [index, command] of commands.entries()) { + await t.test(`should run command ${index + 1} successfully`, async () => { + // Run the command + execSync(command.cmd, { stdio: 'inherit' }) + + if (Array.isArray(command.expectedFiles)) { + for (const { file, newName } of command.expectedFiles) { + const filePath = join('content', file) + strictEqual(existsSync(filePath), true, `Expected file ${file} was not created`) + const newPath = join('content', newName) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`) + } + } else { + const filePath = join('content', command.expectedFile as string) + strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`) + const newPath = join('content', command.newName as string) + renameSync(filePath, newPath) + strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`) + } + }) + } +}) \ No newline at end of file diff --git a/test/models/together.test.ts b/test/models/together.test.ts new file mode 100644 index 0000000..26b1202 --- /dev/null +++ b/test/models/together.test.ts @@ -0,0 +1,91 @@ +// test/models/together.test.ts + +import test from 'node:test' +import { strictEqual } from 'node:assert/strict' +import { execSync } from 'node:child_process' +import { existsSync, renameSync } from 'node:fs' +import { join } from 'node:path' + +const commands = [ + { + // Process multiple YouTube videos from URLs with title prompts using default Together model + cmd: 'npm run as -- --urls "content/example-urls.md" --prompt titles --whisper tiny --together', + expectedFiles: [ + { file: '2024-09-24-ep1-fsjam-podcast-together-shownotes.md', newName: '01-together-default.md' }, + { file: '2024-09-24-ep0-fsjam-podcast-together-shownotes.md', newName: '02-together-default.md' } + ] + }, + { + // Process video with LLAMA 3 2 3B model + cmd: 'npm run as -- --file "content/audio.mp3" --together LLAMA_3_2_3B', + expectedFile: 'audio-together-shownotes.md', + newName: '03-together-llama-3-2-3b.md' + }, + { + // Process video with LLAMA 3 1 405B model + cmd: 'npm run as -- --file "content/audio.mp3" --together LLAMA_3_1_405B', + expectedFile: 'audio-together-shownotes.md', + newName: '04-together-llama-3-1-405b.md' + }, + { + // Process video with LLAMA 3 1 70B model + cmd: 'npm run as -- --file "content/audio.mp3" --together LLAMA_3_1_70B', + expectedFile: 'audio-together-shownotes.md', + newName: '05-together-llama-3-1-70b.md' + }, + { + // Process video with LLAMA 3 1 8B model + cmd: 'npm run as -- --file "content/audio.mp3" --together LLAMA_3_1_8B', + expectedFile: 'audio-together-shownotes.md', + newName: '06-together-llama-3-1-8b.md' + }, + { + // Process video with Gemma 2 27B model + cmd: 'npm run as -- --file "content/audio.mp3" --together GEMMA_2_27B', + expectedFile: 'audio-together-shownotes.md', + newName: '07-together-gemma-2-27b.md' + }, + { + // Process video with Gemma 2 9B model + cmd: 'npm run as -- --file "content/audio.mp3" --together GEMMA_2_9B', + expectedFile: 'audio-together-shownotes.md', + newName: '08-together-gemma-2-9b.md' + }, + { + // Process video with QWEN 2 5 72B model + cmd: 'npm run as -- --file "content/audio.mp3" --together QWEN_2_5_72B', + expectedFile: 'audio-together-shownotes.md', + newName: '09-together-qwen-2-5-72b.md' + 
},
+  {
+    // Process video with QWEN 2 5 7B model
+    cmd: 'npm run as -- --file "content/audio.mp3" --together QWEN_2_5_7B',
+    expectedFile: 'audio-together-shownotes.md',
+    newName: '10-together-qwen-2-5-7b.md'
+  }
+]
+
+test('Autoshow Together Command Tests', async (t) => {
+  for (const [index, command] of commands.entries()) {
+    await t.test(`should run command ${index + 1} successfully`, async () => {
+      // Run the command
+      execSync(command.cmd, { stdio: 'inherit' })
+
+      if (Array.isArray(command.expectedFiles)) {
+        for (const { file, newName } of command.expectedFiles) {
+          const filePath = join('content', file)
+          strictEqual(existsSync(filePath), true, `Expected file ${file} was not created`)
+          const newPath = join('content', newName)
+          renameSync(filePath, newPath)
+          strictEqual(existsSync(newPath), true, `File was not renamed to ${newName}`)
+        }
+      } else {
+        const filePath = join('content', command.expectedFile as string)
+        strictEqual(existsSync(filePath), true, `Expected file ${command.expectedFile} was not created`)
+        const newPath = join('content', command.newName as string)
+        renameSync(filePath, newPath)
+        strictEqual(existsSync(newPath), true, `File was not renamed to ${command.newName}`)
+      }
+    })
+  }
+})
\ No newline at end of file