✨ Introduce "none" LLM provider for execution control (#1163)
Added a new "none" model to block LLM execution with error logging.
pelikhan authored Feb 21, 2025
1 parent 4cb807a commit 9f9e97e
Showing 8 changed files with 41 additions and 4 deletions.
14 changes: 13 additions & 1 deletion docs/src/content/docs/getting-started/configuration.mdx
````diff
@@ -1590,7 +1590,7 @@ You can also override the `transcription` model alias to change the default mode
 
 ## Echo
 
-This is a dry run LLM provider that returns the messages without calling any LLM.
+The `echo` provider is a dry run LLM provider that returns the messages without calling any LLM.
 It is most useful for debugging when you want to see the resulting LLM request without sending it.
 
 ```js 'model: "echo"'
@@ -1599,6 +1599,18 @@ script({
 })
 ```
 
+Echo replies with the chat messages as markdown and JSON, which can be helpful for debugging.
+
+## None
+
+The `none` provider prevents any LLM execution. It is typically used in a top-level script that relies exclusively on inline prompts.
+
+```js 'model: "none"'
+script({
+    model: "none",
+})
+```
+
 ## Model specific environment variables
 
 You can provide different environment variables
````
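Beyond the diff, here is a minimal sketch (not part of this commit) of the pattern the new docs describe: the top-level script is pinned to `model: "none"` so any accidental top-level generation fails fast, while inline prompts pick their own models. `script`, `runPrompt`, and the `$` template are existing GenAIScript APIs; the prompt text and the `small` alias choice are illustrative.

```ts
// Top-level script: "none" blocks direct LLM execution for this script.
script({
    model: "none",
})

// All generation happens in inline prompts, each with its own model.
const res = await runPrompt(
    (_) => {
        // illustrative prompt body
        _.$`Summarize the repository README in one sentence.`
    },
    { model: "small" } // "small" is a GenAIScript model alias
)
console.log(res.text)
```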
5 changes: 3 additions & 2 deletions packages/core/src/connection.ts
```diff
@@ -41,6 +41,7 @@ import {
     MODEL_PROVIDER_WHISPERASR,
     WHISPERASR_API_BASE,
     MODEL_PROVIDER_ECHO,
+    MODEL_PROVIDER_NONE,
 } from "./constants"
 import { host, runtimeHost } from "./host"
 import { parseModelIdentifier } from "./models"
@@ -555,12 +556,12 @@ export async function parseTokenFromEnv(
         }
     }
 
-    if (provider === MODEL_PROVIDER_ECHO) {
+    if (provider === MODEL_PROVIDER_ECHO || provider === MODEL_PROVIDER_NONE) {
         return {
             provider,
             model,
             base: undefined,
-            token: "echo",
+            token: provider,
         }
     }
 
```
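Reading the second hunk: `parseTokenFromEnv` now treats `none` like the other tokenless provider, `echo`, fabricating a connection whose `token` is simply the provider id so downstream code that expects a non-empty token keeps working. A rough sketch of the resulting object for `model: "none"` (the real return value may carry more optional fields; this mirrors only what the diff assigns):

```ts
// Hypothetical illustration, mirroring the fields assigned in the hunk above.
const connection = {
    provider: "none",
    model: "none",
    base: undefined, // no endpoint: nothing is ever called
    token: "none", // placeholder token; previously hardcoded to "echo"
}
```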
1 change: 1 addition & 0 deletions packages/core/src/constants.ts
```diff
@@ -177,6 +177,7 @@ export const MODEL_PROVIDER_JAN = "jan"
 export const MODEL_PROVIDER_DEEPSEEK = "deepseek"
 export const MODEL_PROVIDER_WHISPERASR = "whisperasr"
 export const MODEL_PROVIDER_ECHO = "echo"
+export const MODEL_PROVIDER_NONE = "none"
 
 export const MODEL_PROVIDER_OPENAI_HOSTS = Object.freeze([
     MODEL_PROVIDER_OPENAI,
```
1 change: 0 additions & 1 deletion packages/core/src/echomodel.ts
```diff
@@ -2,7 +2,6 @@ import { LanguageModel } from "./chat"
 import { renderMessagesToMarkdown } from "./chatrender"
 import { deleteEmptyValues } from "./cleaners"
 import { MODEL_PROVIDER_ECHO } from "./constants"
-import { logVerbose } from "./util"
 
 export const EchoModel = Object.freeze<LanguageModel>({
     id: MODEL_PROVIDER_ECHO,
```
7 changes: 7 additions & 0 deletions packages/core/src/llms.json
```diff
@@ -440,6 +440,13 @@
             "detail": "A fake LLM provider that responds with the input messages.",
             "tools": true,
             "tokenless": true
+        },
+        {
+            "id": "none",
+            "tools": true,
+            "tokenless": true,
+            "hidden": true,
+            "detail": "A LLM provider that stops the execution. Used on top level script to prevent LLM execution."
         }
     ],
     "aliases": {
```
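The new entry mirrors the `echo` one above it. The field meanings below are inferred from their names and from how `echo` is described, so treat them as assumptions rather than documented semantics:

```ts
// Assumed shape of an llms.json provider entry, inferred from the fields used here.
interface ProviderMetadata {
    id: string // provider identifier, e.g. "none"
    detail: string // human-readable description
    tools?: boolean // advertises tool-call support
    tokenless?: boolean // no API token required
    hidden?: boolean // omitted from provider listings (inferred from the name)
}
```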
3 changes: 3 additions & 0 deletions packages/core/src/lm.ts
```diff
@@ -14,6 +14,7 @@ import {
     MODEL_PROVIDER_WHISPERASR,
     MODEL_PROVIDER_AZURE_OPENAI,
     MODEL_PROVIDER_ECHO,
+    MODEL_PROVIDER_NONE,
 } from "./constants"
 import { runtimeHost } from "./host"
 import { OllamaModel } from "./ollama"
@@ -24,6 +25,7 @@ import { LMStudioModel } from "./lmstudio"
 import { WhiserAsrModel } from "./whisperasr"
 import { AzureOpenAIModel } from "./azureopenai"
 import { EchoModel } from "./echomodel"
+import { NoneModel } from "./nonemodel"
 
 export function resolveLanguageModel(provider: string): LanguageModel {
     if (provider === MODEL_PROVIDER_GITHUB_COPILOT_CHAT) {
@@ -42,6 +44,7 @@ export function resolveLanguageModel(provider: string): LanguageModel {
     if (provider === MODEL_PROVIDER_LMSTUDIO) return LMStudioModel
     if (provider === MODEL_PROVIDER_WHISPERASR) return WhiserAsrModel
     if (provider === MODEL_PROVIDER_ECHO) return EchoModel
+    if (provider === MODEL_PROVIDER_NONE) return NoneModel
 
     const features = MODEL_PROVIDERS.find((p) => p.id === provider)
     return LocalOpenAICompatibleModel(provider, {
```
13 changes: 13 additions & 0 deletions packages/core/src/nonemodel.ts
```diff
@@ -0,0 +1,13 @@
+import { LanguageModel } from "./chat"
+import { MODEL_PROVIDER_NONE } from "./constants"
+import { serializeError } from "./error"
+
+export const NoneModel = Object.freeze<LanguageModel>({
+    id: MODEL_PROVIDER_NONE,
+    completer: async (req, connection, options) => {
+        return {
+            finishReason: "fail",
+            error: serializeError("No LLM execution allowed in this context."),
+        }
+    },
+})
```
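The file is new: `NoneModel` satisfies the `LanguageModel` interface, but its completer never contacts a model and unconditionally reports failure. A sketch of what a caller would observe (the argument stubs are illustrative; the real caller is the chat pipeline):

```ts
// Illustrative only: every invocation resolves to a failed finish reason.
const result = await NoneModel.completer(
    undefined as any, // request is never read
    undefined as any, // connection is never read
    undefined as any // options are never read
)
// result.finishReason === "fail"
// result.error carries "No LLM execution allowed in this context."
```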
1 change: 1 addition & 0 deletions packages/core/src/types/prompt_template.d.ts
```diff
@@ -212,6 +212,7 @@ type ModelType = OptionsOrString<
     | "transformers:onnx-community/Qwen2.5-0.5B-Instruct:q4"
     | "transformers:HuggingFaceTB/SmolLM2-1.7B-Instruct:q4f16"
     | "echo"
+    | "none"
 >
 
 type ModelSmallType = OptionsOrString<
```
