Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update to add support for arguments to kernel functions #18

Draft
wants to merge 5 commits into
base: main
Choose a base branch
from
Draft
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion samples/azure/package.json
Original file line number Diff line number Diff line change
@@ -3,7 +3,8 @@
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
"clean": "rm -rf dist",
"build": "rm -rf dist && tsc -p ./"
},
"keywords": [],
"author": "",
89 changes: 89 additions & 0 deletions samples/azure/src/step1.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
import { AzureOpenAIChatCompletionService } from '@semantic-kernel/azure-openai';
import { ChatMessageContent, kernel, KernelArguments, PromptExecutionSettings, StreamingTextContent } from "semantic-kernel";

// Build a kernel wired to an Azure OpenAI chat-completion service.
// Replace the placeholder strings with your deployment's real values.
const sk = kernel().addService(
  new AzureOpenAIChatCompletionService({
    deploymentName: '<OpenAI model name>',
    endpoint: '<Azure OpenAI endpoint>',
    // Fix: placeholder said '<OpenAPI version>' — OpenAPI is an unrelated
    // spec format; this field takes the Azure OpenAI API version string.
    apiVersion: '<Azure OpenAI API version>'
  })
);

// Example 1 - Invoke the kernel with a prompt and display the result
async function simpleInvokePrompt(): Promise<string | undefined> {
  const promptResult = await sk.invokePrompt({ promptTemplate: 'What color is the sky' });
  const message = promptResult?.value as ChatMessageContent;
  return message?.items[0]?.toString() ?? undefined;
}

// Example 2 - Invoke the kernel with a templated prompt and display the result
async function templateInvokePrompt(): Promise<string | undefined> {
  // {{topic}} in the template is filled from these kernel arguments.
  const templateArgs = new KernelArguments({
    arguments: { topic: "sea" },
    executionSettings: undefined
  });

  const promptResult = await sk.invokePrompt({
    promptTemplate: 'What color is the {{topic}}',
    kernelArguments: templateArgs
  });
  const message = promptResult?.value as ChatMessageContent;
  return message?.items[0]?.toString() ?? undefined;
}

// Example 3 - Invoke the kernel with a templated prompt and stream the results
async function streamInvokePrompt(): Promise<void> {
  const args = new KernelArguments({
    arguments: { topic: "sea" },
    executionSettings: undefined
  });

  // Fix: the original wrapped this in a nested `streamPrompt()` function that
  // was defined and immediately awaited once — inlined with no behavior change.
  const stream = sk.invokeStreamingPrompt({
    promptTemplate: 'What color is the {{topic}}? Provide a detailed explanation.',
    kernelArguments: args
  });

  // Write each streamed text chunk to stdout as it arrives.
  for await (const result of stream) {
    const item = (result as ChatMessageContent)?.items[0];
    if (item instanceof StreamingTextContent) {
      // `instanceof` has already narrowed `item`; the original's extra
      // `as StreamingTextContent` cast was redundant.
      process.stdout.write(item.text as string);
    }
  }
  // Terminate the streamed line.
  console.log();
}

// Example 4 - Invoke the kernel with a templated prompt and custom execution settings
async function customExecutionSettingsInvokePrompt(): Promise<string | undefined> {
  const settings = {
    maxTokens: 500,
    temperature: 0.5,
  };
  const storyArgs = new KernelArguments({
    arguments: { topic: "dogs" },
    executionSettings: settings as PromptExecutionSettings,
  });

  const storyResult = await sk.invokePrompt({
    promptTemplate: 'Tell me a story about {{topic}}',
    kernelArguments: storyArgs
  });
  const message = storyResult?.value as ChatMessageContent;
  return message?.items[0]?.toString() ?? undefined;
}

// Example 5 - Invoke the kernel with a templated prompt and custom execution settings that return a JSON response
async function invokePromptWithJsonResponse(): Promise<string | undefined> {
  const promptSettings = {
    // Fix: key was misspelled "reponseFormat", so the JSON-mode setting was
    // silently ignored and the model returned plain text.
    "responseFormat": "json_object",
  };
  const args: KernelArguments = new KernelArguments({
    arguments: { topic: "chocolate" },
    executionSettings: promptSettings as PromptExecutionSettings,
  });
  const result = await sk.invokePrompt({ promptTemplate: 'Create a recipe for a {{topic}} cake in JSON format', kernelArguments: args});
  return (result?.value as ChatMessageContent)?.items[0]?.toString() ?? undefined;
}

// Run every sample in sequence, printing a numbered label before each one.
async function runAllExamples() {
  const label = (n: number) => console.log(`Example ${n}:`);

  label(1);
  console.log(await simpleInvokePrompt());
  label(2);
  console.log(await templateInvokePrompt());
  label(3);
  // Streaming example writes its own output as chunks arrive.
  await streamInvokePrompt();
  label(4);
  console.log(await customExecutionSettingsInvokePrompt());
  label(5);
  console.log(await invokePromptWithJsonResponse());
}

runAllExamples().then(() => {});
6 changes: 3 additions & 3 deletions src/abstractions/package.json
Original file line number Diff line number Diff line change
@@ -27,12 +27,12 @@
},
"devDependencies": {
"@eslint/js": "^9.9.0",
"@semantic-kernel/tsconfig": "*",
"@types/eslint__js": "^8.42.3",
"eslint": "^9.9.0",
"typescript-eslint": "^8.2.0",
"@semantic-kernel/tsconfig": "*",
"tsup": "^8.2.4",
"typescript": "^5.5.4"
"typescript": "^5.5.4",
"typescript-eslint": "^8.2.0"
},
"dependencies": {
"json-schema-to-ts": "^3.1.1"
21 changes: 9 additions & 12 deletions src/abstractions/src/functions/KernelFunctionFromPrompt.ts
Original file line number Diff line number Diff line change
@@ -2,12 +2,8 @@ import { ChatCompletionService, PromptExecutionSettings } from '../AI';
import { Kernel } from '../Kernel';
import { ChatMessageContent } from '../contents';
import { type FromSchema } from '../jsonSchema';
import {
PassThroughPromptTemplate,
PromptTemplate,
PromptTemplateConfig,
PromptTemplateFormat,
} from '../promptTemplate';
import { PromptTemplate, PromptTemplateConfig, PromptTemplateFormat } from '../promptTemplate';
import { KernelPromptTemplate } from '../promptTemplate/KernelPromptTemplate';
import { AIService } from '../services';
import { KernelArguments } from './KernelArguments';
import { KernelFunction } from './KernelFunction';
@@ -77,7 +73,7 @@ export class KernelFunctionFromPrompt extends KernelFunction<
});
}

override invokeCore = async (kernel: Kernel, args: KernelArguments<PromptType>) => {
override invokeCore = async (kernel: Kernel, args: KernelArguments<PromptType, Record<string, unknown>>) => {
const { renderedPrompt, AIService, executionSettings } = await this.renderPrompt(kernel, args);

if (AIService.serviceType === 'ChatCompletion') {
@@ -110,7 +106,7 @@ export class KernelFunctionFromPrompt extends KernelFunction<
throw new Error(`Unsupported AI service type: ${AIService.serviceType}`);
};

override async *invokeStreamingCore<T>(kernel: Kernel, args: KernelArguments<PromptType>): AsyncGenerator<T> {
override async *invokeStreamingCore<T>(kernel: Kernel, args: KernelArguments<PromptType, Record<string, unknown>>): AsyncGenerator<T> {
const { renderedPrompt, AIService, executionSettings } = await this.renderPrompt(kernel, args);

if (AIService.serviceType === 'ChatCompletion') {
@@ -130,17 +126,18 @@ export class KernelFunctionFromPrompt extends KernelFunction<
throw new Error(`Unsupported AI service type: ${AIService.serviceType}`);
}

private getPromptTemplate = (): PromptTemplate => {
private getPromptTemplate = (args: KernelArguments<PromptType, Record<string, unknown>>): PromptTemplate => {
switch (this.promptTemplateConfig.templateFormat) {
case 'passthrough':
return new PassThroughPromptTemplate(this.promptTemplateConfig.template);
// return new PassThroughPromptTemplate(this.promptTemplateConfig.template);
return new KernelPromptTemplate(this.promptTemplateConfig.template, args);
default:
throw new Error(`${this.promptTemplateConfig.templateFormat} template rendering not implemented`);
}
};

private async renderPrompt(kernel: Kernel, args: KernelArguments<PromptType>): Promise<PromptRenderingResult> {
const promptTemplate = this.getPromptTemplate();
private async renderPrompt(kernel: Kernel, args: KernelArguments<PromptType, Record<string, unknown>>): Promise<PromptRenderingResult> {
const promptTemplate = this.getPromptTemplate(args);

const { service, executionSettings } =
kernel.services.trySelectAIService({
14 changes: 14 additions & 0 deletions src/abstractions/src/promptTemplate/KernelPromptTemplate.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import { KernelArguments } from '../functions';
import { PromptTemplate } from './PromptTemplate';
import { handlebarsPromptTemplate } from './handlebarsPromptTemplate';

/**
 * PromptTemplate implementation that binds a template string to a fixed set
 * of kernel arguments, delegating the actual rendering to the
 * Handlebars-based template.
 */
export class KernelPromptTemplate implements PromptTemplate {
  constructor(
    private readonly template: string,
    private readonly args: KernelArguments
  ) {}

  render() {
    // No kernel is forwarded to the delegate; only the bound arguments are used.
    const delegate = handlebarsPromptTemplate(this.template);
    return delegate.render(undefined, this.args);
  }
}
2 changes: 1 addition & 1 deletion src/abstractions/src/promptTemplate/PromptTemplate.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import { Kernel } from '../Kernel';

export interface PromptTemplate {
render<Props>(kernel: Kernel, Props: Props): string | Promise<string>;
render<Props>(kernel: Kernel | undefined, Props: Props): string | Promise<string>;
}
14 changes: 14 additions & 0 deletions src/abstractions/src/promptTemplate/handlebarsPromptTemplate.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import { KernelArguments } from '../functions';
import { PromptTemplate } from '@semantic-kernel/abstractions/src/promptTemplate/promptTemplate';
import Handlebars from 'handlebars';

export const handlebarsPromptTemplate = (template: string): PromptTemplate => {
return {
render: async (_, props) => {
const compiledTemplate = Handlebars.compile(template);
// TODO: add Kernel plugins as helpers

return compiledTemplate((props as KernelArguments).arguments);
},
};
};