From 11d8ce2f3a2e67e3d5a5c6c1aaa3c477557c7473 Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 11 Feb 2025 12:38:27 +0800 Subject: [PATCH] feat(core): add rawOnCensor option and improve chat interface handling - Add rawOnCensor option to Config interface and schema - Enhance ChatInterface to handle post-processing and message storage - Update locales for new rawOnCensor option --- packages/core/src/config.ts | 2 ++ packages/core/src/llm-core/chat/app.ts | 36 ++++++++++++++-------- packages/core/src/locales/en-US.schema.yml | 1 + packages/core/src/locales/zh-CN.schema.yml | 1 + 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/packages/core/src/config.ts b/packages/core/src/config.ts index 149eece6..d189f3aa 100644 --- a/packages/core/src/config.ts +++ b/packages/core/src/config.ts @@ -26,6 +26,7 @@ export interface Config { autoDelete: boolean autoDeleteTimeout: number messageDelay: number + rawOnCensor: boolean autoUpdateRoomMode: 'disable' | 'all' | 'manual' privateChatWithoutCommand: boolean @@ -87,6 +88,7 @@ export const Config: Schema = Schema.intersect([ outputMode: Schema.dynamic('output-mode').default('text'), splitMessage: Schema.boolean().default(false), censor: Schema.boolean().default(false), + rawOnCensor: Schema.boolean().default(false), streamResponse: Schema.boolean().default(false) }), diff --git a/packages/core/src/llm-core/chat/app.ts b/packages/core/src/llm-core/chat/app.ts index ba852d2b..f4dc880b 100644 --- a/packages/core/src/llm-core/chat/app.ts +++ b/packages/core/src/llm-core/chat/app.ts @@ -134,26 +134,38 @@ export class ChatInterface { arg: ChatLunaLLMCallArg, wrapper: ChatLunaLLMChainWrapper ): Promise { - const response = (await wrapper.call(arg)) as { - message: AIMessage - } & ChainValues + const response = ( + (await wrapper.call(arg)) as { + message: AIMessage + } & ChainValues + ).message + + const displayResponse = new AIMessage(response) + this._chatCount++ // Handle post-processing if needed if (arg.postHandler) { 
- const handlerResult = await this.handlePostProcessing(arg, response) - response.message.content = handlerResult.displayContent + const handlerResult = await this.handlePostProcessing( + arg, + displayResponse + ) + displayResponse.content = handlerResult.displayContent await this._chatHistory.overrideAdditionalArgs( handlerResult.variables ) } - const messageContent = getMessageContent(response.message.content) + const messageContent = getMessageContent(displayResponse.content) // Update chat history if (messageContent.trim().length > 0) { await this.chatHistory.addMessage(arg.message) - await this.chatHistory.addMessage(response.message) + let saveMessage = response + if (!this.ctx.chatluna.config.rawOnCensor) { + saveMessage = displayResponse + } + await this.chatHistory.addMessage(saveMessage) } // Process response @@ -161,24 +173,24 @@ export class ChatInterface { 'chatluna/after-chat', arg.conversationId, arg.message, - response.message as AIMessage, + displayResponse as AIMessage, { ...arg.variables, chatCount: this._chatCount }, this, wrapper ) - return response + return { message: displayResponse } } private async handlePostProcessing( arg: ChatLunaLLMCallArg, - response: { message: AIMessage } & ChainValues + message: AIMessage ): Promise { - logger.debug(`original content: %c`, response.message.content) + logger.debug(`original content: %c`, message.content) return await arg.postHandler.handler( arg.session, - getMessageContent(response.message.content) + getMessageContent(message.content) ) } diff --git a/packages/core/src/locales/en-US.schema.yml b/packages/core/src/locales/en-US.schema.yml index dc5147d8..da93b70d 100644 --- a/packages/core/src/locales/en-US.schema.yml +++ b/packages/core/src/locales/en-US.schema.yml @@ -27,6 +27,7 @@ $inner: $desc: Select message reply rendering mode. splitMessage: 'Enable message splitting. Splits replies into multiple messages for natural conversation flow. 
Note: Incompatible with quoted messages, raw mode, or image mode. Stream response enables finer-grained splitting.' censor: Enable text moderation (requires censor service). + rawOnCensor: Whether to send the raw message to the model when the Post Handler is triggered. streamResponse: 'Enable stream response. Initiates message sending during reply generation. Note: Disables rendering output mode and incompatible with plugin mode.' - $desc: Blacklist Management diff --git a/packages/core/src/locales/zh-CN.schema.yml b/packages/core/src/locales/zh-CN.schema.yml index f5c9eadc..77a2c1aa 100644 --- a/packages/core/src/locales/zh-CN.schema.yml +++ b/packages/core/src/locales/zh-CN.schema.yml @@ -27,6 +27,7 @@ $inner: $desc: 选择消息回复的渲染输出模式。 splitMessage: 是否启用消息分割发送。启用后,回复会被分割成多条消息发送,使其看起来更像普通用户的对话。注意:此选项不支持引用消息、原始模式和图片模式。在启用流式响应时,会进行更细化的消息分割。 censor: 是否启用文本审核服务(需要安装 censor 服务)。 + rawOnCensor: 是否在 Post Handler 被触发时,将原始消息发送给模型。 streamResponse: 是否启用流式响应。启用后,bot 会在生成回复的过程中就开始发送消息,而不是等待完全生成后再发送。注意:启用此选项会导致渲染输出模式选项失效,且不支持插件模式。 - $desc: 黑名单选项