diff --git a/requirements.txt b/requirements.txt
index b0955f0..ec07850 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1,3 @@
-chainlit==1.0.200
\ No newline at end of file
+chainlit==1.0.200
+langchain==0.1.5
+openai==1.10.0
diff --git a/src/demo.py b/src/demo.py
index 306ff5e..8a43ce8 100644
--- a/src/demo.py
+++ b/src/demo.py
@@ -1,24 +1,37 @@
 import chainlit as cl
+from langchain.prompts import ChatPromptTemplate
+from langchain.schema import StrOutputParser
+from langchain.schema.runnable import Runnable
+from langchain.schema.runnable.config import RunnableConfig
+from langchain_community.chat_models import ChatOpenAI
 
 
-@cl.step
-def tool():
-    return "Response from the tool!"
+@cl.on_chat_start
+async def on_chat_start():
+    model = ChatOpenAI(streaming=True)
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            (
+                "system",
+                "You are an excellent QA system. Think step by step about the user's question and return an appropriate answer.",
+            ),
+            ("human", "{question}"),
+        ]
+    )
+    runnable = prompt | model | StrOutputParser()
+    cl.user_session.set("runnable", runnable)
 
 
-@cl.on_message  # this function will be called every time a user inputs a message in the UI
-async def main(message: cl.Message):
-    """
-    This function is called every time a user inputs a message in the UI.
-    It sends back an intermediate response from the tool, followed by the final answer.
-    Args:
-        message: The user's message.
-    Returns:
-        None.
-    """
+@cl.on_message
+async def on_message(message: cl.Message):
+    runnable = cl.user_session.get("runnable")  # type: Runnable
 
-    # Call the tool
-    tool()
+    msg = cl.Message(content="")
 
-    # Send the final answer.
-    await cl.Message(content="This is the final answer").send()
\ No newline at end of file
+    async for chunk in runnable.astream(
+        {"question": message.content},
+        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
+    ):
+        await msg.stream_token(chunk)
+
+    await msg.send()
\ No newline at end of file
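For reference, a minimal standalone sketch of the same LCEL chain (prompt | model | StrOutputParser) outside Chainlit, useful for checking the pipeline before wiring it into the UI. This assumes OPENAI_API_KEY is set in the environment; the question string is illustrative only.

    import asyncio

    from langchain.prompts import ChatPromptTemplate
    from langchain.schema import StrOutputParser
    from langchain_community.chat_models import ChatOpenAI


    async def main():
        # Same chain as src/demo.py builds in on_chat_start.
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are an excellent QA system. Think step by step about the user's question and return an appropriate answer.",
                ),
                ("human", "{question}"),
            ]
        )
        runnable = prompt | ChatOpenAI(streaming=True) | StrOutputParser()

        # With StrOutputParser at the end, astream yields plain string chunks.
        async for chunk in runnable.astream({"question": "What is Chainlit?"}):
            print(chunk, end="", flush=True)


    asyncio.run(main())

To run the app itself: pip install -r requirements.txt, export OPENAI_API_KEY, then chainlit run src/demo.py -w (the -w flag enables auto-reload on file changes).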