UI-for-examples #228

Open
wants to merge 1 commit into base: main
5 changes: 5 additions & 0 deletions examples/example-with-ui/context/.gitignore
@@ -0,0 +1,5 @@
.venv
.env
.mypy_cache
.chainlit

1 change: 1 addition & 0 deletions examples/example-with-ui/context/.python-version
@@ -0,0 +1 @@
3.11
21 changes: 21 additions & 0 deletions examples/example-with-ui/context/README.md
@@ -0,0 +1,21 @@
# Context
Check out the OpenAI Agents Python context documentation:
https://openai.github.io/openai-agents-python/context/

Context in the OpenAI Agents SDK lets you attach a Python object of your own to a run and make it available to tools, lifecycle hooks, and callbacks while the agent works. In this example it carries per-user data such as a user id: the object is passed to `Runner.run` through the `context` argument, tools read it via `RunContextWrapper`, and it is never sent to the LLM itself.
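
A minimal sketch of that pattern (the `UserInfo` class and `fetch_user_name` tool here are illustrative only, and the snippet assumes a default OpenAI model is configured; `app.py` in this example wires up Gemini instead):

```python
from dataclasses import dataclass

from agents import Agent, Runner
from agents.run_context import RunContextWrapper
from agents.tool import function_tool


@dataclass
class UserInfo:
    """Local context object: visible to tools and hooks, never sent to the LLM."""
    name: str


@function_tool
def fetch_user_name(ctx: RunContextWrapper[UserInfo]) -> str:
    """Read the user's name from the local context object."""
    return ctx.context.name


agent = Agent(
    name="Assistant",
    tools=[fetch_user_name],
    instructions="Use the fetch_user_name tool to address the user by name.",
)

# The context object is supplied per run via the `context` argument.
result = Runner.run_sync(agent, "Greet me by name.", context=UserInfo(name="Zia"))
print(result.final_output)
```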

Install the dependencies:

`uv add openai-agents python-dotenv chainlit`

Run these commands to start the project:

`cd context`

`uv venv`

On macOS/Linux, activate the virtual environment:

`source .venv/bin/activate`

Run the app with uv:

`uv run chainlit run app.py -w`
137 changes: 137 additions & 0 deletions examples/example-with-ui/context/app.py
@@ -0,0 +1,137 @@
import os
from dotenv import load_dotenv
from typing import cast, List
import chainlit as cl
from agents import Agent, Runner, AsyncOpenAI, OpenAIChatCompletionsModel
from agents.run import RunConfig
from agents.tool import function_tool
from agents.run_context import RunContextWrapper

# Load the environment variables from the .env file
load_dotenv()

gemini_api_key = os.getenv("GEMINI_API_KEY")

# Check if the API key is present; if not, raise an error
if not gemini_api_key:
    raise ValueError(
        "GEMINI_API_KEY is not set. Please ensure it is defined in your .env file."
    )


@cl.set_starters  # type: ignore
async def set_starters() -> List[cl.Starter]:
    return [
        cl.Starter(
            label="Greetings",
            message="Hello! What can you help me with today?",
        ),
        cl.Starter(
            label="Weather",
            message="Find the weather in Karachi.",
        ),
    ]


class MyContext:
    def __init__(self, user_id: str):
        self.user_id = user_id
        self.seen_messages = []


@function_tool
@cl.step(type="weather tool")
def get_weather(location: str, unit: str = "C") -> str:
    """
    Fetch the weather for a given location, returning a short description.
    """
    # Example logic
    return f"The weather in {location} is 22 degrees {unit}."


@function_tool
@cl.step(type="greeting tool")
def greet_user(context: RunContextWrapper[MyContext], greeting: str) -> str:
    """Greet the user identified by the user_id stored in the run context."""
    user_id = context.context.user_id
    return f"Hello {user_id}, you said: {greeting}"


@cl.on_chat_start
async def start():
    """Set up the chat session when a user connects."""
    # Reference: https://ai.google.dev/gemini-api/docs/openai
    external_client = AsyncOpenAI(
        api_key=gemini_api_key,
        base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
    )

    model = OpenAIChatCompletionsModel(
        model="gemini-2.0-flash", openai_client=external_client
    )

    config = RunConfig(
        model=model, model_provider=external_client, tracing_disabled=True
    )

    # Initialize an empty chat history in the session.
    cl.user_session.set("chat_history", [])

    cl.user_session.set("config", config)

    agent: Agent = Agent(
        name="Assistant",
        tools=[greet_user, get_weather],
        instructions=(
            "You are a helpful assistant. Always call the greet_user tool "
            "to greet the user when a session starts."
        ),
        model=model,
    )

    cl.user_session.set("agent", agent)

    await cl.Message(
        content="Welcome to the Ajmal Khan AI Assistant! How can I help you today?"
    ).send()


@cl.on_message
async def main(message: cl.Message):
    """Process incoming messages and generate responses."""
    # Send a placeholder message while the agent works.
    msg = cl.Message(content="Thinking...")
    await msg.send()

    agent: Agent = cast(Agent, cl.user_session.get("agent"))
    config: RunConfig = cast(RunConfig, cl.user_session.get("config"))

    # Retrieve the chat history from the session.
    history = cl.user_session.get("chat_history") or []

    # Append the user's message to the history.
    history.append({"role": "user", "content": message.content})

    # Local context object made available to tools via RunContextWrapper.
    my_ctx = MyContext(user_id="Zia")

    try:
        print("\n[CALLING_AGENT_WITH_CONTEXT]\n", history, "\n")
        # Use the async Runner.run here: Runner.run_sync cannot be called from
        # inside Chainlit's already-running event loop.
        result = await Runner.run(agent, history, run_config=config, context=my_ctx)

        response_content = result.final_output

        # Update the placeholder message with the actual response.
        msg.content = response_content
        await msg.update()

        # Append the response to the history.
        # NOTE: The response is appended with the "developer" role as a workaround
        # for a bug in the agents library; the expected behavior is to append it
        # as an "assistant" message.
        history.append({"role": "developer", "content": response_content})

        # Update the session with the new history.
        cl.user_session.set("chat_history", history)

        # Optional: Log the interaction.
        print(f"User: {message.content}")
        print(f"Assistant: {response_content}")

    except Exception as e:
        msg.content = f"Error: {str(e)}"
        await msg.update()
        print(f"Error: {str(e)}")
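
For reference, the developer-role workaround noted in `main()` could also be sidestepped with the history helper the Agents SDK documents, `result.to_input_list()`, which returns the turn's input items plus everything the run produced. A minimal, untested sketch of that approach (the `run_turn` helper name is illustrative, not part of this PR):

```python
import chainlit as cl
from agents import Agent, Runner
from agents.run import RunConfig


async def run_turn(agent: Agent, history: list, config: RunConfig, ctx) -> str:
    """Run one turn and persist the chat history using to_input_list()."""
    result = await Runner.run(agent, history, run_config=config, context=ctx)

    # to_input_list() returns the original input items plus the items generated
    # during this run, so it can be stored as-is and fed back on the next turn.
    cl.user_session.set("chat_history", result.to_input_list())
    return result.final_output
```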
21 changes: 21 additions & 0 deletions examples/example-with-ui/context/pyproject.toml
@@ -0,0 +1,21 @@
[project]
name = "context"
version = "0.1.0"
description = "Chainlit UI example showing local context with the OpenAI Agents SDK"
readme = "README.md"
authors = [
    { name = "Ajmal Khan", email = "[email protected]" }
]
requires-python = ">=3.11"
dependencies = [
    "chainlit>=2.4.1",
    "openai-agents>=0.0.4",
    "python-dotenv>=1.0.1",
]

[project.scripts]
context = "context:main"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"