Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Reformat FastAPI GelAI tutorial to shorten lines to sub-79 #8381

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 67 additions & 28 deletions docs/intro/tutorials/ai_fastapi_searchbot.rst
Original file line number Diff line number Diff line change
Expand Up @@ -245,28 +245,35 @@ to it by implementing web search capabilities.
# then pad it with spaces so that it's offset appropriately for its depth

if comment["text"]:
timestamp = datetime.fromisoformat(comment["created_at"].replace("Z", "+00:00"))
timestamp = datetime.fromisoformat(
comment["created_at"].replace("Z", "+00:00")
)
author = comment["author"]
text = html.unescape(comment["text"])
formatted_comment = f"[{timestamp.strftime('%Y-%m-%d %H:%M')}] {author}: {text}"
formatted_comment = (
f"[{timestamp.strftime('%Y-%m-%d %H:%M')}] {author}: {text}"
)
results.append((" " * current_depth) + formatted_comment)

# If there are child comments, we extract them too
# and add them to the list.

if comment.get("children"):
for child in comment["children"][:max_children]:
child_comments = extract_comment_thread(child, max_depth, current_depth + 1)
child_comments = extract_comment_thread(
child, max_depth, current_depth + 1
)
results.extend(child_comments)

return results


def fetch_web_sources(query: str, limit: int = 5) -> list[WebSource]:
"""
For a given query perform a full-text search for stories on Hacker News.
From each of the matched stories extract the comment thread and format it into a single string.
For each story return its title, url and comment thread.
For a given query perform a full-text search for stories on Hacker
News. From each of the matched stories extract the comment thread and
format it into a single string. For each story return its title, url
and comment thread.
"""
search_url = "http://hn.algolia.com/api/v1/search_by_date?numericFilters=num_comments>0"

Expand Down Expand Up @@ -296,9 +303,7 @@ to it by implementing web search capabilities.
title = hit["title"]
comments = extract_comment_thread(item_result)
text = "\n".join(comments) if len(comments) > 0 else None
web_sources.append(
WebSource(url=site_url, title=title, text=text)
)
web_sources.append(WebSource(url=site_url, title=title, text=text))

return web_sources

Expand All @@ -312,6 +317,7 @@ to it by implementing web search capabilities.
print(source.text)



.. edb:split-section::

One more note: this snippet comes with an extra dependency called ``requests``,
Expand Down Expand Up @@ -394,10 +400,15 @@ those results to the LLM to get a nice-looking summary.
_ = load_dotenv()


def get_llm_completion(system_prompt: str, messages: list[dict[str, str]]) -> str:
def get_llm_completion(
system_prompt: str, messages: list[dict[str, str]]
) -> str:
api_key = os.getenv("OPENAI_API_KEY")
url = "https://api.openai.com/v1/chat/completions"
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
}

response = requests.post(
url,
Expand All @@ -415,6 +426,7 @@ those results to the LLM to get a nice-looking summary.
return result["choices"][0]["message"]["content"]



.. edb:split-section::

Note that this cloud LLM API (and many others) requires a secret key to be
Expand Down Expand Up @@ -456,8 +468,9 @@ those results to the LLM to get a nice-looking summary.
system_prompt = (
"You are a helpful assistant that answers user's questions"
+ " by finding relevant information in Hacker News threads."
+ " When answering the question, describe conversations that people have around the subject,"
+ " provided to you as a context, or say i don't know if they are completely irrelevant."
+ " When answering the question, describe conversations that people"
+ " have around the subject, provided to you as a context, or say"
+ " i don't know if they are completely irrelevant."
)

prompt = f"User search query: {query}\n\nWeb search results:\n"
Expand Down Expand Up @@ -817,7 +830,11 @@ basics before proceeding.
:caption: app/main.py

from edgedb import create_async_client
from .queries.get_users_async_edgeql import get_users as get_users_query, GetUsersResult
from .queries.get_users_async_edgeql import (
get_users as get_users_query,
GetUsersResult,
)



gel_client = create_async_client()
Expand Down Expand Up @@ -1046,7 +1063,10 @@ basics before proceeding.
:caption: app/main.py
:class: collapsible

from .queries.get_chats_async_edgeql import get_chats as get_chats_query, GetChatsResult
from .queries.get_chats_async_edgeql import (
get_chats as get_chats_query,
GetChatsResult,
)
from .queries.get_chat_by_id_async_edgeql import (
get_chat_by_id as get_chat_by_id_query,
GetChatByIdResult,
Expand Down Expand Up @@ -1076,7 +1096,9 @@ basics before proceeding.
if not chat:
raise HTTPException(
HTTPStatus.NOT_FOUND,
detail={"error": f"Chat {chat_id} for user {username} does not exist."},
detail={
"error": f"Chat {chat_id} for user {username} does not exist."
},
)
return chat
else:
Expand All @@ -1093,7 +1115,9 @@ basics before proceeding.
username: str = Query(), chat_id: str = Query()
) -> list[GetMessagesResult]:
"""Fetch all messages from a chat"""
return await get_messages_query(gel_client, username=username, chat_id=chat_id)
return await get_messages_query(
gel_client, username=username, chat_id=chat_id
)


.. edb:split-section::
Expand Down Expand Up @@ -1168,8 +1192,9 @@ basics before proceeding.
system_prompt = (
"You are a helpful assistant that answers user's questions"
+ " by finding relevant information in HackerNews threads."
+ " When answering the question, describe conversations that people have around the subject,"
+ " provided to you as a context, or say i don't know if they are completely irrelevant."
+ " When answering the question, describe conversations that"
+ " people have around the subject, provided to you as a context,"
+ " or say i don't know if they are completely irrelevant."
)

prompt = f"User search query: {query}\n\nWeb search results:\n"
Expand All @@ -1180,7 +1205,8 @@ basics before proceeding.

- messages = [{"role": "user", "content": prompt}]
+ messages = [
+ {"role": message.role, "content": message.body} for message in chat_history
+ {"role": message.role, "content": message.body}
+ for message in chat_history
+ ]
+ messages.append({"role": "user", "content": prompt})

Expand Down Expand Up @@ -1305,9 +1331,11 @@ working on our query rather than rewriting it from scratch every time.
prompt = f"Chat history: {formatted_history}\n\nUser message: {query} \n\n"

llm_response = get_llm_completion(
system_prompt=system_prompt, messages=[{"role": "user", "content": prompt}]
system_prompt=system_prompt,
messages=[{"role": "user", "content": prompt}],
)


return llm_response


Expand Down Expand Up @@ -1349,7 +1377,9 @@ working on our query rather than rewriting it from scratch every time.

# 3. Generate a query and perform googling
- search_query = search_terms.query
+ search_query = await generate_search_query(search_terms.query, chat_history)
+ search_query = await generate_search_query(
+ search_terms.query, chat_history
+ )
+ web_sources = await search_web(search_query)


Expand All @@ -1360,7 +1390,8 @@ working on our query rather than rewriting it from scratch every time.
web_sources,
)
+ search_result.search_query = search_query # add search query to the output
+ # to see what the bot is searching for
+ # to see what the bot is
+ # searching for
# 6. Add LLM response to Gel
_ = await add_message_query(
gel_client,
Expand Down Expand Up @@ -1577,11 +1608,15 @@ schema.
)

# 3. Generate a query and perform googling
search_query = await generate_search_query(search_terms.query, chat_history)
search_query = await generate_search_query(
search_terms.query, chat_history
)
web_sources = await search_web(search_query)

+ # 4. Fetch similar chats
+ db_ai: AsyncEdgeDBAI = await create_async_ai(gel_client, model="gpt-4o-mini")
+ db_ai: AsyncEdgeDBAI = await create_async_ai(
+ gel_client, model="gpt-4o-mini"
+ )
+ embedding = await db_ai.generate_embeddings(
+ search_query, model="text-embedding-3-small"
+ )
Expand All @@ -1601,7 +1636,8 @@ schema.
+ similar_chats,
)
search_result.search_query = search_query # add search query to the output
# to see what the bot is searching for
# to see what the bot is
# searching for
# 6. Add LLM response to Gel
_ = await add_message_query(
gel_client,
Expand Down Expand Up @@ -1633,7 +1669,9 @@ schema.
system_prompt = (
"You are a helpful assistant that answers user's questions"
+ " by finding relevant information in HackerNews threads."
+ " When answering the question, describe conversations that people have around the subject, provided to you as a context, or say i don't know if they are completely irrelevant."
+ " When answering the question, describe conversations that"
+ " people have around the subject, provided to you as a context,"
+ " or say i don't know if they are completely irrelevant."
+ + " You can reference previous conversation with the user that"
+ + " are provided to you, if they are relevant, by explicitly referring"
+ + " to them by saying as we discussed in the past."
Expand All @@ -1657,7 +1695,8 @@ schema.
+ prompt += "\n".join(formatted_chats)

messages = [
{"role": message.role, "content": message.body} for message in chat_history
{"role": message.role, "content": message.body}
for message in chat_history
]
messages.append({"role": "user", "content": prompt})

Expand Down