Skip to content

Commit

Permalink
update rag sample from bug bash
Browse files Browse the repository at this point in the history
  • Loading branch information
qubitron committed Nov 13, 2024
1 parent 44ada1d commit 4b65ab9
Show file tree
Hide file tree
Showing 8 changed files with 101 additions and 16 deletions.
2 changes: 2 additions & 0 deletions scenarios/projects/basic/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,4 +22,6 @@
{"role": "user", "content": "Hey, can you help me with my taxes? I'm a freelancer."},
]
)

print(response.choices[0].message.content)
# </chat_completion>
4 changes: 2 additions & 2 deletions scenarios/projects/basic/prompt_inline.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@
from azure.ai.inference.prompts import PromptTemplate

# create a prompt template from an inline string (using mustache syntax)
prompt_template = PromptTemplate.from_string(prompt_template="""
prompt_template = PromptTemplate.from_string(prompt_template="""\
system:
You are a helpful writing assistant.
The user's first name is {{first_name}} and their last name is {{last_name}}.
user:
Write me a short poem about flowers
""")
Expand Down
2 changes: 1 addition & 1 deletion scenarios/rag/custom-rag-app/.env.sample
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
AIPROJECT_CONNECTION_STRING=your_connection_string
AIPROJECT_CONNECTION_STRING="your_connection_string"
AISEARCH_INDEX_NAME="products"
CHAT_MODEL="gpt-4o-mini"
EMBEDDINGS_MODEL="text-embedding-ada-002"
Expand Down
4 changes: 1 addition & 3 deletions scenarios/rag/custom-rag-app/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,7 @@ Now we'll need to deploy a model so that we can call it from code. To start, we'
- Click the **+ Deploy Model** dropdown and click **Deploy a base model**
- Select **gpt-4o-mini** from the list and click **Confirm**

Repeat the above steps for the following models:
- text-embedding-3-large
- Phi-3.5-mini-instruct _coming soon: not currently available in AI Studio_
Repeat the above steps to add a **text-embedding-ada-002** deployment.

## Set up a local development environment

Expand Down
19 changes: 11 additions & 8 deletions scenarios/rag/custom-rag-app/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,14 @@
ASSET_PATH = os.path.join(pathlib.Path(__file__).parent.resolve(), "assets")

# Configure a root app logger that prints info-level logs to stdout
root_logger = logging.getLogger("app")
root_logger.setLevel(logging.INFO)
root_logger.addHandler(logging.StreamHandler(stream=sys.stdout))
logger = logging.getLogger("app")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(stream=sys.stdout))

# Returns a module-specific logger, inheriting from the root app logger
def get_logger(module_name):
logger = logging.getLogger(f"app.{module_name}")
return logger
module_logger = logging.getLogger(f"app.{module_name}")
return module_logger

# Enable instrumentation and logging of telemetry to the project
def enable_telemetry(log_to_project : bool = False):
Expand All @@ -37,11 +37,14 @@ def enable_telemetry(log_to_project : bool = False):
conn_str=os.environ['AIPROJECT_CONNECTION_STRING'],
credential=DefaultAzureCredential()
)
tracing_link = f"https://ai.azure.com/tracing?wsid=/subscriptions/{project.scope['subscription_id']}/resourceGroups/{project.scope['resource_group_name']}/providers/Microsoft.MachineLearningServices/workspaces/{project.scope['project_name']}"
application_insights_connection_string = project.telemetry.get_connection_string()
if not application_insights_connection_string:
"No application insights connection string found. Telemetry will not be logged to project."
logger.warning("No application insights configured, telemetry will not be logged to project. Add application insights at:")
logger.warning(tracing_link)

return

configure_azure_monitor(connection_string=application_insights_connection_string)
print("Enabled telemetry logging to project, view traces at:")
print(f"https://int.ai.azure.com/project-monitoring?wsid=/subscriptions/{project.scope['subscription_id']}/resourceGroups/{project.scope['resource_group_name']}/providers/Microsoft.MachineLearningServices/workspaces/{project.scope['project_name']}")
logger.info("Enabled telemetry logging to project, view traces at:")
logger.info(tracing_link)
2 changes: 1 addition & 1 deletion scenarios/rag/custom-rag-app/dev-requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
./packages/azure_ai_projects-1.0.0b1-py3-none-any.whl # azure AI projects package
./packages/azure_ai_inference-1.0.0b5-py3-none-any.whl # azure AI inference package w prompts
azure-ai-inference[prompts]
azure-identity
azure-monitor-opentelemetry
azure-search-documents
Expand Down
82 changes: 82 additions & 0 deletions scenarios/rag/custom-rag-app/evaluate_simulate.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
from chat_with_products import chat_with_products
import os

from azure.ai.evaluation.simulator import Simulator
from azure.ai.evaluation import evaluate, CoherenceEvaluator, FluencyEvaluator
from typing import Any, Dict, List, Optional
import asyncio
from azure.ai.projects import AIProjectClient
from azure.ai.projects.models import ConnectionType
from azure.identity import DefaultAzureCredential

from dotenv import load_dotenv
load_dotenv()

# Connect to the Azure AI Foundry project named by the .env connection string.
# NOTE: this runs at import time and requires valid Azure credentials.
project = AIProjectClient.from_connection_string(
    conn_str=os.environ['AIPROJECT_CONNECTION_STRING'],
    credential=DefaultAzureCredential()
)

# Fetch the project's default Azure OpenAI connection, including its API key
# (with_credentials=True), so the evaluators can call the model directly.
connection = project.connections.get_default(
    connection_type=ConnectionType.AZURE_OPEN_AI,
    with_credentials=True)

# Model configuration passed to the simulator and the AI-assisted evaluators.
# EVALUATION_MODEL is the deployment name of the judge/simulation model.
evaluator_model = {
    "azure_endpoint": connection.endpoint_url,
    "azure_deployment": os.environ["EVALUATION_MODEL"],
    "api_version": "2024-06-01",
    "api_key": connection.key,
}

async def custom_simulator_callback(
    # BUG FIX: was annotated List[Dict], but the simulator passes a dict
    # payload whose "messages" key holds the turn list (see indexing below).
    messages: Dict[str, Any],
    stream: bool = False,
    session_state: Any = None,
    context: Optional[Dict[str, Any]] = None,
) -> dict:
    """Simulator target: answer the latest simulated user turn.

    Forwards the conversation so far to the chat_with_products application,
    appends its grounded assistant reply, and returns the payload in the
    shape the simulator expects.

    :param messages: simulator payload; the conversation turns live under
        the "messages" key.
    :param stream: echoed back unchanged.
    :param session_state: echoed back unchanged.
    :param context: echoed back unchanged.
    :return: dict with the updated "messages" list plus the echoed fields.
    """
    # Unwrap the turn list from the simulator payload and show the user turn.
    actual_messages = messages["messages"]
    print(f"\n🗨️ {actual_messages[-1]['content']}")
    response = chat_with_products(actual_messages)
    message = {
        "role": "assistant",
        "content": response['message']['content'],
        # Pass the retrieved documents through so groundedness-style
        # evaluators can see what the answer was based on.
        "context": response['context']['grounding_data']
    }
    actual_messages.append(message)
    return { "messages": actual_messages, "stream": stream, "session_state": session_state, "context": context }

async def custom_simulator_raw_conversation_starter():
    """Run one simulated conversation and persist it as JSONL.

    Seeds the module-level simulator with a single opening user turn, lets
    it run for up to 10 turns against custom_simulator_callback, and writes
    each simulated output to chat_output.jsonl in eval-ready format.
    """
    starter_turns = [
        ["I need a new tent, what would you recommend?"],
    ]
    simulated_outputs = await custom_simulator(
        target=custom_simulator_callback,
        conversation_turns=starter_turns,
        max_conversation_turns=10,
    )
    with open("chat_output.jsonl", "w") as out_file:
        out_file.writelines(item.to_eval_qr_json_lines() for item in simulated_outputs)

async def evaluate_custom_simulator_raw_conversation_starter():
    """Score the simulated conversation in chat_output.jsonl.

    Runs the coherence and fluency AI-assisted evaluators over the JSONL
    produced by custom_simulator_raw_conversation_starter and prints the
    aggregated results.
    """
    # BUG FIX: the original referenced an undefined name `model_config`
    # (NameError at call time); the module-level judge-model configuration
    # is `evaluator_model`.
    coherence_eval = CoherenceEvaluator(model_config=evaluator_model)
    fluency_eval = FluencyEvaluator(model_config=evaluator_model)
    eval_outputs = evaluate(
        data="chat_output.jsonl",
        evaluators={
            "coherence": coherence_eval,
            "fluency": fluency_eval,
        },
        # azure_ai_project=azure_ai_project, # optional: also upload results to the project
    )
    print(eval_outputs)


if __name__ == "__main__":
    # Assigned at module scope so the coroutines above can reference it.
    custom_simulator = Simulator(model_config=evaluator_model)
    # Run the simulation directly; the extra wrapper coroutine was redundant.
    asyncio.run(custom_simulator_raw_conversation_starter())
2 changes: 1 addition & 1 deletion scenarios/rag/custom-rag-app/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
azure-ai-projects
azure-ai-inference
azure-ai-inference[prompts]
azure-identity
azure-search-documents
pandas
Expand Down

0 comments on commit 4b65ab9

Please sign in to comment.