diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..f5b2e59c --- /dev/null +++ b/.editorconfig @@ -0,0 +1,15 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[Makefile] +indent_size = 4 +indent_style = tab diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..fe4c3ca0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,559 @@ +# See: https://github.com/clemlesne/gitops-version +.version.cache + +# Temporary database +.local.sqlite + +# Local app config file +config.yaml + +# Created by https://www.toptal.com/developers/gitignore/api/osx,linux,eclipse,windows,visualstudio,visualstudiocode,dotenv +# Edit at https://www.toptal.com/developers/gitignore?templates=osx,linux,eclipse,windows,visualstudio,visualstudiocode,dotenv + +### dotenv ### +.env + +### Eclipse ### +.metadata +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath +.recommenders + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# PyDev specific (Python IDE for Eclipse) +*.pydevproject + +# CDT-specific (C/C++ Development Tooling) +.cproject + +# CDT- autotools +.autotools + +# Java annotation processor (APT) +.factorypath + +# PDT-specific (PHP Development Tools) +.buildpath + +# sbteclipse plugin +.target + +# Tern plugin +.tern-project + +# TeXlipse plugin +.texlipse + +# STS (Spring Tool Suite) +.springBeans + +# Code Recommenders +.recommenders/ + +# Annotation Processing +.apt_generated/ +.apt_generated_test/ + +# Scala IDE specific (Scala & Java development for Eclipse) +.cache-main +.scala_dependencies +.worksheet + +# Uncomment this line if you wish to ignore the project description file. 
+# Typically, this file would be tracked if it contains build/dependency configurations: +#.project + +### Eclipse Patch ### +# Spring Boot Tooling +.sts4-cache/ + +### Linux ### +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### OSX ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +### VisualStudio ### +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. +## +## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore + +# User-specific files +*.rsuser +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Mono auto generated files +mono_crash.* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +[Ww][Ii][Nn]32/ +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +bld/ +[Bb]in/ +[Oo]bj/ +[Ll]og/ +[Ll]ogs/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +# Uncomment if you have tasks that create the project's static files in wwwroot +#wwwroot/ + +# Visual Studio 2017 auto generated files +Generated\ Files/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUnit +*.VisualState.xml +TestResult.xml +nunit-*.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +# Benchmark Results +BenchmarkDotNet.Artifacts/ + +# .NET Core +project.lock.json +project.fragment.lock.json +artifacts/ + +# ASP.NET Scaffolding +ScaffoldingReadMe.txt + +# StyleCop +StyleCopReport.xml + +# Files built by Visual Studio +*_i.c +*_p.c +*_h.h +*.ilk +*.meta +*.obj +*.iobj +*.pch +*.pdb +*.ipdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp_proj +*_wpftmp.csproj +*.log +*.tlog +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opendb +*.opensdf +*.sdf +*.cachefile +*.VC.db +*.VC.VC.opendb + +# Visual Studio profiler +*.psess +*.vsp +*.vspx +*.sap + +# Visual Studio Trace Files +*.e2e + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# TeamCity is a 
build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# AxoCover is a Code Coverage Tool +.axoCover/* +!.axoCover/settings.json + +# Coverlet is a free, cross platform Code Coverage Tool +coverage*.json +coverage*.xml +coverage*.info + +# Visual Studio code coverage results +*.coverage +*.coveragexml + +# NCrunch +_NCrunch_* +.*crunch*.local.xml +nCrunchTemp_* + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# Note: Comment the next line if you want to checkin your web deploy settings, +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# Microsoft Azure Web App publish settings. Comment the next line if you want to +# checkin your Azure Web App publish settings, but sensitive information contained +# in these scripts will be unencrypted +PublishScripts/ + +# NuGet Packages +*.nupkg +# NuGet Symbol Packages +*.snupkg +# The packages folder can be ignored because of Package Restore +**/[Pp]ackages/* +# except build/, which is used as an MSBuild target. +!**/[Pp]ackages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/[Pp]ackages/repositories.config +# NuGet v3's project.json files produces more ignorable files +*.nuget.props +*.nuget.targets + +# Microsoft Azure Build Output +csx/ +*.build.csdef + +# Microsoft Azure Emulator +ecf/ +rcf/ + +# Windows Store app package directories and files +AppPackages/ +BundleArtifacts/ +Package.StoreAssociation.xml +_pkginfo.txt +*.appx +*.appxbundle +*.appxupload + +# Visual Studio cache files +# files ending in .cache can be ignored +*.[Cc]ache +# but keep track of directories ending in .cache +!?*.[Cc]ache/ + +# Others +ClientBin/ +~$* +*.dbmdl +*.dbproj.schemaview +*.jfm +*.pfx +*.publishsettings +orleans.codegen.cs + +# Including strong name files can present a security risk +# (https://github.com/github/gitignore/pull/2483#issue-259490424) +#*.snk + +# Since there are multiple workflows, uncomment next line to ignore bower_components +# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) +#bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm +ServiceFabricBackup/ +*.rptproj.bak + +# SQL Server files +*.mdf +*.ldf +*.ndf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings +*.rptproj.rsuser +*- [Bb]ackup.rdl +*- [Bb]ackup ([0-9]).rdl +*- [Bb]ackup ([0-9][0-9]).rdl + +# Microsoft Fakes +FakesAssemblies/ + +# GhostDoc plugin setting file +*.GhostDoc.xml + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat +node_modules/ + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + +# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) +*.vbw + +# Visual Studio 6 auto-generated project file (contains which files were open etc.) 
+*.vbp + +# Visual Studio 6 workspace and project file (working project files containing files to include in project) +*.dsw +*.dsp + +# Visual Studio 6 technical files + +# Visual Studio LightSwitch build output +**/*.HTMLClient/GeneratedArtifacts +**/*.DesktopClient/GeneratedArtifacts +**/*.DesktopClient/ModelManifest.xml +**/*.Server/GeneratedArtifacts +**/*.Server/ModelManifest.xml +_Pvt_Extensions + +# Paket dependency manager +.paket/paket.exe +paket-files/ + +# FAKE - F# Make +.fake/ + +# CodeRush personal settings +.cr/personal + +# Python Tools for Visual Studio (PTVS) +__pycache__/ +*.pyc + +# Cake - Uncomment if you are using it +# tools/** +# !tools/packages.config + +# Tabs Studio +*.tss + +# Telerik's JustMock configuration file +*.jmconfig + +# BizTalk build output +*.btp.cs +*.btm.cs +*.odx.cs +*.xsd.cs + +# OpenCover UI analysis results +OpenCover/ + +# Azure Stream Analytics local run output +ASALocalRun/ + +# MSBuild Binary and Structured Log +*.binlog + +# NVidia Nsight GPU debugger configuration file +*.nvuser + +# MFractors (Xamarin productivity tool) working folder +.mfractor/ + +# Local History for Visual Studio +.localhistory/ + +# Visual Studio History (VSHistory) files +.vshistory/ + +# BeatPulse healthcheck temp database +healthchecksdb + +# Backup folder for Package Reference Convert tool in Visual Studio 2017 +MigrationBackup/ + +# Ionide (cross platform F# VS Code tools) working folder +.ionide/ + +# Fody - auto-generated XML schema +FodyWeavers.xsd + +# VS Code files for those working on multiple tools +*.code-workspace + +# Local History for Visual Studio Code + +# Windows Installer files from build outputs + +# JetBrains Rider +*.sln.iml + +### VisualStudio Patch ### +# Additional files built by Visual Studio + +# End of https://www.toptal.com/developers/gitignore/api/osx,linux,eclipse,windows,visualstudio,visualstudiocode,dotenv diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..5cfe8a60 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,4 @@ +[submodule "cicd/version"] + path = cicd/version + url = https://github.com/clemlesne/gitops-version + branch = master diff --git a/.python-version b/.python-version new file mode 100644 index 00000000..ace200aa --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +claimaiphonebot311 diff --git a/.version.config b/.version.config new file mode 100644 index 00000000..9eb7b90e --- /dev/null +++ b/.version.config @@ -0,0 +1 @@ +patch diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..75094583 --- /dev/null +++ b/Makefile @@ -0,0 +1,52 @@ +container_name := ghcr.io/clemlesne/claim-ai-phone-bot +docker := docker +version_full ?= $(shell $(MAKE) --silent version-full) +version_small ?= $(shell $(MAKE) --silent version) +tunnel_name := claim-ai-phone-bot-$(shell hostname | tr '[:upper:]' '[:lower:]') +tunnel_url := $(shell devtunnel show $(tunnel_name) | grep -o 'http[s]*://[^"]*') + +version: + @bash ./cicd/version/version.sh -g . -c + +version-full: + @bash ./cicd/version/version.sh -g . -c -m + +install: + @echo "➡️ Installing Dev Tunnels CLI..." + devtunnel --version || brew install --cask devtunnel + + @echo "➡️ Installing Python dependencies..." + python3 -m pip install -r requirements.txt + +upgrade: + @echo "➡️ Upgrading pip..." + python3 -m pip install --upgrade pip + + @echo "➡️ Upgrading Python dependencies..." + pur -r requirements.txt + +test: + @echo "➡️ Running Black..." + python3 -m black --check . + +lint: + @echo "➡️ Running Black..." + python3 -m black . 
+ +tunnel: + @echo "➡️ Creating tunnel..." + devtunnel show $(tunnel_name) || devtunnel create $(tunnel_name) --allow-anonymous --expiration 1d + + @echo "➡️ Creating port forwarding..." + devtunnel port show $(tunnel_name) --port-number 8080 || devtunnel port create $(tunnel_name) --port-number 8080 + + @echo "➡️ Starting tunnel..." + devtunnel host $(tunnel_name) + +start: + VERSION=$(version_full) EVENTS_DOMAIN=$(tunnel_url) python3 -m uvicorn main:api \ + --header x-version:$${VERSION} \ + --no-server-header \ + --port 8080 \ + --proxy-headers \ + --reload diff --git a/README.md b/README.md index 3d51d1ab..c7e04151 100644 --- a/README.md +++ b/README.md @@ -1 +1,143 @@ -# groupama-diva-poc +# Groupama Diva PoC + +## Overview + +### Features + +- [x] Access to customer conversation history +- [x] Bot can be called from a phone number +- [x] Disengaging from a human agent when needed +- [x] Fine understanding of the customer request with GPT-4 Turbo +- [x] Follow a specific data schema for the claim +- [x] Help the user to find the information needed to complete the claim +- [x] Send an SMS report after the call +- [ ] Access the claim on a public website +- [ ] Call back the user when needed +- [ ] Simulate an IVR workflow + +### High-level architecture + +```mermaid +--- +title: System diagram (C4 model) +--- +graph + user(["User"]) + agent(["Agent"]) + + api["Claim AI"] + twilio["Twilio\n(external)"] + + api -- Send SMS report --> twilio + api -- Transfer to --> agent + api -. Send voice .-> user + user -- Call --> api +``` + +### Component-level architecture + +```mermaid +--- +title: Claim AI component diagram (C4 model) +--- +graph + user(["User"]) + agent(["Agent"]) + + twilio["Twilio\n(external)"] + + subgraph "Claim AI" + communication_service["Call gateway\n(Azure Communication Services)"] + event_grid[("Broker\n(Azure Event Grid)")] + api["API"] + db_conversation[("Conversations")] + db_claim[("Claims")] + gpt["GPT-4 Turbo\n(Azure OpenAI)"] + end + + api -- Answer with text --> communication_service + api -- Generate completion --> gpt + api -- Save claim --> db_claim + api -- Save conversation --> db_conversation + api -- Send SMS report --> twilio + api -- Transfer to agent --> communication_service + api -. Watch .-> event_grid + + communication_service -- Notifies --> event_grid + communication_service -- Transfer to --> agent + + communication_service -. 
Send voice .-> user + + user -- Call --> communication_service +``` + +## Installation + +### Prerequisites + +Place a file called `config.yaml` in the root of the project with the following content: + +```yaml +# config.yaml +api: + root_path: "/" + +monitoring: + logging: + app_level: INFO + sys_level: WARN + +resources: + public_url: "https://xxx.blob.core.windows.net/public" + +workflow: + agent_phone_number: "+33612345678" + bot_company: Contoso + bot_name: Robert + conversation_lang: fr-FR + +communication_service: + access_key: xxx + endpoint: https://xxx.france.communication.azure.com + phone_number: "+33612345678" + +cognitive_service: + # Must be of type "Azure AI services multi-service account" + # See: https://learn.microsoft.com/en-us/azure/ai-services/multi-service-resource?tabs=macos&pivots=azportal#create-a-new-multi-service-resource + endpoint: https://xxx.cognitiveservices.azure.com + +openai: + endpoint: https://xxx.openai.azure.com + gpt_deployment: gpt-4-turbo + gpt_model: gpt-4-1106-preview + +eventgrid: + resource_group: claim-ai-poc + subscription_id: xxx + system_topic: claim-ai-poc +``` + +Then run: + +```bash +# Install dependencies +make install +``` + +Also, a public file server is needed to host the audio files. + +For this, you can use Azure Blob Storage. In that case, the contents of the project folder `resources` need to be uploaded to the public container of the storage account. + +### Run + +Finally, in two different terminals, run: + +```bash +# Expose the local server to the internet +make tunnel +``` + +```bash +# Start the local API server +make start +``` diff --git a/cicd/version b/cicd/version new file mode 160000 index 00000000..ae933dab --- /dev/null +++ b/cicd/version @@ -0,0 +1 @@ +Subproject commit ae933dabf27bbece481f59c80c659ea502bf1878 diff --git a/helpers/config.py b/helpers/config.py new file mode 100644 index 00000000..2863b095 --- /dev/null +++ b/helpers/config.py @@ -0,0 +1,39 @@ +# Load "CONFIG_JSON" for debug purposes +from dotenv import load_dotenv, find_dotenv + +# Load recursively from relative, like "config.yaml" +load_dotenv(find_dotenv()) + +# Load deps +from helpers.config_models.root import RootModel +import os +import yaml + + +CONFIG_ENV = "CONFIG_JSON" +CONFIG_FILE = "config.yaml" + + +class ConfigNotFound(Exception): + pass + + +class ConfigBadFormat(Exception): + pass + + +if CONFIG_ENV in os.environ: + CONFIG = RootModel.model_validate_json(os.environ[CONFIG_ENV]) + print(f'Config from env "{CONFIG_ENV}" loaded') + +else: + print(f'Config from env "{CONFIG_ENV}" not found') + path = find_dotenv(filename=CONFIG_FILE) + if not path: + raise ConfigNotFound(f'Cannot find config file "{CONFIG_FILE}"') + try: + with open(path, "r", encoding="utf-8") as f: + CONFIG = RootModel.model_validate(yaml.safe_load(f)) + except Exception as e: + raise ConfigBadFormat(f'Config "{path}" is not valid YAML') from e + print(f'Config "{path}" loaded') diff --git a/helpers/config_models/api.py b/helpers/config_models/api.py new file mode 100644 index 00000000..61025069 --- /dev/null +++ b/helpers/config_models/api.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel + + +class ApiModel(BaseModel): + root_path: str = "" diff --git a/helpers/config_models/cognitive_service.py b/helpers/config_models/cognitive_service.py new file mode 100644 index 00000000..8c8a6820 --- /dev/null +++ b/helpers/config_models/cognitive_service.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel + + +class CognitiveServiceModel(BaseModel): + endpoint: str diff --git 
a/helpers/config_models/communication_service.py b/helpers/config_models/communication_service.py new file mode 100644 index 00000000..bce8549d --- /dev/null +++ b/helpers/config_models/communication_service.py @@ -0,0 +1,7 @@ +from pydantic import BaseModel, SecretStr + + +class CommunicationServiceModel(BaseModel): + access_key: SecretStr + endpoint: str + phone_number: str diff --git a/helpers/config_models/eventgrid.py b/helpers/config_models/eventgrid.py new file mode 100644 index 00000000..05e85961 --- /dev/null +++ b/helpers/config_models/eventgrid.py @@ -0,0 +1,7 @@ +from pydantic import BaseModel + + +class EventgridModel(BaseModel): + resource_group: str + subscription_id: str + system_topic: str diff --git a/helpers/config_models/monitoring.py b/helpers/config_models/monitoring.py new file mode 100644 index 00000000..a9fa6f6a --- /dev/null +++ b/helpers/config_models/monitoring.py @@ -0,0 +1,21 @@ +from enum import Enum +from pydantic import BaseModel, SecretStr + + +class LoggingLevel(str, Enum): + # Copied from https://docs.python.org/3.11/library/logging.html#logging-levels + CRITICAL = "CRITICAL" + DEBUG = "DEBUG" + ERROR = "ERROR" + INFO = "INFO" + WARN = "WARN" # Alias for WARNING, non-standard but used by the logging module + WARNING = "WARNING" + + +class LoggingMonitoringModel(BaseModel): + app_level: LoggingLevel = LoggingLevel.INFO + sys_level: LoggingLevel = LoggingLevel.WARNING + + +class MonitoringModel(BaseModel): + logging: LoggingMonitoringModel diff --git a/helpers/config_models/openai.py b/helpers/config_models/openai.py new file mode 100644 index 00000000..ad47aa71 --- /dev/null +++ b/helpers/config_models/openai.py @@ -0,0 +1,7 @@ +from pydantic import BaseModel, HttpUrl + + +class OpenAiModel(BaseModel): + endpoint: HttpUrl + gpt_deployment: str + gpt_model: str diff --git a/helpers/config_models/resources.py b/helpers/config_models/resources.py new file mode 100644 index 00000000..b7179702 --- /dev/null +++ b/helpers/config_models/resources.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel + + +class ResourcesModel(BaseModel): + public_url: str diff --git a/helpers/config_models/root.py b/helpers/config_models/root.py new file mode 100644 index 00000000..fc1acc40 --- /dev/null +++ b/helpers/config_models/root.py @@ -0,0 +1,20 @@ +from helpers.config_models.api import ApiModel +from helpers.config_models.cognitive_service import CognitiveServiceModel +from helpers.config_models.communication_service import CommunicationServiceModel +from helpers.config_models.eventgrid import EventgridModel +from helpers.config_models.monitoring import MonitoringModel +from helpers.config_models.openai import OpenAiModel +from helpers.config_models.resources import ResourcesModel +from helpers.config_models.workflow import WorkflowModel +from pydantic import BaseModel + + +class RootModel(BaseModel): + api: ApiModel + cognitive_service: CognitiveServiceModel + communication_service: CommunicationServiceModel + eventgrid: EventgridModel + monitoring: MonitoringModel + openai: OpenAiModel + resources: ResourcesModel + workflow: WorkflowModel diff --git a/helpers/config_models/workflow.py b/helpers/config_models/workflow.py new file mode 100644 index 00000000..d65cc6e5 --- /dev/null +++ b/helpers/config_models/workflow.py @@ -0,0 +1,8 @@ +from pydantic import BaseModel + + +class WorkflowModel(BaseModel): + agent_phone_number: str + bot_company: str + bot_name: str + conversation_lang: str diff --git a/helpers/logging.py b/helpers/logging.py new file mode 100644 index 
00000000..b1e4ae9c --- /dev/null +++ b/helpers/logging.py @@ -0,0 +1,14 @@ +from helpers.config import CONFIG +import logging + + +LOGGING_APP_LEVEL = CONFIG.monitoring.logging.app_level.value +LOGGING_SYS_LEVEL = CONFIG.monitoring.logging.sys_level.value + +logging.basicConfig(level=LOGGING_SYS_LEVEL) + + +def build_logger(name: str) -> logging.Logger: + logger = logging.getLogger(name) + logger.setLevel(LOGGING_APP_LEVEL) + return logger diff --git a/helpers/version.py b/helpers/version.py new file mode 100644 index 00000000..f4aafe69 --- /dev/null +++ b/helpers/version.py @@ -0,0 +1,4 @@ +import os + + +VERSION: str = os.getenv("VERSION", "0.0.0-unknown") diff --git a/main.py b/main.py new file mode 100644 index 00000000..79673478 --- /dev/null +++ b/main.py @@ -0,0 +1,729 @@ +from typing import Optional, Union +from azure.communication.callautomation import ( + CallAutomationClient, + CallConnectionClient, + FileSource, + PhoneNumberIdentifier, + RecognizeInputType, + TextSource, +) +from azure.communication.sms import SmsClient +from azure.core.credentials import AzureKeyCredential +from azure.core.exceptions import ResourceNotFoundError +from azure.core.messaging import CloudEvent +from azure.eventgrid import EventGridEvent, SystemEventNames +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.mgmt.core.polling.arm_polling import ARMPolling +from azure.mgmt.eventgrid import EventGridManagementClient +from contextlib import asynccontextmanager +from datetime import datetime +from enum import Enum +from fastapi import FastAPI, status, Request +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse, Response +from helpers.config import CONFIG +from helpers.logging import build_logger +from helpers.version import VERSION +from models.action import ActionModel, Indent as IndentAction +from models.call import ( + CallModel, + MessageModel as CallMessageModel, + Persona as CallPersona, + ToolModel as CallToolModel, +) +from models.claim import ClaimModel +from openai import AsyncAzureOpenAI +from os import environ +from uuid import UUID, uuid4 +import asyncio +import json +import re +import sqlite3 + + +_logger = build_logger(__name__) + +ROOT_PATH = CONFIG.api.root_path +AZ_CREDENTIAL = DefaultAzureCredential() + +_logger.info(f'Using root path "{ROOT_PATH}"') + +oai_gpt = AsyncAzureOpenAI( + api_version="2023-12-01-preview", + azure_ad_token_provider=get_bearer_token_provider( + AZ_CREDENTIAL, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=CONFIG.openai.endpoint, + azure_deployment=CONFIG.openai.gpt_deployment, +) +eventgrid_subscription_name = f"tmp-{uuid4()}" +eventgrid_mgmt_client = EventGridManagementClient( + credential=DefaultAzureCredential(), + subscription_id=CONFIG.eventgrid.subscription_id, +) +source_caller = PhoneNumberIdentifier(CONFIG.communication_service.phone_number) +# Cannot place calls with RBAC, need to use access key (see: https://learn.microsoft.com/en-us/azure/communication-services/concepts/authentication#authentication-options) +call_automation_client = CallAutomationClient( + endpoint=CONFIG.communication_service.endpoint, + credential=AzureKeyCredential( + CONFIG.communication_service.access_key.get_secret_value() + ), +) +sms_client = SmsClient(credential=AZ_CREDENTIAL, endpoint=CONFIG.communication_service.endpoint) +db = sqlite3.connect(".local.sqlite", check_same_thread=False) + +# Default to an empty string so the assert below reports the missing variable instead of raising AttributeError +EVENTS_DOMAIN = environ.get("EVENTS_DOMAIN", "").strip("/") +assert EVENTS_DOMAIN, 
"EVENTS_DOMAIN environment variable is not set" +CALL_EVENT_URL = f"{EVENTS_DOMAIN}/call/event" +CALL_INBOUND_URL = f"{EVENTS_DOMAIN}/call/inbound" + +DEFAULT_SYSTEM_PROMPT = """ + Assistant called {{bot_name}} and is in a call center for the insurance company {{bot_company}} as an expert with 20 years of experience. Today is {{date}}. +""" +CHAT_SYSTEM_PROMPT = f""" + Assistant will help the customer with their insurance claim. + + Assistant: + - Answer in no more than few short sentences + - Answers in {CONFIG.workflow.conversation_lang}, even if the customer speaks in English + - Ask the customer to repeat or rephrase their question if it is not clear + - Cannot talk about any topic other than insurance claims + - Is polite, helpful, and professional + - Refer customers to emergency services or the police if necessary, but cannot give advice under any circumstances + - Rephrase the customer's questions as statements and answer them + + Assistant requires data from the customer to fill the claim. Latest claim data will be given. Assistant role is not over until all the relevant data is gathered. +""" +SMS_SUMMARY_SYSTEM_PROMPT = f""" + Assistant will summarize the call with the customer in a single SMS. The customer cannot reply to this SMS. + + Assistant: + - Answers in {CONFIG.workflow.conversation_lang}, even if the customer speaks in English + - Briefly summarize the call with the customer + - Can include personal details about the customer + - Cannot talk about any topic other than insurance claims + - Do not prefix the answer with any text, like "The answer is" or "Summary of the call" + - Is polite, helpful, and professional + - Use simple and short sentences + + Conversation history: + {{conversation}} +""" + +AGENT_PHONE_NUMBER_EMPTY_PROMPT = "Je suis désolé, mais nous enregistrons actuellement un nombre élevé d'appels et tous nos agents sont actuellement occupés. Notre prochain agent disponible vous rappellera dès que possible." +CALLTRANSFER_FAILURE_PROMPT = "Il semble que je ne puisse pas vous mettre en relation avec un agent pour l'instant, mais le prochain agent disponible vous rappellera dès que possible." +CONNECT_AGENT_PROMPT = "Je suis désolé, je n'ai pas été en mesure de répondre à votre demande. Permettez-moi de vous transférer à un agent qui pourra vous aider davantage. Veuillez rester en ligne et je vous recontacterai sous peu." +END_CALL_TO_CONNECT_AGENT_PROMPT = ( + "Bien sûr, restez en ligne. Je vais vous transférer à un agent." +) +ERROR_PROMPT = ( + "Je suis désolé, j'ai rencontré une erreur. Pouvez-vous répéter votre demande ?" +) +GOODBYE_PROMPT = f"Merci de votre appel ! J'espère avoir pu vous aider. {CONFIG.workflow.bot_company} vous souhaite une excellente journée !" +HELLO_PROMPT = f"Bonjour, je suis {CONFIG.workflow.bot_name}, l'assistant {CONFIG.workflow.bot_company} ! Je suis spécialiste des sinistres. Lorsque vous entendrez un bip, c'est que je suis en train de travailler. Comment puis-je vous aider aujourd'hui ?" +TIMEOUT_SILENCE_PROMPT = "Je suis désolé, je n'ai rien entendu. Si vous avez besoin d'aide, dites-moi comment je peux vous aider." +UPDATED_CLAIM_PROMPT = "Je mets à jour votre dossier..." 
+ + +class Context(str, Enum): + TRANSFER_FAILED = "transfer_failed" + CONNECT_AGENT = "connect_agent" + GOODBYE = "goodbye" + + +@asynccontextmanager +async def lifespan(_: FastAPI): + init_db() + task = asyncio.create_task(eventgrid_register()) # Background task + yield + task.cancel() + eventgrid_unregister() # Foreground task + + +api = FastAPI( + contact={ + "url": "https://github.com/clemlesne/claim-ai-phone-bot", + }, + description="AI-powered call center solution with Azure and OpenAI GPT.", + license_info={ + "name": "Apache-2.0", + "url": "https://github.com/clemlesne/claim-ai-phone-bot/blob/master/LICENCE", + }, + lifespan=lifespan, + root_path=ROOT_PATH, + title="claim-ai-phone-bot", + version=VERSION, +) + +api.add_middleware( + CORSMiddleware, + allow_headers=["*"], + allow_methods=["*"], + allow_origins=["*"], +) + + +async def eventgrid_register() -> None: + def callback(future: ARMPolling): + _logger.info(f"Event Grid subscription created (status {future.status()})") + + _logger.info(f"Creating Event Grid subscription {eventgrid_subscription_name}") + eventgrid_mgmt_client.system_topic_event_subscriptions.begin_create_or_update( + resource_group_name=CONFIG.eventgrid.resource_group, + system_topic_name=CONFIG.eventgrid.system_topic, + event_subscription_name=eventgrid_subscription_name, + event_subscription_info={ + "properties": { + "eventDeliverySchema": "EventGridSchema", + "destination": { + "endpointType": "WebHook", + "properties": { + "endpointUrl": CALL_INBOUND_URL, + "maxEventsPerBatch": 1, + }, + }, + "filter": { + "enableAdvancedFilteringOnArrays": True, + "includedEventTypes": ["Microsoft.Communication.IncomingCall"], + "advancedFilters": [ + { + "key": "data.to.PhoneNumber.Value", + "operatorType": "StringBeginsWith", + "values": [CONFIG.communication_service.phone_number], + } + ], + }, + }, + }, + ).add_done_callback(callback) + + +def eventgrid_unregister() -> None: + _logger.info(f"Deleting Event Grid subscription {eventgrid_subscription_name} (do not wait for completion)") + eventgrid_mgmt_client.system_topic_event_subscriptions.begin_delete( + event_subscription_name=eventgrid_subscription_name, + resource_group_name=CONFIG.eventgrid.resource_group, + system_topic_name=CONFIG.eventgrid.system_topic, + ) + + +@api.get( + "/health/liveness", + status_code=status.HTTP_204_NO_CONTENT, + description="Liveness healthcheck, always returns 204, used to check if the API is up.", +) +async def health_liveness_get() -> None: + return None + + +@api.get("/call/initiate", description="Initiate an outbound call to a phone number.") +def call_initiate_get(phone_number: str) -> Response: + _logger.info(f"Initiating outbound call to {phone_number}") + target_caller = PhoneNumberIdentifier(phone_number) + call_connection_properties = call_automation_client.create_call( + callback_url=callback_url(phone_number), + cognitive_services_endpoint=CONFIG.cognitive_service.endpoint, + source_caller_id_number=source_caller, + target_participant=target_caller, + ) + _logger.info( + f"Created call with connection id: {call_connection_properties.call_connection_id}" + ) + return Response(status_code=status.HTTP_204_NO_CONTENT) + + +@api.post( + "/call/inbound", + description="Handle incoming call from an Azure Event Grid event originating from Azure Communication Services.", +) +async def call_inbound_post(request: Request): + for event_dict in await request.json(): + event = EventGridEvent.from_dict(event_dict) + event_type = event.event_type + + _logger.debug(f"Call inbound event 
{event_type} with data {event.data}") + + if event_type == SystemEventNames.EventGridSubscriptionValidationEventName: + validation_code = event.data["validationCode"] + _logger.info(f"Validating Event Grid subscription ({validation_code})") + return JSONResponse( + content={"validationResponse": event.data["validationCode"]}, + status_code=200, + ) + + elif event_type == SystemEventNames.AcsIncomingCallEventName: + if event.data["from"]["kind"] == "phoneNumber": + phone_number = event.data["from"]["phoneNumber"]["value"] + else: + phone_number = event.data["from"]["rawId"] + + _logger.debug(f"Incoming call handler caller ID: {phone_number}") + call_context = event.data["incomingCallContext"] + answer_call_result = call_automation_client.answer_call( + callback_url=callback_url(phone_number), + cognitive_services_endpoint=CONFIG.cognitive_service.endpoint, + incoming_call_context=call_context, + ) + _logger.info(f"Answered call with {phone_number} ({answer_call_result.call_connection_id})") + + +@api.post( + "/call/event/{call_id}", + description="Handle callbacks from Azure Communication Services.", + status_code=status.HTTP_204_NO_CONTENT, +) +async def call_event_post(request: Request, call_id: UUID) -> None: + for event_dict in await request.json(): + event = CloudEvent.from_dict(event_dict) + + connection_id = event.data["callConnectionId"] + operation_context = event.data.get("operationContext", None) + client = call_automation_client.get_call_connection( + call_connection_id=connection_id + ) + call = get_call(call_id) + target_caller = PhoneNumberIdentifier(call.phone_number) + event_type = event.type + + _logger.debug(f"Call event received {event_type} for call {call}") + _logger.debug(event.data) + + if event_type == "Microsoft.Communication.CallConnected": + _logger.info(f"Call connected ({call.id})") + await handle_recognize( + call=call, + client=client, + text=HELLO_PROMPT, + to=target_caller, + ) + + elif event_type == "Microsoft.Communication.CallDisconnected": + _logger.info(f"Call disconnected ({call.id})") + await handle_hangup(call=call, client=client) + + elif event_type == "Microsoft.Communication.RecognizeCompleted": + if event.data["recognitionType"] == "speech": + speech_text = event.data["speechResult"]["speech"] + _logger.info(f"Recognition completed ({call.id}): {speech_text}") + + await handle_media(client, "acknowledge.wav") + + if speech_text is not None and len(speech_text) > 0: + call.messages.append( + CallMessageModel(content=speech_text, persona=CallPersona.HUMAN) + ) + await intelligence(call, client, target_caller) + + elif event_type == "Microsoft.Communication.RecognizeFailed": + result_information = event.data["resultInformation"] + error_code = result_information["subCode"] + + if error_code == 8510 and call.recognition_retry < 10: + await handle_recognize( + call=call, + client=client, + text=TIMEOUT_SILENCE_PROMPT, + to=target_caller, + ) + call.recognition_retry += 1 + + else: + await handle_play( + call=call, + client=client, + context=Context.GOODBYE.value, + text=GOODBYE_PROMPT, + ) + + elif event_type == "Microsoft.Communication.PlayCompleted": + _logger.debug(f"Play completed ({call.id})") + + if ( + operation_context == Context.TRANSFER_FAILED.value + or operation_context == Context.GOODBYE.value + ): + _logger.info(f"Ending call ({call.id})") + await handle_hangup(call=call, client=client) + + elif operation_context == Context.CONNECT_AGENT.value: + _logger.info(f"Call transfer initiated ({call.id})") + agent_caller = 
PhoneNumberIdentifier(CONFIG.workflow.agent_phone_number) + client.transfer_call_to_participant(target_participant=agent_caller) + + elif event_type == "Microsoft.Communication.CallTransferAccepted": + _logger.info(f"Call transfer accepted event ({call.id})") + # TODO: Is there anything to do here? + + elif event_type == "Microsoft.Communication.CallTransferFailed": + _logger.debug(f"Call transfer failed event ({call.id})") + result_information = event.data["resultInformation"] + sub_code = result_information["subCode"] + _logger.info(f"Error during call transfer, subCode {sub_code} ({call.id})") + await handle_play( + call=call, + client=client, + context=Context.TRANSFER_FAILED.value, + text=CALLTRANSFER_FAILURE_PROMPT, + ) + + save_call(call) + + +async def intelligence( + call: CallModel, client: CallConnectionClient, target_caller: PhoneNumberIdentifier +) -> None: + chat_res = await gpt_chat(call) + _logger.info(f"GPT response ({call.id}): {chat_res}") + + if chat_res.intent == IndentAction.TALK_TO_HUMAN: + await handle_play( + call=call, + client=client, + context=Context.CONNECT_AGENT.value, + text=END_CALL_TO_CONNECT_AGENT_PROMPT, + ) + + elif chat_res.intent == IndentAction.END_CALL: + await handle_play( + call=call, + client=client, + context=Context.GOODBYE.value, + text=GOODBYE_PROMPT, + ) + + elif chat_res.intent == IndentAction.UPDATE_CLAIM: + await handle_play( + call=call, + client=client, + text=UPDATED_CLAIM_PROMPT, + ) + await intelligence(call, client, target_caller) + + else: + await handle_recognize( + call=call, + client=client, + text=chat_res.content, + to=target_caller, + ) + + +async def handle_play( + client: CallConnectionClient, + call: CallModel, + text: str, + context: Optional[str] = None, +) -> None: + """ + Play a text to a call participant. 
+ + See: https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=tts + """ + call.messages.append(CallMessageModel(content=text, persona=CallPersona.ASSISTANT)) + client.play_media_to_all( + play_source=audio_from_text(text), operation_context=context + ) + + +async def gpt_completion(system: str, call: CallModel) -> str: + _logger.debug(f"Running GPT completion ({call.id})") + + messages = [ + { + "content": DEFAULT_SYSTEM_PROMPT.format( + bot_company=CONFIG.workflow.bot_company, + bot_name=CONFIG.workflow.bot_name, + date=datetime.now().strftime("%A %d %B %Y %H:%M:%S"), + ), + "role": "system", + }, + { + "content": system.format( + conversation=str(call.messages), + ), + "role": "system", + }, + { + "content": f"Claim status is: {call.claim.model_dump_json()}", + "role": "assistant", + }, + ] + _logger.debug(f"Messages: {messages}") + + content = None # Default, in case the OpenAI call fails + try: + res = await oai_gpt.chat.completions.create( + max_tokens=1000, # Arbitrary limit + messages=messages, + model=CONFIG.openai.gpt_model, + temperature=0, # Most focused and deterministic + ) + content = res.choices[0].message.content + + except Exception: + _logger.warning("OpenAI API call error", exc_info=True) + + return content or "" + + +async def gpt_chat(call: CallModel) -> ActionModel: + _logger.debug(f"Running GPT chat ({call.id})") + + messages = [ + { + "content": DEFAULT_SYSTEM_PROMPT.format( + bot_company=CONFIG.workflow.bot_company, + bot_name=CONFIG.workflow.bot_name, + date=datetime.now().strftime("%A %d %B %Y %H:%M:%S"), + ), + "role": "system", + }, + { + "content": CHAT_SYSTEM_PROMPT, + "role": "system", + }, + ] + for message in call.messages: + if message.persona == CallPersona.HUMAN: + messages.append( + { + "content": message.content, + "role": "user", + } + ) + elif message.persona == CallPersona.ASSISTANT: + if not message.tool_calls: + messages.append( + { + "content": message.content, + "role": "assistant", + } + ) + else: + messages.append( + { + "content": message.content, + "role": "assistant", + "tool_calls": [ + { + "id": tool_call.id, + "type": "function", + "function": { + "arguments": tool_call.function_arguments, + "name": tool_call.function_name, + }, + } + for tool_call in message.tool_calls + ], + } + ) + for tool_call in message.tool_calls: + messages.append( + { + "content": tool_call.content, + "role": "tool", + "tool_call_id": tool_call.id, + } + ) + messages.append( + { + "content": f"Claim status is: {call.claim.model_dump_json()}", + "role": "assistant", + } + ) + _logger.debug(f"Messages: {messages}") + + tools = [ + { + "type": "function", + "function": { + "description": "Use this if the user wants to talk to an agent and Assistant is unable to help, this will transfer the customer to a human agent.", + "name": IndentAction.TALK_TO_HUMAN.value, + "parameters": { + "properties": {}, + "required": [], + "type": "object", + }, + }, + }, + { + "type": "function", + "function": { + "description": "Use this if the user wants to end the call, or if the user is satisfied with the answer and confirmed the end of the call.", + "name": IndentAction.END_CALL.value, + "parameters": { + "properties": {}, + "required": [], + "type": "object", + }, + }, + }, + { + "type": "function", + "function": { + "description": "Use this if the user wants to update a claim field with a new value.", + "name": IndentAction.UPDATE_CLAIM.value, + "parameters": { + "properties": { + "field": { + "description": "The claim field to update.", + "enum": list( + ClaimModel.model_json_schema()["properties"].keys() 
+ ), + "type": "string", + }, + "value": { + "description": "The claim field value to update.", + "type": "string", + }, + }, + "required": [ + "field", + "value", + ], + "type": "object", + }, + }, + }, + ] + _logger.debug(f"Tools: {tools}") + + try: + # TODO: Manage to catch timeouts to limit waiting time for end users + res = await oai_gpt.chat.completions.create( + max_tokens=200, # Communication Services limit is 400 characters for TTS + messages=messages, + model=CONFIG.openai.gpt_model, + temperature=0, # Most focused and deterministic + tools=tools, + ) + + content = res.choices[0].message.content + tool_calls = res.choices[0].message.tool_calls + + intent = IndentAction.CONTINUE + models = [] + if tool_calls: + # TODO: Catch tool error individually + for tool_call in tool_calls: + name = tool_call.function.name + arguments = tool_call.function.arguments + _logger.info(f"Tool call {name} with parameters {arguments}") + + model = CallToolModel( + content="", + function_arguments=arguments, + function_name=name, + id=tool_call.id, + ) + + if name == IndentAction.TALK_TO_HUMAN.value: + intent = IndentAction.TALK_TO_HUMAN + elif name == IndentAction.END_CALL.value: + intent = IndentAction.END_CALL + elif name == IndentAction.UPDATE_CLAIM.value: + intent = IndentAction.UPDATE_CLAIM + parameters = json.loads(arguments) + setattr(call.claim, parameters["field"], parameters["value"]) + model.content = f"Udated claim field {parameters['field']} with value {parameters['value']}" + + models.append(model) + + call.messages.append( + CallMessageModel( + content=content or "", + persona=CallPersona.ASSISTANT, + tool_calls=models, + ) + ) + + return ActionModel( + content=content or "", + intent=intent, + ) + + except Exception: + _logger.warn(f"OpenAI API call error", exc_info=True) + + return ActionModel(content=ERROR_PROMPT, intent=IndentAction.CONTINUE) + + +async def handle_recognize( + client: CallConnectionClient, + call: CallModel, + to: PhoneNumberIdentifier, + text: str, + context: Optional[str] = None, +) -> None: + client.start_recognizing_media( + end_silence_timeout=3, # Sometimes user includes breaks in their speech + input_type=RecognizeInputType.SPEECH, + operation_context=context, + play_prompt=audio_from_text(text), + speech_language=CONFIG.workflow.conversation_lang, + target_participant=to, + ) + + +async def handle_media( + client: CallConnectionClient, + file: str, + context: Optional[str] = None, +) -> None: + client.play_media_to_all( + play_source=FileSource(f"{CONFIG.resources.public_url}/{file}"), operation_context=context + ) + + +async def handle_hangup(client: CallConnectionClient, call: CallModel) -> None: + _logger.debug(f"Hanging up call ({call.id})") + try: + client.hang_up(is_for_everyone=True) + except ResourceNotFoundError: + _logger.debug(f"Call already hung up ({call.id})") + + content = await gpt_completion(SMS_SUMMARY_SYSTEM_PROMPT, call) + _logger.info(f"SMS report ({call.id}): {content}") + + try: + sms_client.send( + from_=CONFIG.communication_service.phone_number, + message=content, + to=call.phone_number, + ) + _logger.info(f"SMS report sent ({call.id})") + except Exception: + _logger.warn(f"SMS report error ({call.id})", exc_info=True) + + +def audio_from_text(text: str) -> TextSource: + if len(text) > 400: + _logger.warning( + f"Text is too long to be processed by TTS, truncating to 400 characters, fix this!" 
+ ) + text = text[:400] + return TextSource(text=text, voice_name="fr-FR-DeniseNeural", source_locale=CONFIG.workflow.conversation_lang) + + +def callback_url(caller_id: str) -> str: + call = CallModel(phone_number=caller_id) + save_call(call) + return f"{CALL_EVENT_URL}/{call.id}" + + +def init_db(): + db.execute("CREATE TABLE IF NOT EXISTS calls (id TEXT PRIMARY KEY, data TEXT)") + db.commit() + + +def save_call(call: CallModel): + db.execute( + "INSERT OR REPLACE INTO calls VALUES (?, ?)", + (str(call.id), call.model_dump_json()), + ) + db.commit() + + +def get_call(call_id: UUID) -> CallModel: + cursor = db.execute("SELECT data FROM calls WHERE id = ?", (str(call_id),)) + row = cursor.fetchone() + return CallModel.model_validate_json(row[0]) diff --git a/models/action.py b/models/action.py new file mode 100644 index 00000000..21bf79bb --- /dev/null +++ b/models/action.py @@ -0,0 +1,15 @@ +from enum import Enum +from pydantic import BaseModel +from typing import Optional + + +class Indent(str, Enum): + CONTINUE = "continue" + END_CALL = "end_call" + TALK_TO_HUMAN = "talk_to_human" + UPDATE_CLAIM = "update_claim" + + +class ActionModel(BaseModel): + content: str + intent: Indent diff --git a/models/call.py b/models/call.py new file mode 100644 index 00000000..71c51637 --- /dev/null +++ b/models/call.py @@ -0,0 +1,32 @@ +from enum import Enum +from models.claim import ClaimModel +from pydantic import BaseModel, Field +from typing import List, Optional +from uuid import UUID, uuid4 + + +class Persona(str, Enum): + ASSISTANT = "assistant" + HUMAN = "human" + TOOL = "tool" + + +class ToolModel(BaseModel): + content: str + function_arguments: str + function_name: str + id: str + + +class MessageModel(BaseModel): + content: str + persona: Persona + tool_calls: List[ToolModel] = [] + + +class CallModel(BaseModel): + claim: ClaimModel = Field(default_factory=ClaimModel) + id: UUID = Field(default_factory=uuid4) + messages: List[MessageModel] = [] + phone_number: str + recognition_retry: int = Field(default=0) diff --git a/models/claim.py b/models/claim.py new file mode 100644 index 00000000..4709ba2b --- /dev/null +++ b/models/claim.py @@ -0,0 +1,24 @@ +from pydantic import BaseModel +from typing import Optional + + +class ClaimModel(BaseModel): + additional_documentation: Optional[str] = None + claim_explanation: Optional[str] = None + incident_date_time: Optional[str] = None + incident_description: Optional[str] = None + incident_location: Optional[str] = None + injuries_description: Optional[str] = None + insurance_type: Optional[str] = None + involved_parties: Optional[str] = None + medical_records: Optional[str] = None + police_report_number: Optional[str] = None + policy_number: Optional[str] = None + policyholder_contact_info: Optional[str] = None + policyholder_name: Optional[str] = None + pre_existing_damage_description: Optional[str] = None + property_damage_description: Optional[str] = None + repair_replacement_estimates: Optional[str] = None + stolen_lost_items: Optional[str] = None + vehicle_info: Optional[str] = None + witnesses: Optional[str] = None diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..2b1f4ffc --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +azure-communication-callautomation==1.1.0 +azure-communication-sms==1.0.1 +azure-eventgrid==4.16.0 +azure-identity==1.15.0 +azure-mgmt-eventgrid==10.2.0 +fastapi==0.108.0 +openai==1.7.0 +python-dotenv==1.0.0 +pyyaml==6.0.1 +uvicorn==0.25.0 diff --git a/resources/acknowledge.wav 
b/resources/acknowledge.wav new file mode 100644 index 00000000..c51be308 Binary files /dev/null and b/resources/acknowledge.wav differ