diff --git a/examples/flows/chat/chat-with-pdf/chat-with-pdf.ipynb b/examples/flows/chat/chat-with-pdf/chat-with-pdf.ipynb
index 0947b071b4f..45ca7f5dc03 100644
--- a/examples/flows/chat/chat-with-pdf/chat-with-pdf.ipynb
+++ b/examples/flows/chat/chat-with-pdf/chat-with-pdf.ipynb
@@ -130,7 +130,7 @@
     "\n",
     "config_2k_context = {\n",
     "    \"EMBEDDING_MODEL_DEPLOYMENT_NAME\": \"text-embedding-ada-002\",\n",
-    "    \"CHAT_MODEL_DEPLOYMENT_NAME\": \"gpt-35-turbo\",\n",
+    "    \"CHAT_MODEL_DEPLOYMENT_NAME\": \"gpt-4\",  # change this to the name of your deployment if you're using Azure OpenAI\n",
     "    \"PROMPT_TOKEN_LIMIT\": 2000,\n",
     "    \"MAX_COMPLETION_TOKENS\": 256,\n",
     "    \"VERBOSE\": True,\n",
@@ -241,7 +241,7 @@
    "source": [
     "config_3k_context = {\n",
     "    \"EMBEDDING_MODEL_DEPLOYMENT_NAME\": \"text-embedding-ada-002\",\n",
-    "    \"CHAT_MODEL_DEPLOYMENT_NAME\": \"gpt-35-turbo\",\n",
+    "    \"CHAT_MODEL_DEPLOYMENT_NAME\": \"gpt-4\",  # change this to the name of your deployment if you're using Azure OpenAI\n",
     "    \"PROMPT_TOKEN_LIMIT\": 3000,\n",
     "    \"MAX_COMPLETION_TOKENS\": 256,\n",
     "    \"VERBOSE\": True,\n",
diff --git a/examples/flows/chat/chat-with-pdf/flow.dag.yaml b/examples/flows/chat/chat-with-pdf/flow.dag.yaml
index 778a0d1ed30..66748140b5d 100644
--- a/examples/flows/chat/chat-with-pdf/flow.dag.yaml
+++ b/examples/flows/chat/chat-with-pdf/flow.dag.yaml
@@ -14,7 +14,7 @@ inputs:
     type: object
     default:
       EMBEDDING_MODEL_DEPLOYMENT_NAME: text-embedding-ada-002
-      CHAT_MODEL_DEPLOYMENT_NAME: gpt-35-turbo # change to gpt-3.5-turbo when using openai
+      CHAT_MODEL_DEPLOYMENT_NAME: gpt-4
       PROMPT_TOKEN_LIMIT: 3000
       MAX_COMPLETION_TOKENS: 256
       VERBOSE: true