diff --git a/.github/workflows/CAdeploy.yml b/.github/workflows/CAdeploy.yml index 273dc835..f82b01cd 100644 --- a/.github/workflows/CAdeploy.yml +++ b/.github/workflows/CAdeploy.yml @@ -1,4 +1,4 @@ -name: CI-Validate Deployment-Client Advisor +name: Validate Deployment - Client Advisor on: push: diff --git a/.github/workflows/RAdeploy.yml b/.github/workflows/RAdeploy.yml index d70ac368..f5ce6481 100644 --- a/.github/workflows/RAdeploy.yml +++ b/.github/workflows/RAdeploy.yml @@ -1,4 +1,4 @@ -name: CI-Validate Deployment-Research Assistant +name: Validate Deployment - Researcher on: push: @@ -7,7 +7,7 @@ on: paths: - 'ResearchAssistant/**' schedule: - - cron: '0 6,18 * * *' # Runs at 6:00 AM and 6:00 PM GMT + - cron: '0 7,19 * * *' # Runs at 7:00 AM and 7:00 PM GMT jobs: deploy: diff --git a/.github/workflows/build-clientadvisor.yml b/.github/workflows/build-clientadvisor.yml index 155df0cc..f323fed1 100644 --- a/.github/workflows/build-clientadvisor.yml +++ b/.github/workflows/build-clientadvisor.yml @@ -1,4 +1,4 @@ -name: Build ClientAdvisor Docker Images +name: Build Docker and Optional Push - Client Advisor on: push: diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 1f2cdc92..83298ce3 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -48,13 +48,17 @@ jobs: id: date run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + - name: Determine Tag Name Based on Branch + id: determine_tag + run: echo "tagname=${{ github.ref_name == 'main' && 'latest' || github.ref_name == 'dev' && 'dev' || github.ref_name == 'demo' && 'demo' || github.head_ref || 'default' }}" >> $GITHUB_OUTPUT + - name: Build Docker Image and optionally push uses: docker/build-push-action@v6 with: context: . 
file: ${{ inputs.dockerfile }} push: ${{ inputs.push }} - cache-from: type=registry,ref=${{ inputs.registry }}/${{ inputs.app_name}}:${{ github.ref_name == 'main' && 'latest' || github.ref_name == 'dev' && 'dev' || github.ref_name == 'demo' && 'demo'|| github.ref_name == 'dependabotchanges' && 'dependabotchanges' || 'latest' }} + cache-from: type=registry,ref=${{ inputs.registry }}/${{ inputs.app_name}}:${{ steps.determine_tag.outputs.tagname }} tags: | - ${{ inputs.registry }}/${{ inputs.app_name}}:${{ github.ref_name == 'main' && 'latest' || github.ref_name == 'dev' && 'dev' || github.ref_name == 'demo' && 'demo'|| github.ref_name == 'dependabotchanges' && 'dependabotchanges' || 'latest' }} - ${{ inputs.registry }}/${{ inputs.app_name}}:${{ steps.date.outputs.date }}_${{ github.run_number }} + ${{ inputs.registry }}/${{ inputs.app_name}}:${{ steps.determine_tag.outputs.tagname }} + ${{ inputs.registry }}/${{ inputs.app_name}}:${{ steps.determine_tag.outputs.tagname }}_${{ steps.date.outputs.date }}_${{ github.run_number }} \ No newline at end of file diff --git a/.github/workflows/build-researchassistant.yml b/.github/workflows/build-researchassistant.yml index 7d85f63e..b817c646 100644 --- a/.github/workflows/build-researchassistant.yml +++ b/.github/workflows/build-researchassistant.yml @@ -1,4 +1,4 @@ -name: Build ResearchAssistant Docker Images +name: Build Docker and Optional Push - Researcher on: push: diff --git a/.github/workflows/pr-title-checker.yml b/.github/workflows/pr-title-checker.yml index 5cbbae1a..b7e70e56 100644 --- a/.github/workflows/pr-title-checker.yml +++ b/.github/workflows/pr-title-checker.yml @@ -1,4 +1,4 @@ -name: "pr-title-checker" +name: "PR Title Checker" on: pull_request_target: diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml index 4a1ce599..fd581970 100644 --- a/.github/workflows/pylint.yml +++ b/.github/workflows/pylint.yml @@ -1,4 +1,4 @@ -name: Pylint +name: PyLint on: [push] diff --git a/.github/workflows/stale-bot.yml b/.github/workflows/stale-bot.yml index e31059ab..b4e41fc4 100644 --- a/.github/workflows/stale-bot.yml +++ b/.github/workflows/stale-bot.yml @@ -1,4 +1,4 @@ -name: 'Close stale issues and PRs' +name: 'Stale Bot' on: schedule: - cron: '30 1 * * *' diff --git a/.github/workflows/sync-branches.yml b/.github/workflows/sync-branches.yml deleted file mode 100644 index 7e4e8b45..00000000 --- a/.github/workflows/sync-branches.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Sync Main to dependabotchanges - -on: - # Schedule the sync job to run daily or customize as needed - schedule: - - cron: '0 1 * * *' # Runs every day at 1 AM UTC - # Trigger the sync job on pushes to the main branch - push: - branches: - - main - -jobs: - sync: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 # Fetch all history for accurate branch comparison - - - name: Configure Git - run: | - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - - - name: Sync main to dependabotchanges - run: | - # Ensure we're on the main branch - git checkout main - # Fetch the latest changes - git pull origin main - - # Switch to dependabotchanges branch - git checkout dependabotchanges - # Merge main branch changes - git merge main --no-edit - - # Push changes back to dependabotchanges1 branch - git push origin dependabotchanges - - - name: Notify on Failure - if: failure() - run: echo "Sync from main to dependabotchanges failed!" 
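For reference, the new `Determine Tag Name Based on Branch` step added to `build-docker.yml` above centralizes the branch-to-tag mapping that was previously duplicated across the `cache-from` and `tags` expressions. The GitHub Actions expression resolves the way this Python sketch does (`ref_name` and `head_ref` stand in for `github.ref_name` and `github.head_ref`; the sketch is illustrative, not part of the workflow):

```python
def determine_tag(ref_name: str, head_ref: str) -> str:
    # Mirrors the workflow expression:
    #   ref_name == 'main' && 'latest' || ref_name == 'dev' && 'dev'
    #   || ref_name == 'demo' && 'demo' || head_ref || 'default'
    # (&& yields its right operand when the left is truthy; || falls through.)
    branch_to_tag = {"main": "latest", "dev": "dev", "demo": "demo"}
    return branch_to_tag.get(ref_name) or head_ref or "default"

assert determine_tag("main", "") == "latest"                  # push to main -> 'latest'
assert determine_tag("42/merge", "feature/x") == "feature/x"  # PR run -> source branch name
assert determine_tag("feature/x", "") == "default"            # push to any other branch
```

Note that the old expression's `dependabotchanges` fallback is gone, consistent with the deletion of `sync-branches.yml` above.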
diff --git a/.github/workflows/test_client_advisor.yml b/.github/workflows/test_client_advisor.yml index 4f0d124e..4bc79228 100644 --- a/.github/workflows/test_client_advisor.yml +++ b/.github/workflows/test_client_advisor.yml @@ -1,4 +1,4 @@ -name: Unit Tests - Client Advisor +name: Test Workflow with Coverage - Client Advisor on: push: diff --git a/.github/workflows/test_research_assistant.yml b/.github/workflows/test_research_assistant.yml new file mode 100644 index 00000000..cac44acd --- /dev/null +++ b/.github/workflows/test_research_assistant.yml @@ -0,0 +1,48 @@ +name: Test Workflow with Coverage - Researcher + +on: + push: + branches: [main, dev] + paths: + - 'ResearchAssistant/**' + pull_request: + branches: [main, dev] + types: + - opened + - ready_for_review + - reopened + - synchronize + paths: + - 'ResearchAssistant/**' + +jobs: + test_research_assistant: + name: Research Assistant Tests + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install Backend Dependencies + run: | + cd ResearchAssistant/App + python -m pip install --upgrade pip + python -m pip install -r requirements.txt + python -m pip install coverage pytest pytest-cov pytest-asyncio + + - name: Run Backend Tests with Coverage + run: | + cd ResearchAssistant/App + python -m pytest -vv --cov=. --cov-report=xml --cov-report=html --cov-report=term-missing --cov-fail-under=80 --junitxml=coverage-junit.xml + - uses: actions/upload-artifact@v4 + with: + name: research-assistant-coverage + path: | + ResearchAssistant/App/coverage.xml + ResearchAssistant/App/coverage-junit.xml + ResearchAssistant/App/htmlcov/ diff --git a/ClientAdvisor/App/app.py b/ClientAdvisor/App/app.py index e8221243..2755d75b 100644 --- a/ClientAdvisor/App/app.py +++ b/ClientAdvisor/App/app.py @@ -1588,35 +1588,72 @@ def get_users(): if len(rows) <= 6: # update ClientMeetings,Assets,Retirement tables sample data to current date cursor = conn.cursor() - cursor.execute( - """select DATEDIFF(d,CAST(max(StartTime) AS Date),CAST(GETDATE() AS Date)) + 3 as ndays from ClientMeetings""" - ) - rows = cursor.fetchall() - ndays = 0 - for row in rows: - ndays = row["ndays"] - sql_stmt1 = f"UPDATE ClientMeetings SET StartTime = dateadd(day,{ndays},StartTime), EndTime = dateadd(day,{ndays},EndTime)" - cursor.execute(sql_stmt1) - conn.commit() - nmonths = int(ndays / 30) - if nmonths > 0: - sql_stmt1 = ( - f"UPDATE Assets SET AssetDate = dateadd(MONTH,{nmonths},AssetDate)" + combined_stmt = """ + WITH MaxDates AS ( + SELECT + MAX(CAST(StartTime AS Date)) AS MaxClientMeetingDate, + MAX(AssetDate) AS MaxAssetDate, + MAX(StatusDate) AS MaxStatusDate + FROM + (SELECT StartTime, NULL AS AssetDate, NULL AS StatusDate FROM ClientMeetings + UNION ALL + SELECT NULL AS StartTime, AssetDate, NULL AS StatusDate FROM Assets + UNION ALL + SELECT NULL AS StartTime, NULL AS AssetDate, StatusDate FROM Retirement) AS Combined + ), + Today AS ( + SELECT GETDATE() AS TodayDate + ), + DaysDifference AS ( + SELECT + DATEDIFF(DAY, MaxClientMeetingDate, TodayDate) + 3 AS ClientMeetingDaysDifference, + DATEDIFF(DAY, MaxAssetDate, TodayDate) - 30 AS AssetDaysDifference, + DATEDIFF(DAY, MaxStatusDate, TodayDate) - 30 AS StatusDaysDifference + FROM MaxDates, Today ) - cursor.execute(sql_stmt1) + SELECT + ClientMeetingDaysDifference, + AssetDaysDifference / 30 AS AssetMonthsDifference, + StatusDaysDifference / 30 AS StatusMonthsDifference + FROM DaysDifference + """ + 
cursor.execute(combined_stmt) + date_diff_rows = cursor.fetchall() + + client_days = ( + date_diff_rows[0]["ClientMeetingDaysDifference"] + if date_diff_rows + else 0 + ) + asset_months = ( + int(date_diff_rows[0]["AssetMonthsDifference"]) if date_diff_rows else 0 + ) + status_months = ( + int(date_diff_rows[0]["StatusMonthsDifference"]) + if date_diff_rows + else 0 + ) + + # Update ClientMeetings + if client_days > 0: + client_update_stmt = f"UPDATE ClientMeetings SET StartTime = DATEADD(day, {client_days}, StartTime), EndTime = DATEADD(day, {client_days}, EndTime)" + cursor.execute(client_update_stmt) conn.commit() - sql_stmt1 = f"UPDATE Retirement SET StatusDate = dateadd(MONTH,{nmonths},StatusDate)" - cursor.execute(sql_stmt1) + # Update Assets + if asset_months > 0: + asset_update_stmt = f"UPDATE Assets SET AssetDate = DATEADD(month, {asset_months}, AssetDate)" + cursor.execute(asset_update_stmt) conn.commit() - cursor = conn.cursor() - cursor.execute(sql_stmt) - rows = cursor.fetchall() + # Update Retirement + if status_months > 0: + retire_update_stmt = f"UPDATE Retirement SET StatusDate = DATEADD(month, {status_months}, StatusDate)" + cursor.execute(retire_update_stmt) + conn.commit() users = [] for row in rows: - # print(row) user = { "ClientId": row["ClientId"], "ClientName": row["Client"], @@ -1631,7 +1668,6 @@ def get_users(): "ClientSummary": row["ClientSummary"], } users.append(user) - # print(users) return jsonify(users) diff --git a/ClientAdvisor/App/frontend/src/api/api.ts b/ClientAdvisor/App/frontend/src/api/api.ts index b59dceb0..acd20736 100644 --- a/ClientAdvisor/App/frontend/src/api/api.ts +++ b/ClientAdvisor/App/frontend/src/api/api.ts @@ -201,31 +201,47 @@ export const selectUser = async (options: ClientIdRequest): Promise => return new Response(null, { status: 500, statusText: 'Internal Server Error' }); } }; - +function isLastObjectNotEmpty(arr:any) + { + if (arr.length === 0) return false; + // Handle empty array case + const lastObj = arr[arr.length - 1]; + return Object.keys(lastObj).length > 0; + } export const historyUpdate = async (messages: ChatMessage[], convId: string): Promise => { - const response = await fetch('/history/update', { - method: 'POST', - body: JSON.stringify({ - conversation_id: convId, - messages: messages - }), - headers: { - 'Content-Type': 'application/json' - } - }) - .then(async res => { - return res + if(isLastObjectNotEmpty(messages)){ + const response = await fetch('/history/update', { + method: 'POST', + body: JSON.stringify({ + conversation_id: convId, + messages: messages + }), + headers: { + 'Content-Type': 'application/json' + } }) - .catch(_err => { - console.error('There was an issue fetching your data.') + .then(async res => { + return res + }) + .catch(_err => { + console.error('There was an issue fetching your data.') + const errRes: Response = { + ...new Response(), + ok: false, + status: 500 + } + return errRes + }) + return response + } + else{ const errRes: Response = { ...new Response(), ok: false, status: 500 } - return errRes - }) - return response + return errRes + } } export const historyDelete = async (convId: string): Promise => { @@ -425,4 +441,4 @@ export const historyMessageFeedback = async (messageId: string, feedback: string // const data = await response.text(); // console.log('Response:', data); -// }; \ No newline at end of file +// }; diff --git a/ClientAdvisor/App/frontend/src/components/ChatHistory/ChatHistoryPanel.module.css 
b/ClientAdvisor/App/frontend/src/components/ChatHistory/ChatHistoryPanel.module.css index abb30159..f167b086 100644 --- a/ClientAdvisor/App/frontend/src/components/ChatHistory/ChatHistoryPanel.module.css +++ b/ClientAdvisor/App/frontend/src/components/ChatHistory/ChatHistoryPanel.module.css @@ -1,11 +1,11 @@ .container { - max-height: calc(100vh - 100px); - width: 300px; + height: calc(100vh - 100px); + width: 305px; } .listContainer { overflow: hidden auto; - max-height: calc(90vh - 105px); + height: calc(90vh - 230px); } .itemCell { @@ -79,9 +79,9 @@ } @media screen and (-ms-high-contrast: active), (forced-colors: active) { - .container{ - border: 2px solid WindowText; - background-color: Window; - color: WindowText; + .container { + border: 2px solid WindowText; + background-color: Window; + color: WindowText; } -} \ No newline at end of file +} diff --git a/ClientAdvisor/App/tests/test_app.py b/ClientAdvisor/App/tests/test_app.py index d456ac70..dd7f9347 100644 --- a/ClientAdvisor/App/tests/test_app.py +++ b/ClientAdvisor/App/tests/test_app.py @@ -208,6 +208,10 @@ async def test_get_users_success(client): { "ClientId": 1, "ndays": 10, + "ClientMeetingDaysDifference": 1, + "AssetMonthsDifference": 1, + "StatusMonthsDifference": 1, + "DaysDifference": 1, "Client": "Client A", "Email": "clienta@example.com", "AssetValue": "1,000,000", diff --git a/ClientAdvisor/Deployment/images/readMe/architecture.png b/ClientAdvisor/Deployment/images/readMe/architecture.png index a981695c..3716611d 100644 Binary files a/ClientAdvisor/Deployment/images/readMe/architecture.png and b/ClientAdvisor/Deployment/images/readMe/architecture.png differ diff --git a/ClientAdvisor/Deployment/images/readMe/keyfeatures.png b/ClientAdvisor/Deployment/images/readMe/keyfeatures.png index 681a39f4..d13c0ed6 100644 Binary files a/ClientAdvisor/Deployment/images/readMe/keyfeatures.png and b/ClientAdvisor/Deployment/images/readMe/keyfeatures.png differ diff --git a/ClientAdvisor/README.md b/ClientAdvisor/README.md index 9be8dde4..3776f1c3 100644 --- a/ClientAdvisor/README.md +++ b/ClientAdvisor/README.md @@ -65,7 +65,7 @@ For additional training and support, please see: ### **How to install/deploy** 1. Please check the link [Azure Products by Region]( -https://azure.microsoft.com/en-us/explore/global-infrastructure/products-by-region/?products=all®ions=all) and choose a region where Azure AI Search, Semantic Ranker, Azure OpenAI Service, and Azure AI Studio are available. +https://azure.microsoft.com/en-us/explore/global-infrastructure/products-by-region/?products=all®ions=all) and choose a region where Azure AI Search, Semantic Ranker, Azure OpenAI Service, and Azure AI Foundry are available. 2. Click the following deployment button to create the required resources for this accelerator in your Azure Subscription. 
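On the `app.py` change above: the three separate `DATEDIFF` round trips are folded into a single CTE that returns one row of offsets. A hedged Python sketch of the arithmetic that CTE performs (hypothetical helper, purely for intuition; the real computation stays in T-SQL):

```python
from datetime import date

def compute_offsets(max_meeting: date, max_asset: date, max_status: date, today: date):
    # Shift meetings forward so the most recent one lands 3 days past today.
    client_days = (today - max_meeting).days + 3
    # Shift Assets/Retirement forward in whole months, keeping ~30 days of history;
    # int() truncates toward zero, matching T-SQL integer division.
    asset_months = int(((today - max_asset).days - 30) / 30)
    status_months = int(((today - max_status).days - 30) / 30)
    return client_days, asset_months, status_months

print(compute_offsets(date(2025, 1, 10), date(2024, 11, 1),
                      date(2024, 11, 1), date(2025, 3, 1)))  # -> (53, 3, 3)
```

As in the patched code, each table is only updated when its offset is positive, so fresh sample data is left untouched.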
diff --git a/ResearchAssistant/App/test_app.py b/ResearchAssistant/App/test_app.py new file mode 100644 index 00000000..6403b104 --- /dev/null +++ b/ResearchAssistant/App/test_app.py @@ -0,0 +1,575 @@ +import json +import os +from unittest.mock import MagicMock, Mock, patch +from flask import Flask +import pytest +import urllib + +from app import (extract_value, fetchUserGroups, + formatApiResponseNoStreaming, formatApiResponseStreaming, + generateFilterString, is_chat_model, parse_multi_columns, + prepare_body_headers_with_data, should_use_data, + stream_with_data, draft_document_generate) + +AZURE_SEARCH_SERVICE = os.environ.get("AZURE_SEARCH_SERVICE", "") +AZURE_OPENAI_KEY = os.environ.get("AZURE_OPENAI_KEY", "") +AZURE_SEARCH_PERMITTED_GROUPS_COLUMN = os.environ.get( + "AZURE_SEARCH_PERMITTED_GROUPS_COLUMN", "" +) + + +def test_parse_multi_columns(): + assert parse_multi_columns("a|b|c") == ["a", "b", "c"] + assert parse_multi_columns("a,b,c") == ["a", "b", "c"] + + +@patch("requests.get") +def test_success_single_page(mock_get): + # Mock response for a single page of groups + mock_get.return_value.status_code = 200 + mock_get.return_value.json.return_value = { + "value": [{"id": "group1"}, {"id": "group2"}] + } + + userToken = "valid_token" + result = fetchUserGroups(userToken) + expected = [{"id": "group1"}, {"id": "group2"}] + assert result == expected + + +def test_is_chat_model_with_gpt4(): + with patch("app.AZURE_OPENAI_MODEL_NAME", "gpt-4"): + assert is_chat_model() is True + + +def test_is_chat_model_with_gpt35_turbo_4k(): + with patch("app.AZURE_OPENAI_MODEL_NAME", "gpt-35-turbo-4k"): + assert is_chat_model() is True + + +def test_is_chat_model_with_gpt35_turbo_16k(): + with patch("app.AZURE_OPENAI_MODEL_NAME", "gpt-35-turbo-16k"): + assert is_chat_model() is True + + +def test_is_chat_model_with_other_model(): + with patch("app.AZURE_OPENAI_MODEL_NAME", "some-other-model"): + assert is_chat_model() is False + + +def test_should_use_data_with_service_and_key(): + with patch("app.AZURE_SEARCH_SERVICE", "my-service"): + with patch("app.AZURE_SEARCH_KEY", "my-key"): + with patch("app.DEBUG_LOGGING", False): + assert should_use_data() is True + + +def test_should_use_data_with_service_no_key(): + with patch("app.AZURE_SEARCH_SERVICE", "my-service"): + with patch("app.AZURE_SEARCH_KEY", None): + assert should_use_data() is False + + +def test_should_use_data_with_key_no_service(): + with patch("app.AZURE_SEARCH_SERVICE", None): + with patch("app.AZURE_SEARCH_KEY", "my-key"): + assert should_use_data() is False + + +def test_should_use_data_with_neither(): + with patch("app.AZURE_SEARCH_SERVICE", None): + with patch("app.AZURE_SEARCH_KEY", None): + assert should_use_data() is False + + +def test_should_use_data_with_debug_logging(): + with patch("app.AZURE_SEARCH_SERVICE", "my-service"): + with patch("app.AZURE_SEARCH_KEY", "my-key"): + with patch("app.DEBUG_LOGGING", True): + with patch("logging.debug") as mock_debug: + assert should_use_data() is True + mock_debug.assert_called_once_with("Using Azure Cognitive Search") + + +@patch("requests.get") +def test_success_multiple_pages(mock_get): + # Mock response for multiple pages of groups + mock_get.side_effect = [ + _mock_response( + 200, + { + "value": [{"id": "group1"}, {"id": "group2"}], + "@odata.nextLink": "https://next.page", + }, + ), + _mock_response(200, {"value": [{"id": "group3"}]}), + ] + + userToken = "valid_token" + result = fetchUserGroups(userToken) + expected = [{"id": "group1"}, {"id": "group2"}, {"id": "group3"}] 
+ assert result == expected + + +@patch("requests.get") +def test_non_200_status_code(mock_get): + # Mock response with a 403 Forbidden error + mock_get.return_value.status_code = 403 + mock_get.return_value.text = "Forbidden" + + userToken = "valid_token" + result = fetchUserGroups(userToken) + expected = [] + assert result == expected + + +@patch("requests.get") +def test_exception_handling(mock_get): + # Mock an exception when making the request + mock_get.side_effect = Exception("Network error") + + userToken = "valid_token" + result = fetchUserGroups(userToken) + expected = [] + assert result == expected + + +@patch("requests.get") +def test_no_groups_found(mock_get): + # Mock response with no groups found + mock_get.return_value.status_code = 200 + mock_get.return_value.json.return_value = {"value": []} + + userToken = "valid_token" + result = fetchUserGroups(userToken) + expected = [] + assert result == expected + + +def _mock_response(status_code, json_data): + """Helper method to create a mock response object.""" + mock_resp = Mock() + mock_resp.status_code = status_code + mock_resp.json.return_value = json_data + return mock_resp + + +@patch("app.fetchUserGroups") +def test_generateFilterString(mock_fetchUserGroups): + mock_fetchUserGroups.return_value = [{"id": "1"}, {"id": "2"}] + userToken = "fake_token" + + filter_string = generateFilterString(userToken) + assert filter_string == "None/any(g:search.in(g, '1, 2'))" + + +def test_prepare_body_headers_with_data(): + # Create a mock request + mock_request = MagicMock() + mock_request.json = {"messages": ["Hello, world!"], "index_name": "grants"} + mock_request.headers = {"X-MS-TOKEN-AAD-ACCESS-TOKEN": "mock_token"} + + with patch("app.AZURE_OPENAI_TEMPERATURE", 0.7), patch( + "app.AZURE_OPENAI_MAX_TOKENS", 100 + ), patch("app.AZURE_OPENAI_TOP_P", 0.9), patch( + "app.AZURE_SEARCH_SERVICE", "my-service" + ), patch( + "app.AZURE_SEARCH_KEY", "my-key" + ), patch( + "app.DEBUG_LOGGING", True + ), patch( + "app.AZURE_SEARCH_PERMITTED_GROUPS_COLUMN", "group_column" + ), patch( + "app.AZURE_SEARCH_ENABLE_IN_DOMAIN", "true" + ), patch( + "app.AZURE_SEARCH_TOP_K", 5 + ), patch( + "app.AZURE_SEARCH_STRICTNESS", 1 + ): + + body, headers = prepare_body_headers_with_data(mock_request) + print("indexName", body["dataSources"][0]["parameters"]) + assert body["messages"] == ["Hello, world!"] + assert body["temperature"] == 0.7 + assert body["max_tokens"] == 100 + assert body["top_p"] == 0.9 + assert body["dataSources"] + assert body["dataSources"][0]["type"] == "AzureCognitiveSearch" + assert ( + body["dataSources"][0]["parameters"]["endpoint"] + == "https://my-service.search.windows.net" + ) + assert body["dataSources"][0]["parameters"]["key"] == "my-key" + assert body["dataSources"][0]["parameters"]["inScope"] is True + assert body["dataSources"][0]["parameters"]["topNDocuments"] == 5 + assert body["dataSources"][0]["parameters"]["strictness"] == 1 + + assert headers["Content-Type"] == "application/json" + assert headers["x-ms-useragent"] == "GitHubSampleWebApp/PublicAPI/3.0.0" + + +def test_invalid_datasource_type(): + mock_request = MagicMock() + mock_request.json = {"messages": ["Hello, world!"], "index_name": "grants"} + + with patch("app.DATASOURCE_TYPE", "InvalidType"): + with pytest.raises(Exception) as exc_info: + prepare_body_headers_with_data(mock_request) + assert "DATASOURCE_TYPE is not configured or unknown: InvalidType" in str( + exc_info.value + ) + + +# stream_with_data function +def mock_format_as_ndjson(data): + # Ensure data 
is in a JSON serializable format (like a list or dict) + if isinstance(data, set): + data = list(data) # Convert set to list + return json.dumps(data) + + +def test_stream_with_data_azure_success(): + body = { + "messages": [ + { + "id": "0e29210d-5584-38df-df76-2dfb40147ee7", + "role": "user", + "content": "influenza and its effets ", + "date": "2025-01-09T04:42:25.896Z", + }, + { + "id": "ab42add2-0fba-d6bb-47c0-5d11b7cdb83a", + "role": "user", + "content": "influenza and its effectd", + "date": "2025-01-09T10:14:11.638Z", + }, + { + "id": "1f6dc8e2-c5fe-ce77-b28c-5ec9ba80e94d", + "role": "user", + "content": "influenza and its effects", + "date": "2025-01-09T10:34:15.187Z", + }, + ], + "temperature": 0.0, + "max_tokens": 1000, + "top_p": 1.0, + "stop": "None", + "stream": True, + "dataSources": [ + { + "type": "AzureCognitiveSearch", + "parameters": { + "endpoint": "https://ututut-cs.search.windows.net", + "key": "", + "indexName": "articlesindex", + "fieldsMapping": { + "contentFields": ["content"], + "titleField": "title", + "urlField": "publicurl", + "filepathField": "chunk_id", + "vectorFields": ["titleVector", "contentVector"], + }, + "inScope": False, + "topNDocuments": "5", + "queryType": "vectorSemanticHybrid", + "semanticConfiguration": "my-semantic-config", + "roleInformation": "You are an AI assistant that helps people find information.", + "filter": "None", + "strictness": 3, + "embeddingDeploymentName": "text-embedding-ada-002", + }, + } + ], + } + headers = { + "Content-Type": "application/json", + "api-key": "", + "x-ms-useragent": "GitHubSampleWebApp/PublicAPI/3.0.0", + } + history_metadata = {} + + with patch("requests.Session.post") as mock_post: + mock_response = MagicMock() + mock_response.iter_lines.return_value = [ + b'data: {"id":"1","model":"gpt-35-turbo-16k","created":1736397875,"object":"extensions.chat.completion.chunk","choices":[{"index":0,"delta":{"context":{"messages":[{"role":"tool","content":"hello","end_turn":false}]}},"end_turn":false,"finish_reason":"None"}]}' + ] + mock_response.headers = {"apim-request-id": "test-request-id"} + mock_post.return_value.__enter__.return_value = mock_response + + with patch("app.format_as_ndjson", side_effect=mock_format_as_ndjson): + results = list( + stream_with_data( + body, headers, "https://mock-endpoint.com", history_metadata + ) + ) # Convert generator to a list + print(results, "result test case") + assert len(results) == 1 + + +# Mock constants +USE_AZURE_AI_STUDIO = "true" +AZURE_OPENAI_PREVIEW_API_VERSION = "2023-06-01-preview" +DEBUG_LOGGING = False +AZURE_SEARCH_SERVICE = os.environ.get("AZURE_SEARCH_SERVICE", "mysearchservice") + + +def test_stream_with_data_azure_error(): + body = { + "messages": [ + { + "id": "0e29210d-5584-38df-df76-2dfb40147ee7", + "role": "user", + "content": "influenza and its effets ", + "date": "2025-01-09T04:42:25.896Z", + }, + { + "id": "ab42add2-0fba-d6bb-47c0-5d11b7cdb83a", + "role": "user", + "content": "influenza and its effectd", + "date": "2025-01-09T10:14:11.638Z", + }, + { + "id": "1f6dc8e2-c5fe-ce77-b28c-5ec9ba80e94d", + "role": "user", + "content": "influenza and its effects", + "date": "2025-01-09T10:34:15.187Z", + }, + ], + "temperature": 0.0, + "max_tokens": 1000, + "top_p": 1.0, + "stop": "None", + "stream": True, + "dataSources": [ + { + "type": "AzureCognitiveSearch", + "parameters": { + "endpoint": "https://ututut-cs.search.windows.net", + "key": "", + "indexName": "articlesindex", + "fieldsMapping": { + "contentFields": ["content"], + "titleField": "title", + 
"urlField": "publicurl", + "filepathField": "chunk_id", + "vectorFields": ["titleVector", "contentVector"], + }, + "inScope": False, + "topNDocuments": "5", + "queryType": "vectorSemanticHybrid", + "semanticConfiguration": "my-semantic-config", + "roleInformation": "You are an AI assistant that helps people find information.", + "filter": "None", + "strictness": 3, + "embeddingDeploymentName": "text-embedding-ada-002", + }, + } + ], + } + + if USE_AZURE_AI_STUDIO.lower() == "true": + body = body + headers = { + "Content-Type": "application/json", + "api-key": "", + "x-ms-useragent": "GitHubSampleWebApp/PublicAPI/3.0.0", + } + history_metadata = {} + + with patch("requests.Session.post") as mock_post: + # if USE_AZURE_AI_STUDIO.lower() == "true": + # body = mock_body + mock_response = MagicMock() + mock_response.iter_lines.return_value = [ + b'data: {"id":"1","model":"gpt-35-turbo-16k","created":1736397875,"object":"extensions.chat.completion.chunk","choices":[{"index":0,"delta":{"context":{"messages":[{"role":"tool","content":"hello","end_turn":false}]}},"end_turn":false,"finish_reason":"None"}]}' + ] + mock_response.headers = {"apim-request-id": "test-request-id"} + mock_post.return_value.__enter__.return_value = mock_response + + with patch("app.format_as_ndjson", side_effect=mock_format_as_ndjson): + results = list( + stream_with_data( + body, headers, "https://mock-endpoint.com", history_metadata + ) + ) # Convert generator to a list + print(results, "result test case") + assert len(results) == 1 + + +def test_formatApiResponseNoStreaming(): + rawResponse = { + "id": "1", + "model": "gpt-3", + "created": 123456789, + "object": "response", + "choices": [ + { + "message": { + "context": {"messages": [{"content": "Hello from tool"}]}, + "content": "Hello from assistant", + } + } + ], + } + response = formatApiResponseNoStreaming(rawResponse) + assert "choices" in response + assert response["choices"][0]["messages"][0]["content"] == "Hello from tool" + + +def test_formatApiResponseStreaming(): + rawResponse = { + "id": "1", + "model": "gpt-3", + "created": 123456789, + "object": "response", + "choices": [{"delta": {"role": "assistant", "content": "Hello"}}], + } + + response = formatApiResponseStreaming(rawResponse) + + # Print response to debug + print(response) # Optional for debugging, remove in production + + assert "choices" in response + assert "messages" in response["choices"][0] + assert len(response["choices"][0]["messages"]) == 1 + + # Check if the content is included under the correct structure + delta_content = response["choices"][0]["messages"][0]["delta"] + assert "role" in delta_content # Check for role + assert ( + "content" not in delta_content + ) # content should not be present as per current logic + + +def test_extract_value(): + text = "'code': 'content_filter', 'status': '400'" + assert extract_value("code", text) == "content_filter" + assert extract_value("status", text) == "400" + assert extract_value("unknown", text) == "N/A" + + +app = Flask(__name__) + + +app.add_url_rule("/draft_document/generate_section", "draft_document_generate", draft_document_generate, methods=["POST"]) + + +# Helper to create a mock response +class MockResponse: + def __init__(self, json_data, status_code): + self.json_data = json_data + self.status_code = status_code + + def read(self): + return json.dumps(self.json_data).encode('utf-8') + + def getcode(self): + return self.status_code + + +@pytest.fixture +def client(): + with app.test_client() as client: + yield client + + +# Test the 
successful response case +@patch("urllib.request.urlopen") +@patch("os.environ.get") +def test_draft_document_generate_success(mock_os_environ, mock_urlopen, client): + mock_os_environ.side_effect = lambda key: { + "AI_STUDIO_DRAFT_FLOW_ENDPOINT": "https://fakeurl.com", + "AI_STUDIO_DRAFT_FLOW_API_KEY": "fakeapikey", + "AI_STUDIO_DRAFT_FLOW_DEPLOYMENT_NAME": "fake_deployment_name" + }.get(key) + + # Mock the successful API response + mock_urlopen.return_value = MockResponse({"reply": "Generated content for section."}, 200) + + # Sample input payload + payload = { + "grantTopic": "Artificial Intelligence", + "sectionTitle": "Introduction", + "sectionContext": "" + } + + response = client.post("/draft_document/generate_section", json=payload) + + # Assertions + assert response.status_code == 200 + response_json = response.get_json() + assert "content" in response_json + assert response_json["content"] == "Generated content for section." + + +# Test the scenario where "sectionContext" is provided +@patch("urllib.request.urlopen") +@patch("os.environ.get") +def test_draft_document_generate_with_context(mock_os_environ, mock_urlopen, client): + mock_os_environ.side_effect = lambda key: { + "AI_STUDIO_DRAFT_FLOW_ENDPOINT": "https://fakeurl.com", + "AI_STUDIO_DRAFT_FLOW_API_KEY": "fakeapikey", + "AI_STUDIO_DRAFT_FLOW_DEPLOYMENT_NAME": "fake_deployment_name" + }.get(key) + + # Mock the successful API response + mock_urlopen.return_value = MockResponse({"reply": "Generated content with context."}, 200) + + payload = { + "grantTopic": "Quantum Computing", + "sectionTitle": "Background", + "sectionContext": "The section should explain the significance of quantum computing." + } + + response = client.post("/draft_document/generate_section", json=payload) + + # Assertions + assert response.status_code == 200 + response_json = response.get_json() + assert "content" in response_json + assert response_json["content"] == "Generated content with context." + + +@pytest.fixture +def clients(): + app = Flask(__name__) + app.route('/draft_document/generate_section', methods=['POST'])(draft_document_generate) + client = app.test_client() + yield client + + +@patch("urllib.request.urlopen") +@patch("os.environ.get") +def test_draft_document_generate_http_error(mock_env_get, mock_urlopen, client): + # Mock environment variables + mock_env_get.side_effect = lambda key: { + "AI_STUDIO_DRAFT_FLOW_ENDPOINT": "http://mock_endpoint", + "AI_STUDIO_DRAFT_FLOW_API_KEY": "mock_api_key", + "AI_STUDIO_DRAFT_FLOW_DEPLOYMENT_NAME": "mock_deployment" + }.get(key) + + # Mock urllib.request.urlopen to raise an HTTPError + error_response = json.dumps({"error": {"message": "content_filter", "code": "400"}}).encode('utf-8') + mock_urlopen.side_effect = urllib.error.HTTPError( + url="http://mock_endpoint", + code=400, + msg="Bad Request", + hdrs=None, + fp=MagicMock(read=MagicMock(return_value=error_response)) + ) + + # Mock request data + request_data = { + "grantTopic": "Climate Change Research", + "sectionTitle": "Introduction", + "sectionContext": "This research focuses on reducing carbon emissions." 
+    }
+
+    response = client.post(
+        "/draft_document/generate_section",
+        data=json.dumps(request_data),
+        content_type="application/json",
+    )
+
+    assert response.status_code == 200
diff --git a/ResearchAssistant/Deployment/AIStudioDeployment.md b/ResearchAssistant/Deployment/AIFoundryDeployment.md
similarity index 89%
rename from ResearchAssistant/Deployment/AIStudioDeployment.md
rename to ResearchAssistant/Deployment/AIFoundryDeployment.md
index 42b5966b..0473cb67 100644
--- a/ResearchAssistant/Deployment/AIStudioDeployment.md
+++ b/ResearchAssistant/Deployment/AIFoundryDeployment.md
@@ -1,8 +1,8 @@
-# AI Studio Deployment Guide
+# AI Foundry Deployment Guide

 Please follow the steps below to configure the Prompt flow endpoint in App service configuration.

-## Step 1: Open AI Studio Project
-1. Launch the [AI Studio](https://ai.azure.com/) and select `Build` from the top menu.
+## Step 1: Open AI Foundry Project
+1. Launch [AI Foundry](https://ai.azure.com/) and select your `project` under `Jump into a project in Azure AI Foundry`. If it is not listed, click on `View all projects` from the top menu.

 ![Home](/ResearchAssistant/Deployment/images/aiStudio/Home.png)

@@ -10,7 +10,7 @@ Please follow the steps below to configure the Prompt flow endpoint in App servi

 ## Step 2: Import Prompt Flow and Deploy

-1. Click on `PromptFlow` button from left menu under `Tools`.
+1. Click on the `PromptFlow` button in the left menu under `Build and customize`.

 ![Prompt Flow](/ResearchAssistant/Deployment/images/aiStudio/PromptFlow.png)

@@ -27,7 +27,7 @@ Please follow the steps below to configure the Prompt flow endpoint in App servi

 ![Upload Local File](/ResearchAssistant/Deployment/images/aiStudio/UploadLocalFile.png)

-5. Click on `Select runtime` and chick on `Start` from the drop-down list. It can take few minutes for the runtime to start.
+5. Click on `Start compute session` and select `Start compute session` from the drop-down list. It can take a few minutes for the session to start.

 ![Select Runtime](/ResearchAssistant/Deployment/images/aiStudio/SelectRunTime.png)

@@ -38,12 +38,12 @@ Please follow the steps below to configure the Prompt flow endpoint in App servi

 ![Deploy Draft Flow](/ResearchAssistant/Deployment/images/aiStudio/DeployDraftFlow.png)

-7. It will take few minutes for the flow to be validated and deployed. Click on `Deployments` from left menu. You might only see the Default_AzureOpenAI deployments in the page until the deployment is completed. Please wait and click on `Refresh` after few minutes.
+7. It will take a few minutes for the flow to be validated and deployed. Click on `Models + endpoints` from the left menu. You might only see the Default_AzureOpenAI deployments on the page until the deployment is completed. Please wait and click on `Refresh` after a few minutes.

 ![Deployments Page](/ResearchAssistant/Deployment/images/aiStudio/BlankDeploymentsPage.png)

-8. Click on the deployed endpoint with name `draftsinference-1`.
+8. Click on the deployed endpoint with name `ai-project-bycra-jzxzb-1`.

 ![Drafts Endpoint](/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpoint.png)

 9. Click on `Consume` from the top menu. Copy below details to use later in step 3.6.
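For context on the `Consume` details in step 9: server-side, `draft_document_generate` (covered by the new tests above) posts to this endpoint with `urllib`. A minimal sketch of such a call, with placeholder endpoint/key/deployment values and an assumed request shape (only the `{"reply": ...}` response shape is confirmed by the tests):

```python
import json
import urllib.request

endpoint = "https://<endpoint-name>.<region>.inference.ml.azure.com/score"  # placeholder
api_key = "<api-key>"             # placeholder, from the Consume tab
deployment = "<deployment-name>"  # placeholder, from the Consume tab

body = json.dumps({"query": "Draft an introduction section"}).encode("utf-8")  # assumed shape
request = urllib.request.Request(
    endpoint,
    data=body,
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
        "azureml-model-deployment": deployment,  # routes to the specific deployment
    },
)
with urllib.request.urlopen(request) as response:
    print(json.loads(response.read()))  # the app reads a {"reply": ...} payload
```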
diff --git a/ResearchAssistant/Deployment/FabricDeployment.md b/ResearchAssistant/Deployment/FabricDeployment.md index a9a16480..12a997a6 100644 --- a/ResearchAssistant/Deployment/FabricDeployment.md +++ b/ResearchAssistant/Deployment/FabricDeployment.md @@ -8,10 +8,10 @@ Please follow the steps below to set up the Fabric Workspace and collect the id 3. Enter a Workspace Name and click on `Apply`. ![New Workspace](/ResearchAssistant/Deployment/images/fabric/CreateWorkspace.png) -4. On the next page, click on `New`. +4. On the next page, click on `Import`. ![Create Workspace](/ResearchAssistant/Deployment/images/fabric/WorkspaceGuid.png) -5. Click on `Import Notebook`. +5. Click on `Notebook`. ![Create Workspace](/ResearchAssistant/Deployment/images/fabric/ImportNotebooks.png) diff --git a/ResearchAssistant/Deployment/images/aiStudio/BlankDeploymentsPage.png b/ResearchAssistant/Deployment/images/aiStudio/BlankDeploymentsPage.png index 273da9c4..d25edc70 100644 Binary files a/ResearchAssistant/Deployment/images/aiStudio/BlankDeploymentsPage.png and b/ResearchAssistant/Deployment/images/aiStudio/BlankDeploymentsPage.png differ diff --git a/ResearchAssistant/Deployment/images/aiStudio/DeployDraftFlow.png b/ResearchAssistant/Deployment/images/aiStudio/DeployDraftFlow.png index cc3a4161..da4297e7 100644 Binary files a/ResearchAssistant/Deployment/images/aiStudio/DeployDraftFlow.png and b/ResearchAssistant/Deployment/images/aiStudio/DeployDraftFlow.png differ diff --git a/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpoint.png b/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpoint.png index 9f1b4f46..7811d255 100644 Binary files a/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpoint.png and b/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpoint.png differ diff --git a/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpointConsume.png b/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpointConsume.png index 8adf421f..5182c7dd 100644 Binary files a/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpointConsume.png and b/ResearchAssistant/Deployment/images/aiStudio/DraftsEndpointConsume.png differ diff --git a/ResearchAssistant/Deployment/images/aiStudio/Home.png b/ResearchAssistant/Deployment/images/aiStudio/Home.png index f1297640..4260e1ec 100644 Binary files a/ResearchAssistant/Deployment/images/aiStudio/Home.png and b/ResearchAssistant/Deployment/images/aiStudio/Home.png differ diff --git a/ResearchAssistant/Deployment/images/aiStudio/PromptFlow.png b/ResearchAssistant/Deployment/images/aiStudio/PromptFlow.png index 819717a1..f658e9e7 100644 Binary files a/ResearchAssistant/Deployment/images/aiStudio/PromptFlow.png and b/ResearchAssistant/Deployment/images/aiStudio/PromptFlow.png differ diff --git a/ResearchAssistant/Deployment/images/aiStudio/SelectRunTime.png b/ResearchAssistant/Deployment/images/aiStudio/SelectRunTime.png index 012492cf..f58e96b5 100644 Binary files a/ResearchAssistant/Deployment/images/aiStudio/SelectRunTime.png and b/ResearchAssistant/Deployment/images/aiStudio/SelectRunTime.png differ diff --git a/ResearchAssistant/Deployment/images/fabric/ImportNotebooks.png b/ResearchAssistant/Deployment/images/fabric/ImportNotebooks.png index 0264ea3f..ca9c90f0 100644 Binary files a/ResearchAssistant/Deployment/images/fabric/ImportNotebooks.png and b/ResearchAssistant/Deployment/images/fabric/ImportNotebooks.png differ diff --git a/ResearchAssistant/Deployment/images/fabric/WorkspaceGuid.png 
b/ResearchAssistant/Deployment/images/fabric/WorkspaceGuid.png index b39b2083..aab07f59 100644 Binary files a/ResearchAssistant/Deployment/images/fabric/WorkspaceGuid.png and b/ResearchAssistant/Deployment/images/fabric/WorkspaceGuid.png differ diff --git a/ResearchAssistant/Deployment/images/readMe/architecture.png b/ResearchAssistant/Deployment/images/readMe/architecture.png index 5c1ee04c..a8f01490 100644 Binary files a/ResearchAssistant/Deployment/images/readMe/architecture.png and b/ResearchAssistant/Deployment/images/readMe/architecture.png differ diff --git a/ResearchAssistant/Deployment/images/readMe/landing_page.png b/ResearchAssistant/Deployment/images/readMe/landing_page.png index 4d92f026..2c3e803d 100644 Binary files a/ResearchAssistant/Deployment/images/readMe/landing_page.png and b/ResearchAssistant/Deployment/images/readMe/landing_page.png differ diff --git a/ResearchAssistant/Deployment/scripts/aihub_scripts/flows/DraftFlow.zip b/ResearchAssistant/Deployment/scripts/aihub_scripts/flows/DraftFlow.zip index b4b2a728..82e5bcf2 100644 Binary files a/ResearchAssistant/Deployment/scripts/aihub_scripts/flows/DraftFlow.zip and b/ResearchAssistant/Deployment/scripts/aihub_scripts/flows/DraftFlow.zip differ diff --git a/ResearchAssistant/Deployment/scripts/aihub_scripts/requirements.txt b/ResearchAssistant/Deployment/scripts/aihub_scripts/requirements.txt index 0b492679..e5b81109 100644 --- a/ResearchAssistant/Deployment/scripts/aihub_scripts/requirements.txt +++ b/ResearchAssistant/Deployment/scripts/aihub_scripts/requirements.txt @@ -1,8 +1,8 @@ azure-identity azure-keyvault-secrets azure-ai-resources -azure-ai-ml==1.23.0 -msal[broker]==1.31.1 -azure-mgmt-resource +azure-ai-ml==1.23.1 +msal[broker]==1.24.0b1 +azure-mgmt-resource==23.1.0b2 azure-mgmt-cognitiveservices azure-mgmt-search \ No newline at end of file diff --git a/ResearchAssistant/README.md b/ResearchAssistant/README.md index ba1639dd..7fb9f2ee 100644 --- a/ResearchAssistant/README.md +++ b/ResearchAssistant/README.md @@ -52,7 +52,7 @@ For additional training and support, please see: 1. [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/) 2. [Azure AI Search](https://learn.microsoft.com/en-us/azure/search/) 3. [Microsoft Fabric](https://learn.microsoft.com/en-us/fabric/) -4. [Azure AI Studio](https://learn.microsoft.com/en-us/azure/ai-studio/) +4. [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-studio/) ### Solution accelerator architecture ![image](Deployment/images/readMe/architecture.png) @@ -64,7 +64,7 @@ For additional training and support, please see: ### **How to install/deploy** 1. Please check the link [Azure Products by Region]( -https://azure.microsoft.com/en-us/explore/global-infrastructure/products-by-region/?products=all®ions=all) and choose a region where Azure AI Search, Semantic Ranker, Azure OpenAI Service, and Azure AI Studio are available. +https://azure.microsoft.com/en-us/explore/global-infrastructure/products-by-region/?products=all®ions=all) and choose a region where Azure AI Search, Semantic Ranker, Azure OpenAI Service, and Azure AI Foundry are available. 2. Click the following deployment button to create the required resources for this accelerator in your Azure Subscription. @@ -74,9 +74,9 @@ https://azure.microsoft.com/en-us/explore/global-infrastructure/products-by-regi ![image](Deployment/images/readMe/armDeployment.png) -4. 
When Deployment is complete, follow steps in [AI Studio Deployment guide](./Deployment/AIStudioDeployment.md) to configure the grant draft proposal endpoint.
+4. When Deployment is complete, follow the steps in the [AI Foundry Deployment guide](./Deployment/AIFoundryDeployment.md) to configure the grant draft proposal endpoint.

-5. When AI Studio deployment is complete, launch the application by navigating to your Azure resource group, choosing the app service resource, and clicking on the default domain. You should bookmark this URL to have quick access to your deployed application.
+5. When AI Foundry deployment is complete, launch the application by navigating to your Azure resource group, choosing the app service resource, and clicking on the default domain. You should bookmark this URL to have quick access to your deployed application.

 The next steps are optional for additional learning. They are not required to deploy the solution and run the Grant Writer Assistant.