# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import re
from dataclasses import dataclass
from os import environ
from typing import (
    TYPE_CHECKING,
    Mapping,
    Sequence,
)

from opentelemetry.semconv._incubating.attributes import (
    gen_ai_attributes as GenAIAttributes,
)
from opentelemetry.util.types import AttributeValue

if TYPE_CHECKING:
    from google.cloud.aiplatform_v1.types import content, tool
    from google.cloud.aiplatform_v1beta1.types import (
        content as content_v1beta1,
    )
    from google.cloud.aiplatform_v1beta1.types import (
        tool as tool_v1beta1,
    )


@dataclass(frozen=True)
class GenerateContentParams:
    model: str
    contents: (
        Sequence[content.Content] | Sequence[content_v1beta1.Content] | None
    ) = None
    system_instruction: content.Content | content_v1beta1.Content | None = None
    tools: Sequence[tool.Tool] | Sequence[tool_v1beta1.Tool] | None = None
    tool_config: tool.ToolConfig | tool_v1beta1.ToolConfig | None = None
    labels: Mapping[str, str] | None = None
    safety_settings: (
        Sequence[content.SafetySetting]
        | Sequence[content_v1beta1.SafetySetting]
        | None
    ) = None
    generation_config: (
        content.GenerationConfig | content_v1beta1.GenerationConfig | None
    ) = None


def get_genai_request_attributes(
    params: GenerateContentParams,
    operation_name: GenAIAttributes.GenAiOperationNameValues = GenAIAttributes.GenAiOperationNameValues.CHAT,
) -> dict[str, AttributeValue]:
    """Map request params to GenAI semantic convention span attributes."""
    model = _get_model_name(params.model)
    generation_config = params.generation_config
    attributes: dict[str, AttributeValue] = {
        GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name.value,
        GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.VERTEX_AI.value,
        GenAIAttributes.GEN_AI_REQUEST_MODEL: model,
    }

    if not generation_config:
        return attributes

    # Check for optional fields
    # https://proto-plus-python.readthedocs.io/en/stable/fields.html#optional-fields
    if "temperature" in generation_config:
        attributes[GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE] = (
            generation_config.temperature
        )
    if "top_p" in generation_config:
        attributes[GenAIAttributes.GEN_AI_REQUEST_TOP_P] = (
            generation_config.top_p
        )
    if "max_output_tokens" in generation_config:
        attributes[GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS] = (
            generation_config.max_output_tokens
        )
    if "presence_penalty" in generation_config:
        attributes[GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY] = (
            generation_config.presence_penalty
        )
    if "frequency_penalty" in generation_config:
        attributes[GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY] = (
            generation_config.frequency_penalty
        )
    # Uncomment once GEN_AI_REQUEST_SEED is released in 1.30
    # https://github.com/open-telemetry/semantic-conventions/pull/1710
    # if "seed" in generation_config:
    #     attributes[GenAIAttributes.GEN_AI_REQUEST_SEED] = (
    #         generation_config.seed
    #     )
    if "stop_sequences" in generation_config:
        attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = (
            generation_config.stop_sequences
        )

    return attributes


_MODEL_STRIP_RE = re.compile(
    r"^projects/(.*)/locations/(.*)/publishers/google/models/"
)


def _get_model_name(model: str) -> str:
    """Strip the Vertex AI resource path prefix, leaving the model ID."""
    return _MODEL_STRIP_RE.sub("", model)


OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = (
    "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
)


def is_content_enabled() -> bool:
    """Return True if capturing message content is enabled via env var."""
    capture_content = environ.get(
        OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false"
    )

    return capture_content.lower() == "true"


def get_span_name(span_attributes: Mapping[str, AttributeValue]) -> str:
    """Return "{operation} {model}", omitting the model when empty."""
    name = span_attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]
    model = span_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
    if not model:
        return f"{name}"
    return f"{name} {model}"
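

# Illustrative usage (a sketch, not part of the original module): given request
# parameters carrying a full Vertex AI model resource name, the helpers above
# yield the request attributes and span name. The project, location, and model
# values below are placeholders.
#
#     params = GenerateContentParams(
#         model="projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-pro",
#     )
#     attrs = get_genai_request_attributes(params)
#     # attrs[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gemini-1.5-pro"
#     span_name = get_span_name(attrs)  # "chat gemini-1.5-pro"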