Skip to content

Commit f75132f

Browse files
committed
Align constants with GenAI semantic conventions
1 parent d666678 commit f75132f

File tree

1 file changed

+129
-79
lines changed
  • instrumentation-genai/opentelemetry-instrumentation-openai-agents-v2/src/opentelemetry/instrumentation/openai_agents

1 file changed

+129
-79
lines changed

instrumentation-genai/opentelemetry-instrumentation-openai-agents-v2/src/opentelemetry/instrumentation/openai_agents/constants.py

Lines changed: 129 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -7,59 +7,57 @@
77

88
from __future__ import annotations
99

10+
from opentelemetry.semconv._incubating.attributes import (
11+
gen_ai_attributes as GenAIAttributes,
12+
)
13+
14+
15+
def _enum_values(enum_cls) -> dict[str, str]:
16+
"""Return mapping of enum member name to value."""
17+
return {member.name: member.value for member in enum_cls}
18+
19+
20+
_PROVIDER_VALUES = _enum_values(GenAIAttributes.GenAiProviderNameValues)


class GenAIProvider:
    """Well-known ``gen_ai.provider.name`` values.

    Values are sourced from the semconv ``GenAiProviderNameValues`` enum so
    they stay aligned with the spec. Each lookup carries a string fallback
    (the previously hard-coded literal) so that a semconv release lacking a
    member cannot raise ``KeyError`` at import time — mirroring the
    ``_attr(name, fallback)`` convention used for attribute keys elsewhere
    in this module.
    """

    OPENAI = _PROVIDER_VALUES.get("OPENAI", "openai")
    GCP_GEN_AI = _PROVIDER_VALUES.get("GCP_GEN_AI", "gcp.gen_ai")
    GCP_VERTEX_AI = _PROVIDER_VALUES.get("GCP_VERTEX_AI", "gcp.vertex_ai")
    GCP_GEMINI = _PROVIDER_VALUES.get("GCP_GEMINI", "gcp.gemini")
    ANTHROPIC = _PROVIDER_VALUES.get("ANTHROPIC", "anthropic")
    COHERE = _PROVIDER_VALUES.get("COHERE", "cohere")
    AZURE_AI_INFERENCE = _PROVIDER_VALUES.get(
        "AZURE_AI_INFERENCE", "azure.ai.inference"
    )
    AZURE_AI_OPENAI = _PROVIDER_VALUES.get("AZURE_AI_OPENAI", "azure.ai.openai")
    IBM_WATSONX_AI = _PROVIDER_VALUES.get("IBM_WATSONX_AI", "ibm.watsonx.ai")
    AWS_BEDROCK = _PROVIDER_VALUES.get("AWS_BEDROCK", "aws.bedrock")
    PERPLEXITY = _PROVIDER_VALUES.get("PERPLEXITY", "perplexity")
    X_AI = _PROVIDER_VALUES.get("X_AI", "x_ai")
    DEEPSEEK = _PROVIDER_VALUES.get("DEEPSEEK", "deepseek")
    GROQ = _PROVIDER_VALUES.get("GROQ", "groq")
    MISTRAL_AI = _PROVIDER_VALUES.get("MISTRAL_AI", "mistral_ai")

    # Superset for forward compatibility: every value the semconv enum
    # defines, plus the named constants above (which may have come from the
    # fallback literals and would otherwise be missing from the set).
    ALL = set(_PROVIDER_VALUES.values()) | {
        OPENAI,
        GCP_GEN_AI,
        GCP_VERTEX_AI,
        GCP_GEMINI,
        ANTHROPIC,
        COHERE,
        AZURE_AI_INFERENCE,
        AZURE_AI_OPENAI,
        IBM_WATSONX_AI,
        AWS_BEDROCK,
        PERPLEXITY,
        X_AI,
        DEEPSEEK,
        GROQ,
        MISTRAL_AI,
    }
41+
42+
43+
_OPERATION_VALUES = _enum_values(GenAIAttributes.GenAiOperationNameValues)
4644

4745

4846
class GenAIOperationName:
49-
CHAT = "chat"
50-
GENERATE_CONTENT = "generate_content"
51-
TEXT_COMPLETION = "text_completion"
52-
EMBEDDINGS = "embeddings"
53-
CREATE_AGENT = "create_agent"
54-
INVOKE_AGENT = "invoke_agent"
55-
EXECUTE_TOOL = "execute_tool"
47+
CHAT = _OPERATION_VALUES["CHAT"]
48+
GENERATE_CONTENT = _OPERATION_VALUES["GENERATE_CONTENT"]
49+
TEXT_COMPLETION = _OPERATION_VALUES["TEXT_COMPLETION"]
50+
EMBEDDINGS = _OPERATION_VALUES["EMBEDDINGS"]
51+
CREATE_AGENT = _OPERATION_VALUES["CREATE_AGENT"]
52+
INVOKE_AGENT = _OPERATION_VALUES["INVOKE_AGENT"]
53+
EXECUTE_TOOL = _OPERATION_VALUES["EXECUTE_TOOL"]
54+
# Operations below are not yet covered by the spec but remain for backwards compatibility
5655
TRANSCRIPTION = "transcription"
5756
SPEECH = "speech_generation"
5857
GUARDRAIL = "guardrail_check"
5958
HANDOFF = "agent_handoff"
6059
RESPONSE = "response" # internal aggregator in current processor
6160

62-
# Mapping of span data class (lower) to default op (heuristic)
6361
CLASS_FALLBACK = {
6462
"generationspan": CHAT,
6563
"responsespan": RESPONSE,
@@ -68,12 +66,14 @@ class GenAIOperationName:
6866
}
6967

7068

69+
_OUTPUT_VALUES = _enum_values(GenAIAttributes.GenAiOutputTypeValues)


class GenAIOutputType:
    """``gen_ai.output.type`` values aligned with the semconv enum.

    ``.get`` with the previous literal as fallback keeps these constants
    stable (and the module importable) if an older semconv release lacks a
    member, matching the ``_attr`` fallback convention used in this module.
    """

    TEXT = _OUTPUT_VALUES.get("TEXT", "text")
    JSON = _OUTPUT_VALUES.get("JSON", "json")
    IMAGE = _OUTPUT_VALUES.get("IMAGE", "image")
    SPEECH = _OUTPUT_VALUES.get("SPEECH", "speech")
7777

7878

7979
class GenAIToolType:
@@ -91,44 +91,94 @@ class GenAIEvaluationAttributes:
9191
EXPLANATION = "gen_ai.evaluation.explanation"
9292

9393

94-
# Complete list of GenAI semantic convention attribute keys
95-
GEN_AI_PROVIDER_NAME = "gen_ai.provider.name"
96-
GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
97-
GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
98-
GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
99-
GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
100-
GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
101-
GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k"
102-
GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty"
103-
GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
104-
GEN_AI_REQUEST_CHOICE_COUNT = "gen_ai.request.choice.count"
105-
GEN_AI_REQUEST_STOP_SEQUENCES = "gen_ai.request.stop_sequences"
106-
GEN_AI_REQUEST_ENCODING_FORMATS = "gen_ai.request.encoding_formats"
107-
GEN_AI_REQUEST_SEED = "gen_ai.request.seed"
108-
GEN_AI_RESPONSE_ID = "gen_ai.response.id"
109-
GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
110-
GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons"
111-
GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
112-
GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"
113-
GEN_AI_CONVERSATION_ID = "gen_ai.conversation.id"
114-
GEN_AI_AGENT_ID = "gen_ai.agent.id"
115-
GEN_AI_AGENT_NAME = "gen_ai.agent.name"
116-
GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description"
117-
GEN_AI_TOOL_NAME = "gen_ai.tool.name"
118-
GEN_AI_TOOL_TYPE = "gen_ai.tool.type"
119-
GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id"
120-
GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description"
94+
def _attr(name: str, fallback: str) -> str:
    """Resolve *name* from ``GenAIAttributes``; return *fallback* if absent.

    Keeps the module importable against older semconv releases that do not
    yet define a given attribute constant.
    """
    try:
        return getattr(GenAIAttributes, name)
    except AttributeError:
        return fallback
96+
97+
98+
# --- Attribute keys defined by the GenAI semantic conventions ---------------
# Each key is resolved through _attr() so that a missing constant in an older
# semconv release degrades to the literal key instead of failing at import.

GEN_AI_PROVIDER_NAME = _attr("GEN_AI_PROVIDER_NAME", "gen_ai.provider.name")
GEN_AI_OPERATION_NAME = _attr("GEN_AI_OPERATION_NAME", "gen_ai.operation.name")

# Request parameters.
GEN_AI_REQUEST_MODEL = _attr("GEN_AI_REQUEST_MODEL", "gen_ai.request.model")
GEN_AI_REQUEST_MAX_TOKENS = _attr(
    "GEN_AI_REQUEST_MAX_TOKENS", "gen_ai.request.max_tokens"
)
GEN_AI_REQUEST_TEMPERATURE = _attr(
    "GEN_AI_REQUEST_TEMPERATURE", "gen_ai.request.temperature"
)
GEN_AI_REQUEST_TOP_P = _attr("GEN_AI_REQUEST_TOP_P", "gen_ai.request.top_p")
GEN_AI_REQUEST_TOP_K = _attr("GEN_AI_REQUEST_TOP_K", "gen_ai.request.top_k")
GEN_AI_REQUEST_FREQUENCY_PENALTY = _attr(
    "GEN_AI_REQUEST_FREQUENCY_PENALTY", "gen_ai.request.frequency_penalty"
)
GEN_AI_REQUEST_PRESENCE_PENALTY = _attr(
    "GEN_AI_REQUEST_PRESENCE_PENALTY", "gen_ai.request.presence_penalty"
)
GEN_AI_REQUEST_CHOICE_COUNT = _attr(
    "GEN_AI_REQUEST_CHOICE_COUNT", "gen_ai.request.choice.count"
)
GEN_AI_REQUEST_STOP_SEQUENCES = _attr(
    "GEN_AI_REQUEST_STOP_SEQUENCES", "gen_ai.request.stop_sequences"
)
GEN_AI_REQUEST_ENCODING_FORMATS = _attr(
    "GEN_AI_REQUEST_ENCODING_FORMATS", "gen_ai.request.encoding_formats"
)
GEN_AI_REQUEST_SEED = _attr("GEN_AI_REQUEST_SEED", "gen_ai.request.seed")

# Response attributes.
GEN_AI_RESPONSE_ID = _attr("GEN_AI_RESPONSE_ID", "gen_ai.response.id")
GEN_AI_RESPONSE_MODEL = _attr("GEN_AI_RESPONSE_MODEL", "gen_ai.response.model")
GEN_AI_RESPONSE_FINISH_REASONS = _attr(
    "GEN_AI_RESPONSE_FINISH_REASONS", "gen_ai.response.finish_reasons"
)

# Token usage.
GEN_AI_USAGE_INPUT_TOKENS = _attr(
    "GEN_AI_USAGE_INPUT_TOKENS", "gen_ai.usage.input_tokens"
)
GEN_AI_USAGE_OUTPUT_TOKENS = _attr(
    "GEN_AI_USAGE_OUTPUT_TOKENS", "gen_ai.usage.output_tokens"
)

# Conversation / agent identity.
GEN_AI_CONVERSATION_ID = _attr(
    "GEN_AI_CONVERSATION_ID", "gen_ai.conversation.id"
)
GEN_AI_AGENT_ID = _attr("GEN_AI_AGENT_ID", "gen_ai.agent.id")
GEN_AI_AGENT_NAME = _attr("GEN_AI_AGENT_NAME", "gen_ai.agent.name")
GEN_AI_AGENT_DESCRIPTION = _attr(
    "GEN_AI_AGENT_DESCRIPTION", "gen_ai.agent.description"
)

# Tool metadata.
GEN_AI_TOOL_NAME = _attr("GEN_AI_TOOL_NAME", "gen_ai.tool.name")
GEN_AI_TOOL_TYPE = _attr("GEN_AI_TOOL_TYPE", "gen_ai.tool.type")
GEN_AI_TOOL_CALL_ID = _attr("GEN_AI_TOOL_CALL_ID", "gen_ai.tool.call.id")
GEN_AI_TOOL_DESCRIPTION = _attr(
    "GEN_AI_TOOL_DESCRIPTION", "gen_ai.tool.description"
)

# Content / message payload keys.
GEN_AI_OUTPUT_TYPE = _attr("GEN_AI_OUTPUT_TYPE", "gen_ai.output.type")
GEN_AI_SYSTEM_INSTRUCTIONS = _attr(
    "GEN_AI_SYSTEM_INSTRUCTIONS", "gen_ai.system_instructions"
)
GEN_AI_INPUT_MESSAGES = _attr("GEN_AI_INPUT_MESSAGES", "gen_ai.input.messages")
GEN_AI_OUTPUT_MESSAGES = _attr(
    "GEN_AI_OUTPUT_MESSAGES", "gen_ai.output.messages"
)
GEN_AI_DATA_SOURCE_ID = _attr("GEN_AI_DATA_SOURCE_ID", "gen_ai.data_source.id")

# The semantic conventions currently expose multiple usage token attributes;
# the completion/prompt aliases are retained for backwards compatibility.
GEN_AI_USAGE_PROMPT_TOKENS = _attr(
    "GEN_AI_USAGE_PROMPT_TOKENS", "gen_ai.usage.prompt_tokens"
)
GEN_AI_USAGE_COMPLETION_TOKENS = _attr(
    "GEN_AI_USAGE_COMPLETION_TOKENS", "gen_ai.usage.completion_tokens"
)

# --- Keys not (yet) defined in the spec; literal values retained ------------
GEN_AI_TOOL_CALL_ARGUMENTS = "gen_ai.tool.call.arguments"
GEN_AI_TOOL_CALL_RESULT = "gen_ai.tool.call.result"
GEN_AI_TOOL_DEFINITIONS = "gen_ai.tool.definitions"
GEN_AI_ORCHESTRATOR_AGENT_DEFINITIONS = "gen_ai.orchestrator.agent.definitions"
GEN_AI_GUARDRAIL_NAME = "gen_ai.guardrail.name"
GEN_AI_GUARDRAIL_TRIGGERED = "gen_ai.guardrail.triggered"
GEN_AI_HANDOFF_FROM_AGENT = "gen_ai.handoff.from_agent"
GEN_AI_HANDOFF_TO_AGENT = "gen_ai.handoff.to_agent"
GEN_AI_EMBEDDINGS_DIMENSION_COUNT = "gen_ai.embeddings.dimension.count"
GEN_AI_TOKEN_TYPE = _attr("GEN_AI_TOKEN_TYPE", "gen_ai.token.type")
180+
181+
182+
# Public API: the upper-case attribute-key constants and the GenAI* helper
# classes. The original filter (`isupper() or startswith("GenAI")`) leaked
# private names — "_PROVIDER_VALUES".isupper() is True despite the leading
# underscore — and the imported GenAIAttributes alias; both are excluded here.
__all__ = [
    name
    for name in globals()
    if not name.startswith("_")
    and name != "GenAIAttributes"
    and (name.isupper() or name.startswith("GenAI"))
]

0 commit comments

Comments
 (0)