Skip to content

Commit 2561982

Browse files
committed
fix(ai-sdk): improve and align attributes with gen-ai semantic-conventions
1 parent d6ebc23 commit 2561982

File tree

4 files changed

+418
-57
lines changed

4 files changed

+418
-57
lines changed

packages/ai-semantic-conventions/src/SemanticAttributes.ts

Lines changed: 49 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,22 +15,68 @@
1515
*/
1616

1717
export const SpanAttributes = {
18+
// OpenTelemetry GenAI Semantic Conventions (Current)
19+
// Required attributes
20+
GEN_AI_OPERATION_NAME: "gen_ai.operation.name",
21+
GEN_AI_PROVIDER_NAME: "gen_ai.provider.name",
22+
23+
// Request attributes
24+
GEN_AI_REQUEST_MODEL: "gen_ai.request.model",
25+
GEN_AI_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
26+
GEN_AI_REQUEST_TOP_P: "gen_ai.request.top_p",
27+
GEN_AI_REQUEST_TOP_K: "gen_ai.request.top_k",
28+
GEN_AI_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
29+
GEN_AI_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty",
30+
GEN_AI_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty",
31+
GEN_AI_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences",
32+
33+
// Response attributes
34+
GEN_AI_RESPONSE_ID: "gen_ai.response.id",
35+
GEN_AI_RESPONSE_MODEL: "gen_ai.response.model",
36+
GEN_AI_RESPONSE_FINISH_REASONS: "gen_ai.response.finish_reasons",
37+
38+
// Token usage (Current OTel naming)
39+
GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
40+
GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
41+
42+
// Messages
43+
GEN_AI_INPUT_MESSAGES: "gen_ai.input.messages",
44+
GEN_AI_OUTPUT_MESSAGES: "gen_ai.output.messages",
45+
GEN_AI_SYSTEM_INSTRUCTIONS: "gen_ai.system_instructions",
46+
47+
// Tool definitions
48+
GEN_AI_TOOL_DEFINITIONS: "gen_ai.tool.definitions",
49+
50+
// Agent attributes
51+
GEN_AI_AGENT_NAME: "gen_ai.agent.name",
52+
53+
// Deprecated attributes (kept for backward compatibility)
54+
/** @deprecated Use GEN_AI_PROVIDER_NAME instead */
1855
LLM_SYSTEM: "gen_ai.system",
56+
/** @deprecated Use GEN_AI_REQUEST_MODEL instead */
1957
LLM_REQUEST_MODEL: "gen_ai.request.model",
58+
/** @deprecated Use GEN_AI_REQUEST_MAX_TOKENS instead */
2059
LLM_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
60+
/** @deprecated Use GEN_AI_REQUEST_TEMPERATURE instead */
2161
LLM_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
62+
/** @deprecated Use GEN_AI_REQUEST_TOP_P instead */
2263
LLM_REQUEST_TOP_P: "gen_ai.request.top_p",
64+
/** @deprecated Use GEN_AI_INPUT_MESSAGES and events instead */
2365
LLM_PROMPTS: "gen_ai.prompt",
66+
/** @deprecated Use GEN_AI_OUTPUT_MESSAGES and events instead */
2467
LLM_COMPLETIONS: "gen_ai.completion",
68+
/** @deprecated Use GEN_AI_INPUT_MESSAGES instead */
2569
LLM_INPUT_MESSAGES: "gen_ai.input.messages",
70+
/** @deprecated Use GEN_AI_OUTPUT_MESSAGES instead */
2671
LLM_OUTPUT_MESSAGES: "gen_ai.output.messages",
72+
/** @deprecated Use GEN_AI_RESPONSE_MODEL instead */
2773
LLM_RESPONSE_MODEL: "gen_ai.response.model",
74+
/** @deprecated Use GEN_AI_USAGE_INPUT_TOKENS instead */
2875
LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens",
76+
/** @deprecated Use GEN_AI_USAGE_OUTPUT_TOKENS instead */
2977
LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens",
3078

31-
GEN_AI_AGENT_NAME: "gen_ai.agent.name",
32-
33-
// LLM
79+
// LLM (Non-standard attributes)
3480
LLM_REQUEST_TYPE: "llm.request.type",
3581
LLM_USAGE_TOTAL_TOKENS: "llm.usage.total_tokens",
3682
LLM_TOP_K: "llm.top_k",

0 commit comments

Comments (0)