 */

 export const SpanAttributes = {
+  // OpenTelemetry GenAI Semantic Conventions (Current)
+  // Required attributes
+  GEN_AI_OPERATION_NAME: "gen_ai.operation.name",
+  GEN_AI_PROVIDER_NAME: "gen_ai.provider.name",
+
+  // Request attributes
+  GEN_AI_REQUEST_MODEL: "gen_ai.request.model",
+  GEN_AI_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
+  GEN_AI_REQUEST_TOP_P: "gen_ai.request.top_p",
+  GEN_AI_REQUEST_TOP_K: "gen_ai.request.top_k",
+  GEN_AI_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
+  GEN_AI_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty",
+  GEN_AI_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty",
+  GEN_AI_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences",
+
+  // Response attributes
+  GEN_AI_RESPONSE_ID: "gen_ai.response.id",
+  GEN_AI_RESPONSE_MODEL: "gen_ai.response.model",
+  GEN_AI_RESPONSE_FINISH_REASONS: "gen_ai.response.finish_reasons",
+
+  // Token usage (Current OTel naming)
+  GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
+  GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
+
+  // Messages
+  GEN_AI_INPUT_MESSAGES: "gen_ai.input.messages",
+  GEN_AI_OUTPUT_MESSAGES: "gen_ai.output.messages",
+  GEN_AI_SYSTEM_INSTRUCTIONS: "gen_ai.system_instructions",
+
+  // Tool definitions
+  GEN_AI_TOOL_DEFINITIONS: "gen_ai.tool.definitions",
+
+  // Agent attributes
+  GEN_AI_AGENT_NAME: "gen_ai.agent.name",
+
+  // Deprecated attributes (kept for backward compatibility)
+  /** @deprecated Use GEN_AI_PROVIDER_NAME instead */
   LLM_SYSTEM: "gen_ai.system",
+  /** @deprecated Use GEN_AI_REQUEST_MODEL instead */
   LLM_REQUEST_MODEL: "gen_ai.request.model",
+  /** @deprecated Use GEN_AI_REQUEST_MAX_TOKENS instead */
   LLM_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
+  /** @deprecated Use GEN_AI_REQUEST_TEMPERATURE instead */
   LLM_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
+  /** @deprecated Use GEN_AI_REQUEST_TOP_P instead */
   LLM_REQUEST_TOP_P: "gen_ai.request.top_p",
+  /** @deprecated Use GEN_AI_INPUT_MESSAGES and events instead */
   LLM_PROMPTS: "gen_ai.prompt",
+  /** @deprecated Use GEN_AI_OUTPUT_MESSAGES and events instead */
   LLM_COMPLETIONS: "gen_ai.completion",
+  /** @deprecated Use GEN_AI_INPUT_MESSAGES instead */
   LLM_INPUT_MESSAGES: "gen_ai.input.messages",
+  /** @deprecated Use GEN_AI_OUTPUT_MESSAGES instead */
   LLM_OUTPUT_MESSAGES: "gen_ai.output.messages",
+  /** @deprecated Use GEN_AI_RESPONSE_MODEL instead */
   LLM_RESPONSE_MODEL: "gen_ai.response.model",
+  /** @deprecated Use GEN_AI_USAGE_INPUT_TOKENS instead */
   LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens",
+  /** @deprecated Use GEN_AI_USAGE_OUTPUT_TOKENS instead */
   LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens",

-  GEN_AI_AGENT_NAME: "gen_ai.agent.name",
-
-  // LLM
+  // LLM (Non-standard attributes)
   LLM_REQUEST_TYPE: "llm.request.type",
   LLM_USAGE_TOTAL_TOKENS: "llm.usage.total_tokens",
   LLM_TOP_K: "llm.top_k",
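
For context, a minimal sketch of how an instrumentation might apply the new constants to a span. It assumes @opentelemetry/api with a tracer provider already configured elsewhere; the import path, tracer name, model names, and token counts are illustrative, not part of this change.

// Hypothetical usage of the new GEN_AI_* constants on an OTel span.
import { trace } from "@opentelemetry/api";
import { SpanAttributes } from "./SpanAttributes"; // assumed local path

const tracer = trace.getTracer("example-instrumentation");

// Semconv recommends naming the span "{operation} {request model}".
tracer.startActiveSpan("chat gpt-4o", (span) => {
  // Required attributes
  span.setAttribute(SpanAttributes.GEN_AI_OPERATION_NAME, "chat");
  span.setAttribute(SpanAttributes.GEN_AI_PROVIDER_NAME, "openai");

  // Request attributes, recorded before the model call
  span.setAttribute(SpanAttributes.GEN_AI_REQUEST_MODEL, "gpt-4o");
  span.setAttribute(SpanAttributes.GEN_AI_REQUEST_TEMPERATURE, 0.2);
  span.setAttribute(SpanAttributes.GEN_AI_REQUEST_MAX_TOKENS, 256);

  // ... perform the model call, then record response details ...
  span.setAttribute(SpanAttributes.GEN_AI_RESPONSE_MODEL, "gpt-4o-2024-08-06");
  span.setAttribute(SpanAttributes.GEN_AI_USAGE_INPUT_TOKENS, 42);
  span.setAttribute(SpanAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, 128);
  span.setAttribute(SpanAttributes.GEN_AI_RESPONSE_FINISH_REASONS, ["stop"]);

  span.end();
});

During a migration window, callers can keep emitting the deprecated LLM_* keys alongside the GEN_AI_* ones; note that several of them (for example LLM_REQUEST_MODEL and GEN_AI_REQUEST_MODEL) already resolve to the same attribute string, so only the token-usage and prompt/completion keys produce genuinely different attributes.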