Skip to content

Commit 00e2529

Browse files
committed
feat: use official OpenTelemetry incubating semantic conventions for token attributes
1 parent 1a416a0 commit 00e2529

File tree

4 files changed

+32
-39
lines changed

4 files changed

+32
-39
lines changed

packages/ai-semantic-conventions/package.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,8 @@
3434
"access": "public"
3535
},
3636
"dependencies": {
37-
"@opentelemetry/api": "^1.9.0"
37+
"@opentelemetry/api": "^1.9.0",
38+
"@opentelemetry/semantic-conventions": "^1.36.0"
3839
},
3940
"homepage": "https://github.com/traceloop/openllmetry-js/tree/main/packages/ai-semantic-conventions",
4041
"gitHead": "ef1e70d6037f7b5c061056ef2be16e3f55f02ed5"

packages/ai-semantic-conventions/src/SemanticAttributes.ts

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,11 @@
1414
* limitations under the License.
1515
*/
1616

17+
import {
18+
ATTR_GEN_AI_USAGE_INPUT_TOKENS,
19+
ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
20+
} from "@opentelemetry/semantic-conventions/build/src/index-incubating";
21+
1722
export const SpanAttributes = {
1823
LLM_SYSTEM: "gen_ai.system",
1924
LLM_REQUEST_MODEL: "gen_ai.request.model",
@@ -27,8 +32,8 @@ export const SpanAttributes = {
2732
LLM_RESPONSE_MODEL: "gen_ai.response.model",
2833
LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens",
2934
LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens",
30-
LLM_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
31-
LLM_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
35+
LLM_USAGE_INPUT_TOKENS: ATTR_GEN_AI_USAGE_INPUT_TOKENS,
36+
LLM_USAGE_OUTPUT_TOKENS: ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
3237

3338
GEN_AI_AGENT_NAME: "gen_ai.agent.name",
3439

packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts

Lines changed: 14 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -318,41 +318,25 @@ const transformPrompts = (attributes: Record<string, any>): void => {
318318
};
319319

320320
const transformPromptTokens = (attributes: Record<string, any>): void => {
321-
if (SpanAttributes.LLM_USAGE_INPUT_TOKENS in attributes) {
322-
// Already has input_tokens, delete legacy duplicates
323-
if (AI_USAGE_PROMPT_TOKENS in attributes) {
324-
delete attributes[AI_USAGE_PROMPT_TOKENS];
325-
}
326-
if (SpanAttributes.LLM_USAGE_PROMPT_TOKENS in attributes) {
327-
delete attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS];
328-
}
329-
} else {
330-
// Transform legacy promptTokens to modern input_tokens
331-
if (AI_USAGE_PROMPT_TOKENS in attributes) {
332-
attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS] =
333-
attributes[AI_USAGE_PROMPT_TOKENS];
334-
delete attributes[AI_USAGE_PROMPT_TOKENS];
335-
}
321+
// Make sure we have the right naming convention
322+
if (!(SpanAttributes.LLM_USAGE_INPUT_TOKENS in attributes) && AI_USAGE_PROMPT_TOKENS in attributes) {
323+
attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS] = attributes[AI_USAGE_PROMPT_TOKENS];
336324
}
325+
326+
// Clean up legacy attributes
327+
delete attributes[AI_USAGE_PROMPT_TOKENS];
328+
delete attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS];
337329
};
338330

339331
const transformCompletionTokens = (attributes: Record<string, any>): void => {
340-
if (SpanAttributes.LLM_USAGE_OUTPUT_TOKENS in attributes) {
341-
// Already has output_tokens, delete legacy duplicates
342-
if (AI_USAGE_COMPLETION_TOKENS in attributes) {
343-
delete attributes[AI_USAGE_COMPLETION_TOKENS];
344-
}
345-
if (SpanAttributes.LLM_USAGE_COMPLETION_TOKENS in attributes) {
346-
delete attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS];
347-
}
348-
} else {
349-
// Transform legacy completionTokens to modern output_tokens
350-
if (AI_USAGE_COMPLETION_TOKENS in attributes) {
351-
attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS] =
352-
attributes[AI_USAGE_COMPLETION_TOKENS];
353-
delete attributes[AI_USAGE_COMPLETION_TOKENS];
354-
}
332+
// Make sure we have the right naming convention
333+
if (!(SpanAttributes.LLM_USAGE_OUTPUT_TOKENS in attributes) && AI_USAGE_COMPLETION_TOKENS in attributes) {
334+
attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS] = attributes[AI_USAGE_COMPLETION_TOKENS];
355335
}
336+
337+
// Clean up legacy attributes
338+
delete attributes[AI_USAGE_COMPLETION_TOKENS];
339+
delete attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS];
356340
};
357341

358342
const calculateTotalTokens = (attributes: Record<string, any>): void => {

pnpm-lock.yaml

Lines changed: 9 additions & 6 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)