Skip to content

Commit 1a416a0

Browse files
committed
fix: transform Vercel AI SDK token attributes to use input_tokens/output_tokens
1 parent: 93b2388 · commit: 1a416a0

File tree

5 files changed

+91
-69
lines changed

5 files changed

+91
-69
lines changed

packages/ai-semantic-conventions/src/SemanticAttributes.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@ export const SpanAttributes = {
2727
LLM_RESPONSE_MODEL: "gen_ai.response.model",
2828
LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens",
2929
LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens",
30+
LLM_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
31+
LLM_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
3032

3133
GEN_AI_AGENT_NAME: "gen_ai.agent.name",
3234

packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts

Lines changed: 32 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,6 @@ const AI_PROMPT_MESSAGES = "ai.prompt.messages";
2121
const AI_PROMPT = "ai.prompt";
2222
const AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens";
2323
const AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens";
24-
const GEN_AI_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens";
25-
const GEN_AI_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens";
26-
const GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens";
27-
const GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens";
2824
const AI_MODEL_PROVIDER = "ai.model.provider";
2925
const AI_PROMPT_TOOLS = "ai.prompt.tools";
3026
const AI_TELEMETRY_METADATA_PREFIX = "ai.telemetry.metadata.";
@@ -322,28 +318,46 @@ const transformPrompts = (attributes: Record<string, any>): void => {
322318
};
323319

324320
const transformPromptTokens = (attributes: Record<string, any>): void => {
325-
if (AI_USAGE_PROMPT_TOKENS in attributes) {
326-
delete attributes[AI_USAGE_PROMPT_TOKENS];
327-
}
328-
329-
if (GEN_AI_USAGE_PROMPT_TOKENS in attributes) {
330-
delete attributes[GEN_AI_USAGE_PROMPT_TOKENS];
321+
if (SpanAttributes.LLM_USAGE_INPUT_TOKENS in attributes) {
322+
// Already has input_tokens, delete legacy duplicates
323+
if (AI_USAGE_PROMPT_TOKENS in attributes) {
324+
delete attributes[AI_USAGE_PROMPT_TOKENS];
325+
}
326+
if (SpanAttributes.LLM_USAGE_PROMPT_TOKENS in attributes) {
327+
delete attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS];
328+
}
329+
} else {
330+
// Transform legacy promptTokens to modern input_tokens
331+
if (AI_USAGE_PROMPT_TOKENS in attributes) {
332+
attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS] =
333+
attributes[AI_USAGE_PROMPT_TOKENS];
334+
delete attributes[AI_USAGE_PROMPT_TOKENS];
335+
}
331336
}
332337
};
333338

334339
const transformCompletionTokens = (attributes: Record<string, any>): void => {
335-
if (AI_USAGE_COMPLETION_TOKENS in attributes) {
336-
delete attributes[AI_USAGE_COMPLETION_TOKENS];
337-
}
338-
339-
if (GEN_AI_USAGE_COMPLETION_TOKENS in attributes) {
340-
delete attributes[GEN_AI_USAGE_COMPLETION_TOKENS];
340+
if (SpanAttributes.LLM_USAGE_OUTPUT_TOKENS in attributes) {
341+
// Already has output_tokens, delete legacy duplicates
342+
if (AI_USAGE_COMPLETION_TOKENS in attributes) {
343+
delete attributes[AI_USAGE_COMPLETION_TOKENS];
344+
}
345+
if (SpanAttributes.LLM_USAGE_COMPLETION_TOKENS in attributes) {
346+
delete attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS];
347+
}
348+
} else {
349+
// Transform legacy completionTokens to modern output_tokens
350+
if (AI_USAGE_COMPLETION_TOKENS in attributes) {
351+
attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS] =
352+
attributes[AI_USAGE_COMPLETION_TOKENS];
353+
delete attributes[AI_USAGE_COMPLETION_TOKENS];
354+
}
341355
}
342356
};
343357

344358
const calculateTotalTokens = (attributes: Record<string, any>): void => {
345-
const inputTokens = attributes[GEN_AI_USAGE_INPUT_TOKENS];
346-
const outputTokens = attributes[GEN_AI_USAGE_OUTPUT_TOKENS];
359+
const inputTokens = attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS];
360+
const outputTokens = attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS];
347361

348362
if (inputTokens && outputTokens) {
349363
attributes[`${SpanAttributes.LLM_USAGE_TOTAL_TOKENS}`] =

packages/traceloop-sdk/test/ai-sdk-integration.test.ts

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -141,9 +141,9 @@ describe("Test AI SDK Integration with Recording", function () {
141141
result.text,
142142
);
143143

144-
// Verify token usage
145-
assert.ok(generateTextSpan.attributes["gen_ai.usage.prompt_tokens"]);
146-
assert.ok(generateTextSpan.attributes["gen_ai.usage.completion_tokens"]);
144+
// Verify token usage - should be transformed to input/output tokens
145+
assert.ok(generateTextSpan.attributes["gen_ai.usage.input_tokens"]);
146+
assert.ok(generateTextSpan.attributes["gen_ai.usage.output_tokens"]);
147147
assert.ok(generateTextSpan.attributes["llm.usage.total_tokens"]);
148148
});
149149

@@ -209,9 +209,9 @@ describe("Test AI SDK Integration with Recording", function () {
209209
result.text,
210210
);
211211

212-
// Verify token usage
213-
assert.ok(generateTextSpan.attributes["gen_ai.usage.prompt_tokens"]);
214-
assert.ok(generateTextSpan.attributes["gen_ai.usage.completion_tokens"]);
212+
// Verify token usage - should be transformed to input/output tokens
213+
assert.ok(generateTextSpan.attributes["gen_ai.usage.input_tokens"]);
214+
assert.ok(generateTextSpan.attributes["gen_ai.usage.output_tokens"]);
215215
assert.ok(generateTextSpan.attributes["llm.usage.total_tokens"]);
216216
});
217217

packages/traceloop-sdk/test/ai-sdk-transformations.test.ts

Lines changed: 49 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -787,18 +787,16 @@ describe("AI SDK Transformations", () => {
787787
});
788788

789789
describe("transformAiSdkAttributes - prompt tokens", () => {
790-
it("should transform ai.usage.promptTokens to LLM usage attribute", () => {
790+
it("should delete ai.usage.promptTokens and gen_ai.usage.prompt_tokens (keep input_tokens)", () => {
791791
const attributes = {
792792
"ai.usage.promptTokens": 50,
793+
"gen_ai.usage.input_tokens": 50,
793794
someOtherAttr: "value",
794795
};
795796

796797
transformLLMSpans(attributes);
797798

798-
assert.strictEqual(
799-
attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS],
800-
50,
801-
);
799+
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS], 50);
802800
assert.strictEqual(attributes["ai.usage.promptTokens"], undefined);
803801
assert.strictEqual(attributes.someOtherAttr, "value");
804802
});
@@ -814,28 +812,31 @@ describe("AI SDK Transformations", () => {
814812
assert.deepStrictEqual(attributes, originalAttributes);
815813
});
816814

817-
it("should handle zero prompt tokens", () => {
815+
it("should handle zero input tokens", () => {
818816
const attributes = {
819817
"ai.usage.promptTokens": 0,
818+
"gen_ai.usage.input_tokens": 0,
820819
};
821820

822821
transformLLMSpans(attributes);
823822

824-
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], 0);
823+
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS], 0);
824+
assert.strictEqual(attributes["ai.usage.promptTokens"], undefined);
825825
});
826826
});
827827

828828
describe("transformAiSdkAttributes - completion tokens", () => {
829-
it("should transform ai.usage.completionTokens to LLM usage attribute", () => {
829+
it("should delete ai.usage.completionTokens and gen_ai.usage.completion_tokens (keep output_tokens)", () => {
830830
const attributes = {
831831
"ai.usage.completionTokens": 25,
832+
"gen_ai.usage.output_tokens": 25,
832833
someOtherAttr: "value",
833834
};
834835

835836
transformLLMSpans(attributes);
836837

837838
assert.strictEqual(
838-
attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS],
839+
attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS],
839840
25,
840841
);
841842
assert.strictEqual(attributes["ai.usage.completionTokens"], undefined);
@@ -853,25 +854,24 @@ describe("AI SDK Transformations", () => {
853854
assert.deepStrictEqual(attributes, originalAttributes);
854855
});
855856

856-
it("should handle zero completion tokens", () => {
857+
it("should handle zero output tokens", () => {
857858
const attributes = {
858859
"ai.usage.completionTokens": 0,
860+
"gen_ai.usage.output_tokens": 0,
859861
};
860862

861863
transformLLMSpans(attributes);
862864

863-
assert.strictEqual(
864-
attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS],
865-
0,
866-
);
865+
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS], 0);
866+
assert.strictEqual(attributes["ai.usage.completionTokens"], undefined);
867867
});
868868
});
869869

870870
describe("transformAiSdkAttributes - total tokens calculation", () => {
871-
it("should calculate total tokens from prompt and completion tokens", () => {
871+
it("should calculate total tokens from input and output tokens", () => {
872872
const attributes = {
873-
[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: 50,
874-
[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: 25,
873+
[SpanAttributes.LLM_USAGE_INPUT_TOKENS]: 50,
874+
[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS]: 25,
875875
};
876876

877877
transformLLMSpans(attributes);
@@ -881,18 +881,18 @@ describe("AI SDK Transformations", () => {
881881

882882
it("should handle string token values", () => {
883883
const attributes = {
884-
[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: "50",
885-
[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: "25",
884+
[SpanAttributes.LLM_USAGE_INPUT_TOKENS]: "50",
885+
[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS]: "25",
886886
};
887887

888888
transformLLMSpans(attributes);
889889

890890
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 75);
891891
});
892892

893-
it("should not calculate total when prompt tokens are missing", () => {
893+
it("should not calculate total when input tokens are missing", () => {
894894
const attributes = {
895-
[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: 25,
895+
[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS]: 25,
896896
};
897897

898898
transformLLMSpans(attributes);
@@ -903,9 +903,9 @@ describe("AI SDK Transformations", () => {
903903
);
904904
});
905905

906-
it("should not calculate total when completion tokens are missing", () => {
906+
it("should not calculate total when output tokens are missing", () => {
907907
const attributes = {
908-
[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: 50,
908+
[SpanAttributes.LLM_USAGE_INPUT_TOKENS]: 50,
909909
};
910910

911911
transformLLMSpans(attributes);
@@ -1017,6 +1017,8 @@ describe("AI SDK Transformations", () => {
10171017
"ai.prompt.messages": JSON.stringify([{ role: "user", content: "Hi" }]),
10181018
"ai.usage.promptTokens": 10,
10191019
"ai.usage.completionTokens": 5,
1020+
"gen_ai.usage.input_tokens": 10,
1021+
"gen_ai.usage.output_tokens": 5,
10201022
"ai.model.provider": "openai.chat",
10211023
someOtherAttr: "value",
10221024
};
@@ -1043,15 +1045,9 @@ describe("AI SDK Transformations", () => {
10431045
"user",
10441046
);
10451047

1046-
// Check token transformations
1047-
assert.strictEqual(
1048-
attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS],
1049-
10,
1050-
);
1051-
assert.strictEqual(
1052-
attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS],
1053-
5,
1054-
);
1048+
// Check token transformations - should keep input/output tokens
1049+
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS], 10);
1050+
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS], 5);
10551051
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 15);
10561052

10571053
// Check vendor transformation
@@ -1062,6 +1058,14 @@ describe("AI SDK Transformations", () => {
10621058
assert.strictEqual(attributes["ai.prompt.messages"], undefined);
10631059
assert.strictEqual(attributes["ai.usage.promptTokens"], undefined);
10641060
assert.strictEqual(attributes["ai.usage.completionTokens"], undefined);
1061+
assert.strictEqual(
1062+
attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS],
1063+
undefined,
1064+
);
1065+
assert.strictEqual(
1066+
attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS],
1067+
undefined,
1068+
);
10651069
assert.strictEqual(attributes["ai.model.provider"], undefined);
10661070

10671071
// Check other attributes are preserved
@@ -1089,6 +1093,8 @@ describe("AI SDK Transformations", () => {
10891093
"ai.prompt.messages": JSON.stringify([{ role: "user", content: "Hi" }]),
10901094
"ai.usage.promptTokens": 10,
10911095
"ai.usage.completionTokens": 5,
1096+
"gen_ai.usage.input_tokens": 10,
1097+
"gen_ai.usage.output_tokens": 5,
10921098
"ai.model.provider": "azure-openai.chat",
10931099
someOtherAttr: "value",
10941100
};
@@ -1115,15 +1121,9 @@ describe("AI SDK Transformations", () => {
11151121
"user",
11161122
);
11171123

1118-
// Check token transformations
1119-
assert.strictEqual(
1120-
attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS],
1121-
10,
1122-
);
1123-
assert.strictEqual(
1124-
attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS],
1125-
5,
1126-
);
1124+
// Check token transformations - should keep input/output tokens
1125+
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_INPUT_TOKENS], 10);
1126+
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_OUTPUT_TOKENS], 5);
11271127
assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 15);
11281128

11291129
// Check vendor transformation
@@ -1134,6 +1134,14 @@ describe("AI SDK Transformations", () => {
11341134
assert.strictEqual(attributes["ai.prompt.messages"], undefined);
11351135
assert.strictEqual(attributes["ai.usage.promptTokens"], undefined);
11361136
assert.strictEqual(attributes["ai.usage.completionTokens"], undefined);
1137+
assert.strictEqual(
1138+
attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS],
1139+
undefined,
1140+
);
1141+
assert.strictEqual(
1142+
attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS],
1143+
undefined,
1144+
);
11371145
assert.strictEqual(attributes["ai.model.provider"], undefined);
11381146

11391147
// Check other attributes are preserved

packages/traceloop-sdk/test/decorators.test.ts

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -679,13 +679,11 @@ describe("Test SDK Decorators", () => {
679679
result.text,
680680
);
681681
assert.strictEqual(
682-
generateTextSpan.attributes[`${SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`],
682+
generateTextSpan.attributes[`${SpanAttributes.LLM_USAGE_INPUT_TOKENS}`],
683683
14,
684684
);
685685
assert.strictEqual(
686-
generateTextSpan.attributes[
687-
`${SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`
688-
],
686+
generateTextSpan.attributes[`${SpanAttributes.LLM_USAGE_OUTPUT_TOKENS}`],
689687
8,
690688
);
691689
assert.strictEqual(

0 commit comments

Comments (0)