Skip to content

Commit b60fc7b

Browse files
committed
fix(langchain): incorrect role on generations
1 parent ba1aa4c commit b60fc7b

File tree

2 files changed

+92
-10
lines changed

2 files changed

+92
-10
lines changed

packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -211,11 +211,6 @@ def set_chat_response(span: Span, response: LLMResult) -> None:
211211
for generations in response.generations:
212212
for generation in generations:
213213
prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{i}"
214-
_set_span_attribute(
215-
span,
216-
f"{prefix}.role",
217-
_message_type_to_role(generation.type),
218-
)
219214

220215
# Try to get content from various sources
221216
content = None
@@ -244,6 +239,11 @@ def set_chat_response(span: Span, response: LLMResult) -> None:
244239

245240
# Handle tool calls and function calls
246241
if hasattr(generation, "message") and generation.message:
242+
_set_span_attribute(
243+
span,
244+
f"{prefix}.role",
245+
_message_type_to_role(generation.message.type),
246+
)
247247
# Handle legacy function_call format (single function call)
248248
if generation.message.additional_kwargs.get("function_call"):
249249
_set_span_attribute(
@@ -268,12 +268,8 @@ def set_chat_response(span: Span, response: LLMResult) -> None:
268268
else generation.message.additional_kwargs.get("tool_calls")
269269
)
270270
if tool_calls and isinstance(tool_calls, list):
271-
_set_span_attribute(
272-
span,
273-
f"{prefix}.role",
274-
"assistant",
275-
)
276271
_set_chat_tool_calls(span, prefix, tool_calls)
272+
277273
i += 1
278274

279275

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
import json
2+
3+
import pytest
4+
from langchain_core.messages import AIMessage
5+
from langchain_core.outputs import ChatGeneration, Generation, LLMResult
6+
7+
from opentelemetry.instrumentation.langchain.span_utils import set_chat_response
8+
from opentelemetry.instrumentation.langchain.utils import TRACELOOP_TRACE_CONTENT
9+
from opentelemetry.semconv._incubating.attributes import (
10+
gen_ai_attributes as GenAIAttributes,
11+
)
12+
13+
14+
class _DummySpan:
15+
def __init__(self):
16+
self.attributes = {}
17+
18+
def is_recording(self) -> bool:
19+
return True
20+
21+
def set_attribute(self, key, value) -> None:
22+
self.attributes[key] = value
23+
24+
25+
@pytest.fixture(autouse=True)
def _enable_prompt_content(monkeypatch):
    """Force content tracing on for every test in this module.

    ``set_chat_response`` only records prompt/completion content when the
    TRACELOOP_TRACE_CONTENT environment variable allows it.
    """
    monkeypatch.setenv(TRACELOOP_TRACE_CONTENT, "true")
28+
29+
30+
def _make_result(message: AIMessage) -> tuple[_DummySpan, LLMResult]:
    """Wrap *message* in a single-generation LLMResult plus a fresh span."""
    chat_generation = ChatGeneration(message=message)
    llm_result = LLMResult(generations=[[chat_generation]])
    return _DummySpan(), llm_result
35+
36+
37+
@pytest.mark.parametrize(
    "message",
    [
        AIMessage(content="hi"),
        AIMessage(
            content="tool reply",
            additional_kwargs={
                "function_call": {"name": "call_weather", "arguments": "{}"}
            },
        ),
        AIMessage(
            content="another reply",
            tool_calls=[{"name": "foo", "args": {"city": "SF"}, "id": "1"}],
        ),
    ],
)
def test_chat_generation_role_is_assistant(message):
    """A ChatGeneration completion is always attributed the assistant role,
    regardless of whether it carries a legacy function_call or tool_calls."""
    span, result = _make_result(message)

    set_chat_response(span, result)

    role_key = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role"
    assert span.attributes[role_key] == "assistant"

    function_call = message.additional_kwargs.get("function_call")
    if function_call:
        tool_prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.tool_calls.0"
        assert span.attributes[f"{tool_prefix}.name"] == function_call["name"]
        assert (
            span.attributes[f"{tool_prefix}.arguments"]
            == function_call["arguments"]
        )

    if message.tool_calls:
        tool_prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.tool_calls.0"
        first_call = message.tool_calls[0]
        assert span.attributes[f"{tool_prefix}.name"] == first_call["name"]
        assert span.attributes[f"{tool_prefix}.id"] == first_call["id"]
        # tool_call args are serialized to JSON before being recorded.
        recorded_args = json.loads(span.attributes[f"{tool_prefix}.arguments"])
        assert recorded_args == first_call["args"]
75+
76+
77+
def test_plain_generation_defaults_to_assistant_role():
    """A plain (non-chat) Generation with no message still gets the
    assistant role and has its text recorded as the completion content."""
    span = _DummySpan()
    result = LLMResult(generations=[[Generation(text="plain completion")]])

    set_chat_response(span, result)

    completion_prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.0"
    assert span.attributes[f"{completion_prefix}.role"] == "assistant"
    assert span.attributes[f"{completion_prefix}.content"] == "plain completion"

0 commit comments

Comments
 (0)