+import json
+
+import pytest
+from langchain_core.messages import AIMessage
+from langchain_core.outputs import ChatGeneration, Generation, LLMResult
+
+from opentelemetry.instrumentation.langchain.span_utils import set_chat_response
+from opentelemetry.instrumentation.langchain.utils import TRACELOOP_TRACE_CONTENT
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+
+
+class _DummySpan:
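+    """Minimal stand-in for an OTel span that records attributes in a dict."""
+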
+    def __init__(self):
+        self.attributes = {}
+
+    def is_recording(self) -> bool:
+        return True
+
+    def set_attribute(self, key, value) -> None:
+        self.attributes[key] = value
+
+
+@pytest.fixture(autouse=True)
+def _enable_prompt_content(monkeypatch):
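+    """Enable content tracing so completions are written to span attributes."""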
+    monkeypatch.setenv(TRACELOOP_TRACE_CONTENT, "true")
+
+
+def _make_result(message: AIMessage) -> tuple[_DummySpan, LLMResult]:
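+    """Pair a fresh dummy span with an LLMResult wrapping a single message."""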
+    span = _DummySpan()
+    generation = ChatGeneration(message=message)
+    result = LLMResult(generations=[[generation]])
+    return span, result
+
+
+@pytest.mark.parametrize(
+    "message",
+    [
+        AIMessage(content="hi"),
+        AIMessage(
+            content="tool reply",
+            additional_kwargs={
+                "function_call": {"name": "call_weather", "arguments": "{}"}
+            },
+        ),
+        AIMessage(
+            content="another reply",
+            tool_calls=[{"name": "foo", "args": {"city": "SF"}, "id": "1"}],
+        ),
+    ],
+    ids=["plain", "function_call", "tool_calls"],
+)
+def test_chat_generation_role_is_assistant(message):
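+    """Chat generations should always be attributed to the assistant role."""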
+    span, result = _make_result(message)
+
+    set_chat_response(span, result)
+
+    assert (
+        span.attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role"]
+        == "assistant"
+    )
+
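+    # Legacy OpenAI-style function_call entries surface as tool_call attributes.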
+    function_call = message.additional_kwargs.get("function_call")
+    if function_call:
+        prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.tool_calls.0"
+        assert span.attributes[prefix + ".name"] == function_call["name"]
+        assert span.attributes[prefix + ".arguments"] == function_call["arguments"]
+
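+    # Structured tool calls keep their id, and args are serialized to JSON.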
+    if message.tool_calls:
+        prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.tool_calls.0"
+        assert span.attributes[prefix + ".name"] == message.tool_calls[0]["name"]
+        assert span.attributes[prefix + ".id"] == message.tool_calls[0]["id"]
+        recorded_args = json.loads(span.attributes[prefix + ".arguments"])
+        assert recorded_args == message.tool_calls[0]["args"]
+
+
+def test_plain_generation_defaults_to_assistant_role():
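+    """Plain (non-chat) generations also default to the assistant role."""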
+    span = _DummySpan()
+    generation = Generation(text="plain completion")
+    result = LLMResult(generations=[[generation]])
+
+    set_chat_response(span, result)
+
+    prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.0"
+    assert span.attributes[prefix + ".role"] == "assistant"
+    assert span.attributes[prefix + ".content"] == "plain completion"