lmnr 0.6.20__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +0 -4
- lmnr/opentelemetry_lib/decorators/__init__.py +211 -151
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +678 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +4 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +7 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +190 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +90 -2
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
- lmnr/opentelemetry_lib/tracing/context.py +109 -0
- lmnr/opentelemetry_lib/tracing/processor.py +6 -7
- lmnr/opentelemetry_lib/tracing/tracer.py +29 -0
- lmnr/opentelemetry_lib/utils/package_check.py +9 -0
- lmnr/sdk/browser/browser_use_otel.py +9 -7
- lmnr/sdk/browser/patchright_otel.py +14 -26
- lmnr/sdk/browser/playwright_otel.py +72 -73
- lmnr/sdk/browser/pw_utils.py +436 -119
- lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
- lmnr/sdk/decorators.py +39 -4
- lmnr/sdk/evaluations.py +23 -9
- lmnr/sdk/laminar.py +181 -209
- lmnr/sdk/types.py +0 -6
- lmnr/version.py +1 -1
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/METADATA +10 -8
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/RECORD +45 -29
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/WHEEL +1 -1
- lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py

@@ -0,0 +1,229 @@
+import json
+
+from .utils import (
+    dont_throw,
+    model_as_dict,
+    set_span_attribute,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
+    GEN_AI_RESPONSE_ID,
+)
+from opentelemetry.semconv_ai import (
+    SpanAttributes,
+)
+
+CONTENT_FILTER_KEY = "content_filter_results"
+
+
+@dont_throw
+def set_input_attributes(span, kwargs):
+    if not span.is_recording():
+        return
+
+    if should_send_prompts():
+        if kwargs.get("prompt") is not None:
+            set_span_attribute(
+                span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
+            )
+
+        elif kwargs.get("messages") is not None:
+            for i, message in enumerate(kwargs.get("messages")):
+                set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
+                    _dump_content(message.get("content")),
+                )
+                set_span_attribute(
+                    span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", message.get("role")
+                )
+
+
+@dont_throw
+def set_model_input_attributes(span, kwargs):
+    if not span.is_recording():
+        return
+
+    set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
+    set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
+    )
+    set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
+    set_span_attribute(
+        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
+    )
+
+
+def set_streaming_response_attributes(
+    span, accumulated_content, finish_reason=None, usage=None
+):
+    """Set span attributes for accumulated streaming response."""
+    if not span.is_recording() or not should_send_prompts():
+        return
+
+    prefix = f"{SpanAttributes.LLM_COMPLETIONS}.0"
+    set_span_attribute(span, f"{prefix}.role", "assistant")
+    set_span_attribute(span, f"{prefix}.content", accumulated_content)
+    if finish_reason:
+        set_span_attribute(span, f"{prefix}.finish_reason", finish_reason)
+
+
+def set_model_streaming_response_attributes(span, usage):
+    if not span.is_recording():
+        return
+
+    if usage:
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.completion_tokens
+        )
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.prompt_tokens
+        )
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens
+        )
+
+
+@dont_throw
+def set_model_response_attributes(span, response, token_histogram):
+    if not span.is_recording():
+        return
+    response = model_as_dict(response)
+    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
+    set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))
+
+    usage = response.get("usage") or {}
+    prompt_tokens = usage.get("prompt_tokens")
+    completion_tokens = usage.get("completion_tokens")
+    if usage:
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
+        )
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
+        )
+        set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)
+
+    if (
+        isinstance(prompt_tokens, int)
+        and prompt_tokens >= 0
+        and token_histogram is not None
+    ):
+        token_histogram.record(
+            prompt_tokens,
+            attributes={
+                SpanAttributes.LLM_TOKEN_TYPE: "input",
+                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
+            },
+        )
+
+    if (
+        isinstance(completion_tokens, int)
+        and completion_tokens >= 0
+        and token_histogram is not None
+    ):
+        token_histogram.record(
+            completion_tokens,
+            attributes={
+                SpanAttributes.LLM_TOKEN_TYPE: "output",
+                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
+            },
+        )
+
+
+def set_response_attributes(span, response):
+    if not span.is_recording():
+        return
+    choices = model_as_dict(response).get("choices")
+    if should_send_prompts() and choices:
+        _set_completions(span, choices)
+
+
+def _set_completions(span, choices):
+    if choices is None or not should_send_prompts():
+        return
+
+    for choice in choices:
+        index = choice.get("index")
+        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+        set_span_attribute(span, f"{prefix}.finish_reason", choice.get("finish_reason"))
+
+        if choice.get("content_filter_results"):
+            set_span_attribute(
+                span,
+                f"{prefix}.{CONTENT_FILTER_KEY}",
+                json.dumps(choice.get("content_filter_results")),
+            )
+
+        if choice.get("finish_reason") == "content_filter":
+            set_span_attribute(span, f"{prefix}.role", "assistant")
+            set_span_attribute(span, f"{prefix}.content", "FILTERED")
+
+            return
+
+        message = choice.get("message")
+        if not message:
+            return
+
+        set_span_attribute(span, f"{prefix}.role", message.get("role"))
+        set_span_attribute(span, f"{prefix}.content", message.get("content"))
+
+        function_call = message.get("function_call")
+        if function_call:
+            set_span_attribute(
+                span, f"{prefix}.tool_calls.0.name", function_call.get("name")
+            )
+            set_span_attribute(
+                span,
+                f"{prefix}.tool_calls.0.arguments",
+                function_call.get("arguments"),
+            )
+
+        tool_calls = message.get("tool_calls")
+        if tool_calls:
+            for i, tool_call in enumerate(tool_calls):
+                function = tool_call.get("function")
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.id",
+                    tool_call.get("id"),
+                )
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.name",
+                    function.get("name"),
+                )
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.arguments",
+                    function.get("arguments"),
+                )
+
+
+def _dump_content(content):
+    if isinstance(content, str):
+        return content
+    json_serializable = []
+    for item in content:
+        if item.get("type") == "text":
+            json_serializable.append({"type": "text", "text": item.get("text")})
+        elif image_url := item.get("image_url"):
+            json_serializable.append(
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": image_url.get("url"),
+                        "detail": image_url.get("detail"),
+                    },
+                }
+            )
+    return json.dumps(json_serializable)
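The helpers above flatten chat messages into indexed span attributes. A minimal sketch of the resulting layout (not part of the diff; it assumes SpanAttributes.LLM_PROMPTS resolves to "gen_ai.prompt" as in opentelemetry-semconv-ai, and sets the attributes by hand purely for illustration):

# sketch: how a two-message chat request lands on a span
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("sketch")

with tracer.start_as_current_span("groq.chat") as span:
    # set_input_attributes writes one indexed role/content pair per message
    span.set_attribute("gen_ai.prompt.0.role", "system")
    span.set_attribute("gen_ai.prompt.0.content", "You are a helpful assistant.")
    span.set_attribute("gen_ai.prompt.1.role", "user")
    span.set_attribute("gen_ai.prompt.1.content", "Hello!")
    # set_response_attributes mirrors this under gen_ai.completion.<index>.*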
lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py

@@ -0,0 +1,92 @@
+import logging
+import os
+import traceback
+from importlib.metadata import version
+
+from opentelemetry import context as context_api
+from .config import Config
+from opentelemetry.semconv_ai import SpanAttributes
+
+GEN_AI_SYSTEM = "gen_ai.system"
+GEN_AI_SYSTEM_GROQ = "groq"
+
+_PYDANTIC_VERSION = version("pydantic")
+
+LMNR_TRACE_CONTENT = "LMNR_TRACE_CONTENT"
+
+
+def set_span_attribute(span, name, value):
+    if value is not None and value != "":
+        span.set_attribute(name, value)
+
+
+def should_send_prompts():
+    return (
+        os.getenv(LMNR_TRACE_CONTENT) or "true"
+    ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
+
+
+def dont_throw(func):
+    """
+    A decorator that wraps the passed in function and logs exceptions instead of throwing them.
+
+    @param func: The function to wrap
+    @return: The wrapper function
+    """
+    # Obtain a logger specific to the function's module
+    logger = logging.getLogger(func.__module__)
+
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception as e:
+            logger.debug(
+                "OpenLLMetry failed to trace in %s, error: %s",
+                func.__name__,
+                traceback.format_exc(),
+            )
+            if Config.exception_logger:
+                Config.exception_logger(e)
+
+    return wrapper
+
+
+@dont_throw
+def shared_metrics_attributes(response):
+    response_dict = model_as_dict(response)
+
+    common_attributes = Config.get_common_metrics_attributes()
+
+    return {
+        **common_attributes,
+        GEN_AI_SYSTEM: GEN_AI_SYSTEM_GROQ,
+        SpanAttributes.LLM_RESPONSE_MODEL: response_dict.get("model"),
+    }
+
+
+@dont_throw
+def error_metrics_attributes(exception):
+    return {
+        GEN_AI_SYSTEM: GEN_AI_SYSTEM_GROQ,
+        "error.type": exception.__class__.__name__,
+    }
+
+
+def model_as_dict(model):
+    if _PYDANTIC_VERSION < "2.0.0":
+        return model.dict()
+    if hasattr(model, "model_dump"):
+        return model.model_dump()
+    elif hasattr(model, "parse"):  # Raw API response
+        return model_as_dict(model.parse())
+    else:
+        return model
+
+
+def should_emit_events() -> bool:
+    """
+    Checks if the instrumentation isn't using the legacy attributes
+    and if the event logger is not None.
+    """

+    return not Config.use_legacy_attributes
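should_send_prompts() above defaults content capture to on; setting LMNR_TRACE_CONTENT to anything other than "true" turns it off (unless the override_enable_content_tracing context key is set). A small self-contained sketch of that default-on logic, with an illustrative stand-in function rather than the real helper:

import os

def content_tracing_enabled() -> bool:  # illustrative stand-in, not the real helper
    return (os.getenv("LMNR_TRACE_CONTENT") or "true").lower() == "true"

assert content_tracing_enabled()  # unset -> treated as "true"
os.environ["LMNR_TRACE_CONTENT"] = "false"
assert not content_tracing_enabled()  # explicitly disabled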
lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py

@@ -0,0 +1 @@
+__version__ = "0.41.0"
lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py

@@ -12,10 +12,7 @@ from langchain_core.runnables.graph import Graph
 from opentelemetry.trace import Tracer
 from wrapt import wrap_function_wrapper
 from opentelemetry.trace import get_tracer
-
-from lmnr.opentelemetry_lib.tracing.context_properties import (
-    update_association_properties,
-)
+from opentelemetry.context import get_value, attach, set_value

 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.instrumentation.utils import unwrap
@@ -45,12 +42,13 @@ def wrap_pregel_stream(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs)
         }
         for edge in graph.edges
     ]
-    update_association_properties(
-        {
-            "langgraph.edges": json.dumps(edges),
-            "langgraph.nodes": json.dumps(nodes),
-        }
-    )
+    d = {
+        "langgraph.edges": json.dumps(edges),
+        "langgraph.nodes": json.dumps(nodes),
+    }
+    association_properties = get_value("lmnr.langgraph.graph") or {}
+    association_properties.update(d)
+    attach(set_value("lmnr.langgraph.graph", association_properties))
     return wrapped(*args, **kwargs)


@@ -75,12 +73,14 @@ async def async_wrap_pregel_stream(
         }
         for edge in graph.edges
     ]
-    update_association_properties(
-        {
-            "langgraph.edges": json.dumps(edges),
-            "langgraph.nodes": json.dumps(nodes),
-        }
-    )
+
+    d = {
+        "langgraph.edges": json.dumps(edges),
+        "langgraph.nodes": json.dumps(nodes),
+    }
+    association_properties = get_value("lmnr.langgraph.graph") or {}
+    association_properties.update(d)
+    attach(set_value("lmnr.langgraph.graph", association_properties))

     async for item in wrapped(*args, **kwargs):
         yield item
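Both wrappers now store the graph structure in OpenTelemetry context instead of calling the removed update_association_properties helper. A minimal standalone sketch of that read-merge-reattach pattern, using only the opentelemetry-api context primitives the diff imports:

from opentelemetry.context import attach, get_value, set_value

# attach() makes the value visible to everything that runs later in this
# context, which is how the graph metadata can reach a span processor
attach(set_value("lmnr.langgraph.graph", {"langgraph.nodes": "[...]"}))

# a later writer merges rather than overwrites, exactly like the diff does
props = dict(get_value("lmnr.langgraph.graph") or {})
props["langgraph.edges"] = "[...]"
attach(set_value("lmnr.langgraph.graph", props))

assert get_value("lmnr.langgraph.graph")["langgraph.edges"] == "[...]"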
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py

@@ -39,6 +39,7 @@ from ..utils import (
     should_emit_events,
     should_send_prompts,
 )
+from lmnr.opentelemetry_lib.tracing.context import get_current_context
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.metrics import Counter, Histogram
 from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE

@@ -87,6 +88,7 @@ def chat_wrapper(
         SPAN_NAME,
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+        context=get_current_context(),
     )

     run_async(_handle_request(span, kwargs, instance))

@@ -184,6 +186,7 @@ async def achat_wrapper(
         SPAN_NAME,
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+        context=get_current_context(),
     )

     await _handle_request(span, kwargs, instance)
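The recurring one-line change in these wrappers threads lmnr's own context into span creation: OpenTelemetry's Tracer.start_span() accepts an explicit parent Context, so passing context=get_current_context() parents the LLM span under whatever lmnr's context module tracks instead of the implicit global context. A sketch of the mechanism with the stock API, where get_current() stands in for lmnr's get_current_context():

from opentelemetry import context, trace

tracer = trace.get_tracer("sketch")
parent_ctx = context.get_current()  # stand-in for lmnr's get_current_context()
span = tracer.start_span(
    "openai.chat",
    kind=trace.SpanKind.CLIENT,
    context=parent_ctx,  # explicit parent instead of the implicit current context
)
span.end()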
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py

@@ -27,6 +27,7 @@ from ..utils import (
     should_emit_events,
     should_send_prompts,
 )
+from lmnr.opentelemetry_lib.tracing.context import get_current_context
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
 from opentelemetry.semconv_ai import (

@@ -55,6 +56,7 @@ def completion_wrapper(tracer, wrapped, instance, args, kwargs):
         SPAN_NAME,
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+        context=get_current_context(),
     )

     _handle_request(span, kwargs, instance)

@@ -89,6 +91,7 @@ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
         name=SPAN_NAME,
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+        context=get_current_context(),
     )

     _handle_request(span, kwargs, instance)
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py

@@ -14,7 +14,7 @@ import openai

 _OPENAI_VERSION = version("openai")

-TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
+LMNR_TRACE_CONTENT = "LMNR_TRACE_CONTENT"


 def is_openai_v1():

@@ -28,7 +28,7 @@ def is_azure_openai(instance):


 def is_metrics_enabled() -> bool:
-    return
+    return False


 def should_record_stream_token_usage():

@@ -171,7 +171,7 @@ def run_async(method):

 def should_send_prompts():
     return (
-        os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
+        os.getenv(LMNR_TRACE_CONTENT) or "true"
     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")


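The is_metrics_enabled() fix is small but real: a bare return yields None, which is falsy but violates the -> bool annotation and surprises identity checks. A two-function illustration:

def before() -> bool:
    return  # evaluates to None: falsy, but not False

def after() -> bool:
    return False

assert before() is None
assert after() is False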
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py

@@ -17,6 +17,7 @@ from ..utils import (
     dont_throw,
     should_emit_events,
 )
+from lmnr.opentelemetry_lib.tracing.context import get_current_context
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
 from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes

@@ -126,6 +127,7 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
         start_time=run.get("start_time"),
+        context=get_current_context(),
     )

     if exception := run.get("exception"):

@@ -250,6 +252,7 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
         "openai.assistant.run_stream",
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
+        context=get_current_context(),
     )

     i = 0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py

@@ -36,6 +36,7 @@ except ImportError:
     ResponseOutputMessageParam = Dict[str, Any]
     RESPONSES_AVAILABLE = False

+from lmnr.opentelemetry_lib.tracing.context import get_current_context
 from openai._legacy_response import LegacyAPIResponse
 from opentelemetry import context as context_api
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY

@@ -429,6 +430,7 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
             start_time=(
                 start_time if traced_data is None else int(traced_data.start_time)
             ),
+            context=get_current_context(),
         )
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
         span.record_exception(e)

@@ -472,6 +474,7 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
             SPAN_NAME,
             kind=SpanKind.CLIENT,
             start_time=int(traced_data.start_time),
+            context=get_current_context(),
         )
         set_data_attributes(traced_data, span)
         span.end()

@@ -523,6 +526,7 @@ async def async_responses_get_or_create_wrapper(
             start_time=(
                 start_time if traced_data is None else int(traced_data.start_time)
             ),
+            context=get_current_context(),
         )
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
         span.record_exception(e)

@@ -566,6 +570,7 @@ async def async_responses_get_or_create_wrapper(
             SPAN_NAME,
             kind=SpanKind.CLIENT,
             start_time=int(traced_data.start_time),
+            context=get_current_context(),
         )
         set_data_attributes(traced_data, span)
         span.end()

@@ -590,6 +595,7 @@ def responses_cancel_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
         kind=SpanKind.CLIENT,
         start_time=existing_data.start_time,
         record_exception=True,
+        context=get_current_context(),
     )
     span.record_exception(Exception("Response cancelled"))
     set_data_attributes(existing_data, span)

@@ -616,6 +622,7 @@ async def async_responses_cancel_wrapper(
         kind=SpanKind.CLIENT,
         start_time=existing_data.start_time,
         record_exception=True,
+        context=get_current_context(),
     )
     span.record_exception(Exception("Response cancelled"))
     set_data_attributes(existing_data, span)
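These responses wrappers build spans retroactively from previously captured data, which is why each start_span call carries an explicit start_time (epoch nanoseconds) and, after this change, an explicit parent context as well. A sketch of that reconstruction pattern with the stock API; get_current() again stands in for lmnr's get_current_context():

import time
from opentelemetry import context, trace

tracer = trace.get_tracer("sketch")
started_at = time.time_ns()  # captured when the original request began
# ... the wrapped call runs to completion ...
span = tracer.start_span(
    "openai.response",
    kind=trace.SpanKind.CLIENT,
    start_time=started_at,  # backdate the span to the real start
    context=context.get_current(),  # stand-in for lmnr's get_current_context()
)
span.end()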