lmnr 0.6.20__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +0 -4
- lmnr/opentelemetry_lib/decorators/__init__.py +211 -151
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +678 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +4 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +7 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +190 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +90 -2
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
- lmnr/opentelemetry_lib/tracing/context.py +109 -0
- lmnr/opentelemetry_lib/tracing/processor.py +6 -7
- lmnr/opentelemetry_lib/tracing/tracer.py +29 -0
- lmnr/opentelemetry_lib/utils/package_check.py +9 -0
- lmnr/sdk/browser/browser_use_otel.py +9 -7
- lmnr/sdk/browser/patchright_otel.py +14 -26
- lmnr/sdk/browser/playwright_otel.py +72 -73
- lmnr/sdk/browser/pw_utils.py +436 -119
- lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
- lmnr/sdk/decorators.py +39 -4
- lmnr/sdk/evaluations.py +23 -9
- lmnr/sdk/laminar.py +181 -209
- lmnr/sdk/types.py +0 -6
- lmnr/version.py +1 -1
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/METADATA +10 -8
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/RECORD +45 -29
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/WHEEL +1 -1
- lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,488 @@
|
|
1
|
+
"""OpenTelemetry Groq instrumentation"""
|
2
|
+
|
3
|
+
import logging
|
4
|
+
import os
|
5
|
+
import time
|
6
|
+
from typing import Callable, Collection, Union
|
7
|
+
|
8
|
+
from opentelemetry import context as context_api
|
9
|
+
from opentelemetry._events import EventLogger, get_event_logger
|
10
|
+
from .config import Config
|
11
|
+
from .event_emitter import (
|
12
|
+
emit_choice_events,
|
13
|
+
emit_message_events,
|
14
|
+
emit_streaming_response_events,
|
15
|
+
)
|
16
|
+
from .span_utils import (
|
17
|
+
set_input_attributes,
|
18
|
+
set_model_input_attributes,
|
19
|
+
set_model_response_attributes,
|
20
|
+
set_model_streaming_response_attributes,
|
21
|
+
set_response_attributes,
|
22
|
+
set_streaming_response_attributes,
|
23
|
+
)
|
24
|
+
from .utils import (
|
25
|
+
error_metrics_attributes,
|
26
|
+
shared_metrics_attributes,
|
27
|
+
should_emit_events,
|
28
|
+
)
|
29
|
+
from .version import __version__
|
30
|
+
from lmnr.opentelemetry_lib.tracing.context import get_current_context
|
31
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
32
|
+
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
|
33
|
+
from opentelemetry.metrics import Counter, Histogram, Meter, get_meter
|
34
|
+
from opentelemetry.semconv_ai import (
|
35
|
+
SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
|
36
|
+
LLMRequestTypeValues,
|
37
|
+
Meters,
|
38
|
+
SpanAttributes,
|
39
|
+
)
|
40
|
+
from opentelemetry.trace import SpanKind, Tracer, get_tracer
|
41
|
+
from opentelemetry.trace.status import Status, StatusCode
|
42
|
+
from wrapt import wrap_function_wrapper
|
43
|
+
|
44
|
+
from groq._streaming import AsyncStream, Stream
|
45
|
+
|
46
|
+
logger = logging.getLogger(__name__)

# Minimum groq client version this instrumentation declares support for.
_instruments = ("groq >= 0.9.0",)


# Synchronous client entry points to patch: module path, class, method name,
# and the span name to use for the wrapped call.
WRAPPED_METHODS = [
    {
        "package": "groq.resources.chat.completions",
        "object": "Completions",
        "method": "create",
        "span_name": "groq.chat",
    },
]
# Asynchronous counterparts of WRAPPED_METHODS (wrapped with _awrap).
WRAPPED_AMETHODS = [
    {
        "package": "groq.resources.chat.completions",
        "object": "AsyncCompletions",
        "method": "create",
        "span_name": "groq.chat",
    },
]
|
67
|
+
|
68
|
+
|
69
|
+
def is_streaming_response(response):
    """Return True when *response* is a groq streaming object (sync or async)."""
    return isinstance(response, (Stream, AsyncStream))
|
71
|
+
|
72
|
+
|
73
|
+
def _with_chat_telemetry_wrapper(func):
    """Helper for providing tracer for wrapper functions. Includes metric collectors."""

    def _bind_telemetry(
        tracer,
        token_histogram,
        choice_counter,
        duration_histogram,
        event_logger,
        to_wrap,
    ):
        # Capture the telemetry handles once; wrapt then invokes the returned
        # wrapper with (wrapped, instance, args, kwargs) on every call.
        def telemetry_wrapper(wrapped, instance, args, kwargs):
            return func(
                tracer,
                token_histogram,
                choice_counter,
                duration_histogram,
                event_logger,
                to_wrap,
                wrapped,
                instance,
                args,
                kwargs,
            )

        return telemetry_wrapper

    return _bind_telemetry
|
101
|
+
|
102
|
+
|
103
|
+
def _create_metrics(meter: Meter):
    """Create the (token, choice, duration) instruments on *meter*.

    Returns a 3-tuple: token usage histogram, generation-choice counter and
    operation duration histogram, created in that order.
    """
    return (
        meter.create_histogram(
            name=Meters.LLM_TOKEN_USAGE,
            unit="token",
            description="Measures number of input and output tokens used",
        ),
        meter.create_counter(
            name=Meters.LLM_GENERATION_CHOICES,
            unit="choice",
            description="Number of choices returned by chat completions call",
        ),
        meter.create_histogram(
            name=Meters.LLM_OPERATION_DURATION,
            unit="s",
            description="GenAI operation duration",
        ),
    )
|
123
|
+
|
124
|
+
|
125
|
+
def _process_streaming_chunk(chunk):
    """Extract content, finish_reason and usage from a streaming chunk."""
    if not chunk.choices:
        return None, None, None

    first_choice = chunk.choices[0]
    content = getattr(first_choice.delta, "content", None)

    # Usage arrives on the vendor-specific ``x_groq`` payload, present only on
    # the final chunk of a stream.
    x_groq = getattr(chunk, "x_groq", None)
    usage = x_groq.usage if x_groq and x_groq.usage else None

    return content, first_choice.finish_reason, usage
|
140
|
+
|
141
|
+
|
142
|
+
def _handle_streaming_response(
    span, accumulated_content, finish_reason, usage, event_logger
):
    """Record the final state of a streamed completion on *span*."""
    set_model_streaming_response_attributes(span, usage)
    # Event emission and legacy span attributes are mutually exclusive modes.
    if not (should_emit_events() and event_logger):
        set_streaming_response_attributes(
            span, accumulated_content, finish_reason, usage
        )
    else:
        emit_streaming_response_events(accumulated_content, finish_reason, event_logger)
|
152
|
+
|
153
|
+
|
154
|
+
def _create_stream_processor(response, span, event_logger):
    """Create a generator that processes a stream while collecting telemetry."""
    parts = []
    last_finish_reason = None
    final_usage = None

    for chunk in response:
        piece, reason, chunk_usage = _process_streaming_chunk(chunk)
        if piece:
            parts.append(piece)
        if reason:
            last_finish_reason = reason
        if chunk_usage:
            final_usage = chunk_usage
        yield chunk

    # Stream exhausted: flush telemetry and close the span this generator owns.
    _handle_streaming_response(
        span, "".join(parts), last_finish_reason, final_usage, event_logger
    )

    if span.is_recording():
        span.set_status(Status(StatusCode.OK))

    span.end()
|
178
|
+
|
179
|
+
|
180
|
+
async def _create_async_stream_processor(response, span, event_logger):
    """Create an async generator that processes a stream while collecting telemetry."""
    parts = []
    last_finish_reason = None
    final_usage = None

    async for chunk in response:
        piece, reason, chunk_usage = _process_streaming_chunk(chunk)
        if piece:
            parts.append(piece)
        if reason:
            last_finish_reason = reason
        if chunk_usage:
            final_usage = chunk_usage
        yield chunk

    # Stream exhausted: flush telemetry and close the span this generator owns.
    _handle_streaming_response(
        span, "".join(parts), last_finish_reason, final_usage, event_logger
    )

    if span.is_recording():
        span.set_status(Status(StatusCode.OK))

    span.end()
|
204
|
+
|
205
|
+
|
206
|
+
def _handle_input(span, kwargs, event_logger):
    """Record request attributes and, depending on mode, the prompt payload."""
    set_model_input_attributes(span, kwargs)
    if not (should_emit_events() and event_logger):
        set_input_attributes(span, kwargs)
    else:
        emit_message_events(kwargs, event_logger)
|
212
|
+
|
213
|
+
|
214
|
+
def _handle_response(span, response, token_histogram, event_logger):
    """Record response attributes/metrics and, depending on mode, completions."""
    set_model_response_attributes(span, response, token_histogram)
    if not (should_emit_events() and event_logger):
        set_response_attributes(span, response)
    else:
        emit_choice_events(response, event_logger)
|
220
|
+
|
221
|
+
|
222
|
+
@_with_chat_telemetry_wrapper
def _wrap(
    tracer: Tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    event_logger: Union[EventLogger, None],
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Instruments and calls every function defined in TO_WRAP."""
    # Respect both the generic and the LLM-specific suppression context keys.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return wrapped(*args, **kwargs)

    name = to_wrap.get("span_name")
    # The span is started manually (not via a context manager) because for
    # streaming responses it must outlive this call: the stream-processor
    # generator ends it after the stream is consumed.
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "Groq",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
        context=get_current_context(),
    )

    _handle_input(span, kwargs, event_logger)

    start_time = time.time()
    try:
        response = wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        end_time = time.time()
        attributes = error_metrics_attributes(e)

        # Record duration even for failed calls so error latency is measured.
        if duration_histogram:
            duration = end_time - start_time
            duration_histogram.record(duration, attributes=attributes)

        raise e

    end_time = time.time()

    if is_streaming_response(response):
        try:
            # Hand the span off to the generator, which ends it on exhaustion.
            return _create_stream_processor(response, span, event_logger)
        except Exception as ex:
            logger.warning(
                "Failed to process streaming response for groq span, error: %s",
                str(ex),
            )
            span.set_status(Status(StatusCode.ERROR))
            span.end()
            raise
    elif response:
        try:
            metric_attributes = shared_metrics_attributes(response)

            if duration_histogram:
                duration = time.time() - start_time
                duration_histogram.record(
                    duration,
                    attributes=metric_attributes,
                )

            _handle_response(span, response, token_histogram, event_logger)

        except Exception as ex:  # pylint: disable=broad-except
            # Telemetry failures must never break the instrumented call.
            logger.warning(
                "Failed to set response attributes for groq span, error: %s",
                str(ex),
            )

    if span.is_recording():
        span.set_status(Status(StatusCode.OK))
    span.end()
    return response
|
303
|
+
|
304
|
+
|
305
|
+
@_with_chat_telemetry_wrapper
async def _awrap(
    tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    event_logger: Union[EventLogger, None],
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Instruments and calls every function defined in TO_WRAP.

    Async counterpart of ``_wrap``: awaits the wrapped coroutine and, for
    streaming responses, returns an async stream processor.
    """
    # Respect both the generic and the LLM-specific suppression context keys.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return await wrapped(*args, **kwargs)

    name = to_wrap.get("span_name")
    # Started manually so a streaming response can end the span later.
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "Groq",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
        context=get_current_context(),
    )

    _handle_input(span, kwargs, event_logger)

    start_time = time.time()

    try:
        response = await wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        end_time = time.time()
        attributes = error_metrics_attributes(e)

        # Record duration even for failed calls so error latency is measured.
        if duration_histogram:
            duration = end_time - start_time
            duration_histogram.record(duration, attributes=attributes)

        raise e

    end_time = time.time()

    if is_streaming_response(response):
        try:
            # BUG FIX: _create_async_stream_processor is an async *generator*
            # function — calling it returns an async iterator that must NOT be
            # awaited (``await`` on an async generator raises TypeError, which
            # previously broke every streaming call). The caller consumes it
            # with ``async for``; the generator ends the span itself.
            return _create_async_stream_processor(response, span, event_logger)
        except Exception as ex:
            logger.warning(
                "Failed to process streaming response for groq span, error: %s",
                str(ex),
            )
            span.set_status(Status(StatusCode.ERROR))
            span.end()
            raise
    elif response:
        try:
            metric_attributes = shared_metrics_attributes(response)

            if duration_histogram:
                duration = time.time() - start_time
                duration_histogram.record(
                    duration,
                    attributes=metric_attributes,
                )

            _handle_response(span, response, token_histogram, event_logger)
        except Exception as ex:  # pylint: disable=broad-except
            # Consistency with _wrap: telemetry failures must never break the
            # instrumented call.
            logger.warning(
                "Failed to set response attributes for groq span, error: %s",
                str(ex),
            )

    if span.is_recording():
        span.set_status(Status(StatusCode.OK))
    span.end()
    return response
|
380
|
+
|
381
|
+
|
382
|
+
def is_metrics_enabled() -> bool:
    """Return True unless TRACELOOP_METRICS_ENABLED explicitly disables metrics.

    An unset or empty variable counts as enabled; comparison is
    case-insensitive.
    """
    flag = os.getenv("TRACELOOP_METRICS_ENABLED")
    if not flag:
        flag = "true"
    return flag.lower() == "true"
|
384
|
+
|
385
|
+
|
386
|
+
class GroqInstrumentor(BaseInstrumentor):
    """An instrumentor for Groq's client library."""

    def __init__(
        self,
        enrich_token_usage: bool = False,
        exception_logger=None,
        use_legacy_attributes: bool = True,
        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
    ):
        # Settings are stored on the module-level Config object, so the most
        # recently constructed instrumentor wins.
        super().__init__()
        Config.exception_logger = exception_logger
        Config.enrich_token_usage = enrich_token_usage
        Config.get_common_metrics_attributes = get_common_metrics_attributes
        Config.use_legacy_attributes = use_legacy_attributes

    def instrumentation_dependencies(self) -> Collection[str]:
        # Declares the groq version range this instrumentation supports.
        return _instruments

    def _instrument(self, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        # meter and counters are inited here
        meter_provider = kwargs.get("meter_provider")
        meter = get_meter(__name__, __version__, meter_provider)

        if is_metrics_enabled():
            (
                token_histogram,
                choice_counter,
                duration_histogram,
            ) = _create_metrics(meter)
        else:
            (
                token_histogram,
                choice_counter,
                duration_histogram,
            ) = (None, None, None)

        # The event logger is only needed for the non-legacy "events" mode.
        event_logger = None
        if not Config.use_legacy_attributes:
            event_logger_provider = kwargs.get("event_logger_provider")
            event_logger = get_event_logger(
                __name__, __version__, event_logger_provider=event_logger_provider
            )

        # Patch the synchronous client methods.
        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")

            try:
                wrap_function_wrapper(
                    wrap_package,
                    f"{wrap_object}.{wrap_method}",
                    _wrap(
                        tracer,
                        token_histogram,
                        choice_counter,
                        duration_histogram,
                        event_logger,
                        wrapped_method,
                    ),
                )
            except ModuleNotFoundError:
                pass  # that's ok, we don't want to fail if some methods do not exist

        # Patch the asynchronous client methods.
        for wrapped_method in WRAPPED_AMETHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")
            try:
                wrap_function_wrapper(
                    wrap_package,
                    f"{wrap_object}.{wrap_method}",
                    _awrap(
                        tracer,
                        token_histogram,
                        choice_counter,
                        duration_histogram,
                        event_logger,
                        wrapped_method,
                    ),
                )
            except ModuleNotFoundError:
                pass  # that's ok, we don't want to fail if some methods do not exist

    def _uninstrument(self, **kwargs):
        # Remove the wrappers installed by _instrument.
        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            unwrap(
                f"{wrap_package}.{wrap_object}",
                wrapped_method.get("method"),
            )
        for wrapped_method in WRAPPED_AMETHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            unwrap(
                f"{wrap_package}.{wrap_object}",
                wrapped_method.get("method"),
            )
|
@@ -0,0 +1,143 @@
|
|
1
|
+
from dataclasses import asdict
|
2
|
+
from enum import Enum
|
3
|
+
from typing import Union
|
4
|
+
|
5
|
+
from opentelemetry._events import Event, EventLogger
|
6
|
+
from .event_models import ChoiceEvent, MessageEvent
|
7
|
+
from .utils import (
|
8
|
+
dont_throw,
|
9
|
+
should_emit_events,
|
10
|
+
should_send_prompts,
|
11
|
+
)
|
12
|
+
from opentelemetry.semconv._incubating.attributes import (
|
13
|
+
gen_ai_attributes as GenAIAttributes,
|
14
|
+
)
|
15
|
+
|
16
|
+
from groq.types.chat.chat_completion import ChatCompletion
|
17
|
+
|
18
|
+
|
19
|
+
class Roles(Enum):
    """Chat message roles recognized by the GenAI message-event naming scheme."""

    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    TOOL = "tool"
|
24
|
+
|
25
|
+
|
26
|
+
# Roles that map to a dedicated gen_ai.<role>.message event name.
VALID_MESSAGE_ROLES = {role.value for role in Roles}
"""The valid roles for naming the message event."""

EVENT_ATTRIBUTES = {
    # Should be GenAIAttributes.GenAiSystemValues.GROQ.value but it's not defined in the opentelemetry-semconv package
    GenAIAttributes.GEN_AI_SYSTEM: "groq"
}
"""The attributes to be used for the event."""
|
34
|
+
|
35
|
+
|
36
|
+
@dont_throw
def emit_message_events(kwargs: dict, event_logger):
    """Emit one message event per prompt message found in *kwargs*."""
    for message in kwargs.get("messages", []):
        event = MessageEvent(
            content=message.get("content"),
            role=message.get("role", "unknown"),
        )
        emit_event(event, event_logger=event_logger)
|
45
|
+
|
46
|
+
|
47
|
+
@dont_throw
def emit_choice_events(response: ChatCompletion, event_logger):
    """Emit one choice event per completion choice in *response*."""
    for choice in response.choices:
        message_body = {
            "content": choice.message.content,
            "role": choice.message.role or "unknown",
        }
        emit_event(
            ChoiceEvent(
                index=choice.index,
                message=message_body,
                finish_reason=choice.finish_reason,
            ),
            event_logger=event_logger,
        )
|
61
|
+
|
62
|
+
|
63
|
+
@dont_throw
def emit_streaming_response_events(
    accumulated_content: str, finish_reason: Union[str, None], event_logger
):
    """Emit events for streaming response."""
    # A stream yields a single (index 0) assistant message once fully consumed.
    choice = ChoiceEvent(
        index=0,
        message={"content": accumulated_content, "role": "assistant"},
        finish_reason=finish_reason if finish_reason else "unknown",
    )
    emit_event(choice, event_logger)
|
76
|
+
|
77
|
+
|
78
|
+
def emit_event(
    event: Union[MessageEvent, ChoiceEvent], event_logger: Union[EventLogger, None]
) -> None:
    """
    Emit an event to the OpenTelemetry SDK.

    Args:
        event: The event to emit.
    """
    # Nothing to do when event emission is disabled or no logger is configured.
    if not should_emit_events() or event_logger is None:
        return

    if isinstance(event, MessageEvent):
        _emit_message_event(event, event_logger)
        return
    if isinstance(event, ChoiceEvent):
        _emit_choice_event(event, event_logger)
        return
    raise TypeError("Unsupported event type")
|
96
|
+
|
97
|
+
|
98
|
+
def _emit_message_event(event: MessageEvent, event_logger: EventLogger) -> None:
    """Emit a prompt message as a gen_ai.<role>.message event."""
    payload = asdict(event)

    if event.role in VALID_MESSAGE_ROLES:
        name = "gen_ai.{}.message".format(event.role)
        # According to the semantic conventions, the role is conditionally
        # required if available and not equal to the role in the event name,
        # so drop it from the body when it is encoded in the name.
        payload.pop("role", None)
    else:
        # Unknown roles fall back to the user-message event name.
        name = "gen_ai.user.message"

    # Per the semantic conventions only the assistant role carries tool calls;
    # the field is also dropped when empty.
    if event.tool_calls is None or event.role != Roles.ASSISTANT.value:
        payload.pop("tool_calls", None)

    if not should_send_prompts():
        # Content capture disabled: strip message content and call arguments.
        del payload["content"]
        for call in payload.get("tool_calls") or []:
            call["function"].pop("arguments", None)

    event_logger.emit(Event(name=name, body=payload, attributes=EVENT_ATTRIBUTES))
|
123
|
+
|
124
|
+
|
125
|
+
def _emit_choice_event(event: ChoiceEvent, event_logger: EventLogger) -> None:
    """Emit a completion choice as a gen_ai.choice event."""
    payload = asdict(event)

    if event.message["role"] == Roles.ASSISTANT.value:
        # According to the semantic conventions the role is conditionally
        # required if available and not equal to "assistant", so omit it when
        # it is "assistant".
        payload["message"].pop("role", None)

    if event.tool_calls is None:
        del payload["tool_calls"]

    if not should_send_prompts():
        # Content capture disabled: strip completion text and call arguments.
        payload["message"].pop("content", None)
        for call in payload.get("tool_calls") or []:
            call["function"].pop("arguments", None)

    event_logger.emit(
        Event(name="gen_ai.choice", body=payload, attributes=EVENT_ATTRIBUTES)
    )
|
@@ -0,0 +1,41 @@
|
|
1
|
+
from dataclasses import dataclass
|
2
|
+
from typing import Any, List, Literal, Optional, TypedDict
|
3
|
+
|
4
|
+
|
5
|
+
class _FunctionToolCall(TypedDict):
    """Function name and arguments of a single tool call."""

    function_name: str
    # None when arguments are absent or have been redacted.
    arguments: Optional[dict[str, Any]]
|
8
|
+
|
9
|
+
|
10
|
+
class ToolCall(TypedDict):
    """Represents a tool call in the AI model."""

    # Provider-assigned identifier correlating the call with its result.
    id: str
    function: _FunctionToolCall
    # Only "function"-type tool calls are modeled here.
    type: Literal["function"]
|
16
|
+
|
17
|
+
|
18
|
+
class CompletionMessage(TypedDict):
    """Represents a message in the AI model."""

    content: Any
    # NOTE(review): TypedDict fields do not support default values — type
    # checkers reject this assignment and it has no effect on instances;
    # confirm and consider dropping the "= \"assistant\"" default.
    role: str = "assistant"
|
23
|
+
|
24
|
+
|
25
|
+
@dataclass
class MessageEvent:
    """Represents an input event for the AI model."""

    # Prompt content; shape depends on the provider payload.
    content: Any
    role: str = "user"
    # Per the emitter logic, only meaningful for assistant-role messages.
    tool_calls: Optional[List[ToolCall]] = None
|
32
|
+
|
33
|
+
|
34
|
+
@dataclass
class ChoiceEvent:
    """Represents a completion event for the AI model."""

    # Position of this choice in the provider's choices list.
    index: int
    message: CompletionMessage
    finish_reason: str = "unknown"
    tool_calls: Optional[List[ToolCall]] = None
|