lmnr-0.6.19-py3-none-any.whl → lmnr-0.6.21-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. lmnr/opentelemetry_lib/decorators/__init__.py +188 -138
  2. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +674 -0
  3. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +485 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
  18. lmnr/opentelemetry_lib/tracing/__init__.py +1 -1
  19. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
  20. lmnr/opentelemetry_lib/tracing/processor.py +1 -1
  21. lmnr/opentelemetry_lib/utils/package_check.py +9 -0
  22. lmnr/sdk/browser/browser_use_otel.py +4 -2
  23. lmnr/sdk/browser/patchright_otel.py +0 -26
  24. lmnr/sdk/browser/playwright_otel.py +51 -78
  25. lmnr/sdk/browser/pw_utils.py +359 -114
  26. lmnr/sdk/client/asynchronous/async_client.py +13 -0
  27. lmnr/sdk/client/asynchronous/resources/__init__.py +2 -0
  28. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  29. lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
  30. lmnr/sdk/client/synchronous/resources/__init__.py +2 -1
  31. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  32. lmnr/sdk/client/synchronous/resources/tags.py +4 -10
  33. lmnr/sdk/client/synchronous/sync_client.py +14 -0
  34. lmnr/sdk/decorators.py +39 -4
  35. lmnr/sdk/evaluations.py +23 -9
  36. lmnr/sdk/laminar.py +75 -48
  37. lmnr/sdk/utils.py +23 -0
  38. lmnr/version.py +1 -1
  39. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/METADATA +8 -7
  40. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/RECORD +42 -25
  41. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/WHEEL +1 -1
  42. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py
@@ -0,0 +1,485 @@
+"""OpenTelemetry Groq instrumentation"""
+
+import logging
+import os
+import time
+from typing import Callable, Collection, Union
+
+from opentelemetry import context as context_api
+from opentelemetry._events import EventLogger, get_event_logger
+from .config import Config
+from .event_emitter import (
+    emit_choice_events,
+    emit_message_events,
+    emit_streaming_response_events,
+)
+from .span_utils import (
+    set_input_attributes,
+    set_model_input_attributes,
+    set_model_response_attributes,
+    set_model_streaming_response_attributes,
+    set_response_attributes,
+    set_streaming_response_attributes,
+)
+from .utils import (
+    error_metrics_attributes,
+    shared_metrics_attributes,
+    should_emit_events,
+)
+from .version import __version__
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
+from opentelemetry.metrics import Counter, Histogram, Meter, get_meter
+from opentelemetry.semconv_ai import (
+    SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+    LLMRequestTypeValues,
+    Meters,
+    SpanAttributes,
+)
+from opentelemetry.trace import SpanKind, Tracer, get_tracer
+from opentelemetry.trace.status import Status, StatusCode
+from wrapt import wrap_function_wrapper
+
+from groq._streaming import AsyncStream, Stream
+
+logger = logging.getLogger(__name__)
+
+_instruments = ("groq >= 0.9.0",)
+
+
+WRAPPED_METHODS = [
+    {
+        "package": "groq.resources.chat.completions",
+        "object": "Completions",
+        "method": "create",
+        "span_name": "groq.chat",
+    },
+]
+WRAPPED_AMETHODS = [
+    {
+        "package": "groq.resources.chat.completions",
+        "object": "AsyncCompletions",
+        "method": "create",
+        "span_name": "groq.chat",
+    },
+]
+
+
+def is_streaming_response(response):
+    return isinstance(response, Stream) or isinstance(response, AsyncStream)
+
+
+def _with_chat_telemetry_wrapper(func):
+    """Helper for providing tracer for wrapper functions. Includes metric collectors."""
+
+    def _with_chat_telemetry(
+        tracer,
+        token_histogram,
+        choice_counter,
+        duration_histogram,
+        event_logger,
+        to_wrap,
+    ):
+        def wrapper(wrapped, instance, args, kwargs):
+            return func(
+                tracer,
+                token_histogram,
+                choice_counter,
+                duration_histogram,
+                event_logger,
+                to_wrap,
+                wrapped,
+                instance,
+                args,
+                kwargs,
+            )
+
+        return wrapper
+
+    return _with_chat_telemetry
+
+
+def _create_metrics(meter: Meter):
+    token_histogram = meter.create_histogram(
+        name=Meters.LLM_TOKEN_USAGE,
+        unit="token",
+        description="Measures number of input and output tokens used",
+    )
+
+    choice_counter = meter.create_counter(
+        name=Meters.LLM_GENERATION_CHOICES,
+        unit="choice",
+        description="Number of choices returned by chat completions call",
+    )
+
+    duration_histogram = meter.create_histogram(
+        name=Meters.LLM_OPERATION_DURATION,
+        unit="s",
+        description="GenAI operation duration",
+    )
+
+    return token_histogram, choice_counter, duration_histogram
+
+
+def _process_streaming_chunk(chunk):
+    """Extract content, finish_reason and usage from a streaming chunk."""
+    if not chunk.choices:
+        return None, None, None
+
+    delta = chunk.choices[0].delta
+    content = delta.content if hasattr(delta, "content") else None
+    finish_reason = chunk.choices[0].finish_reason
+
+    # Extract usage from x_groq if present in the final chunk
+    usage = None
+    if hasattr(chunk, "x_groq") and chunk.x_groq and chunk.x_groq.usage:
+        usage = chunk.x_groq.usage
+
+    return content, finish_reason, usage
+
+
+def _handle_streaming_response(
+    span, accumulated_content, finish_reason, usage, event_logger
+):
+    set_model_streaming_response_attributes(span, usage)
+    if should_emit_events() and event_logger:
+        emit_streaming_response_events(accumulated_content, finish_reason, event_logger)
+    else:
+        set_streaming_response_attributes(
+            span, accumulated_content, finish_reason, usage
+        )
+
+
+def _create_stream_processor(response, span, event_logger):
+    """Create a generator that processes a stream while collecting telemetry."""
+    accumulated_content = ""
+    finish_reason = None
+    usage = None
+
+    for chunk in response:
+        content, chunk_finish_reason, chunk_usage = _process_streaming_chunk(chunk)
+        if content:
+            accumulated_content += content
+        if chunk_finish_reason:
+            finish_reason = chunk_finish_reason
+        if chunk_usage:
+            usage = chunk_usage
+        yield chunk
+
+    _handle_streaming_response(
+        span, accumulated_content, finish_reason, usage, event_logger
+    )
+
+    if span.is_recording():
+        span.set_status(Status(StatusCode.OK))
+
+    span.end()
+
+
+async def _create_async_stream_processor(response, span, event_logger):
+    """Create an async generator that processes a stream while collecting telemetry."""
+    accumulated_content = ""
+    finish_reason = None
+    usage = None
+
+    async for chunk in response:
+        content, chunk_finish_reason, chunk_usage = _process_streaming_chunk(chunk)
+        if content:
+            accumulated_content += content
+        if chunk_finish_reason:
+            finish_reason = chunk_finish_reason
+        if chunk_usage:
+            usage = chunk_usage
+        yield chunk
+
+    _handle_streaming_response(
+        span, accumulated_content, finish_reason, usage, event_logger
+    )
+
+    if span.is_recording():
+        span.set_status(Status(StatusCode.OK))
+
+    span.end()
+
+
+def _handle_input(span, kwargs, event_logger):
+    set_model_input_attributes(span, kwargs)
+    if should_emit_events() and event_logger:
+        emit_message_events(kwargs, event_logger)
+    else:
+        set_input_attributes(span, kwargs)
+
+
+def _handle_response(span, response, token_histogram, event_logger):
+    set_model_response_attributes(span, response, token_histogram)
+    if should_emit_events() and event_logger:
+        emit_choice_events(response, event_logger)
+    else:
+        set_response_attributes(span, response)
+
+
+@_with_chat_telemetry_wrapper
+def _wrap(
+    tracer: Tracer,
+    token_histogram: Histogram,
+    choice_counter: Counter,
+    duration_histogram: Histogram,
+    event_logger: Union[EventLogger, None],
+    to_wrap,
+    wrapped,
+    instance,
+    args,
+    kwargs,
+):
+    """Instruments and calls every function defined in TO_WRAP."""
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+    ):
+        return wrapped(*args, **kwargs)
+
+    name = to_wrap.get("span_name")
+    span = tracer.start_span(
+        name,
+        kind=SpanKind.CLIENT,
+        attributes={
+            SpanAttributes.LLM_SYSTEM: "Groq",
+            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+        },
+    )
+
+    _handle_input(span, kwargs, event_logger)
+
+    start_time = time.time()
+    try:
+        response = wrapped(*args, **kwargs)
+    except Exception as e:  # pylint: disable=broad-except
+        end_time = time.time()
+        attributes = error_metrics_attributes(e)
+
+        if duration_histogram:
+            duration = end_time - start_time
+            duration_histogram.record(duration, attributes=attributes)
+
+        raise e
+
+    end_time = time.time()
+
+    if is_streaming_response(response):
+        try:
+            return _create_stream_processor(response, span, event_logger)
+        except Exception as ex:
+            logger.warning(
+                "Failed to process streaming response for groq span, error: %s",
+                str(ex),
+            )
+            span.set_status(Status(StatusCode.ERROR))
+            span.end()
+            raise
+    elif response:
+        try:
+            metric_attributes = shared_metrics_attributes(response)
+
+            if duration_histogram:
+                duration = time.time() - start_time
+                duration_histogram.record(
+                    duration,
+                    attributes=metric_attributes,
+                )
+
+            _handle_response(span, response, token_histogram, event_logger)
+
+        except Exception as ex:  # pylint: disable=broad-except
+            logger.warning(
+                "Failed to set response attributes for groq span, error: %s",
+                str(ex),
+            )
+
+    if span.is_recording():
+        span.set_status(Status(StatusCode.OK))
+    span.end()
+    return response
+
+
+@_with_chat_telemetry_wrapper
+async def _awrap(
+    tracer,
+    token_histogram: Histogram,
+    choice_counter: Counter,
+    duration_histogram: Histogram,
+    event_logger: Union[EventLogger, None],
+    to_wrap,
+    wrapped,
+    instance,
+    args,
+    kwargs,
+):
+    """Instruments and calls every function defined in TO_WRAP."""
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+    ):
+        return await wrapped(*args, **kwargs)
+
+    name = to_wrap.get("span_name")
+    span = tracer.start_span(
+        name,
+        kind=SpanKind.CLIENT,
+        attributes={
+            SpanAttributes.LLM_SYSTEM: "Groq",
+            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+        },
+    )
+
+    _handle_input(span, kwargs, event_logger)
+
+    start_time = time.time()
+
+    try:
+        response = await wrapped(*args, **kwargs)
+    except Exception as e:  # pylint: disable=broad-except
+        end_time = time.time()
+        attributes = error_metrics_attributes(e)
+
+        if duration_histogram:
+            duration = end_time - start_time
+            duration_histogram.record(duration, attributes=attributes)
+
+        raise e
+
+    end_time = time.time()
+
+    if is_streaming_response(response):
+        try:
+            return await _create_async_stream_processor(response, span, event_logger)
+        except Exception as ex:
+            logger.warning(
+                "Failed to process streaming response for groq span, error: %s",
+                str(ex),
+            )
+            span.set_status(Status(StatusCode.ERROR))
+            span.end()
+            raise
+    elif response:
+        metric_attributes = shared_metrics_attributes(response)
+
+        if duration_histogram:
+            duration = time.time() - start_time
+            duration_histogram.record(
+                duration,
+                attributes=metric_attributes,
+            )
+
+        _handle_response(span, response, token_histogram, event_logger)
+
+    if span.is_recording():
+        span.set_status(Status(StatusCode.OK))
+    span.end()
+    return response
+
+
+def is_metrics_enabled() -> bool:
+    return (os.getenv("TRACELOOP_METRICS_ENABLED") or "true").lower() == "true"
+
+
+class GroqInstrumentor(BaseInstrumentor):
+    """An instrumentor for Groq's client library."""
+
+    def __init__(
+        self,
+        enrich_token_usage: bool = False,
+        exception_logger=None,
+        use_legacy_attributes: bool = True,
+        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
+    ):
+        super().__init__()
+        Config.exception_logger = exception_logger
+        Config.enrich_token_usage = enrich_token_usage
+        Config.get_common_metrics_attributes = get_common_metrics_attributes
+        Config.use_legacy_attributes = use_legacy_attributes
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, __version__, tracer_provider)
+
+        # meter and counters are inited here
+        meter_provider = kwargs.get("meter_provider")
+        meter = get_meter(__name__, __version__, meter_provider)
+
+        if is_metrics_enabled():
+            (
+                token_histogram,
+                choice_counter,
+                duration_histogram,
+            ) = _create_metrics(meter)
+        else:
+            (
+                token_histogram,
+                choice_counter,
+                duration_histogram,
+            ) = (None, None, None)
+
+        event_logger = None
+        if not Config.use_legacy_attributes:
+            event_logger_provider = kwargs.get("event_logger_provider")
+            event_logger = get_event_logger(
+                __name__, __version__, event_logger_provider=event_logger_provider
+            )
+
+        for wrapped_method in WRAPPED_METHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            wrap_method = wrapped_method.get("method")
+
+            try:
+                wrap_function_wrapper(
+                    wrap_package,
+                    f"{wrap_object}.{wrap_method}",
+                    _wrap(
+                        tracer,
+                        token_histogram,
+                        choice_counter,
+                        duration_histogram,
+                        event_logger,
+                        wrapped_method,
+                    ),
+                )
+            except ModuleNotFoundError:
+                pass  # that's ok, we don't want to fail if some methods do not exist
+
+        for wrapped_method in WRAPPED_AMETHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            wrap_method = wrapped_method.get("method")
+            try:
+                wrap_function_wrapper(
+                    wrap_package,
+                    f"{wrap_object}.{wrap_method}",
+                    _awrap(
+                        tracer,
+                        token_histogram,
+                        choice_counter,
+                        duration_histogram,
+                        event_logger,
+                        wrapped_method,
+                    ),
+                )
+            except ModuleNotFoundError:
+                pass  # that's ok, we don't want to fail if some methods do not exist
+
+    def _uninstrument(self, **kwargs):
+        for wrapped_method in WRAPPED_METHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            unwrap(
+                f"{wrap_package}.{wrap_object}",
+                wrapped_method.get("method"),
+            )
+        for wrapped_method in WRAPPED_AMETHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            unwrap(
+                f"{wrap_package}.{wrap_object}",
+                wrapped_method.get("method"),
+            )
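
The instrumentor follows the standard BaseInstrumentor lifecycle: _instrument wraps Completions.create and AsyncCompletions.create via wrapt, so every chat call produces a "groq.chat" CLIENT span. A minimal usage sketch (not part of this diff; the exporter setup and the example request are illustrative assumptions):

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.groq import GroqInstrumentor

    # Standard OTel SDK wiring; ConsoleSpanExporter is for demonstration only.
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
    trace.set_tracer_provider(provider)

    # Patches groq.resources.chat.completions.{Completions,AsyncCompletions}.create.
    GroqInstrumentor().instrument(tracer_provider=provider)

    import groq  # the instrumentation declares "groq >= 0.9.0"

    client = groq.Groq()  # assumes GROQ_API_KEY is set in the environment
    client.chat.completions.create(
        model="llama-3.1-8b-instant",  # illustrative model name
        messages=[{"role": "user", "content": "Hello"}],
    )
    # The call is traced as a "groq.chat" span; streaming responses are wrapped
    # by _create_stream_processor so the span ends only after the last chunk.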
lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py
@@ -0,0 +1,8 @@
+from typing import Callable
+
+
+class Config:
+    enrich_token_usage = False
+    exception_logger = None
+    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
+    use_legacy_attributes = True
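
Config is plain class-level state: GroqInstrumentor.__init__ (above) writes to it, and the module's helpers read it back. A small sketch; should_emit_events() lives in utils.py, which is not shown in this section, so the closing comment is an assumption based on the branching in _handle_input:

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.groq import GroqInstrumentor
    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.groq.config import Config

    GroqInstrumentor(use_legacy_attributes=False)  # the constructor mutates Config
    assert Config.use_legacy_attributes is False
    # Presumably should_emit_events() keys off this flag, switching _handle_input
    # and _handle_response from span attributes to gen_ai.* events.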
lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py
@@ -0,0 +1,143 @@
+from dataclasses import asdict
+from enum import Enum
+from typing import Union
+
+from opentelemetry._events import Event, EventLogger
+from .event_models import ChoiceEvent, MessageEvent
+from .utils import (
+    dont_throw,
+    should_emit_events,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+
+from groq.types.chat.chat_completion import ChatCompletion
+
+
+class Roles(Enum):
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    TOOL = "tool"
+
+
+VALID_MESSAGE_ROLES = {role.value for role in Roles}
+"""The valid roles for naming the message event."""
+
+EVENT_ATTRIBUTES = {
+    # Should be GenAIAttributes.GenAiSystemValues.GROQ.value, but it's not defined in the opentelemetry-semconv package
+    GenAIAttributes.GEN_AI_SYSTEM: "groq"
+}
+"""The attributes to be used for the event."""
+
+
+@dont_throw
+def emit_message_events(kwargs: dict, event_logger):
+    for message in kwargs.get("messages", []):
+        emit_event(
+            MessageEvent(
+                content=message.get("content"), role=message.get("role", "unknown")
+            ),
+            event_logger=event_logger,
+        )
+
+
+@dont_throw
+def emit_choice_events(response: ChatCompletion, event_logger):
+    for choice in response.choices:
+        emit_event(
+            ChoiceEvent(
+                index=choice.index,
+                message={
+                    "content": choice.message.content,
+                    "role": choice.message.role or "unknown",
+                },
+                finish_reason=choice.finish_reason,
+            ),
+            event_logger=event_logger,
+        )
+
+
+@dont_throw
+def emit_streaming_response_events(
+    accumulated_content: str, finish_reason: Union[str, None], event_logger
+):
+    """Emit events for streaming response."""
+    emit_event(
+        ChoiceEvent(
+            index=0,
+            message={"content": accumulated_content, "role": "assistant"},
+            finish_reason=finish_reason or "unknown",
+        ),
+        event_logger,
+    )
+
+
+def emit_event(
+    event: Union[MessageEvent, ChoiceEvent], event_logger: Union[EventLogger, None]
+) -> None:
+    """
+    Emit an event to the OpenTelemetry SDK.
+
+    Args:
+        event: The event to emit.
+    """
+    if not should_emit_events() or event_logger is None:
+        return
+
+    if isinstance(event, MessageEvent):
+        _emit_message_event(event, event_logger)
+    elif isinstance(event, ChoiceEvent):
+        _emit_choice_event(event, event_logger)
+    else:
+        raise TypeError("Unsupported event type")
+
+
+def _emit_message_event(event: MessageEvent, event_logger: EventLogger) -> None:
+    body = asdict(event)
+
+    if event.role in VALID_MESSAGE_ROLES:
+        name = "gen_ai.{}.message".format(event.role)
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to the "role" in the message name. So, remove the role from the body if
+        # it is the same as the one in the event name.
+        body.pop("role", None)
+    else:
+        name = "gen_ai.user.message"
+
+    # According to the semantic conventions, only the assistant role has tool calls
+    if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
+        del body["tool_calls"]
+    elif event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        del body["content"]
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
+
+
+def _emit_choice_event(event: ChoiceEvent, event_logger: EventLogger) -> None:
+    body = asdict(event)
+    if event.message["role"] == Roles.ASSISTANT.value:
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to "assistant", so remove the role from the body if it is "assistant".
+        body["message"].pop("role", None)
+
+    if event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        body["message"].pop("content", None)
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    event_logger.emit(
+        Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
+    )
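
A sketch of driving the emitter in isolation (assumes an SDK event logger provider is configured; MessageEvent and ChoiceEvent come from event_models.py, shown next):

    from opentelemetry._events import get_event_logger

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.groq.event_emitter import (
        emit_event,
    )
    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.groq.event_models import (
        ChoiceEvent,
        MessageEvent,
    )

    event_logger = get_event_logger("groq-demo")  # hypothetical logger name

    # Routed to _emit_message_event and emitted as "gen_ai.user.message".
    emit_event(MessageEvent(content="What is 2 + 2?", role="user"), event_logger)

    # Routed to _emit_choice_event and emitted as "gen_ai.choice"; the
    # "assistant" role is dropped from the body per the semantic conventions.
    emit_event(
        ChoiceEvent(
            index=0,
            message={"content": "4", "role": "assistant"},
            finish_reason="stop",
        ),
        event_logger,
    )
    # Both calls are no-ops unless should_emit_events() returns True.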
lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py
@@ -0,0 +1,41 @@
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional, TypedDict
+
+
+class _FunctionToolCall(TypedDict):
+    function_name: str
+    arguments: Optional[dict[str, Any]]
+
+
+class ToolCall(TypedDict):
+    """Represents a tool call in the AI model."""
+
+    id: str
+    function: _FunctionToolCall
+    type: Literal["function"]
+
+
+class CompletionMessage(TypedDict):
+    """Represents a message in the AI model."""
+
+    content: Any
+    role: str = "assistant"
+
+
+@dataclass
+class MessageEvent:
+    """Represents an input event for the AI model."""
+
+    content: Any
+    role: str = "user"
+    tool_calls: Optional[List[ToolCall]] = None
+
+
+@dataclass
+class ChoiceEvent:
+    """Represents a completion event for the AI model."""
+
+    index: int
+    message: CompletionMessage
+    finish_reason: str = "unknown"
+    tool_calls: Optional[List[ToolCall]] = None
+ tool_calls: Optional[List[ToolCall]] = None