opentelemetry-instrumentation-groq 0.40.13__py3-none-any.whl → 0.41.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of opentelemetry-instrumentation-groq might be problematic.

opentelemetry/instrumentation/groq/__init__.py

@@ -1,44 +1,51 @@
 """OpenTelemetry Groq instrumentation"""

-import json
 import logging
 import os
 import time
-from typing import Callable, Collection
+from typing import Callable, Collection, Union

-from groq._streaming import AsyncStream, Stream
 from opentelemetry import context as context_api
+from opentelemetry._events import EventLogger, get_event_logger
 from opentelemetry.instrumentation.groq.config import Config
+from opentelemetry.instrumentation.groq.event_emitter import (
+    emit_choice_events,
+    emit_message_events,
+    emit_streaming_response_events,
+)
+from opentelemetry.instrumentation.groq.span_utils import (
+    set_input_attributes,
+    set_model_input_attributes,
+    set_model_response_attributes,
+    set_model_streaming_response_attributes,
+    set_response_attributes,
+    set_streaming_response_attributes,
+)
 from opentelemetry.instrumentation.groq.utils import (
-    dont_throw,
     error_metrics_attributes,
-    model_as_dict,
-    set_span_attribute,
     shared_metrics_attributes,
-    should_send_prompts,
+    should_emit_events,
 )
 from opentelemetry.instrumentation.groq.version import __version__
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
 from opentelemetry.metrics import Counter, Histogram, Meter, get_meter
-from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
-    GEN_AI_RESPONSE_ID,
-)
 from opentelemetry.semconv_ai import (
     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
     LLMRequestTypeValues,
-    SpanAttributes,
     Meters,
+    SpanAttributes,
 )
 from opentelemetry.trace import SpanKind, Tracer, get_tracer
 from opentelemetry.trace.status import Status, StatusCode
 from wrapt import wrap_function_wrapper

+from groq._streaming import AsyncStream, Stream
+
 logger = logging.getLogger(__name__)

 _instruments = ("groq >= 0.9.0",)

-CONTENT_FILTER_KEY = "content_filter_results"

 WRAPPED_METHODS = [
     {
@@ -62,187 +69,6 @@ def is_streaming_response(response):
     return isinstance(response, Stream) or isinstance(response, AsyncStream)


-def _dump_content(content):
-    if isinstance(content, str):
-        return content
-    json_serializable = []
-    for item in content:
-        if item.get("type") == "text":
-            json_serializable.append({"type": "text", "text": item.get("text")})
-        elif item.get("type") == "image":
-            json_serializable.append(
-                {
-                    "type": "image",
-                    "source": {
-                        "type": item.get("source").get("type"),
-                        "media_type": item.get("source").get("media_type"),
-                        "data": str(item.get("source").get("data")),
-                    },
-                }
-            )
-    return json.dumps(json_serializable)
-
-
-@dont_throw
-def _set_input_attributes(span, kwargs):
-    set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
-    set_span_attribute(
-        span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample")
-    )
-    set_span_attribute(
-        span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
-    )
-    set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
-    set_span_attribute(
-        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
-    )
-    set_span_attribute(
-        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
-    )
-    set_span_attribute(
-        span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
-    )
-
-    if should_send_prompts():
-        if kwargs.get("prompt") is not None:
-            set_span_attribute(
-                span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
-            )
-
-        elif kwargs.get("messages") is not None:
-            for i, message in enumerate(kwargs.get("messages")):
-                set_span_attribute(
-                    span,
-                    f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
-                    _dump_content(message.get("content")),
-                )
-                set_span_attribute(
-                    span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", message.get("role")
-                )
-
-
-def _set_completions(span, choices):
-    if choices is None:
-        return
-
-    for choice in choices:
-        index = choice.get("index")
-        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
-        set_span_attribute(span, f"{prefix}.finish_reason", choice.get("finish_reason"))
-
-        if choice.get("content_filter_results"):
-            set_span_attribute(
-                span,
-                f"{prefix}.{CONTENT_FILTER_KEY}",
-                json.dumps(choice.get("content_filter_results")),
-            )
-
-        if choice.get("finish_reason") == "content_filter":
-            set_span_attribute(span, f"{prefix}.role", "assistant")
-            set_span_attribute(span, f"{prefix}.content", "FILTERED")
-
-            return
-
-        message = choice.get("message")
-        if not message:
-            return
-
-        set_span_attribute(span, f"{prefix}.role", message.get("role"))
-        set_span_attribute(span, f"{prefix}.content", message.get("content"))
-
-        function_call = message.get("function_call")
-        if function_call:
-            set_span_attribute(
-                span, f"{prefix}.tool_calls.0.name", function_call.get("name")
-            )
-            set_span_attribute(
-                span,
-                f"{prefix}.tool_calls.0.arguments",
-                function_call.get("arguments"),
-            )
-
-        tool_calls = message.get("tool_calls")
-        if tool_calls:
-            for i, tool_call in enumerate(tool_calls):
-                function = tool_call.get("function")
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.{i}.id",
-                    tool_call.get("id"),
-                )
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.{i}.name",
-                    function.get("name"),
-                )
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.{i}.arguments",
-                    function.get("arguments"),
-                )
-
-
-@dont_throw
-def _set_response_attributes(span, response, token_histogram):
-    response = model_as_dict(response)
-    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
-    set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))
-
-    usage = response.get("usage") or {}
-    prompt_tokens = usage.get("prompt_tokens")
-    completion_tokens = usage.get("completion_tokens")
-    if usage:
-        set_span_attribute(
-            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
-        )
-        set_span_attribute(
-            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
-        )
-        set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)
-
-    if (
-        isinstance(prompt_tokens, int)
-        and prompt_tokens >= 0
-        and token_histogram is not None
-    ):
-        token_histogram.record(
-            prompt_tokens,
-            attributes={
-                SpanAttributes.LLM_TOKEN_TYPE: "input",
-                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
-            },
-        )
-
-    if (
-        isinstance(completion_tokens, int)
-        and completion_tokens >= 0
-        and token_histogram is not None
-    ):
-        token_histogram.record(
-            completion_tokens,
-            attributes={
-                SpanAttributes.LLM_TOKEN_TYPE: "output",
-                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
-            },
-        )
-
-    choices = response.get("choices")
-    if should_send_prompts() and choices:
-        _set_completions(span, choices)
-
-
-def _with_tracer_wrapper(func):
-    """Helper for providing tracer for wrapper functions."""
-
-    def _with_tracer(tracer, to_wrap):
-        def wrapper(wrapped, instance, args, kwargs):
-            return func(tracer, to_wrap, wrapped, instance, args, kwargs)
-
-        return wrapper
-
-    return _with_tracer
-
-
 def _with_chat_telemetry_wrapper(func):
     """Helper for providing tracer for wrapper functions. Includes metric collectors."""

@@ -251,6 +77,7 @@ def _with_chat_telemetry_wrapper(func):
         token_histogram,
         choice_counter,
         duration_histogram,
+        event_logger,
         to_wrap,
     ):
         def wrapper(wrapped, instance, args, kwargs):
@@ -259,6 +86,7 @@ def _with_chat_telemetry_wrapper(func):
                 token_histogram,
                 choice_counter,
                 duration_histogram,
+                event_logger,
                 to_wrap,
                 wrapped,
                 instance,
@@ -310,32 +138,19 @@ def _process_streaming_chunk(chunk):
     return content, finish_reason, usage


-def _set_streaming_response_attributes(
-    span, accumulated_content, finish_reason=None, usage=None
+def _handle_streaming_response(
+    span, accumulated_content, finish_reason, usage, event_logger
 ):
-    """Set span attributes for accumulated streaming response."""
-    if not span.is_recording():
-        return
-
-    prefix = f"{SpanAttributes.LLM_COMPLETIONS}.0"
-    set_span_attribute(span, f"{prefix}.role", "assistant")
-    set_span_attribute(span, f"{prefix}.content", accumulated_content)
-    if finish_reason:
-        set_span_attribute(span, f"{prefix}.finish_reason", finish_reason)
-
-    if usage:
-        set_span_attribute(
-            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.completion_tokens
-        )
-        set_span_attribute(
-            span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.prompt_tokens
-        )
-        set_span_attribute(
-            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens
+    set_model_streaming_response_attributes(span, usage)
+    if should_emit_events() and event_logger:
+        emit_streaming_response_events(accumulated_content, finish_reason, event_logger)
+    else:
+        set_streaming_response_attributes(
+            span, accumulated_content, finish_reason, usage
         )


-def _create_stream_processor(response, span):
+def _create_stream_processor(response, span, event_logger):
    """Create a generator that processes a stream while collecting telemetry."""
    accumulated_content = ""
    finish_reason = None
@@ -351,15 +166,17 @@ def _create_stream_processor(response, span):
             usage = chunk_usage
         yield chunk

+    _handle_streaming_response(
+        span, accumulated_content, finish_reason, usage, event_logger
+    )
+
     if span.is_recording():
-        _set_streaming_response_attributes(
-            span, accumulated_content, finish_reason, usage
-        )
         span.set_status(Status(StatusCode.OK))
+
     span.end()


-async def _create_async_stream_processor(response, span):
+async def _create_async_stream_processor(response, span, event_logger):
     """Create an async generator that processes a stream while collecting telemetry."""
     accumulated_content = ""
     finish_reason = None
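Note that both stream processors (the synchronous one above and its async twin in the next hunk) defer telemetry until the stream is exhausted: `_handle_streaming_response` and `span.end()` run only after the final chunk has been yielded, so the caller must consume the returned generator for the span to close. A rough usage sketch — the client call shape is the standard Groq SDK API, while the model name and messages are placeholders:

from groq import Groq

client = Groq()  # assumes GROQ_API_KEY is set in the environment
stream = client.chat.completions.create(
    model="llama-3.1-8b-instant",  # placeholder model
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for chunk in stream:  # chunks pass through _create_stream_processor unchanged
    pass
# Only now has _handle_streaming_response recorded the accumulated content,
# finish_reason, and usage, and the span been ended.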
@@ -375,20 +192,39 @@ async def _create_async_stream_processor(response, span):
             usage = chunk_usage
         yield chunk

+    _handle_streaming_response(
+        span, accumulated_content, finish_reason, usage, event_logger
+    )
+
     if span.is_recording():
-        _set_streaming_response_attributes(
-            span, accumulated_content, finish_reason, usage
-        )
         span.set_status(Status(StatusCode.OK))
+
     span.end()


+def _handle_input(span, kwargs, event_logger):
+    set_model_input_attributes(span, kwargs)
+    if should_emit_events() and event_logger:
+        emit_message_events(kwargs, event_logger)
+    else:
+        set_input_attributes(span, kwargs)
+
+
+def _handle_response(span, response, token_histogram, event_logger):
+    set_model_response_attributes(span, response, token_histogram)
+    if should_emit_events() and event_logger:
+        emit_choice_events(response, event_logger)
+    else:
+        set_response_attributes(span, response)
+
+
 @_with_chat_telemetry_wrapper
 def _wrap(
     tracer: Tracer,
     token_histogram: Histogram,
     choice_counter: Counter,
     duration_histogram: Histogram,
+    event_logger: Union[EventLogger, None],
     to_wrap,
     wrapped,
     instance,
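The new `_handle_input` / `_handle_response` helpers make the legacy/event split explicit: model-level data always lands on the span via the `set_model_*` functions, while message content goes either to span attributes (legacy mode) or to log events. Roughly what each path records for a simple chat turn — attribute names assume the `SpanAttributes` values from opentelemetry-semantic-conventions-ai, and the payload values are illustrative:

# Legacy path (use_legacy_attributes=True, the default):
#   gen_ai.prompt.0.role        = "user"
#   gen_ai.prompt.0.content     = "Hello"
#   gen_ai.completion.0.role    = "assistant"
#   gen_ai.completion.0.content = "Hi there!"
#
# Event path (use_legacy_attributes=False):
#   Event(name="gen_ai.user.message", body={"content": "Hello"})
#   Event(name="gen_ai.choice",
#         body={"index": 0, "message": {"content": "Hi there!"},
#               "finish_reason": "stop"})
#
# In both paths, gen_ai.request.model, gen_ai.response.model, and token
# usage stay on the span via the set_model_* helpers.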
@@ -411,8 +247,7 @@ def _wrap(
         },
     )

-    if span.is_recording():
-        _set_input_attributes(span, kwargs)
+    _handle_input(span, kwargs, event_logger)

     start_time = time.time()
     try:
@@ -431,7 +266,7 @@ def _wrap(

     if is_streaming_response(response):
         try:
-            return _create_stream_processor(response, span)
+            return _create_stream_processor(response, span, event_logger)
         except Exception as ex:
             logger.warning(
                 "Failed to process streaming response for groq span, error: %s",
@@ -451,14 +286,14 @@ def _wrap(
             attributes=metric_attributes,
         )

-        if span.is_recording():
-            _set_response_attributes(span, response, token_histogram)
+        _handle_response(span, response, token_histogram, event_logger)

     except Exception as ex:  # pylint: disable=broad-except
         logger.warning(
             "Failed to set response attributes for groq span, error: %s",
             str(ex),
         )
+
     if span.is_recording():
         span.set_status(Status(StatusCode.OK))
     span.end()
@@ -471,6 +306,7 @@ async def _awrap(
     token_histogram: Histogram,
     choice_counter: Counter,
     duration_histogram: Histogram,
+    event_logger: Union[EventLogger, None],
     to_wrap,
     wrapped,
     instance,
@@ -492,16 +328,11 @@ async def _awrap(
             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
         },
     )
-    try:
-        if span.is_recording():
-            _set_input_attributes(span, kwargs)

-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning(
-            "Failed to set input attributes for groq span, error: %s", str(ex)
-        )
+    _handle_input(span, kwargs, event_logger)

     start_time = time.time()
+
     try:
         response = await wrapped(*args, **kwargs)
     except Exception as e:  # pylint: disable=broad-except
@@ -518,7 +349,7 @@ async def _awrap(

     if is_streaming_response(response):
         try:
-            return await _create_async_stream_processor(response, span)
+            return await _create_async_stream_processor(response, span, event_logger)
         except Exception as ex:
             logger.warning(
                 "Failed to process streaming response for groq span, error: %s",
@@ -537,8 +368,7 @@ async def _awrap(
             attributes=metric_attributes,
         )

-    if span.is_recording():
-        _set_response_attributes(span, response, token_histogram)
+    _handle_response(span, response, token_histogram, event_logger)

     if span.is_recording():
         span.set_status(Status(StatusCode.OK))
@@ -557,12 +387,14 @@ class GroqInstrumentor(BaseInstrumentor):
         self,
         enrich_token_usage: bool = False,
         exception_logger=None,
+        use_legacy_attributes: bool = True,
         get_common_metrics_attributes: Callable[[], dict] = lambda: {},
     ):
         super().__init__()
         Config.exception_logger = exception_logger
         Config.enrich_token_usage = enrich_token_usage
         Config.get_common_metrics_attributes = get_common_metrics_attributes
+        Config.use_legacy_attributes = use_legacy_attributes

     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
@@ -588,6 +420,13 @@ class GroqInstrumentor(BaseInstrumentor):
             duration_histogram,
         ) = (None, None, None)

+        event_logger = None
+        if not Config.use_legacy_attributes:
+            event_logger_provider = kwargs.get("event_logger_provider")
+            event_logger = get_event_logger(
+                __name__, __version__, event_logger_provider=event_logger_provider
+            )
+
         for wrapped_method in WRAPPED_METHODS:
             wrap_package = wrapped_method.get("package")
             wrap_object = wrapped_method.get("object")
@@ -602,6 +441,7 @@ class GroqInstrumentor(BaseInstrumentor):
                     token_histogram,
                     choice_counter,
                     duration_histogram,
+                    event_logger,
                     wrapped_method,
                 ),
             )
@@ -621,6 +461,7 @@ class GroqInstrumentor(BaseInstrumentor):
                     token_histogram,
                     choice_counter,
                     duration_histogram,
+                    event_logger,
                     wrapped_method,
                 ),
             )
@@ -636,8 +477,9 @@ class GroqInstrumentor(BaseInstrumentor):
                 wrapped_method.get("method"),
             )
         for wrapped_method in WRAPPED_AMETHODS:
+            wrap_package = wrapped_method.get("package")
             wrap_object = wrapped_method.get("object")
             unwrap(
-                f"groq.resources.completions.{wrap_object}",
+                f"{wrap_package}.{wrap_object}",
                 wrapped_method.get("method"),
             )
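Taken together, the `__init__.py` changes are opt-in: passing `use_legacy_attributes=False` switches prompt/completion capture from span attributes to events, and `_instrument` builds an `EventLogger` from an optional `event_logger_provider` kwarg. A minimal usage sketch (SDK wiring elided; `provider` is a placeholder for a configured `EventLoggerProvider`):

from opentelemetry.instrumentation.groq import GroqInstrumentor

# Unchanged default: prompts/completions recorded as span attributes.
GroqInstrumentor().instrument()

# New in 0.41.0: emit gen_ai.* events instead.
GroqInstrumentor(use_legacy_attributes=False).instrument(
    event_logger_provider=provider,  # optional; global provider used if omitted
)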
opentelemetry/instrumentation/groq/config.py

@@ -5,3 +5,4 @@ class Config:
     enrich_token_usage = False
     exception_logger = None
     get_common_metrics_attributes: Callable[[], dict] = lambda: {}
+    use_legacy_attributes = True
opentelemetry/instrumentation/groq/event_emitter.py (new file)

@@ -0,0 +1,143 @@
+from dataclasses import asdict
+from enum import Enum
+from typing import Union
+
+from opentelemetry._events import Event, EventLogger
+from opentelemetry.instrumentation.groq.event_models import ChoiceEvent, MessageEvent
+from opentelemetry.instrumentation.groq.utils import (
+    dont_throw,
+    should_emit_events,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+
+from groq.types.chat.chat_completion import ChatCompletion
+
+
+class Roles(Enum):
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    TOOL = "tool"
+
+
+VALID_MESSAGE_ROLES = {role.value for role in Roles}
+"""The valid roles for naming the message event."""
+
+EVENT_ATTRIBUTES = {
+    # Should be GenAIAttributes.GenAiSystemValues.GROQ.value but it's not defined in the opentelemetry-semconv package
+    GenAIAttributes.GEN_AI_SYSTEM: "groq"
+}
+"""The attributes to be used for the event."""
+
+
+@dont_throw
+def emit_message_events(kwargs: dict, event_logger):
+    for message in kwargs.get("messages", []):
+        emit_event(
+            MessageEvent(
+                content=message.get("content"), role=message.get("role", "unknown")
+            ),
+            event_logger=event_logger,
+        )
+
+
+@dont_throw
+def emit_choice_events(response: ChatCompletion, event_logger):
+    for choice in response.choices:
+        emit_event(
+            ChoiceEvent(
+                index=choice.index,
+                message={
+                    "content": choice.message.content,
+                    "role": choice.message.role or "unknown",
+                },
+                finish_reason=choice.finish_reason,
+            ),
+            event_logger=event_logger,
+        )
+
+
+@dont_throw
+def emit_streaming_response_events(
+    accumulated_content: str, finish_reason: Union[str, None], event_logger
+):
+    """Emit events for streaming response."""
+    emit_event(
+        ChoiceEvent(
+            index=0,
+            message={"content": accumulated_content, "role": "assistant"},
+            finish_reason=finish_reason or "unknown",
+        ),
+        event_logger,
+    )
+
+
+def emit_event(
+    event: Union[MessageEvent, ChoiceEvent], event_logger: Union[EventLogger, None]
+) -> None:
+    """
+    Emit an event to the OpenTelemetry SDK.
+
+    Args:
+        event: The event to emit.
+    """
+    if not should_emit_events() or event_logger is None:
+        return
+
+    if isinstance(event, MessageEvent):
+        _emit_message_event(event, event_logger)
+    elif isinstance(event, ChoiceEvent):
+        _emit_choice_event(event, event_logger)
+    else:
+        raise TypeError("Unsupported event type")
+
+
+def _emit_message_event(event: MessageEvent, event_logger: EventLogger) -> None:
+    body = asdict(event)
+
+    if event.role in VALID_MESSAGE_ROLES:
+        name = "gen_ai.{}.message".format(event.role)
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to the "role" in the message name. So, remove the role from the body if
+        # it is the same as the in the event name.
+        body.pop("role", None)
+    else:
+        name = "gen_ai.user.message"
+
+    # According to the semantic conventions, only the assistant role has tool call
+    if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
+        del body["tool_calls"]
+    elif event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        del body["content"]
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
+
+
+def _emit_choice_event(event: ChoiceEvent, event_logger: EventLogger) -> None:
+    body = asdict(event)
+    if event.message["role"] == Roles.ASSISTANT.value:
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to "assistant", so remove the role from the body if it is "assistant".
+        body["message"].pop("role", None)
+
+    if event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        body["message"].pop("content", None)
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    event_logger.emit(
+        Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
+    )
opentelemetry/instrumentation/groq/event_models.py (new file)

@@ -0,0 +1,41 @@
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional, TypedDict
+
+
+class _FunctionToolCall(TypedDict):
+    function_name: str
+    arguments: Optional[dict[str, Any]]
+
+
+class ToolCall(TypedDict):
+    """Represents a tool call in the AI model."""
+
+    id: str
+    function: _FunctionToolCall
+    type: Literal["function"]
+
+
+class CompletionMessage(TypedDict):
+    """Represents a message in the AI model."""
+
+    content: Any
+    role: str = "assistant"
+
+
+@dataclass
+class MessageEvent:
+    """Represents an input event for the AI model."""
+
+    content: Any
+    role: str = "user"
+    tool_calls: Optional[List[ToolCall]] = None
+
+
+@dataclass
+class ChoiceEvent:
+    """Represents a completion event for the AI model."""
+
+    index: int
+    message: CompletionMessage
+    finish_reason: str = "unknown"
+    tool_calls: Optional[List[ToolCall]] = None
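Since `MessageEvent` and `ChoiceEvent` are plain dataclasses, `event_emitter.py` serializes them with `dataclasses.asdict`; a quick check of what that produces:

from dataclasses import asdict

event = MessageEvent(content="What is 2+2?", role="user")
assert asdict(event) == {
    "content": "What is 2+2?",
    "role": "user",
    "tool_calls": None,
}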
opentelemetry/instrumentation/groq/span_utils.py (new file)

@@ -0,0 +1,230 @@
+import json
+
+from opentelemetry.instrumentation.groq.utils import (
+    dont_throw,
+    model_as_dict,
+    set_span_attribute,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
+    GEN_AI_RESPONSE_ID,
+)
+from opentelemetry.semconv_ai import (
+    SpanAttributes,
+)
+
+CONTENT_FILTER_KEY = "content_filter_results"
+
+
+@dont_throw
+def set_input_attributes(span, kwargs):
+    if not span.is_recording():
+        return
+
+    if should_send_prompts():
+        if kwargs.get("prompt") is not None:
+            set_span_attribute(
+                span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
+            )
+
+        elif kwargs.get("messages") is not None:
+            for i, message in enumerate(kwargs.get("messages")):
+                set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
+                    _dump_content(message.get("content")),
+                )
+                set_span_attribute(
+                    span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", message.get("role")
+                )
+
+
+@dont_throw
+def set_model_input_attributes(span, kwargs):
+    if not span.is_recording():
+        return
+
+    set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
+    set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
+    )
+    set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
+    set_span_attribute(
+        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
+    )
+
+
+def set_streaming_response_attributes(
+    span, accumulated_content, finish_reason=None, usage=None
+):
+    """Set span attributes for accumulated streaming response."""
+    if not span.is_recording() or not should_send_prompts():
+        return
+
+    prefix = f"{SpanAttributes.LLM_COMPLETIONS}.0"
+    set_span_attribute(span, f"{prefix}.role", "assistant")
+    set_span_attribute(span, f"{prefix}.content", accumulated_content)
+    if finish_reason:
+        set_span_attribute(span, f"{prefix}.finish_reason", finish_reason)
+
+
+def set_model_streaming_response_attributes(span, usage):
+    if not span.is_recording():
+        return
+
+    if usage:
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.completion_tokens
+        )
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.prompt_tokens
+        )
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens
+        )
+
+
+@dont_throw
+def set_model_response_attributes(span, response, token_histogram):
+    if not span.is_recording():
+        return
+    response = model_as_dict(response)
+    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
+    set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))
+
+    usage = response.get("usage") or {}
+    prompt_tokens = usage.get("prompt_tokens")
+    completion_tokens = usage.get("completion_tokens")
+    if usage:
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
+        )
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
+        )
+        set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)
+
+    if (
+        isinstance(prompt_tokens, int)
+        and prompt_tokens >= 0
+        and token_histogram is not None
+    ):
+        token_histogram.record(
+            prompt_tokens,
+            attributes={
+                SpanAttributes.LLM_TOKEN_TYPE: "input",
+                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
+            },
+        )
+
+    if (
+        isinstance(completion_tokens, int)
+        and completion_tokens >= 0
+        and token_histogram is not None
+    ):
+        token_histogram.record(
+            completion_tokens,
+            attributes={
+                SpanAttributes.LLM_TOKEN_TYPE: "output",
+                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
+            },
+        )
+
+
+def set_response_attributes(span, response):
+    if not span.is_recording():
+        return
+    choices = model_as_dict(response).get("choices")
+    if should_send_prompts() and choices:
+        _set_completions(span, choices)
+
+
+def _set_completions(span, choices):
+    if choices is None or not should_send_prompts():
+        return
+
+    for choice in choices:
+        index = choice.get("index")
+        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+        set_span_attribute(span, f"{prefix}.finish_reason", choice.get("finish_reason"))
+
+        if choice.get("content_filter_results"):
+            set_span_attribute(
+                span,
+                f"{prefix}.{CONTENT_FILTER_KEY}",
+                json.dumps(choice.get("content_filter_results")),
+            )
+
+        if choice.get("finish_reason") == "content_filter":
+            set_span_attribute(span, f"{prefix}.role", "assistant")
+            set_span_attribute(span, f"{prefix}.content", "FILTERED")
+
+            return
+
+        message = choice.get("message")
+        if not message:
+            return
+
+        set_span_attribute(span, f"{prefix}.role", message.get("role"))
+        set_span_attribute(span, f"{prefix}.content", message.get("content"))
+
+        function_call = message.get("function_call")
+        if function_call:
+            set_span_attribute(
+                span, f"{prefix}.tool_calls.0.name", function_call.get("name")
+            )
+            set_span_attribute(
+                span,
+                f"{prefix}.tool_calls.0.arguments",
+                function_call.get("arguments"),
+            )
+
+        tool_calls = message.get("tool_calls")
+        if tool_calls:
+            for i, tool_call in enumerate(tool_calls):
+                function = tool_call.get("function")
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.id",
+                    tool_call.get("id"),
+                )
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.name",
+                    function.get("name"),
+                )
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.arguments",
+                    function.get("arguments"),
+                )
+
+
+def _dump_content(content):
+    if isinstance(content, str):
+        return content
+    json_serializable = []
+    for item in content:
+        if item.get("type") == "text":
+            json_serializable.append({"type": "text", "text": item.get("text")})
+        elif item.get("type") == "image":
+            json_serializable.append(
+                {
+                    "type": "image",
+                    "source": {
+                        "type": item.get("source").get("type"),
+                        "media_type": item.get("source").get("media_type"),
+                        "data": str(item.get("source").get("data")),
+                    },
+                }
+            )
+    return json.dumps(json_serializable)
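Note how `_dump_content` normalizes message content before it is attached to the `gen_ai.prompt.{i}.content` attribute: plain strings pass through untouched, while structured (e.g. multimodal) content is reduced to JSON. An illustrative run with made-up inputs (output abridged):

_dump_content("hello")
# -> 'hello'

_dump_content([
    {"type": "text", "text": "describe this"},
    {"type": "image",
     "source": {"type": "base64", "media_type": "image/png", "data": "iVBOR..."}},
])
# -> '[{"type": "text", "text": "describe this"}, {"type": "image", "source": {...}}]'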
opentelemetry/instrumentation/groq/utils.py

@@ -1,7 +1,8 @@
-from importlib.metadata import version
-import os
 import logging
+import os
 import traceback
+from importlib.metadata import version
+
 from opentelemetry import context as context_api
 from opentelemetry.instrumentation.groq.config import Config
 from opentelemetry.semconv_ai import SpanAttributes
@@ -11,6 +12,8 @@ GEN_AI_SYSTEM_GROQ = "groq"

 _PYDANTIC_VERSION = version("pydantic")

+TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
+

 def set_span_attribute(span, name, value):
     if value is not None and value != "":
@@ -19,7 +22,7 @@ def set_span_attribute(span, name, value):

 def should_send_prompts():
     return (
-        os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
+        os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")


@@ -78,3 +81,12 @@ def model_as_dict(model):
         return model_as_dict(model.parse())
     else:
         return model
+
+
+def should_emit_events() -> bool:
+    """
+    Checks if the instrumentation isn't using the legacy attributes
+    and if the event logger is not None.
+    """
+
+    return not Config.use_legacy_attributes
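Content capture remains gated by the `TRACELOOP_TRACE_CONTENT` environment variable (default `"true"`), now referenced through the new constant, while `should_emit_events()` consults only `Config.use_legacy_attributes`. For example, to keep prompt and completion text out of both spans and events:

import os

os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
# should_send_prompts() now returns False unless the
# "override_enable_content_tracing" context key is set upstream.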
opentelemetry/instrumentation/groq/version.py

@@ -1 +1 @@
-__version__ = "0.40.13"
+__version__ = "0.41.0"
opentelemetry_instrumentation_groq-0.41.0.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: opentelemetry-instrumentation-groq
-Version: 0.40.13
+Version: 0.41.0
 Summary: OpenTelemetry Groq instrumentation
 License: Apache-2.0
 Author: Gal Kleinman
@@ -17,7 +17,7 @@ Provides-Extra: instruments
 Requires-Dist: opentelemetry-api (>=1.28.0,<2.0.0)
 Requires-Dist: opentelemetry-instrumentation (>=0.50b0)
 Requires-Dist: opentelemetry-semantic-conventions (>=0.50b0)
-Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.9)
+Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.10)
 Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-groq
 Description-Content-Type: text/markdown

opentelemetry_instrumentation_groq-0.41.0.dist-info/RECORD (new file)

@@ -0,0 +1,11 @@
+opentelemetry/instrumentation/groq/__init__.py,sha256=WovbdJgxxGEe8ZibKiQgP8rpIQLBfSYEN9X9dzDTBgU,15007
+opentelemetry/instrumentation/groq/config.py,sha256=KdVX2d7lY1ToljS9w_R5XzRu5nu_7FYYS0-zlORafgM,203
+opentelemetry/instrumentation/groq/event_emitter.py,sha256=m29pi5dSBIN1PGek6Mo_exifBy9vxECes1hdIEa1YaE,4549
+opentelemetry/instrumentation/groq/event_models.py,sha256=PCfCGxrrArwZqR-4wFcXrhwQq0sBMAxmSrpC4PUMtaM,876
+opentelemetry/instrumentation/groq/span_utils.py,sha256=BCkiFDNaGdD0J4GDX70jebqe9N5klmUeax2WQKIJOw8,7578
+opentelemetry/instrumentation/groq/utils.py,sha256=7G72SkZiEQmSzXDxgRu3yUS0FIZ6TWHvAXXvDqQ_tcI,2409
+opentelemetry/instrumentation/groq/version.py,sha256=WqzeTKVe9Q7QeiOa2KhoieBiGxzHK-Gb2TvnyeIJEKk,23
+opentelemetry_instrumentation_groq-0.41.0.dist-info/METADATA,sha256=ghPMKSkN9gsln_-7czKAYVq5umVeBfRqiDzuOdTz3gY,2118
+opentelemetry_instrumentation_groq-0.41.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+opentelemetry_instrumentation_groq-0.41.0.dist-info/entry_points.txt,sha256=uezQe06CpIK8xTZZSK0lF29nOKkz_w6VR4sQnb4IAFQ,87
+opentelemetry_instrumentation_groq-0.41.0.dist-info/RECORD,,

opentelemetry_instrumentation_groq-0.40.13.dist-info/RECORD (removed file)

@@ -1,8 +0,0 @@
-opentelemetry/instrumentation/groq/__init__.py,sha256=E1blGTym8YomMT-bsUCrVSneSy-CZmHdtCWup19OpFw,20433
-opentelemetry/instrumentation/groq/config.py,sha256=eN2YxQdWlAF-qWPwZZr0xFM-8tx9zUjmiparuB64jcU,170
-opentelemetry/instrumentation/groq/utils.py,sha256=1ESL4NCp8Mjww8cGEzQO_AEqGiSK4JSiMFYUhwBnuao,2151
-opentelemetry/instrumentation/groq/version.py,sha256=vQg86lMQG_F-usx58lOg3MO6b3SadNM0IbUHwUUchxk,24
-opentelemetry_instrumentation_groq-0.40.13.dist-info/METADATA,sha256=-bFJTPi3Z0dR9PSUH7DmdHl_7Jl09bDoIbJBsBpTtfQ,2118
-opentelemetry_instrumentation_groq-0.40.13.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-opentelemetry_instrumentation_groq-0.40.13.dist-info/entry_points.txt,sha256=uezQe06CpIK8xTZZSK0lF29nOKkz_w6VR4sQnb4IAFQ,87
-opentelemetry_instrumentation_groq-0.40.13.dist-info/RECORD,,