opentelemetry-instrumentation-groq 0.29.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentelemetry-instrumentation-groq might be problematic; see the release advisory for more details.

@@ -0,0 +1,57 @@
1
+ Metadata-Version: 2.1
2
+ Name: opentelemetry-instrumentation-groq
3
+ Version: 0.29.2
4
+ Summary: OpenTelemetry Groq instrumentation
5
+ Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-groq
6
+ License: Apache-2.0
7
+ Author: Gal Kleinman
8
+ Author-email: gal@traceloop.com
9
+ Requires-Python: >=3.9,<4
10
+ Classifier: License :: OSI Approved :: Apache Software License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.9
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Provides-Extra: instruments
17
+ Requires-Dist: opentelemetry-api (>=1.27.0,<2.0.0)
18
+ Requires-Dist: opentelemetry-instrumentation (>=0.48b0,<0.49)
19
+ Requires-Dist: opentelemetry-semantic-conventions (>=0.48b0,<0.49)
20
+ Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.1)
21
+ Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-groq
22
+ Description-Content-Type: text/markdown
23
+
24
+ # OpenTelemetry Groq Instrumentation
25
+
26
+ <a href="https://pypi.org/project/opentelemetry-instrumentation-groq/">
27
+ <img src="https://badge.fury.io/py/opentelemetry-instrumentation-groq.svg">
28
+ </a>
29
+
30
+ This library allows tracing Groq prompts and completions sent with the official [Groq SDK](https://github.com/groq/groq-python).
31
+
32
+ ## Installation
33
+
34
+ ```bash
35
+ pip install opentelemetry-instrumentation-groq
36
+ ```
37
+
38
+ ## Example usage
39
+
40
+ ```python
41
+ from opentelemetry.instrumentation.groq import GroqInstrumentor
42
+
43
+ GroqInstrumentor().instrument()
44
+ ```
45
+
46
+ ## Privacy
47
+
48
+ **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
49
+
50
+ However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
51
+
52
+ To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
53
+
54
+ ```bash
55
+ TRACELOOP_TRACE_CONTENT=false
56
+ ```
57
+
@@ -0,0 +1,33 @@
1
+ # OpenTelemetry Groq Instrumentation
2
+
3
+ <a href="https://pypi.org/project/opentelemetry-instrumentation-groq/">
4
+ <img src="https://badge.fury.io/py/opentelemetry-instrumentation-groq.svg">
5
+ </a>
6
+
7
+ This library allows tracing Groq prompts and completions sent with the official [Groq SDK](https://github.com/groq/groq-python).
8
+
9
+ ## Installation
10
+
11
+ ```bash
12
+ pip install opentelemetry-instrumentation-groq
13
+ ```
14
+
15
+ ## Example usage
16
+
17
+ ```python
18
+ from opentelemetry.instrumentation.groq import GroqInstrumentor
19
+
20
+ GroqInstrumentor().instrument()
21
+ ```
22
+
23
+ ## Privacy
24
+
25
+ **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
26
+
27
+ However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
28
+
29
+ To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
30
+
31
+ ```bash
32
+ TRACELOOP_TRACE_CONTENT=false
33
+ ```
@@ -0,0 +1,508 @@
1
+ """OpenTelemetry Groq instrumentation"""
2
+
3
+ import json
4
+ import logging
5
+ import os
6
+ import time
7
+ from typing import Callable, Collection
8
+
9
+ from groq._streaming import AsyncStream, Stream
10
+ from opentelemetry import context as context_api
11
+ from opentelemetry.instrumentation.groq.config import Config
12
+ from opentelemetry.instrumentation.groq.utils import (
13
+ dont_throw,
14
+ error_metrics_attributes,
15
+ model_as_dict,
16
+ set_span_attribute,
17
+ shared_metrics_attributes,
18
+ should_send_prompts,
19
+ )
20
+ from opentelemetry.instrumentation.groq.version import __version__
21
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
22
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
23
+ from opentelemetry.metrics import Counter, Histogram, Meter, get_meter
24
+ from opentelemetry.semconv_ai import (
25
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
26
+ LLMRequestTypeValues,
27
+ SpanAttributes,
28
+ Meters,
29
+ )
30
+ from opentelemetry.trace import SpanKind, Tracer, get_tracer
31
+ from opentelemetry.trace.status import Status, StatusCode
32
+ from wrapt import wrap_function_wrapper
33
+
34
+ logger = logging.getLogger(__name__)
35
+
36
# Minimum groq SDK version this instrumentation supports
# (checked by BaseInstrumentor.instrumentation_dependencies).
_instruments = ("groq >= 0.9.0",)

# Span-attribute suffix under which content-filter results are recorded.
CONTENT_FILTER_KEY = "content_filter_results"

# Synchronous Groq SDK entry points to wrap with telemetry.
WRAPPED_METHODS = [
    {
        "package": "groq.resources.chat.completions",
        "object": "Completions",
        "method": "create",
        "span_name": "groq.chat",
    },
]
# Asynchronous Groq SDK entry points to wrap with telemetry.
WRAPPED_AMETHODS = [
    {
        "package": "groq.resources.chat.completions",
        "object": "AsyncCompletions",
        "method": "create",
        "span_name": "groq.chat",
    },
]
56
+
57
+
58
def is_streaming_response(response):
    """Return True if *response* is a Groq streaming response (sync or async)."""
    # Idiom fix: a single isinstance with a tuple instead of two chained checks.
    return isinstance(response, (Stream, AsyncStream))
60
+
61
+
62
+ def _dump_content(content):
63
+ if isinstance(content, str):
64
+ return content
65
+ json_serializable = []
66
+ for item in content:
67
+ if item.get("type") == "text":
68
+ json_serializable.append({"type": "text", "text": item.get("text")})
69
+ elif item.get("type") == "image":
70
+ json_serializable.append(
71
+ {
72
+ "type": "image",
73
+ "source": {
74
+ "type": item.get("source").get("type"),
75
+ "media_type": item.get("source").get("media_type"),
76
+ "data": str(item.get("source").get("data")),
77
+ },
78
+ }
79
+ )
80
+ return json.dumps(json_serializable)
81
+
82
+
83
@dont_throw
def _set_input_attributes(span, kwargs):
    """Record request parameters (model, sampling settings, prompts) on *span*.

    Decorated with @dont_throw: any failure is logged, never raised.
    Prompt/message content is only recorded when should_send_prompts() allows.
    """
    set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
    # BUG FIX: Groq's chat API uses the OpenAI-style "max_tokens" argument;
    # "max_tokens_to_sample" is Anthropic's parameter name and never appears
    # in Groq requests, so this attribute was previously never recorded.
    set_span_attribute(
        span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
    )
    set_span_attribute(
        span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
    )
    set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
    set_span_attribute(
        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
    )
    set_span_attribute(
        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
    )
    set_span_attribute(
        span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
    )

    if should_send_prompts():
        if kwargs.get("prompt") is not None:
            # Legacy single-prompt form: recorded as the first user prompt.
            set_span_attribute(
                span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
            )

        elif kwargs.get("messages") is not None:
            for i, message in enumerate(kwargs.get("messages")):
                set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
                    _dump_content(message.get("content")),
                )
                set_span_attribute(
                    span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", message.get("role")
                )
117
+
118
+
119
def _set_completions(span, choices):
    """Record each response choice (finish reason, content, tool calls) on *span*.

    Expects *choices* as plain dicts (see model_as_dict). No-op when None.
    """
    if choices is None:
        return

    for choice in choices:
        index = choice.get("index")
        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
        set_span_attribute(
            span, f"{prefix}.finish_reason", choice.get("finish_reason")
        )

        if choice.get("content_filter_results"):
            set_span_attribute(
                span,
                f"{prefix}.{CONTENT_FILTER_KEY}",
                json.dumps(choice.get("content_filter_results")),
            )

        if choice.get("finish_reason") == "content_filter":
            set_span_attribute(span, f"{prefix}.role", "assistant")
            set_span_attribute(span, f"{prefix}.content", "FILTERED")
            # BUG FIX: was `return`, which silently dropped every remaining
            # choice of a multi-choice (n > 1) response once one was filtered.
            continue

        message = choice.get("message")
        if not message:
            # BUG FIX: was `return`; a message-less choice should not stop
            # processing of the remaining choices.
            continue

        set_span_attribute(span, f"{prefix}.role", message.get("role"))
        set_span_attribute(span, f"{prefix}.content", message.get("content"))

        # Legacy single function-call API.
        function_call = message.get("function_call")
        if function_call:
            set_span_attribute(
                span, f"{prefix}.tool_calls.0.name", function_call.get("name")
            )
            set_span_attribute(
                span,
                f"{prefix}.tool_calls.0.arguments",
                function_call.get("arguments"),
            )

        # Modern multi tool-call API.
        tool_calls = message.get("tool_calls")
        if tool_calls:
            for i, tool_call in enumerate(tool_calls):
                function = tool_call.get("function")
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{i}.id",
                    tool_call.get("id"),
                )
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{i}.name",
                    function.get("name"),
                )
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{i}.arguments",
                    function.get("arguments"),
                )
180
+
181
+
182
@dont_throw
def _set_response_attributes(span, response):
    """Copy the response model name, token usage, and (when content tracing is
    enabled) the completion choices onto *span*.

    Decorated with @dont_throw: any failure is logged, never raised.
    """
    response_dict = model_as_dict(response)

    set_span_attribute(
        span, SpanAttributes.LLM_RESPONSE_MODEL, response_dict.get("model")
    )

    usage = response_dict.get("usage")
    if usage:
        usage_attributes = {
            SpanAttributes.LLM_USAGE_TOTAL_TOKENS: usage.get("total_tokens"),
            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS: usage.get("completion_tokens"),
            SpanAttributes.LLM_USAGE_PROMPT_TOKENS: usage.get("prompt_tokens"),
        }
        for attribute_name, attribute_value in usage_attributes.items():
            set_span_attribute(span, attribute_name, attribute_value)

    choices = response_dict.get("choices")
    if choices and should_send_prompts():
        _set_completions(span, choices)
205
+
206
+
207
+ def _with_tracer_wrapper(func):
208
+ """Helper for providing tracer for wrapper functions."""
209
+
210
+ def _with_tracer(tracer, to_wrap):
211
+ def wrapper(wrapped, instance, args, kwargs):
212
+ return func(tracer, to_wrap, wrapped, instance, args, kwargs)
213
+
214
+ return wrapper
215
+
216
+ return _with_tracer
217
+
218
+
219
+ def _with_chat_telemetry_wrapper(func):
220
+ """Helper for providing tracer for wrapper functions. Includes metric collectors."""
221
+
222
+ def _with_chat_telemetry(
223
+ tracer,
224
+ token_histogram,
225
+ choice_counter,
226
+ duration_histogram,
227
+ to_wrap,
228
+ ):
229
+ def wrapper(wrapped, instance, args, kwargs):
230
+ return func(
231
+ tracer,
232
+ token_histogram,
233
+ choice_counter,
234
+ duration_histogram,
235
+ to_wrap,
236
+ wrapped,
237
+ instance,
238
+ args,
239
+ kwargs,
240
+ )
241
+
242
+ return wrapper
243
+
244
+ return _with_chat_telemetry
245
+
246
+
247
def _create_metrics(meter: Meter):
    """Create the GenAI metric instruments on *meter*.

    Returns a ``(token_histogram, choice_counter, duration_histogram)`` tuple.
    """
    return (
        meter.create_histogram(
            name=Meters.LLM_TOKEN_USAGE,
            unit="token",
            description="Measures number of input and output tokens used",
        ),
        meter.create_counter(
            name=Meters.LLM_GENERATION_CHOICES,
            unit="choice",
            description="Number of choices returned by chat completions call",
        ),
        meter.create_histogram(
            name=Meters.LLM_OPERATION_DURATION,
            unit="s",
            description="GenAI operation duration",
        ),
    )
267
+
268
+
269
@_with_chat_telemetry_wrapper
def _wrap(
    tracer: Tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Trace a synchronous Groq SDK call defined in WRAPPED_METHODS.

    Starts a CLIENT span, records request/response attributes and the
    operation-duration metric, and re-raises any SDK error unchanged.
    """
    # Honor both the generic and the LLM-specific suppression context keys.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return wrapped(*args, **kwargs)

    span = tracer.start_span(
        to_wrap.get("span_name"),
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "Groq",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )

    if span.is_recording():
        _set_input_attributes(span, kwargs)

    start_time = time.time()
    try:
        response = wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        attributes = error_metrics_attributes(e)

        if duration_histogram:
            duration_histogram.record(
                time.time() - start_time, attributes=attributes
            )

        # BUG FIX: the span was previously leaked (never ended) when the SDK
        # call raised; mark it as errored and end it before re-raising.
        if span.is_recording():
            span.set_status(Status(StatusCode.ERROR, str(e)))
        span.end()
        raise

    if is_streaming_response(response):
        # TODO: implement streaming
        pass
    elif response:
        try:
            metric_attributes = shared_metrics_attributes(response)

            if duration_histogram:
                duration_histogram.record(
                    time.time() - start_time,
                    attributes=metric_attributes,
                )

            if span.is_recording():
                _set_response_attributes(span, response)

        except Exception as ex:  # pylint: disable=broad-except
            logger.warning(
                "Failed to set response attributes for groq span, error: %s",
                str(ex),
            )
    if span.is_recording():
        span.set_status(Status(StatusCode.OK))
    span.end()
    return response
341
+
342
+
343
@_with_chat_telemetry_wrapper
async def _awrap(
    tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Trace an asynchronous Groq SDK call defined in WRAPPED_AMETHODS.

    Async counterpart of _wrap: starts a CLIENT span, records request and
    response attributes plus the duration metric, and re-raises SDK errors.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return await wrapped(*args, **kwargs)

    span = tracer.start_span(
        to_wrap.get("span_name"),
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "Groq",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )

    # _set_input_attributes is @dont_throw, so the previous redundant
    # try/except around it was removed for consistency with _wrap.
    if span.is_recording():
        _set_input_attributes(span, kwargs)

    start_time = time.time()
    try:
        response = await wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        attributes = error_metrics_attributes(e)

        if duration_histogram:
            duration_histogram.record(
                time.time() - start_time, attributes=attributes
            )

        # BUG FIX: the span was previously leaked (never ended) when the SDK
        # call raised; mark it as errored and end it before re-raising.
        if span.is_recording():
            span.set_status(Status(StatusCode.ERROR, str(e)))
        span.end()
        raise

    if is_streaming_response(response):
        # TODO: implement streaming
        pass
    elif response:
        metric_attributes = shared_metrics_attributes(response)

        if duration_histogram:
            duration_histogram.record(
                time.time() - start_time,
                attributes=metric_attributes,
            )

        if span.is_recording():
            _set_response_attributes(span, response)

    if span.is_recording():
        span.set_status(Status(StatusCode.OK))
    span.end()
    return response
412
+
413
+
414
def is_metrics_enabled() -> bool:
    """True unless TRACELOOP_METRICS_ENABLED is set to a non-"true" value.

    An unset or empty variable counts as enabled; comparison is
    case-insensitive.
    """
    flag = os.getenv("TRACELOOP_METRICS_ENABLED") or "true"
    return flag.lower() == "true"
416
+
417
+
418
class GroqInstrumentor(BaseInstrumentor):
    """An instrumentor for Groq's client library.

    Wraps the sync (``Completions.create``) and async
    (``AsyncCompletions.create``) chat entry points of the Groq SDK with
    tracing and, unless disabled via TRACELOOP_METRICS_ENABLED, metrics.
    """

    def __init__(
        self,
        enrich_token_usage: bool = False,
        exception_logger=None,
        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
    ):
        super().__init__()
        # Configuration is process-global, shared via the Config holder.
        Config.exception_logger = exception_logger
        Config.enrich_token_usage = enrich_token_usage
        Config.get_common_metrics_attributes = get_common_metrics_attributes

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package specs required for this instrumentation."""
        return _instruments

    def _instrument(self, **kwargs):
        """Install telemetry wrappers on all configured Groq SDK methods."""
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        # meter and counters are inited here
        meter_provider = kwargs.get("meter_provider")
        meter = get_meter(__name__, __version__, meter_provider)

        if is_metrics_enabled():
            (
                token_histogram,
                choice_counter,
                duration_histogram,
            ) = _create_metrics(meter)
        else:
            # BUG FIX: this branch previously unpacked a 4-tuple
            # (None, None, None, None) into three names, raising ValueError
            # whenever metrics were disabled via TRACELOOP_METRICS_ENABLED.
            token_histogram = choice_counter = duration_histogram = None

        for wrapped_method in WRAPPED_METHODS:
            self._try_wrap(
                wrapped_method,
                _wrap(
                    tracer,
                    token_histogram,
                    choice_counter,
                    duration_histogram,
                    wrapped_method,
                ),
            )

        for wrapped_method in WRAPPED_AMETHODS:
            self._try_wrap(
                wrapped_method,
                _awrap(
                    tracer,
                    token_histogram,
                    choice_counter,
                    duration_histogram,
                    wrapped_method,
                ),
            )

    @staticmethod
    def _try_wrap(wrapped_method, wrapper):
        """Apply *wrapper* to the method described by *wrapped_method*."""
        try:
            wrap_function_wrapper(
                wrapped_method.get("package"),
                f"{wrapped_method.get('object')}.{wrapped_method.get('method')}",
                wrapper,
            )
        except ModuleNotFoundError:
            pass  # that's ok, we don't want to fail if some methods do not exist

    def _uninstrument(self, **kwargs):
        """Remove the wrappers installed by _instrument."""
        # BUG FIX: async methods were previously unwrapped from
        # "groq.resources.completions.<object>", which does not match the
        # module they were wrapped in ("groq.resources.chat.completions"),
        # so uninstrument() left the async method permanently wrapped.
        # Both lists carry their package path, so derive it from the config.
        for wrapped_method in WRAPPED_METHODS + WRAPPED_AMETHODS:
            unwrap(
                f"{wrapped_method.get('package')}.{wrapped_method.get('object')}",
                wrapped_method.get("method"),
            )
@@ -0,0 +1,7 @@
1
+ from typing import Callable
2
+
3
+
4
class Config:
    """Process-global configuration shared by the Groq instrumentation."""

    # Set by GroqInstrumentor(enrich_token_usage=...); not read anywhere in
    # this view of the package — presumably reserved for token-usage
    # enrichment. TODO confirm intended use.
    enrich_token_usage = False
    # Optional callable invoked with exceptions swallowed by the
    # @dont_throw decorator in utils.
    exception_logger = None
    # Factory returning attributes merged into every recorded metric
    # data point (see shared_metrics_attributes in utils).
    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
@@ -0,0 +1,78 @@
1
+ from importlib.metadata import version
2
+ import os
3
+ import logging
4
+ import traceback
5
+ from opentelemetry import context as context_api
6
+ from opentelemetry.instrumentation.groq.config import Config
7
+ from opentelemetry.semconv_ai import SpanAttributes
8
+
9
+ GEN_AI_SYSTEM = "gen_ai.system"
10
+ GEN_AI_SYSTEM_GROQ = "groq"
11
+
12
+
13
def set_span_attribute(span, name, value):
    """Set *name* on *span*, skipping missing values (None or empty string)."""
    if value is None or value == "":
        return
    span.set_attribute(name, value)
16
+
17
+
18
def should_send_prompts():
    """Whether prompt/completion content may be recorded on spans.

    Enabled unless TRACELOOP_TRACE_CONTENT is set to something other than
    "true" (unset or empty counts as enabled), or when content tracing is
    forced for the current request via the
    "override_enable_content_tracing" context key.
    """
    env_value = (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower()
    return env_value == "true" or context_api.get_value(
        "override_enable_content_tracing"
    )
22
+
23
+
24
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    from functools import wraps

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # IDIOM FIX: functools.wraps preserves __name__/__doc__ of the wrapped
    # function; previously every decorated helper reported as "wrapper".
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
47
+
48
+
49
@dont_throw
def shared_metrics_attributes(response):
    """Build the attribute set recorded on every metric data point.

    Merges the user-supplied common attributes with the GenAI system tag and
    the responding model's name. Decorated with @dont_throw, so a failure
    yields None rather than an exception.
    """
    attributes = dict(Config.get_common_metrics_attributes())
    attributes[GEN_AI_SYSTEM] = GEN_AI_SYSTEM_GROQ
    attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_as_dict(response).get(
        "model"
    )
    return attributes
60
+
61
+
62
@dont_throw
def error_metrics_attributes(exception):
    """Build the attribute set recorded on metrics when a Groq call raises."""
    error_type = exception.__class__.__name__
    return {
        GEN_AI_SYSTEM: GEN_AI_SYSTEM_GROQ,
        "error.type": error_type,
    }
68
+
69
+
70
def model_as_dict(model):
    """Best-effort conversion of a (possibly pydantic) model to a plain dict.

    Handles pydantic v2 (``model_dump``), pydantic v1 (``dict``), raw Groq API
    response wrappers (``parse``), and passes anything else through unchanged.

    BUG FIX: the previous implementation compared ``version("pydantic")`` as a
    string, which misorders versions lexicographically ("10.0.0" < "2.0.0")
    and raised PackageNotFoundError when pydantic metadata was unavailable;
    feature detection avoids both problems.
    """
    if hasattr(model, "model_dump"):  # pydantic v2
        return model.model_dump()
    if hasattr(model, "dict"):  # pydantic v1
        return model.dict()
    if hasattr(model, "parse"):  # Raw API response
        return model_as_dict(model.parse())
    return model
@@ -0,0 +1,54 @@
1
+ [tool.coverage.run]
2
+ branch = true
3
+ source = [ "opentelemetry/instrumentation/groq" ]
4
+
5
+ [tool.coverage.report]
6
+ exclude_lines = ['if TYPE_CHECKING:']
7
+ show_missing = true
8
+
9
+ [tool.poetry]
10
+ name = "opentelemetry-instrumentation-groq"
11
+ version = "0.29.2"
12
+ description = "OpenTelemetry Groq instrumentation"
13
+ authors = [
14
+ "Gal Kleinman <gal@traceloop.com>",
15
+ "Nir Gazit <nir@traceloop.com>",
16
+ ]
17
+ repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-groq"
18
+ license = "Apache-2.0"
19
+ readme = "README.md"
20
+
21
+ [[tool.poetry.packages]]
22
+ include = "opentelemetry/instrumentation/groq"
23
+
24
+ [tool.poetry.dependencies]
25
+ python = ">=3.9,<4"
26
+ opentelemetry-api = "^1.27.0"
27
+ opentelemetry-instrumentation = "^0.48b0"
28
+ opentelemetry-semantic-conventions = "^0.48b0"
29
+ opentelemetry-semantic-conventions-ai = "0.4.1"
30
+
31
+ [tool.poetry.group.dev.dependencies]
32
+ autopep8 = "^2.2.0"
33
+ flake8 = "7.0.0"
34
+ pytest = "^8.2.2"
35
+ pytest-sugar = "1.0.0"
36
+
37
+ [tool.poetry.group.test.dependencies]
38
+ groq = ">=0.10.0"
39
+ pytest = "^8.2.2"
40
+ pytest-sugar = "1.0.0"
41
+ vcrpy = "^6.0.1"
42
+ pytest-recording = "^0.13.1"
43
+ opentelemetry-sdk = "^1.27.0"
44
+ pytest-asyncio = "^0.23.7"
45
+
46
+ [build-system]
47
+ requires = ["poetry-core"]
48
+ build-backend = "poetry.core.masonry.api"
49
+
50
+ [tool.poetry.extras]
51
+ instruments = ["groq"]
52
+
53
+ [tool.poetry.plugins."opentelemetry_instrumentor"]
54
+ groq = "opentelemetry.instrumentation.groq:GroqInstrumentor"