opentelemetry-instrumentation-groq 0.38.7__tar.gz → 0.38.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentelemetry-instrumentation-groq might be problematic. See the package's registry page for further details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: opentelemetry-instrumentation-groq
3
- Version: 0.38.7
3
+ Version: 0.38.8
4
4
  Summary: OpenTelemetry Groq instrumentation
5
5
  License: Apache-2.0
6
6
  Author: Gal Kleinman
@@ -183,26 +183,38 @@ def _set_completions(span, choices):
183
183
 
184
184
 
185
185
  @dont_throw
186
- def _set_response_attributes(span, response):
186
+ def _set_response_attributes(span, response, token_histogram):
187
187
  response = model_as_dict(response)
188
-
189
188
  set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
190
189
  set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))
191
190
 
192
- usage = response.get("usage")
191
+ usage = response.get("usage") or {}
192
+ prompt_tokens = usage.get("prompt_tokens")
193
+ completion_tokens = usage.get("completion_tokens")
193
194
  if usage:
194
195
  set_span_attribute(
195
196
  span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
196
197
  )
197
198
  set_span_attribute(
198
199
  span,
199
- SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
200
- usage.get("completion_tokens"),
200
+ SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
201
201
  )
202
202
  set_span_attribute(
203
- span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.get("prompt_tokens")
203
+ span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens
204
204
  )
205
205
 
206
+ if isinstance(prompt_tokens, int) and prompt_tokens >= 0 and token_histogram is not None:
207
+ token_histogram.record(prompt_tokens, attributes={
208
+ SpanAttributes.LLM_TOKEN_TYPE: "input",
209
+ SpanAttributes.LLM_RESPONSE_MODEL: response.get("model")
210
+ })
211
+
212
+ if isinstance(completion_tokens, int) and completion_tokens >= 0 and token_histogram is not None:
213
+ token_histogram.record(completion_tokens, attributes={
214
+ SpanAttributes.LLM_TOKEN_TYPE: "output",
215
+ SpanAttributes.LLM_RESPONSE_MODEL: response.get("model")
216
+ })
217
+
206
218
  choices = response.get("choices")
207
219
  if should_send_prompts() and choices:
208
220
  _set_completions(span, choices)
@@ -429,7 +441,7 @@ def _wrap(
429
441
  )
430
442
 
431
443
  if span.is_recording():
432
- _set_response_attributes(span, response)
444
+ _set_response_attributes(span, response, token_histogram)
433
445
 
434
446
  except Exception as ex: # pylint: disable=broad-except
435
447
  logger.warning(
@@ -515,7 +527,7 @@ async def _awrap(
515
527
  )
516
528
 
517
529
  if span.is_recording():
518
- _set_response_attributes(span, response)
530
+ _set_response_attributes(span, response, token_histogram)
519
531
 
520
532
  if span.is_recording():
521
533
  span.set_status(Status(StatusCode.OK))
@@ -8,7 +8,7 @@ show_missing = true
8
8
 
9
9
  [tool.poetry]
10
10
  name = "opentelemetry-instrumentation-groq"
11
- version = "0.38.7"
11
+ version = "0.38.8"
12
12
  description = "OpenTelemetry Groq instrumentation"
13
13
  authors = ["Gal Kleinman <gal@traceloop.com>", "Nir Gazit <nir@traceloop.com>"]
14
14
  repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-groq"