opentelemetry-instrumentation-openai 0.21.3__tar.gz → 0.21.4__tar.gz

This diff shows the content of publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.

Note: this version of opentelemetry-instrumentation-openai has been flagged as potentially problematic.

Files changed (17)
  1. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/PKG-INFO +1 -1
  2. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/__init__.py +21 -0
  3. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +30 -19
  4. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +8 -5
  5. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +7 -5
  6. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v0/__init__.py +2 -2
  7. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v1/__init__.py +2 -2
  8. opentelemetry_instrumentation_openai-0.21.4/opentelemetry/instrumentation/openai/version.py +1 -0
  9. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/pyproject.toml +1 -1
  10. opentelemetry_instrumentation_openai-0.21.3/opentelemetry/instrumentation/openai/version.py +0 -1
  11. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/README.md +0 -0
  12. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/__init__.py +0 -0
  13. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
  14. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  15. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/utils.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.21.3 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.21.3
+Version: 0.21.4
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
opentelemetry/instrumentation/openai/shared/__init__.py

@@ -245,3 +245,24 @@ def get_token_count_from_string(string: str, model_name: str):
 
     token_count = len(encoding.encode(string))
     return token_count
+
+
+def _token_type(token_type: str):
+    if token_type == "prompt_tokens":
+        return "input"
+    elif token_type == "completion_tokens":
+        return "output"
+
+    return None
+
+
+def _metric_shared_attributes(
+    response_model: str, operation: str, server_address: str, is_streaming: bool = False
+):
+    return {
+        "gen_ai.system": "openai",
+        "gen_ai.response.model": response_model,
+        "gen_ai.operation.name": operation,
+        "server.address": server_address,
+        "stream": is_streaming,
+    }
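Taken together, the two new helpers centralize the attribute set shared by every metric, keyed to the OpenTelemetry GenAI conventions. A minimal sketch of what they return — the model name and base URL below are illustrative, not taken from the diff:

attrs = _metric_shared_attributes(
    response_model="gpt-3.5-turbo",
    operation="chat",
    server_address="https://api.openai.com/v1/",
)
# attrs == {
#     "gen_ai.system": "openai",
#     "gen_ai.response.model": "gpt-3.5-turbo",
#     "gen_ai.operation.name": "chat",
#     "server.address": "https://api.openai.com/v1/",
#     "stream": False,
# }

_token_type("prompt_tokens")      # "input"
_token_type("completion_tokens")  # "output"
_token_type("total_tokens")       # None (totals are not recorded as a token type)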
opentelemetry/instrumentation/openai/shared/chat_wrappers.py

@@ -14,10 +14,12 @@ from opentelemetry.instrumentation.openai.utils import (
     dont_throw,
 )
 from opentelemetry.instrumentation.openai.shared import (
+    _metric_shared_attributes,
     _set_client_attributes,
     _set_request_attributes,
     _set_span_attribute,
     _set_functions_attributes,
+    _token_type,
     set_tools_attributes,
     _set_response_attributes,
     is_streaming_response,
@@ -267,11 +269,12 @@ def _handle_response(
 def _set_chat_metrics(
     instance, token_counter, choice_counter, duration_histogram, response_dict, duration
 ):
-    shared_attributes = {
-        "gen_ai.response.model": response_dict.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-        "stream": False,
-    }
+    shared_attributes = _metric_shared_attributes(
+        response_model=response_dict.get("model") or None,
+        operation="chat",
+        server_address=_get_openai_base_url(instance),
+        is_streaming=False,
+    )
 
     # token metrics
     usage = response_dict.get("usage")  # type: dict
@@ -303,7 +306,7 @@ def _set_token_counter_metrics(token_counter, usage, shared_attributes):
         if name in OPENAI_LLM_USAGE_TOKEN_TYPES:
             attributes_with_token_type = {
                 **shared_attributes,
-                "llm.usage.token_type": name.split("_")[0],
+                "gen_ai.token.type": _token_type(name),
             }
             token_counter.record(val, attributes=attributes_with_token_type)
 
@@ -420,14 +423,14 @@ def _set_streaming_token_metrics(
     if type(prompt_usage) is int and prompt_usage >= 0:
         attributes_with_token_type = {
             **shared_attributes,
-            "llm.usage.token_type": "prompt",
+            "gen_ai.token.type": "input",
         }
         token_counter.record(prompt_usage, attributes=attributes_with_token_type)
 
     if type(completion_usage) is int and completion_usage >= 0:
         attributes_with_token_type = {
             **shared_attributes,
-            "llm.usage.token_type": "completion",
+            "gen_ai.token.type": "output",
         }
         token_counter.record(
             completion_usage, attributes=attributes_with_token_type
@@ -523,7 +526,8 @@ class ChatStream(ObjectProxy):
         if self._first_token and self._streaming_time_to_first_token:
             self._time_of_first_token = time.time()
             self._streaming_time_to_first_token.record(
-                self._time_of_first_token - self._start_time
+                self._time_of_first_token - self._start_time,
+                attributes=self._shared_attributes(),
             )
             self._first_token = False
 
@@ -549,20 +553,24 @@ class ChatStream(ObjectProxy):
         if delta and delta.get("role"):
             complete_choice["message"]["role"] = delta.get("role")
 
-    def _close_span(self):
-        shared_attributes = {
-            "gen_ai.response.model": self._complete_response.get("model") or None,
-            "server.address": _get_openai_base_url(self._instance),
-            "stream": True,
-        }
+    def _shared_attributes(self):
+        return _metric_shared_attributes(
+            response_model=self._complete_response.get("model")
+            or self._request_kwargs.get("model")
+            or None,
+            operation="chat",
+            server_address=_get_openai_base_url(self._instance),
+            is_streaming=True,
+        )
 
+    def _close_span(self):
         if not is_azure_openai(self._instance):
             _set_streaming_token_metrics(
                 self._request_kwargs,
                 self._complete_response,
                 self._span,
                 self._token_counter,
-                shared_attributes,
+                self._shared_attributes(),
             )
 
         # choice metrics
@@ -570,7 +578,7 @@ class ChatStream(ObjectProxy):
             _set_choice_counter_metrics(
                 self._choice_counter,
                 self._complete_response.get("choices"),
-                shared_attributes,
+                self._shared_attributes(),
             )
 
         # duration metrics
@@ -579,10 +587,13 @@ class ChatStream(ObjectProxy):
         else:
             duration = None
         if duration and isinstance(duration, (float, int)) and self._duration_histogram:
-            self._duration_histogram.record(duration, attributes=shared_attributes)
+            self._duration_histogram.record(
+                duration, attributes=self._shared_attributes()
+            )
         if self._streaming_time_to_generate and self._time_of_first_token:
             self._streaming_time_to_generate.record(
-                time.time() - self._time_of_first_token
+                time.time() - self._time_of_first_token,
+                attributes=self._shared_attributes(),
             )
 
         _set_response_attributes(self._span, self._complete_response)
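The net effect of the ChatStream changes: the time-to-first-token, token-usage, choice-count, duration, and time-to-generate metrics all now record one consistent attribute set, with the response model falling back to the request's model when the streamed chunks omit it. A sketch of the dict _shared_attributes() yields for a streamed chat completion (model and address values illustrative):

# Sketch of ChatStream._shared_attributes() output for a streamed call
{
    "gen_ai.system": "openai",
    "gen_ai.response.model": "gpt-4",  # or the request's model if chunks omit it
    "gen_ai.operation.name": "chat",
    "server.address": "https://api.openai.com/v1/",
    "stream": True,
}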
opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py

@@ -12,10 +12,12 @@ from opentelemetry.instrumentation.openai.utils import (
     _with_embeddings_telemetry_wrapper,
 )
 from opentelemetry.instrumentation.openai.shared import (
+    _metric_shared_attributes,
     _set_client_attributes,
     _set_request_attributes,
     _set_span_attribute,
     _set_response_attributes,
+    _token_type,
     should_send_prompts,
     model_as_dict,
     _get_openai_base_url,
@@ -188,10 +190,11 @@ def _set_embeddings_metrics(
     response_dict,
     duration,
 ):
-    shared_attributes = {
-        "gen_ai.response.model": response_dict.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-    }
+    shared_attributes = _metric_shared_attributes(
+        response_model=response_dict.get("model") or None,
+        operation="embeddings",
+        server_address=_get_openai_base_url(instance),
+    )
 
     # token count metrics
     usage = response_dict.get("usage")
@@ -200,7 +203,7 @@ def _set_embeddings_metrics(
         if name in OPENAI_LLM_USAGE_TOKEN_TYPES:
             attributes_with_token_type = {
                 **shared_attributes,
-                "llm.usage.token_type": name.split("_")[0],
+                "gen_ai.token.type": _token_type(name),
             }
             token_counter.record(val, attributes=attributes_with_token_type)
 
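As in the chat wrappers, each token-usage data point layers gen_ai.token.type on top of the shared attributes. A sketch of what gets recorded for an embeddings call's prompt tokens (model and URL illustrative):

attributes_with_token_type = {
    **_metric_shared_attributes(
        response_model="text-embedding-ada-002",
        operation="embeddings",
        server_address="https://api.openai.com/v1/",
    ),
    "gen_ai.token.type": _token_type("prompt_tokens"),  # "input"
}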
opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py

@@ -8,6 +8,7 @@ from opentelemetry.metrics import Counter, Histogram
 from opentelemetry.instrumentation.openai import is_openai_v1
 from opentelemetry.instrumentation.openai.shared import (
     _get_openai_base_url,
+    _metric_shared_attributes,
     model_as_dict,
 )
 from opentelemetry.instrumentation.openai.utils import (
@@ -52,11 +53,12 @@ def image_gen_metrics_wrapper(
     else:
         response_dict = response
 
-    shared_attributes = {
-        # not provide response.model in ImagesResponse response, use model in request kwargs
-        "gen_ai.response.model": kwargs.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-    }
+    # not provide response.model in ImagesResponse response, use model in request kwargs
+    shared_attributes = _metric_shared_attributes(
+        response_model=kwargs.get("model") or None,
+        operation="image_gen",
+        server_address=_get_openai_base_url(instance),
+    )
 
     duration = end_time - start_time
     if duration_histogram:
opentelemetry/instrumentation/openai/v0/__init__.py

@@ -38,7 +38,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
         tokens_histogram = meter.create_histogram(
             name="gen_ai.client.token.usage",
             unit="token",
-            description="Number of tokens used in prompt and completions",
+            description="Measures number of input and output tokens used",
         )
 
         chat_choice_counter = meter.create_counter(
@@ -50,7 +50,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
         chat_duration_histogram = meter.create_histogram(
             name="gen_ai.client.operation.duration",
             unit="s",
-            description="Duration of chat completion operation",
+            description="GenAI operation duration",
         )
 
         chat_exception_counter = meter.create_counter(
opentelemetry/instrumentation/openai/v1/__init__.py

@@ -52,7 +52,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
         tokens_histogram = meter.create_histogram(
             name="gen_ai.client.token.usage",
             unit="token",
-            description="Number of tokens used in prompt and completions",
+            description="Measures number of input and output tokens used",
        )
 
         chat_choice_counter = meter.create_counter(
@@ -64,7 +64,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
         chat_duration_histogram = meter.create_histogram(
             name="gen_ai.client.operation.duration",
             unit="s",
-            description="Duration of chat completion operation",
+            description="GenAI operation duration",
         )
 
         chat_exception_counter = meter.create_counter(
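To see the renamed descriptions and new attributes end to end, the metrics can be collected with the OpenTelemetry SDK's in-memory reader. A hedged sketch, not part of this diff — it assumes the package's OpenAIInstrumentor entry point and a configured OpenAI client:

from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

reader = InMemoryMetricReader()
metrics.set_meter_provider(MeterProvider(metric_readers=[reader]))
OpenAIInstrumentor().instrument()

# ... issue chat/embeddings/image requests with the openai client here ...

# As of this release, data points on gen_ai.client.token.usage should carry
# gen_ai.system, gen_ai.operation.name, server.address, stream, and
# gen_ai.token.type ("input" or "output").
for rm in reader.get_metrics_data().resource_metrics:
    for scope in rm.scope_metrics:
        for metric in scope.metrics:
            print(metric.name, metric.description)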
pyproject.toml

@@ -8,7 +8,7 @@ show_missing = true
 
 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.21.3"
+version = "0.21.4"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
     "Gal Kleinman <gal@traceloop.com>",