opentelemetry-instrumentation-openai 0.21.2__tar.gz → 0.21.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentelemetry-instrumentation-openai might be problematic.

Files changed (17)
  1. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/PKG-INFO +1 -1
  2. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/__init__.py +21 -0
  3. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +44 -20
  4. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +8 -5
  5. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +7 -5
  6. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v0/__init__.py +2 -2
  7. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v1/__init__.py +2 -2
  8. opentelemetry_instrumentation_openai-0.21.4/opentelemetry/instrumentation/openai/version.py +1 -0
  9. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/pyproject.toml +1 -1
  10. opentelemetry_instrumentation_openai-0.21.2/opentelemetry/instrumentation/openai/version.py +0 -1
  11. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/README.md +0 -0
  12. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/__init__.py +0 -0
  13. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
  14. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  15. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/utils.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.21.2 → opentelemetry_instrumentation_openai-0.21.4}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.21.2
+Version: 0.21.4
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
opentelemetry/instrumentation/openai/shared/__init__.py

@@ -245,3 +245,24 @@ def get_token_count_from_string(string: str, model_name: str):
 
     token_count = len(encoding.encode(string))
     return token_count
+
+
+def _token_type(token_type: str):
+    if token_type == "prompt_tokens":
+        return "input"
+    elif token_type == "completion_tokens":
+        return "output"
+
+    return None
+
+
+def _metric_shared_attributes(
+    response_model: str, operation: str, server_address: str, is_streaming: bool = False
+):
+    return {
+        "gen_ai.system": "openai",
+        "gen_ai.response.model": response_model,
+        "gen_ai.operation.name": operation,
+        "server.address": server_address,
+        "stream": is_streaming,
+    }
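
For orientation, a minimal sketch of what the new helpers return; the argument values below are illustrative assumptions, not output captured from the package:

    # Hypothetical inputs; shapes follow the helper definitions above.
    attrs = _metric_shared_attributes(
        response_model="gpt-3.5-turbo",
        operation="chat",
        server_address="https://api.openai.com/v1/",
    )
    # attrs == {
    #     "gen_ai.system": "openai",
    #     "gen_ai.response.model": "gpt-3.5-turbo",
    #     "gen_ai.operation.name": "chat",
    #     "server.address": "https://api.openai.com/v1/",
    #     "stream": False,
    # }
    _token_type("prompt_tokens")      # -> "input"
    _token_type("completion_tokens")  # -> "output"
    _token_type("total_tokens")       # -> None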
opentelemetry/instrumentation/openai/shared/chat_wrappers.py

@@ -14,10 +14,12 @@ from opentelemetry.instrumentation.openai.utils import (
     dont_throw,
 )
 from opentelemetry.instrumentation.openai.shared import (
+    _metric_shared_attributes,
     _set_client_attributes,
     _set_request_attributes,
     _set_span_attribute,
     _set_functions_attributes,
+    _token_type,
     set_tools_attributes,
     _set_response_attributes,
     is_streaming_response,
@@ -267,11 +269,12 @@ def _handle_response(
 def _set_chat_metrics(
     instance, token_counter, choice_counter, duration_histogram, response_dict, duration
 ):
-    shared_attributes = {
-        "gen_ai.response.model": response_dict.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-        "stream": False,
-    }
+    shared_attributes = _metric_shared_attributes(
+        response_model=response_dict.get("model") or None,
+        operation="chat",
+        server_address=_get_openai_base_url(instance),
+        is_streaming=False,
+    )
 
     # token metrics
     usage = response_dict.get("usage")  # type: dict
@@ -303,7 +306,7 @@ def _set_token_counter_metrics(token_counter, usage, shared_attributes):
         if name in OPENAI_LLM_USAGE_TOKEN_TYPES:
             attributes_with_token_type = {
                 **shared_attributes,
-                "llm.usage.token_type": name.split("_")[0],
+                "gen_ai.token.type": _token_type(name),
             }
             token_counter.record(val, attributes=attributes_with_token_type)
 
@@ -420,16 +423,18 @@ def _set_streaming_token_metrics(
     if type(prompt_usage) is int and prompt_usage >= 0:
         attributes_with_token_type = {
             **shared_attributes,
-            "llm.usage.token_type": "prompt",
+            "gen_ai.token.type": "input",
         }
         token_counter.record(prompt_usage, attributes=attributes_with_token_type)
 
     if type(completion_usage) is int and completion_usage >= 0:
         attributes_with_token_type = {
             **shared_attributes,
-            "llm.usage.token_type": "completion",
+            "gen_ai.token.type": "output",
         }
-        token_counter.record(completion_usage, attributes=attributes_with_token_type)
+        token_counter.record(
+            completion_usage, attributes=attributes_with_token_type
+        )
 
 
 class ChatStream(ObjectProxy):
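
The practical effect of the llm.usage.token_type -> gen_ai.token.type changes in the two hunks above, assuming a standard OpenAI usage payload (values illustrative):

    usage = {"prompt_tokens": 12, "completion_tokens": 34, "total_tokens": 46}
    # Before: token metrics carried "llm.usage.token_type": "prompt" / "completion".
    # After:  they carry the GenAI semantic-convention attribute instead:
    #   prompt_tokens     -> {"gen_ai.token.type": "input"}
    #   completion_tokens -> {"gen_ai.token.type": "output"}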
@@ -458,6 +463,8 @@ class ChatStream(ObjectProxy):
     ):
         super().__init__(response)
 
+        print("HEYY", response.__class__.__name__)
+
         self._span = span
         self._instance = instance
         self._token_counter = token_counter
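
Note: the print("HEYY", ...) above is a leftover debug statement that ships in the released package and writes to stdout on every wrapped chat stream; it is plausibly what earns this release its "potentially problematic" flag.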
@@ -476,6 +483,15 @@ class ChatStream(ObjectProxy):
     def __enter__(self):
         return self
 
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.__wrapped__.__exit__(exc_type, exc_val, exc_tb)
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)
+
     def __iter__(self):
         return self
 
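Context for this addition: ChatStream wraps the SDK's stream in a wrapt.ObjectProxy, and dunder methods are looked up on the type rather than the instance, so the context-manager protocol has to be delegated explicitly rather than relying on attribute forwarding. A standalone sketch of the pattern, independent of this package:

    import wrapt

    class StreamProxy(wrapt.ObjectProxy):
        # Dunder lookups bypass the proxy's attribute forwarding, so the
        # context-manager protocol must be forwarded by hand.
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            return self.__wrapped__.__exit__(exc_type, exc_val, exc_tb)

Before this change the proxy defined neither __aenter__ nor __aexit__, so using the wrapped stream in an async with block would presumably have failed.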
@@ -510,7 +526,8 @@ class ChatStream(ObjectProxy):
         if self._first_token and self._streaming_time_to_first_token:
             self._time_of_first_token = time.time()
             self._streaming_time_to_first_token.record(
-                self._time_of_first_token - self._start_time
+                self._time_of_first_token - self._start_time,
+                attributes=self._shared_attributes(),
             )
             self._first_token = False
 
@@ -536,20 +553,24 @@ class ChatStream(ObjectProxy):
         if delta and delta.get("role"):
             complete_choice["message"]["role"] = delta.get("role")
 
-    def _close_span(self):
-        shared_attributes = {
-            "gen_ai.response.model": self._complete_response.get("model") or None,
-            "server.address": _get_openai_base_url(self._instance),
-            "stream": True,
-        }
+    def _shared_attributes(self):
+        return _metric_shared_attributes(
+            response_model=self._complete_response.get("model")
+            or self._request_kwargs.get("model")
+            or None,
+            operation="chat",
+            server_address=_get_openai_base_url(self._instance),
+            is_streaming=True,
+        )
 
+    def _close_span(self):
         if not is_azure_openai(self._instance):
             _set_streaming_token_metrics(
                 self._request_kwargs,
                 self._complete_response,
                 self._span,
                 self._token_counter,
-                shared_attributes,
+                self._shared_attributes(),
             )
 
         # choice metrics
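
Beyond deduplicating the attribute construction, _shared_attributes now falls back to self._request_kwargs.get("model") when the accumulated stream response never reports a model, so streaming metrics are no longer recorded with an empty gen_ai.response.model.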
@@ -557,7 +578,7 @@ class ChatStream(ObjectProxy):
             _set_choice_counter_metrics(
                 self._choice_counter,
                 self._complete_response.get("choices"),
-                shared_attributes,
+                self._shared_attributes(),
             )
 
         # duration metrics
@@ -566,10 +587,13 @@ class ChatStream(ObjectProxy):
         else:
             duration = None
         if duration and isinstance(duration, (float, int)) and self._duration_histogram:
-            self._duration_histogram.record(duration, attributes=shared_attributes)
+            self._duration_histogram.record(
+                duration, attributes=self._shared_attributes()
+            )
         if self._streaming_time_to_generate and self._time_of_first_token:
             self._streaming_time_to_generate.record(
-                time.time() - self._time_of_first_token
+                time.time() - self._time_of_first_token,
+                attributes=self._shared_attributes(),
             )
 
         _set_response_attributes(self._span, self._complete_response)
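
Also worth noting: the time-to-first-token and time-to-generate histograms previously recorded bare values (see the removed lines above); after this change every streaming metric carries the same shared attribute set, so latency can be sliced by model, operation, and server address.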
opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py

@@ -12,10 +12,12 @@ from opentelemetry.instrumentation.openai.utils import (
     _with_embeddings_telemetry_wrapper,
 )
 from opentelemetry.instrumentation.openai.shared import (
+    _metric_shared_attributes,
     _set_client_attributes,
     _set_request_attributes,
     _set_span_attribute,
     _set_response_attributes,
+    _token_type,
     should_send_prompts,
     model_as_dict,
     _get_openai_base_url,
@@ -188,10 +190,11 @@ def _set_embeddings_metrics(
     response_dict,
     duration,
 ):
-    shared_attributes = {
-        "gen_ai.response.model": response_dict.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-    }
+    shared_attributes = _metric_shared_attributes(
+        response_model=response_dict.get("model") or None,
+        operation="embeddings",
+        server_address=_get_openai_base_url(instance),
+    )
 
     # token count metrics
     usage = response_dict.get("usage")
@@ -200,7 +203,7 @@ def _set_embeddings_metrics(
         if name in OPENAI_LLM_USAGE_TOKEN_TYPES:
             attributes_with_token_type = {
                 **shared_attributes,
-                "llm.usage.token_type": name.split("_")[0],
+                "gen_ai.token.type": _token_type(name),
             }
             token_counter.record(val, attributes=attributes_with_token_type)
 
opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py

@@ -8,6 +8,7 @@ from opentelemetry.metrics import Counter, Histogram
 from opentelemetry.instrumentation.openai import is_openai_v1
 from opentelemetry.instrumentation.openai.shared import (
     _get_openai_base_url,
+    _metric_shared_attributes,
     model_as_dict,
 )
 from opentelemetry.instrumentation.openai.utils import (
@@ -52,11 +53,12 @@ def image_gen_metrics_wrapper(
     else:
         response_dict = response
 
-    shared_attributes = {
-        # not provide response.model in ImagesResponse response, use model in request kwargs
-        "gen_ai.response.model": kwargs.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-    }
+    # ImagesResponse does not include response.model, so use the model from the request kwargs
+    shared_attributes = _metric_shared_attributes(
+        response_model=kwargs.get("model") or None,
+        operation="image_gen",
+        server_address=_get_openai_base_url(instance),
+    )
 
     duration = end_time - start_time
     if duration_histogram:
opentelemetry/instrumentation/openai/v0/__init__.py

@@ -38,7 +38,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
         tokens_histogram = meter.create_histogram(
             name="gen_ai.client.token.usage",
             unit="token",
-            description="Number of tokens used in prompt and completions",
+            description="Measures number of input and output tokens used",
         )
 
         chat_choice_counter = meter.create_counter(
@@ -50,7 +50,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
         chat_duration_histogram = meter.create_histogram(
             name="gen_ai.client.operation.duration",
             unit="s",
-            description="Duration of chat completion operation",
+            description="GenAI operation duration",
         )
 
         chat_exception_counter = meter.create_counter(
opentelemetry/instrumentation/openai/v1/__init__.py

@@ -52,7 +52,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
         tokens_histogram = meter.create_histogram(
             name="gen_ai.client.token.usage",
             unit="token",
-            description="Number of tokens used in prompt and completions",
+            description="Measures number of input and output tokens used",
        )
 
         chat_choice_counter = meter.create_counter(
@@ -64,7 +64,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
         chat_duration_histogram = meter.create_histogram(
             name="gen_ai.client.operation.duration",
             unit="s",
-            description="Duration of chat completion operation",
+            description="GenAI operation duration",
         )
 
         chat_exception_counter = meter.create_counter(
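
Both rewritten description strings match the OpenTelemetry GenAI metrics semantic conventions for gen_ai.client.token.usage and gen_ai.client.operation.duration, consistent with the gen_ai.token.type attribute change above.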
pyproject.toml

@@ -8,7 +8,7 @@ show_missing = true
 
 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.21.2"
+version = "0.21.4"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
     "Gal Kleinman <gal@traceloop.com>",