opentelemetry-instrumentation-openai 0.17.0__tar.gz → 0.17.2__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of opentelemetry-instrumentation-openai might be problematic.

Files changed (17)
  1. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/PKG-INFO +1 -1
  2. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +135 -154
  3. opentelemetry_instrumentation_openai-0.17.2/opentelemetry/instrumentation/openai/version.py +1 -0
  4. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/pyproject.toml +1 -1
  5. opentelemetry_instrumentation_openai-0.17.0/opentelemetry/instrumentation/openai/version.py +0 -1
  6. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/README.md +0 -0
  7. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/__init__.py +0 -0
  8. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
  9. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
  10. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  11. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -0
  12. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
  13. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/utils.py +0 -0
  14. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
  15. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.17.0
+Version: 0.17.2
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
opentelemetry/instrumentation/openai/shared/chat_wrappers.py
@@ -1,6 +1,8 @@
 import json
 import logging
 import time
+from wrapt import ObjectProxy
+
 
 from opentelemetry import context as context_api
 from opentelemetry.metrics import Counter, Histogram
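
The only new import is wrapt's ObjectProxy, which is the mechanism behind the whole change: a proxy subclass forwards every attribute it does not override to the wrapped object. A minimal sketch of the pattern (illustrative names only, not code from this package):

    from wrapt import ObjectProxy

    class LoggingStream(ObjectProxy):
        # Keep iteration on the proxy so the __next__ hook below runs;
        # everything else falls through to the wrapped object.
        def __iter__(self):
            return self

        def __next__(self):
            item = self.__wrapped__.__next__()  # StopIteration propagates
            print("chunk:", item)               # hypothetical side effect
            return item

    s = LoggingStream(iter([1, 2, 3]))
    assert list(s) == [1, 2, 3]  # items pass through unchanged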
@@ -85,7 +87,7 @@ def chat_wrapper(
 
     if is_streaming_response(response):
         # span will be closed after the generator is done
-        return _build_from_streaming_response(
+        return ChatStream(
             span,
             response,
             instance,
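
With this change the sync wrapper returns a ChatStream proxy instead of a plain generator, so the caller still receives something that behaves like the original response object. A hedged sketch of the instrumented call site (assumes openai>=1.0 with this instrumentation active; the model name is illustrative):

    from openai import OpenAI

    client = OpenAI()  # transparently instrumented after instrumentor setup
    stream = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
        stream=True,
    )
    for chunk in stream:  # each chunk passes through ChatStream.__next__
        pass
    # the span ends once the underlying stream raises StopIteration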
@@ -159,7 +161,7 @@ async def achat_wrapper(
 
     if is_streaming_response(response):
         # span will be closed after the generator is done
-        return _abuild_from_streaming_response(
+        return ChatStream(
             span,
             response,
             instance,
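
The async wrapper gets the same treatment; ChatStream implements both iterator protocols, so one class covers both paths. An equally hedged async sketch under the same assumptions:

    import asyncio
    from openai import AsyncOpenAI

    async def main():
        client = AsyncOpenAI()
        stream = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "hi"}],
            stream=True,
        )
        async for chunk in stream:  # routed through ChatStream.__anext__
            pass
        # the span ends once the stream raises StopAsyncIteration

    asyncio.run(main())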
@@ -395,161 +397,140 @@ def _set_streaming_token_metrics(
         token_counter.add(completion_usage, attributes=attributes_with_token_type)
 
 
-@dont_throw
-def _build_from_streaming_response(
-    span,
-    response,
-    instance=None,
-    token_counter=None,
-    choice_counter=None,
-    duration_histogram=None,
-    streaming_time_to_first_token=None,
-    streaming_time_to_generate=None,
-    start_time=None,
-    request_kwargs=None,
-):
-    complete_response = {"choices": [], "model": ""}
-
-    first_token = True
-    time_of_first_token = start_time  # will be updated when first token is received
-
-    for item in response:
-        span.add_event(name="llm.content.completion.chunk")
-
-        item_to_yield = item
-
-        if first_token and streaming_time_to_first_token:
-            time_of_first_token = time.time()
-            streaming_time_to_first_token.record(time_of_first_token - start_time)
-            first_token = False
-
-        _accumulate_stream_items(item, complete_response)
-
-        yield item_to_yield
-
-    shared_attributes = {
-        "gen_ai.response.model": complete_response.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-        "stream": True,
-    }
-
-    if not is_azure_openai(instance):
-        _set_streaming_token_metrics(
-            request_kwargs, complete_response, span, token_counter, shared_attributes
-        )
-
-    # choice metrics
-    if choice_counter and complete_response.get("choices"):
-        _set_choice_counter_metrics(
-            choice_counter, complete_response.get("choices"), shared_attributes
-        )
-
-    # duration metrics
-    if start_time and isinstance(start_time, (float, int)):
-        duration = time.time() - start_time
-    else:
-        duration = None
-    if duration and isinstance(duration, (float, int)) and duration_histogram:
-        duration_histogram.record(duration, attributes=shared_attributes)
-    if streaming_time_to_generate and time_of_first_token:
-        streaming_time_to_generate.record(time.time() - time_of_first_token)
-
-    _set_response_attributes(span, complete_response)
-
-    if should_send_prompts():
-        _set_completions(span, complete_response.get("choices"))
-
-    span.set_status(Status(StatusCode.OK))
-    span.end()
-
-
-@dont_throw
-async def _abuild_from_streaming_response(
-    span,
-    response,
-    instance=None,
-    token_counter=None,
-    choice_counter=None,
-    duration_histogram=None,
-    streaming_time_to_first_token=None,
-    streaming_time_to_generate=None,
-    start_time=None,
-    request_kwargs=None,
-):
-    complete_response = {"choices": [], "model": ""}
-
-    first_token = True
-    time_of_first_token = start_time  # will be updated when first token is received
-
-    async for item in response:
-        span.add_event(name="llm.content.completion.chunk")
-
-        item_to_yield = item
-
-        if first_token and streaming_time_to_first_token:
-            time_of_first_token = time.time()
-            streaming_time_to_first_token.record(time_of_first_token - start_time)
-            first_token = False
-
-        _accumulate_stream_items(item, complete_response)
-
-        yield item_to_yield
-
-    shared_attributes = {
-        "gen_ai.response.model": complete_response.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-        "stream": True,
-    }
-
-    if not is_azure_openai(instance):
-        _set_streaming_token_metrics(
-            request_kwargs, complete_response, span, token_counter, shared_attributes
-        )
-
-    # choice metrics
-    if choice_counter and complete_response.get("choices"):
-        _set_choice_counter_metrics(
-            choice_counter, complete_response.get("choices"), shared_attributes
-        )
-
-    # duration metrics
-    if start_time and isinstance(start_time, (float, int)):
-        duration = time.time() - start_time
-    else:
-        duration = None
-    if duration and isinstance(duration, (float, int)) and duration_histogram:
-        duration_histogram.record(duration, attributes=shared_attributes)
-    if streaming_time_to_generate and time_of_first_token:
-        streaming_time_to_generate.record(time.time() - time_of_first_token)
-
-    _set_response_attributes(span, complete_response)
-
-    if should_send_prompts():
-        _set_completions(span, complete_response.get("choices"))
-
-    span.set_status(Status(StatusCode.OK))
-    span.end()
-
+class ChatStream(ObjectProxy):
+    def __init__(
+        self,
+        span,
+        response,
+        instance=None,
+        token_counter=None,
+        choice_counter=None,
+        duration_histogram=None,
+        streaming_time_to_first_token=None,
+        streaming_time_to_generate=None,
+        start_time=None,
+        request_kwargs=None,
+    ):
+        super().__init__(response)
+
+        self._span = span
+        self._instance = instance
+        self._token_counter = token_counter
+        self._choice_counter = choice_counter
+        self._duration_histogram = duration_histogram
+        self._streaming_time_to_first_token = streaming_time_to_first_token
+        self._streaming_time_to_generate = streaming_time_to_generate
+        self._start_time = start_time
+        self._request_kwargs = request_kwargs
+
+        self._first_token = True
+        # will be updated when first token is received
+        self._time_of_first_token = self._start_time
+        self._complete_response = {"choices": [], "model": ""}
+
+    def __enter__(self):
+        return self
+
+    def __iter__(self):
+        return self
+
+    def __aiter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            chunk = self.__wrapped__.__next__()
+        except Exception as e:
+            if isinstance(e, StopIteration):
+                self._close_span()
+            raise e
+        else:
+            self._process_item(chunk)
+            return chunk
+
+    async def __anext__(self):
+        try:
+            chunk = await self.__wrapped__.__anext__()
+        except Exception as e:
+            if isinstance(e, StopAsyncIteration):
+                self._close_span()
+            raise e
+        else:
+            self._process_item(chunk)
+            return chunk
+
+    def _process_item(self, item):
+        self._span.add_event(name="llm.content.completion.chunk")
+
+        if self._first_token and self._streaming_time_to_first_token:
+            self._time_of_first_token = time.time()
+            self._streaming_time_to_first_token.record(
+                self._time_of_first_token - self._start_time
+            )
+            self._first_token = False
+
+        if is_openai_v1():
+            item = model_as_dict(item)
+
+        self._complete_response["model"] = item.get("model")
+
+        for choice in item.get("choices"):
+            index = choice.get("index")
+            if len(self._complete_response.get("choices")) <= index:
+                self._complete_response["choices"].append(
+                    {"index": index, "message": {"content": "", "role": ""}}
+                )
+            complete_choice = self._complete_response.get("choices")[index]
+            if choice.get("finish_reason"):
+                complete_choice["finish_reason"] = choice.get("finish_reason")
+
+            delta = choice.get("delta")
+
+            if delta and delta.get("content"):
+                complete_choice["message"]["content"] += delta.get("content")
+            if delta and delta.get("role"):
+                complete_choice["message"]["role"] = delta.get("role")
+
+    def _close_span(self):
+        shared_attributes = {
+            "gen_ai.response.model": self._complete_response.get("model") or None,
+            "server.address": _get_openai_base_url(self._instance),
+            "stream": True,
+        }
 
-def _accumulate_stream_items(item, complete_response):
-    if is_openai_v1():
-        item = model_as_dict(item)
+        if not is_azure_openai(self._instance):
+            _set_streaming_token_metrics(
+                self._request_kwargs,
+                self._complete_response,
+                self._span,
+                self._token_counter,
+                shared_attributes,
+            )
 
-    complete_response["model"] = item.get("model")
+        # choice metrics
+        if self._choice_counter and self._complete_response.get("choices"):
+            _set_choice_counter_metrics(
+                self._choice_counter,
+                self._complete_response.get("choices"),
+                shared_attributes,
+            )
 
-    for choice in item.get("choices"):
-        index = choice.get("index")
-        if len(complete_response.get("choices")) <= index:
-            complete_response["choices"].append(
-                {"index": index, "message": {"content": "", "role": ""}}
+        # duration metrics
+        if self._start_time and isinstance(self._start_time, (float, int)):
+            duration = time.time() - self._start_time
+        else:
+            duration = None
+        if duration and isinstance(duration, (float, int)) and self._duration_histogram:
+            self._duration_histogram.record(duration, attributes=shared_attributes)
+        if self._streaming_time_to_generate and self._time_of_first_token:
+            self._streaming_time_to_generate.record(
+                time.time() - self._time_of_first_token
             )
-        complete_choice = complete_response.get("choices")[index]
-        if choice.get("finish_reason"):
-            complete_choice["finish_reason"] = choice.get("finish_reason")
 
-        delta = choice.get("delta")
+        _set_response_attributes(self._span, self._complete_response)
+
+        if should_send_prompts():
+            _set_completions(self._span, self._complete_response.get("choices"))
 
-        if delta and delta.get("content"):
-            complete_choice["message"]["content"] += delta.get("content")
-        if delta and delta.get("role"):
-            complete_choice["message"]["role"] = delta.get("role")
+        self._span.set_status(Status(StatusCode.OK))
+        self._span.end()
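
Net effect of this hunk: the two near-duplicate generator functions and the _accumulate_stream_items helper are folded into a single ChatStream class serving both sync and async callers. Because ObjectProxy forwards everything it does not override (including __class__) to the wrapped stream, callers keep the underlying object's attributes and isinstance behavior, while __next__/__anext__ intercept each chunk to accumulate the response and _close_span records the metrics and ends the span once the stream is exhausted.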
pyproject.toml
@@ -8,7 +8,7 @@ show_missing = true
 
 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.17.0"
+version = "0.17.2"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
     "Gal Kleinman <gal@traceloop.com>",