opentelemetry-instrumentation-openai 0.17.0__tar.gz → 0.17.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of opentelemetry-instrumentation-openai might be problematic.
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/PKG-INFO +1 -1
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +135 -154
- opentelemetry_instrumentation_openai-0.17.2/opentelemetry/instrumentation/openai/version.py +1 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/pyproject.toml +1 -1
- opentelemetry_instrumentation_openai-0.17.0/opentelemetry/instrumentation/openai/version.py +0 -1
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/README.md +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/utils.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
{opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.17.0
+Version: 0.17.2
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
{opentelemetry_instrumentation_openai-0.17.0 → opentelemetry_instrumentation_openai-0.17.2}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py
RENAMED
@@ -1,6 +1,8 @@
 import json
 import logging
 import time
+from wrapt import ObjectProxy
+
 
 from opentelemetry import context as context_api
 from opentelemetry.metrics import Counter, Histogram
@@ -85,7 +87,7 @@ def chat_wrapper(
 
     if is_streaming_response(response):
         # span will be closed after the generator is done
-        return
+        return ChatStream(
             span,
             response,
             instance,
@@ -159,7 +161,7 @@ async def achat_wrapper(
 
     if is_streaming_response(response):
         # span will be closed after the generator is done
-        return
+        return ChatStream(
             span,
             response,
             instance,
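Both wrappers above can return the same ChatStream object because one class can implement the sync and async iteration protocols side by side; whichever protocol matches the wrapped stream is the only one exercised. A minimal, self-contained sketch of that dual-protocol pattern (EchoStream and drain are illustrative names, not from this package):

import asyncio


class EchoStream:
    """A toy stream usable from both `for` and `async for` loops."""

    def __init__(self, items):
        self._items = list(items)

    def __iter__(self):
        return self

    def __next__(self):
        if not self._items:
            raise StopIteration
        return self._items.pop(0)

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            return self.__next__()
        except StopIteration:
            # async iteration signals exhaustion with a different exception,
            # mirroring the StopIteration/StopAsyncIteration split in ChatStream
            raise StopAsyncIteration


async def drain(stream):
    return [chunk async for chunk in stream]


print(list(EchoStream("ab")))                 # sync path: ['a', 'b']
print(asyncio.run(drain(EchoStream("ab"))))   # async path: ['a', 'b']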
@@ -395,161 +397,140 @@ def _set_streaming_token_metrics(
     token_counter.add(completion_usage, attributes=attributes_with_token_type)
 
 
-
-def
[old lines 400–497 not preserved in this rendering]
-    shared_attributes = {
-        "gen_ai.response.model": complete_response.get("model") or None,
-        "server.address": _get_openai_base_url(instance),
-        "stream": True,
-    }
-
-    if not is_azure_openai(instance):
-        _set_streaming_token_metrics(
-            request_kwargs, complete_response, span, token_counter, shared_attributes
-        )
-
-    # choice metrics
-    if choice_counter and complete_response.get("choices"):
-        _set_choice_counter_metrics(
-            choice_counter, complete_response.get("choices"), shared_attributes
-        )
-
-    # duration metrics
-    if start_time and isinstance(start_time, (float, int)):
-        duration = time.time() - start_time
-    else:
-        duration = None
-    if duration and isinstance(duration, (float, int)) and duration_histogram:
-        duration_histogram.record(duration, attributes=shared_attributes)
-    if streaming_time_to_generate and time_of_first_token:
-        streaming_time_to_generate.record(time.time() - time_of_first_token)
-
-    _set_response_attributes(span, complete_response)
-
-    if should_send_prompts():
-        _set_completions(span, complete_response.get("choices"))
-
-    span.set_status(Status(StatusCode.OK))
-    span.end()
-
+class ChatStream(ObjectProxy):
+    def __init__(
+        self,
+        span,
+        response,
+        instance=None,
+        token_counter=None,
+        choice_counter=None,
+        duration_histogram=None,
+        streaming_time_to_first_token=None,
+        streaming_time_to_generate=None,
+        start_time=None,
+        request_kwargs=None,
+    ):
+        super().__init__(response)
+
+        self._span = span
+        self._instance = instance
+        self._token_counter = token_counter
+        self._choice_counter = choice_counter
+        self._duration_histogram = duration_histogram
+        self._streaming_time_to_first_token = streaming_time_to_first_token
+        self._streaming_time_to_generate = streaming_time_to_generate
+        self._start_time = start_time
+        self._request_kwargs = request_kwargs
+
+        self._first_token = True
+        # will be updated when first token is received
+        self._time_of_first_token = self._start_time
+        self._complete_response = {"choices": [], "model": ""}
+
+    def __enter__(self):
+        return self
+
+    def __iter__(self):
+        return self
+
+    def __aiter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            chunk = self.__wrapped__.__next__()
+        except Exception as e:
+            if isinstance(e, StopIteration):
+                self._close_span()
+            raise e
+        else:
+            self._process_item(chunk)
+            return chunk
+
+    async def __anext__(self):
+        try:
+            chunk = await self.__wrapped__.__anext__()
+        except Exception as e:
+            if isinstance(e, StopAsyncIteration):
+                self._close_span()
+            raise e
+        else:
+            self._process_item(chunk)
+            return chunk
+
+    def _process_item(self, item):
+        self._span.add_event(name="llm.content.completion.chunk")
+
+        if self._first_token and self._streaming_time_to_first_token:
+            self._time_of_first_token = time.time()
+            self._streaming_time_to_first_token.record(
+                self._time_of_first_token - self._start_time
+            )
+            self._first_token = False
+
+        if is_openai_v1():
+            item = model_as_dict(item)
+
+        self._complete_response["model"] = item.get("model")
+
+        for choice in item.get("choices"):
+            index = choice.get("index")
+            if len(self._complete_response.get("choices")) <= index:
+                self._complete_response["choices"].append(
+                    {"index": index, "message": {"content": "", "role": ""}}
+                )
+            complete_choice = self._complete_response.get("choices")[index]
+            if choice.get("finish_reason"):
+                complete_choice["finish_reason"] = choice.get("finish_reason")
+
+            delta = choice.get("delta")
+
+            if delta and delta.get("content"):
+                complete_choice["message"]["content"] += delta.get("content")
+            if delta and delta.get("role"):
+                complete_choice["message"]["role"] = delta.get("role")
+
+    def _close_span(self):
+        shared_attributes = {
+            "gen_ai.response.model": self._complete_response.get("model") or None,
+            "server.address": _get_openai_base_url(self._instance),
+            "stream": True,
+        }
 
-
-
-
+        if not is_azure_openai(self._instance):
+            _set_streaming_token_metrics(
+                self._request_kwargs,
+                self._complete_response,
+                self._span,
+                self._token_counter,
+                shared_attributes,
+            )
 
-
+        # choice metrics
+        if self._choice_counter and self._complete_response.get("choices"):
+            _set_choice_counter_metrics(
+                self._choice_counter,
+                self._complete_response.get("choices"),
+                shared_attributes,
+            )
 
-
-
-
-
-
+        # duration metrics
+        if self._start_time and isinstance(self._start_time, (float, int)):
+            duration = time.time() - self._start_time
+        else:
+            duration = None
+        if duration and isinstance(duration, (float, int)) and self._duration_histogram:
+            self._duration_histogram.record(duration, attributes=shared_attributes)
+        if self._streaming_time_to_generate and self._time_of_first_token:
+            self._streaming_time_to_generate.record(
+                time.time() - self._time_of_first_token
             )
-        complete_choice = complete_response.get("choices")[index]
-        if choice.get("finish_reason"):
-            complete_choice["finish_reason"] = choice.get("finish_reason")
 
-
+        _set_response_attributes(self._span, self._complete_response)
+
+        if should_send_prompts():
+            _set_completions(self._span, self._complete_response.get("choices"))
 
-
-
-        if delta and delta.get("role"):
-            complete_choice["message"]["role"] = delta.get("role")
+        self._span.set_status(Status(StatusCode.OK))
+        self._span.end()
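The heart of this refactor is wrapt.ObjectProxy: ChatStream proxies the underlying OpenAI stream, so any attribute or method the instrumentation does not override falls through to the real response object, and the caller gets a drop-in replacement instead of a bare generator. A hedged, self-contained sketch of the same pattern (CountingStream and on_close are illustrative, not part of this package; note that wrapt keeps proxy-local state under a _self_ prefix, while plain attribute writes are forwarded to the wrapped object):

import wrapt


class CountingStream(wrapt.ObjectProxy):
    """Proxy any iterator: count items and fire a callback on exhaustion."""

    def __init__(self, wrapped, on_close=None):
        super().__init__(wrapped)
        # the _self_ prefix stores these on the proxy, not the wrapped object
        self._self_count = 0
        self._self_on_close = on_close

    def __iter__(self):
        return self

    def __next__(self):
        try:
            item = self.__wrapped__.__next__()
        except StopIteration:
            if self._self_on_close:
                # analogous to ChatStream._close_span(): finalize telemetry once
                self._self_on_close(self._self_count)
            raise
        self._self_count += 1
        return item


stream = CountingStream(iter(["a", "b", "c"]), on_close=print)
print(list(stream))  # prints 3 (the count) on exhaustion, then ['a', 'b', 'c']

Because iteration is the only behavior intercepted, attribute reads against the wrapped stream continue to behave exactly as before.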
opentelemetry_instrumentation_openai-0.17.2/opentelemetry/instrumentation/openai/version.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.17.2"
opentelemetry_instrumentation_openai-0.17.0/opentelemetry/instrumentation/openai/version.py
DELETED
@@ -1 +0,0 @@
-__version__ = "0.17.0"
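For consumers verifying the upgrade, the installed version can be read from the distribution metadata with the standard library (a generic check, not an API of this package):

from importlib.metadata import version

print(version("opentelemetry-instrumentation-openai"))  # "0.17.2" once upgraded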