opentelemetry-instrumentation-openai 0.18.1__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opentelemetry-instrumentation-openai might be problematic.
- opentelemetry/instrumentation/openai/shared/__init__.py +7 -1
- opentelemetry/instrumentation/openai/shared/chat_wrappers.py +231 -28
- opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +1 -1
- opentelemetry/instrumentation/openai/v0/__init__.py +10 -16
- opentelemetry/instrumentation/openai/v1/__init__.py +10 -16
- opentelemetry/instrumentation/openai/version.py +1 -1
- {opentelemetry_instrumentation_openai-0.18.1.dist-info → opentelemetry_instrumentation_openai-0.19.0.dist-info}/METADATA +1 -1
- {opentelemetry_instrumentation_openai-0.18.1.dist-info → opentelemetry_instrumentation_openai-0.19.0.dist-info}/RECORD +10 -10
- {opentelemetry_instrumentation_openai-0.18.1.dist-info → opentelemetry_instrumentation_openai-0.19.0.dist-info}/WHEEL +0 -0
- {opentelemetry_instrumentation_openai-0.18.1.dist-info → opentelemetry_instrumentation_openai-0.19.0.dist-info}/entry_points.txt +0 -0
--- a/opentelemetry/instrumentation/openai/shared/__init__.py
+++ b/opentelemetry/instrumentation/openai/shared/__init__.py
@@ -112,7 +112,9 @@ def _set_request_attributes(span, kwargs):
     _set_span_attribute(
         span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
     )
-    _set_span_attribute(
+    _set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
+    )
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
     _set_span_attribute(
         span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
@@ -139,6 +141,10 @@ def _set_response_attributes(span, response):

     _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))

+    _set_span_attribute(
+        span, "gen_ai.openai.system_fingerprint", response.get("system_fingerprint")
+    )
+
     usage = response.get("usage")
     if not usage:
         return
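The system_fingerprint field only appears on some responses, so the value passed here can be None. A minimal sketch of the guard a helper like _set_span_attribute presumably applies, so absent fingerprints do not become empty span attributes (an assumption; the helper's body is outside this diff):

def _set_span_attribute(span, name, value):
    # Presumed guard: skip unset/empty values instead of writing noise attributes.
    if value is not None and value != "":
        span.set_attribute(name, value)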
--- a/opentelemetry/instrumentation/openai/shared/chat_wrappers.py
+++ b/opentelemetry/instrumentation/openai/shared/chat_wrappers.py
@@ -87,18 +87,32 @@ def chat_wrapper(

     if is_streaming_response(response):
         # span will be closed after the generator is done
-
-
-
-
-
-
-
-
-
-
-
-
+        if is_openai_v1():
+            return ChatStream(
+                span,
+                response,
+                instance,
+                token_counter,
+                choice_counter,
+                duration_histogram,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+                start_time,
+                kwargs,
+            )
+        else:
+            return _build_from_streaming_response(
+                span,
+                response,
+                instance,
+                token_counter,
+                choice_counter,
+                duration_histogram,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+                start_time,
+                kwargs,
+            )

     duration = end_time - start_time

@@ -161,18 +175,32 @@ async def achat_wrapper(

     if is_streaming_response(response):
         # span will be closed after the generator is done
-
-
-
-
-
-
-
-
-
-
-
-
+        if is_openai_v1():
+            return ChatStream(
+                span,
+                response,
+                instance,
+                token_counter,
+                choice_counter,
+                duration_histogram,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+                start_time,
+                kwargs,
+            )
+        else:
+            return _abuild_from_streaming_response(
+                span,
+                response,
+                instance,
+                token_counter,
+                choice_counter,
+                duration_histogram,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+                start_time,
+                kwargs,
+            )

     duration = end_time - start_time

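In both wrappers the v1 path now returns ChatStream, a wrapt.ObjectProxy subclass (see the class definition further down), so callers still hold an object that behaves like the original openai Stream while telemetry is collected as chunks pass through; the v0 path keeps the generator-based rebuild. A toy sketch of the proxy pattern (illustrative only, not the package's class):

import wrapt

class CountingStream(wrapt.ObjectProxy):
    # Transparent proxy: everything not overridden is delegated to the wrapped stream.
    def __init__(self, wrapped):
        super().__init__(wrapped)
        self._self_count = 0  # wrapt keeps _self_-prefixed attributes on the proxy itself

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.__wrapped__.__next__()  # StopIteration here is where end-of-stream hooks go
        self._self_count += 1
        return chunk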
@@ -277,7 +305,7 @@ def _set_token_counter_metrics(token_counter, usage, shared_attributes):
             **shared_attributes,
             "llm.usage.token_type": name.split("_")[0],
         }
-        token_counter.
+        token_counter.record(val, attributes=attributes_with_token_type)


 def _set_prompts(span, messages):
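The diff viewer clipped the 0.18.1 side of this line; the 0.19.0 side records each usage entry on a histogram, keyed by token type. A standalone sketch of that recording pattern with the stock OpenTelemetry metrics API (the meter name is illustrative; the instrument name comes from this diff):

from opentelemetry import metrics

meter = metrics.get_meter("example.meter")  # illustrative name
tokens_histogram = meter.create_histogram(
    name="gen_ai.client.token.usage",
    unit="token",
    description="Number of tokens used in prompt and completions",
)
# For usage = {"prompt_tokens": 12, "completion_tokens": 34}, name.split("_")[0]
# yields the token_type attribute values below:
tokens_histogram.record(12, attributes={"llm.usage.token_type": "prompt"})
tokens_histogram.record(34, attributes={"llm.usage.token_type": "completion"})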
@@ -359,7 +387,9 @@ def _set_streaming_token_metrics(
     prompt_content = ""
     # setting the default model_name as gpt-4. As this uses the embedding "cl100k_base" that
     # is used by most of the other model.
-    model_name =
+    model_name = (
+        request_kwargs.get("model") or complete_response.get("model") or "gpt-4"
+    )
     for msg in request_kwargs.get("messages"):
         if msg.get("content"):
             prompt_content += msg.get("content")
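The gpt-4 fallback matters because streamed responses carry no usage block, so tokens have to be counted locally; the cl100k_base comment suggests a tokenizer such as tiktoken. A hedged sketch of that counting strategy (tiktoken here is an assumption; the counting helper itself is outside this diff):

import tiktoken

def count_tokens(text, model_name="gpt-4"):
    try:
        encoding = tiktoken.encoding_for_model(model_name)
    except KeyError:
        # Unknown model name: fall back to cl100k_base, shared by most current models.
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))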
@@ -392,17 +422,27 @@ def _set_streaming_token_metrics(
             **shared_attributes,
             "llm.usage.token_type": "prompt",
         }
-        token_counter.
+        token_counter.record(prompt_usage, attributes=attributes_with_token_type)

     if type(completion_usage) is int and completion_usage >= 0:
         attributes_with_token_type = {
             **shared_attributes,
             "llm.usage.token_type": "completion",
         }
-        token_counter.
+        token_counter.record(completion_usage, attributes=attributes_with_token_type)


 class ChatStream(ObjectProxy):
+    _span = None
+    _instance = None
+    _token_counter = None
+    _choice_counter = None
+    _duration_histogram = None
+    _streaming_time_to_first_token = None
+    _streaming_time_to_generate = None
+    _start_time = None
+    _request_kwargs = None
+
     def __init__(
         self,
         span,
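The new class-level defaults (_span = None and friends) are likely there for wrapt's benefit: ObjectProxy forwards assignments of unknown attributes to the wrapped object, but keeps attributes that already exist on the proxy class (or that carry the _self_ prefix) on the proxy itself. Declaring the fields on the class therefore lets __init__ write self._span = span without leaking state onto the wrapped stream. A small demonstration, based on wrapt's documented attribute rules (illustrative):

import wrapt

class Payload:
    pass

class TaggedProxy(wrapt.ObjectProxy):
    _tag = None  # declared on the class, so the assignment below stays on the proxy

proxy = TaggedProxy(Payload())
proxy._tag = "kept on the proxy"
proxy.other = "forwarded"  # undeclared attribute: assigned to the wrapped Payload
assert not hasattr(proxy.__wrapped__, "_tag")
assert proxy.__wrapped__.other == "forwarded"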
@@ -539,3 +579,166 @@ class ChatStream(ObjectProxy):

         self._span.set_status(Status(StatusCode.OK))
         self._span.end()
+
+
+# Backward compatibility with OpenAI v0
+
+
+@dont_throw
+def _build_from_streaming_response(
+    span,
+    response,
+    instance=None,
+    token_counter=None,
+    choice_counter=None,
+    duration_histogram=None,
+    streaming_time_to_first_token=None,
+    streaming_time_to_generate=None,
+    start_time=None,
+    request_kwargs=None,
+):
+    complete_response = {"choices": [], "model": ""}
+
+    first_token = True
+    time_of_first_token = start_time  # will be updated when first token is received
+
+    for item in response:
+        span.add_event(name="llm.content.completion.chunk")
+
+        item_to_yield = item
+
+        if first_token and streaming_time_to_first_token:
+            time_of_first_token = time.time()
+            streaming_time_to_first_token.record(time_of_first_token - start_time)
+            first_token = False
+
+        _accumulate_stream_items(item, complete_response)
+
+        yield item_to_yield
+
+    shared_attributes = {
+        "gen_ai.response.model": complete_response.get("model") or None,
+        "server.address": _get_openai_base_url(instance),
+        "stream": True,
+    }
+
+    if not is_azure_openai(instance):
+        _set_streaming_token_metrics(
+            request_kwargs, complete_response, span, token_counter, shared_attributes
+        )
+
+    # choice metrics
+    if choice_counter and complete_response.get("choices"):
+        _set_choice_counter_metrics(
+            choice_counter, complete_response.get("choices"), shared_attributes
+        )
+
+    # duration metrics
+    if start_time and isinstance(start_time, (float, int)):
+        duration = time.time() - start_time
+    else:
+        duration = None
+    if duration and isinstance(duration, (float, int)) and duration_histogram:
+        duration_histogram.record(duration, attributes=shared_attributes)
+    if streaming_time_to_generate and time_of_first_token:
+        streaming_time_to_generate.record(time.time() - time_of_first_token)
+
+    _set_response_attributes(span, complete_response)
+
+    if should_send_prompts():
+        _set_completions(span, complete_response.get("choices"))
+
+    span.set_status(Status(StatusCode.OK))
+    span.end()
+
+
+@dont_throw
+async def _abuild_from_streaming_response(
+    span,
+    response,
+    instance=None,
+    token_counter=None,
+    choice_counter=None,
+    duration_histogram=None,
+    streaming_time_to_first_token=None,
+    streaming_time_to_generate=None,
+    start_time=None,
+    request_kwargs=None,
+):
+    complete_response = {"choices": [], "model": ""}
+
+    first_token = True
+    time_of_first_token = start_time  # will be updated when first token is received
+
+    async for item in response:
+        span.add_event(name="llm.content.completion.chunk")
+
+        item_to_yield = item
+
+        if first_token and streaming_time_to_first_token:
+            time_of_first_token = time.time()
+            streaming_time_to_first_token.record(time_of_first_token - start_time)
+            first_token = False
+
+        _accumulate_stream_items(item, complete_response)
+
+        yield item_to_yield
+
+    shared_attributes = {
+        "gen_ai.response.model": complete_response.get("model") or None,
+        "server.address": _get_openai_base_url(instance),
+        "stream": True,
+    }
+
+    if not is_azure_openai(instance):
+        _set_streaming_token_metrics(
+            request_kwargs, complete_response, span, token_counter, shared_attributes
+        )
+
+    # choice metrics
+    if choice_counter and complete_response.get("choices"):
+        _set_choice_counter_metrics(
+            choice_counter, complete_response.get("choices"), shared_attributes
+        )
+
+    # duration metrics
+    if start_time and isinstance(start_time, (float, int)):
+        duration = time.time() - start_time
+    else:
+        duration = None
+    if duration and isinstance(duration, (float, int)) and duration_histogram:
+        duration_histogram.record(duration, attributes=shared_attributes)
+    if streaming_time_to_generate and time_of_first_token:
+        streaming_time_to_generate.record(time.time() - time_of_first_token)
+
+    _set_response_attributes(span, complete_response)
+
+    if should_send_prompts():
+        _set_completions(span, complete_response.get("choices"))
+
+    span.set_status(Status(StatusCode.OK))
+    span.end()
+
+
+def _accumulate_stream_items(item, complete_response):
+    if is_openai_v1():
+        item = model_as_dict(item)
+
+    complete_response["model"] = item.get("model")
+
+    for choice in item.get("choices"):
+        index = choice.get("index")
+        if len(complete_response.get("choices")) <= index:
+            complete_response["choices"].append(
+                {"index": index, "message": {"content": "", "role": ""}}
+            )
+        complete_choice = complete_response.get("choices")[index]
+        if choice.get("finish_reason"):
+            complete_choice["finish_reason"] = choice.get("finish_reason")
+
+        delta = choice.get("delta")
+
+        if delta and delta.get("content"):
+            complete_choice["message"]["content"] += delta.get("content")
+        if delta and delta.get("role"):
+            complete_choice["message"]["role"] = delta.get("role")
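To make _accumulate_stream_items concrete, here is a dict-only rerun of its merge logic on two hypothetical chunks (simplified: the v1 model_as_dict conversion is skipped):

def accumulate(item, complete_response):
    # Same merge rules as _accumulate_stream_items, minus the v1 conversion.
    complete_response["model"] = item.get("model")
    for choice in item.get("choices"):
        index = choice.get("index")
        if len(complete_response["choices"]) <= index:
            complete_response["choices"].append(
                {"index": index, "message": {"content": "", "role": ""}}
            )
        merged = complete_response["choices"][index]
        if choice.get("finish_reason"):
            merged["finish_reason"] = choice.get("finish_reason")
        delta = choice.get("delta") or {}
        if delta.get("content"):
            merged["message"]["content"] += delta["content"]
        if delta.get("role"):
            merged["message"]["role"] = delta["role"]

complete = {"choices": [], "model": ""}
accumulate({"model": "gpt-4", "choices": [{"index": 0, "delta": {"role": "assistant", "content": "Hel"}}]}, complete)
accumulate({"model": "gpt-4", "choices": [{"index": 0, "delta": {"content": "lo"}, "finish_reason": "stop"}]}, complete)
assert complete["choices"][0]["message"] == {"content": "Hello", "role": "assistant"}
assert complete["choices"][0]["finish_reason"] == "stop"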
--- a/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py
+++ b/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py
@@ -202,7 +202,7 @@ def _set_embeddings_metrics(
             **shared_attributes,
             "llm.usage.token_type": name.split("_")[0],
         }
-        token_counter.
+        token_counter.record(val, attributes=attributes_with_token_type)

     # vec size metrics
     # should use counter for vector_size?
--- a/opentelemetry/instrumentation/openai/v0/__init__.py
+++ b/opentelemetry/instrumentation/openai/v0/__init__.py
@@ -35,20 +35,20 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
         meter = get_meter(__name__, __version__, meter_provider)

         if is_metrics_enabled():
-
-                name="
+            tokens_histogram = meter.create_histogram(
+                name="gen_ai.client.token.usage",
                 unit="token",
                 description="Number of tokens used in prompt and completions",
             )

             chat_choice_counter = meter.create_counter(
-                name="
+                name="gen_ai.client.generation.choices",
                 unit="choice",
                 description="Number of choices returned by chat completions call",
             )

             chat_duration_histogram = meter.create_histogram(
-                name="
+                name="gen_ai.client.operation.duration",
                 unit="s",
                 description="Duration of chat completion operation",
             )
@@ -71,7 +71,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             )
         else:
             (
-
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -80,12 +80,6 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             ) = (None, None, None, None, None, None)

         if is_metrics_enabled():
-            embeddings_token_counter = meter.create_counter(
-                name="llm.openai.embeddings.tokens",
-                unit="token",
-                description="Number of tokens used in prompt and completions",
-            )
-
             embeddings_vector_size_counter = meter.create_counter(
                 name="llm.openai.embeddings.vector_size",
                 unit="element",
@@ -105,7 +99,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             )
         else:
             (
-
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -120,7 +114,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "ChatCompletion.create",
             chat_wrapper(
                 tracer,
-
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -133,7 +127,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "ChatCompletion.acreate",
             achat_wrapper(
                 tracer,
-
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -146,7 +140,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "Embedding.create",
             embeddings_wrapper(
                 tracer,
-
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -157,7 +151,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "Embedding.acreate",
             aembeddings_wrapper(
                 tracer,
-
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
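Both instrumentors now name the instruments in the gen_ai.client.* style and reuse one tokens_histogram across chat and embeddings, instead of the separate llm.openai.embeddings.tokens counter. A quick way to check what a renamed instrument emits, assuming the standard OpenTelemetry SDK (instrument name from this diff; the setup is illustrative):

from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader

reader = InMemoryMetricReader()
meter = MeterProvider(metric_readers=[reader]).get_meter("check")
tokens_histogram = meter.create_histogram(
    name="gen_ai.client.token.usage", unit="token"
)
tokens_histogram.record(42, attributes={"llm.usage.token_type": "prompt"})
# The collected data now contains one histogram named "gen_ai.client.token.usage".
metrics_data = reader.get_metrics_data()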
--- a/opentelemetry/instrumentation/openai/v1/__init__.py
+++ b/opentelemetry/instrumentation/openai/v1/__init__.py
@@ -49,20 +49,20 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
         meter = get_meter(__name__, __version__, meter_provider)

         if is_metrics_enabled():
-
-                name="
+            tokens_histogram = meter.create_histogram(
+                name="gen_ai.client.token.usage",
                 unit="token",
                 description="Number of tokens used in prompt and completions",
             )

             chat_choice_counter = meter.create_counter(
-                name="
+                name="gen_ai.client.generation.choices",
                 unit="choice",
                 description="Number of choices returned by chat completions call",
             )

             chat_duration_histogram = meter.create_histogram(
-                name="
+                name="gen_ai.client.operation.duration",
                 unit="s",
                 description="Duration of chat completion operation",
             )
@@ -85,7 +85,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             )
         else:
             (
-
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -98,7 +98,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "Completions.create",
             chat_wrapper(
                 tracer,
-
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -114,12 +114,6 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             )

         if is_metrics_enabled():
-            embeddings_token_counter = meter.create_counter(
-                name="llm.openai.embeddings.tokens",
-                unit="token",
-                description="Number of tokens used in prompt and completions",
-            )
-
             embeddings_vector_size_counter = meter.create_counter(
                 name="llm.openai.embeddings.vector_size",
                 unit="element",
@@ -139,7 +133,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             )
         else:
             (
-
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -150,7 +144,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "Embeddings.create",
             embeddings_wrapper(
                 tracer,
-
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -162,7 +156,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "AsyncCompletions.create",
             achat_wrapper(
                 tracer,
-
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -180,7 +174,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "AsyncEmbeddings.create",
             aembeddings_wrapper(
                 tracer,
-
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
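The method names being wrapped ("Completions.create", "AsyncCompletions.create", and so on) are handed to wrapt-style function wrapping, the mechanism OpenTelemetry instrumentors conventionally use. A minimal sketch of attaching such a wrapper by hand (illustrative; the module path is an assumption about openai v1's layout):

from wrapt import wrap_function_wrapper

def traced(wrapped, instance, args, kwargs):
    # Same shape as the wrappers returned by chat_wrapper/achat_wrapper:
    # run the original call, with room for spans/metrics around it.
    return wrapped(*args, **kwargs)

wrap_function_wrapper(
    "openai.resources.chat.completions", "Completions.create", traced
)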
--- a/opentelemetry/instrumentation/openai/version.py
+++ b/opentelemetry/instrumentation/openai/version.py
@@ -1 +1 @@
-__version__ = "0.18.1"
+__version__ = "0.19.0"
--- opentelemetry_instrumentation_openai-0.18.1.dist-info/METADATA
+++ opentelemetry_instrumentation_openai-0.19.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.18.1
+Version: 0.19.0
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
--- opentelemetry_instrumentation_openai-0.18.1.dist-info/RECORD
+++ opentelemetry_instrumentation_openai-0.19.0.dist-info/RECORD
@@ -1,17 +1,17 @@
 opentelemetry/instrumentation/openai/__init__.py,sha256=xl3Kvqry9glVhu8VtdknfUE9FpXQ7KWAFqtVlpjE-40,1344
-opentelemetry/instrumentation/openai/shared/__init__.py,sha256=
-opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=
+opentelemetry/instrumentation/openai/shared/__init__.py,sha256=5kEyVhz2YDHvuq2SDQOsDhtjbG7R7GCn793oLq2_J_k,7490
+opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=ppjZgX8k5iORHUlCSyGPxDAYpXKOqs9KiCUSB0Cgu4s,23353
 opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=-JHfgyxic5I3Wr3Uc_L-U7ztDVFcyovtF37tNLtaW3s,6604
 opentelemetry/instrumentation/openai/shared/config.py,sha256=5uekQEnmYo1o6tsTD2IGc-cVmHUo5KmUC4pOVdrFFNk,102
-opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=
+opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=aL3uAkfscMcCqCjzY1lZUNtxkiIV2agg6B8wB5CO8mg,6598
 opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py,sha256=BuYVdxiI71ajx5WZ0B5OpgFzOWGGh-fV77pp3h3nrJA,1891
 opentelemetry/instrumentation/openai/utils.py,sha256=IM5l_MjM7XnO0e7tDGsPml-juQ9SI1QK200X0atiAyE,3357
-opentelemetry/instrumentation/openai/v0/__init__.py,sha256=
-opentelemetry/instrumentation/openai/v1/__init__.py,sha256=
+opentelemetry/instrumentation/openai/v0/__init__.py,sha256=z8RkR1Zp2P_qdIIMCvI6HjD0CPpUJp-2MSGWYiShTIc,5832
+opentelemetry/instrumentation/openai/v1/__init__.py,sha256=5apXSO-jvr5hjqdu4PzSI62R6br4U029c4n9RE5e5GY,8368
 opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=T6Vtdp1fAZdcYjGiTMZwkn4F4DgsltD4p4xLEFW-GhI,5874
 opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py,sha256=SAzYoun2yyOloofyOWtxpm8E2M9TL3Nm8TgKdNyXHuY,2779
-opentelemetry/instrumentation/openai/version.py,sha256=
-opentelemetry_instrumentation_openai-0.
-opentelemetry_instrumentation_openai-0.
-opentelemetry_instrumentation_openai-0.
-opentelemetry_instrumentation_openai-0.
+opentelemetry/instrumentation/openai/version.py,sha256=IPTpw_ZRkJdPKjp9ROF6sfDyeEv2IvChuvliVauZWvE,23
+opentelemetry_instrumentation_openai-0.19.0.dist-info/METADATA,sha256=-CUzWwCC28ixLT76E0DrKH2xwC0gGG78NO2J7G_6yW0,2255
+opentelemetry_instrumentation_openai-0.19.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+opentelemetry_instrumentation_openai-0.19.0.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
+opentelemetry_instrumentation_openai-0.19.0.dist-info/RECORD,,

WHEEL and entry_points.txt: file contents unchanged (only the dist-info directory was renamed).
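Each RECORD row is path,sha256=<digest>,size, where the digest is an unpadded URL-safe base64 SHA-256 per the wheel specification. A small sketch for recomputing a row's hash field, e.g. to verify one of the entries above:

import base64
import hashlib

def record_digest(path):
    # Unpadded URL-safe base64 of the file's SHA-256, as used in wheel RECORD files.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")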