opentelemetry-instrumentation-openai 0.26.0__py3-none-any.whl → 0.26.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opentelemetry-instrumentation-openai might be problematic.
- opentelemetry/instrumentation/openai/shared/__init__.py +20 -1
- opentelemetry/instrumentation/openai/shared/chat_wrappers.py +17 -0
- opentelemetry/instrumentation/openai/version.py +1 -1
- {opentelemetry_instrumentation_openai-0.26.0.dist-info → opentelemetry_instrumentation_openai-0.26.1.dist-info}/METADATA +4 -4
- {opentelemetry_instrumentation_openai-0.26.0.dist-info → opentelemetry_instrumentation_openai-0.26.1.dist-info}/RECORD +7 -7
- {opentelemetry_instrumentation_openai-0.26.0.dist-info → opentelemetry_instrumentation_openai-0.26.1.dist-info}/WHEEL +0 -0
- {opentelemetry_instrumentation_openai-0.26.0.dist-info → opentelemetry_instrumentation_openai-0.26.1.dist-info}/entry_points.txt +0 -0
opentelemetry/instrumentation/openai/shared/__init__.py

@@ -17,6 +17,8 @@ from opentelemetry.instrumentation.openai.utils import (
 )
 
 OPENAI_LLM_USAGE_TOKEN_TYPES = ["prompt_tokens", "completion_tokens"]
+PROMPT_FILTER_KEY = "prompt_filter_results"
+PROMPT_ERROR = "prompt_error"
 
 # tiktoken encodings map for different model, key is model_name, value is tiktoken encoding
 tiktoken_encodings = {}
@@ -138,6 +140,14 @@ def _set_response_attributes(span, response):
     if not span.is_recording():
         return
 
+    if "error" in response:
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_PROMPTS}.{PROMPT_ERROR}",
+            json.dumps(response.get("error"))
+        )
+        return
+
     _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
 
     _set_span_attribute(
@@ -145,6 +155,7 @@ def _set_response_attributes(span, response):
         SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
         response.get("system_fingerprint"),
     )
+    _log_prompt_filter(span, response)
 
     usage = response.get("usage")
     if not usage:
@@ -164,10 +175,18 @@ def _set_response_attributes(span, response):
     _set_span_attribute(
         span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.get("prompt_tokens")
    )
-
     return
 
 
+def _log_prompt_filter(span, response_dict):
+    if response_dict.get("prompt_filter_results"):
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_PROMPTS}.{PROMPT_FILTER_KEY}",
+            json.dumps(response_dict.get("prompt_filter_results"))
+        )
+
+
 @dont_throw
 def _set_span_stream_usage(span, prompt_tokens, completion_tokens):
     if not span.is_recording():
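The two additions above both JSON-encode Azure-style data onto the request span under the prompt prefix: an "error" payload short-circuits the usual model/usage attributes, while prompt_filter_results are recorded alongside them. A minimal illustrative sketch of the resulting attribute keys follows; it assumes SpanAttributes.LLM_PROMPTS resolves to "gen_ai.prompt" (an assumption about opentelemetry-semantic-conventions-ai, not something stated in this diff), and the payloads are invented examples.

```python
import json

# Assumed semantic-convention prefix; verify SpanAttributes.LLM_PROMPTS in
# your installed opentelemetry-semantic-conventions-ai version.
LLM_PROMPTS = "gen_ai.prompt"

# Hypothetical Azure OpenAI payloads, for illustration only.
error_response = {"error": {"code": "429", "message": "Rate limit exceeded"}}
ok_response = {
    "model": "gpt-4o",
    "prompt_filter_results": [
        {"prompt_index": 0, "content_filter_results": {"hate": {"filtered": False}}}
    ],
}

# Error path: only the JSON-encoded error is recorded, then the function returns.
error_attrs = {f"{LLM_PROMPTS}.prompt_error": json.dumps(error_response["error"])}

# Success path: prompt filter results are JSON-encoded next to the usual
# model/usage attributes.
filter_attrs = {
    f"{LLM_PROMPTS}.prompt_filter_results": json.dumps(
        ok_response["prompt_filter_results"]
    )
}

print(error_attrs)
print(filter_attrs)
```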
opentelemetry/instrumentation/openai/shared/chat_wrappers.py

@@ -42,6 +42,9 @@ from opentelemetry.trace.status import Status, StatusCode
 from opentelemetry.instrumentation.openai.utils import is_openai_v1, is_azure_openai
 
 SPAN_NAME = "openai.chat"
+PROMPT_FILTER_KEY = "prompt_filter_results"
+CONTENT_FILTER_KEY = "content_filter_results"
+
 LLM_REQUEST_TYPE = LLMRequestTypeValues.CHAT
 
 logger = logging.getLogger(__name__)
@@ -373,9 +376,16 @@ def _set_completions(span, choices):
             span, f"{prefix}.finish_reason", choice.get("finish_reason")
         )
 
+        if choice.get("content_filter_results"):
+            _set_span_attribute(
+                span, f"{prefix}.{CONTENT_FILTER_KEY}",
+                json.dumps(choice.get("content_filter_results"))
+            )
+
         if choice.get("finish_reason") == "content_filter":
             _set_span_attribute(span, f"{prefix}.role", "assistant")
             _set_span_attribute(span, f"{prefix}.content", "FILTERED")
+
             return
 
         message = choice.get("message")
@@ -770,6 +780,10 @@ def _accumulate_stream_items(item, complete_response):
 
     complete_response["model"] = item.get("model")
 
+    # prompt filter results
+    if item.get("prompt_filter_results"):
+        complete_response["prompt_filter_results"] = item.get("prompt_filter_results")
+
     for choice in item.get("choices"):
         index = choice.get("index")
         if len(complete_response.get("choices")) <= index:
@@ -779,11 +793,14 @@ def _accumulate_stream_items(item, complete_response):
         complete_choice = complete_response.get("choices")[index]
         if choice.get("finish_reason"):
             complete_choice["finish_reason"] = choice.get("finish_reason")
+        if choice.get("content_filter_results"):
+            complete_choice["content_filter_results"] = choice.get("content_filter_results")
 
         delta = choice.get("delta")
 
         if delta and delta.get("content"):
             complete_choice["message"]["content"] += delta.get("content")
+
         if delta and delta.get("role"):
             complete_choice["message"]["role"] = delta.get("role")
         if delta and delta.get("tool_calls"):
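With these chat_wrappers.py changes, the request-level prompt_filter_results and the per-choice content_filter_results (including accumulated streaming responses) land on the "openai.chat" span as ordinary attributes, so any exporter can read them back. A rough sketch of inspecting them is shown below; the "gen_ai.prompt"/"gen_ai.completion" attribute prefixes and the in-memory exporter setup are assumptions for illustration, not part of this diff.

```python
import json

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

# Collect spans in memory so the new filter attributes can be inspected
# after an instrumented chat completion has finished.
exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
trace.set_tracer_provider(provider)

# ... run an instrumented (Azure) OpenAI chat completion here ...

for span in exporter.get_finished_spans():
    if span.name != "openai.chat":
        continue
    # Request-level prompt filter results added in 0.26.1 (JSON string or None).
    prompt_filters = span.attributes.get("gen_ai.prompt.prompt_filter_results")
    # Per-choice content filter results for the first completion choice.
    choice_filters = span.attributes.get("gen_ai.completion.0.content_filter_results")
    if prompt_filters:
        print("prompt filters:", json.loads(prompt_filters))
    if choice_filters:
        print("choice 0 filters:", json.loads(choice_filters))
```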
opentelemetry/instrumentation/openai/version.py

@@ -1 +1 @@
-__version__ = "0.26.0"
+__version__ = "0.26.1"
{opentelemetry_instrumentation_openai-0.26.0.dist-info → opentelemetry_instrumentation_openai-0.26.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.26.0
+Version: 0.26.1
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
@@ -14,9 +14,9 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: instruments
-Requires-Dist: opentelemetry-api (>=1.
-Requires-Dist: opentelemetry-instrumentation (>=0.
-Requires-Dist: opentelemetry-semantic-conventions (>=0.
+Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
+Requires-Dist: opentelemetry-instrumentation (>=0.47b0,<0.48)
+Requires-Dist: opentelemetry-semantic-conventions (>=0.47b0,<0.48)
 Requires-Dist: opentelemetry-semantic-conventions-ai (==0.3.6)
 Requires-Dist: tiktoken (>=0.6.0,<1)
 Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
{opentelemetry_instrumentation_openai-0.26.0.dist-info → opentelemetry_instrumentation_openai-0.26.1.dist-info}/RECORD

@@ -1,6 +1,6 @@
 opentelemetry/instrumentation/openai/__init__.py,sha256=w0gjgORccQ3EsMiK2dzZg6V7J3pa6PcAuDvy1oofQ5A,1503
-opentelemetry/instrumentation/openai/shared/__init__.py,sha256=
-opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=
+opentelemetry/instrumentation/openai/shared/__init__.py,sha256=j2wf_XzqXqGVdLYfHvddftvK2uy8V7_monZaa2nQKug,8830
+opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=Eh3l3Jmmav7MP3jeomoXTINRH9aco7upT4WEzGI12RM,26729
 opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=OuwS5vZwe_jTcWU7xnx8nVtqWCrGS1hcGbyKhQbf11g,6840
 opentelemetry/instrumentation/openai/shared/config.py,sha256=_4AkHau8C44uBc6-fNBl9EzCGPBuj9yDIngTuwSW6ZE,199
 opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=EUtQEl9MP56YVe5tPY1QXNR0yNXw1MvfCwrIzmGXTAg,6930
@@ -10,8 +10,8 @@ opentelemetry/instrumentation/openai/v0/__init__.py,sha256=FYq3xhtaIdvy7mwCPzxaq
 opentelemetry/instrumentation/openai/v1/__init__.py,sha256=wDO1rjgeZRNVXXA3IJUdqYVXRsvst7_JTtAjBK-m1Gc,7693
 opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=4BDLcqOfwl0LFUdAjLE_PgRcWsQYKoCM_okWLCU8A9U,6277
 opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py,sha256=SAzYoun2yyOloofyOWtxpm8E2M9TL3Nm8TgKdNyXHuY,2779
-opentelemetry/instrumentation/openai/version.py,sha256=
-opentelemetry_instrumentation_openai-0.26.
-opentelemetry_instrumentation_openai-0.26.
-opentelemetry_instrumentation_openai-0.26.
-opentelemetry_instrumentation_openai-0.26.
+opentelemetry/instrumentation/openai/version.py,sha256=u0eTmljUU0kO8AAW-e1ESQ49mK2SuhpCy7eCliBLlDU,23
+opentelemetry_instrumentation_openai-0.26.1.dist-info/METADATA,sha256=lcrhMa87ycHNtrnU-NFS_iLvlowE5Xrakj5pao7ShSs,2255
+opentelemetry_instrumentation_openai-0.26.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+opentelemetry_instrumentation_openai-0.26.1.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
+opentelemetry_instrumentation_openai-0.26.1.dist-info/RECORD,,
The WHEEL and entry_points.txt files were renamed to the 0.26.1 dist-info directory without content changes.