opentelemetry-instrumentation-openai 0.26.0__tar.gz → 0.26.2__tar.gz

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

This version of opentelemetry-instrumentation-openai has been flagged as a potentially problematic release.

Files changed (17):
  1. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/PKG-INFO +5 -5
  2. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/shared/__init__.py +21 -2
  3. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +21 -1
  4. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +1 -1
  5. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +1 -1
  6. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +1 -1
  7. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/v0/__init__.py +1 -1
  8. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/v1/__init__.py +1 -1
  9. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +1 -1
  10. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +1 -1
  11. opentelemetry_instrumentation_openai-0.26.2/opentelemetry/instrumentation/openai/version.py +1 -0
  12. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/pyproject.toml +5 -5
  13. opentelemetry_instrumentation_openai-0.26.0/opentelemetry/instrumentation/openai/version.py +0 -1
  14. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/README.md +0 -0
  15. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/__init__.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.26.0 → opentelemetry_instrumentation_openai-0.26.2}/opentelemetry/instrumentation/openai/utils.py +0 -0
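
In summary, 0.26.2 makes three kinds of changes: the shared AI semantic-conventions import moves from opentelemetry.semconv.ai to opentelemetry.semconv_ai (tracking opentelemetry-semantic-conventions-ai 0.4.0), the chat instrumentation starts recording Azure OpenAI prompt/content filter results and response errors as span attributes, and the core OpenTelemetry dependencies are bumped (API ^1.26.0, instrumentation and semantic conventions ^0.47b0). A minimal sketch of how the new attributes could be inspected on exported spans is shown below; the InMemorySpanExporter setup and the literal gen_ai.* attribute keys are assumptions for illustration, not taken from this diff.

# Illustrative sketch only: collect spans in memory and look for the new
# filter attributes. The "gen_ai.prompt.*" / "gen_ai.completion.*" keys are an
# assumption about how SpanAttributes.LLM_PROMPTS / LLM_COMPLETIONS resolve.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
trace.set_tracer_provider(provider)
OpenAIInstrumentor().instrument()

# ... make an Azure OpenAI chat completion call here ...

for span in exporter.get_finished_spans():
    if span.name == "openai.chat":
        attrs = dict(span.attributes)
        # New in 0.26.2: Azure filter metadata, when present in the response
        print(attrs.get("gen_ai.prompt.prompt_filter_results"))
        print(attrs.get("gen_ai.completion.0.content_filter_results"))
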
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.26.0
+Version: 0.26.2
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
@@ -14,10 +14,10 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: instruments
-Requires-Dist: opentelemetry-api (>=1.25.0,<2.0.0)
-Requires-Dist: opentelemetry-instrumentation (>=0.46b0,<0.47)
-Requires-Dist: opentelemetry-semantic-conventions (>=0.46b0,<0.47)
-Requires-Dist: opentelemetry-semantic-conventions-ai (==0.3.6)
+Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
+Requires-Dist: opentelemetry-instrumentation (>=0.47b0,<0.48)
+Requires-Dist: opentelemetry-semantic-conventions (>=0.47b0,<0.48)
+Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.0)
 Requires-Dist: tiktoken (>=0.6.0,<1)
 Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 Description-Content-Type: text/markdown
opentelemetry/instrumentation/openai/shared/__init__.py
@@ -9,7 +9,7 @@ from importlib.metadata import version
 from opentelemetry import context as context_api
 
 from opentelemetry.instrumentation.openai.shared.config import Config
-from opentelemetry.semconv.ai import SpanAttributes
+from opentelemetry.semconv_ai import SpanAttributes
 from opentelemetry.instrumentation.openai.utils import (
     dont_throw,
     is_openai_v1,
@@ -17,6 +17,8 @@ from opentelemetry.instrumentation.openai.utils import (
 )
 
 OPENAI_LLM_USAGE_TOKEN_TYPES = ["prompt_tokens", "completion_tokens"]
+PROMPT_FILTER_KEY = "prompt_filter_results"
+PROMPT_ERROR = "prompt_error"
 
 # tiktoken encodings map for different model, key is model_name, value is tiktoken encoding
 tiktoken_encodings = {}
@@ -138,6 +140,14 @@ def _set_response_attributes(span, response):
     if not span.is_recording():
         return
 
+    if "error" in response:
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_PROMPTS}.{PROMPT_ERROR}",
+            json.dumps(response.get("error")),
+        )
+        return
+
     _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
 
     _set_span_attribute(
@@ -145,6 +155,7 @@ def _set_response_attributes(span, response):
         SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
         response.get("system_fingerprint"),
     )
+    _log_prompt_filter(span, response)
 
     usage = response.get("usage")
     if not usage:
@@ -164,10 +175,18 @@ def _set_response_attributes(span, response):
     _set_span_attribute(
         span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.get("prompt_tokens")
     )
-
     return
 
 
+def _log_prompt_filter(span, response_dict):
+    if response_dict.get("prompt_filter_results"):
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_PROMPTS}.{PROMPT_FILTER_KEY}",
+            json.dumps(response_dict.get("prompt_filter_results")),
+        )
+
+
 @dont_throw
 def _set_span_stream_usage(span, prompt_tokens, completion_tokens):
     if not span.is_recording():
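
The net effect of the _set_response_attributes change above: when the dict-converted response carries an "error" key, its JSON-encoded value is stored under a single prompt_error attribute and the rest of the mapping is skipped; otherwise the new _log_prompt_filter serializes any prompt_filter_results block onto the span. A self-contained sketch of that flow follows; FakeSpan and the literal "gen_ai.prompt" prefix are illustrative stand-ins for the real span and SpanAttributes.LLM_PROMPTS, not the package's code.

# Minimal sketch of the new error / prompt-filter handling shown in the diff above.
import json


class FakeSpan:
    def __init__(self):
        self.attributes = {}

    def set_attribute(self, key, value):
        self.attributes[key] = value


def record_response(span, response):
    if "error" in response:
        # error responses short-circuit: only the JSON-encoded error is recorded
        span.set_attribute("gen_ai.prompt.prompt_error", json.dumps(response["error"]))
        return
    if response.get("prompt_filter_results"):
        span.set_attribute(
            "gen_ai.prompt.prompt_filter_results",
            json.dumps(response["prompt_filter_results"]),
        )


span = FakeSpan()
record_response(span, {"error": {"code": "content_filter", "message": "blocked"}})
print(span.attributes)
# {'gen_ai.prompt.prompt_error': '{"code": "content_filter", "message": "blocked"}'}
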
opentelemetry/instrumentation/openai/shared/chat_wrappers.py
@@ -7,7 +7,7 @@ from wrapt import ObjectProxy
 
 from opentelemetry import context as context_api
 from opentelemetry.metrics import Counter, Histogram
-from opentelemetry.semconv.ai import (
+from opentelemetry.semconv_ai import (
     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
     SpanAttributes,
     LLMRequestTypeValues,
@@ -42,6 +42,9 @@ from opentelemetry.trace.status import Status, StatusCode
 from opentelemetry.instrumentation.openai.utils import is_openai_v1, is_azure_openai
 
 SPAN_NAME = "openai.chat"
+PROMPT_FILTER_KEY = "prompt_filter_results"
+CONTENT_FILTER_KEY = "content_filter_results"
+
 LLM_REQUEST_TYPE = LLMRequestTypeValues.CHAT
 
 logger = logging.getLogger(__name__)
@@ -373,9 +376,17 @@ def _set_completions(span, choices):
             span, f"{prefix}.finish_reason", choice.get("finish_reason")
         )
 
+        if choice.get("content_filter_results"):
+            _set_span_attribute(
+                span,
+                f"{prefix}.{CONTENT_FILTER_KEY}",
+                json.dumps(choice.get("content_filter_results")),
+            )
+
         if choice.get("finish_reason") == "content_filter":
             _set_span_attribute(span, f"{prefix}.role", "assistant")
             _set_span_attribute(span, f"{prefix}.content", "FILTERED")
+
             return
 
         message = choice.get("message")
@@ -770,6 +781,10 @@ def _accumulate_stream_items(item, complete_response):
 
     complete_response["model"] = item.get("model")
 
+    # prompt filter results
+    if item.get("prompt_filter_results"):
+        complete_response["prompt_filter_results"] = item.get("prompt_filter_results")
+
     for choice in item.get("choices"):
         index = choice.get("index")
         if len(complete_response.get("choices")) <= index:
@@ -779,11 +794,16 @@ def _accumulate_stream_items(item, complete_response):
         complete_choice = complete_response.get("choices")[index]
         if choice.get("finish_reason"):
             complete_choice["finish_reason"] = choice.get("finish_reason")
+        if choice.get("content_filter_results"):
+            complete_choice["content_filter_results"] = choice.get(
+                "content_filter_results"
+            )
 
         delta = choice.get("delta")
 
         if delta and delta.get("content"):
             complete_choice["message"]["content"] += delta.get("content")
+
         if delta and delta.get("role"):
             complete_choice["message"]["role"] = delta.get("role")
         if delta and delta.get("tool_calls"):
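
For streamed chat completions, the filter metadata arrives chunk by chunk: Azure OpenAI reports prompt_filter_results on an early chunk and per-choice content_filter_results alongside the finish reason, and _accumulate_stream_items now copies both into the accumulated response so the same span attributes are emitted when the stream ends. The sketch below mirrors that accumulation on two hypothetical chunk payloads, reduced to the fields the code reads; it is illustrative, not the package's implementation.

# Illustrative accumulation of filter metadata across streamed chunks.
chunks = [
    {
        "model": "gpt-4o",
        "prompt_filter_results": [{"prompt_index": 0, "content_filter_results": {}}],
        "choices": [{"index": 0, "delta": {"role": "assistant", "content": "Hel"}}],
    },
    {
        "model": "gpt-4o",
        "choices": [
            {
                "index": 0,
                "delta": {"content": "lo"},
                "finish_reason": "stop",
                "content_filter_results": {"hate": {"filtered": False}},
            }
        ],
    },
]

complete_response = {"choices": [{"index": 0, "message": {"content": "", "role": ""}}], "model": ""}
for item in chunks:
    complete_response["model"] = item.get("model")
    if item.get("prompt_filter_results"):
        complete_response["prompt_filter_results"] = item["prompt_filter_results"]
    for choice in item["choices"]:
        accumulated = complete_response["choices"][choice["index"]]
        if choice.get("finish_reason"):
            accumulated["finish_reason"] = choice["finish_reason"]
        if choice.get("content_filter_results"):
            accumulated["content_filter_results"] = choice["content_filter_results"]
        delta = choice.get("delta")
        if delta and delta.get("content"):
            accumulated["message"]["content"] += delta["content"]
        if delta and delta.get("role"):
            accumulated["message"]["role"] = delta["role"]

print(complete_response["choices"][0]["message"]["content"])      # "Hello"
print("prompt_filter_results" in complete_response)               # True
print(complete_response["choices"][0]["content_filter_results"])  # {'hate': {'filtered': False}}
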
opentelemetry/instrumentation/openai/shared/completion_wrappers.py
@@ -2,7 +2,7 @@ import logging
 
 from opentelemetry import context as context_api
 
-from opentelemetry.semconv.ai import (
+from opentelemetry.semconv_ai import (
     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
     SpanAttributes,
     LLMRequestTypeValues,
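
This module and the remaining wrapper and entry-point modules below (embeddings_wrappers.py, image_gen_wrappers.py, v0/__init__.py, v1/__init__.py, assistant_wrappers.py, event_handler_wrapper.py) change only the import path: opentelemetry.semconv.ai becomes opentelemetry.semconv_ai, matching the module layout of opentelemetry-semantic-conventions-ai 0.4.0. Code outside this package that imports those symbols directly could guard against both layouts; the try/except fallback below is a sketch of that, not something the instrumentation itself does.

# Compatibility sketch for external code that imports the AI semantic conventions.
try:
    # module path used by opentelemetry-semantic-conventions-ai >= 0.4.0 (this release)
    from opentelemetry.semconv_ai import SpanAttributes
except ImportError:
    # older module path used by the 0.3.x releases (and by 0.26.0 of this package)
    from opentelemetry.semconv.ai import SpanAttributes

print(SpanAttributes.LLM_RESPONSE_MODEL)
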
opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py
@@ -3,7 +3,7 @@ import time
 
 from opentelemetry import context as context_api
 from opentelemetry.metrics import Counter, Histogram
-from opentelemetry.semconv.ai import (
+from opentelemetry.semconv_ai import (
     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
     SpanAttributes,
     LLMRequestTypeValues,
opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py
@@ -12,7 +12,7 @@ from opentelemetry.instrumentation.openai.utils import (
 )
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.metrics import Counter, Histogram
-from opentelemetry.semconv.ai import SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+from opentelemetry.semconv_ai import SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
 
 
 @_with_image_gen_metric_wrapper
opentelemetry/instrumentation/openai/v0/__init__.py
@@ -19,7 +19,7 @@ from opentelemetry.instrumentation.openai.shared.embeddings_wrappers import (
 )
 from opentelemetry.instrumentation.openai.utils import is_metrics_enabled
 from opentelemetry.instrumentation.openai.version import __version__
-from opentelemetry.semconv.ai import Meters
+from opentelemetry.semconv_ai import Meters
 
 _instruments = ("openai >= 0.27.0", "openai < 1.0.0")
 
opentelemetry/instrumentation/openai/v1/__init__.py
@@ -33,7 +33,7 @@ from opentelemetry.instrumentation.openai.v1.assistant_wrappers import (
 from opentelemetry.instrumentation.openai.utils import is_metrics_enabled
 from opentelemetry.instrumentation.openai.version import __version__
 
-from opentelemetry.semconv.ai import Meters
+from opentelemetry.semconv_ai import Meters
 
 _instruments = ("openai >= 1.0.0",)
 
opentelemetry/instrumentation/openai/v1/assistant_wrappers.py
@@ -8,7 +8,7 @@ from opentelemetry.instrumentation.openai.shared import (
 from opentelemetry.trace import SpanKind
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 
-from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
+from opentelemetry.semconv_ai import SpanAttributes, LLMRequestTypeValues
 
 from opentelemetry.instrumentation.openai.utils import _with_tracer_wrapper, dont_throw
 from opentelemetry.instrumentation.openai.shared.config import Config
opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py
@@ -1,7 +1,7 @@
 from opentelemetry.instrumentation.openai.shared import (
     _set_span_attribute,
 )
-from opentelemetry.semconv.ai import SpanAttributes
+from opentelemetry.semconv_ai import SpanAttributes
 from openai import AssistantEventHandler
 from typing_extensions import override
 
pyproject.toml
@@ -8,7 +8,7 @@ show_missing = true
 
 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.26.0"
+version = "0.26.2"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
   "Gal Kleinman <gal@traceloop.com>",
@@ -24,10 +24,10 @@ include = "opentelemetry/instrumentation/openai"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4"
-opentelemetry-api = "^1.25.0"
-opentelemetry-instrumentation = "^0.46b0"
-opentelemetry-semantic-conventions = "^0.46b0"
-opentelemetry-semantic-conventions-ai = "0.3.6"
+opentelemetry-api = "^1.26.0"
+opentelemetry-instrumentation = "^0.47b0"
+opentelemetry-semantic-conventions = "^0.47b0"
+opentelemetry-semantic-conventions-ai = "0.4.0"
 tiktoken = ">=0.6.0, <1"
 
 [tool.poetry.group.dev.dependencies]
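
The packaging changes bump the OpenTelemetry core dependencies in lockstep (API ^1.26.0, instrumentation and semantic-conventions ^0.47b0) and pin opentelemetry-semantic-conventions-ai to exactly 0.4.0. A quick way to confirm an installed environment matches the new pins, purely as an illustrative check:

# Illustrative check of installed distributions against the pins above.
from importlib.metadata import version

for dist, expected in [
    ("opentelemetry-instrumentation-openai", "0.26.2"),
    ("opentelemetry-semantic-conventions-ai", "0.4.0"),
]:
    print(dist, version(dist), "expected:", expected)
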