opentelemetry-instrumentation-openai 0.23.0__tar.gz → 0.25.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentelemetry-instrumentation-openai might be problematic; see the package registry page for more details.

Files changed (17)
  1. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/PKG-INFO +2 -2
  2. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +67 -58
  3. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +11 -3
  4. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +11 -3
  5. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +4 -1
  6. opentelemetry_instrumentation_openai-0.25.0/opentelemetry/instrumentation/openai/version.py +1 -0
  7. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/pyproject.toml +2 -2
  8. opentelemetry_instrumentation_openai-0.23.0/opentelemetry/instrumentation/openai/version.py +0 -1
  9. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/README.md +0 -0
  10. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/__init__.py +0 -0
  11. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
  12. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  13. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/utils.py +0 -0
  14. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
  15. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.25.0}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: opentelemetry-instrumentation-openai
3
- Version: 0.23.0
3
+ Version: 0.25.0
4
4
  Summary: OpenTelemetry OpenAI instrumentation
5
5
  Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
6
6
  License: Apache-2.0
@@ -17,7 +17,7 @@ Provides-Extra: instruments
17
17
  Requires-Dist: opentelemetry-api (>=1.25.0,<2.0.0)
18
18
  Requires-Dist: opentelemetry-instrumentation (>=0.46b0,<0.47)
19
19
  Requires-Dist: opentelemetry-semantic-conventions (>=0.46b0,<0.47)
20
- Requires-Dist: opentelemetry-semantic-conventions-ai (==0.3.1)
20
+ Requires-Dist: opentelemetry-semantic-conventions-ai (==0.3.4)
21
21
  Requires-Dist: tiktoken (>=0.6.0,<1)
22
22
  Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
23
23
  Description-Content-Type: text/markdown
@@ -6,7 +6,11 @@ from wrapt import ObjectProxy
6
6
 
7
7
  from opentelemetry import context as context_api
8
8
  from opentelemetry.metrics import Counter, Histogram
9
- from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
9
+ from opentelemetry.semconv.ai import (
10
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
11
+ SpanAttributes,
12
+ LLMRequestTypeValues,
13
+ )
10
14
 
11
15
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
12
16
  from opentelemetry.instrumentation.openai.utils import (
@@ -56,7 +60,9 @@ def chat_wrapper(
56
60
  args,
57
61
  kwargs,
58
62
  ):
59
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
63
+ if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
64
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
65
+ ):
60
66
  return wrapped(*args, **kwargs)
61
67
 
62
68
  # span needs to be opened and closed manually because the response is a generator
@@ -146,7 +152,9 @@ async def achat_wrapper(
146
152
  args,
147
153
  kwargs,
148
154
  ):
149
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
155
+ if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
156
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
157
+ ):
150
158
  return wrapped(*args, **kwargs)
151
159
 
152
160
  span = tracer.start_span(
@@ -318,13 +326,19 @@ def _set_prompts(span, messages):
318
326
 
319
327
  for i, msg in enumerate(messages):
320
328
  prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
321
- if isinstance(msg.get("content"), str):
322
- content = msg.get("content")
323
- elif isinstance(msg.get("content"), list):
324
- content = json.dumps(msg.get("content"))
325
329
 
326
330
  _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
327
- _set_span_attribute(span, f"{prefix}.content", content)
331
+ if msg.get("content"):
332
+ content = msg.get("content")
333
+ if isinstance(content, list):
334
+ content = json.dumps(content)
335
+ _set_span_attribute(span, f"{prefix}.content", content)
336
+ if msg.get("tool_calls"):
337
+ _set_span_attribute(
338
+ span, f"{prefix}.tool_calls", json.dumps(msg.get("tool_calls"))
339
+ )
340
+ if msg.get("tool_call_id"):
341
+ _set_span_attribute(span, f"{prefix}.tool_call_id", msg.get("tool_call_id"))
328
342
 
329
343
 
330
344
  def _set_completions(span, choices):
@@ -353,26 +367,33 @@ def _set_completions(span, choices):
353
367
  function_call = message.get("function_call")
354
368
  if function_call:
355
369
  _set_span_attribute(
356
- span, f"{prefix}.function_call.name", function_call.get("name")
370
+ span, f"{prefix}.tool_calls.0.name", function_call.get("name")
357
371
  )
358
372
  _set_span_attribute(
359
373
  span,
360
- f"{prefix}.function_call.arguments",
374
+ f"{prefix}.tool_calls.0.arguments",
361
375
  function_call.get("arguments"),
362
376
  )
363
377
 
364
378
  tool_calls = message.get("tool_calls")
365
379
  if tool_calls:
366
- _set_span_attribute(
367
- span,
368
- f"{prefix}.function_call.name",
369
- tool_calls[0].get("function").get("name"),
370
- )
371
- _set_span_attribute(
372
- span,
373
- f"{prefix}.function_call.arguments",
374
- tool_calls[0].get("function").get("arguments"),
375
- )
380
+ for i, tool_call in enumerate(tool_calls):
381
+ function = tool_call.get("function")
382
+ _set_span_attribute(
383
+ span,
384
+ f"{prefix}.tool_calls.{i}.id",
385
+ tool_call.get("id"),
386
+ )
387
+ _set_span_attribute(
388
+ span,
389
+ f"{prefix}.tool_calls.{i}.name",
390
+ function.get("name"),
391
+ )
392
+ _set_span_attribute(
393
+ span,
394
+ f"{prefix}.tool_calls.{i}.arguments",
395
+ function.get("arguments"),
396
+ )
376
397
 
377
398
 
378
399
  def _set_streaming_token_metrics(
@@ -530,44 +551,7 @@ class ChatStream(ObjectProxy):
530
551
  )
531
552
  self._first_token = False
532
553
 
533
- if is_openai_v1():
534
- item = model_as_dict(item)
535
-
536
- self._complete_response["model"] = item.get("model")
537
-
538
- for choice in item.get("choices"):
539
- index = choice.get("index")
540
- if len(self._complete_response.get("choices")) <= index:
541
- self._complete_response["choices"].append(
542
- {"index": index, "message": {"content": "", "role": ""}}
543
- )
544
- complete_choice = self._complete_response.get("choices")[index]
545
- if choice.get("finish_reason"):
546
- complete_choice["finish_reason"] = choice.get("finish_reason")
547
-
548
- delta = choice.get("delta")
549
-
550
- if delta and delta.get("content"):
551
- complete_choice["message"]["content"] += delta.get("content")
552
- if delta and delta.get("role"):
553
- complete_choice["message"]["role"] = delta.get("role")
554
- if delta and delta.get("tool_calls"):
555
- tool_calls = delta.get("tool_calls")
556
- if not isinstance(tool_calls, list) or len(tool_calls) == 0:
557
- continue
558
-
559
- if not complete_choice["message"].get("tool_calls"):
560
- complete_choice["message"]["tool_calls"] = [
561
- {"function": {"name": "", "arguments": ""}}
562
- ]
563
-
564
- tool_call = tool_calls[0]
565
- function = complete_choice["message"]["tool_calls"][0]["function"]
566
-
567
- if tool_call.get("function") and tool_call["function"].get("name"):
568
- function["name"] += tool_call["function"]["name"]
569
- if tool_call.get("function") and tool_call["function"].get("arguments"):
570
- function["arguments"] += tool_call["function"]["arguments"]
554
+ _accumulate_stream_items(item, self._complete_response)
571
555
 
572
556
  def _shared_attributes(self):
573
557
  return _metric_shared_attributes(
@@ -781,3 +765,28 @@ def _accumulate_stream_items(item, complete_response):
781
765
  complete_choice["message"]["content"] += delta.get("content")
782
766
  if delta and delta.get("role"):
783
767
  complete_choice["message"]["role"] = delta.get("role")
768
+ if delta and delta.get("tool_calls"):
769
+ tool_calls = delta.get("tool_calls")
770
+ if not isinstance(tool_calls, list) or len(tool_calls) == 0:
771
+ continue
772
+
773
+ if not complete_choice["message"].get("tool_calls"):
774
+ complete_choice["message"]["tool_calls"] = []
775
+
776
+ for tool_call in tool_calls:
777
+ i = int(tool_call["index"])
778
+ if len(complete_choice["message"]["tool_calls"]) <= i:
779
+ complete_choice["message"]["tool_calls"].append(
780
+ {"id": "", "function": {"name": "", "arguments": ""}}
781
+ )
782
+
783
+ span_tool_call = complete_choice["message"]["tool_calls"][i]
784
+ span_function = span_tool_call["function"]
785
+ tool_call_function = tool_call.get("function")
786
+
787
+ if tool_call.get("id"):
788
+ span_tool_call["id"] = tool_call.get("id")
789
+ if tool_call_function and tool_call_function.get("name"):
790
+ span_function["name"] = tool_call_function.get("name")
791
+ if tool_call_function and tool_call_function.get("arguments"):
792
+ span_function["arguments"] += tool_call_function.get("arguments")
@@ -2,7 +2,11 @@ import logging
2
2
 
3
3
  from opentelemetry import context as context_api
4
4
 
5
- from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
5
+ from opentelemetry.semconv.ai import (
6
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
7
+ SpanAttributes,
8
+ LLMRequestTypeValues,
9
+ )
6
10
 
7
11
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
8
12
  from opentelemetry.instrumentation.openai.utils import _with_tracer_wrapper, dont_throw
@@ -33,7 +37,9 @@ logger = logging.getLogger(__name__)
33
37
 
34
38
  @_with_tracer_wrapper
35
39
  def completion_wrapper(tracer, wrapped, instance, args, kwargs):
36
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
40
+ if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
41
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
42
+ ):
37
43
  return wrapped(*args, **kwargs)
38
44
 
39
45
  # span needs to be opened and closed manually because the response is a generator
@@ -58,7 +64,9 @@ def completion_wrapper(tracer, wrapped, instance, args, kwargs):
58
64
 
59
65
  @_with_tracer_wrapper
60
66
  async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
61
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
67
+ if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
68
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
69
+ ):
62
70
  return wrapped(*args, **kwargs)
63
71
 
64
72
  span = tracer.start_span(
@@ -3,7 +3,11 @@ import time
3
3
 
4
4
  from opentelemetry import context as context_api
5
5
  from opentelemetry.metrics import Counter, Histogram
6
- from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
6
+ from opentelemetry.semconv.ai import (
7
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
8
+ SpanAttributes,
9
+ LLMRequestTypeValues,
10
+ )
7
11
 
8
12
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
9
13
  from opentelemetry.instrumentation.openai.utils import (
@@ -46,7 +50,9 @@ def embeddings_wrapper(
46
50
  args,
47
51
  kwargs,
48
52
  ):
49
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
53
+ if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
54
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
55
+ ):
50
56
  return wrapped(*args, **kwargs)
51
57
 
52
58
  with tracer.start_as_current_span(
@@ -103,7 +109,9 @@ async def aembeddings_wrapper(
103
109
  args,
104
110
  kwargs,
105
111
  ):
106
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
112
+ if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
113
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
114
+ ):
107
115
  return wrapped(*args, **kwargs)
108
116
 
109
117
  async with start_as_current_span_async(
@@ -12,6 +12,7 @@ from opentelemetry.instrumentation.openai.utils import (
12
12
  )
13
13
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
14
14
  from opentelemetry.metrics import Counter, Histogram
15
+ from opentelemetry.semconv.ai import SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
15
16
 
16
17
 
17
18
  @_with_image_gen_metric_wrapper
@@ -23,7 +24,9 @@ def image_gen_metrics_wrapper(
23
24
  args,
24
25
  kwargs,
25
26
  ):
26
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
27
+ if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
28
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
29
+ ):
27
30
  return wrapped(*args, **kwargs)
28
31
 
29
32
  try:
@@ -8,7 +8,7 @@ show_missing = true
8
8
 
9
9
  [tool.poetry]
10
10
  name = "opentelemetry-instrumentation-openai"
11
- version = "0.23.0"
11
+ version = "0.25.0"
12
12
  description = "OpenTelemetry OpenAI instrumentation"
13
13
  authors = [
14
14
  "Gal Kleinman <gal@traceloop.com>",
@@ -27,7 +27,7 @@ python = ">=3.9,<4"
27
27
  opentelemetry-api = "^1.25.0"
28
28
  opentelemetry-instrumentation = "^0.46b0"
29
29
  opentelemetry-semantic-conventions = "^0.46b0"
30
- opentelemetry-semantic-conventions-ai = "0.3.1"
30
+ opentelemetry-semantic-conventions-ai = "0.3.4"
31
31
  tiktoken = ">=0.6.0, <1"
32
32
 
33
33
  [tool.poetry.group.dev.dependencies]