opentelemetry-instrumentation-openai 0.23.0__tar.gz → 0.24.0__tar.gz

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of opentelemetry-instrumentation-openai might be problematic.

Files changed (17)
  1. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/PKG-INFO +2 -2
  2. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +56 -55
  3. opentelemetry_instrumentation_openai-0.24.0/opentelemetry/instrumentation/openai/version.py +1 -0
  4. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/pyproject.toml +2 -2
  5. opentelemetry_instrumentation_openai-0.23.0/opentelemetry/instrumentation/openai/version.py +0 -1
  6. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/README.md +0 -0
  7. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/__init__.py +0 -0
  8. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
  9. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
  10. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  11. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -0
  12. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
  13. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/utils.py +0 -0
  14. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
  15. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.23.0 → opentelemetry_instrumentation_openai-0.24.0}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.23.0
+Version: 0.24.0
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
@@ -17,7 +17,7 @@ Provides-Extra: instruments
 Requires-Dist: opentelemetry-api (>=1.25.0,<2.0.0)
 Requires-Dist: opentelemetry-instrumentation (>=0.46b0,<0.47)
 Requires-Dist: opentelemetry-semantic-conventions (>=0.46b0,<0.47)
-Requires-Dist: opentelemetry-semantic-conventions-ai (==0.3.1)
+Requires-Dist: opentelemetry-semantic-conventions-ai (==0.3.3)
 Requires-Dist: tiktoken (>=0.6.0,<1)
 Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 Description-Content-Type: text/markdown
opentelemetry/instrumentation/openai/shared/chat_wrappers.py

@@ -318,13 +318,19 @@ def _set_prompts(span, messages):
 
     for i, msg in enumerate(messages):
         prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
-        if isinstance(msg.get("content"), str):
-            content = msg.get("content")
-        elif isinstance(msg.get("content"), list):
-            content = json.dumps(msg.get("content"))
 
         _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
-        _set_span_attribute(span, f"{prefix}.content", content)
+        if msg.get("content"):
+            content = msg.get("content")
+            if isinstance(content, list):
+                content = json.dumps(content)
+            _set_span_attribute(span, f"{prefix}.content", content)
+        if msg.get("tool_calls"):
+            _set_span_attribute(
+                span, f"{prefix}.tool_calls", json.dumps(msg.get("tool_calls"))
+            )
+        if msg.get("tool_call_id"):
+            _set_span_attribute(span, f"{prefix}.tool_call_id", msg.get("tool_call_id"))
 
 
 def _set_completions(span, choices):
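The rewritten _set_prompts hunk above now records assistant tool-call requests and tool-result messages on the prompt attributes instead of only plain text content. Below is a minimal, self-contained sketch of the resulting attribute flattening; the message payloads and the "gen_ai.prompt" value assumed for SpanAttributes.LLM_PROMPTS are illustrative, not taken from the package.

```python
import json

# Hypothetical conversation: an assistant tool call followed by the tool's result.
messages = [
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
            }
        ],
    },
    {"role": "tool", "tool_call_id": "call_1", "content": "Sunny, 18C"},
]

attributes = {}
for i, msg in enumerate(messages):
    prefix = f"gen_ai.prompt.{i}"  # assumed value of SpanAttributes.LLM_PROMPTS
    attributes[f"{prefix}.role"] = msg.get("role")
    if msg.get("content"):
        content = msg["content"]
        # list content (e.g. multimodal parts) is serialized, plain strings pass through
        attributes[f"{prefix}.content"] = (
            json.dumps(content) if isinstance(content, list) else content
        )
    if msg.get("tool_calls"):
        attributes[f"{prefix}.tool_calls"] = json.dumps(msg["tool_calls"])
    if msg.get("tool_call_id"):
        attributes[f"{prefix}.tool_call_id"] = msg["tool_call_id"]

print(attributes["gen_ai.prompt.0.tool_calls"])     # serialized tool-call request
print(attributes["gen_ai.prompt.1.tool_call_id"])   # -> call_1
```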
@@ -353,26 +359,33 @@ def _set_completions(span, choices):
         function_call = message.get("function_call")
         if function_call:
             _set_span_attribute(
-                span, f"{prefix}.function_call.name", function_call.get("name")
+                span, f"{prefix}.tool_calls.0.name", function_call.get("name")
             )
             _set_span_attribute(
                 span,
-                f"{prefix}.function_call.arguments",
+                f"{prefix}.tool_calls.0.arguments",
                 function_call.get("arguments"),
             )
 
         tool_calls = message.get("tool_calls")
         if tool_calls:
-            _set_span_attribute(
-                span,
-                f"{prefix}.function_call.name",
-                tool_calls[0].get("function").get("name"),
-            )
-            _set_span_attribute(
-                span,
-                f"{prefix}.function_call.arguments",
-                tool_calls[0].get("function").get("arguments"),
-            )
+            for i, tool_call in enumerate(tool_calls):
+                function = tool_call.get("function")
+                _set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.id",
+                    tool_call.get("id"),
+                )
+                _set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.name",
+                    function.get("name"),
+                )
+                _set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{i}.arguments",
+                    function.get("arguments"),
+                )
 
 
 def _set_streaming_token_metrics(
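On the completion side, each tool call in a choice now gets its own .tool_calls.{i}.id/.name/.arguments attributes, and the legacy function_call shape is mapped onto .tool_calls.0.* instead of the old .function_call.* keys. Here is a hedged sketch of the keys this produces for a response carrying two tool calls; the message and the "gen_ai.completion" value assumed for SpanAttributes.LLM_COMPLETIONS are made up for illustration.

```python
# Hypothetical completion choice with two parallel tool calls.
message = {
    "role": "assistant",
    "tool_calls": [
        {"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}},
        {"id": "call_2", "function": {"name": "get_time", "arguments": '{"tz": "CET"}'}},
    ],
}

prefix = "gen_ai.completion.0"  # assumed value of SpanAttributes.LLM_COMPLETIONS + choice index
attributes = {}
for i, tool_call in enumerate(message["tool_calls"]):
    function = tool_call.get("function")
    # one id/name/arguments triple per tool call, indexed under .tool_calls.{i}
    attributes[f"{prefix}.tool_calls.{i}.id"] = tool_call.get("id")
    attributes[f"{prefix}.tool_calls.{i}.name"] = function.get("name")
    attributes[f"{prefix}.tool_calls.{i}.arguments"] = function.get("arguments")

assert attributes["gen_ai.completion.0.tool_calls.1.name"] == "get_time"
```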
@@ -530,44 +543,7 @@ class ChatStream(ObjectProxy):
             )
             self._first_token = False
 
-        if is_openai_v1():
-            item = model_as_dict(item)
-
-        self._complete_response["model"] = item.get("model")
-
-        for choice in item.get("choices"):
-            index = choice.get("index")
-            if len(self._complete_response.get("choices")) <= index:
-                self._complete_response["choices"].append(
-                    {"index": index, "message": {"content": "", "role": ""}}
-                )
-            complete_choice = self._complete_response.get("choices")[index]
-            if choice.get("finish_reason"):
-                complete_choice["finish_reason"] = choice.get("finish_reason")
-
-            delta = choice.get("delta")
-
-            if delta and delta.get("content"):
-                complete_choice["message"]["content"] += delta.get("content")
-            if delta and delta.get("role"):
-                complete_choice["message"]["role"] = delta.get("role")
-            if delta and delta.get("tool_calls"):
-                tool_calls = delta.get("tool_calls")
-                if not isinstance(tool_calls, list) or len(tool_calls) == 0:
-                    continue
-
-                if not complete_choice["message"].get("tool_calls"):
-                    complete_choice["message"]["tool_calls"] = [
-                        {"function": {"name": "", "arguments": ""}}
-                    ]
-
-                tool_call = tool_calls[0]
-                function = complete_choice["message"]["tool_calls"][0]["function"]
-
-                if tool_call.get("function") and tool_call["function"].get("name"):
-                    function["name"] += tool_call["function"]["name"]
-                if tool_call.get("function") and tool_call["function"].get("arguments"):
-                    function["arguments"] += tool_call["function"]["arguments"]
+        _accumulate_stream_items(item, self._complete_response)
 
     def _shared_attributes(self):
         return _metric_shared_attributes(
@@ -781,3 +757,28 @@ def _accumulate_stream_items(item, complete_response):
             complete_choice["message"]["content"] += delta.get("content")
         if delta and delta.get("role"):
             complete_choice["message"]["role"] = delta.get("role")
+        if delta and delta.get("tool_calls"):
+            tool_calls = delta.get("tool_calls")
+            if not isinstance(tool_calls, list) or len(tool_calls) == 0:
+                continue
+
+            if not complete_choice["message"].get("tool_calls"):
+                complete_choice["message"]["tool_calls"] = []
+
+            for tool_call in tool_calls:
+                i = int(tool_call["index"])
+                if len(complete_choice["message"]["tool_calls"]) <= i:
+                    complete_choice["message"]["tool_calls"].append(
+                        {"id": "", "function": {"name": "", "arguments": ""}}
+                    )
+
+                span_tool_call = complete_choice["message"]["tool_calls"][i]
+                span_function = span_tool_call["function"]
+                tool_call_function = tool_call.get("function")
+
+                if tool_call.get("id"):
+                    span_tool_call["id"] = tool_call.get("id")
+                if tool_call_function and tool_call_function.get("name"):
+                    span_function["name"] = tool_call_function.get("name")
+                if tool_call_function and tool_call_function.get("arguments"):
+                    span_function["arguments"] += tool_call_function.get("arguments")
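The streaming accumulation in ChatStream now delegates to the shared _accumulate_stream_items helper, and the new branch above merges tool-call deltas by their index: the per-choice tool_calls list grows as new indexes appear, id and name are set when they arrive, and arguments fragments are concatenated across chunks. The following is a simplified, self-contained stand-in for that merge (not the package's actual helper) showing two chunks of one tool call being reassembled.

```python
# Hypothetical sketch of the merge behaviour: streamed deltas address tool
# calls by "index", ids/names arrive once, "arguments" arrive in fragments.
def merge_tool_call_deltas(chunks):
    message = {"content": "", "role": "", "tool_calls": []}
    for chunk in chunks:
        for tool_call in chunk.get("tool_calls", []):
            i = int(tool_call["index"])
            # grow the list so the delta's index has a slot to merge into
            while len(message["tool_calls"]) <= i:
                message["tool_calls"].append(
                    {"id": "", "function": {"name": "", "arguments": ""}}
                )
            merged = message["tool_calls"][i]
            function = tool_call.get("function") or {}
            if tool_call.get("id"):
                merged["id"] = tool_call["id"]
            if function.get("name"):
                merged["function"]["name"] = function["name"]
            if function.get("arguments"):
                merged["function"]["arguments"] += function["arguments"]
    return message


deltas = [
    {"tool_calls": [{"index": 0, "id": "call_1",
                     "function": {"name": "get_weather", "arguments": '{"ci'}}]},
    {"tool_calls": [{"index": 0, "function": {"arguments": 'ty": "Paris"}'}}]},
]
print(merge_tool_call_deltas(deltas)["tool_calls"][0]["function"]["arguments"])
# -> {"city": "Paris"}
```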
pyproject.toml

@@ -8,7 +8,7 @@ show_missing = true
 
 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.23.0"
+version = "0.24.0"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
   "Gal Kleinman <gal@traceloop.com>",
@@ -27,7 +27,7 @@ python = ">=3.9,<4"
 opentelemetry-api = "^1.25.0"
 opentelemetry-instrumentation = "^0.46b0"
 opentelemetry-semantic-conventions = "^0.46b0"
-opentelemetry-semantic-conventions-ai = "0.3.1"
+opentelemetry-semantic-conventions-ai = "0.3.3"
 tiktoken = ">=0.6.0, <1"
 
 [tool.poetry.group.dev.dependencies]