opentelemetry-instrumentation-openai 0.31.2__tar.gz → 0.31.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (17)
  1. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/PKG-INFO +1 -1
  2. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/__init__.py +5 -2
  3. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +19 -19
  4. opentelemetry_instrumentation_openai-0.31.3/opentelemetry/instrumentation/openai/version.py +1 -0
  5. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/pyproject.toml +1 -1
  6. opentelemetry_instrumentation_openai-0.31.2/opentelemetry/instrumentation/openai/version.py +0 -1
  7. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/README.md +0 -0
  8. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
  9. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
  10. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  11. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -0
  12. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
  13. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/utils.py +0 -0
  14. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
  15. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.31.2 → opentelemetry_instrumentation_openai-0.31.3}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.31.2
+Version: 0.31.3
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
opentelemetry/instrumentation/openai/__init__.py
@@ -1,4 +1,5 @@
-from typing import Callable, Collection
+from typing import Callable, Collection, Optional
+from typing_extensions import Coroutine

 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor

@@ -19,7 +20,9 @@ class OpenAIInstrumentor(BaseInstrumentor):
         enrich_token_usage: bool = False,
         exception_logger=None,
         get_common_metrics_attributes: Callable[[], dict] = lambda: {},
-        upload_base64_image: Callable[[str, str, str, str], str] = lambda *args: "",
+        upload_base64_image: Optional[
+            Callable[[str, str, str, str], Coroutine[None, None, str]]
+        ] = lambda *args: "",
     ):
         super().__init__()
         Config.enrich_assistant = enrich_assistant
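The `upload_base64_image` hook is now typed as an optional coroutine-returning callable, so a custom uploader should be declared with `async def`. A minimal sketch of wiring one up, assuming only the constructor keyword shown in the hunk above; the uploader body and URL are placeholders, not part of the package:

import asyncio

from opentelemetry.instrumentation.openai import OpenAIInstrumentor


async def my_image_uploader(trace_id, span_id, filename, base64_data) -> str:
    # Placeholder: persist the image (object storage, local disk, ...) and return its URL.
    await asyncio.sleep(0)  # stand-in for real async I/O
    return f"https://example.invalid/{trace_id}/{span_id}/{filename}"


OpenAIInstrumentor(upload_base64_image=my_image_uploader).instrument()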
opentelemetry/instrumentation/openai/shared/chat_wrappers.py
@@ -1,3 +1,4 @@
+import asyncio
 import copy
 import json
 import logging
@@ -69,15 +70,15 @@ def chat_wrapper(
         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
     ):
         return wrapped(*args, **kwargs)
-
     # span needs to be opened and closed manually because the response is a generator
+
     span = tracer.start_span(
         SPAN_NAME,
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
     )

-    _handle_request(span, kwargs, instance)
+    asyncio.run(_handle_request(span, kwargs, instance))

     try:
         start_time = time.time()
@@ -167,7 +168,7 @@ async def achat_wrapper(
         kind=SpanKind.CLIENT,
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
     )
-    _handle_request(span, kwargs, instance)
+    await _handle_request(span, kwargs, instance)

     try:
         start_time = time.time()
@@ -236,11 +237,11 @@ async def achat_wrapper(


 @dont_throw
-def _handle_request(span, kwargs, instance):
+async def _handle_request(span, kwargs, instance):
     _set_request_attributes(span, kwargs)
     _set_client_attributes(span, instance)
     if should_send_prompts():
-        _set_prompts(span, kwargs.get("messages"))
+        await _set_prompts(span, kwargs.get("messages"))
     if kwargs.get("functions"):
         _set_functions_attributes(span, kwargs.get("functions"))
     elif kwargs.get("tools"):
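Because `_handle_request` is now a coroutine, the synchronous `chat_wrapper` drives it with `asyncio.run`, while `achat_wrapper` simply awaits it. The pattern is sketched below with placeholder names; note that `asyncio.run` raises `RuntimeError` if an event loop is already running in the calling thread:

import asyncio


async def handle_request(payload):
    # Stand-in for the instrumentation's async request handling.
    await asyncio.sleep(0)
    return f"handled {len(payload)} kwargs"


def sync_entrypoint(payload):
    # Synchronous path: run the coroutine to completion before continuing.
    return asyncio.run(handle_request(payload))


async def async_entrypoint(payload):
    # Asynchronous path: await it inside the already-running event loop.
    return await handle_request(payload)


print(sync_entrypoint({"model": "gpt-4o", "messages": []}))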
@@ -331,33 +332,28 @@ def _is_base64_image(item):
     if not isinstance(item, dict):
         return False

-    if not isinstance(item.get('image_url'), dict):
+    if not isinstance(item.get("image_url"), dict):
         return False

-    if 'data:image/' not in item.get('image_url', {}).get('url', ''):
+    if "data:image/" not in item.get("image_url", {}).get("url", ""):
         return False

     return True


-def _process_image_item(item, trace_id, span_id, message_index, content_index):
+async def _process_image_item(item, trace_id, span_id, message_index, content_index):
     if not Config.upload_base64_image:
         return item

     image_format = item["image_url"]["url"].split(";")[0].split("/")[1]
     image_name = f"message_{message_index}_content_{content_index}.{image_format}"
     base64_string = item["image_url"]["url"].split(",")[1]
-    url = Config.upload_base64_image(trace_id, span_id, image_name, base64_string)
+    url = await Config.upload_base64_image(trace_id, span_id, image_name, base64_string)

-    return {
-        'type': 'image_url',
-        'image_url': {
-            'url': url
-        }
-    }
+    return {"type": "image_url", "image_url": {"url": url}}


-def _set_prompts(span, messages):
+async def _set_prompts(span, messages):
     if not span.is_recording() or messages is None:
         return

@@ -369,9 +365,13 @@ def _set_prompts(span, messages):
         content = copy.deepcopy(msg.get("content"))
         if isinstance(content, list):
             content = [
-                _process_image_item(item, span.context.trace_id, span.context.span_id, i, j)
-                if _is_base64_image(item)
-                else item
+                (
+                    await _process_image_item(
+                        item, span.context.trace_id, span.context.span_id, i, j
+                    )
+                    if _is_base64_image(item)
+                    else item
+                )
                 for j, item in enumerate(content)
             ]
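For reference, the rewriting applied to a base64 `image_url` content part ends up roughly as sketched below; `fake_upload` and the returned URL are placeholders standing in for whatever the configured `upload_base64_image` coroutine returns:

import asyncio


async def fake_upload(trace_id, span_id, name, base64_data):
    # Placeholder uploader: pretend to persist the image and return its location.
    return f"https://example.invalid/{trace_id}/{span_id}/{name}"


async def rewrite_image_item(item, trace_id, span_id, message_index, content_index):
    # Mirror of the transformation above: derive a file name from the data URL,
    # upload the base64 payload, and replace it with the resulting URL.
    image_format = item["image_url"]["url"].split(";")[0].split("/")[1]
    image_name = f"message_{message_index}_content_{content_index}.{image_format}"
    base64_string = item["image_url"]["url"].split(",")[1]
    url = await fake_upload(trace_id, span_id, image_name, base64_string)
    return {"type": "image_url", "image_url": {"url": url}}


item = {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0KG..."}}
print(asyncio.run(rewrite_image_item(item, "trace-1", "span-1", 0, 1)))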
pyproject.toml
@@ -8,7 +8,7 @@ show_missing = true

 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.31.2"
+version = "0.31.3"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
     "Gal Kleinman <gal@traceloop.com>",