opentelemetry-instrumentation-openai 0.16.5__tar.gz → 0.16.6__tar.gz

This diff shows the content of publicly available package versions that were released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Files changed (17)
  1. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/PKG-INFO +1 -1
  2. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/__init__.py +5 -1
  3. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/shared/__init__.py +54 -78
  4. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +19 -14
  5. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +65 -65
  6. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/shared/config.py +1 -0
  7. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +12 -14
  8. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +16 -6
  9. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/utils.py +22 -0
  10. opentelemetry_instrumentation_openai-0.16.6/opentelemetry/instrumentation/openai/version.py +1 -0
  11. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/pyproject.toml +1 -1
  12. opentelemetry_instrumentation_openai-0.16.5/opentelemetry/instrumentation/openai/version.py +0 -1
  13. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/README.md +0 -0
  14. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
  15. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
  16. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  17. {opentelemetry_instrumentation_openai-0.16.5 → opentelemetry_instrumentation_openai-0.16.6}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.16.5
+Version: 0.16.6
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
@@ -14,11 +14,15 @@ class OpenAIInstrumentor(BaseInstrumentor):
     """An instrumentor for OpenAI's client library."""

     def __init__(
-        self, enrich_assistant: bool = False, enrich_token_usage: bool = False
+        self,
+        enrich_assistant: bool = False,
+        enrich_token_usage: bool = False,
+        exception_logger=None,
     ):
         super().__init__()
         Config.enrich_assistant = enrich_assistant
         Config.enrich_token_usage = enrich_token_usage
+        Config.exception_logger = exception_logger

     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
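
The new `exception_logger` argument gives applications a hook for instrumentation failures that would otherwise only be logged. A minimal usage sketch, assuming nothing beyond the constructor signature above (`report_error` is a hypothetical stand-in for any one-argument callable):

    from opentelemetry.instrumentation.openai import OpenAIInstrumentor

    # Hypothetical callback: receives the exception instance that the
    # instrumentation caught instead of raising into application code.
    def report_error(exc: Exception) -> None:
        print(f"openai instrumentation failure: {exc!r}")

    OpenAIInstrumentor(
        enrich_token_usage=True,
        exception_logger=report_error,
    ).instrument()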
@@ -10,6 +10,7 @@ from opentelemetry import context as context_api

 from opentelemetry.semconv.ai import SpanAttributes
 from opentelemetry.instrumentation.openai.utils import (
+    dont_throw,
     is_openai_v1,
     should_record_stream_token_usage,
 )
@@ -46,19 +47,13 @@ def _set_client_attributes(span, instance):
     if not is_openai_v1():
         return

-    try:
-        client = instance._client  # pylint: disable=protected-access
-        if isinstance(client, (openai.AsyncOpenAI, openai.OpenAI)):
-            _set_span_attribute(span, OPENAI_API_BASE, str(client.base_url))
-        if isinstance(client, (openai.AsyncAzureOpenAI, openai.AzureOpenAI)):
-            _set_span_attribute(
-                span, OPENAI_API_VERSION, client._api_version
-            )  # pylint: disable=protected-access
-
-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning(
-            "Failed to set api attributes for openai v1 span, error: %s", str(ex)
-        )
+    client = instance._client  # pylint: disable=protected-access
+    if isinstance(client, (openai.AsyncOpenAI, openai.OpenAI)):
+        _set_span_attribute(span, OPENAI_API_BASE, str(client.base_url))
+    if isinstance(client, (openai.AsyncAzureOpenAI, openai.AzureOpenAI)):
+        _set_span_attribute(
+            span, OPENAI_API_VERSION, client._api_version
+        )  # pylint: disable=protected-access


 def _set_api_attributes(span):
@@ -68,16 +63,11 @@ def _set_api_attributes(span):
     if is_openai_v1():
         return

-    try:
-        base_url = openai.base_url if hasattr(openai, "base_url") else openai.api_base
+    base_url = openai.base_url if hasattr(openai, "base_url") else openai.api_base

-        _set_span_attribute(span, OPENAI_API_BASE, base_url)
-        _set_span_attribute(span, OPENAI_API_TYPE, openai.api_type)
-        _set_span_attribute(span, OPENAI_API_VERSION, openai.api_version)
-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning(
-            "Failed to set api attributes for openai span, error: %s", str(ex)
-        )
+    _set_span_attribute(span, OPENAI_API_BASE, base_url)
+    _set_span_attribute(span, OPENAI_API_TYPE, openai.api_type)
+    _set_span_attribute(span, OPENAI_API_VERSION, openai.api_version)

     return
@@ -116,76 +106,62 @@ def _set_request_attributes(span, kwargs):
     if not span.is_recording():
         return

-    try:
-        _set_api_attributes(span)
-        _set_span_attribute(span, SpanAttributes.LLM_VENDOR, "OpenAI")
-        _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
-        _set_span_attribute(
-            span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
-        )
-        _set_span_attribute(
-            span, SpanAttributes.LLM_TEMPERATURE, kwargs.get("temperature")
-        )
-        _set_span_attribute(span, SpanAttributes.LLM_TOP_P, kwargs.get("top_p"))
-        _set_span_attribute(
-            span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
-        )
-        _set_span_attribute(
-            span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
-        )
-        _set_span_attribute(span, SpanAttributes.LLM_USER, kwargs.get("user"))
-        _set_span_attribute(
-            span, SpanAttributes.LLM_HEADERS, str(kwargs.get("headers"))
-        )
-        # The new OpenAI SDK removed the `headers` and create new field called `extra_headers`
-        if kwargs.get("extra_headers") is not None:
-            _set_span_attribute(
-                span, SpanAttributes.LLM_HEADERS, str(kwargs.get("extra_headers"))
-            )
-        _set_span_attribute(
-            span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
-        )
-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning(
-            "Failed to set input attributes for openai span, error: %s", str(ex)
-        )
+    _set_api_attributes(span)
+    _set_span_attribute(span, SpanAttributes.LLM_VENDOR, "OpenAI")
+    _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
+    _set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
+    )
+    _set_span_attribute(span, SpanAttributes.LLM_TEMPERATURE, kwargs.get("temperature"))
+    _set_span_attribute(span, SpanAttributes.LLM_TOP_P, kwargs.get("top_p"))
+    _set_span_attribute(
+        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
+    )
+    _set_span_attribute(
+        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
+    )
+    _set_span_attribute(span, SpanAttributes.LLM_USER, kwargs.get("user"))
+    _set_span_attribute(span, SpanAttributes.LLM_HEADERS, str(kwargs.get("headers")))
+    # The new OpenAI SDK removed the `headers` and create new field called `extra_headers`
+    if kwargs.get("extra_headers") is not None:
+        _set_span_attribute(
+            span, SpanAttributes.LLM_HEADERS, str(kwargs.get("extra_headers"))
+        )
+    _set_span_attribute(
+        span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
+    )


+@dont_throw
 def _set_response_attributes(span, response):
     if not span.is_recording():
         return

-    try:
-        _set_span_attribute(
-            span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model")
-        )
-
-        usage = response.get("usage")
-        if not usage:
-            return
-
-        if is_openai_v1() and not isinstance(usage, dict):
-            usage = usage.__dict__
-
-        _set_span_attribute(
-            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
-            usage.get("completion_tokens"),
-        )
-        _set_span_attribute(
-            span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.get("prompt_tokens")
-        )
-
-        return
-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning(
-            "Failed to set response attributes for openai span, error: %s", str(ex)
-        )
+    _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
+
+    usage = response.get("usage")
+    if not usage:
+        return
+
+    if is_openai_v1() and not isinstance(usage, dict):
+        usage = usage.__dict__
+
+    _set_span_attribute(
+        span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
+    )
+    _set_span_attribute(
+        span,
+        SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+        usage.get("completion_tokens"),
+    )
+    _set_span_attribute(
+        span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.get("prompt_tokens")
+    )
+
+    return


+@dont_throw
 def _set_span_stream_usage(span, prompt_tokens, completion_tokens):
     if not span.is_recording():
         return
@@ -9,6 +9,7 @@ from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.instrumentation.openai.utils import (
     _with_chat_telemetry_wrapper,
+    dont_throw,
 )
 from opentelemetry.instrumentation.openai.shared import (
     _set_client_attributes,
@@ -187,6 +188,7 @@ async def achat_wrapper(
     return response


+@dont_throw
 def _handle_request(span, kwargs, instance):
     _set_request_attributes(span, kwargs)
     _set_client_attributes(span, instance)
@@ -198,6 +200,7 @@ def _handle_request(span, kwargs, instance):
         set_tools_attributes(span, kwargs.get("tools"))


+@dont_throw
 def _handle_response(
     response,
     span,
@@ -278,18 +281,15 @@ def _set_prompts(span, messages):
     if not span.is_recording() or messages is None:
         return

-    try:
-        for i, msg in enumerate(messages):
-            prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
-            if isinstance(msg.get("content"), str):
-                content = msg.get("content")
-            elif isinstance(msg.get("content"), list):
-                content = json.dumps(msg.get("content"))
+    for i, msg in enumerate(messages):
+        prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
+        if isinstance(msg.get("content"), str):
+            content = msg.get("content")
+        elif isinstance(msg.get("content"), list):
+            content = json.dumps(msg.get("content"))

-            _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
-            _set_span_attribute(span, f"{prefix}.content", content)
-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning("Failed to set prompts for openai span, error: %s", str(ex))
+        _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
+        _set_span_attribute(span, f"{prefix}.content", content)


 def _set_completions(span, choices):
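
The `elif` branch above exists because chat message content may be a list of multimodal parts rather than a plain string, and span attribute values must be primitives, so list content is stored as its JSON serialization. A small illustration with a made-up message (the sample dict is illustrative, not captured SDK output):

    import json

    msg = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ],
    }

    # Mirrors the branch above: strings pass through, lists are serialized.
    content = (
        msg["content"]
        if isinstance(msg["content"], str)
        else json.dumps(msg["content"])
    )
    print(content)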
@@ -361,7 +361,7 @@ def _set_streaming_token_metrics(
         completion_content = ""
         model_name = complete_response.get("model") or None

-        for choice in complete_response.get("choices"):  # type: dict
+        for choice in complete_response.get("choices"):
             if choice.get("message") and choice.get("message").get("content"):
                 completion_content += choice["message"]["content"]
@@ -390,6 +390,7 @@ def _set_streaming_token_metrics(
         token_counter.add(completion_usage, attributes=attributes_with_token_type)


+@dont_throw
 def _build_from_streaming_response(
     span,
     response,
@@ -456,6 +457,7 @@ def _build_from_streaming_response(
     span.end()


+@dont_throw
 async def _abuild_from_streaming_response(
     span,
     response,
@@ -540,7 +542,10 @@ def _accumulate_stream_items(item, complete_response):

         delta = choice.get("delta")

-        if delta.get("content"):
+        if delta and delta.get("content"):
             complete_choice["message"]["content"] += delta.get("content")
-        if delta.get("role"):
+        if delta and delta.get("role"):
             complete_choice["message"]["role"] = delta.get("role")
+
+        if choice.get("content_filter_results"):
+            complete_choice["message"]["content"] = "FILTERED"
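
The added `delta` guard matters for providers such as Azure OpenAI, whose content-filter annotation chunks may arrive without a usable `delta`; calling `delta.get(...)` on `None` previously raised inside the stream generator. A self-contained sketch of the guarded logic on a simplified chunk (the sample dict is illustrative, not captured SDK output):

    choice = {"index": 0, "delta": None, "content_filter_results": {"hate": {"filtered": True}}}
    complete_choice = {"index": 0, "message": {"content": "", "role": ""}}

    delta = choice.get("delta")
    if delta and delta.get("content"):  # safely skipped when delta is None
        complete_choice["message"]["content"] += delta.get("content")
    if delta and delta.get("role"):
        complete_choice["message"]["role"] = delta.get("role")

    if choice.get("content_filter_results"):
        complete_choice["message"]["content"] = "FILTERED"

    print(complete_choice["message"]["content"])  # FILTERED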
@@ -5,7 +5,7 @@ from opentelemetry import context as context_api
 from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues

 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
-from opentelemetry.instrumentation.openai.utils import _with_tracer_wrapper
+from opentelemetry.instrumentation.openai.utils import _with_tracer_wrapper, dont_throw
 from opentelemetry.instrumentation.openai.shared import (
     _set_client_attributes,
     _set_request_attributes,
@@ -48,7 +48,7 @@ def completion_wrapper(tracer, wrapped, instance, args, kwargs):

     if is_streaming_response(response):
         # span will be closed after the generator is done
-        return _build_from_streaming_response(span, response, kwargs)
+        return _build_from_streaming_response(span, kwargs, response)
     else:
         _handle_response(response, span)

@@ -72,7 +72,7 @@ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):

     if is_streaming_response(response):
         # span will be closed after the generator is done
-        return _abuild_from_streaming_response(span, response)
+        return _abuild_from_streaming_response(span, kwargs, response)
     else:
         _handle_response(response, span)

@@ -80,6 +80,7 @@ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
     return response


+@dont_throw
 def _handle_request(span, kwargs, instance):
     _set_request_attributes(span, kwargs)
     if should_send_prompts():
@@ -88,6 +89,7 @@ def _handle_request(span, kwargs, instance):
     _set_client_attributes(span, instance)


+@dont_throw
 def _handle_response(response, span):
     if is_openai_v1():
         response_dict = model_as_dict(response)
@@ -104,56 +106,65 @@ def _set_prompts(span, prompt):
     if not span.is_recording() or not prompt:
         return

-    try:
-        _set_span_attribute(
-            span,
-            f"{SpanAttributes.LLM_PROMPTS}.0.user",
-            prompt[0] if isinstance(prompt, list) else prompt,
-        )
-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning("Failed to set prompts for openai span, error: %s", str(ex))
+    _set_span_attribute(
+        span,
+        f"{SpanAttributes.LLM_PROMPTS}.0.user",
+        prompt[0] if isinstance(prompt, list) else prompt,
+    )


+@dont_throw
 def _set_completions(span, choices):
     if not span.is_recording() or not choices:
         return

-    try:
-        for choice in choices:
-            index = choice.get("index")
-            prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
-            _set_span_attribute(
-                span, f"{prefix}.finish_reason", choice.get("finish_reason")
-            )
-            _set_span_attribute(span, f"{prefix}.content", choice.get("text"))
-    except Exception as e:
-        logger.warning("Failed to set completion attributes, error: %s", str(e))
+    for choice in choices:
+        index = choice.get("index")
+        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+        _set_span_attribute(
+            span, f"{prefix}.finish_reason", choice.get("finish_reason")
+        )
+        _set_span_attribute(span, f"{prefix}.content", choice.get("text"))


-def _build_from_streaming_response(span, response, request_kwargs=None):
+@dont_throw
+def _build_from_streaming_response(span, request_kwargs, response):
     complete_response = {"choices": [], "model": ""}
     for item in response:
-        item_to_yield = item
-        if is_openai_v1():
-            item = model_as_dict(item)
-
-        complete_response["model"] = item.get("model")
-
-        for choice in item.get("choices"):
-            index = choice.get("index")
-            if len(complete_response.get("choices")) <= index:
-                complete_response["choices"].append({"index": index, "text": ""})
-            complete_choice = complete_response.get("choices")[index]
-            if choice.get("finish_reason"):
-                complete_choice["finish_reason"] = choice.get("finish_reason")
-
-            if choice.get("text"):
-                complete_choice["text"] += choice.get("text")
-
-        yield item_to_yield
+        yield item
+        _accumulate_streaming_response(complete_response, item)
+
+    _set_response_attributes(span, complete_response)
+
+    _set_token_usage(span, request_kwargs, complete_response)
+
+    if should_send_prompts():
+        _set_completions(span, complete_response.get("choices"))
+
+    span.set_status(Status(StatusCode.OK))
+    span.end()
+
+
+@dont_throw
+async def _abuild_from_streaming_response(span, request_kwargs, response):
+    complete_response = {"choices": [], "model": ""}
+    async for item in response:
+        yield item
+        _accumulate_streaming_response(complete_response, item)

     _set_response_attributes(span, complete_response)

+    _set_token_usage(span, request_kwargs, complete_response)
+
+    if should_send_prompts():
+        _set_completions(span, complete_response.get("choices"))
+
+    span.set_status(Status(StatusCode.OK))
+    span.end()
+
+
+@dont_throw
+def _set_token_usage(span, request_kwargs, complete_response):
     # use tiktoken calculate token usage
     if should_record_stream_token_usage():
         prompt_usage = -1
@@ -172,46 +183,35 @@ def _build_from_streaming_response(span, response, request_kwargs=None):
         completion_content = ""
         model_name = complete_response.get("model") or None

-        for choice in complete_response.get("choices"):  # type: dict
+        for choice in complete_response.get("choices"):
             if choice.get("text"):
                 completion_content += choice.get("text")

         if model_name:
-            completion_usage = get_token_count_from_string(completion_content, model_name)
+            completion_usage = get_token_count_from_string(
+                completion_content, model_name
+            )

         # span record
         _set_span_stream_usage(span, prompt_usage, completion_usage)

-    if should_send_prompts():
-        _set_completions(span, complete_response.get("choices"))
-
-    span.set_status(Status(StatusCode.OK))
-    span.end()
-
-
-async def _abuild_from_streaming_response(span, response):
-    complete_response = {"choices": [], "model": ""}
-    async for item in response:
-        item_to_yield = item
-        if is_openai_v1():
-            item = model_as_dict(item)
-
-        for choice in item.get("choices"):
-            index = choice.get("index")
-            if len(complete_response.get("choices")) <= index:
-                complete_response["choices"].append({"index": index, "text": ""})
-            complete_choice = complete_response.get("choices")[index]
-            if choice.get("finish_reason"):
-                complete_choice["finish_reason"] = choice.get("finish_reason")
-
-            if choice.get("text"):
-                complete_choice["text"] += choice.get("text")
-
-        yield item_to_yield
-
-    _set_response_attributes(span, complete_response)
-
-    if should_send_prompts():
-        _set_completions(span, complete_response.get("choices"))
-
-    span.set_status(Status(StatusCode.OK))
-    span.end()
+
+@dont_throw
+def _accumulate_streaming_response(complete_response, item):
+    if is_openai_v1():
+        item = model_as_dict(item)
+
+    complete_response["model"] = item.get("model")
+
+    for choice in item.get("choices"):
+        index = choice.get("index")
+        if len(complete_response.get("choices")) <= index:
+            complete_response["choices"].append({"index": index, "text": ""})
+        complete_choice = complete_response.get("choices")[index]
+        if choice.get("finish_reason"):
+            complete_choice["finish_reason"] = choice.get("finish_reason")
+
+        if choice.get("text"):
+            complete_choice["text"] += choice.get("text")
+
+    return complete_response
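
The extracted `_set_token_usage` leans on `get_token_count_from_string` because streamed completions carry no `usage` block. The real helper lives in the shared module; going by the "use tiktoken" comment above, it plausibly looks something like this sketch (the standalone name and behavior here are assumptions):

    import tiktoken  # pip install tiktoken

    def count_tokens(text: str, model_name: str) -> int:
        # Resolve the tokenizer registered for the model, then count tokens.
        encoding = tiktoken.encoding_for_model(model_name)
        return len(encoding.encode(text))

    print(count_tokens("The quick brown fox", "gpt-3.5-turbo"))  # 4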
@@ -1,3 +1,4 @@
 class Config:
     enrich_token_usage = False
     enrich_assistant = False
+    exception_logger = None
@@ -7,6 +7,7 @@ from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues

 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.instrumentation.openai.utils import (
+    dont_throw,
     start_as_current_span_async,
     _with_embeddings_telemetry_wrapper,
 )
@@ -144,6 +145,7 @@ async def aembeddings_wrapper(
     return response


+@dont_throw
 def _handle_request(span, kwargs, instance):
     _set_request_attributes(span, kwargs)
     if should_send_prompts():
@@ -151,6 +153,7 @@ def _handle_request(span, kwargs, instance):
     _set_client_attributes(span, instance)


+@dont_throw
 def _handle_response(
     response,
     span,
@@ -217,17 +220,12 @@ def _set_prompts(span, prompt):
     if not span.is_recording() or not prompt:
         return

-    try:
-        if isinstance(prompt, list):
-            for i, p in enumerate(prompt):
-                _set_span_attribute(
-                    span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", p
-                )
-        else:
-            _set_span_attribute(
-                span,
-                f"{SpanAttributes.LLM_PROMPTS}.0.content",
-                prompt,
-            )
-    except Exception as ex:  # pylint: disable=broad-except
-        logger.warning("Failed to set prompts for openai span, error: %s", str(ex))
+    if isinstance(prompt, list):
+        for i, p in enumerate(prompt):
+            _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", p)
+    else:
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_PROMPTS}.0.content",
+            prompt,
+        )
@@ -6,14 +6,24 @@ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.metrics import Counter, Histogram

 from opentelemetry.instrumentation.openai import is_openai_v1
-from opentelemetry.instrumentation.openai.shared import _get_openai_base_url, model_as_dict
-from opentelemetry.instrumentation.openai.utils import _with_image_gen_metric_wrapper
+from opentelemetry.instrumentation.openai.shared import (
+    _get_openai_base_url,
+    model_as_dict,
+)
+from opentelemetry.instrumentation.openai.utils import (
+    _with_image_gen_metric_wrapper,
+)


 @_with_image_gen_metric_wrapper
-def image_gen_metrics_wrapper(duration_histogram: Histogram,
-                              exception_counter: Counter,
-                              wrapped, instance, args, kwargs):
+def image_gen_metrics_wrapper(
+    duration_histogram: Histogram,
+    exception_counter: Counter,
+    wrapped,
+    instance,
+    args,
+    kwargs,
+):
     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
         return wrapped(*args, **kwargs)

@@ -24,7 +34,7 @@ def image_gen_metrics_wrapper(duration_histogram: Histogram,
         end_time = time.time()
     except Exception as e:  # pylint: disable=broad-except
         end_time = time.time()
-        duration = end_time - start_time if 'start_time' in locals() else 0
+        duration = end_time - start_time if "start_time" in locals() else 0

         attributes = {
             "error.type": e.__class__.__name__,
@@ -1,5 +1,6 @@
 from importlib.metadata import version
 from contextlib import asynccontextmanager
+import logging
 import os

 from opentelemetry.instrumentation.openai.shared.config import Config
@@ -99,3 +100,24 @@ def _with_tracer_wrapper(func):
 async def start_as_current_span_async(tracer, *args, **kwargs):
     with tracer.start_as_current_span(*args, **kwargs) as span:
         yield span
+
+
+def dont_throw(func):
+    """
+    A decorator that wraps the passed in function and logs exceptions instead of throwing them.
+
+    @param func: The function to wrap
+    @return: The wrapper function
+    """
+    # Obtain a logger specific to the function's module
+    logger = logging.getLogger(func.__module__)
+
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception as e:
+            logger.warning("Failed to execute %s, error: %s", func.__name__, str(e))
+            if Config.exception_logger:
+                Config.exception_logger(e)
+
+    return wrapper
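
Taken together with the `Config.exception_logger` wiring above, the decorator turns instrumentation bugs into warnings plus an optional callback instead of user-facing crashes. A standalone sketch of the behavior (`set_model_attribute` is a hypothetical helper used only for the demonstration):

    @dont_throw
    def set_model_attribute(span, kwargs):
        # Raises AttributeError when span is None; the decorator logs a
        # warning, forwards the exception to Config.exception_logger if
        # one is set, and returns None rather than propagating.
        span.set_attribute("model", kwargs.get("model"))

    set_model_attribute(None, {"model": "gpt-4"})  # logged, not raised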
@@ -8,7 +8,7 @@ show_missing = true

 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.16.5"
+version = "0.16.6"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
   "Gal Kleinman <gal@traceloop.com>",