opentelemetry-instrumentation-openai 0.44.3__tar.gz → 0.49.6__tar.gz

This diff shows the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
Files changed (22)
  1. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/PKG-INFO +7 -6
  2. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/__init__.py +28 -19
  3. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +42 -8
  4. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +5 -2
  5. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/config.py +4 -4
  6. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +6 -3
  7. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/event_emitter.py +12 -4
  8. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/utils.py +12 -3
  9. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/v0/__init__.py +4 -4
  10. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/v1/__init__.py +4 -4
  11. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +22 -19
  12. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +7 -5
  13. opentelemetry_instrumentation_openai-0.49.6/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +1113 -0
  14. opentelemetry_instrumentation_openai-0.49.6/opentelemetry/instrumentation/openai/version.py +1 -0
  15. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/pyproject.toml +6 -6
  16. opentelemetry_instrumentation_openai-0.44.3/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +0 -640
  17. opentelemetry_instrumentation_openai-0.44.3/opentelemetry/instrumentation/openai/version.py +0 -1
  18. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/README.md +0 -0
  19. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/__init__.py +0 -0
  20. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/event_models.py +0 -0
  21. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
  22. {opentelemetry_instrumentation_openai-0.44.3 → opentelemetry_instrumentation_openai-0.49.6}/opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
PKG-INFO:
@@ -1,6 +1,6 @@
- Metadata-Version: 2.3
+ Metadata-Version: 2.4
  Name: opentelemetry-instrumentation-openai
- Version: 0.44.3
+ Version: 0.49.6
  Summary: OpenTelemetry OpenAI instrumentation
  License: Apache-2.0
  Author: Gal Kleinman
@@ -13,11 +13,12 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
  Provides-Extra: instruments
- Requires-Dist: opentelemetry-api (>=1.28.0,<2.0.0)
- Requires-Dist: opentelemetry-instrumentation (>=0.50b0)
- Requires-Dist: opentelemetry-semantic-conventions (>=0.50b0)
- Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.11)
+ Requires-Dist: opentelemetry-api (>=1.38.0,<2.0.0)
+ Requires-Dist: opentelemetry-instrumentation (>=0.59b0)
+ Requires-Dist: opentelemetry-semantic-conventions (>=0.59b0)
+ Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.13,<0.5.0)
  Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
  Description-Content-Type: text/markdown

opentelemetry/instrumentation/openai/shared/__init__.py:
@@ -1,6 +1,8 @@
  import json
  import logging
  import types
+ import openai
+ import pydantic
  from importlib.metadata import version

  from opentelemetry.instrumentation.openai.shared.config import Config
@@ -8,14 +10,13 @@ from opentelemetry.instrumentation.openai.utils import (
      dont_throw,
      is_openai_v1,
  )
- from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
-     GEN_AI_RESPONSE_ID,
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+     openai_attributes as OpenAIAttributes,
  )
  from opentelemetry.semconv_ai import SpanAttributes
  from opentelemetry.trace.propagation import set_span_in_context
  from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
- import openai
- import pydantic

  OPENAI_LLM_USAGE_TOKEN_TYPES = ["prompt_tokens", "completion_tokens"]
  PROMPT_FILTER_KEY = "prompt_filter_results"
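
Note: the second hunk swaps a single deep-path constant import (GEN_AI_RESPONSE_ID) for two module namespaces. As orientation, a minimal sketch assuming the incubating opentelemetry-semantic-conventions package pinned above; the printed strings are our reading of the conventions, worth verifying against the installed version:

    from opentelemetry.semconv._incubating.attributes import (
        gen_ai_attributes as GenAIAttributes,
        openai_attributes as OpenAIAttributes,
    )

    # Each constant resolves to a plain attribute-name string:
    print(GenAIAttributes.GEN_AI_SYSTEM)                 # gen_ai.system
    print(GenAIAttributes.GEN_AI_RESPONSE_ID)            # gen_ai.response.id
    print(OpenAIAttributes.OPENAI_REQUEST_SERVICE_TIER)  # gen_ai.openai.request.service_tier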
@@ -109,7 +110,7 @@ def _set_request_attributes(span, kwargs, instance=None):

      base_url = _get_openai_base_url(instance) if instance else ""
      vendor = _get_vendor_from_url(base_url)
-     _set_span_attribute(span, SpanAttributes.LLM_SYSTEM, vendor)
+     _set_span_attribute(span, GenAIAttributes.GEN_AI_SYSTEM, vendor)

      model = kwargs.get("model")
      if vendor == "AWS" and model and "." in model:
@@ -117,14 +118,14 @@ def _set_request_attributes(span, kwargs, instance=None):
      elif vendor == "OpenRouter":
          model = _extract_model_name_from_provider_format(model)

-     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, model)
+     _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_MODEL, model)
      _set_span_attribute(
-         span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
+         span, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
      )
      _set_span_attribute(
-         span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
+         span, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, kwargs.get("temperature")
      )
-     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
+     _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_TOP_P, kwargs.get("top_p"))
      _set_span_attribute(
          span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
      )
@@ -141,6 +142,9 @@ def _set_request_attributes(span, kwargs, instance=None):
      _set_span_attribute(
          span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
      )
+     _set_span_attribute(
+         span, OpenAIAttributes.OPENAI_REQUEST_SERVICE_TIER, kwargs.get("service_tier")
+     )
      if response_format := kwargs.get("response_format"):
          # backward-compatible check for
          # openai.types.shared_params.response_format_json_schema.ResponseFormatJSONSchema
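
The added block records OpenAI's service_tier request option on the span. For context, a hedged usage sketch of a request that would populate it — the model name and tier value are illustrative, not taken from this diff:

    from openai import OpenAI

    client = OpenAI()
    completion = client.chat.completions.create(
        model="gpt-4o",       # illustrative
        service_tier="auto",  # recorded via OPENAI_REQUEST_SERVICE_TIER
        messages=[{"role": "user", "content": "Hello"}],
    )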
@@ -194,7 +198,7 @@ def _set_response_attributes(span, response):
      if "error" in response:
          _set_span_attribute(
              span,
-             f"{SpanAttributes.LLM_PROMPTS}.{PROMPT_ERROR}",
+             f"{GenAIAttributes.GEN_AI_PROMPT}.{PROMPT_ERROR}",
              json.dumps(response.get("error")),
          )
          return
@@ -202,14 +206,19 @@ def _set_response_attributes(span, response):
      response_model = response.get("model")
      if response_model:
          response_model = _extract_model_name_from_provider_format(response_model)
-     _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response_model)
-     _set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))
+     _set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, response_model)
+     _set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_ID, response.get("id"))

      _set_span_attribute(
          span,
          SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
          response.get("system_fingerprint"),
      )
+     _set_span_attribute(
+         span,
+         OpenAIAttributes.OPENAI_RESPONSE_SERVICE_TIER,
+         response.get("service_tier"),
+     )
      _log_prompt_filter(span, response)
      usage = response.get("usage")
      if not usage:
@@ -223,11 +232,11 @@ def _set_response_attributes(span, response):
      )
      _set_span_attribute(
          span,
-         SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+         GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
          usage.get("completion_tokens"),
      )
      _set_span_attribute(
-         span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.get("prompt_tokens")
+         span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, usage.get("prompt_tokens")
      )
      prompt_tokens_details = dict(usage.get("prompt_tokens_details", {}))
      _set_span_attribute(
@@ -242,7 +251,7 @@ def _log_prompt_filter(span, response_dict):
      if response_dict.get("prompt_filter_results"):
          _set_span_attribute(
              span,
-             f"{SpanAttributes.LLM_PROMPTS}.{PROMPT_FILTER_KEY}",
+             f"{GenAIAttributes.GEN_AI_PROMPT}.{PROMPT_FILTER_KEY}",
              json.dumps(response_dict.get("prompt_filter_results")),
          )
 
@@ -254,11 +263,11 @@ def _set_span_stream_usage(span, prompt_tokens, completion_tokens):

      if isinstance(completion_tokens, int) and completion_tokens >= 0:
          _set_span_attribute(
-             span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
+             span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
          )

      if isinstance(prompt_tokens, int) and prompt_tokens >= 0:
-         _set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)
+         _set_span_attribute(span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens)

      if (
          isinstance(prompt_tokens, int)
@@ -369,8 +378,8 @@ def metric_shared_attributes(

      return {
          **attributes,
-         SpanAttributes.LLM_SYSTEM: vendor,
-         SpanAttributes.LLM_RESPONSE_MODEL: response_model,
+         GenAIAttributes.GEN_AI_SYSTEM: vendor,
+         GenAIAttributes.GEN_AI_RESPONSE_MODEL: response_model,
          "gen_ai.operation.name": operation,
          "server.address": server_address,
          "stream": is_streaming,

opentelemetry/instrumentation/openai/shared/chat_wrappers.py:
@@ -42,6 +42,9 @@ from opentelemetry.instrumentation.openai.utils import (
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
  from opentelemetry.metrics import Counter, Histogram
  from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
  from opentelemetry.semconv_ai import (
      SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
      LLMRequestTypeValues,
@@ -285,6 +288,14 @@ async def _handle_request(span, kwargs, instance):
      if Config.enable_trace_context_propagation:
          propagate_trace_context(span, kwargs)

+     # Reasoning request attributes
+     reasoning_effort = kwargs.get("reasoning_effort")
+     _set_span_attribute(
+         span,
+         SpanAttributes.LLM_REQUEST_REASONING_EFFORT,
+         reasoning_effort or ()
+     )
+

  @dont_throw
  def _handle_response(
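
The new block captures the reasoning_effort request parameter. A hedged sketch of a call that would populate it — reasoning_effort is only accepted by OpenAI's reasoning models, and the model name and effort value here are illustrative:

    from openai import OpenAI

    client = OpenAI()
    completion = client.chat.completions.create(
        model="o3-mini",         # illustrative reasoning model
        reasoning_effort="low",  # recorded as LLM_REQUEST_REASONING_EFFORT
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
    )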
@@ -316,6 +327,28 @@ def _handle_response(
      # span attributes
      _set_response_attributes(span, response_dict)

+     # Reasoning usage attributes
+     usage = response_dict.get("usage")
+     reasoning_tokens = None
+     if usage:
+         # Support both dict-style and object-style `usage`
+         tokens_details = (
+             usage.get("completion_tokens_details") if isinstance(usage, dict)
+             else getattr(usage, "completion_tokens_details", None)
+         )
+
+         if tokens_details:
+             reasoning_tokens = (
+                 tokens_details.get("reasoning_tokens", None) if isinstance(tokens_details, dict)
+                 else getattr(tokens_details, "reasoning_tokens", None)
+             )
+
+     _set_span_attribute(
+         span,
+         SpanAttributes.LLM_USAGE_REASONING_TOKENS,
+         reasoning_tokens or 0,
+     )
+
      if should_emit_events():
          if response.choices is not None:
              for choice in response.choices:
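
For reference, the count the wrapper extracts above is also reachable directly on the SDK response object. Continuing the sketch from the previous note, and assuming the server filled in the optional details field:

    usage = completion.usage
    details = getattr(usage, "completion_tokens_details", None)
    reasoning_tokens = getattr(details, "reasoning_tokens", None) if details else None
    print(reasoning_tokens)  # e.g. 128; None when the server omits the field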
@@ -374,7 +407,7 @@ def _set_token_counter_metrics(token_counter, usage, shared_attributes):
      if name in OPENAI_LLM_USAGE_TOKEN_TYPES:
          attributes_with_token_type = {
              **shared_attributes,
-             SpanAttributes.LLM_TOKEN_TYPE: _token_type(name),
+             GenAIAttributes.GEN_AI_TOKEN_TYPE: _token_type(name),
          }
          token_counter.record(val, attributes=attributes_with_token_type)

@@ -399,7 +432,8 @@ async def _process_image_item(item, trace_id, span_id, message_index, content_in
      image_format = item["image_url"]["url"].split(";")[0].split("/")[1]
      image_name = f"message_{message_index}_content_{content_index}.{image_format}"
      base64_string = item["image_url"]["url"].split(",")[1]
-     url = await Config.upload_base64_image(trace_id, span_id, image_name, base64_string)
+     # Convert trace_id and span_id to strings as expected by upload function
+     url = await Config.upload_base64_image(str(trace_id), str(span_id), image_name, base64_string)

      return {"type": "image_url", "image_url": {"url": url}}
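
The str() conversion matters because OpenTelemetry exposes trace and span IDs as plain integers on the span context. A small sketch; note that str() yields the decimal form, while the API's formatting helpers give the conventional zero-padded hex:

    from opentelemetry import trace
    from opentelemetry.trace import format_span_id, format_trace_id

    ctx = trace.get_current_span().get_span_context()
    print(type(ctx.trace_id), type(ctx.span_id))  # <class 'int'> <class 'int'>
    print(format_trace_id(ctx.trace_id))          # 32-char lowercase hex
    print(format_span_id(ctx.span_id))            # 16-char lowercase hex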
 
@@ -410,7 +444,7 @@ async def _set_prompts(span, messages):
          return

      for i, msg in enumerate(messages):
-         prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
+         prefix = f"{GenAIAttributes.GEN_AI_PROMPT}.{i}"
          msg = msg if isinstance(msg, dict) else model_as_dict(msg)

          _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
@@ -463,7 +497,7 @@ def _set_completions(span, choices):

      for choice in choices:
          index = choice.get("index")
-         prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+         prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
          _set_span_attribute(
              span, f"{prefix}.finish_reason", choice.get("finish_reason")
          )
@@ -549,7 +583,7 @@ def _set_streaming_token_metrics(
      if isinstance(prompt_usage, int) and prompt_usage >= 0:
          attributes_with_token_type = {
              **shared_attributes,
-             SpanAttributes.LLM_TOKEN_TYPE: "input",
+             GenAIAttributes.GEN_AI_TOKEN_TYPE: "input",
          }
          token_counter.record(
              prompt_usage, attributes=attributes_with_token_type)
@@ -557,7 +591,7 @@
      if isinstance(completion_usage, int) and completion_usage >= 0:
          attributes_with_token_type = {
              **shared_attributes,
-             SpanAttributes.LLM_TOKEN_TYPE: "output",
+             GenAIAttributes.GEN_AI_TOKEN_TYPE: "output",
          }
          token_counter.record(
              completion_usage, attributes=attributes_with_token_type
@@ -856,7 +890,7 @@ def _build_from_streaming_response(
          yield item_to_yield

      shared_attributes = {
-         SpanAttributes.LLM_RESPONSE_MODEL: complete_response.get("model") or None,
+         GenAIAttributes.GEN_AI_RESPONSE_MODEL: complete_response.get("model") or None,
          "server.address": _get_openai_base_url(instance),
          "stream": True,
      }
@@ -927,7 +961,7 @@ async def _abuild_from_streaming_response(
          yield item_to_yield

      shared_attributes = {
-         SpanAttributes.LLM_RESPONSE_MODEL: complete_response.get("model") or None,
+         GenAIAttributes.GEN_AI_RESPONSE_MODEL: complete_response.get("model") or None,
          "server.address": _get_openai_base_url(instance),
          "stream": True,
      }

opentelemetry/instrumentation/openai/shared/completion_wrappers.py:
@@ -28,6 +28,9 @@ from opentelemetry.instrumentation.openai.utils import (
      should_send_prompts,
  )
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
  from opentelemetry.semconv_ai import (
      SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
      LLMRequestTypeValues,
@@ -160,7 +163,7 @@ def _set_prompts(span, prompt):

      _set_span_attribute(
          span,
-         f"{SpanAttributes.LLM_PROMPTS}.0.user",
+         f"{GenAIAttributes.GEN_AI_PROMPT}.0.user",
          prompt[0] if isinstance(prompt, list) else prompt,
      )

@@ -172,7 +175,7 @@ def _set_completions(span, choices):

      for choice in choices:
          index = choice.get("index")
-         prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+         prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
          _set_span_attribute(
              span, f"{prefix}.finish_reason", choice.get("finish_reason")
          )

opentelemetry/instrumentation/openai/shared/config.py:
@@ -1,15 +1,15 @@
  from typing import Callable, Optional

- from opentelemetry._events import EventLogger
+ from opentelemetry._logs import Logger


  class Config:
      enrich_assistant = False
      exception_logger = None
      get_common_metrics_attributes: Callable[[], dict] = lambda: {}
-     upload_base64_image: Callable[[str, str, str], str] = (
-         lambda trace_id, span_id, base64_image_url: str
+     upload_base64_image: Callable[[str, str, str, str], str] = (
+         lambda trace_id, span_id, image_name, base64_string: str
      )
      enable_trace_context_propagation: bool = True
      use_legacy_attributes = True
-     event_logger: Optional[EventLogger] = None
+     event_logger: Optional[Logger] = None
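
With the corrected annotation, a custom uploader receives four string arguments, and the chat wrapper awaits it. A hedged sketch of wiring one in; the returned URL scheme is illustrative:

    from opentelemetry.instrumentation.openai.shared.config import Config

    async def my_upload_base64_image(
        trace_id: str, span_id: str, image_name: str, base64_string: str
    ) -> str:
        # Illustrative: persist the decoded image somewhere durable and
        # return a URL that replaces the inline base64 payload on the span.
        return f"https://storage.example.com/{trace_id}/{span_id}/{image_name}"

    Config.upload_base64_image = my_upload_base64_image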

opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py:
@@ -32,6 +32,9 @@ from opentelemetry.instrumentation.openai.utils import (
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
  from opentelemetry.metrics import Counter, Histogram
  from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
  from opentelemetry.semconv_ai import (
      SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
      LLMRequestTypeValues,
@@ -248,7 +251,7 @@ def _set_embeddings_metrics(
              continue
          attributes_with_token_type = {
              **shared_attributes,
-             SpanAttributes.LLM_TOKEN_TYPE: _token_type(name),
+             GenAIAttributes.GEN_AI_TOKEN_TYPE: _token_type(name),
          }
          token_counter.record(val, attributes=attributes_with_token_type)

@@ -270,11 +273,11 @@ def _set_prompts(span, prompt):

      if isinstance(prompt, list):
          for i, p in enumerate(prompt):
-             _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", p)
+             _set_span_attribute(span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content", p)
      else:
          _set_span_attribute(
              span,
-             f"{SpanAttributes.LLM_PROMPTS}.0.content",
+             f"{GenAIAttributes.GEN_AI_PROMPT}.0.content",
              prompt,
          )

opentelemetry/instrumentation/openai/shared/event_emitter.py:
@@ -2,7 +2,7 @@ from dataclasses import asdict
  from enum import Enum
  from typing import Union

- from opentelemetry._events import Event
+ from opentelemetry._logs import LogRecord
  from opentelemetry.instrumentation.openai.shared.event_models import (
      ChoiceEvent,
      MessageEvent,
@@ -76,7 +76,12 @@ def _emit_message_event(event: MessageEvent) -> None:
          for tool_call in body["tool_calls"]:
              tool_call["function"].pop("arguments", None)

-     Config.event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
+     log_record = LogRecord(
+         body=body,
+         attributes=EVENT_ATTRIBUTES,
+         event_name=name
+     )
+     Config.event_logger.emit(log_record)


  def _emit_choice_event(event: ChoiceEvent) -> None:
@@ -95,6 +100,9 @@ def _emit_choice_event(event: ChoiceEvent) -> None:
          for tool_call in body["tool_calls"]:
              tool_call["function"].pop("arguments", None)

-     Config.event_logger.emit(
-         Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
+     log_record = LogRecord(
+         body=body,
+         attributes=EVENT_ATTRIBUTES,
+         event_name="gen_ai.choice"
      )
+     Config.event_logger.emit(log_record)
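
Here the deprecated Events API (opentelemetry._events) gives way to the Logs API: an event becomes a LogRecord whose event_name field carries what Event(name=...) used to. A minimal sketch of the same emission path, assuming the opentelemetry-api floor pinned earlier in this diff (1.38), where LogRecord accepts event_name:

    from opentelemetry._logs import LogRecord, get_logger

    logger = get_logger(__name__)
    logger.emit(
        LogRecord(
            event_name="gen_ai.choice",                  # replaces Event(name=...)
            body={"index": 0, "finish_reason": "stop"},  # illustrative body
            attributes={"gen_ai.system": "openai"},      # illustrative attributes
        )
    )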

opentelemetry/instrumentation/openai/utils.py:
@@ -5,9 +5,10 @@ import threading
  import traceback
  from contextlib import asynccontextmanager
  from importlib.metadata import version
+ from packaging import version as pkg_version

  from opentelemetry import context as context_api
- from opentelemetry._events import EventLogger
+ from opentelemetry._logs import Logger
  from opentelemetry.instrumentation.openai.shared.config import Config

  import openai
@@ -18,7 +19,15 @@ TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"


  def is_openai_v1():
-     return _OPENAI_VERSION >= "1.0.0"
+     return pkg_version.parse(_OPENAI_VERSION) >= pkg_version.parse("1.0.0")
+
+
+ def is_reasoning_supported():
+     # Reasoning has been introduced in OpenAI API on Dec 17, 2024
+     # as per https://platform.openai.com/docs/changelog.
+     # The updated OpenAI library version is 1.58.0
+     # as per https://pypi.org/project/openai/.
+     return pkg_version.parse(_OPENAI_VERSION) >= pkg_version.parse("1.58.0")


  def is_azure_openai(instance):
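
The switch to packaging fixes a real pitfall: comparing version strings lexicographically breaks as soon as a component reaches two digits. A quick demonstration:

    from packaging import version as pkg_version

    print("1.58.0" >= "1.9.0")                                        # False -- wrong
    print(pkg_version.parse("1.58.0") >= pkg_version.parse("1.9.0"))  # True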
@@ -177,5 +186,5 @@ def should_emit_events() -> bool:
      and if the event logger is not None.
      """
      return not Config.use_legacy_attributes and isinstance(
-         Config.event_logger, EventLogger
+         Config.event_logger, Logger
      )

opentelemetry/instrumentation/openai/v0/__init__.py:
@@ -1,6 +1,6 @@
  from typing import Collection

- from opentelemetry._events import get_event_logger
+ from opentelemetry._logs import get_logger
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from opentelemetry.instrumentation.openai.shared.chat_wrappers import (
      achat_wrapper,
@@ -39,9 +39,9 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
          meter = get_meter(__name__, __version__, meter_provider)

          if not Config.use_legacy_attributes:
-             event_logger_provider = kwargs.get("event_logger_provider")
-             Config.event_logger = get_event_logger(
-                 __name__, __version__, event_logger_provider=event_logger_provider
+             logger_provider = kwargs.get("logger_provider")
+             Config.event_logger = get_logger(
+                 __name__, __version__, logger_provider=logger_provider
              )

          if is_metrics_enabled():

opentelemetry/instrumentation/openai/v1/__init__.py:
@@ -1,6 +1,6 @@
  from typing import Collection

- from opentelemetry._events import get_event_logger
+ from opentelemetry._logs import get_logger
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from opentelemetry.instrumentation.openai.shared.chat_wrappers import (
      achat_wrapper,
@@ -75,9 +75,9 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
          meter = get_meter(__name__, __version__, meter_provider)

          if not Config.use_legacy_attributes:
-             event_logger_provider = kwargs.get("event_logger_provider")
-             Config.event_logger = get_event_logger(
-                 __name__, __version__, event_logger_provider=event_logger_provider
+             logger_provider = kwargs.get("logger_provider")
+             Config.event_logger = get_logger(
+                 __name__, __version__, logger_provider=logger_provider
              )

          if is_metrics_enabled():
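
Callers that disable legacy attributes now hand the instrumentor a logger_provider instead of an event_logger_provider. A hedged wiring sketch — the constructor keyword use_legacy_attributes is our assumption from Config.use_legacy_attributes and may be spelled differently in this package:

    from opentelemetry.sdk._logs import LoggerProvider
    from opentelemetry.instrumentation.openai import OpenAIInstrumentor

    OpenAIInstrumentor(use_legacy_attributes=False).instrument(
        logger_provider=LoggerProvider(),  # routed to get_logger(...) above
    )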

opentelemetry/instrumentation/openai/v1/assistant_wrappers.py:
@@ -20,6 +20,9 @@ from opentelemetry.instrumentation.openai.utils import (
  )
  from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
  from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
  from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
  from opentelemetry.trace import SpanKind, Status, StatusCode

@@ -150,37 +153,37 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):

      _set_span_attribute(
          span,
-         SpanAttributes.LLM_SYSTEM,
+         GenAIAttributes.GEN_AI_SYSTEM,
          "openai",
      )
      _set_span_attribute(
          span,
-         SpanAttributes.LLM_REQUEST_MODEL,
+         GenAIAttributes.GEN_AI_REQUEST_MODEL,
          assistant["model"],
      )
      _set_span_attribute(
          span,
-         SpanAttributes.LLM_RESPONSE_MODEL,
+         GenAIAttributes.GEN_AI_RESPONSE_MODEL,
          assistant["model"],
      )
      if should_emit_events():
          emit_event(MessageEvent(content=assistant["instructions"], role="system"))
      else:
          _set_span_attribute(
-             span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
+             span, f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role", "system"
          )
          _set_span_attribute(
              span,
-             f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+             f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
              assistant["instructions"],
          )
          prompt_index += 1
      _set_span_attribute(
-         span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
+         span, f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role", "system"
      )
      _set_span_attribute(
          span,
-         f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+         f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
          run["instructions"],
      )
      if should_emit_events():
@@ -189,7 +192,7 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):

      completion_index = 0
      for msg in messages:
-         prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
+         prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{completion_index}"
          content = msg.get("content")

          message_content = content[0].get("text").get("value")
@@ -200,12 +203,12 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
          else:
              _set_span_attribute(
                  span,
-                 f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
+                 f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role",
                  message_role,
              )
              _set_span_attribute(
                  span,
-                 f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+                 f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
                  message_content,
              )
              prompt_index += 1
@@ -229,12 +232,12 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
      usage_dict = model_as_dict(run.get("usage"))
      _set_span_attribute(
          span,
-         SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+         GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
          usage_dict.get("completion_tokens"),
      )
      _set_span_attribute(
          span,
-         SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+         GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
          usage_dict.get("prompt_tokens"),
      )
@@ -270,16 +273,16 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
      assistant = assistants[assistant_id]

      _set_span_attribute(
-         span, SpanAttributes.LLM_REQUEST_MODEL, assistants[assistant_id]["model"]
+         span, GenAIAttributes.GEN_AI_REQUEST_MODEL, assistants[assistant_id]["model"]
      )
      _set_span_attribute(
          span,
-         SpanAttributes.LLM_SYSTEM,
+         GenAIAttributes.GEN_AI_SYSTEM,
          "openai",
      )
      _set_span_attribute(
          span,
-         SpanAttributes.LLM_RESPONSE_MODEL,
+         GenAIAttributes.GEN_AI_RESPONSE_MODEL,
          assistants[assistant_id]["model"],
      )
      if should_emit_events():
@@ -290,20 +293,20 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
          )
      else:
          _set_span_attribute(
-             span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system"
+             span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", "system"
          )
          _set_span_attribute(
              span,
-             f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
+             f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content",
              assistants[assistant_id]["instructions"],
          )
          i += 1
      if should_emit_events():
          emit_event(MessageEvent(content=instructions, role="system"))
      else:
-         _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
+         _set_span_attribute(span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", "system")
          _set_span_attribute(
-             span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions
+             span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content", instructions
          )

  from opentelemetry.instrumentation.openai.v1.event_handler_wrapper import (