opentelemetry-instrumentation-openai 0.18.2 (py3-none-any.whl) → 0.20.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -305,7 +305,7 @@ def _set_token_counter_metrics(token_counter, usage, shared_attributes):
                 **shared_attributes,
                 "llm.usage.token_type": name.split("_")[0],
             }
-            token_counter.add(val, attributes=attributes_with_token_type)
+            token_counter.record(val, attributes=attributes_with_token_type)


 def _set_prompts(span, messages):
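The change above (repeated throughout the wrappers below) swaps the OpenTelemetry Counter API for the Histogram API: token usage is now reported with Histogram.record() instead of Counter.add(), so each call's token count is kept as an observation in a distribution rather than folded into a monotonic sum. A minimal sketch of the two APIs, assuming only the opentelemetry-api package; the meter name and values are illustrative and are not the package's own code:

from opentelemetry import metrics

meter = metrics.get_meter("example.meter")  # illustrative meter name

# Old style (0.18.2): a Counter accumulates token counts into a monotonic sum.
token_counter = meter.create_counter(name="llm.openai.chat_completions.tokens", unit="token")
token_counter.add(42, attributes={"llm.usage.token_type": "prompt"})

# New style (0.20.0): a Histogram records each observation, preserving per-call distributions.
tokens_histogram = meter.create_histogram(name="gen_ai.client.token.usage", unit="token")
tokens_histogram.record(42, attributes={"llm.usage.token_type": "prompt"})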
@@ -422,14 +422,14 @@ def _set_streaming_token_metrics(
             **shared_attributes,
             "llm.usage.token_type": "prompt",
         }
-        token_counter.add(prompt_usage, attributes=attributes_with_token_type)
+        token_counter.record(prompt_usage, attributes=attributes_with_token_type)

     if type(completion_usage) is int and completion_usage >= 0:
         attributes_with_token_type = {
             **shared_attributes,
             "llm.usage.token_type": "completion",
         }
-        token_counter.add(completion_usage, attributes=attributes_with_token_type)
+        token_counter.record(completion_usage, attributes=attributes_with_token_type)


 class ChatStream(ObjectProxy):
@@ -202,7 +202,7 @@ def _set_embeddings_metrics(
             **shared_attributes,
             "llm.usage.token_type": name.split("_")[0],
         }
-        token_counter.add(val, attributes=attributes_with_token_type)
+        token_counter.record(val, attributes=attributes_with_token_type)

     # vec size metrics
     # should use counter for vector_size?
@@ -35,20 +35,20 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
         meter = get_meter(__name__, __version__, meter_provider)

         if is_metrics_enabled():
-            chat_token_counter = meter.create_counter(
-                name="llm.openai.chat_completions.tokens",
+            tokens_histogram = meter.create_histogram(
+                name="gen_ai.client.token.usage",
                 unit="token",
                 description="Number of tokens used in prompt and completions",
             )

             chat_choice_counter = meter.create_counter(
-                name="llm.openai.chat_completions.choices",
+                name="gen_ai.client.generation.choices",
                 unit="choice",
                 description="Number of choices returned by chat completions call",
             )

             chat_duration_histogram = meter.create_histogram(
-                name="llm.openai.chat_completions.duration",
+                name="gen_ai.client.operation.duration",
                 unit="s",
                 description="Duration of chat completion operation",
             )
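From this hunk on, the V0 instrumentor creates its metrics under gen_ai.*-prefixed names aligned with the emerging GenAI semantic conventions (gen_ai.client.token.usage, gen_ai.client.generation.choices, gen_ai.client.operation.duration) instead of the former llm.openai.chat_completions.* names, with token usage moving from a Counter to a Histogram. A hedged sketch of how one might confirm the new names in a test, assuming opentelemetry-sdk is installed and metrics are enabled for the instrumentation; the OpenAI call itself is elided and needs a valid API key:

from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

# Collect metrics in memory so the emitted names can be inspected directly.
reader = InMemoryMetricReader()
provider = MeterProvider(metric_readers=[reader])
metrics.set_meter_provider(provider)

OpenAIInstrumentor().instrument(meter_provider=provider)

# ... make an instrumented chat-completion or embeddings call here ...

emitted = set()
data = reader.get_metrics_data()
if data is not None:
    for resource_metrics in data.resource_metrics:
        for scope_metrics in resource_metrics.scope_metrics:
            for metric in scope_metrics.metrics:
                emitted.add(metric.name)

print("gen_ai.client.token.usage" in emitted)            # expected True on 0.20.0
print("llm.openai.chat_completions.tokens" in emitted)   # expected False on 0.20.0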
@@ -71,7 +71,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             )
         else:
             (
-                chat_token_counter,
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -80,12 +80,6 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             ) = (None, None, None, None, None, None)

         if is_metrics_enabled():
-            embeddings_token_counter = meter.create_counter(
-                name="llm.openai.embeddings.tokens",
-                unit="token",
-                description="Number of tokens used in prompt and completions",
-            )
-
             embeddings_vector_size_counter = meter.create_counter(
                 name="llm.openai.embeddings.vector_size",
                 unit="element",
@@ -105,7 +99,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             )
         else:
             (
-                embeddings_token_counter,
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -120,7 +114,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "ChatCompletion.create",
             chat_wrapper(
                 tracer,
-                chat_token_counter,
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -133,7 +127,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "ChatCompletion.acreate",
             achat_wrapper(
                 tracer,
-                chat_token_counter,
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -146,7 +140,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "Embedding.create",
             embeddings_wrapper(
                 tracer,
-                embeddings_token_counter,
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -157,7 +151,7 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
             "Embedding.acreate",
             aembeddings_wrapper(
                 tracer,
-                embeddings_token_counter,
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -49,20 +49,20 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
         meter = get_meter(__name__, __version__, meter_provider)

         if is_metrics_enabled():
-            chat_token_counter = meter.create_counter(
-                name="llm.openai.chat_completions.tokens",
+            tokens_histogram = meter.create_histogram(
+                name="gen_ai.client.token.usage",
                 unit="token",
                 description="Number of tokens used in prompt and completions",
             )

             chat_choice_counter = meter.create_counter(
-                name="llm.openai.chat_completions.choices",
+                name="gen_ai.client.generation.choices",
                 unit="choice",
                 description="Number of choices returned by chat completions call",
             )

             chat_duration_histogram = meter.create_histogram(
-                name="llm.openai.chat_completions.duration",
+                name="gen_ai.client.operation.duration",
                 unit="s",
                 description="Duration of chat completion operation",
             )
@@ -85,7 +85,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             )
         else:
             (
-                chat_token_counter,
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -98,7 +98,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "Completions.create",
             chat_wrapper(
                 tracer,
-                chat_token_counter,
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -114,12 +114,6 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             )

         if is_metrics_enabled():
-            embeddings_token_counter = meter.create_counter(
-                name="llm.openai.embeddings.tokens",
-                unit="token",
-                description="Number of tokens used in prompt and completions",
-            )
-
             embeddings_vector_size_counter = meter.create_counter(
                 name="llm.openai.embeddings.vector_size",
                 unit="element",
@@ -139,7 +133,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             )
         else:
             (
-                embeddings_token_counter,
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -150,7 +144,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "Embeddings.create",
             embeddings_wrapper(
                 tracer,
-                embeddings_token_counter,
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
@@ -162,7 +156,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "AsyncCompletions.create",
             achat_wrapper(
                 tracer,
-                chat_token_counter,
+                tokens_histogram,
                 chat_choice_counter,
                 chat_duration_histogram,
                 chat_exception_counter,
@@ -180,7 +174,7 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
             "AsyncEmbeddings.create",
             aembeddings_wrapper(
                 tracer,
-                embeddings_token_counter,
+                tokens_histogram,
                 embeddings_vector_size_counter,
                 embeddings_duration_histogram,
                 embeddings_exception_counter,
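Taken together, the V0 and V1 hunks route both the chat and the embeddings wrappers through the single shared tokens_histogram, so the separate embeddings token counter disappears and the token type is carried in the llm.usage.token_type attribute. A short, hedged summary of the metric renames visible in this diff, for anyone updating dashboards or alerts keyed to the old names; metrics not listed here (such as the embeddings vector size, duration, and exception metrics) appear unchanged in these hunks:

# Old metric name (0.18.2)               -> new metric name (0.20.0)
METRIC_RENAMES = {
    "llm.openai.chat_completions.tokens": "gen_ai.client.token.usage",        # Counter -> Histogram
    "llm.openai.embeddings.tokens": "gen_ai.client.token.usage",              # merged into the shared Histogram
    "llm.openai.chat_completions.choices": "gen_ai.client.generation.choices",
    "llm.openai.chat_completions.duration": "gen_ai.client.operation.duration",
}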
@@ -1 +1 @@
-__version__ = "0.18.2"
+__version__ = "0.20.0"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-openai
-Version: 0.18.2
+Version: 0.20.0
 Summary: OpenTelemetry OpenAI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-openai
 License: Apache-2.0
@@ -1,17 +1,17 @@
 opentelemetry/instrumentation/openai/__init__.py,sha256=xl3Kvqry9glVhu8VtdknfUE9FpXQ7KWAFqtVlpjE-40,1344
 opentelemetry/instrumentation/openai/shared/__init__.py,sha256=5kEyVhz2YDHvuq2SDQOsDhtjbG7R7GCn793oLq2_J_k,7490
-opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=6gJUpjp1tq3eYviTVTRNq5RcF4yduhI6yAKJBSEGToI,23344
+opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=ppjZgX8k5iORHUlCSyGPxDAYpXKOqs9KiCUSB0Cgu4s,23353
 opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=-JHfgyxic5I3Wr3Uc_L-U7ztDVFcyovtF37tNLtaW3s,6604
 opentelemetry/instrumentation/openai/shared/config.py,sha256=5uekQEnmYo1o6tsTD2IGc-cVmHUo5KmUC4pOVdrFFNk,102
-opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=6I6I98T1a5Np0ZjcGZQbId4ZyEmMI6o9wVm8qoRpO9o,6595
+opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=aL3uAkfscMcCqCjzY1lZUNtxkiIV2agg6B8wB5CO8mg,6598
 opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py,sha256=BuYVdxiI71ajx5WZ0B5OpgFzOWGGh-fV77pp3h3nrJA,1891
 opentelemetry/instrumentation/openai/utils.py,sha256=IM5l_MjM7XnO0e7tDGsPml-juQ9SI1QK200X0atiAyE,3357
-opentelemetry/instrumentation/openai/v0/__init__.py,sha256=ngmmYyfTwRQSjTZAvNpBIOHQ1BIUXeQnOQz126Iucp0,6116
-opentelemetry/instrumentation/openai/v1/__init__.py,sha256=6XHk11JhkpZixgMDsjb0b-efd8LlBTG0jjMCyf0fOSo,8652
+opentelemetry/instrumentation/openai/v0/__init__.py,sha256=z8RkR1Zp2P_qdIIMCvI6HjD0CPpUJp-2MSGWYiShTIc,5832
+opentelemetry/instrumentation/openai/v1/__init__.py,sha256=5apXSO-jvr5hjqdu4PzSI62R6br4U029c4n9RE5e5GY,8368
 opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=T6Vtdp1fAZdcYjGiTMZwkn4F4DgsltD4p4xLEFW-GhI,5874
 opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py,sha256=SAzYoun2yyOloofyOWtxpm8E2M9TL3Nm8TgKdNyXHuY,2779
-opentelemetry/instrumentation/openai/version.py,sha256=GYySpgpz2Cs3F3nz_H9h8KIG60jkP6f1a--08qCTJCQ,23
-opentelemetry_instrumentation_openai-0.18.2.dist-info/METADATA,sha256=8_mfPUYiS7Wxf5kBberCnJ4KEm4VjYtk7ESz_tXiSZ0,2255
-opentelemetry_instrumentation_openai-0.18.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-opentelemetry_instrumentation_openai-0.18.2.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
-opentelemetry_instrumentation_openai-0.18.2.dist-info/RECORD,,
+opentelemetry/instrumentation/openai/version.py,sha256=NGIecTe1EEM7UeBjKSJ4vCWuGDWF1ZX4PckW2Eguxps,23
+opentelemetry_instrumentation_openai-0.20.0.dist-info/METADATA,sha256=JE1waIIWwwXU0Nti4BKyCBC6u2t6tniMTfTzQA50NYo,2255
+opentelemetry_instrumentation_openai-0.20.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+opentelemetry_instrumentation_openai-0.20.0.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
+opentelemetry_instrumentation_openai-0.20.0.dist-info/RECORD,,