opentelemetry-instrumentation-openai 0.44.2__py3-none-any.whl → 0.45.0__py3-none-any.whl
This diff compares the contents of publicly available package versions as released to their respective public registries; it is provided for informational purposes only.
Potentially problematic release. This version of opentelemetry-instrumentation-openai might be problematic.
- opentelemetry/instrumentation/openai/shared/chat_wrappers.py +133 -128
- opentelemetry/instrumentation/openai/shared/completion_wrappers.py +39 -34
- opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +147 -139
- opentelemetry/instrumentation/openai/version.py +1 -1
- {opentelemetry_instrumentation_openai-0.44.2.dist-info → opentelemetry_instrumentation_openai-0.45.0.dist-info}/METADATA +1 -1
- {opentelemetry_instrumentation_openai-0.44.2.dist-info → opentelemetry_instrumentation_openai-0.45.0.dist-info}/RECORD +8 -8
- {opentelemetry_instrumentation_openai-0.44.2.dist-info → opentelemetry_instrumentation_openai-0.45.0.dist-info}/WHEEL +0 -0
- {opentelemetry_instrumentation_openai-0.44.2.dist-info → opentelemetry_instrumentation_openai-0.45.0.dist-info}/entry_points.txt +0 -0
opentelemetry/instrumentation/openai/shared/chat_wrappers.py

@@ -48,6 +48,7 @@ from opentelemetry.semconv_ai import (
     SpanAttributes,
 )
 from opentelemetry.trace import SpanKind, Tracer
+from opentelemetry import trace
 from opentelemetry.trace.status import Status, StatusCode
 from wrapt import ObjectProxy
 

@@ -86,75 +87,77 @@ def chat_wrapper(
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
     )
 
-    run_async(_handle_request(span, kwargs, instance))
-    try:
-        start_time = time.time()
-        response = wrapped(*args, **kwargs)
-        end_time = time.time()
-    except Exception as e:  # pylint: disable=broad-except
-        end_time = time.time()
-        duration = end_time - start_time if "start_time" in locals() else 0
-
-        attributes = {
-            "error.type": e.__class__.__name__,
-        }
-
-        if duration > 0 and duration_histogram:
-            duration_histogram.record(duration, attributes=attributes)
-        if exception_counter:
-            exception_counter.add(1, attributes=attributes)
-
-        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
-        span.set_status(Status(StatusCode.ERROR, str(e)))
-        span.end()
-
-        raise
-
-    if is_streaming_response(response):
-        # span will be closed after the generator is done
-        if is_openai_v1():
-            return ChatStream(
-                span,
-                response,
-                instance,
-                token_counter,
-                choice_counter,
-                duration_histogram,
-                streaming_time_to_first_token,
-                streaming_time_to_generate,
-                start_time,
-                kwargs,
-            )
-        else:
-            return _build_from_streaming_response(
-                span,
-                response,
-                instance,
-                token_counter,
-                choice_counter,
-                duration_histogram,
-                streaming_time_to_first_token,
-                streaming_time_to_generate,
-                start_time,
-                kwargs,
-            )
-
-    duration = end_time - start_time
-
-    _handle_response(
-        response,
-        span,
-        instance,
-        token_counter,
-        choice_counter,
-        duration_histogram,
-        duration,
-    )
-
-    span.end()
-
-    return response
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        run_async(_handle_request(span, kwargs, instance))
+        try:
+            start_time = time.time()
+            response = wrapped(*args, **kwargs)
+            end_time = time.time()
+        except Exception as e:  # pylint: disable=broad-except
+            end_time = time.time()
+            duration = end_time - start_time if "start_time" in locals() else 0
+
+            attributes = {
+                "error.type": e.__class__.__name__,
+            }
+
+            if duration > 0 and duration_histogram:
+                duration_histogram.record(duration, attributes=attributes)
+            if exception_counter:
+                exception_counter.add(1, attributes=attributes)
+
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
+            span.set_status(Status(StatusCode.ERROR, str(e)))
+            span.end()
+
+            raise
+
+        if is_streaming_response(response):
+            # span will be closed after the generator is done
+            if is_openai_v1():
+                return ChatStream(
+                    span,
+                    response,
+                    instance,
+                    token_counter,
+                    choice_counter,
+                    duration_histogram,
+                    streaming_time_to_first_token,
+                    streaming_time_to_generate,
+                    start_time,
+                    kwargs,
+                )
+            else:
+                return _build_from_streaming_response(
+                    span,
+                    response,
+                    instance,
+                    token_counter,
+                    choice_counter,
+                    duration_histogram,
+                    streaming_time_to_first_token,
+                    streaming_time_to_generate,
+                    start_time,
+                    kwargs,
+                )
+
+        duration = end_time - start_time
+
+        _handle_response(
+            response,
+            span,
+            instance,
+            token_counter,
+            choice_counter,
+            duration_histogram,
+            duration,
+        )
+
+        span.end()
+
+        return response
 
 
 @_with_chat_telemetry_wrapper

@@ -182,78 +185,80 @@ async def achat_wrapper(
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
     )
 
-    await _handle_request(span, kwargs, instance)
-
-    try:
-        start_time = time.time()
-        response = await wrapped(*args, **kwargs)
-        end_time = time.time()
-    except Exception as e:  # pylint: disable=broad-except
-        end_time = time.time()
-        duration = end_time - start_time if "start_time" in locals() else 0
-
-        common_attributes = Config.get_common_metrics_attributes()
-        attributes = {
-            **common_attributes,
-            "error.type": e.__class__.__name__,
-        }
-
-        if duration > 0 and duration_histogram:
-            duration_histogram.record(duration, attributes=attributes)
-        if exception_counter:
-            exception_counter.add(1, attributes=attributes)
-
-        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
-        span.set_status(Status(StatusCode.ERROR, str(e)))
-        span.end()
-
-        raise
-
-    if is_streaming_response(response):
-        # span will be closed after the generator is done
-        if is_openai_v1():
-            return ChatStream(
-                span,
-                response,
-                instance,
-                token_counter,
-                choice_counter,
-                duration_histogram,
-                streaming_time_to_first_token,
-                streaming_time_to_generate,
-                start_time,
-                kwargs,
-            )
-        else:
-            return _abuild_from_streaming_response(
-                span,
-                response,
-                instance,
-                token_counter,
-                choice_counter,
-                duration_histogram,
-                streaming_time_to_first_token,
-                streaming_time_to_generate,
-                start_time,
-                kwargs,
-            )
-
-    duration = end_time - start_time
-
-    _handle_response(
-        response,
-        span,
-        instance,
-        token_counter,
-        choice_counter,
-        duration_histogram,
-        duration,
-    )
-
-    span.end()
-
-    return response
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        await _handle_request(span, kwargs, instance)
+
+        try:
+            start_time = time.time()
+            response = await wrapped(*args, **kwargs)
+            end_time = time.time()
+        except Exception as e:  # pylint: disable=broad-except
+            end_time = time.time()
+            duration = end_time - start_time if "start_time" in locals() else 0
+
+            common_attributes = Config.get_common_metrics_attributes()
+            attributes = {
+                **common_attributes,
+                "error.type": e.__class__.__name__,
+            }
+
+            if duration > 0 and duration_histogram:
+                duration_histogram.record(duration, attributes=attributes)
+            if exception_counter:
+                exception_counter.add(1, attributes=attributes)
+
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
+            span.set_status(Status(StatusCode.ERROR, str(e)))
+            span.end()
+
+            raise
+
+        if is_streaming_response(response):
+            # span will be closed after the generator is done
+            if is_openai_v1():
+                return ChatStream(
+                    span,
+                    response,
+                    instance,
+                    token_counter,
+                    choice_counter,
+                    duration_histogram,
+                    streaming_time_to_first_token,
+                    streaming_time_to_generate,
+                    start_time,
+                    kwargs,
+                )
+            else:
+                return _abuild_from_streaming_response(
+                    span,
+                    response,
+                    instance,
+                    token_counter,
+                    choice_counter,
+                    duration_histogram,
+                    streaming_time_to_first_token,
+                    streaming_time_to_generate,
+                    start_time,
+                    kwargs,
+                )
+
+        duration = end_time - start_time
+
+        _handle_response(
+            response,
+            span,
+            instance,
+            token_counter,
+            choice_counter,
+            duration_histogram,
+            duration,
+        )
+
+        span.end()
+
+        return response
 
 
 @dont_throw
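The changes in this file all serve one pattern: the request and response handling now runs inside trace.use_span(span, end_on_exit=False), so anything that consults the current OpenTelemetry context while the wrapper works — emitted events, child spans — is correlated with the request span. A standalone sketch of the pattern; the tracer and span names here are illustrative, not taken from the instrumentation:

# Sketch only: demonstrates why the wrappers activate the span without ending it.
from opentelemetry import trace

tracer = trace.get_tracer("sketch")

def wrapped_call():
    span = tracer.start_span("openai.chat")  # start_span() does NOT activate the span
    # use_span() makes `span` current, so events and child spans created in the
    # block inherit its trace and span IDs instead of the ambient context.
    with trace.use_span(span, end_on_exit=False):
        child = tracer.start_span("handle_request")  # parented to `span`
        child.end()
    # end_on_exit=False keeps the span open past the block, which is what lets
    # the chat wrappers hand it off to a streaming response for later closing.
    span.end()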
opentelemetry/instrumentation/openai/shared/completion_wrappers.py

@@ -1,6 +1,7 @@
 import logging
 
 from opentelemetry import context as context_api
+from opentelemetry import trace
 from opentelemetry.instrumentation.openai.shared import (
     _set_client_attributes,
     _set_functions_attributes,

@@ -55,25 +56,27 @@ def completion_wrapper(tracer, wrapped, instance, args, kwargs):
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
     )
 
-    _handle_request(span, kwargs, instance)
-
-    try:
-        response = wrapped(*args, **kwargs)
-    except Exception as e:
-        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
-        span.set_status(Status(StatusCode.ERROR, str(e)))
-        span.end()
-        raise
-
-    if is_streaming_response(response):
-        # span will be closed after the generator is done
-        return _build_from_streaming_response(span, kwargs, response)
-    else:
-        _handle_response(response, span, instance)
-
-    span.end()
-    return response
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        _handle_request(span, kwargs, instance)
+
+        try:
+            response = wrapped(*args, **kwargs)
+        except Exception as e:
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
+            span.set_status(Status(StatusCode.ERROR, str(e)))
+            span.end()
+            raise
+
+        if is_streaming_response(response):
+            # span will be closed after the generator is done
+            return _build_from_streaming_response(span, kwargs, response)
+        else:
+            _handle_response(response, span, instance)
+
+        span.end()
+        return response
 
 
 @_with_tracer_wrapper

@@ -89,25 +92,27 @@ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
     )
 
-    _handle_request(span, kwargs, instance)
-
-    try:
-        response = await wrapped(*args, **kwargs)
-    except Exception as e:
-        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
-        span.set_status(Status(StatusCode.ERROR, str(e)))
-        span.end()
-        raise
-
-    if is_streaming_response(response):
-        # span will be closed after the generator is done
-        return _abuild_from_streaming_response(span, kwargs, response)
-    else:
-        _handle_response(response, span, instance)
-
-    span.end()
-    return response
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        _handle_request(span, kwargs, instance)
+
+        try:
+            response = await wrapped(*args, **kwargs)
+        except Exception as e:
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
+            span.set_status(Status(StatusCode.ERROR, str(e)))
+            span.end()
+            raise
+
+        if is_streaming_response(response):
+            # span will be closed after the generator is done
+            return _abuild_from_streaming_response(span, kwargs, response)
+        else:
+            _handle_response(response, span, instance)
+
+        span.end()
+        return response
 
 
 @dont_throw
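As in chat_wrappers.py, both wrappers return early for streaming responses and deliberately leave the span open ("span will be closed after the generator is done"). A minimal sketch of that hand-off; the real _build_from_streaming_response also receives kwargs and accumulates the streamed completion, so this stand-in only shows the span lifecycle:

# Sketch only: defer span.end() until the consumer finishes the stream.
def wrap_stream(span, response):
    def gen():
        try:
            for chunk in response:
                yield chunk  # chunks pass through unchanged
        finally:
            span.end()  # runs on exhaustion, break, or generator cleanup
    return gen()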
opentelemetry/instrumentation/openai/v1/assistant_wrappers.py

@@ -2,6 +2,7 @@ import logging
 import time
 
 from opentelemetry import context as context_api
+from opentelemetry import trace
 from opentelemetry.instrumentation.openai.shared import (
     _set_span_attribute,
     model_as_dict,

@@ -127,110 +128,115 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
         start_time=run.get("start_time"),
     )
-    if exception := run.get("exception"):
-        span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
-        span.record_exception(exception)
-        span.set_status(Status(StatusCode.ERROR, str(exception)))
-        span.end(run.get("end_time"))
-
-    prompt_index = 0
-    if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
-        if Config.enrich_assistant:
-            assistant = model_as_dict(
-                instance._client.beta.assistants.retrieve(run["assistant_id"])
-            )
-            assistants[run["assistant_id"]] = assistant
-        else:
-            assistant = assistants[run["assistant_id"]]
-
-        …
-        _set_span_attribute(
-            span, SpanAttributes.LLM_REQUEST_MODEL, assistant["model"]
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_RESPONSE_MODEL,
-            assistant["model"],
-        )
-        prompt_index += 1
-    _set_span_attribute(
-        span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
-    )
-    _set_span_attribute(
-        span,
-        f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
-        run["instructions"],
-    )
-    emit_event(MessageEvent(content=run["instructions"], role="system"))
-    prompt_index += 1
-
-    completion_index = 0
-    for msg in messages:
-        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
-        content = msg.get("content")
-
-        message_content = content[0].get("text").get("value")
-        message_role = msg.get("role")
-        if message_role in ["user", "system"]:
-            if should_emit_events():
-                emit_event(MessageEvent(content=message_content, role=message_role))
-            else:
-                _set_span_attribute(
-                    span,
-                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
-                    message_role,
-                )
-                _set_span_attribute(
-                    span,
-                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
-                    message_content,
-                )
-                prompt_index += 1
-        else:
-            if should_emit_events():
-                emit_event(
-                    ChoiceEvent(
-                        index=completion_index,
-                        message={"content": message_content, "role": message_role},
-                    )
-                )
-            else:
-                _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
-                _set_span_attribute(span, f"{prefix}.content", message_content)
-                _set_span_attribute(
-                    span, f"gen_ai.response.{completion_index}.id", msg.get("id")
-                )
-            completion_index += 1
-
-    if run.get("usage"):
-        usage_dict = model_as_dict(run.get("usage"))
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
-            usage_dict.get("completion_tokens"),
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
-            usage_dict.get("prompt_tokens"),
-        )
+
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        if exception := run.get("exception"):
+            span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
+            span.record_exception(exception)
+            span.set_status(Status(StatusCode.ERROR, str(exception)))
+            span.end()
+            return response
+
+        prompt_index = 0
+        if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
+            if Config.enrich_assistant:
+                assistant = model_as_dict(
+                    instance._client.beta.assistants.retrieve(run["assistant_id"])
+                )
+                assistants[run["assistant_id"]] = assistant
+            else:
+                assistant = assistants[run["assistant_id"]]
+
+            _set_span_attribute(
+                span,
+                SpanAttributes.LLM_SYSTEM,
+                "openai",
+            )
+            _set_span_attribute(
+                span,
+                SpanAttributes.LLM_REQUEST_MODEL,
+                assistant["model"],
+            )
+            _set_span_attribute(
+                span,
+                SpanAttributes.LLM_RESPONSE_MODEL,
+                assistant["model"],
+            )
+            if should_emit_events():
+                emit_event(MessageEvent(content=assistant["instructions"], role="system"))
+            else:
+                _set_span_attribute(
+                    span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
+                )
+                _set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+                    assistant["instructions"],
+                )
+            prompt_index += 1
+        _set_span_attribute(
+            span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
+        )
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+            run["instructions"],
+        )
+        if should_emit_events():
+            emit_event(MessageEvent(content=run["instructions"], role="system"))
+        prompt_index += 1
+
+        completion_index = 0
+        for msg in messages:
+            prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
+            content = msg.get("content")
+
+            message_content = content[0].get("text").get("value")
+            message_role = msg.get("role")
+            if message_role in ["user", "system"]:
+                if should_emit_events():
+                    emit_event(MessageEvent(content=message_content, role=message_role))
+                else:
+                    _set_span_attribute(
+                        span,
+                        f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
+                        message_role,
+                    )
+                    _set_span_attribute(
+                        span,
+                        f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+                        message_content,
+                    )
+                    prompt_index += 1
+            else:
+                if should_emit_events():
+                    emit_event(
+                        ChoiceEvent(
+                            index=completion_index,
+                            message={"content": message_content, "role": message_role},
+                        )
+                    )
+                else:
+                    _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
+                    _set_span_attribute(span, f"{prefix}.content", message_content)
+                    _set_span_attribute(
+                        span, f"gen_ai.response.{completion_index}.id", msg.get("id")
+                    )
+                completion_index += 1
+
+        if run.get("usage"):
+            usage_dict = model_as_dict(run.get("usage"))
+            _set_span_attribute(
+                span,
+                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+                usage_dict.get("completion_tokens"),
+            )
+            _set_span_attribute(
+                span,
+                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+                usage_dict.get("prompt_tokens"),
+            )
 
     span.end(run.get("end_time"))
 
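When event emission is disabled, messages_list_wrapper falls back to flattening the conversation onto numbered span attributes, carrying a running prompt_index across the assistant instructions, run instructions, and thread messages. A hypothetical helper showing that convention, assuming SpanAttributes.LLM_PROMPTS resolves to "gen_ai.prompt" as in opentelemetry-semconv-ai:

# Sketch only: the numbered-attribute fallback used by the wrapper above.
def record_prompts(span, messages, prompt_index=0):
    for msg in messages:
        span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", msg["role"])
        span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", msg["content"])
        prompt_index += 1
    return prompt_index  # callers carry the index into the next block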
@@ -251,68 +257,70 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
     )
 
-    i = 0
-    if assistants.get(assistant_id) is not None or Config.enrich_assistant:
-        if Config.enrich_assistant:
-            assistant = model_as_dict(
-                instance._client.beta.assistants.retrieve(assistant_id)
-            )
-            assistants[assistant_id] = assistant
-        else:
-            assistant = assistants[assistant_id]
-
-        _set_span_attribute(
-            span, SpanAttributes.LLM_REQUEST_MODEL, assistants[assistant_id]["model"]
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_SYSTEM,
-            "openai",
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_RESPONSE_MODEL,
-            assistants[assistant_id]["model"],
-        )
-        if should_emit_events():
-            emit_event(
-                MessageEvent(
-                    content=assistants[assistant_id]["instructions"], role="system"
-                )
-            )
-        else:
-            _set_span_attribute(
-                span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system"
-            )
-            _set_span_attribute(
-                span,
-                f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
-                assistants[assistant_id]["instructions"],
-            )
-        i += 1
-    if should_emit_events():
-        emit_event(MessageEvent(content=instructions, role="system"))
-    else:
-        _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
-        _set_span_attribute(
-            span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions
-        )
-
-    from opentelemetry.instrumentation.openai.v1.event_handler_wrapper import (
-        EventHandleWrapper,
-    )
-
-    kwargs["event_handler"] = EventHandleWrapper(
-        original_handler=kwargs["event_handler"],
-        span=span,
-    )
-
-    try:
-        response = wrapped(*args, **kwargs)
-        return response
-    except Exception as e:
-        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
-        span.set_status(Status(StatusCode.ERROR, str(e)))
-        span.end()
-        raise
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        i = 0
+        if assistants.get(assistant_id) is not None or Config.enrich_assistant:
+            if Config.enrich_assistant:
+                assistant = model_as_dict(
+                    instance._client.beta.assistants.retrieve(assistant_id)
+                )
+                assistants[assistant_id] = assistant
+            else:
+                assistant = assistants[assistant_id]
+
+            _set_span_attribute(
+                span, SpanAttributes.LLM_REQUEST_MODEL, assistants[assistant_id]["model"]
+            )
+            _set_span_attribute(
+                span,
+                SpanAttributes.LLM_SYSTEM,
+                "openai",
+            )
+            _set_span_attribute(
+                span,
+                SpanAttributes.LLM_RESPONSE_MODEL,
+                assistants[assistant_id]["model"],
+            )
+            if should_emit_events():
+                emit_event(
+                    MessageEvent(
+                        content=assistants[assistant_id]["instructions"], role="system"
+                    )
+                )
+            else:
+                _set_span_attribute(
+                    span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system"
+                )
+                _set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
+                    assistants[assistant_id]["instructions"],
+                )
+            i += 1
+        if should_emit_events():
+            emit_event(MessageEvent(content=instructions, role="system"))
+        else:
+            _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
+            _set_span_attribute(
+                span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions
+            )
+
+        from opentelemetry.instrumentation.openai.v1.event_handler_wrapper import (
+            EventHandleWrapper,
+        )
+
+        kwargs["event_handler"] = EventHandleWrapper(
+            original_handler=kwargs["event_handler"],
+            span=span,
+        )
+
+        try:
+            response = wrapped(*args, **kwargs)
+            return response
+        except Exception as e:
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
+            span.set_status(Status(StatusCode.ERROR, str(e)))
+            span.end()
+            raise
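runs_create_and_stream_wrapper replaces the caller's event_handler with an EventHandleWrapper bound to the span. That wrapper's implementation is not part of this diff (event_handler_wrapper.py is unchanged), but the delegation idea looks roughly like this hypothetical sketch:

# Sketch only: forward every callback to the user's handler and tie span
# shutdown to the end of the streamed run. Not the actual EventHandleWrapper.
class SpanDelegatingHandler:
    def __init__(self, original_handler, span):
        self._original_handler = original_handler
        self._span = span

    def __getattr__(self, name):
        # Callbacks we don't override pass straight through to the user's handler.
        return getattr(self._original_handler, name)

    def on_end(self):
        try:
            self._original_handler.on_end()
        finally:
            self._span.end()  # close the span once the run finishes streaming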
opentelemetry/instrumentation/openai/version.py

@@ -1 +1 @@
-__version__ = "0.44.2"
+__version__ = "0.45.0"

opentelemetry_instrumentation_openai-0.45.0.dist-info/RECORD

@@ -1,7 +1,7 @@
 opentelemetry/instrumentation/openai/__init__.py,sha256=Mx_nwMl0TlhUjrQOR4qdx6MEhBUKp5cuUIIXFzi3mXo,2093
 opentelemetry/instrumentation/openai/shared/__init__.py,sha256=Ba429tv5NPuQN7RoLzaj00K9oj88BaUBdPmUUsZ-7ic,12346
-opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=…
-opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=…
+opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=whdVqpTBFyWXITuY2pJYzVx1PF7kt5pQXvG7Z0nZV8Y,39325
+opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=600McQXNCPFaifeD4yFq00beZ9XjGEbYT_3XVojHQT4,9244
 opentelemetry/instrumentation/openai/shared/config.py,sha256=nQfVXiznVUIv2_BHSUQpaoCnxysG3XpaYpIZdxi0mxM,477
 opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=oRHghd4vSDJ6fNHjL9G8QfKnPnp_NkZfVmTDSgZScVU,9251
 opentelemetry/instrumentation/openai/shared/event_emitter.py,sha256=iXUoyEHbC9DqwjnudkPOZlDBC4qLdEBmfsxB_q0nzbQ,3113

@@ -11,11 +11,11 @@ opentelemetry/instrumentation/openai/shared/span_utils.py,sha256=47DEQpj8HBSa-_T…
 opentelemetry/instrumentation/openai/utils.py,sha256=-0ugLRCR50v25KncuOq4tXHHPzdsH5PjS4Qd_8PP0TQ,4684
 opentelemetry/instrumentation/openai/v0/__init__.py,sha256=FhpVbP8NqjN2We_srppZ_U-0-Vbk-A15VSQp3zUnW3k,6353
 opentelemetry/instrumentation/openai/v1/__init__.py,sha256=oLst4xav77tTteZKXo59uyb-2IWqw_xOafaSMzTxq9g,13255
-opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=…
+opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=oa5xYEDELFN9luvSn3y1xhSs37yRYY_Pwh6htqOs8gc,11297
 opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py,sha256=AT-eDZOFP-K_mm-ecdgQaILoIsEiItZmtwzwAuse86Q,4350
 opentelemetry/instrumentation/openai/v1/responses_wrappers.py,sha256=NSty_lrL5HJVt88d_keV-wQ17-4XGVzc9ukMLaITAug,24471
-opentelemetry/instrumentation/openai/version.py,sha256=…
-opentelemetry_instrumentation_openai-0.44.2.dist-info/METADATA,sha256=…
-opentelemetry_instrumentation_openai-0.44.2.dist-info/WHEEL,sha256=…
-opentelemetry_instrumentation_openai-0.44.2.dist-info/entry_points.txt,sha256=…
-opentelemetry_instrumentation_openai-0.44.2.dist-info/RECORD,,
+opentelemetry/instrumentation/openai/version.py,sha256=p2Xvd610sh3godS2s3KpBCKAbfwwnEs-mH0Q6wQI5LE,23
+opentelemetry_instrumentation_openai-0.45.0.dist-info/METADATA,sha256=EIpHvG3E1GkeBvLatzjUI92NmAirGvkILzWmzGi01-o,2150
+opentelemetry_instrumentation_openai-0.45.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+opentelemetry_instrumentation_openai-0.45.0.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
+opentelemetry_instrumentation_openai-0.45.0.dist-info/RECORD,,

WHEEL and entry_points.txt: files without changes.
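For reference when reading the RECORD hunks above: each row is path,sha256=<urlsafe-base64 digest with padding stripped>,size. A sketch of recomputing a row for verification:

# Sketch only: rebuild one wheel RECORD row (PEP 376-style hash encoding).
import base64
import hashlib

def record_row(path: str) -> str:
    data = open(path, "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"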