paid-python 0.6.0__py3-none-any.whl → 1.0.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paid/__init__.py +31 -0
- paid/client.py +1 -472
- paid/core/client_wrapper.py +3 -2
- paid/customers/__init__.py +3 -0
- paid/customers/client.py +428 -4
- paid/customers/raw_client.py +594 -2
- paid/customers/types/__init__.py +8 -0
- paid/customers/types/customers_check_entitlement_request_view.py +5 -0
- paid/customers/types/customers_check_entitlement_response.py +22 -0
- paid/orders/client.py +435 -0
- paid/orders/raw_client.py +695 -0
- paid/plans/client.py +71 -0
- paid/plans/raw_client.py +121 -2
- paid/types/__init__.py +28 -0
- paid/types/cancel_renewal_response.py +49 -0
- paid/types/contact_create_for_customer.py +37 -0
- paid/types/invoice.py +75 -0
- paid/types/invoice_status.py +5 -0
- paid/types/payment_method.py +58 -0
- paid/types/payment_method_card.py +49 -0
- paid/types/payment_method_type.py +5 -0
- paid/types/payment_method_us_bank_account.py +36 -0
- paid/types/payment_method_us_bank_account_account_type.py +5 -0
- paid/types/plan_plan_products_item.py +6 -0
- paid/types/plan_with_features.py +69 -0
- paid/types/plan_with_features_features_item.py +34 -0
- paid/types/proration_attribute_update.py +44 -0
- paid/types/proration_detail.py +49 -0
- paid/types/proration_upgrade_response.py +73 -0
- paid/types/signal_v_2.py +5 -5
- paid/usage/client.py +6 -6
- {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/METADATA +6 -4
- {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/RECORD +35 -36
- opentelemetry/instrumentation/openai/__init__.py +0 -54
- opentelemetry/instrumentation/openai/shared/__init__.py +0 -399
- opentelemetry/instrumentation/openai/shared/audio_wrappers.py +0 -247
- opentelemetry/instrumentation/openai/shared/chat_wrappers.py +0 -1192
- opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -292
- opentelemetry/instrumentation/openai/shared/config.py +0 -15
- opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -311
- opentelemetry/instrumentation/openai/shared/event_emitter.py +0 -108
- opentelemetry/instrumentation/openai/shared/event_models.py +0 -41
- opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -68
- opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
- opentelemetry/instrumentation/openai/utils.py +0 -213
- opentelemetry/instrumentation/openai/v0/__init__.py +0 -176
- opentelemetry/instrumentation/openai/v1/__init__.py +0 -394
- opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -329
- opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -134
- opentelemetry/instrumentation/openai/v1/responses_wrappers.py +0 -1113
- opentelemetry/instrumentation/openai/version.py +0 -1
- {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/LICENSE +0 -0
- {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/WHEEL +0 -0
|
@@ -1,329 +0,0 @@
|
|
|
1
|
-
import logging
|
|
2
|
-
import time
|
|
3
|
-
|
|
4
|
-
from opentelemetry import context as context_api
|
|
5
|
-
from opentelemetry import trace
|
|
6
|
-
from opentelemetry.instrumentation.openai.shared import (
|
|
7
|
-
_set_span_attribute,
|
|
8
|
-
model_as_dict,
|
|
9
|
-
)
|
|
10
|
-
from opentelemetry.instrumentation.openai.shared.config import Config
|
|
11
|
-
from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event
|
|
12
|
-
from opentelemetry.instrumentation.openai.shared.event_models import (
|
|
13
|
-
ChoiceEvent,
|
|
14
|
-
MessageEvent,
|
|
15
|
-
)
|
|
16
|
-
from opentelemetry.instrumentation.openai.utils import (
|
|
17
|
-
_with_tracer_wrapper,
|
|
18
|
-
dont_throw,
|
|
19
|
-
should_emit_events,
|
|
20
|
-
)
|
|
21
|
-
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
|
|
22
|
-
from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
|
|
23
|
-
from opentelemetry.semconv._incubating.attributes import (
|
|
24
|
-
gen_ai_attributes as GenAIAttributes,
|
|
25
|
-
)
|
|
26
|
-
from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
|
|
27
|
-
from opentelemetry.trace import SpanKind, Status, StatusCode
|
|
28
|
-
|
|
29
|
-
from openai._legacy_response import LegacyAPIResponse
|
|
30
|
-
from openai.types.beta.threads.run import Run
|
|
31
|
-
|
|
32
|
-
logger = logging.getLogger(__name__)

# Module-level caches shared by the wrappers below.
# assistants: assistant_id -> {"model", "instructions"} captured at create time
#             (or enriched via retrieve when Config.enrich_assistant is set).
# runs: thread_id -> run bookkeeping ("start_time"/"end_time" in ns,
#       "assistant_id", "instructions", "run_id", optional "usage"/"exception").
# NOTE(review): plain dicts with no eviction — entries accumulate for the
# process lifetime; confirm this is acceptable for long-running apps.
assistants = {}
runs = {}
|
36
|
-
|
|
37
|
-
|
|
38
|
-
@_with_tracer_wrapper
def assistants_create_wrapper(tracer, wrapped, instance, args, kwargs):
    """Cache the model and instructions of a newly created assistant.

    The cached entry (keyed by assistant id) is used later by the run
    wrappers to enrich spans without an extra API call.
    """
    suppressed = context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY)
    result = wrapped(*args, **kwargs)

    if not suppressed:
        assistants[result.id] = {
            key: kwargs.get(key) for key in ("model", "instructions")
        }

    return result
|
51
|
-
|
|
52
|
-
|
|
53
|
-
@_with_tracer_wrapper
def runs_create_wrapper(tracer, wrapped, instance, args, kwargs):
    """Record run creation per thread for later span construction.

    On success the entry holds the start time, assistant id, instructions
    and run id; on failure it holds the exception and an end time, so
    messages_list_wrapper can emit an error span instead.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    thread_key = kwargs.get("thread_id")
    run_instructions = kwargs.get("instructions")

    try:
        result = wrapped(*args, **kwargs)
        parsed = model_as_dict(result)

        runs[thread_key] = {
            "start_time": time.time_ns(),
            "assistant_id": kwargs.get("assistant_id"),
            "instructions": run_instructions,
            "run_id": parsed.get("id"),
        }

        return result
    except Exception as err:
        # Remember the failure so the span built later carries the error.
        runs[thread_key] = {"exception": err, "end_time": time.time_ns()}
        raise
|
79
|
-
|
|
80
|
-
|
|
81
|
-
@_with_tracer_wrapper
def runs_retrieve_wrapper(tracer, wrapped, instance, args, kwargs):
    """Refresh the tracked run entry when a run is retrieved.

    Updates the run's end time and (when present) its token usage; on
    failure, stamps the tracked entry with the exception instead.
    """

    @dont_throw
    def _record(raw):
        # Legacy (raw) responses must be parsed into the typed Run model first.
        parsed = raw.parse() if type(raw) is LegacyAPIResponse else raw
        assert type(parsed) is Run

        if parsed.thread_id in runs:
            entry = runs[parsed.thread_id]
            entry["end_time"] = time.time_ns()
            if parsed.usage:
                entry["usage"] = parsed.usage

    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    try:
        result = wrapped(*args, **kwargs)
    except Exception as err:
        tid = kwargs.get("thread_id")
        if tid in runs:
            runs[tid]["exception"] = err
            runs[tid]["end_time"] = time.time_ns()
        raise

    # _record is @dont_throw-guarded, so bookkeeping never breaks the caller.
    _record(result)
    return result
|
110
|
-
|
|
111
|
-
|
|
112
|
-
@_with_tracer_wrapper
def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
    """Build the "openai.assistant.run" span when a thread's messages are listed.

    Uses the run bookkeeping recorded by runs_create_wrapper /
    runs_retrieve_wrapper for this thread to reconstruct the prompt
    (assistant + run instructions, user messages) and the completions,
    emitting them either as events or as span attributes depending on the
    configured mode, and attaches token usage before ending the span.

    FIX: the usage attributes were previously swapped — input tokens were
    populated from ``completion_tokens`` and output tokens from
    ``prompt_tokens``. Per the GenAI semantic conventions,
    ``gen_ai.usage.input_tokens`` is the prompt side and
    ``gen_ai.usage.output_tokens`` the completion side.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    # Renamed from `id` to avoid shadowing the builtin.
    thread_id = kwargs.get("thread_id")

    response = wrapped(*args, **kwargs)

    response_dict = model_as_dict(response)
    if thread_id not in runs:
        # No tracked run for this thread; nothing to report.
        return response

    run = runs[thread_id]
    # Oldest-first so prompt/completion indices follow conversation order.
    messages = sorted(response_dict["data"], key=lambda x: x["created_at"])

    span = tracer.start_span(
        "openai.assistant.run",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
        start_time=run.get("start_time"),
    )

    # Use the span as current context to ensure events get proper trace context.
    with trace.use_span(span, end_on_exit=False):
        if exception := run.get("exception"):
            # The run itself failed: emit an error span and stop here.
            span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
            span.record_exception(exception)
            span.set_status(Status(StatusCode.ERROR, str(exception)))
            span.end()
            return response

        prompt_index = 0
        if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
            if Config.enrich_assistant:
                # Fetch (and cache) assistant details on demand.
                assistant = model_as_dict(
                    instance._client.beta.assistants.retrieve(run["assistant_id"])
                )
                assistants[run["assistant_id"]] = assistant
            else:
                assistant = assistants[run["assistant_id"]]

            _set_span_attribute(
                span,
                GenAIAttributes.GEN_AI_SYSTEM,
                "openai",
            )
            _set_span_attribute(
                span,
                GenAIAttributes.GEN_AI_REQUEST_MODEL,
                assistant["model"],
            )
            _set_span_attribute(
                span,
                GenAIAttributes.GEN_AI_RESPONSE_MODEL,
                assistant["model"],
            )
            if should_emit_events():
                emit_event(MessageEvent(content=assistant["instructions"], role="system"))
            else:
                _set_span_attribute(
                    span, f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role", "system"
                )
                _set_span_attribute(
                    span,
                    f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
                    assistant["instructions"],
                )
                prompt_index += 1
        # NOTE(review): unlike the assistant instructions above, the run
        # instructions are written as span attributes unconditionally and the
        # event is emitted *in addition* when events are enabled — preserved
        # as-is; confirm whether this duplication is intentional.
        _set_span_attribute(
            span, f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role", "system"
        )
        _set_span_attribute(
            span,
            f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
            run["instructions"],
        )
        if should_emit_events():
            emit_event(MessageEvent(content=run["instructions"], role="system"))
        prompt_index += 1

        completion_index = 0
        for msg in messages:
            prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{completion_index}"
            content = msg.get("content")

            message_content = content[0].get("text").get("value")
            message_role = msg.get("role")
            if message_role in ["user", "system"]:
                # Inputs to the run count as prompts.
                if should_emit_events():
                    emit_event(MessageEvent(content=message_content, role=message_role))
                else:
                    _set_span_attribute(
                        span,
                        f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role",
                        message_role,
                    )
                    _set_span_attribute(
                        span,
                        f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
                        message_content,
                    )
                    prompt_index += 1
            else:
                # Assistant output counts as a completion.
                if should_emit_events():
                    emit_event(
                        ChoiceEvent(
                            index=completion_index,
                            message={"content": message_content, "role": message_role},
                        )
                    )
                else:
                    _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
                    _set_span_attribute(span, f"{prefix}.content", message_content)
                    _set_span_attribute(
                        span, f"gen_ai.response.{completion_index}.id", msg.get("id")
                    )
                completion_index += 1

        if run.get("usage"):
            usage_dict = model_as_dict(run.get("usage"))
            # Input = prompt tokens, output = completion tokens (was swapped).
            _set_span_attribute(
                span,
                GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
                usage_dict.get("prompt_tokens"),
            )
            _set_span_attribute(
                span,
                GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
                usage_dict.get("completion_tokens"),
            )

        span.end(run.get("end_time"))

    return response
|
247
|
-
|
|
248
|
-
|
|
249
|
-
@_with_tracer_wrapper
def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
    """Start a span for a streamed assistant run and install a telemetry
    event handler.

    Unlike the non-streaming path, the span is *not* ended here: it is
    handed to EventHandleWrapper, which closes it from its on_end /
    on_exception callbacks as the stream completes.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    assistant_id = kwargs.get("assistant_id")
    instructions = kwargs.get("instructions")

    span = tracer.start_span(
        "openai.assistant.run_stream",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
    )

    # Use the span as current context to ensure events get proper trace context
    with trace.use_span(span, end_on_exit=False):
        # i indexes the next prompt attribute slot (attribute mode only).
        i = 0
        if assistants.get(assistant_id) is not None or Config.enrich_assistant:
            if Config.enrich_assistant:
                # Fetch (and cache) assistant details on demand.
                assistant = model_as_dict(
                    instance._client.beta.assistants.retrieve(assistant_id)
                )
                assistants[assistant_id] = assistant
            else:
                assistant = assistants[assistant_id]

            _set_span_attribute(
                span, GenAIAttributes.GEN_AI_REQUEST_MODEL, assistants[assistant_id]["model"]
            )
            _set_span_attribute(
                span,
                GenAIAttributes.GEN_AI_SYSTEM,
                "openai",
            )
            _set_span_attribute(
                span,
                GenAIAttributes.GEN_AI_RESPONSE_MODEL,
                assistants[assistant_id]["model"],
            )
            # Assistant-level instructions: event mode or attribute mode.
            if should_emit_events():
                emit_event(
                    MessageEvent(
                        content=assistants[assistant_id]["instructions"], role="system"
                    )
                )
            else:
                _set_span_attribute(
                    span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", "system"
                )
                _set_span_attribute(
                    span,
                    f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content",
                    assistants[assistant_id]["instructions"],
                )
                i += 1
        # Run-level instructions, same dual-mode handling.
        if should_emit_events():
            emit_event(MessageEvent(content=instructions, role="system"))
        else:
            _set_span_attribute(span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", "system")
            _set_span_attribute(
                span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content", instructions
            )

        # Imported lazily to avoid a circular import at module load time.
        from opentelemetry.instrumentation.openai.v1.event_handler_wrapper import (
            EventHandleWrapper,
        )

        # Wrap the caller's handler so streaming callbacks feed the span.
        # NOTE(review): kwargs["event_handler"] raises KeyError if the caller
        # omitted it — confirm the stream API always requires one.
        kwargs["event_handler"] = EventHandleWrapper(
            original_handler=kwargs["event_handler"],
            span=span,
        )

        try:
            response = wrapped(*args, **kwargs)
            return response
        except Exception as e:
            # Stream setup failed: mark and close the span here, since the
            # handler's on_end will never run.
            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
            span.record_exception(e)
            span.set_status(Status(StatusCode.ERROR, str(e)))
            span.end()
            raise
|
@@ -1,134 +0,0 @@
|
|
|
1
|
-
from opentelemetry.instrumentation.openai.shared import _set_span_attribute
|
|
2
|
-
from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event
|
|
3
|
-
from opentelemetry.instrumentation.openai.shared.event_models import ChoiceEvent
|
|
4
|
-
from opentelemetry.instrumentation.openai.utils import should_emit_events
|
|
5
|
-
from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
|
|
6
|
-
from opentelemetry.semconv._incubating.attributes import (
|
|
7
|
-
gen_ai_attributes as GenAIAttributes,
|
|
8
|
-
)
|
|
9
|
-
from opentelemetry.trace import Status, StatusCode
|
|
10
|
-
from typing_extensions import override
|
|
11
|
-
|
|
12
|
-
from openai import AssistantEventHandler
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
class EventHandleWrapper(AssistantEventHandler):
    """AssistantEventHandler proxy that records telemetry on a span while
    delegating every callback to the user's original handler.

    Token usage is accumulated across run steps and flushed to the span in
    ``on_end``; completions are recorded as span attributes or emitted as
    events depending on the instrumentation mode.

    FIX: the counters were previously declared as class-level attributes;
    they are per-instance state and are now initialized in ``__init__``.
    """

    def __init__(self, original_handler, span):
        super().__init__()
        self._original_handler = original_handler
        self._span = span
        # Per-instance telemetry state.
        self._current_text_index = 0
        self._prompt_tokens = 0
        self._completion_tokens = 0

    @override
    def on_end(self):
        # Flush accumulated usage, let the user handler finish, then close
        # the span (it was started with end_on_exit=False by the wrapper).
        _set_span_attribute(
            self._span,
            GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
            self._prompt_tokens,
        )
        _set_span_attribute(
            self._span,
            GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
            self._completion_tokens,
        )
        self._original_handler.on_end()
        self._span.end()

    @override
    def on_event(self, event):
        self._original_handler.on_event(event)

    @override
    def on_run_step_created(self, run_step):
        self._original_handler.on_run_step_created(run_step)

    @override
    def on_run_step_delta(self, delta, snapshot):
        self._original_handler.on_run_step_delta(delta, snapshot)

    @override
    def on_run_step_done(self, run_step):
        # Usage arrives per run step; accumulate for the final flush.
        if run_step.usage:
            self._prompt_tokens += run_step.usage.prompt_tokens
            self._completion_tokens += run_step.usage.completion_tokens
        self._original_handler.on_run_step_done(run_step)

    @override
    def on_tool_call_created(self, tool_call):
        self._original_handler.on_tool_call_created(tool_call)

    @override
    def on_tool_call_delta(self, delta, snapshot):
        self._original_handler.on_tool_call_delta(delta, snapshot)

    @override
    def on_tool_call_done(self, tool_call):
        self._original_handler.on_tool_call_done(tool_call)

    @override
    def on_exception(self, exception: Exception):
        # Record the failure on the span, then let the user handler react.
        self._span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
        self._span.record_exception(exception)
        self._span.set_status(Status(StatusCode.ERROR, str(exception)))
        self._original_handler.on_exception(exception)

    @override
    def on_timeout(self):
        self._original_handler.on_timeout()

    @override
    def on_message_created(self, message):
        self._original_handler.on_message_created(message)

    @override
    def on_message_delta(self, delta, snapshot):
        self._original_handler.on_message_delta(delta, snapshot)

    @override
    def on_message_done(self, message):
        _set_span_attribute(
            self._span,
            f"gen_ai.response.{self._current_text_index}.id",
            message.id,
        )
        # NOTE(review): emit_event is not guarded by should_emit_events()
        # here, unlike on_text_done below — confirm emit_event no-ops in
        # attribute mode.
        emit_event(
            ChoiceEvent(
                index=self._current_text_index,
                message={
                    "content": [item.model_dump() for item in message.content],
                    "role": message.role,
                },
            )
        )
        self._original_handler.on_message_done(message)
        self._current_text_index += 1

    @override
    def on_text_created(self, text):
        self._original_handler.on_text_created(text)

    @override
    def on_text_delta(self, delta, snapshot):
        self._original_handler.on_text_delta(delta, snapshot)

    @override
    def on_text_done(self, text):
        self._original_handler.on_text_done(text)
        # Attribute mode only: record the completion text on the span.
        if not should_emit_events():
            _set_span_attribute(
                self._span,
                f"{GenAIAttributes.GEN_AI_COMPLETION}.{self._current_text_index}.role",
                "assistant",
            )
            _set_span_attribute(
                self._span,
                f"{GenAIAttributes.GEN_AI_COMPLETION}.{self._current_text_index}.content",
                text.value,
            )

    @override
    def on_image_file_done(self, image_file):
        self._original_handler.on_image_file_done(image_file)