paid-python 0.6.0__py3-none-any.whl → 1.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. paid/__init__.py +31 -0
  2. paid/client.py +1 -472
  3. paid/core/client_wrapper.py +3 -2
  4. paid/customers/__init__.py +3 -0
  5. paid/customers/client.py +428 -4
  6. paid/customers/raw_client.py +594 -2
  7. paid/customers/types/__init__.py +8 -0
  8. paid/customers/types/customers_check_entitlement_request_view.py +5 -0
  9. paid/customers/types/customers_check_entitlement_response.py +22 -0
  10. paid/orders/client.py +435 -0
  11. paid/orders/raw_client.py +695 -0
  12. paid/plans/client.py +71 -0
  13. paid/plans/raw_client.py +121 -2
  14. paid/types/__init__.py +28 -0
  15. paid/types/cancel_renewal_response.py +49 -0
  16. paid/types/contact_create_for_customer.py +37 -0
  17. paid/types/invoice.py +75 -0
  18. paid/types/invoice_status.py +5 -0
  19. paid/types/payment_method.py +58 -0
  20. paid/types/payment_method_card.py +49 -0
  21. paid/types/payment_method_type.py +5 -0
  22. paid/types/payment_method_us_bank_account.py +36 -0
  23. paid/types/payment_method_us_bank_account_account_type.py +5 -0
  24. paid/types/plan_plan_products_item.py +6 -0
  25. paid/types/plan_with_features.py +69 -0
  26. paid/types/plan_with_features_features_item.py +34 -0
  27. paid/types/proration_attribute_update.py +44 -0
  28. paid/types/proration_detail.py +49 -0
  29. paid/types/proration_upgrade_response.py +73 -0
  30. paid/types/signal_v_2.py +5 -5
  31. paid/usage/client.py +6 -6
  32. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/METADATA +6 -4
  33. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/RECORD +35 -36
  34. opentelemetry/instrumentation/openai/__init__.py +0 -54
  35. opentelemetry/instrumentation/openai/shared/__init__.py +0 -399
  36. opentelemetry/instrumentation/openai/shared/audio_wrappers.py +0 -247
  37. opentelemetry/instrumentation/openai/shared/chat_wrappers.py +0 -1192
  38. opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -292
  39. opentelemetry/instrumentation/openai/shared/config.py +0 -15
  40. opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -311
  41. opentelemetry/instrumentation/openai/shared/event_emitter.py +0 -108
  42. opentelemetry/instrumentation/openai/shared/event_models.py +0 -41
  43. opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -68
  44. opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
  45. opentelemetry/instrumentation/openai/utils.py +0 -213
  46. opentelemetry/instrumentation/openai/v0/__init__.py +0 -176
  47. opentelemetry/instrumentation/openai/v1/__init__.py +0 -394
  48. opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -329
  49. opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -134
  50. opentelemetry/instrumentation/openai/v1/responses_wrappers.py +0 -1113
  51. opentelemetry/instrumentation/openai/version.py +0 -1
  52. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/LICENSE +0 -0
  53. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/WHEEL +0 -0
@@ -1,399 +0,0 @@
1
- import json
2
- import logging
3
- import types
4
- import openai
5
- import pydantic
6
- from importlib.metadata import version
7
-
8
- from opentelemetry.instrumentation.openai.shared.config import Config
9
- from opentelemetry.instrumentation.openai.utils import (
10
- dont_throw,
11
- is_openai_v1,
12
- )
13
- from opentelemetry.semconv._incubating.attributes import (
14
- gen_ai_attributes as GenAIAttributes,
15
- openai_attributes as OpenAIAttributes,
16
- )
17
- from opentelemetry.semconv_ai import SpanAttributes
18
- from opentelemetry.trace.propagation import set_span_in_context
19
- from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
20
-
21
- OPENAI_LLM_USAGE_TOKEN_TYPES = ["prompt_tokens", "completion_tokens"]
22
- PROMPT_FILTER_KEY = "prompt_filter_results"
23
- PROMPT_ERROR = "prompt_error"
24
-
25
- _PYDANTIC_VERSION = version("pydantic")
26
-
27
-
28
- logger = logging.getLogger(__name__)
29
-
30
-
31
- def _set_span_attribute(span, name, value):
32
- if value is None or value == "":
33
- return
34
-
35
- if hasattr(openai, "NOT_GIVEN") and value == openai.NOT_GIVEN:
36
- return
37
-
38
- span.set_attribute(name, value)
39
-
40
-
41
- def _set_client_attributes(span, instance):
42
- if not span.is_recording():
43
- return
44
-
45
- if not is_openai_v1():
46
- return
47
-
48
- client = instance._client # pylint: disable=protected-access
49
- if isinstance(client, (openai.AsyncOpenAI, openai.OpenAI)):
50
- _set_span_attribute(
51
- span, SpanAttributes.LLM_OPENAI_API_BASE, str(client.base_url)
52
- )
53
- if isinstance(client, (openai.AsyncAzureOpenAI, openai.AzureOpenAI)):
54
- _set_span_attribute(
55
- span, SpanAttributes.LLM_OPENAI_API_VERSION, client._api_version
56
- ) # pylint: disable=protected-access
57
-
58
-
59
- def _set_api_attributes(span):
60
- if not span.is_recording():
61
- return
62
-
63
- if is_openai_v1():
64
- return
65
-
66
- base_url = openai.base_url if hasattr(openai, "base_url") else openai.api_base
67
-
68
- _set_span_attribute(span, SpanAttributes.LLM_OPENAI_API_BASE, base_url)
69
- _set_span_attribute(span, SpanAttributes.LLM_OPENAI_API_TYPE, openai.api_type)
70
- _set_span_attribute(span, SpanAttributes.LLM_OPENAI_API_VERSION, openai.api_version)
71
-
72
- return
73
-
74
-
75
- def _set_functions_attributes(span, functions):
76
- if not functions:
77
- return
78
-
79
- for i, function in enumerate(functions):
80
- prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
81
- _set_span_attribute(span, f"{prefix}.name", function.get("name"))
82
- _set_span_attribute(span, f"{prefix}.description", function.get("description"))
83
- _set_span_attribute(
84
- span, f"{prefix}.parameters", json.dumps(function.get("parameters"))
85
- )
86
-
87
-
88
- def set_tools_attributes(span, tools):
89
- if not tools:
90
- return
91
-
92
- for i, tool in enumerate(tools):
93
- function = tool.get("function")
94
- if not function:
95
- continue
96
-
97
- prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
98
- _set_span_attribute(span, f"{prefix}.name", function.get("name"))
99
- _set_span_attribute(span, f"{prefix}.description", function.get("description"))
100
- _set_span_attribute(
101
- span, f"{prefix}.parameters", json.dumps(function.get("parameters"))
102
- )
103
-
104
-
105
- def _set_request_attributes(span, kwargs, instance=None):
106
- if not span.is_recording():
107
- return
108
-
109
- _set_api_attributes(span)
110
-
111
- base_url = _get_openai_base_url(instance) if instance else ""
112
- vendor = _get_vendor_from_url(base_url)
113
- _set_span_attribute(span, GenAIAttributes.GEN_AI_SYSTEM, vendor)
114
-
115
- model = kwargs.get("model")
116
- if vendor == "AWS" and model and "." in model:
117
- model = _cross_region_check(model)
118
- elif vendor == "OpenRouter":
119
- model = _extract_model_name_from_provider_format(model)
120
-
121
- _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_MODEL, model)
122
- _set_span_attribute(
123
- span, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
124
- )
125
- _set_span_attribute(
126
- span, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, kwargs.get("temperature")
127
- )
128
- _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_TOP_P, kwargs.get("top_p"))
129
- _set_span_attribute(
130
- span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
131
- )
132
- _set_span_attribute(
133
- span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
134
- )
135
- _set_span_attribute(span, SpanAttributes.LLM_USER, kwargs.get("user"))
136
- _set_span_attribute(span, SpanAttributes.LLM_HEADERS, str(kwargs.get("headers")))
137
- # The new OpenAI SDK removed the `headers` and create new field called `extra_headers`
138
- if kwargs.get("extra_headers") is not None:
139
- _set_span_attribute(
140
- span, SpanAttributes.LLM_HEADERS, str(kwargs.get("extra_headers"))
141
- )
142
- _set_span_attribute(
143
- span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
144
- )
145
- _set_span_attribute(
146
- span, OpenAIAttributes.OPENAI_REQUEST_SERVICE_TIER, kwargs.get("service_tier")
147
- )
148
- if response_format := kwargs.get("response_format"):
149
- # backward-compatible check for
150
- # openai.types.shared_params.response_format_json_schema.ResponseFormatJSONSchema
151
- if (
152
- isinstance(response_format, dict)
153
- and response_format.get("type") == "json_schema"
154
- and response_format.get("json_schema")
155
- ):
156
- schema = dict(response_format.get("json_schema")).get("schema")
157
- if schema:
158
- _set_span_attribute(
159
- span,
160
- SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
161
- json.dumps(schema),
162
- )
163
- elif (
164
- isinstance(response_format, pydantic.BaseModel)
165
- or (
166
- hasattr(response_format, "model_json_schema")
167
- and callable(response_format.model_json_schema)
168
- )
169
- ):
170
- _set_span_attribute(
171
- span,
172
- SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
173
- json.dumps(response_format.model_json_schema()),
174
- )
175
- else:
176
- schema = None
177
- try:
178
- schema = json.dumps(pydantic.TypeAdapter(response_format).json_schema())
179
- except Exception:
180
- try:
181
- schema = json.dumps(response_format)
182
- except Exception:
183
- pass
184
-
185
- if schema:
186
- _set_span_attribute(
187
- span,
188
- SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
189
- schema,
190
- )
191
-
192
-
193
- @dont_throw
194
- def _set_response_attributes(span, response):
195
- if not span.is_recording():
196
- return
197
-
198
- if "error" in response:
199
- _set_span_attribute(
200
- span,
201
- f"{GenAIAttributes.GEN_AI_PROMPT}.{PROMPT_ERROR}",
202
- json.dumps(response.get("error")),
203
- )
204
- return
205
-
206
- response_model = response.get("model")
207
- if response_model:
208
- response_model = _extract_model_name_from_provider_format(response_model)
209
- _set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, response_model)
210
- _set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_ID, response.get("id"))
211
-
212
- _set_span_attribute(
213
- span,
214
- SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
215
- response.get("system_fingerprint"),
216
- )
217
- _set_span_attribute(
218
- span,
219
- OpenAIAttributes.OPENAI_RESPONSE_SERVICE_TIER,
220
- response.get("service_tier"),
221
- )
222
- _log_prompt_filter(span, response)
223
- usage = response.get("usage")
224
- if not usage:
225
- return
226
-
227
- if is_openai_v1() and not isinstance(usage, dict):
228
- usage = usage.__dict__
229
-
230
- _set_span_attribute(
231
- span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
232
- )
233
- _set_span_attribute(
234
- span,
235
- GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
236
- usage.get("completion_tokens"),
237
- )
238
- _set_span_attribute(
239
- span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, usage.get("prompt_tokens")
240
- )
241
- prompt_tokens_details = dict(usage.get("prompt_tokens_details", {}))
242
- _set_span_attribute(
243
- span,
244
- SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
245
- prompt_tokens_details.get("cached_tokens", 0),
246
- )
247
- return
248
-
249
-
250
- def _log_prompt_filter(span, response_dict):
251
- if response_dict.get("prompt_filter_results"):
252
- _set_span_attribute(
253
- span,
254
- f"{GenAIAttributes.GEN_AI_PROMPT}.{PROMPT_FILTER_KEY}",
255
- json.dumps(response_dict.get("prompt_filter_results")),
256
- )
257
-
258
-
259
- @dont_throw
260
- def _set_span_stream_usage(span, prompt_tokens, completion_tokens):
261
- if not span.is_recording():
262
- return
263
-
264
- if isinstance(completion_tokens, int) and completion_tokens >= 0:
265
- _set_span_attribute(
266
- span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
267
- )
268
-
269
- if isinstance(prompt_tokens, int) and prompt_tokens >= 0:
270
- _set_span_attribute(span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens)
271
-
272
- if (
273
- isinstance(prompt_tokens, int)
274
- and isinstance(completion_tokens, int)
275
- and completion_tokens + prompt_tokens >= 0
276
- ):
277
- _set_span_attribute(
278
- span,
279
- SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
280
- completion_tokens + prompt_tokens,
281
- )
282
-
283
-
284
- def _get_openai_base_url(instance):
285
- if hasattr(instance, "_client"):
286
- client = instance._client # pylint: disable=protected-access
287
- if isinstance(client, (openai.AsyncOpenAI, openai.OpenAI)):
288
- return str(client.base_url)
289
-
290
- return ""
291
-
292
-
293
- def _get_vendor_from_url(base_url):
294
- if not base_url:
295
- return "openai"
296
-
297
- if "openai.azure.com" in base_url:
298
- return "Azure"
299
- elif "amazonaws.com" in base_url or "bedrock" in base_url:
300
- return "AWS"
301
- elif "googleapis.com" in base_url or "vertex" in base_url:
302
- return "Google"
303
- elif "openrouter.ai" in base_url:
304
- return "OpenRouter"
305
-
306
- return "openai"
307
-
308
-
309
- def _cross_region_check(value):
310
- if not value or "." not in value:
311
- return value
312
-
313
- prefixes = ["us", "us-gov", "eu", "apac"]
314
- if any(value.startswith(prefix + ".") for prefix in prefixes):
315
- parts = value.split(".")
316
- if len(parts) > 2:
317
- return parts[2]
318
- else:
319
- return value
320
- else:
321
- vendor, model = value.split(".", 1)
322
- return model
323
-
324
-
325
- def _extract_model_name_from_provider_format(model_name):
326
- """
327
- Extract model name from provider/model format.
328
- E.g., 'openai/gpt-4o' -> 'gpt-4o', 'anthropic/claude-3-sonnet' -> 'claude-3-sonnet'
329
- """
330
- if not model_name:
331
- return model_name
332
-
333
- if "/" in model_name:
334
- parts = model_name.split("/")
335
- return parts[-1] # Return the last part (actual model name)
336
-
337
- return model_name
338
-
339
-
340
- def is_streaming_response(response):
341
- if is_openai_v1():
342
- return isinstance(response, openai.Stream) or isinstance(
343
- response, openai.AsyncStream
344
- )
345
-
346
- return isinstance(response, types.GeneratorType) or isinstance(
347
- response, types.AsyncGeneratorType
348
- )
349
-
350
-
351
- def model_as_dict(model):
352
- if isinstance(model, dict):
353
- return model
354
- if _PYDANTIC_VERSION < "2.0.0":
355
- return model.dict()
356
- if hasattr(model, "model_dump"):
357
- return model.model_dump()
358
- elif hasattr(model, "parse"): # Raw API response
359
- return model_as_dict(model.parse())
360
- else:
361
- return model
362
-
363
-
364
- def _token_type(token_type: str):
365
- if token_type == "prompt_tokens":
366
- return "input"
367
- elif token_type == "completion_tokens":
368
- return "output"
369
-
370
- return None
371
-
372
-
373
- def metric_shared_attributes(
374
- response_model: str, operation: str, server_address: str, is_streaming: bool = False
375
- ):
376
- attributes = Config.get_common_metrics_attributes()
377
- vendor = _get_vendor_from_url(server_address)
378
-
379
- return {
380
- **attributes,
381
- GenAIAttributes.GEN_AI_SYSTEM: vendor,
382
- GenAIAttributes.GEN_AI_RESPONSE_MODEL: response_model,
383
- "gen_ai.operation.name": operation,
384
- "server.address": server_address,
385
- "stream": is_streaming,
386
- }
387
-
388
-
389
- def propagate_trace_context(span, kwargs):
390
- if is_openai_v1():
391
- extra_headers = kwargs.get("extra_headers", {})
392
- ctx = set_span_in_context(span)
393
- TraceContextTextMapPropagator().inject(extra_headers, context=ctx)
394
- kwargs["extra_headers"] = extra_headers
395
- else:
396
- headers = kwargs.get("headers", {})
397
- ctx = set_span_in_context(span)
398
- TraceContextTextMapPropagator().inject(headers, context=ctx)
399
- kwargs["headers"] = headers
@@ -1,247 +0,0 @@
1
- import logging
2
- import time
3
-
4
- from opentelemetry import context as context_api
5
- from opentelemetry.instrumentation.openai.shared import (
6
- _set_client_attributes,
7
- _set_request_attributes,
8
- _set_response_attributes,
9
- _set_span_attribute,
10
- metric_shared_attributes,
11
- model_as_dict,
12
- )
13
- from opentelemetry.instrumentation.openai.utils import (
14
- _with_audio_telemetry_wrapper,
15
- dont_throw,
16
- is_openai_v1,
17
- start_as_current_span_async,
18
- )
19
- from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
20
- from opentelemetry.metrics import Counter, Histogram
21
- from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
22
- from opentelemetry.semconv_ai import (
23
- SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
24
- SpanAttributes,
25
- )
26
- from opentelemetry.trace import SpanKind, Status, StatusCode
27
-
28
- SPAN_NAME = "openai.audio.transcriptions"
29
-
30
- logger = logging.getLogger(__name__)
31
-
32
-
33
- def _get_audio_duration(file):
34
- """
35
- Extract audio duration from file object.
36
- Returns duration in seconds, or None if unable to determine.
37
- """
38
- try:
39
- # Try to get duration from common audio libraries
40
- # First check if it's a file-like object with a name attribute
41
- if hasattr(file, "name"):
42
- file_path = file.name
43
- elif isinstance(file, (str, bytes)):
44
- # If it's a path string or bytes
45
- return None
46
- else:
47
- # If it's a file-like object without name, we can't easily determine duration
48
- return None
49
-
50
- # Try mutagen (supports many formats)
51
- try:
52
- from mutagen import File as MutagenFile
53
-
54
- audio = MutagenFile(file_path)
55
- if audio and hasattr(audio.info, "length"):
56
- return audio.info.length
57
- except (ImportError, Exception):
58
- pass
59
-
60
- except Exception as e:
61
- logger.debug(f"Unable to extract audio duration: {e}")
62
-
63
- return None
64
-
65
-
66
- @_with_audio_telemetry_wrapper
67
- def transcription_wrapper(
68
- tracer,
69
- duration_histogram: Histogram,
70
- exception_counter: Counter,
71
- wrapped,
72
- instance,
73
- args,
74
- kwargs,
75
- ):
76
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
77
- SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
78
- ):
79
- return wrapped(*args, **kwargs)
80
-
81
- with tracer.start_as_current_span(
82
- name=SPAN_NAME,
83
- kind=SpanKind.CLIENT,
84
- ) as span:
85
- _handle_request(span, kwargs, instance)
86
-
87
- try:
88
- # record time for duration
89
- start_time = time.time()
90
- response = wrapped(*args, **kwargs)
91
- end_time = time.time()
92
- except Exception as e: # pylint: disable=broad-except
93
- end_time = time.time()
94
- duration = end_time - start_time if "start_time" in locals() else 0
95
- attributes = {
96
- "error.type": e.__class__.__name__,
97
- }
98
-
99
- # if there are legal duration, record it
100
- if duration > 0 and duration_histogram:
101
- duration_histogram.record(duration, attributes=attributes)
102
- if exception_counter:
103
- exception_counter.add(1, attributes=attributes)
104
-
105
- span.set_attribute(ERROR_TYPE, e.__class__.__name__)
106
- span.record_exception(e)
107
- span.set_status(Status(StatusCode.ERROR, str(e)))
108
- span.end()
109
-
110
- raise
111
-
112
- duration = end_time - start_time
113
-
114
- _handle_response(
115
- response,
116
- span,
117
- instance,
118
- duration_histogram,
119
- duration,
120
- )
121
-
122
- return response
123
-
124
-
125
- @_with_audio_telemetry_wrapper
126
- async def atranscription_wrapper(
127
- tracer,
128
- duration_histogram: Histogram,
129
- exception_counter: Counter,
130
- wrapped,
131
- instance,
132
- args,
133
- kwargs,
134
- ):
135
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
136
- SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
137
- ):
138
- return await wrapped(*args, **kwargs)
139
-
140
- async with start_as_current_span_async(
141
- tracer=tracer,
142
- name=SPAN_NAME,
143
- kind=SpanKind.CLIENT,
144
- ) as span:
145
- _handle_request(span, kwargs, instance)
146
-
147
- try:
148
- # record time for duration
149
- start_time = time.time()
150
- response = await wrapped(*args, **kwargs)
151
- end_time = time.time()
152
- except Exception as e: # pylint: disable=broad-except
153
- end_time = time.time()
154
- duration = end_time - start_time if "start_time" in locals() else 0
155
- attributes = {
156
- "error.type": e.__class__.__name__,
157
- }
158
-
159
- # if there are legal duration, record it
160
- if duration > 0 and duration_histogram:
161
- duration_histogram.record(duration, attributes=attributes)
162
- if exception_counter:
163
- exception_counter.add(1, attributes=attributes)
164
-
165
- span.set_attribute(ERROR_TYPE, e.__class__.__name__)
166
- span.record_exception(e)
167
- span.set_status(Status(StatusCode.ERROR, str(e)))
168
- span.end()
169
-
170
- raise
171
-
172
- duration = end_time - start_time
173
-
174
- _handle_response(
175
- response,
176
- span,
177
- instance,
178
- duration_histogram,
179
- duration,
180
- )
181
-
182
- return response
183
-
184
-
185
- @dont_throw
186
- def _handle_request(span, kwargs, instance):
187
- _set_request_attributes(span, kwargs, instance)
188
- _set_client_attributes(span, instance)
189
-
190
- # Extract and set audio duration
191
- file_param = kwargs.get("file")
192
- if file_param:
193
- audio_duration = _get_audio_duration(file_param)
194
- if audio_duration is not None:
195
- # _set_span_attribute(
196
- # span, SpanAttributes.LLM_OPENAI_AUDIO_INPUT_DURATION_SECONDS, audio_duration
197
- # )
198
- # TODO(Ata): come back here later when semconv is published
199
- _set_span_attribute(
200
- span, 'gen_ai.openai.audio.input.duration_seconds', audio_duration
201
- )
202
- else:
203
- print("REMOVE ME : ATA-DBG : COULD NOT READ AUDIO FILE WITH MUTAGEN")
204
-
205
-
206
- @dont_throw
207
- def _handle_response(
208
- response,
209
- span,
210
- instance=None,
211
- duration_histogram=None,
212
- duration=None,
213
- ):
214
- if is_openai_v1():
215
- response_dict = model_as_dict(response)
216
- else:
217
- response_dict = response
218
-
219
- # metrics record
220
- _set_transcription_metrics(
221
- instance,
222
- duration_histogram,
223
- response_dict,
224
- duration,
225
- )
226
-
227
- # span attributes
228
- _set_response_attributes(span, response_dict)
229
-
230
-
231
- def _set_transcription_metrics(
232
- instance,
233
- duration_histogram,
234
- response_dict,
235
- duration,
236
- ):
237
- from opentelemetry.instrumentation.openai.shared import _get_openai_base_url
238
-
239
- shared_attributes = metric_shared_attributes(
240
- response_model=response_dict.get("model") or None,
241
- operation="audio.transcriptions",
242
- server_address=_get_openai_base_url(instance),
243
- )
244
-
245
- # duration metrics
246
- if duration and isinstance(duration, (float, int)) and duration_histogram:
247
- duration_histogram.record(duration, attributes=shared_attributes)