paid-python 0.3.4__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. paid/_vendor/__init__.py +0 -0
  2. paid/_vendor/opentelemetry/__init__.py +0 -0
  3. paid/_vendor/opentelemetry/instrumentation/__init__.py +0 -0
  4. paid/_vendor/opentelemetry/instrumentation/openai/__init__.py +54 -0
  5. paid/_vendor/opentelemetry/instrumentation/openai/shared/__init__.py +399 -0
  6. paid/_vendor/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1192 -0
  7. paid/_vendor/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +292 -0
  8. paid/_vendor/opentelemetry/instrumentation/openai/shared/config.py +15 -0
  9. paid/_vendor/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +311 -0
  10. paid/_vendor/opentelemetry/instrumentation/openai/shared/event_emitter.py +108 -0
  11. paid/_vendor/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  12. paid/_vendor/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  13. paid/_vendor/opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
  14. paid/_vendor/opentelemetry/instrumentation/openai/utils.py +190 -0
  15. paid/_vendor/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  16. paid/_vendor/opentelemetry/instrumentation/openai/v1/__init__.py +358 -0
  17. paid/_vendor/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +329 -0
  18. paid/_vendor/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +134 -0
  19. paid/_vendor/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +996 -0
  20. paid/_vendor/opentelemetry/instrumentation/openai/version.py +1 -0
  21. paid/tracing/autoinstrumentation.py +5 -6
  22. paid/tracing/tracing.py +14 -3
  23. {paid_python-0.3.4.dist-info → paid_python-0.3.6.dist-info}/METADATA +2 -3
  24. {paid_python-0.3.4.dist-info → paid_python-0.3.6.dist-info}/RECORD +26 -6
  25. {paid_python-0.3.4.dist-info → paid_python-0.3.6.dist-info}/LICENSE +0 -0
  26. {paid_python-0.3.4.dist-info → paid_python-0.3.6.dist-info}/WHEEL +0 -0
File without changes
File without changes
File without changes
@@ -0,0 +1,54 @@
1
+ from typing import Callable, Collection, Optional
2
+
3
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
4
+ from paid._vendor.opentelemetry.instrumentation.openai.shared.config import Config
5
+ from paid._vendor.opentelemetry.instrumentation.openai.utils import is_openai_v1
6
+ from typing_extensions import Coroutine
7
+
8
# Dependency predicate consumed by BaseInstrumentor.instrumentation_dependencies:
# instrumentation only activates when an openai package in this range is installed.
_instruments = ("openai >= 0.27.0",)
9
+
10
+
11
class OpenAIInstrumentor(BaseInstrumentor):
    """Instrumentor for OpenAI's client library.

    Publishes its options on the shared ``Config`` object (the wrappers never
    see this instance) and delegates (un)instrumentation to the implementation
    matching the installed openai SDK major version.
    """

    def __init__(
        self,
        enrich_assistant: bool = False,
        exception_logger=None,
        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
        upload_base64_image: Optional[
            Callable[[str, str, str, str], Coroutine[None, None, str]]
        ] = lambda *args: "",
        enable_trace_context_propagation: bool = True,
        use_legacy_attributes: bool = True,
    ):
        super().__init__()
        # Fan every option out onto the module-wide Config singleton.
        Config.enrich_assistant = enrich_assistant
        Config.exception_logger = exception_logger
        Config.get_common_metrics_attributes = get_common_metrics_attributes
        Config.upload_base64_image = upload_base64_image
        Config.enable_trace_context_propagation = enable_trace_context_propagation
        Config.use_legacy_attributes = use_legacy_attributes

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package predicates required for this instrumentation."""
        return _instruments

    def _instrument(self, **kwargs):
        # Imports are deferred so only the relevant implementation is loaded.
        if is_openai_v1():
            from paid._vendor.opentelemetry.instrumentation.openai.v1 import (
                OpenAIV1Instrumentor as _Impl,
            )
        else:
            from paid._vendor.opentelemetry.instrumentation.openai.v0 import (
                OpenAIV0Instrumentor as _Impl,
            )
        _Impl().instrument(**kwargs)

    def _uninstrument(self, **kwargs):
        if is_openai_v1():
            from paid._vendor.opentelemetry.instrumentation.openai.v1 import (
                OpenAIV1Instrumentor as _Impl,
            )
        else:
            from paid._vendor.opentelemetry.instrumentation.openai.v0 import (
                OpenAIV0Instrumentor as _Impl,
            )
        _Impl().uninstrument(**kwargs)
@@ -0,0 +1,399 @@
1
+ import json
2
+ import logging
3
+ import types
4
+ import openai
5
+ import pydantic
6
+ from importlib.metadata import version
7
+
8
+ from paid._vendor.opentelemetry.instrumentation.openai.shared.config import Config
9
+ from paid._vendor.opentelemetry.instrumentation.openai.utils import (
10
+ dont_throw,
11
+ is_openai_v1,
12
+ )
13
+ from opentelemetry.semconv._incubating.attributes import (
14
+ gen_ai_attributes as GenAIAttributes,
15
+ openai_attributes as OpenAIAttributes,
16
+ )
17
+ from opentelemetry.semconv_ai import SpanAttributes
18
+ from opentelemetry.trace.propagation import set_span_in_context
19
+ from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
20
+
21
# Usage-dict keys that carry token counts in OpenAI responses.
OPENAI_LLM_USAGE_TOKEN_TYPES = ["prompt_tokens", "completion_tokens"]
# Span-attribute suffixes under GEN_AI_PROMPT for Azure prompt-filter results
# and for error payloads returned in place of a completion.
PROMPT_FILTER_KEY = "prompt_filter_results"
PROMPT_ERROR = "prompt_error"

# Installed pydantic version string; used to choose .dict() vs .model_dump().
_PYDANTIC_VERSION = version("pydantic")


logger = logging.getLogger(__name__)
29
+
30
+
31
def _set_span_attribute(span, name, value):
    """Set *name*=*value* on *span*, skipping empty values and the NOT_GIVEN sentinel."""
    skip = value is None or value == ""
    if not skip and hasattr(openai, "NOT_GIVEN"):
        # Newer SDKs use the NOT_GIVEN sentinel for omitted arguments.
        skip = value == openai.NOT_GIVEN
    if not skip:
        span.set_attribute(name, value)
39
+
40
+
41
def _set_client_attributes(span, instance):
    """Record base-URL / API-version attributes from a v1 client on *span*."""
    # Only meaningful for recording spans and the v1 SDK's client objects.
    if not span.is_recording() or not is_openai_v1():
        return

    client = instance._client  # pylint: disable=protected-access
    if isinstance(client, (openai.AsyncOpenAI, openai.OpenAI)):
        _set_span_attribute(
            span, SpanAttributes.LLM_OPENAI_API_BASE, str(client.base_url)
        )
    if isinstance(client, (openai.AsyncAzureOpenAI, openai.AzureOpenAI)):
        # pylint: disable=protected-access
        _set_span_attribute(
            span, SpanAttributes.LLM_OPENAI_API_VERSION, client._api_version
        )
57
+
58
+
59
def _set_api_attributes(span):
    """Record module-level API endpoint attributes (pre-v1 SDK only)."""
    if not span.is_recording() or is_openai_v1():
        return

    # Newer v0 releases expose `base_url`; older ones only `api_base`.
    endpoint = openai.base_url if hasattr(openai, "base_url") else openai.api_base

    _set_span_attribute(span, SpanAttributes.LLM_OPENAI_API_BASE, endpoint)
    _set_span_attribute(span, SpanAttributes.LLM_OPENAI_API_TYPE, openai.api_type)
    _set_span_attribute(span, SpanAttributes.LLM_OPENAI_API_VERSION, openai.api_version)
73
+
74
+
75
def _set_functions_attributes(span, functions):
    """Record legacy `functions` request entries as indexed span attributes."""
    if not functions:
        return

    for idx, fn in enumerate(functions):
        key = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{idx}"
        _set_span_attribute(span, f"{key}.name", fn.get("name"))
        _set_span_attribute(span, f"{key}.description", fn.get("description"))
        _set_span_attribute(span, f"{key}.parameters", json.dumps(fn.get("parameters")))
86
+
87
+
88
def set_tools_attributes(span, tools):
    """Record `tools` request entries (function tools only) as indexed span attributes."""
    if not tools:
        return

    for idx, tool in enumerate(tools):
        fn = tool.get("function")
        if not fn:
            # Non-function tools carry no schema we record.
            continue
        key = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{idx}"
        _set_span_attribute(span, f"{key}.name", fn.get("name"))
        _set_span_attribute(span, f"{key}.description", fn.get("description"))
        _set_span_attribute(span, f"{key}.parameters", json.dumps(fn.get("parameters")))
103
+
104
+
105
def _set_request_attributes(span, kwargs, instance=None):
    """Record request-side attributes (vendor, model, sampling params, headers,
    response-format schema) on *span* from the call's keyword arguments.

    No-ops when the span is not recording. *instance* is the wrapped API
    resource; when given, its client base URL is used to infer the vendor.
    """
    if not span.is_recording():
        return

    _set_api_attributes(span)

    base_url = _get_openai_base_url(instance) if instance else ""
    vendor = _get_vendor_from_url(base_url)
    _set_span_attribute(span, GenAIAttributes.GEN_AI_SYSTEM, vendor)

    model = kwargs.get("model")
    # Normalize vendor-specific model identifiers to the bare model name.
    if vendor == "AWS" and model and "." in model:
        model = _cross_region_check(model)
    elif vendor == "OpenRouter":
        model = _extract_model_name_from_provider_format(model)

    _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_MODEL, model)
    _set_span_attribute(
        span, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
    )
    _set_span_attribute(
        span, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, kwargs.get("temperature")
    )
    _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_TOP_P, kwargs.get("top_p"))
    _set_span_attribute(
        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
    )
    _set_span_attribute(
        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
    )
    _set_span_attribute(span, SpanAttributes.LLM_USER, kwargs.get("user"))
    _set_span_attribute(span, SpanAttributes.LLM_HEADERS, str(kwargs.get("headers")))
    # The newer OpenAI SDK replaced `headers` with `extra_headers`; when
    # present it overwrites the attribute set from `headers` above.
    if kwargs.get("extra_headers") is not None:
        _set_span_attribute(
            span, SpanAttributes.LLM_HEADERS, str(kwargs.get("extra_headers"))
        )
    _set_span_attribute(
        span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False
    )
    _set_span_attribute(
        span, OpenAIAttributes.OPENAI_REQUEST_SERVICE_TIER, kwargs.get("service_tier")
    )
    if response_format := kwargs.get("response_format"):
        # backward-compatible check for
        # openai.types.shared_params.response_format_json_schema.ResponseFormatJSONSchema
        if (
            isinstance(response_format, dict)
            and response_format.get("type") == "json_schema"
            and response_format.get("json_schema")
        ):
            # Dict form: {"type": "json_schema", "json_schema": {"schema": ...}}
            schema = dict(response_format.get("json_schema")).get("schema")
            if schema:
                _set_span_attribute(
                    span,
                    SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
                    json.dumps(schema),
                )
        elif (
            isinstance(response_format, pydantic.BaseModel)
            or (
                hasattr(response_format, "model_json_schema")
                and callable(response_format.model_json_schema)
            )
        ):
            # Pydantic model (or pydantic-like object) passed directly.
            _set_span_attribute(
                span,
                SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
                json.dumps(response_format.model_json_schema()),
            )
        else:
            # Fallback: try pydantic's TypeAdapter, then plain JSON encoding;
            # give up silently if neither can serialize the value.
            schema = None
            try:
                schema = json.dumps(pydantic.TypeAdapter(response_format).json_schema())
            except Exception:
                try:
                    schema = json.dumps(response_format)
                except Exception:
                    pass

            if schema:
                _set_span_attribute(
                    span,
                    SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
                    schema,
                )
191
+
192
+
193
@dont_throw
def _set_response_attributes(span, response):
    """Record response-side attributes (model, id, usage, service tier) on *span*.

    *response* is expected to be dict-like (see ``model_as_dict``). Returns
    early for error payloads and when the span is not recording; exceptions
    are swallowed by ``@dont_throw``.
    """
    if not span.is_recording():
        return

    if "error" in response:
        # Error payloads carry no usable metadata; log the error body and stop.
        _set_span_attribute(
            span,
            f"{GenAIAttributes.GEN_AI_PROMPT}.{PROMPT_ERROR}",
            json.dumps(response.get("error")),
        )
        return

    response_model = response.get("model")
    if response_model:
        # Normalize "provider/model" identifiers (e.g. OpenRouter) to the model name.
        response_model = _extract_model_name_from_provider_format(response_model)
    _set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, response_model)
    _set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_ID, response.get("id"))

    _set_span_attribute(
        span,
        SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
        response.get("system_fingerprint"),
    )
    _set_span_attribute(
        span,
        OpenAIAttributes.OPENAI_RESPONSE_SERVICE_TIER,
        response.get("service_tier"),
    )
    _log_prompt_filter(span, response)
    usage = response.get("usage")
    if not usage:
        return

    if is_openai_v1() and not isinstance(usage, dict):
        # v1 SDK returns a pydantic object; fall back to its attribute dict.
        usage = usage.__dict__

    _set_span_attribute(
        span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
    )
    _set_span_attribute(
        span,
        GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
        usage.get("completion_tokens"),
    )
    _set_span_attribute(
        span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, usage.get("prompt_tokens")
    )
    # Fix: an explicit ``"prompt_tokens_details": None`` made ``dict(None)``
    # raise TypeError (swallowed by @dont_throw, silently dropping the
    # cached-token attribute). Guard with ``or {}`` before converting.
    prompt_tokens_details = dict(usage.get("prompt_tokens_details") or {})
    _set_span_attribute(
        span,
        SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
        prompt_tokens_details.get("cached_tokens", 0),
    )
    return
248
+
249
+
250
def _log_prompt_filter(span, response_dict):
    """Record Azure content-filter results for the prompt, when present."""
    filter_results = response_dict.get("prompt_filter_results")
    if not filter_results:
        return
    _set_span_attribute(
        span,
        f"{GenAIAttributes.GEN_AI_PROMPT}.{PROMPT_FILTER_KEY}",
        json.dumps(filter_results),
    )
257
+
258
+
259
@dont_throw
def _set_span_stream_usage(span, prompt_tokens, completion_tokens):
    """Record token usage gathered while consuming a streamed response.

    Each count is recorded only when it is a non-negative int; the total is
    recorded only when both counts are ints and their sum is non-negative.
    """
    if not span.is_recording():
        return

    have_prompt = isinstance(prompt_tokens, int)
    have_completion = isinstance(completion_tokens, int)

    if have_completion and completion_tokens >= 0:
        _set_span_attribute(
            span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
        )

    if have_prompt and prompt_tokens >= 0:
        _set_span_attribute(
            span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens
        )

    if have_prompt and have_completion and completion_tokens + prompt_tokens >= 0:
        _set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
            completion_tokens + prompt_tokens,
        )
282
+
283
+
284
def _get_openai_base_url(instance):
    """Return the v1 client's base URL for *instance*, or "" when unavailable."""
    client = getattr(instance, "_client", None)  # pylint: disable=protected-access
    if isinstance(client, (openai.AsyncOpenAI, openai.OpenAI)):
        return str(client.base_url)
    return ""
291
+
292
+
293
+ def _get_vendor_from_url(base_url):
294
+ if not base_url:
295
+ return "openai"
296
+
297
+ if "openai.azure.com" in base_url:
298
+ return "Azure"
299
+ elif "amazonaws.com" in base_url or "bedrock" in base_url:
300
+ return "AWS"
301
+ elif "googleapis.com" in base_url or "vertex" in base_url:
302
+ return "Google"
303
+ elif "openrouter.ai" in base_url:
304
+ return "OpenRouter"
305
+
306
+ return "openai"
307
+
308
+
309
+ def _cross_region_check(value):
310
+ if not value or "." not in value:
311
+ return value
312
+
313
+ prefixes = ["us", "us-gov", "eu", "apac"]
314
+ if any(value.startswith(prefix + ".") for prefix in prefixes):
315
+ parts = value.split(".")
316
+ if len(parts) > 2:
317
+ return parts[2]
318
+ else:
319
+ return value
320
+ else:
321
+ vendor, model = value.split(".", 1)
322
+ return model
323
+
324
+
325
+ def _extract_model_name_from_provider_format(model_name):
326
+ """
327
+ Extract model name from provider/model format.
328
+ E.g., 'openai/gpt-4o' -> 'gpt-4o', 'anthropic/claude-3-sonnet' -> 'claude-3-sonnet'
329
+ """
330
+ if not model_name:
331
+ return model_name
332
+
333
+ if "/" in model_name:
334
+ parts = model_name.split("/")
335
+ return parts[-1] # Return the last part (actual model name)
336
+
337
+ return model_name
338
+
339
+
340
def is_streaming_response(response):
    """Return True when *response* is a streaming object for the installed SDK."""
    if is_openai_v1():
        stream_types = (openai.Stream, openai.AsyncStream)
    else:
        # Pre-v1 SDKs stream via plain (async) generators.
        stream_types = (types.GeneratorType, types.AsyncGeneratorType)
    return isinstance(response, stream_types)
349
+
350
+
351
def model_as_dict(model):
    """Best-effort conversion of an SDK response object to a plain dict.

    Handles plain dicts, pydantic v1 models (``.dict()``), pydantic v2 models
    (``.model_dump()``), and raw API responses (``.parse()``); anything else
    is returned unchanged.
    """
    if isinstance(model, dict):
        return model
    # Fix: the previous lexicographic check (_PYDANTIC_VERSION < "2.0.0")
    # misclassifies versions like "10.0.0"; compare the major version numerically.
    try:
        pydantic_major = int(_PYDANTIC_VERSION.split(".", 1)[0])
    except ValueError:
        pydantic_major = 2  # non-numeric major segment: assume modern pydantic
    if pydantic_major < 2:
        return model.dict()
    if hasattr(model, "model_dump"):
        return model.model_dump()
    elif hasattr(model, "parse"):  # Raw API response
        return model_as_dict(model.parse())
    else:
        return model
362
+
363
+
364
+ def _token_type(token_type: str):
365
+ if token_type == "prompt_tokens":
366
+ return "input"
367
+ elif token_type == "completion_tokens":
368
+ return "output"
369
+
370
+ return None
371
+
372
+
373
def metric_shared_attributes(
    response_model: str, operation: str, server_address: str, is_streaming: bool = False
):
    """Build the attribute set shared by every metric for one LLM call.

    Starts from the user-supplied common attributes and layers the call's
    vendor, model, operation, endpoint, and streaming flag on top.
    """
    shared = dict(Config.get_common_metrics_attributes())
    shared.update(
        {
            GenAIAttributes.GEN_AI_SYSTEM: _get_vendor_from_url(server_address),
            GenAIAttributes.GEN_AI_RESPONSE_MODEL: response_model,
            "gen_ai.operation.name": operation,
            "server.address": server_address,
            "stream": is_streaming,
        }
    )
    return shared
387
+
388
+
389
def propagate_trace_context(span, kwargs):
    """Inject W3C trace-context headers into the outgoing request kwargs.

    v1 SDKs take custom headers via ``extra_headers``; older SDKs via
    ``headers``. The carrier dict is written back into *kwargs* in place.
    """
    header_field = "extra_headers" if is_openai_v1() else "headers"
    carrier = kwargs.get(header_field, {})
    TraceContextTextMapPropagator().inject(carrier, context=set_span_in_context(span))
    kwargs[header_field] = carrier