paid-python 0.5.0__py3-none-any.whl → 1.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. paid/__init__.py +33 -0
  2. paid/client.py +1 -472
  3. paid/core/client_wrapper.py +3 -2
  4. paid/customers/__init__.py +3 -0
  5. paid/customers/client.py +428 -4
  6. paid/customers/raw_client.py +594 -2
  7. paid/customers/types/__init__.py +8 -0
  8. paid/customers/types/customers_check_entitlement_request_view.py +5 -0
  9. paid/customers/types/customers_check_entitlement_response.py +22 -0
  10. paid/orders/client.py +445 -0
  11. paid/orders/raw_client.py +705 -0
  12. paid/plans/client.py +142 -0
  13. paid/plans/raw_client.py +238 -0
  14. paid/types/__init__.py +30 -0
  15. paid/types/cancel_renewal_response.py +49 -0
  16. paid/types/contact_create_for_customer.py +37 -0
  17. paid/types/invoice.py +75 -0
  18. paid/types/invoice_status.py +5 -0
  19. paid/types/payment_method.py +58 -0
  20. paid/types/payment_method_card.py +49 -0
  21. paid/types/payment_method_type.py +5 -0
  22. paid/types/payment_method_us_bank_account.py +36 -0
  23. paid/types/payment_method_us_bank_account_account_type.py +5 -0
  24. paid/types/plan_group.py +60 -0
  25. paid/types/plan_plan_products_item.py +6 -0
  26. paid/types/plan_with_features.py +69 -0
  27. paid/types/plan_with_features_features_item.py +34 -0
  28. paid/types/proration_attribute_update.py +44 -0
  29. paid/types/proration_detail.py +49 -0
  30. paid/types/proration_upgrade_response.py +73 -0
  31. paid/types/signal_v_2.py +5 -5
  32. paid/usage/client.py +6 -6
  33. {paid_python-0.5.0.dist-info → paid_python-1.0.0a0.dist-info}/METADATA +6 -4
  34. {paid_python-0.5.0.dist-info → paid_python-1.0.0a0.dist-info}/RECORD +36 -36
  35. opentelemetry/instrumentation/openai/__init__.py +0 -54
  36. opentelemetry/instrumentation/openai/shared/__init__.py +0 -399
  37. opentelemetry/instrumentation/openai/shared/audio_wrappers.py +0 -247
  38. opentelemetry/instrumentation/openai/shared/chat_wrappers.py +0 -1192
  39. opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -292
  40. opentelemetry/instrumentation/openai/shared/config.py +0 -15
  41. opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -311
  42. opentelemetry/instrumentation/openai/shared/event_emitter.py +0 -108
  43. opentelemetry/instrumentation/openai/shared/event_models.py +0 -41
  44. opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -68
  45. opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
  46. opentelemetry/instrumentation/openai/utils.py +0 -213
  47. opentelemetry/instrumentation/openai/v0/__init__.py +0 -176
  48. opentelemetry/instrumentation/openai/v1/__init__.py +0 -394
  49. opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -329
  50. opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -134
  51. opentelemetry/instrumentation/openai/v1/responses_wrappers.py +0 -1113
  52. opentelemetry/instrumentation/openai/version.py +0 -1
  53. {paid_python-0.5.0.dist-info → paid_python-1.0.0a0.dist-info}/LICENSE +0 -0
  54. {paid_python-0.5.0.dist-info → paid_python-1.0.0a0.dist-info}/WHEEL +0 -0
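Note (not part of the generated diff): items 35–52 show that the opentelemetry/instrumentation/openai package vendored into the 0.5.0 wheel is no longer shipped in 1.0.0a0. A minimal, hedged sketch of how downstream code could detect the removal — the flag name is illustrative and nothing here is taken from paid-python's own documentation:

    # Hedged sketch: the bundled opentelemetry.instrumentation.openai modules were
    # present in the paid-python 0.5.0 wheel but are absent from 1.0.0a0.
    try:
        import opentelemetry.instrumentation.openai  # noqa: F401
        HAS_BUNDLED_OPENAI_INSTRUMENTATION = True
    except ImportError:
        # No longer vendored; install an OpenAI instrumentation package separately if needed.
        HAS_BUNDLED_OPENAI_INSTRUMENTATION = False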
opentelemetry/instrumentation/openai/v1/__init__.py (deleted)
@@ -1,394 +0,0 @@
- from typing import Collection
-
- from opentelemetry._logs import get_logger
- from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
- from opentelemetry.instrumentation.openai.shared.chat_wrappers import (
-     achat_wrapper,
-     chat_wrapper,
- )
- from opentelemetry.instrumentation.openai.shared.completion_wrappers import (
-     acompletion_wrapper,
-     completion_wrapper,
- )
- from opentelemetry.instrumentation.openai.shared.config import Config
- from opentelemetry.instrumentation.openai.shared.embeddings_wrappers import (
-     aembeddings_wrapper,
-     embeddings_wrapper,
- )
- from opentelemetry.instrumentation.openai.shared.image_gen_wrappers import (
-     image_gen_metrics_wrapper,
- )
- from opentelemetry.instrumentation.openai.shared.audio_wrappers import (
-     atranscription_wrapper,
-     transcription_wrapper,
- )
- from opentelemetry.instrumentation.openai.utils import is_metrics_enabled
- from opentelemetry.instrumentation.openai.v1.assistant_wrappers import (
-     assistants_create_wrapper,
-     messages_list_wrapper,
-     runs_create_and_stream_wrapper,
-     runs_create_wrapper,
-     runs_retrieve_wrapper,
- )
-
- from opentelemetry.instrumentation.openai.v1.responses_wrappers import (
-     async_responses_cancel_wrapper,
-     async_responses_get_or_create_wrapper,
-     responses_cancel_wrapper,
-     responses_get_or_create_wrapper,
- )
-
- from opentelemetry.instrumentation.openai.version import __version__
- from opentelemetry.instrumentation.utils import unwrap
- from opentelemetry.metrics import get_meter
- from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
- from opentelemetry.semconv_ai import Meters
- from opentelemetry.trace import get_tracer
- from wrapt import wrap_function_wrapper
-
-
- _instruments = ("openai >= 1.0.0",)
-
-
- class OpenAIV1Instrumentor(BaseInstrumentor):
-     def instrumentation_dependencies(self) -> Collection[str]:
-         return _instruments
-
-     def _try_wrap(self, module, function, wrapper):
-         """
-         Wrap a function if it exists, otherwise do nothing.
-         This is useful for handling cases where the function is not available in
-         the older versions of the library.
-
-         Args:
-             module (str): The module to wrap, e.g. "openai.resources.chat.completions"
-             function (str): "Object.function" to wrap, e.g. "Completions.parse"
-             wrapper (callable): The wrapper to apply to the function.
-         """
-         try:
-             wrap_function_wrapper(module, function, wrapper)
-         except (AttributeError, ModuleNotFoundError):
-             pass
-
-     def _instrument(self, **kwargs):
-         tracer_provider = kwargs.get("tracer_provider")
-         tracer = get_tracer(__name__, __version__, tracer_provider)
-
-         # meter and counters are inited here
-         meter_provider = kwargs.get("meter_provider")
-         meter = get_meter(__name__, __version__, meter_provider)
-
-         if not Config.use_legacy_attributes:
-             logger_provider = kwargs.get("logger_provider")
-             Config.event_logger = get_logger(
-                 __name__, __version__, logger_provider=logger_provider
-             )
-
-         if is_metrics_enabled():
-             tokens_histogram = meter.create_histogram(
-                 name=Meters.LLM_TOKEN_USAGE,
-                 unit="token",
-                 description="Measures number of input and output tokens used",
-             )
-
-             chat_choice_counter = meter.create_counter(
-                 name=Meters.LLM_GENERATION_CHOICES,
-                 unit="choice",
-                 description="Number of choices returned by chat completions call",
-             )
-
-             duration_histogram = meter.create_histogram(
-                 name=Meters.LLM_OPERATION_DURATION,
-                 unit="s",
-                 description="GenAI operation duration",
-             )
-
-             chat_exception_counter = meter.create_counter(
-                 name=Meters.LLM_COMPLETIONS_EXCEPTIONS,
-                 unit="time",
-                 description="Number of exceptions occurred during chat completions",
-             )
-
-             streaming_time_to_first_token = meter.create_histogram(
-                 name=GenAIMetrics.GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
-                 unit="s",
-                 description="Time to first token in streaming chat completions",
-             )
-             streaming_time_to_generate = meter.create_histogram(
-                 name=Meters.LLM_STREAMING_TIME_TO_GENERATE,
-                 unit="s",
-                 description="Time between first token and completion in streaming chat completions",
-             )
-         else:
-             (
-                 tokens_histogram,
-                 chat_choice_counter,
-                 duration_histogram,
-                 chat_exception_counter,
-                 streaming_time_to_first_token,
-                 streaming_time_to_generate,
-             ) = (None, None, None, None, None, None)
-
-         wrap_function_wrapper(
-             "openai.resources.chat.completions",
-             "Completions.create",
-             chat_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 chat_choice_counter,
-                 duration_histogram,
-                 chat_exception_counter,
-                 streaming_time_to_first_token,
-                 streaming_time_to_generate,
-             ),
-         )
-
-         wrap_function_wrapper(
-             "openai.resources.completions",
-             "Completions.create",
-             completion_wrapper(tracer),
-         )
-
-         if is_metrics_enabled():
-             embeddings_vector_size_counter = meter.create_counter(
-                 name=Meters.LLM_EMBEDDINGS_VECTOR_SIZE,
-                 unit="element",
-                 description="he size of returned vector",
-             )
-             embeddings_exception_counter = meter.create_counter(
-                 name=Meters.LLM_EMBEDDINGS_EXCEPTIONS,
-                 unit="time",
-                 description="Number of exceptions occurred during embeddings operation",
-             )
-         else:
-             (
-                 tokens_histogram,
-                 embeddings_vector_size_counter,
-                 embeddings_exception_counter,
-             ) = (None, None, None)
-
-         wrap_function_wrapper(
-             "openai.resources.embeddings",
-             "Embeddings.create",
-             embeddings_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 embeddings_vector_size_counter,
-                 duration_histogram,
-                 embeddings_exception_counter,
-             ),
-         )
-
-         wrap_function_wrapper(
-             "openai.resources.chat.completions",
-             "AsyncCompletions.create",
-             achat_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 chat_choice_counter,
-                 duration_histogram,
-                 chat_exception_counter,
-                 streaming_time_to_first_token,
-                 streaming_time_to_generate,
-             ),
-         )
-         wrap_function_wrapper(
-             "openai.resources.completions",
-             "AsyncCompletions.create",
-             acompletion_wrapper(tracer),
-         )
-         wrap_function_wrapper(
-             "openai.resources.embeddings",
-             "AsyncEmbeddings.create",
-             aembeddings_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 embeddings_vector_size_counter,
-                 duration_histogram,
-                 embeddings_exception_counter,
-             ),
-         )
-         # in newer versions, Completions.parse are out of beta
-         self._try_wrap(
-             "openai.resources.chat.completions",
-             "Completions.parse",
-             chat_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 chat_choice_counter,
-                 duration_histogram,
-                 chat_exception_counter,
-                 streaming_time_to_first_token,
-                 streaming_time_to_generate,
-             ),
-         )
-         self._try_wrap(
-             "openai.resources.chat.completions",
-             "AsyncCompletions.parse",
-             achat_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 chat_choice_counter,
-                 duration_histogram,
-                 chat_exception_counter,
-                 streaming_time_to_first_token,
-                 streaming_time_to_generate,
-             ),
-         )
-
-         if is_metrics_enabled():
-             image_gen_exception_counter = meter.create_counter(
-                 name=Meters.LLM_IMAGE_GENERATIONS_EXCEPTIONS,
-                 unit="time",
-                 description="Number of exceptions occurred during image generations operation",
-             )
-         else:
-             image_gen_exception_counter = None
-
-         wrap_function_wrapper(
-             "openai.resources.images",
-             "Images.generate",
-             image_gen_metrics_wrapper(duration_histogram, image_gen_exception_counter),
-         )
-
-         if is_metrics_enabled():
-             audio_transcription_exception_counter = meter.create_counter(
-                 # name=Meters.LLM_AUDIO_TRANSCRIPTIONS_EXCEPTIONS, # TODO(Ata): come back here later when semconv is published
-                 name='llm.openai.audio.transcriptions.exceptions',
-                 unit="time",
-                 description="Number of exceptions occurred during audio transcriptions operation",
-             )
-         else:
-             audio_transcription_exception_counter = None
-
-         wrap_function_wrapper(
-             "openai.resources.audio.transcriptions",
-             "Transcriptions.create",
-             transcription_wrapper(
-                 tracer,
-                 duration_histogram,
-                 audio_transcription_exception_counter,
-             ),
-         )
-
-         wrap_function_wrapper(
-             "openai.resources.audio.transcriptions",
-             "AsyncTranscriptions.create",
-             atranscription_wrapper(
-                 tracer,
-                 duration_histogram,
-                 audio_transcription_exception_counter,
-             ),
-         )
-
-         # Beta APIs may not be available consistently in all versions
-         self._try_wrap(
-             "openai.resources.beta.assistants",
-             "Assistants.create",
-             assistants_create_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.beta.chat.completions",
-             "Completions.parse",
-             chat_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 chat_choice_counter,
-                 duration_histogram,
-                 chat_exception_counter,
-                 streaming_time_to_first_token,
-                 streaming_time_to_generate,
-             ),
-         )
-         self._try_wrap(
-             "openai.resources.beta.chat.completions",
-             "AsyncCompletions.parse",
-             achat_wrapper(
-                 tracer,
-                 tokens_histogram,
-                 chat_choice_counter,
-                 duration_histogram,
-                 chat_exception_counter,
-                 streaming_time_to_first_token,
-                 streaming_time_to_generate,
-             ),
-         )
-         self._try_wrap(
-             "openai.resources.beta.threads.runs",
-             "Runs.create",
-             runs_create_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.beta.threads.runs",
-             "Runs.retrieve",
-             runs_retrieve_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.beta.threads.runs",
-             "Runs.create_and_stream",
-             runs_create_and_stream_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.beta.threads.messages",
-             "Messages.list",
-             messages_list_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.responses",
-             "Responses.create",
-             responses_get_or_create_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.responses",
-             "Responses.retrieve",
-             responses_get_or_create_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.responses",
-             "Responses.cancel",
-             responses_cancel_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.responses",
-             "AsyncResponses.create",
-             async_responses_get_or_create_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.responses",
-             "AsyncResponses.retrieve",
-             async_responses_get_or_create_wrapper(tracer),
-         )
-         self._try_wrap(
-             "openai.resources.responses",
-             "AsyncResponses.cancel",
-             async_responses_cancel_wrapper(tracer),
-         )
-
-     def _uninstrument(self, **kwargs):
-         unwrap("openai.resources.chat.completions", "Completions.create")
-         unwrap("openai.resources.completions", "Completions.create")
-         unwrap("openai.resources.embeddings", "Embeddings.create")
-         unwrap("openai.resources.chat.completions", "AsyncCompletions.create")
-         unwrap("openai.resources.completions", "AsyncCompletions.create")
-         unwrap("openai.resources.embeddings", "AsyncEmbeddings.create")
-         unwrap("openai.resources.images", "Images.generate")
-         unwrap("openai.resources.audio.transcriptions", "Transcriptions.create")
-         unwrap("openai.resources.audio.transcriptions", "AsyncTranscriptions.create")
-
-         # Beta APIs may not be available consistently in all versions
-         try:
-             unwrap("openai.resources.beta.assistants", "Assistants.create")
-             unwrap("openai.resources.beta.chat.completions", "Completions.parse")
-             unwrap("openai.resources.beta.chat.completions", "AsyncCompletions.parse")
-             unwrap("openai.resources.beta.threads.runs", "Runs.create")
-             unwrap("openai.resources.beta.threads.runs", "Runs.retrieve")
-             unwrap("openai.resources.beta.threads.runs", "Runs.create_and_stream")
-             unwrap("openai.resources.beta.threads.messages", "Messages.list")
-             unwrap("openai.resources.responses", "Responses.create")
-             unwrap("openai.resources.responses", "Responses.retrieve")
-             unwrap("openai.resources.responses", "Responses.cancel")
-             unwrap("openai.resources.responses", "AsyncResponses.create")
-             unwrap("openai.resources.responses", "AsyncResponses.retrieve")
-             unwrap("openai.resources.responses", "AsyncResponses.cancel")
-         except ImportError:
-             pass
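The removed _try_wrap helper above captures a graceful-wrapping pattern: attempt to patch a method with wrapt and silently skip targets that do not exist in the installed openai version. A minimal standalone sketch of that pattern, assuming only the wrapt library — the log_calls wrapper is illustrative and not code from this package:

    from wrapt import wrap_function_wrapper

    def log_calls(wrapped, instance, args, kwargs):
        # Standard wrapt wrapper signature: (wrapped, instance, args, kwargs).
        print(f"calling {wrapped.__qualname__}")
        return wrapped(*args, **kwargs)

    def try_wrap(module, function, wrapper):
        # Patch module.function if present; older openai releases may not have it.
        try:
            wrap_function_wrapper(module, function, wrapper)
        except (AttributeError, ModuleNotFoundError):
            pass

    # The Responses API, for example, exists only in newer openai releases.
    try_wrap("openai.resources.responses", "Responses.create", log_calls)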