paid-python 0.6.0__py3-none-any.whl → 1.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. paid/__init__.py +31 -0
  2. paid/client.py +1 -472
  3. paid/core/client_wrapper.py +3 -2
  4. paid/customers/__init__.py +3 -0
  5. paid/customers/client.py +428 -4
  6. paid/customers/raw_client.py +594 -2
  7. paid/customers/types/__init__.py +8 -0
  8. paid/customers/types/customers_check_entitlement_request_view.py +5 -0
  9. paid/customers/types/customers_check_entitlement_response.py +22 -0
  10. paid/orders/client.py +435 -0
  11. paid/orders/raw_client.py +695 -0
  12. paid/plans/client.py +71 -0
  13. paid/plans/raw_client.py +121 -2
  14. paid/types/__init__.py +28 -0
  15. paid/types/cancel_renewal_response.py +49 -0
  16. paid/types/contact_create_for_customer.py +37 -0
  17. paid/types/invoice.py +75 -0
  18. paid/types/invoice_status.py +5 -0
  19. paid/types/payment_method.py +58 -0
  20. paid/types/payment_method_card.py +49 -0
  21. paid/types/payment_method_type.py +5 -0
  22. paid/types/payment_method_us_bank_account.py +36 -0
  23. paid/types/payment_method_us_bank_account_account_type.py +5 -0
  24. paid/types/plan_plan_products_item.py +6 -0
  25. paid/types/plan_with_features.py +69 -0
  26. paid/types/plan_with_features_features_item.py +34 -0
  27. paid/types/proration_attribute_update.py +44 -0
  28. paid/types/proration_detail.py +49 -0
  29. paid/types/proration_upgrade_response.py +73 -0
  30. paid/types/signal_v_2.py +5 -5
  31. paid/usage/client.py +6 -6
  32. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/METADATA +6 -4
  33. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/RECORD +35 -36
  34. opentelemetry/instrumentation/openai/__init__.py +0 -54
  35. opentelemetry/instrumentation/openai/shared/__init__.py +0 -399
  36. opentelemetry/instrumentation/openai/shared/audio_wrappers.py +0 -247
  37. opentelemetry/instrumentation/openai/shared/chat_wrappers.py +0 -1192
  38. opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -292
  39. opentelemetry/instrumentation/openai/shared/config.py +0 -15
  40. opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -311
  41. opentelemetry/instrumentation/openai/shared/event_emitter.py +0 -108
  42. opentelemetry/instrumentation/openai/shared/event_models.py +0 -41
  43. opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -68
  44. opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
  45. opentelemetry/instrumentation/openai/utils.py +0 -213
  46. opentelemetry/instrumentation/openai/v0/__init__.py +0 -176
  47. opentelemetry/instrumentation/openai/v1/__init__.py +0 -394
  48. opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -329
  49. opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -134
  50. opentelemetry/instrumentation/openai/v1/responses_wrappers.py +0 -1113
  51. opentelemetry/instrumentation/openai/version.py +0 -1
  52. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/LICENSE +0 -0
  53. {paid_python-0.6.0.dist-info → paid_python-1.0.0a0.dist-info}/WHEEL +0 -0
@@ -1,108 +0,0 @@
1
- from dataclasses import asdict
2
- from enum import Enum
3
- from typing import Union
4
-
5
- from opentelemetry._logs import LogRecord
6
- from opentelemetry.instrumentation.openai.shared.event_models import (
7
- ChoiceEvent,
8
- MessageEvent,
9
- )
10
- from opentelemetry.instrumentation.openai.utils import (
11
- should_emit_events,
12
- should_send_prompts,
13
- )
14
- from opentelemetry.semconv._incubating.attributes import (
15
- gen_ai_attributes as GenAIAttributes,
16
- )
17
-
18
- from .config import Config
19
-
20
-
21
- class Roles(Enum):
22
- USER = "user"
23
- ASSISTANT = "assistant"
24
- SYSTEM = "system"
25
- TOOL = "tool"
26
-
27
-
28
- VALID_MESSAGE_ROLES = {role.value for role in Roles}
29
- """The valid roles for naming the message event."""
30
-
31
- EVENT_ATTRIBUTES = {
32
- GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value
33
- }
34
- """The attributes to be used for the event."""
35
-
36
-
37
- def emit_event(event: Union[MessageEvent, ChoiceEvent]) -> None:
38
- """
39
- Emit an event to the OpenTelemetry SDK.
40
-
41
- Args:
42
- event: The event to emit.
43
- """
44
- if not should_emit_events():
45
- return
46
-
47
- if isinstance(event, MessageEvent):
48
- _emit_message_event(event)
49
- elif isinstance(event, ChoiceEvent):
50
- _emit_choice_event(event)
51
- else:
52
- raise TypeError("Unsupported event type")
53
-
54
-
55
- def _emit_message_event(event: MessageEvent) -> None:
56
- body = asdict(event)
57
-
58
- if event.role in VALID_MESSAGE_ROLES:
59
- name = "gen_ai.{}.message".format(event.role)
60
- # According to the semantic conventions, the role is conditionally required if available
61
- # and not equal to the "role" in the message name. So, remove the role from the body if
62
- # it is the same as the in the event name.
63
- body.pop("role", None)
64
- else:
65
- name = "gen_ai.user.message"
66
-
67
- # According to the semantic conventions, only the assistant role has tool call
68
- if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
69
- del body["tool_calls"]
70
- elif event.tool_calls is None:
71
- del body["tool_calls"]
72
-
73
- if not should_send_prompts():
74
- del body["content"]
75
- if body.get("tool_calls") is not None:
76
- for tool_call in body["tool_calls"]:
77
- tool_call["function"].pop("arguments", None)
78
-
79
- log_record = LogRecord(
80
- body=body,
81
- attributes=EVENT_ATTRIBUTES,
82
- event_name=name
83
- )
84
- Config.event_logger.emit(log_record)
85
-
86
-
87
- def _emit_choice_event(event: ChoiceEvent) -> None:
88
- body = asdict(event)
89
- if event.message["role"] == Roles.ASSISTANT.value:
90
- # According to the semantic conventions, the role is conditionally required if available
91
- # and not equal to "assistant", so remove the role from the body if it is "assistant".
92
- body["message"].pop("role", None)
93
-
94
- if event.tool_calls is None:
95
- del body["tool_calls"]
96
-
97
- if not should_send_prompts():
98
- body["message"].pop("content", None)
99
- if body.get("tool_calls") is not None:
100
- for tool_call in body["tool_calls"]:
101
- tool_call["function"].pop("arguments", None)
102
-
103
- log_record = LogRecord(
104
- body=body,
105
- attributes=EVENT_ATTRIBUTES,
106
- event_name="gen_ai.choice"
107
- )
108
- Config.event_logger.emit(log_record)
@@ -1,41 +0,0 @@
1
- from dataclasses import dataclass
2
- from typing import Any, List, Literal, Optional, TypedDict
3
-
4
-
5
- class _FunctionToolCall(TypedDict):
6
- function_name: str
7
- arguments: Optional[dict[str, Any]]
8
-
9
-
10
- class ToolCall(TypedDict):
11
- """Represents a tool call in the AI model."""
12
-
13
- id: str
14
- function: _FunctionToolCall
15
- type: Literal["function"]
16
-
17
-
18
- class CompletionMessage(TypedDict):
19
- """Represents a message in the AI model."""
20
-
21
- content: Any
22
- role: str = "assistant"
23
-
24
-
25
- @dataclass
26
- class MessageEvent:
27
- """Represents an input event for the AI model."""
28
-
29
- content: Any
30
- role: str = "user"
31
- tool_calls: Optional[List[ToolCall]] = None
32
-
33
-
34
- @dataclass
35
- class ChoiceEvent:
36
- """Represents a completion event for the AI model."""
37
-
38
- index: int
39
- message: CompletionMessage
40
- finish_reason: str = "unknown"
41
- tool_calls: Optional[List[ToolCall]] = None
@@ -1,68 +0,0 @@
1
- import time
2
-
3
- from opentelemetry import context as context_api
4
- from opentelemetry.instrumentation.openai import is_openai_v1
5
- from opentelemetry.instrumentation.openai.shared import (
6
- _get_openai_base_url,
7
- metric_shared_attributes,
8
- model_as_dict,
9
- )
10
- from opentelemetry.instrumentation.openai.utils import (
11
- _with_image_gen_metric_wrapper,
12
- )
13
- from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
14
- from opentelemetry.metrics import Counter, Histogram
15
- from opentelemetry.semconv_ai import SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
16
-
17
-
18
- @_with_image_gen_metric_wrapper
19
- def image_gen_metrics_wrapper(
20
- duration_histogram: Histogram,
21
- exception_counter: Counter,
22
- wrapped,
23
- instance,
24
- args,
25
- kwargs,
26
- ):
27
- if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
28
- SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
29
- ):
30
- return wrapped(*args, **kwargs)
31
-
32
- try:
33
- # record time for duration
34
- start_time = time.time()
35
- response = wrapped(*args, **kwargs)
36
- end_time = time.time()
37
- except Exception as e: # pylint: disable=broad-except
38
- end_time = time.time()
39
- duration = end_time - start_time if "start_time" in locals() else 0
40
-
41
- attributes = {
42
- "error.type": e.__class__.__name__,
43
- }
44
-
45
- if duration > 0 and duration_histogram:
46
- duration_histogram.record(duration, attributes=attributes)
47
- if exception_counter:
48
- exception_counter.add(1, attributes=attributes)
49
-
50
- raise
51
-
52
- if is_openai_v1():
53
- response_dict = model_as_dict(response)
54
- else:
55
- response_dict = response
56
-
57
- # not provide response.model in ImagesResponse response, use model in request kwargs
58
- shared_attributes = metric_shared_attributes(
59
- response_model=kwargs.get("model") or None,
60
- operation="image_gen",
61
- server_address=_get_openai_base_url(instance),
62
- )
63
-
64
- duration = end_time - start_time
65
- if duration_histogram:
66
- duration_histogram.record(duration, attributes=shared_attributes)
67
-
68
- return response
File without changes
@@ -1,213 +0,0 @@
1
- import asyncio
2
- import logging
3
- import os
4
- import threading
5
- import traceback
6
- from contextlib import asynccontextmanager
7
- from importlib.metadata import version
8
- from packaging import version as pkg_version
9
-
10
- from opentelemetry import context as context_api
11
- from opentelemetry._logs import Logger
12
- from opentelemetry.instrumentation.openai.shared.config import Config
13
-
14
- import openai
15
-
16
- _OPENAI_VERSION = version("openai")
17
-
18
- TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
19
-
20
-
21
- def is_openai_v1():
22
- return pkg_version.parse(_OPENAI_VERSION) >= pkg_version.parse("1.0.0")
23
-
24
-
25
- def is_reasoning_supported():
26
- # Reasoning has been introduced in OpenAI API on Dec 17, 2024
27
- # as per https://platform.openai.com/docs/changelog.
28
- # The updated OpenAI library version is 1.58.0
29
- # as per https://pypi.org/project/openai/.
30
- return pkg_version.parse(_OPENAI_VERSION) >= pkg_version.parse("1.58.0")
31
-
32
-
33
- def is_azure_openai(instance):
34
- return is_openai_v1() and isinstance(
35
- instance._client, (openai.AsyncAzureOpenAI, openai.AzureOpenAI)
36
- )
37
-
38
-
39
- def is_metrics_enabled() -> bool:
40
- return (os.getenv("TRACELOOP_METRICS_ENABLED") or "true").lower() == "true"
41
-
42
-
43
- def _with_image_gen_metric_wrapper(func):
44
- def _with_metric(duration_histogram, exception_counter):
45
- def wrapper(wrapped, instance, args, kwargs):
46
- return func(
47
- duration_histogram,
48
- exception_counter,
49
- wrapped,
50
- instance,
51
- args,
52
- kwargs,
53
- )
54
-
55
- return wrapper
56
-
57
- return _with_metric
58
-
59
-
60
- def _with_embeddings_telemetry_wrapper(func):
61
- def _with_embeddings_telemetry(
62
- tracer,
63
- token_counter,
64
- vector_size_counter,
65
- duration_histogram,
66
- exception_counter,
67
- ):
68
- def wrapper(wrapped, instance, args, kwargs):
69
- return func(
70
- tracer,
71
- token_counter,
72
- vector_size_counter,
73
- duration_histogram,
74
- exception_counter,
75
- wrapped,
76
- instance,
77
- args,
78
- kwargs,
79
- )
80
-
81
- return wrapper
82
-
83
- return _with_embeddings_telemetry
84
-
85
-
86
- def _with_audio_telemetry_wrapper(func):
87
- """Wrapper to convert the audio wrapper function into the expected format for wrapt."""
88
- def _with_audio_telemetry(
89
- tracer,
90
- duration_histogram,
91
- exception_counter,
92
- ):
93
- def wrapper(wrapped, instance, args, kwargs):
94
- return func(
95
- tracer,
96
- duration_histogram,
97
- exception_counter,
98
- wrapped,
99
- instance,
100
- args,
101
- kwargs,
102
- )
103
-
104
- return wrapper
105
-
106
- return _with_audio_telemetry
107
-
108
-
109
- def _with_chat_telemetry_wrapper(func):
110
- def _with_chat_telemetry(
111
- tracer,
112
- token_counter,
113
- choice_counter,
114
- duration_histogram,
115
- exception_counter,
116
- streaming_time_to_first_token,
117
- streaming_time_to_generate,
118
- ):
119
- def wrapper(wrapped, instance, args, kwargs):
120
- return func(
121
- tracer,
122
- token_counter,
123
- choice_counter,
124
- duration_histogram,
125
- exception_counter,
126
- streaming_time_to_first_token,
127
- streaming_time_to_generate,
128
- wrapped,
129
- instance,
130
- args,
131
- kwargs,
132
- )
133
-
134
- return wrapper
135
-
136
- return _with_chat_telemetry
137
-
138
-
139
- def _with_tracer_wrapper(func):
140
- def _with_tracer(tracer):
141
- def wrapper(wrapped, instance, args, kwargs):
142
- return func(tracer, wrapped, instance, args, kwargs)
143
-
144
- return wrapper
145
-
146
- return _with_tracer
147
-
148
-
149
- @asynccontextmanager
150
- async def start_as_current_span_async(tracer, *args, **kwargs):
151
- with tracer.start_as_current_span(*args, **kwargs) as span:
152
- yield span
153
-
154
-
155
- def dont_throw(func):
156
- """
157
- A decorator that wraps the passed in function and logs exceptions instead of throwing them.
158
- Works for both synchronous and asynchronous functions.
159
- """
160
- logger = logging.getLogger(func.__module__)
161
-
162
- async def async_wrapper(*args, **kwargs):
163
- try:
164
- return await func(*args, **kwargs)
165
- except Exception as e:
166
- _handle_exception(e, func, logger)
167
-
168
- def sync_wrapper(*args, **kwargs):
169
- try:
170
- return func(*args, **kwargs)
171
- except Exception as e:
172
- _handle_exception(e, func, logger)
173
-
174
- def _handle_exception(e, func, logger):
175
- logger.debug(
176
- "OpenLLMetry failed to trace in %s, error: %s",
177
- func.__name__,
178
- traceback.format_exc(),
179
- )
180
- if Config.exception_logger:
181
- Config.exception_logger(e)
182
-
183
- return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
184
-
185
-
186
- def run_async(method):
187
- try:
188
- loop = asyncio.get_running_loop()
189
- except RuntimeError:
190
- loop = None
191
-
192
- if loop and loop.is_running():
193
- thread = threading.Thread(target=lambda: asyncio.run(method))
194
- thread.start()
195
- thread.join()
196
- else:
197
- asyncio.run(method)
198
-
199
-
200
- def should_send_prompts():
201
- return (
202
- os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
203
- ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
204
-
205
-
206
- def should_emit_events() -> bool:
207
- """
208
- Checks if the instrumentation isn't using the legacy attributes
209
- and if the event logger is not None.
210
- """
211
- return not Config.use_legacy_attributes and isinstance(
212
- Config.event_logger, Logger
213
- )
@@ -1,176 +0,0 @@
1
- from typing import Collection
2
-
3
- from opentelemetry._logs import get_logger
4
- from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
5
- from opentelemetry.instrumentation.openai.shared.chat_wrappers import (
6
- achat_wrapper,
7
- chat_wrapper,
8
- )
9
- from opentelemetry.instrumentation.openai.shared.completion_wrappers import (
10
- acompletion_wrapper,
11
- completion_wrapper,
12
- )
13
- from opentelemetry.instrumentation.openai.shared.config import Config
14
- from opentelemetry.instrumentation.openai.shared.embeddings_wrappers import (
15
- aembeddings_wrapper,
16
- embeddings_wrapper,
17
- )
18
- from opentelemetry.instrumentation.openai.utils import is_metrics_enabled
19
- from opentelemetry.instrumentation.openai.version import __version__
20
- from opentelemetry.instrumentation.utils import unwrap
21
- from opentelemetry.metrics import get_meter
22
- from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
23
- from opentelemetry.semconv_ai import Meters
24
- from opentelemetry.trace import get_tracer
25
- from wrapt import wrap_function_wrapper
26
-
27
- _instruments = ("openai >= 0.27.0", "openai < 1.0.0")
28
-
29
-
30
- class OpenAIV0Instrumentor(BaseInstrumentor):
31
- def instrumentation_dependencies(self) -> Collection[str]:
32
- return _instruments
33
-
34
- def _instrument(self, **kwargs):
35
- tracer_provider = kwargs.get("tracer_provider")
36
- tracer = get_tracer(__name__, __version__, tracer_provider)
37
-
38
- meter_provider = kwargs.get("meter_provider")
39
- meter = get_meter(__name__, __version__, meter_provider)
40
-
41
- if not Config.use_legacy_attributes:
42
- logger_provider = kwargs.get("logger_provider")
43
- Config.event_logger = get_logger(
44
- __name__, __version__, logger_provider=logger_provider
45
- )
46
-
47
- if is_metrics_enabled():
48
- tokens_histogram = meter.create_histogram(
49
- name=Meters.LLM_TOKEN_USAGE,
50
- unit="token",
51
- description="Measures number of input and output tokens used",
52
- )
53
-
54
- chat_choice_counter = meter.create_counter(
55
- name=Meters.LLM_GENERATION_CHOICES,
56
- unit="choice",
57
- description="Number of choices returned by chat completions call",
58
- )
59
-
60
- duration_histogram = meter.create_histogram(
61
- name=Meters.LLM_OPERATION_DURATION,
62
- unit="s",
63
- description="GenAI operation duration",
64
- )
65
-
66
- chat_exception_counter = meter.create_counter(
67
- name=Meters.LLM_COMPLETIONS_EXCEPTIONS,
68
- unit="time",
69
- description="Number of exceptions occurred during chat completions",
70
- )
71
-
72
- streaming_time_to_first_token = meter.create_histogram(
73
- name=GenAIMetrics.GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
74
- unit="s",
75
- description="Time to first token in streaming chat completions",
76
- )
77
- streaming_time_to_generate = meter.create_histogram(
78
- name=Meters.LLM_STREAMING_TIME_TO_GENERATE,
79
- unit="s",
80
- description="Time between first token and completion in streaming chat completions",
81
- )
82
- else:
83
- (
84
- tokens_histogram,
85
- chat_choice_counter,
86
- duration_histogram,
87
- chat_exception_counter,
88
- streaming_time_to_first_token,
89
- streaming_time_to_generate,
90
- ) = (None, None, None, None, None, None)
91
-
92
- if is_metrics_enabled():
93
- embeddings_vector_size_counter = meter.create_counter(
94
- name=Meters.LLM_EMBEDDINGS_VECTOR_SIZE,
95
- unit="element",
96
- description="he size of returned vector",
97
- )
98
- embeddings_exception_counter = meter.create_counter(
99
- name=Meters.LLM_EMBEDDINGS_EXCEPTIONS,
100
- unit="time",
101
- description="Number of exceptions occurred during embeddings operation",
102
- )
103
- else:
104
- (
105
- tokens_histogram,
106
- embeddings_vector_size_counter,
107
- embeddings_exception_counter,
108
- ) = (None, None, None)
109
-
110
- wrap_function_wrapper(
111
- "openai",
112
- "Completion.create",
113
- completion_wrapper(tracer),
114
- )
115
-
116
- wrap_function_wrapper(
117
- "openai",
118
- "Completion.acreate",
119
- acompletion_wrapper(tracer),
120
- )
121
- wrap_function_wrapper(
122
- "openai",
123
- "ChatCompletion.create",
124
- chat_wrapper(
125
- tracer,
126
- tokens_histogram,
127
- chat_choice_counter,
128
- duration_histogram,
129
- chat_exception_counter,
130
- streaming_time_to_first_token,
131
- streaming_time_to_generate,
132
- ),
133
- )
134
- wrap_function_wrapper(
135
- "openai",
136
- "ChatCompletion.acreate",
137
- achat_wrapper(
138
- tracer,
139
- tokens_histogram,
140
- chat_choice_counter,
141
- duration_histogram,
142
- chat_exception_counter,
143
- streaming_time_to_first_token,
144
- streaming_time_to_generate,
145
- ),
146
- )
147
- wrap_function_wrapper(
148
- "openai",
149
- "Embedding.create",
150
- embeddings_wrapper(
151
- tracer,
152
- tokens_histogram,
153
- embeddings_vector_size_counter,
154
- duration_histogram,
155
- embeddings_exception_counter,
156
- ),
157
- )
158
- wrap_function_wrapper(
159
- "openai",
160
- "Embedding.acreate",
161
- aembeddings_wrapper(
162
- tracer,
163
- tokens_histogram,
164
- embeddings_vector_size_counter,
165
- duration_histogram,
166
- embeddings_exception_counter,
167
- ),
168
- )
169
-
170
- def _uninstrument(self, **kwargs):
171
- unwrap("openai", "Completion.create")
172
- unwrap("openai", "Completion.acreate")
173
- unwrap("openai", "ChatCompletion.create")
174
- unwrap("openai", "ChatCompletion.acreate")
175
- unwrap("openai", "Embedding.create")
176
- unwrap("openai", "Embedding.acreate")