paid-python 0.4.1a0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. paid/__init__.py +42 -4
  2. paid/agents/client.py +32 -0
  3. paid/agents/raw_client.py +32 -0
  4. paid/client.py +25 -2
  5. paid/core/client_wrapper.py +2 -3
  6. paid/customers/client.py +168 -36
  7. paid/customers/raw_client.py +217 -36
  8. paid/errors/__init__.py +2 -1
  9. paid/errors/internal_server_error.py +11 -0
  10. paid/orders/lines/client.py +0 -4
  11. paid/plans/__init__.py +4 -0
  12. paid/plans/client.py +261 -0
  13. paid/plans/raw_client.py +345 -0
  14. paid/products/__init__.py +7 -0
  15. paid/products/client.py +788 -0
  16. paid/products/raw_client.py +807 -0
  17. paid/products/types/__init__.py +7 -0
  18. paid/products/types/product_create_type.py +5 -0
  19. paid/traces/__init__.py +4 -0
  20. paid/traces/client.py +218 -0
  21. paid/traces/raw_client.py +226 -0
  22. paid/tracing/context_manager.py +9 -4
  23. paid/types/__init__.py +32 -2
  24. paid/types/cost_trace.py +6 -1
  25. paid/types/customer.py +4 -3
  26. paid/types/customer_update.py +4 -2
  27. paid/types/order_line_attribute_create_one.py +5 -0
  28. paid/types/order_line_create.py +26 -5
  29. paid/types/pagination_meta.py +26 -0
  30. paid/types/plan.py +81 -0
  31. paid/types/plan_plan_products_item.py +35 -0
  32. paid/types/plan_plan_products_item_plan_product_attribute_item.py +34 -0
  33. paid/types/product.py +56 -0
  34. paid/types/product_type.py +5 -0
  35. paid/types/product_update.py +36 -0
  36. paid/types/product_update_type.py +5 -0
  37. paid/types/signal.py +17 -5
  38. paid/types/signal_v_2.py +56 -0
  39. paid/types/trace.py +69 -0
  40. paid/types/traces_response.py +26 -0
  41. paid/types/{order_line_attribute_create.py → usage_pagination_meta.py} +16 -8
  42. paid/types/usage_summaries_response.py +26 -0
  43. paid/types/usage_summary.py +121 -0
  44. paid/types/usage_summary_order.py +26 -0
  45. paid/types/usage_summary_order_line.py +26 -0
  46. paid/usage/__init__.py +3 -0
  47. paid/usage/client.py +206 -0
  48. paid/usage/raw_client.py +283 -0
  49. paid/usage/types/__init__.py +7 -0
  50. paid/usage/types/usage_check_usage_response.py +53 -0
  51. {paid_python-0.4.1a0.dist-info → paid_python-0.5.0.dist-info}/METADATA +20 -20
  52. {paid_python-0.4.1a0.dist-info → paid_python-0.5.0.dist-info}/RECORD +54 -25
  53. {paid_python-0.4.1a0.dist-info → paid_python-0.5.0.dist-info}/LICENSE +0 -0
  54. {paid_python-0.4.1a0.dist-info → paid_python-0.5.0.dist-info}/WHEEL +0 -0
paid/usage/raw_client.py CHANGED
@@ -6,9 +6,16 @@ from json.decoder import JSONDecodeError
6
6
  from ..core.api_error import ApiError
7
7
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
8
8
  from ..core.http_response import AsyncHttpResponse, HttpResponse
9
+ from ..core.pydantic_utilities import parse_obj_as
9
10
  from ..core.request_options import RequestOptions
10
11
  from ..core.serialization import convert_and_respect_annotation_metadata
12
+ from ..errors.bad_request_error import BadRequestError
13
+ from ..errors.internal_server_error import InternalServerError
14
+ from ..errors.not_found_error import NotFoundError
15
+ from ..types.error import Error
11
16
  from ..types.signal import Signal
17
+ from ..types.signal_v_2 import SignalV2
18
+ from .types.usage_check_usage_response import UsageCheckUsageResponse
12
19
 
13
20
  # this is used as the default value for optional parameters
14
21
  OMIT = typing.cast(typing.Any, ...)
@@ -25,6 +32,8 @@ class RawUsageClient:
25
32
  request_options: typing.Optional[RequestOptions] = None,
26
33
  ) -> HttpResponse[None]:
27
34
  """
35
+ DEPRECATED: Use POST /usage/v2/signals/bulk instead for cleaner field names.
36
+
28
37
  Parameters
29
38
  ----------
30
39
  signals : typing.Optional[typing.Sequence[Signal]]
@@ -58,6 +67,142 @@ class RawUsageClient:
58
67
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
59
68
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
60
69
 
70
+ def usage_record_bulk_v_2(
71
+ self,
72
+ *,
73
+ signals: typing.Optional[typing.Sequence[SignalV2]] = OMIT,
74
+ request_options: typing.Optional[RequestOptions] = None,
75
+ ) -> HttpResponse[None]:
76
+ """
77
+ Parameters
78
+ ----------
79
+ signals : typing.Optional[typing.Sequence[SignalV2]]
80
+
81
+ request_options : typing.Optional[RequestOptions]
82
+ Request-specific configuration.
83
+
84
+ Returns
85
+ -------
86
+ HttpResponse[None]
87
+ """
88
+ _response = self._client_wrapper.httpx_client.request(
89
+ "usage/v2/signals/bulk",
90
+ method="POST",
91
+ json={
92
+ "signals": convert_and_respect_annotation_metadata(
93
+ object_=signals, annotation=typing.Sequence[SignalV2], direction="write"
94
+ ),
95
+ },
96
+ headers={
97
+ "content-type": "application/json",
98
+ },
99
+ request_options=request_options,
100
+ omit=OMIT,
101
+ )
102
+ try:
103
+ if 200 <= _response.status_code < 300:
104
+ return HttpResponse(response=_response, data=None)
105
+ if _response.status_code == 400:
106
+ raise BadRequestError(
107
+ headers=dict(_response.headers),
108
+ body=typing.cast(
109
+ Error,
110
+ parse_obj_as(
111
+ type_=Error, # type: ignore
112
+ object_=_response.json(),
113
+ ),
114
+ ),
115
+ )
116
+ _response_json = _response.json()
117
+ except JSONDecodeError:
118
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
119
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
120
+
121
+ def check_usage(
122
+ self,
123
+ *,
124
+ external_customer_id: str,
125
+ external_product_id: str,
126
+ request_options: typing.Optional[RequestOptions] = None,
127
+ ) -> HttpResponse[UsageCheckUsageResponse]:
128
+ """
129
+ Parameters
130
+ ----------
131
+ external_customer_id : str
132
+ External customer ID
133
+
134
+ external_product_id : str
135
+ External product ID (the external ID of the product/agent)
136
+
137
+ request_options : typing.Optional[RequestOptions]
138
+ Request-specific configuration.
139
+
140
+ Returns
141
+ -------
142
+ HttpResponse[UsageCheckUsageResponse]
143
+ Usage check response
144
+ """
145
+ _response = self._client_wrapper.httpx_client.request(
146
+ "usage/check-usage",
147
+ method="POST",
148
+ json={
149
+ "externalCustomerId": external_customer_id,
150
+ "externalProductId": external_product_id,
151
+ },
152
+ headers={
153
+ "content-type": "application/json",
154
+ },
155
+ request_options=request_options,
156
+ omit=OMIT,
157
+ )
158
+ try:
159
+ if 200 <= _response.status_code < 300:
160
+ _data = typing.cast(
161
+ UsageCheckUsageResponse,
162
+ parse_obj_as(
163
+ type_=UsageCheckUsageResponse, # type: ignore
164
+ object_=_response.json(),
165
+ ),
166
+ )
167
+ return HttpResponse(response=_response, data=_data)
168
+ if _response.status_code == 400:
169
+ raise BadRequestError(
170
+ headers=dict(_response.headers),
171
+ body=typing.cast(
172
+ Error,
173
+ parse_obj_as(
174
+ type_=Error, # type: ignore
175
+ object_=_response.json(),
176
+ ),
177
+ ),
178
+ )
179
+ if _response.status_code == 404:
180
+ raise NotFoundError(
181
+ headers=dict(_response.headers),
182
+ body=typing.cast(
183
+ Error,
184
+ parse_obj_as(
185
+ type_=Error, # type: ignore
186
+ object_=_response.json(),
187
+ ),
188
+ ),
189
+ )
190
+ if _response.status_code == 500:
191
+ raise InternalServerError(
192
+ headers=dict(_response.headers),
193
+ body=typing.cast(
194
+ Error,
195
+ parse_obj_as(
196
+ type_=Error, # type: ignore
197
+ object_=_response.json(),
198
+ ),
199
+ ),
200
+ )
201
+ _response_json = _response.json()
202
+ except JSONDecodeError:
203
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
204
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
205
+
61
206
 
62
207
  class AsyncRawUsageClient:
63
208
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -70,6 +215,8 @@ class AsyncRawUsageClient:
70
215
  request_options: typing.Optional[RequestOptions] = None,
71
216
  ) -> AsyncHttpResponse[None]:
72
217
  """
218
+ DEPRECATED: Use POST /usage/v2/signals/bulk instead for cleaner field names.
219
+
73
220
  Parameters
74
221
  ----------
75
222
  signals : typing.Optional[typing.Sequence[Signal]]
@@ -102,3 +249,139 @@ class AsyncRawUsageClient:
102
249
  except JSONDecodeError:
103
250
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
104
251
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
252
+
253
+ async def usage_record_bulk_v_2(
254
+ self,
255
+ *,
256
+ signals: typing.Optional[typing.Sequence[SignalV2]] = OMIT,
257
+ request_options: typing.Optional[RequestOptions] = None,
258
+ ) -> AsyncHttpResponse[None]:
259
+ """
260
+ Parameters
261
+ ----------
262
+ signals : typing.Optional[typing.Sequence[SignalV2]]
263
+
264
+ request_options : typing.Optional[RequestOptions]
265
+ Request-specific configuration.
266
+
267
+ Returns
268
+ -------
269
+ AsyncHttpResponse[None]
270
+ """
271
+ _response = await self._client_wrapper.httpx_client.request(
272
+ "usage/v2/signals/bulk",
273
+ method="POST",
274
+ json={
275
+ "signals": convert_and_respect_annotation_metadata(
276
+ object_=signals, annotation=typing.Sequence[SignalV2], direction="write"
277
+ ),
278
+ },
279
+ headers={
280
+ "content-type": "application/json",
281
+ },
282
+ request_options=request_options,
283
+ omit=OMIT,
284
+ )
285
+ try:
286
+ if 200 <= _response.status_code < 300:
287
+ return AsyncHttpResponse(response=_response, data=None)
288
+ if _response.status_code == 400:
289
+ raise BadRequestError(
290
+ headers=dict(_response.headers),
291
+ body=typing.cast(
292
+ Error,
293
+ parse_obj_as(
294
+ type_=Error, # type: ignore
295
+ object_=_response.json(),
296
+ ),
297
+ ),
298
+ )
299
+ _response_json = _response.json()
300
+ except JSONDecodeError:
301
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
302
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
303
+
304
+ async def check_usage(
305
+ self,
306
+ *,
307
+ external_customer_id: str,
308
+ external_product_id: str,
309
+ request_options: typing.Optional[RequestOptions] = None,
310
+ ) -> AsyncHttpResponse[UsageCheckUsageResponse]:
311
+ """
312
+ Parameters
313
+ ----------
314
+ external_customer_id : str
315
+ External customer ID
316
+
317
+ external_product_id : str
318
+ External product ID (the external ID of the product/agent)
319
+
320
+ request_options : typing.Optional[RequestOptions]
321
+ Request-specific configuration.
322
+
323
+ Returns
324
+ -------
325
+ AsyncHttpResponse[UsageCheckUsageResponse]
326
+ Usage check response
327
+ """
328
+ _response = await self._client_wrapper.httpx_client.request(
329
+ "usage/check-usage",
330
+ method="POST",
331
+ json={
332
+ "externalCustomerId": external_customer_id,
333
+ "externalProductId": external_product_id,
334
+ },
335
+ headers={
336
+ "content-type": "application/json",
337
+ },
338
+ request_options=request_options,
339
+ omit=OMIT,
340
+ )
341
+ try:
342
+ if 200 <= _response.status_code < 300:
343
+ _data = typing.cast(
344
+ UsageCheckUsageResponse,
345
+ parse_obj_as(
346
+ type_=UsageCheckUsageResponse, # type: ignore
347
+ object_=_response.json(),
348
+ ),
349
+ )
350
+ return AsyncHttpResponse(response=_response, data=_data)
351
+ if _response.status_code == 400:
352
+ raise BadRequestError(
353
+ headers=dict(_response.headers),
354
+ body=typing.cast(
355
+ Error,
356
+ parse_obj_as(
357
+ type_=Error, # type: ignore
358
+ object_=_response.json(),
359
+ ),
360
+ ),
361
+ )
362
+ if _response.status_code == 404:
363
+ raise NotFoundError(
364
+ headers=dict(_response.headers),
365
+ body=typing.cast(
366
+ Error,
367
+ parse_obj_as(
368
+ type_=Error, # type: ignore
369
+ object_=_response.json(),
370
+ ),
371
+ ),
372
+ )
373
+ if _response.status_code == 500:
374
+ raise InternalServerError(
375
+ headers=dict(_response.headers),
376
+ body=typing.cast(
377
+ Error,
378
+ parse_obj_as(
379
+ type_=Error, # type: ignore
380
+ object_=_response.json(),
381
+ ),
382
+ ),
383
+ )
384
+ _response_json = _response.json()
385
+ except JSONDecodeError:
386
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
387
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@@ -0,0 +1,7 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ # isort: skip_file
4
+
5
+ from .usage_check_usage_response import UsageCheckUsageResponse
6
+
7
+ __all__ = ["UsageCheckUsageResponse"]
@@ -0,0 +1,53 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ import pydantic
6
+ import typing_extensions
7
+ from ...core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
8
+ from ...core.serialization import FieldMetadata
9
+
10
+
11
+ class UsageCheckUsageResponse(UniversalBaseModel):
12
+ allowed: typing.Optional[bool] = pydantic.Field(default=None)
13
+ """
14
+ Whether usage is allowed
15
+ """
16
+
17
+ message: typing.Optional[str] = pydantic.Field(default=None)
18
+ """
19
+ Human-readable message about the usage check result
20
+ """
21
+
22
+ event_name: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="eventName")] = pydantic.Field(
23
+ default=None
24
+ )
25
+ """
26
+ Event name (only present when usage is not allowed)
27
+ """
28
+
29
+ available: typing.Optional[float] = pydantic.Field(default=None)
30
+ """
31
+ Available credits (only present for PrepaidCredits when insufficient)
32
+ """
33
+
34
+ events_quantity: typing_extensions.Annotated[typing.Optional[float], FieldMetadata(alias="eventsQuantity")] = (
35
+ pydantic.Field(default=None)
36
+ )
37
+ """
38
+ Current events quantity (only present when usage exceeds limit)
39
+ """
40
+
41
+ limit: typing.Optional[float] = pydantic.Field(default=None)
42
+ """
43
+ Usage limit (only present when usage exceeds limit)
44
+ """
45
+
46
+ if IS_PYDANTIC_V2:
47
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
48
+ else:
49
+
50
+ class Config:
51
+ frozen = True
52
+ smart_union = True
53
+ extra = pydantic.Extra.allow
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: paid-python
3
- Version: 0.4.1a0
3
+ Version: 0.5.0
4
4
  Summary:
5
5
  Requires-Python: >=3.9,<3.14
6
6
  Classifier: Intended Audience :: Developers
@@ -183,7 +183,7 @@ The easiest way to add cost tracking is using the `@paid_tracing` decorator or c
183
183
  ```python
184
184
  from paid.tracing import paid_tracing
185
185
 
186
- @paid_tracing("<external_customer_id>", external_agent_id="<optional_external_agent_id>")
186
+ @paid_tracing("<external_customer_id>", external_product_id="<optional_external_product_id>")
187
187
  def some_agent_workflow(): # your function
188
188
  # Your logic - use any AI providers with Paid wrappers or send signals with signal().
189
189
  # This function is typically an event processor that should lead to AI calls or events emitted as Paid signals
@@ -197,11 +197,11 @@ You can also use `paid_tracing` as a context manager with `with` statements:
197
197
  from paid.tracing import paid_tracing
198
198
 
199
199
  # Synchronous
200
- with paid_tracing("customer_123", external_agent_id="agent_456"):
200
+ with paid_tracing("customer_123", external_product_id="product_456"):
201
201
  result = workflow()
202
202
 
203
203
  # Asynchronous
204
- async with paid_tracing("customer_123", external_agent_id="agent_456"):
204
+ async with paid_tracing("customer_123", external_product_id="product_456"):
205
205
  result = await workflow()
206
206
  ```
207
207
 
@@ -210,7 +210,7 @@ Both approaches:
210
210
  - Initialize tracing using your API key you provided to the Paid client, falls back to `PAID_API_KEY` environment variable.
211
211
  - Handle both sync and async functions/code blocks
212
212
  - Gracefully fall back to normal execution if tracing fails
213
- - Support the same parameters: `external_customer_id`, `external_agent_id`, `tracing_token`, `store_prompt`, `metadata`
213
+ - Support the same parameters: `external_customer_id`, `external_product_id`, `tracing_token`, `store_prompt`, `metadata`
214
214
 
215
215
  * Note - if it happens that you're calling `paid_tracing` from non-main thread, then it's advised to initialize from main thread:
216
216
  ```python
@@ -249,7 +249,7 @@ openAIClient = PaidOpenAI(OpenAI(
249
249
  api_key="<OPENAI_API_KEY>",
250
250
  ))
251
251
 
252
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
252
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
253
253
  def image_generate():
254
254
  response = openAIClient.images.generate(
255
255
  model="dall-e-3",
@@ -281,7 +281,7 @@ You can attach custom metadata to your traces by passing a `metadata` dictionary
281
281
 
282
282
  @paid_tracing(
283
283
  "customer_123",
284
- "agent_123",
284
+ external_product_id="product_123",
285
285
  metadata={
286
286
  "campaign_id": "campaign_456",
287
287
  "environment": "production",
@@ -325,7 +325,7 @@ You can attach custom metadata to your traces by passing a `metadata` dictionary
325
325
  # Pass metadata to context manager
326
326
  with paid_tracing(
327
327
  "customer_123",
328
- external_agent_id="agent_123",
328
+ external_product_id="product_123",
329
329
  metadata={
330
330
  "campaign_id": "campaign_456",
331
331
  "environment": "production",
@@ -380,7 +380,7 @@ paid_autoinstrument() # instruments all available: anthropic, gemini, openai, o
380
380
  # Now all OpenAI calls will be automatically traced
381
381
  openai_client = OpenAI(api_key="<OPENAI_API_KEY>")
382
382
 
383
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
383
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
384
384
  def chat_with_gpt():
385
385
  response = openai_client.chat.completions.create(
386
386
  model="gpt-4",
@@ -433,7 +433,7 @@ Here's an example of how to use it:
433
433
  ```python
434
434
  from paid.tracing import paid_tracing, signal
435
435
 
436
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
436
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
437
437
  def do_work():
438
438
  # ...do some work...
439
439
  signal(
@@ -457,7 +457,7 @@ def do_work():
457
457
  )
458
458
 
459
459
  # Use context manager instead
460
- with paid_tracing("your_external_customer_id", "your_external_agent_id"):
460
+ with paid_tracing("your_external_customer_id", external_product_id="your_external_product_id"):
461
461
  do_work()
462
462
  ```
463
463
 
@@ -472,7 +472,7 @@ This will look something like this:
472
472
  ```python
473
473
  from paid.tracing import paid_tracing, signal
474
474
 
475
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
475
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
476
476
  def do_work():
477
477
  # ... your workflow logic
478
478
  # ... your AI calls made through Paid wrappers or hooks
@@ -516,7 +516,7 @@ print(f"Tracing token: {token}")
516
516
  # Store token for other processes (e.g., in Redis, database, message queue)
517
517
  save_to_storage("workflow_123", token)
518
518
 
519
- @paid_tracing("customer_123", tracing_token=token, external_agent_id="agent_123")
519
+ @paid_tracing("customer_123", tracing_token=token, external_product_id="product_123")
520
520
  def process_part_1():
521
521
  # AI calls here will be traced
522
522
  response = openai_client.chat.completions.create(
@@ -531,7 +531,7 @@ process_part_1()
531
531
  # Process 2 (different machine/process): Retrieve and use token
532
532
  token = load_from_storage("workflow_123")
533
533
 
534
- @paid_tracing("customer_123", tracing_token=token, external_agent_id="agent_123")
534
+ @paid_tracing("customer_123", tracing_token=token, external_product_id="product_123")
535
535
  def process_part_2():
536
536
  # AI calls here will be linked to the same trace
537
537
  response = openai_client.chat.completions.create(
@@ -561,7 +561,7 @@ openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))
561
561
  token = generate_tracing_token()
562
562
  save_to_storage("workflow_123", token)
563
563
 
564
- with paid_tracing("customer_123", external_agent_id="agent_123", tracing_token=token):
564
+ with paid_tracing("customer_123", external_product_id="product_123", tracing_token=token):
565
565
  response = openai_client.chat.completions.create(
566
566
  model="gpt-4",
567
567
  messages=[{"role": "user", "content": "Analyze data"}]
@@ -571,7 +571,7 @@ with paid_tracing("customer_123", external_agent_id="agent_123", tracing_token=t
571
571
  # Process 2: Retrieve and use the same token
572
572
  token = load_from_storage("workflow_123")
573
573
 
574
- with paid_tracing("customer_123", external_agent_id="agent_123", tracing_token=token):
574
+ with paid_tracing("customer_123", external_product_id="product_123", tracing_token=token):
575
575
  response = openai_client.chat.completions.create(
576
576
  model="gpt-4",
577
577
  messages=[{"role": "user", "content": "Generate response"}]
@@ -613,7 +613,7 @@ Alternatively the same `costData` payload can be passed to OTLP signaling mechan
613
613
  ```python
614
614
  from paid.tracing import paid_tracing, signal
615
615
 
616
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
616
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
617
617
  def do_work():
618
618
  # ...do some work...
619
619
  signal(
@@ -667,7 +667,7 @@ Same but via OTEL signaling:
667
667
  ```python
668
668
  from paid.tracing import paid_tracing, signal
669
669
 
670
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
670
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
671
671
  def do_work():
672
672
  # ...do some work...
673
673
  signal(
@@ -719,7 +719,7 @@ initialize_tracing()
719
719
  # Wrap the async OpenAI client
720
720
  openai_client = PaidAsyncOpenAI(AsyncOpenAI(api_key="<OPENAI_API_KEY>"))
721
721
 
722
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
722
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
723
723
  async def generate_image():
724
724
  response = await openai_client.images.generate(
725
725
  model="dall-e-3",
@@ -747,7 +747,7 @@ initialize_tracing()
747
747
 
748
748
  openai_client = PaidAsyncOpenAI(AsyncOpenAI(api_key="<OPENAI_API_KEY>"))
749
749
 
750
- @paid_tracing("your_external_customer_id", "your_external_agent_id")
750
+ @paid_tracing("your_external_customer_id", external_product_id="your_external_product_id")
751
751
  async def do_work():
752
752
  # Perform async AI operations
753
753
  response = await openai_client.chat.completions.create(