paid-python 0.0.5a40__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,10 @@
1
1
  from typing import Any, Optional
2
2
 
3
- from opentelemetry import trace
4
- from opentelemetry.trace import Status, StatusCode
3
+ from opentelemetry.trace import Span, Status, StatusCode
5
4
 
5
+ from paid.logger import logger
6
6
  from paid.tracing.tracing import (
7
7
  get_paid_tracer,
8
- logger,
9
- paid_external_agent_id_var,
10
- paid_external_customer_id_var,
11
- paid_token_var,
12
8
  )
13
9
 
14
10
  try:
@@ -22,7 +18,7 @@ except ImportError:
22
18
 
23
19
  # Global dictionary to store spans keyed by context object ID
24
20
  # This avoids polluting user's context.context and works across async boundaries
25
- _paid_span_store: dict[int, trace.Span] = {}
21
+ _paid_span_store: dict[int, Span] = {}
26
22
 
27
23
 
28
24
  class PaidOpenAIAgentsHook(RunHooks[Any]):
@@ -32,14 +28,12 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
32
28
  Can optionally wrap user-provided hooks to combine Paid tracking with custom behavior.
33
29
  """
34
30
 
35
- def __init__(self, user_hooks: Optional[RunHooks[Any]] = None, optional_tracing: bool = False):
31
+ def __init__(self, user_hooks: Optional[RunHooks[Any]] = None):
36
32
  """
37
33
  Initialize PaidOpenAIAgentsHook.
38
34
 
39
35
  Args:
40
36
  user_hooks: Optional user-provided RunHooks to combine with Paid tracking
41
- optional_tracing: If True, gracefully skip tracing when context is missing.
42
- If False, raise errors when tracing context is not available.
43
37
 
44
38
  Usage:
45
39
  @paid_tracing("<ext_customer_id>", "<ext_agent_id>")
@@ -55,67 +49,26 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
55
49
 
56
50
  my_hook = MyHook()
57
51
  hook = PaidOpenAIAgentsHook(user_hooks=my_hook)
58
-
59
- # Optional tracing (won't raise errors if context missing)
60
- hook = PaidAgentsHook(optional_tracing=True)
61
52
  """
62
53
  super().__init__()
63
- self.tracer = get_paid_tracer()
64
- self.optional_tracing = optional_tracing
65
54
  self.user_hooks = user_hooks
66
55
 
67
- def _get_context_vars(self):
68
- """Get tracing context from context variables set by Paid.trace()."""
69
- external_customer_id = paid_external_customer_id_var.get()
70
- external_agent_id = paid_external_agent_id_var.get()
71
- token = paid_token_var.get()
72
- return external_customer_id, external_agent_id, token
73
-
74
- def _should_skip_tracing(self, external_customer_id: Optional[str], token: Optional[str]) -> bool:
75
- """Check if tracing should be skipped."""
76
- # Check if there's an active span (from Paid.trace())
77
- current_span = trace.get_current_span()
78
- if current_span == trace.INVALID_SPAN:
79
- if self.optional_tracing:
80
- logger.info(f"{self.__class__.__name__} No tracing, skipping LLM tracking.")
81
- return True
82
- raise RuntimeError("No OTEL span found. Make sure to call this method from Paid.trace().")
83
-
84
- if not (external_customer_id and token):
85
- if self.optional_tracing:
86
- logger.info(f"{self.__class__.__name__} No external_customer_id or token, skipping LLM tracking")
87
- return True
88
- raise RuntimeError(
89
- "Missing required tracing information: external_customer_id or token."
90
- " Make sure to call this method from Paid.trace()."
91
- )
92
- return False
93
-
94
56
  def _start_span(self, context, agent, hook_name) -> None:
95
57
  try:
96
- external_customer_id, external_agent_id, token = self._get_context_vars()
97
-
98
- # Skip tracing if required context is missing
99
- if self._should_skip_tracing(external_customer_id, token):
100
- return
58
+ tracer = get_paid_tracer()
101
59
 
102
60
  # Get model name from agent
103
61
  model_name = str(agent.model if agent.model else get_default_model())
104
62
 
105
63
  # Start span for this LLM call
106
- span = self.tracer.start_span(f"openai.agents.{hook_name}")
107
- logger.debug(f"{hook_name} : started span")
64
+ span = tracer.start_span(f"openai.agents.{hook_name}")
108
65
 
109
66
  # Set initial attributes
110
67
  attributes = {
111
68
  "gen_ai.system": "openai",
112
69
  "gen_ai.operation.name": f"{hook_name}",
113
- "external_customer_id": external_customer_id,
114
- "token": token,
115
70
  "gen_ai.request.model": model_name,
116
71
  }
117
- if external_agent_id:
118
- attributes["external_agent_id"] = external_agent_id
119
72
 
120
73
  span.set_attributes(attributes)
121
74
 
@@ -123,7 +76,6 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
123
76
  # This works across async boundaries without polluting user's context
124
77
  context_id = id(context)
125
78
  _paid_span_store[context_id] = span
126
- logger.debug(f"_start_span: Stored span for context ID {context_id}")
127
79
 
128
80
  except Exception as error:
129
81
  logger.error(f"Error while starting span in PaidAgentsHook.{hook_name}: {error}")
@@ -133,7 +85,6 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
133
85
  # Retrieve span from global dict using context object ID
134
86
  context_id = id(context)
135
87
  span = _paid_span_store.get(context_id)
136
- logger.debug(f"_end_span: Retrieved span for context ID {context_id}: {span}")
137
88
 
138
89
  if span:
139
90
  # Get usage data from the response
@@ -161,17 +112,13 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
161
112
  span.set_status(Status(StatusCode.ERROR, "No usage available"))
162
113
 
163
114
  span.end()
164
- logger.debug(f"{hook_name} : ended span")
165
115
 
166
116
  # Clean up from global dict
167
117
  del _paid_span_store[context_id]
168
- logger.debug(f"_end_span: Cleaned up span for context ID {context_id}")
169
- else:
170
- logger.warning(f"_end_span: No span found for context ID {context_id}")
171
118
 
172
119
  except Exception as error:
173
- logger.error(f"Error while ending span in PaidAgentsHook.{hook_name}_end: {error}")
174
120
  # Try to end span on error
121
+ logger.error(f"Error while ending span in PaidAgentsHook.{hook_name}: {error}")
175
122
  try:
176
123
  context_id = id(context)
177
124
  span = _paid_span_store.get(context_id)
@@ -181,26 +128,18 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
181
128
  span.end()
182
129
  del _paid_span_store[context_id]
183
130
  except:
184
- pass
131
+ logger.error(f"Failed to end span after error in PaidAgentsHook.{hook_name}")
185
132
 
186
133
  async def on_llm_start(self, context, agent, system_prompt, input_items) -> None:
187
- logger.debug(f"on_llm_start : context_usage : {getattr(context, 'usage', None)}")
188
-
189
134
  if self.user_hooks and hasattr(self.user_hooks, "on_llm_start"):
190
135
  await self.user_hooks.on_llm_start(context, agent, system_prompt, input_items)
191
136
 
192
137
  async def on_llm_end(self, context, agent, response) -> None:
193
- logger.debug(
194
- f"on_llm_end : context_usage : {getattr(context, 'usage', None)} : response_usage : {getattr(response, 'usage', None)}"
195
- )
196
-
197
138
  if self.user_hooks and hasattr(self.user_hooks, "on_llm_end"):
198
139
  await self.user_hooks.on_llm_end(context, agent, response)
199
140
 
200
141
  async def on_agent_start(self, context, agent) -> None:
201
142
  """Start a span for agent operations and call user hooks."""
202
- logger.debug(f"on_agent_start : context_usage : {getattr(context, 'usage', None)}")
203
-
204
143
  if self.user_hooks and hasattr(self.user_hooks, "on_agent_start"):
205
144
  await self.user_hooks.on_agent_start(context, agent)
206
145
 
@@ -208,26 +147,19 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
208
147
 
209
148
  async def on_agent_end(self, context, agent, output) -> None:
210
149
  """End the span for agent operations and call user hooks."""
211
- logger.debug(f"on_agent_end : context_usage : {getattr(context, 'usage', None)}")
212
-
213
150
  self._end_span(context, "on_agent")
214
151
 
215
152
  if self.user_hooks and hasattr(self.user_hooks, "on_agent_end"):
216
153
  await self.user_hooks.on_agent_end(context, agent, output)
217
154
 
218
155
  async def on_handoff(self, context, from_agent, to_agent) -> None:
219
- logger.debug(f"on_handoff : context_usage : {getattr(context, 'usage', None)}")
220
156
  if self.user_hooks and hasattr(self.user_hooks, "on_handoff"):
221
157
  await self.user_hooks.on_handoff(context, from_agent, to_agent)
222
158
 
223
159
  async def on_tool_start(self, context, agent, tool) -> None:
224
- logger.debug(f"on_tool_start : context_usage : {getattr(context, 'usage', None)}")
225
-
226
160
  if self.user_hooks and hasattr(self.user_hooks, "on_tool_start"):
227
161
  await self.user_hooks.on_tool_start(context, agent, tool)
228
162
 
229
163
  async def on_tool_end(self, context, agent, tool, result) -> None:
230
- logger.debug(f"on_tool_end : context_usage : {getattr(context, 'usage', None)}")
231
-
232
164
  if self.user_hooks and hasattr(self.user_hooks, "on_tool_end"):
233
165
  await self.user_hooks.on_tool_end(context, agent, tool, result)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: paid-python
3
- Version: 0.0.5a40
3
+ Version: 0.1.1
4
4
  Summary:
5
5
  Requires-Python: >=3.9,<3.14
6
6
  Classifier: Intended Audience :: Developers
@@ -135,7 +135,7 @@ from paid.tracing import paid_tracing
135
135
 
136
136
  @paid_tracing("<external_customer_id>", external_agent_id="<optional_external_agent_id>")
137
137
  def some_agent_workflow(): # your function
138
- # Your logic - use any AI providers with Paid wrappers or send signals with Paid.signal().
138
+ # Your logic - use any AI providers with Paid wrappers or send signals with signal().
139
139
  # This function is typically an event processor that should lead to AI calls or events emitted as Paid signals
140
140
  ```
141
141
 
@@ -205,6 +205,98 @@ def image_generate():
205
205
  image_generate()
206
206
  ```
207
207
 
208
+ ### Passing User Metadata
209
+
210
+ You can attach custom metadata to your traces by passing a `metadata` dictionary to the `paid_tracing()` decorator or context manager. This metadata will be stored with the trace and can be used to filter and query traces later.
211
+
212
+ <Tabs>
213
+ <Tab title="Python - Decorator">
214
+ ```python
215
+ from paid.tracing import paid_tracing, signal
216
+ from paid.tracing.wrappers import PaidOpenAI
217
+ from openai import OpenAI
218
+
219
+ openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))
220
+
221
+ @paid_tracing(
222
+ "customer_123",
223
+ "agent_123",
224
+ metadata={
225
+ "campaign_id": "campaign_456",
226
+ "environment": "production",
227
+ "user_tier": "enterprise"
228
+ }
229
+ )
230
+ def process_event(event):
231
+ """Process event with custom metadata"""
232
+ response = openai_client.chat.completions.create(
233
+ model="gpt-4",
234
+ messages=[{"role": "user", "content": event.content}]
235
+ )
236
+
237
+ signal("event_processed", enable_cost_tracing=True)
238
+ return response
239
+
240
+ process_event(incoming_event)
241
+ ```
242
+ </Tab>
243
+
244
+ <Tab title="Python - Context Manager">
245
+ ```python
246
+ from paid.tracing import paid_tracing, signal
247
+ from paid.tracing.wrappers import PaidOpenAI
248
+ from openai import OpenAI
249
+
250
+ openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))
251
+
252
+ def process_event(event):
253
+ """Process event with custom metadata"""
254
+ response = openai_client.chat.completions.create(
255
+ model="gpt-4",
256
+ messages=[{"role": "user", "content": event.content}]
257
+ )
258
+
259
+ signal("event_processed", enable_cost_tracing=True)
260
+ return response
261
+
262
+ # Pass metadata to context manager
263
+ with paid_tracing(
264
+ "customer_123",
265
+ external_agent_id="agent_123",
266
+ metadata={
267
+ "campaign_id": "campaign_456",
268
+ "environment": "production",
269
+ "user_tier": "enterprise"
270
+ }
271
+ ):
272
+ process_event(incoming_event)
273
+ ```
274
+ </Tab>
275
+
276
+ <Tab title="Node.js">
277
+ ```typescript
278
+ // Metadata support is not yet available in the Node.js SDK.
279
+ // Please use Python for passing custom metadata to traces.
280
+ ```
281
+ </Tab>
282
+ </Tabs>
283
+
284
+ #### Querying Traces by Metadata
285
+
286
+ Once you've added metadata to your traces, you can filter traces using the `metadata` parameter in the traces API endpoint:
287
+
288
+ ```bash
289
+ # Filter by single metadata field
290
+ curl -G "https://api.paid.ai/api/organizations/{orgId}/traces" \
291
+ --data-urlencode 'metadata={"campaign_id":"campaign_456"}' \
292
+ -H "Authorization: Bearer YOUR_API_KEY"
293
+
294
+ # Filter by multiple metadata fields (all must match)
295
+ curl -G "https://api.paid.ai/api/organizations/{orgId}/traces" \
296
+ --data-urlencode 'metadata={"campaign_id":"campaign_456","environment":"production"}' \
297
+ -H "Authorization: Bearer YOUR_API_KEY"
298
+ ```
299
+
208
300
  ### Auto-Instrumentation (OpenTelemetry Instrumentors)
209
301
 
210
302
  For maximum convenience, you can use OpenTelemetry auto-instrumentation to automatically track costs without modifying your AI library calls. This approach uses official OpenTelemetry instrumentors for supported AI libraries.
@@ -263,31 +355,24 @@ paid_autoinstrument(libraries=["anthropic", "openai"])
263
355
 
264
356
  - Auto-instrumentation uses official OpenTelemetry instrumentors for each AI library
265
357
  - It automatically wraps library calls without requiring you to use Paid wrapper classes
266
- - Works seamlessly with `@paid_tracing()` decorator or `Paid.trace()` callback
358
+ - Works seamlessly with `@paid_tracing()` decorator or context manager
267
359
  - Costs are tracked in the same way as when using manual wrappers
268
360
  - Should be called once during application startup, typically before creating AI client instances
269
361
 
270
362
  ## Signaling via OTEL tracing
271
363
 
272
- A more reliable and user-friendly way to send signals is to send them via OTEL tracing.
273
- This allows you to send signals with less arguments and boilerplate as the information is available in the tracing context `Paid.trace()` or `@paid_tracing()`.
274
- The interface is `Paid.signal()`, which takes in signal name, optional data, and a flag that attaches costs from the same trace.
275
- `Paid.signal()` has to be called within a trace - meaning inside of a callback to `Paid.trace()`.
276
- In contrast to `Paid.usage.record_bulk()`, `Paid.signal()` is using OpenTelemetry to provide reliable delivery.
364
+ Signals allow you to emit events within your tracing context. They have access to all tracing information, so you need fewer arguments compared to manual API calls.
365
+ Use the `signal()` function, which must be called within an active `@paid_tracing()` context (decorator or context manager).
277
366
 
278
367
  Here's an example of how to use it:
279
368
 
280
369
  ```python
281
- from paid import Paid
282
- from paid.tracing import paid_tracing
370
+ from paid.tracing import paid_tracing, signal
283
371
 
284
- # Initialize Paid SDK
285
- client = Paid(token="PAID_API_KEY")
286
-
287
- @paid_tracing("your_external_customer_id", "your_external_agent_id") # external_agent_id is necessary for sending signals
372
+ @paid_tracing("your_external_customer_id", "your_external_agent_id")
288
373
  def do_work():
289
374
  # ...do some work...
290
- client.signal(
375
+ signal(
291
376
  event_name="<your_signal_name>",
292
377
  data={ } # optional data (ex. manual cost tracking data)
293
378
  )
@@ -295,28 +380,21 @@ def do_work():
295
380
  do_work()
296
381
  ```
297
382
 
298
- Same, but using callback to specify the function to trace:
383
+ Same approach with context manager:
299
384
 
300
385
  ```python
301
- from paid import Paid
302
-
303
- # Initialize Paid SDK
304
- client = Paid(token="PAID_API_KEY")
305
-
306
- # Initialize tracing, must be after initializing Paid SDK
307
- client.initialize_tracing()
386
+ from paid.tracing import paid_tracing, signal
308
387
 
309
388
  def do_work():
310
389
  # ...do some work...
311
- client.signal(
390
+ signal(
312
391
  event_name="<your_signal_name>",
313
392
  data={ } # optional data (ex. manual cost tracking data)
314
393
  )
315
394
 
316
- # Finally, capture the traces!
317
- client.trace(external_customer_id = "<your_external_customer_id>",
318
- external_agent_id = "<your_external_agent_id>", # external_agent_id is required for signals
319
- fn = lambda: do_work())
395
+ # Use context manager instead
396
+ with paid_tracing("your_external_customer_id", "your_external_agent_id"):
397
+ do_work()
320
398
  ```
321
399
 
322
400
  ### Signal-costs - Attaching cost traces to a signal
@@ -328,17 +406,13 @@ as the wrappers and hooks that recorded those costs.
328
406
  This will look something like this:
329
407
 
330
408
  ```python
331
- from paid import Paid
332
- from paid.tracing import paid_tracing
409
+ from paid.tracing import paid_tracing, signal
333
410
 
334
- # Initialize Paid SDK
335
- client = Paid(token="PAID_API_KEY")
336
-
337
- @paid_tracing("your_external_customer_id", "your_external_agent_id") # external_agent_id is necessary for sending signals
411
+ @paid_tracing("your_external_customer_id", "your_external_agent_id")
338
412
  def do_work():
339
413
  # ... your workflow logic
340
414
  # ... your AI calls made through Paid wrappers or hooks
341
- client.signal(
415
+ signal(
342
416
  event_name="<your_signal_name>",
343
417
  data={ }, # optional data (ex. manual cost tracking data)
344
418
  enable_cost_tracing=True, # set this flag to associate it with costs
@@ -356,20 +430,17 @@ Then, all of the costs traced in @paid_tracing() context are related to that sig
356
430
  Sometimes your agent workflow cannot fit into a single traceable function like above,
357
431
  because it has to be disjoint for whatever reason. It could even be running across different machines.
358
432
 
359
- For such cases, you can pass a tracing token directly to `@paid_tracing()` or `Paid.trace()` to link distributed traces together.
433
+ For such cases, you can pass a tracing token directly to `@paid_tracing()` or context manager to link distributed traces together.
360
434
 
361
435
  #### Using `tracing_token` parameter (Recommended)
362
436
 
363
- The simplest way to implement distributed tracing is to pass the token directly to the decorator or trace function:
437
+ The simplest way to implement distributed tracing is to pass the token directly to the decorator or context manager:
364
438
 
365
439
  ```python
366
- from paid import Paid
367
- from paid.tracing import paid_tracing, generate_tracing_token
440
+ from paid.tracing import paid_tracing, signal, generate_tracing_token
368
441
  from paid.tracing.wrappers.openai import PaidOpenAI
369
442
  from openai import OpenAI
370
443
 
371
- # Initialize
372
- client = Paid(token="<PAID_API_KEY>")
373
444
  openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))
374
445
 
375
446
  # Process 1: Generate token and do initial work
@@ -387,7 +458,7 @@ def process_part_1():
387
458
  messages=[{"role": "user", "content": "Analyze data"}]
388
459
  )
389
460
  # Signal without cost tracing
390
- client.signal("part_1_complete", enable_cost_tracing=False)
461
+ signal("part_1_complete", enable_cost_tracing=False)
391
462
 
392
463
  process_part_1()
393
464
 
@@ -402,164 +473,42 @@ def process_part_2():
402
473
  messages=[{"role": "user", "content": "Generate response"}]
403
474
  )
404
475
  # Signal WITH cost tracing - links all costs from both processes
405
- client.signal("workflow_complete", enable_cost_tracing=True)
476
+ signal("workflow_complete", enable_cost_tracing=True)
406
477
 
407
478
  process_part_2()
408
479
  # No cleanup needed - token is scoped to the decorated function
409
480
  ```
410
481
 
411
- Using `Paid.trace()` instead of decorator:
482
+ Using context manager instead of decorator:
412
483
 
413
484
  ```python
414
- from paid import Paid
415
- from paid.tracing import generate_tracing_token
485
+ from paid.tracing import paid_tracing, signal, generate_tracing_token
416
486
  from paid.tracing.wrappers.openai import PaidOpenAI
417
487
  from openai import OpenAI
418
488
 
419
489
  # Initialize
420
- client = Paid(token="<PAID_API_KEY>")
421
- client.initialize_tracing()
422
490
  openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))
423
491
 
424
- # Process 1: Generate and use token
492
+ # Process 1: Generate token and do initial work
425
493
  token = generate_tracing_token()
426
494
  save_to_storage("workflow_123", token)
427
495
 
428
- def process_part_1():
496
+ with paid_tracing("customer_123", external_agent_id="agent_123", tracing_token=token):
429
497
  response = openai_client.chat.completions.create(
430
498
  model="gpt-4",
431
499
  messages=[{"role": "user", "content": "Analyze data"}]
432
500
  )
433
- client.signal("part_1_complete", enable_cost_tracing=False)
434
-
435
- client.trace(
436
- external_customer_id="customer_123",
437
- external_agent_id="agent_123",
438
- tracing_token=token,
439
- fn=lambda: process_part_1()
440
- )
501
+ signal("part_1_complete", enable_cost_tracing=False)
441
502
 
442
503
  # Process 2: Retrieve and use the same token
443
504
  token = load_from_storage("workflow_123")
444
505
 
445
- def process_part_2():
506
+ with paid_tracing("customer_123", external_agent_id="agent_123", tracing_token=token):
446
507
  response = openai_client.chat.completions.create(
447
508
  model="gpt-4",
448
509
  messages=[{"role": "user", "content": "Generate response"}]
449
510
  )
450
- client.signal("workflow_complete", enable_cost_tracing=True)
451
-
452
- client.trace(
453
- external_customer_id="customer_123",
454
- external_agent_id="agent_123",
455
- tracing_token=token,
456
- fn=lambda: process_part_2()
457
- )
458
- ```
459
-
460
- #### Alternative: Using global context (Advanced)
461
-
462
- For more complex scenarios where you need to set the tracing context globally, you can use these functions:
463
-
464
- ```python
465
- from paid.tracing import (
466
- generate_tracing_token,
467
- generate_and_set_tracing_token,
468
- set_tracing_token,
469
- unset_tracing_token
470
- )
471
-
472
- def generate_tracing_token() -> int:
473
- """
474
- Generates and returns a tracing token without setting it in the tracing context.
475
- Useful when you only want to store or send a tracing token somewhere else
476
- without immediately activating it.
477
-
478
- Returns:
479
- int: The tracing token (OpenTelemetry trace ID)
480
- """
481
-
482
- def generate_and_set_tracing_token() -> int:
483
- """
484
- This function returns tracing token and attaches it to all consequent
485
- Paid.trace() or @paid_tracing tracing contexts. So all the costs and signals that share this
486
- tracing context are associated with each other.
487
-
488
- To stop associating the traces one can either call
489
- generate_and_set_tracing_token() once again or call unset_tracing_token().
490
- The former is suitable if you still want to trace but in a fresh
491
- context, and the latter will go back to unique traces per Paid.trace().
492
-
493
- Returns:
494
- int: The tracing token (OpenTelemetry trace ID)
495
- """
496
-
497
- def set_tracing_token(token: int):
498
- """
499
- Sets tracing token. Provided token should come from generate_and_set_tracing_token()
500
- or generate_tracing_token(). Once set, the consequent traces Paid.trace() or
501
- @paid_tracing() will be related to each other.
502
-
503
- Args:
504
- token (int): A tracing token from generate_and_set_tracing_token() or generate_tracing_token()
505
- """
506
-
507
- def unset_tracing_token():
508
- """
509
- Unsets the token previously set by generate_and_set_tracing_token()
510
- or by set_tracing_token(token). Does nothing if the token was never set.
511
- """
512
- ```
513
-
514
- Example using global context:
515
-
516
- ```python
517
- from paid import Paid
518
- from paid.tracing import paid_tracing, generate_and_set_tracing_token, set_tracing_token, unset_tracing_token
519
- from paid.tracing.wrappers.openai import PaidOpenAI
520
- from openai import OpenAI
521
-
522
- # Initialize
523
- client = Paid(token="<PAID_API_KEY>")
524
- openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))
525
-
526
- # Process 1: Generate token and do initial work
527
- token = generate_and_set_tracing_token()
528
- print(f"Tracing token: {token}")
529
-
530
- # Store token for other processes (e.g., in Redis, database, message queue)
531
- save_to_storage("workflow_123", token)
532
-
533
- @paid_tracing("customer_123", external_agent_id="agent_123")
534
- def process_part_1():
535
- # AI calls here will be traced
536
- response = openai_client.chat.completions.create(
537
- model="gpt-4",
538
- messages=[{"role": "user", "content": "Analyze data"}]
539
- )
540
- # Signal without cost tracing
541
- client.signal("part_1_complete", enable_cost_tracing=False)
542
-
543
- process_part_1()
544
-
545
- # Process 2 (different machine/process): Retrieve and use token
546
- token = load_from_storage("workflow_123")
547
- set_tracing_token(token)
548
-
549
- @paid_tracing("customer_123", external_agent_id="agent_123")
550
- def process_part_2():
551
- # AI calls here will be linked to the same trace
552
- response = openai_client.chat.completions.create(
553
- model="gpt-4",
554
- messages=[{"role": "user", "content": "Generate response"}]
555
- )
556
- # Signal WITH cost tracing - links all costs from both processes
557
- client.signal("workflow_complete", enable_cost_tracing=True)
558
-
559
- process_part_2()
560
-
561
- # Clean up
562
- unset_tracing_token()
511
+ signal("workflow_complete", enable_cost_tracing=True)
563
512
  ```
564
513
 
565
514
  ## Manual Cost Tracking
@@ -594,16 +543,12 @@ client.usage.record_bulk(signals=[signal])
594
543
  Alternatively the same `costData` payload can be passed to OTLP signaling mechanism:
595
544
 
596
545
  ```python
597
- from paid import Paid
598
- from paid.tracing import paid_tracing
599
-
600
- # Initialize Paid SDK
601
- client = Paid(token="PAID_API_KEY")
546
+ from paid.tracing import paid_tracing, signal
602
547
 
603
- @paid_tracing("your_external_customer_id", "your_external_agent_id") # external_agent_id is required for sending signals
548
+ @paid_tracing("your_external_customer_id", "your_external_agent_id")
604
549
  def do_work():
605
550
  # ...do some work...
606
- client.signal(
551
+ signal(
607
552
  event_name="<your_signal_name>",
608
553
  data={
609
554
  "costData": {
@@ -652,16 +597,12 @@ client.usage.record_bulk(signals=[signal])
652
597
  Same but via OTEL signaling:
653
598
 
654
599
  ```python
655
- from paid import Paid
656
- from paid.tracing import paid_tracing
600
+ from paid.tracing import paid_tracing, signal
657
601
 
658
- # Initialize Paid SDK
659
- client = Paid(token="PAID_API_KEY")
660
-
661
- @paid_tracing("your_external_customer_id", "your_external_agent_id") # external_agent_id is required for sending signals
602
+ @paid_tracing("your_external_customer_id", "your_external_agent_id")
662
603
  def do_work():
663
604
  # ...do some work...
664
- client.signal(
605
+ signal(
665
606
  event_name="<your_signal_name>",
666
607
  data={
667
608
  "costData": {
@@ -725,15 +666,13 @@ await generate_image()
725
666
 
726
667
  ### Async Signaling
727
668
 
728
- The `signal()` method works seamlessly in async contexts:
669
+ The `signal()` function works seamlessly in async contexts:
729
670
 
730
671
  ```python
731
- from paid import AsyncPaid
732
- from paid.tracing import paid_tracing
672
+ from paid.tracing import paid_tracing, signal
733
673
  from paid.tracing.wrappers.openai import PaidAsyncOpenAI
734
674
  from openai import AsyncOpenAI
735
675
 
736
- client = AsyncPaid(token="PAID_API_KEY")
737
676
  openai_client = PaidAsyncOpenAI(AsyncOpenAI(api_key="<OPENAI_API_KEY>"))
738
677
 
739
678
  @paid_tracing("your_external_customer_id", "your_external_agent_id")
@@ -744,8 +683,8 @@ async def do_work():
744
683
  messages=[{"role": "user", "content": "Hello!"}]
745
684
  )
746
685
 
747
- # Send signal (synchronous call within async function)
748
- client.signal(
686
+ # Send signal (works in async context)
687
+ signal(
749
688
  event_name="<your_signal_name>",
750
689
  enable_cost_tracing=True # Associate with traced costs
751
690
  )