paid-python 0.0.5a40__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paid/client.py +339 -233
- paid/logger.py +21 -0
- paid/tracing/__init__.py +4 -4
- paid/tracing/autoinstrumentation.py +6 -3
- paid/tracing/context_manager.py +243 -0
- paid/tracing/distributed_tracing.py +113 -0
- paid/tracing/signal.py +58 -28
- paid/tracing/tracing.py +103 -439
- paid/tracing/wrappers/anthropic/anthropicWrapper.py +11 -72
- paid/tracing/wrappers/bedrock/bedrockWrapper.py +3 -32
- paid/tracing/wrappers/gemini/geminiWrapper.py +10 -46
- paid/tracing/wrappers/langchain/paidLangChainCallback.py +3 -38
- paid/tracing/wrappers/llamaindex/llamaIndexWrapper.py +4 -38
- paid/tracing/wrappers/mistral/mistralWrapper.py +7 -118
- paid/tracing/wrappers/openai/openAiWrapper.py +56 -323
- paid/tracing/wrappers/openai_agents/openaiAgentsHook.py +8 -76
- {paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/METADATA +39 -192
- {paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/RECORD +20 -17
- {paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/LICENSE +0 -0
- {paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/WHEEL +0 -0
paid/tracing/wrappers/openai_agents/openaiAgentsHook.py

@@ -1,14 +1,10 @@
 from typing import Any, Optional

-from opentelemetry import
-from opentelemetry.trace import Status, StatusCode
+from opentelemetry.trace import Span, Status, StatusCode

+from paid.logger import logger
 from paid.tracing.tracing import (
     get_paid_tracer,
-    logger,
-    paid_external_agent_id_var,
-    paid_external_customer_id_var,
-    paid_token_var,
 )

 try:
@@ -22,7 +18,7 @@ except ImportError:

 # Global dictionary to store spans keyed by context object ID
 # This avoids polluting user's context.context and works across async boundaries
-_paid_span_store: dict[int,
+_paid_span_store: dict[int, Span] = {}


 class PaidOpenAIAgentsHook(RunHooks[Any]):
@@ -32,14 +28,12 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
     Can optionally wrap user-provided hooks to combine Paid tracking with custom behavior.
     """

-    def __init__(self, user_hooks: Optional[RunHooks[Any]] = None
+    def __init__(self, user_hooks: Optional[RunHooks[Any]] = None):
         """
         Initialize PaidAgentsHook.

         Args:
             user_hooks: Optional user-provided RunHooks to combine with Paid tracking
-            optional_tracing: If True, gracefully skip tracing when context is missing.
-                If False, raise errors when tracing context is not available.

         Usage:
             @paid_tracing("<ext_customer_id>", "<ext_agent_id>")
@@ -55,67 +49,26 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):

             my_hook = MyHook()
             hook = PaidAgentsHook(user_hooks=my_hook)
-
-            # Optional tracing (won't raise errors if context missing)
-            hook = PaidAgentsHook(optional_tracing=True)
         """
         super().__init__()
-        self.tracer = get_paid_tracer()
-        self.optional_tracing = optional_tracing
         self.user_hooks = user_hooks

-    def _get_context_vars(self):
-        """Get tracing context from context variables set by Paid.trace()."""
-        external_customer_id = paid_external_customer_id_var.get()
-        external_agent_id = paid_external_agent_id_var.get()
-        token = paid_token_var.get()
-        return external_customer_id, external_agent_id, token
-
-    def _should_skip_tracing(self, external_customer_id: Optional[str], token: Optional[str]) -> bool:
-        """Check if tracing should be skipped."""
-        # Check if there's an active span (from Paid.trace())
-        current_span = trace.get_current_span()
-        if current_span == trace.INVALID_SPAN:
-            if self.optional_tracing:
-                logger.info(f"{self.__class__.__name__} No tracing, skipping LLM tracking.")
-                return True
-            raise RuntimeError("No OTEL span found. Make sure to call this method from Paid.trace().")
-
-        if not (external_customer_id and token):
-            if self.optional_tracing:
-                logger.info(f"{self.__class__.__name__} No external_customer_id or token, skipping LLM tracking")
-                return True
-            raise RuntimeError(
-                "Missing required tracing information: external_customer_id or token."
-                " Make sure to call this method from Paid.trace()."
-            )
-        return False
-
     def _start_span(self, context, agent, hook_name) -> None:
         try:
-
-
-            # Skip tracing if required context is missing
-            if self._should_skip_tracing(external_customer_id, token):
-                return
+            tracer = get_paid_tracer()

             # Get model name from agent
             model_name = str(agent.model if agent.model else get_default_model())

             # Start span for this LLM call
-            span =
-            logger.debug(f"{hook_name} : started span")
+            span = tracer.start_span(f"openai.agents.{hook_name}")

             # Set initial attributes
             attributes = {
                 "gen_ai.system": "openai",
                 "gen_ai.operation.name": f"{hook_name}",
-                "external_customer_id": external_customer_id,
-                "token": token,
                 "gen_ai.request.model": model_name,
             }
-            if external_agent_id:
-                attributes["external_agent_id"] = external_agent_id

             span.set_attributes(attributes)

@@ -123,7 +76,6 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
             # This works across async boundaries without polluting user's context
             context_id = id(context)
             _paid_span_store[context_id] = span
-            logger.debug(f"_start_span: Stored span for context ID {context_id}")

         except Exception as error:
             logger.error(f"Error while starting span in PaidAgentsHook.{hook_name}: {error}")
@@ -133,7 +85,6 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
             # Retrieve span from global dict using context object ID
             context_id = id(context)
             span = _paid_span_store.get(context_id)
-            logger.debug(f"_end_span: Retrieved span for context ID {context_id}: {span}")

             if span:
                 # Get usage data from the response
@@ -161,17 +112,13 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
                    span.set_status(Status(StatusCode.ERROR, "No usage available"))

                span.end()
-               logger.debug(f"{hook_name} : ended span")

                # Clean up from global dict
                del _paid_span_store[context_id]
-               logger.debug(f"_end_span: Cleaned up span for context ID {context_id}")
-            else:
-               logger.warning(f"_end_span: No span found for context ID {context_id}")

         except Exception as error:
-            logger.error(f"Error while ending span in PaidAgentsHook.{hook_name}_end: {error}")
             # Try to end span on error
+            logger.error(f"Error while ending span in PaidAgentsHook.{hook_name}: {error}")
             try:
                 context_id = id(context)
                 span = _paid_span_store.get(context_id)
@@ -181,26 +128,18 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):
                    span.end()
                    del _paid_span_store[context_id]
             except:
-
+                logger.error(f"Failed to end span after error in PaidAgentsHook.{hook_name}")

     async def on_llm_start(self, context, agent, system_prompt, input_items) -> None:
-        logger.debug(f"on_llm_start : context_usage : {getattr(context, 'usage', None)}")
-
         if self.user_hooks and hasattr(self.user_hooks, "on_llm_start"):
             await self.user_hooks.on_llm_start(context, agent, system_prompt, input_items)

     async def on_llm_end(self, context, agent, response) -> None:
-        logger.debug(
-            f"on_llm_end : context_usage : {getattr(context, 'usage', None)} : response_usage : {getattr(response, 'usage', None)}"
-        )
-
         if self.user_hooks and hasattr(self.user_hooks, "on_llm_end"):
             await self.user_hooks.on_llm_end(context, agent, response)

     async def on_agent_start(self, context, agent) -> None:
         """Start a span for agent operations and call user hooks."""
-        logger.debug(f"on_agent_start : context_usage : {getattr(context, 'usage', None)}")
-
         if self.user_hooks and hasattr(self.user_hooks, "on_agent_start"):
             await self.user_hooks.on_agent_start(context, agent)

@@ -208,26 +147,19 @@ class PaidOpenAIAgentsHook(RunHooks[Any]):

     async def on_agent_end(self, context, agent, output) -> None:
         """End the span for agent operations and call user hooks."""
-        logger.debug(f"on_agent_end : context_usage : {getattr(context, 'usage', None)}")
-
         self._end_span(context, "on_agent")

         if self.user_hooks and hasattr(self.user_hooks, "on_agent_end"):
             await self.user_hooks.on_agent_end(context, agent, output)

     async def on_handoff(self, context, from_agent, to_agent) -> None:
-        logger.debug(f"on_handoff : context_usage : {getattr(context, 'usage', None)}")
         if self.user_hooks and hasattr(self.user_hooks, "on_handoff"):
             await self.user_hooks.on_handoff(context, from_agent, to_agent)

     async def on_tool_start(self, context, agent, tool) -> None:
-        logger.debug(f"on_tool_start : context_usage : {getattr(context, 'usage', None)}")
-
         if self.user_hooks and hasattr(self.user_hooks, "on_tool_start"):
             await self.user_hooks.on_tool_start(context, agent, tool)

     async def on_tool_end(self, context, agent, tool, result) -> None:
-        logger.debug(f"on_tool_end : context_usage : {getattr(context, 'usage', None)}")
-
         if self.user_hooks and hasattr(self.user_hooks, "on_tool_end"):
             await self.user_hooks.on_tool_end(context, agent, tool, result)
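The reworked hook above drops the `optional_tracing` flag and the per-call context-variable checks; span context now comes entirely from `get_paid_tracer()` inside an active `@paid_tracing()` scope. A minimal usage sketch following the class docstring; the `MyHook` class and the openai-agents `Runner.run_sync(..., hooks=...)` call are illustrative assumptions, not part of this diff:

```python
# Sketch only: assumes PaidOpenAIAgentsHook is exported from
# paid.tracing.wrappers.openai_agents and that the openai-agents SDK
# Runner accepts run hooks via its `hooks` parameter.
from agents import Agent, Runner, RunHooks

from paid.tracing import paid_tracing
from paid.tracing.wrappers.openai_agents import PaidOpenAIAgentsHook


class MyHook(RunHooks):
    """Custom behavior combined with Paid tracking, as the docstring suggests."""

    async def on_agent_start(self, context, agent) -> None:
        print(f"agent starting: {agent.name}")


@paid_tracing("<ext_customer_id>", "<ext_agent_id>")
def run_workflow(prompt: str):
    agent = Agent(name="assistant", instructions="Answer briefly.")
    hook = PaidOpenAIAgentsHook(user_hooks=MyHook())  # Paid tracking + custom hooks
    return Runner.run_sync(agent, prompt, hooks=hook)
```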
{paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: paid-python
-Version: 0.0
+Version: 0.1.0
 Summary:
 Requires-Python: >=3.9,<3.14
 Classifier: Intended Audience :: Developers
@@ -135,7 +135,7 @@ from paid.tracing import paid_tracing

 @paid_tracing("<external_customer_id>", external_agent_id="<optional_external_agent_id>")
 def some_agent_workflow(): # your function
-    # Your logic - use any AI providers with Paid wrappers or send signals with
+    # Your logic - use any AI providers with Paid wrappers or send signals with signal().
     # This function is typically an event processor that should lead to AI calls or events emitted as Paid signals
 ```

@@ -263,31 +263,24 @@ paid_autoinstrument(libraries=["anthropic", "openai"])

 - Auto-instrumentation uses official OpenTelemetry instrumentors for each AI library
 - It automatically wraps library calls without requiring you to use Paid wrapper classes
-- Works seamlessly with `@paid_tracing()` decorator or
+- Works seamlessly with `@paid_tracing()` decorator or context manager
 - Costs are tracked in the same way as when using manual wrappers
 - Should be called once during application startup, typically before creating AI client instances

 ## Signaling via OTEL tracing

-
-
-The interface is `Paid.signal()`, which takes in signal name, optional data, and a flag that attaches costs from the same trace.
-`Paid.signal()` has to be called within a trace - meaning inside of a callback to `Paid.trace()`.
-In contrast to `Paid.usage.record_bulk()`, `Paid.signal()` is using OpenTelemetry to provide reliable delivery.
+Signals allow you to emit events within your tracing context. They have access to all tracing information, so you need fewer arguments compared to manual API calls.
+Use the `signal()` function which must be called within an active `@paid_tracing()` context (decorator or context manager).

 Here's an example of how to use it:

 ```python
-from paid import
-from paid.tracing import paid_tracing
-
-# Initialize Paid SDK
-client = Paid(token="PAID_API_KEY")
+from paid.tracing import paid_tracing, signal

-@paid_tracing("your_external_customer_id", "your_external_agent_id")
+@paid_tracing("your_external_customer_id", "your_external_agent_id")
 def do_work():
     # ...do some work...
-
+    signal(
         event_name="<your_signal_name>",
         data={ } # optional data (ex. manual cost tracking data)
     )
@@ -295,28 +288,21 @@ def do_work():
 do_work()
 ```

-Same
+Same approach with context manager:

 ```python
-from paid import
-
-# Initialize Paid SDK
-client = Paid(token="PAID_API_KEY")
-
-# Initialize tracing, must be after initializing Paid SDK
-client.initialize_tracing()
+from paid.tracing import paid_tracing, signal

 def do_work():
     # ...do some work...
-
+    signal(
         event_name="<your_signal_name>",
         data={ } # optional data (ex. manual cost tracking data)
     )

-#
-
-
-fn = lambda: do_work())
+# Use context manager instead
+with paid_tracing("your_external_customer_id", "your_external_agent_id"):
+    do_work()
 ```

 ### Signal-costs - Attaching cost traces to a signal
@@ -328,17 +314,13 @@ as the wrappers and hooks that recorded those costs.
 This will look something like this:

 ```python
-from paid import
-from paid.tracing import paid_tracing
-
-# Initialize Paid SDK
-client = Paid(token="PAID_API_KEY")
+from paid.tracing import paid_tracing, signal

-@paid_tracing("your_external_customer_id", "your_external_agent_id")
+@paid_tracing("your_external_customer_id", "your_external_agent_id")
 def do_work():
     # ... your workflow logic
     # ... your AI calls made through Paid wrappers or hooks
-
+    signal(
         event_name="<your_signal_name>",
         data={ }, # optional data (ex. manual cost tracking data)
         enable_cost_tracing=True, # set this flag to associate it with costs
@@ -356,20 +338,17 @@ Then, all of the costs traced in @paid_tracing() context are related to that sig
 Sometimes your agent workflow cannot fit into a single traceable function like above,
 because it has to be disjoint for whatever reason. It could even be running across different machines.

-For such cases, you can pass a tracing token directly to `@paid_tracing()` or
+For such cases, you can pass a tracing token directly to `@paid_tracing()` or context manager to link distributed traces together.

 #### Using `tracing_token` parameter (Recommended)

-The simplest way to implement distributed tracing is to pass the token directly to the decorator or
+The simplest way to implement distributed tracing is to pass the token directly to the decorator or context manager:

 ```python
-from paid import
-from paid.tracing import paid_tracing, generate_tracing_token
+from paid.tracing import paid_tracing, signal, generate_tracing_token
 from paid.tracing.wrappers.openai import PaidOpenAI
 from openai import OpenAI

-# Initialize
-client = Paid(token="<PAID_API_KEY>")
 openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))

 # Process 1: Generate token and do initial work
@@ -387,7 +366,7 @@ def process_part_1():
         messages=[{"role": "user", "content": "Analyze data"}]
     )
     # Signal without cost tracing
-
+    signal("part_1_complete", enable_cost_tracing=False)

 process_part_1()

@@ -402,164 +381,42 @@ def process_part_2():
         messages=[{"role": "user", "content": "Generate response"}]
     )
     # Signal WITH cost tracing - links all costs from both processes
-
+    signal("workflow_complete", enable_cost_tracing=True)

 process_part_2()
 # No cleanup needed - token is scoped to the decorated function
 ```

-Using
+Using context manager instead of decorator:

 ```python
-from paid import
-from paid.tracing import generate_tracing_token
+from paid.tracing import paid_tracing, signal, generate_tracing_token
 from paid.tracing.wrappers.openai import PaidOpenAI
 from openai import OpenAI

 # Initialize
-client = Paid(token="<PAID_API_KEY>")
-client.initialize_tracing()
 openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))

-# Process 1: Generate and
+# Process 1: Generate token and do initial work
 token = generate_tracing_token()
 save_to_storage("workflow_123", token)

-
+with paid_tracing("customer_123", external_agent_id="agent_123", tracing_token=token):
     response = openai_client.chat.completions.create(
         model="gpt-4",
         messages=[{"role": "user", "content": "Analyze data"}]
     )
-
-
-client.trace(
-    external_customer_id="customer_123",
-    external_agent_id="agent_123",
-    tracing_token=token,
-    fn=lambda: process_part_1()
-)
+    signal("part_1_complete", enable_cost_tracing=False)

 # Process 2: Retrieve and use the same token
 token = load_from_storage("workflow_123")

-
+with paid_tracing("customer_123", external_agent_id="agent_123", tracing_token=token):
     response = openai_client.chat.completions.create(
         model="gpt-4",
         messages=[{"role": "user", "content": "Generate response"}]
     )
-
-
-client.trace(
-    external_customer_id="customer_123",
-    external_agent_id="agent_123",
-    tracing_token=token,
-    fn=lambda: process_part_2()
-)
-```
-
-#### Alternative: Using global context (Advanced)
-
-For more complex scenarios where you need to set the tracing context globally, you can use these functions:
-
-```python
-from paid.tracing import (
-    generate_tracing_token,
-    generate_and_set_tracing_token,
-    set_tracing_token,
-    unset_tracing_token
-)
-
-def generate_tracing_token() -> int:
-    """
-    Generates and returns a tracing token without setting it in the tracing context.
-    Useful when you only want to store or send a tracing token somewhere else
-    without immediately activating it.
-
-    Returns:
-        int: The tracing token (OpenTelemetry trace ID)
-    """
-
-def generate_and_set_tracing_token() -> int:
-    """
-    This function returns tracing token and attaches it to all consequent
-    Paid.trace() or @paid_tracing tracing contexts. So all the costs and signals that share this
-    tracing context are associated with each other.
-
-    To stop associating the traces one can either call
-    generate_and_set_tracing_token() once again or call unset_tracing_token().
-    The former is suitable if you still want to trace but in a fresh
-    context, and the latter will go back to unique traces per Paid.trace().
-
-    Returns:
-        int: The tracing token (OpenTelemetry trace ID)
-    """
-
-def set_tracing_token(token: int):
-    """
-    Sets tracing token. Provided token should come from generate_and_set_tracing_token()
-    or generate_tracing_token(). Once set, the consequent traces Paid.trace() or
-    @paid_tracing() will be related to each other.
-
-    Args:
-        token (int): A tracing token from generate_and_set_tracing_token() or generate_tracing_token()
-    """
-
-def unset_tracing_token():
-    """
-    Unsets the token previously set by generate_and_set_tracing_token()
-    or by set_tracing_token(token). Does nothing if the token was never set.
-    """
-```
-
-Example using global context:
-
-```python
-from paid import Paid
-from paid.tracing import paid_tracing, generate_and_set_tracing_token, set_tracing_token, unset_tracing_token
-from paid.tracing.wrappers.openai import PaidOpenAI
-from openai import OpenAI
-
-# Initialize
-client = Paid(token="<PAID_API_KEY>")
-openai_client = PaidOpenAI(OpenAI(api_key="<OPENAI_API_KEY>"))
-
-# Process 1: Generate token and do initial work
-token = generate_and_set_tracing_token()
-print(f"Tracing token: {token}")
-
-# Store token for other processes (e.g., in Redis, database, message queue)
-save_to_storage("workflow_123", token)
-
-@paid_tracing("customer_123", external_agent_id="agent_123")
-def process_part_1():
-    # AI calls here will be traced
-    response = openai_client.chat.completions.create(
-        model="gpt-4",
-        messages=[{"role": "user", "content": "Analyze data"}]
-    )
-    # Signal without cost tracing
-    client.signal("part_1_complete", enable_cost_tracing=False)
-
-process_part_1()
-
-# Process 2 (different machine/process): Retrieve and use token
-token = load_from_storage("workflow_123")
-set_tracing_token(token)
-
-@paid_tracing("customer_123", external_agent_id="agent_123")
-def process_part_2():
-    # AI calls here will be linked to the same trace
-    response = openai_client.chat.completions.create(
-        model="gpt-4",
-        messages=[{"role": "user", "content": "Generate response"}]
-    )
-    # Signal WITH cost tracing - links all costs from both processes
-    client.signal("workflow_complete", enable_cost_tracing=True)
-
-process_part_2()
-
-# Clean up
-unset_tracing_token()
+    signal("workflow_complete", enable_cost_tracing=True)
 ```

 ## Manual Cost Tracking
@@ -594,16 +451,12 @@ client.usage.record_bulk(signals=[signal])
 Alternatively the same `costData` payload can be passed to OTLP signaling mechanism:

 ```python
-from paid import
-from paid.tracing import paid_tracing
-
-# Initialize Paid SDK
-client = Paid(token="PAID_API_KEY")
+from paid.tracing import paid_tracing, signal

-@paid_tracing("your_external_customer_id", "your_external_agent_id")
+@paid_tracing("your_external_customer_id", "your_external_agent_id")
 def do_work():
     # ...do some work...
-
+    signal(
         event_name="<your_signal_name>",
         data={
             "costData": {
@@ -652,16 +505,12 @@ client.usage.record_bulk(signals=[signal])
 Same but via OTEL signaling:

 ```python
-from paid import
-from paid.tracing import paid_tracing
+from paid.tracing import paid_tracing, signal

-
-client = Paid(token="PAID_API_KEY")
-
-@paid_tracing("your_external_customer_id", "your_external_agent_id") # external_agent_id is required for sending signals
+@paid_tracing("your_external_customer_id", "your_external_agent_id")
 def do_work():
     # ...do some work...
-
+    signal(
         event_name="<your_signal_name>",
         data={
             "costData": {
@@ -725,15 +574,13 @@ await generate_image()

 ### Async Signaling

-The `signal()`
+The `signal()` function works seamlessly in async contexts:

 ```python
-from paid import
-from paid.tracing import paid_tracing
+from paid.tracing import paid_tracing, signal
 from paid.tracing.wrappers.openai import PaidAsyncOpenAI
 from openai import AsyncOpenAI

-client = AsyncPaid(token="PAID_API_KEY")
 openai_client = PaidAsyncOpenAI(AsyncOpenAI(api_key="<OPENAI_API_KEY>"))

 @paid_tracing("your_external_customer_id", "your_external_agent_id")
@@ -744,8 +591,8 @@ async def do_work():
         messages=[{"role": "user", "content": "Hello!"}]
     )

-    # Send signal (
-
+    # Send signal (works in async context)
+    signal(
         event_name="<your_signal_name>",
         enable_cost_tracing=True # Associate with traced costs
     )
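Taken together, the README changes above reflect one API shift: the `Paid(...)` / `client.initialize_tracing()` / `client.trace(fn=...)` callback style is removed in favor of the module-level `paid_tracing` decorator or context manager plus the `signal()` helper. A condensed before/after sketch, reusing the placeholder IDs from the README examples (the `do_work()` body is illustrative):

```python
# Condensed from the README diff above; IDs and workload are placeholders.
from paid.tracing import paid_tracing, signal

def do_work():
    # ... AI calls via Paid wrappers, hooks, or auto-instrumentation ...
    signal("workflow_complete", enable_cost_tracing=True)

# 0.0.5a40 style (removed):
#   client = Paid(token="PAID_API_KEY")
#   client.initialize_tracing()
#   client.trace(external_customer_id="customer_123",
#                external_agent_id="agent_123",
#                fn=lambda: do_work())

# 0.1.0 style (added): decorator or context manager
with paid_tracing("customer_123", external_agent_id="agent_123"):
    do_work()
```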
{paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/RECORD

@@ -2,7 +2,7 @@ paid/__init__.py,sha256=D1SeLoeTlySo_vZCZrxFX3y5KhKGrHflphLXoewImfk,1826
 paid/agents/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
 paid/agents/client.py,sha256=ojc3H-nx4MqDrb74_i6JE_wjHSJaVAErsIunfNeffMo,23305
 paid/agents/raw_client.py,sha256=jN9LvPK2-bGeNQzcV3iRmprpegXKtO2JaOEXjnPfz9Y,26833
-paid/client.py,sha256=
+paid/client.py,sha256=AWo35RlJ5bP2A5bGre3iY1GbBdYW7gu7HlioQIOoltk,23038
 paid/contacts/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
 paid/contacts/client.py,sha256=sNm-yAg4dR9AyYWL7-RC_CuCCvOXX7YlDAUqn47yZhE,14058
 paid/contacts/raw_client.py,sha256=ZYNWuekHiL2sqK_gHR0IzcrLAopUKRXIqMUi-fuLGe4,19211
@@ -28,6 +28,7 @@ paid/errors/__init__.py,sha256=bFCa9MSXuABYvuPmur95svTaWqHETaijD3q2ZMl0A-Q,285
 paid/errors/bad_request_error.py,sha256=_nkSDHMpzm0RadIc19BDq-yM9uJMQGWNrdsT2_6B6Iw,348
 paid/errors/forbidden_error.py,sha256=eDJG4NiZy5uBKpftwKuKJLcmV3zbKs55WFPH1FHI7Pk,347
 paid/errors/not_found_error.py,sha256=nsBHj9gxzRkoJMdFyChYv9ePaPYYf8h4s2nuoUo4CuI,346
+paid/logger.py,sha256=CIo_i2n2Azp4g57GkU9twb_GzJ7x9lZmURqPsq4379M,514
 paid/orders/__init__.py,sha256=CrbHKbmp63NAum57EE9NnGmJqLKiWDMLyPRemOs_U9w,126
 paid/orders/client.py,sha256=TO3hsHj_0w1NTipmu092yQaEawLyKEHHQA1Je2Gj4QE,11533
 paid/orders/lines/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
@@ -35,27 +36,29 @@ paid/orders/lines/client.py,sha256=GqSwiXdlu49KLHt7uccS_H4nkVQosM1_PQOcPA9v82A,4
 paid/orders/lines/raw_client.py,sha256=KZN_yBokCOkf1lUb4ZJtX_NZbqmTqCdJNoaIOdWar8I,4590
 paid/orders/raw_client.py,sha256=650e1Sj2vi9KVJc15M3ENXIKYoth0qMz66dzvXy1Sb4,16245
 paid/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-paid/tracing/__init__.py,sha256=
-paid/tracing/autoinstrumentation.py,sha256=
-paid/tracing/
-paid/tracing/
+paid/tracing/__init__.py,sha256=IP6OkV885_xlK1H68RxGTX_IhpVAkY266zsWgnQKKTs,440
+paid/tracing/autoinstrumentation.py,sha256=p57bU87x1Xi-nb_2C4O56tcgfvN2UYZAd9pJ0Vyh9Nw,6765
+paid/tracing/context_manager.py,sha256=ZVsmum4np_Eyub1_D0D5ChhvdBWnkFBFqzZHxSsljdU,9716
+paid/tracing/distributed_tracing.py,sha256=CpUWpHai-4LxLLHbGxz41r9h5wLG-dC83YL5Vg29OBI,3967
+paid/tracing/signal.py,sha256=PfYxF6EFQS8j7RY5_C5NXrCBVu9Hq2E2tyG4fdQScJk,3252
+paid/tracing/tracing.py,sha256=Xtue-_vuAKLnZ1l6d-tSj9o0UWmMPgXggTVgXYb1Zz4,16230
 paid/tracing/wrappers/__init__.py,sha256=IIleLB_JUbzLw7FshrU2VHZAKF3dZHMGy1O5zCBwwqM,1588
 paid/tracing/wrappers/anthropic/__init__.py,sha256=_x1fjySAQxuT5cIGO_jU09LiGcZH-WQLqKg8mUFAu2w,115
-paid/tracing/wrappers/anthropic/anthropicWrapper.py,sha256=
+paid/tracing/wrappers/anthropic/anthropicWrapper.py,sha256=pGchbOb41CbTxc7H8xXoM-LjR085spqrzXqCVC_rrFk,4913
 paid/tracing/wrappers/bedrock/__init__.py,sha256=HSeZYbslJuWU5xWJm2rAHz4sL_Hxb70iJjTyAOoJj3s,67
-paid/tracing/wrappers/bedrock/bedrockWrapper.py,sha256=
+paid/tracing/wrappers/bedrock/bedrockWrapper.py,sha256=aKfGpbkGqJfI-9QX7O-3vkf15Fi35pPyDxOjydTNpQk,1962
 paid/tracing/wrappers/gemini/__init__.py,sha256=6tzEaxuuYcJEtQHlxzjPLqJuEDY2cZe6QC_ZvZCHMS4,64
-paid/tracing/wrappers/gemini/geminiWrapper.py,sha256=
+paid/tracing/wrappers/gemini/geminiWrapper.py,sha256=xXp8gJyT4k39CWCni_Whnua4dEdyFLtYPK9Wqa9ZByU,3837
 paid/tracing/wrappers/langchain/__init__.py,sha256=LDNPT9UoZen-8f5K0-FO2-Bau7jpeM0Ht3FxctfWW3w,101
-paid/tracing/wrappers/langchain/paidLangChainCallback.py,sha256=
+paid/tracing/wrappers/langchain/paidLangChainCallback.py,sha256=2wKFGsYaHwBl24o0UtBZsGne5SGObbaZH54nKvIP3wI,12794
 paid/tracing/wrappers/llamaindex/__init__.py,sha256=bM2bibDwbb_cmvQehb8i-fi9XwSx2HAk6qpGee7wnu8,88
-paid/tracing/wrappers/llamaindex/llamaIndexWrapper.py,sha256=
+paid/tracing/wrappers/llamaindex/llamaIndexWrapper.py,sha256=p4Ft7PhZ5cQ_QWeKmdJlYEH75u0seNVBMiXfki8O7sU,3271
 paid/tracing/wrappers/mistral/__init__.py,sha256=_Z1DVul6JBG9vYSXSc9mpm0M4l8GebBWSxwBodGQnew,74
-paid/tracing/wrappers/mistral/mistralWrapper.py,sha256=
+paid/tracing/wrappers/mistral/mistralWrapper.py,sha256=IgK_N5tEj4HDLKxw3uwJYiXB9BQyxLNF-04uiNHMD5Y,8265
 paid/tracing/wrappers/openai/__init__.py,sha256=pfaL3O4f4WOS47UUcdZbDbZSNB9fsTyE_WLqqBahVzs,100
-paid/tracing/wrappers/openai/openAiWrapper.py,sha256=
+paid/tracing/wrappers/openai/openAiWrapper.py,sha256=DIMOGdQTjpFaiKV-JeJvNH_cxHsfmzu2wIrKXDwRkbw,22148
 paid/tracing/wrappers/openai_agents/__init__.py,sha256=-xX5HPhYYX6hDWbn5FpFw-P6M2h0k1X9Qjrg0Bkv7cc,94
-paid/tracing/wrappers/openai_agents/openaiAgentsHook.py,sha256
+paid/tracing/wrappers/openai_agents/openaiAgentsHook.py,sha256=-uXUNL0S85cFVT7ObrL9hzEQAIYh5Lo1JgEE57hxk2Y,6650
 paid/tracing/wrappers/utils.py,sha256=_0FCF3BC2wK5cU8suEvo_mXm6Jn8ULkyeEhnYPUlO2Y,2347
 paid/types/__init__.py,sha256=Q0XwHtdwgNww7mVxCMKSh9Xrw-Nakjrf49MJDm3Yfbw,2088
 paid/types/address.py,sha256=fJa_oYXxsIxJXFZy5UQqflVkr8BQkwQ7fKhp2wO05fo,871
@@ -95,7 +98,7 @@ paid/usage/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
 paid/usage/client.py,sha256=280WJuepoovk3BAVbAx2yN2Q_qBdvx3CcPkLu8lXslc,3030
 paid/usage/raw_client.py,sha256=2acg5C4lxuZodZjepU9QYF0fmBxgG-3ZgXs1zUJG-wM,3709
 paid/version.py,sha256=QIpDFnOrxMxrs86eL0iNH0mSZ1DO078wWHYY9TYAoew,78
-paid_python-0.0.
-paid_python-0.0.
-paid_python-0.0.
-paid_python-0.0.
+paid_python-0.1.0.dist-info/LICENSE,sha256=Nz4baY1zvv0Qy7lqrQtbaiMhmEeGr2Q7A93aqzpml4c,1071
+paid_python-0.1.0.dist-info/METADATA,sha256=liDYbzbDdhe3sBUeAuPaBbD7P99Q3YLYClqGqIu9pYs,18729
+paid_python-0.1.0.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+paid_python-0.1.0.dist-info/RECORD,,
{paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/LICENSE

File without changes

{paid_python-0.0.5a40.dist-info → paid_python-0.1.0.dist-info}/WHEEL

File without changes