paygent-sdk 1.0.0__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,257 @@
1
+ """
2
+ LangChain callback handler for automatic usage tracking with Paygent.
3
+ This callback intercepts LangChain LLM calls and automatically sends usage data to Paygent.
4
+ """
5
+
6
+ from typing import Any, Dict, List, Optional
7
+ from uuid import UUID
8
+
9
+ try:
10
+ from langchain_core.callbacks import BaseCallbackHandler
11
+ from langchain_core.outputs import LLMResult
12
+ except ImportError:
13
+ raise ImportError(
14
+ "LangChain is required for this integration. "
15
+ "Install it with: pip install langchain-core"
16
+ )
17
+
18
+ from ..client import Client
19
+ from ..models import UsageData, UsageDataWithStrings
20
+
21
+
22
class PaygentLangChainCallback(BaseCallbackHandler):
    """
    LangChain callback handler that automatically tracks usage with Paygent.

    Provider/model/prompt details are captured in ``on_llm_start`` (keyed by
    ``run_id``) and matched with token usage in ``on_llm_end``, where usage is
    forwarded to Paygent. If the LLM response carries no token counts, a
    string-based fallback reports the captured prompts and generated text.

    Usage example:
    ```python
    from paygent_sdk import Client, PaygentLangChainCallback
    from langchain_openai import ChatOpenAI

    paygent_client = Client("your-api-key")
    callback = PaygentLangChainCallback(
        paygent_client,
        indicator="chat_completion",
        external_agent_id="agent-123",
        external_customer_id="customer-456"
    )

    llm = ChatOpenAI(callbacks=[callback])
    response = llm.invoke("Hello!")
    # Usage automatically tracked!
    ```
    """

    # (keyword, canonical provider) pairs, checked in order against the
    # serialized LLM identity. Order matters: e.g. "azure openai" ids should
    # resolve to "openai" (same precedence as the original id checks).
    _PROVIDER_KEYWORDS = [
        ("openai", "openai"),
        ("anthropic", "anthropic"),
        ("mistral", "mistral"),
        ("cohere", "cohere"),
        ("google", "gemini"),
        ("gemini", "gemini"),
        ("huggingface", "huggingface"),
        ("azure", "azure"),
    ]

    def __init__(
        self,
        paygent_client: Client,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str
    ):
        """
        Create a new PaygentLangChainCallback.

        Args:
            paygent_client: The Paygent client instance for usage tracking
            indicator: Indicator for the usage event (e.g., "chat_completion")
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
        """
        super().__init__()
        self.paygent_client = paygent_client
        self.indicator = indicator
        self.external_agent_id = external_agent_id
        self.external_customer_id = external_customer_id
        # Per-run state recorded at on_llm_start and consumed at on_llm_end:
        # {"provider": str, "model_name": str, "prompts": list[str]}
        self.run_info: Dict[UUID, Dict[str, Any]] = {}

    def _extract_provider(self, serialized: Dict[str, Any]) -> str:
        """Best-effort provider detection from the serialized LLM data.

        Probes the "id" field first, then "name", returning the first
        matching provider keyword, or "unknown" when nothing matches.
        """
        if not serialized:
            return "unknown"

        candidates: List[str] = []
        if "id" in serialized:
            id_value = serialized["id"]
            if isinstance(id_value, list):
                candidates.append(" ".join(str(part) for part in id_value).lower())
            else:
                candidates.append(str(id_value).lower())
        if "name" in serialized:
            candidates.append(str(serialized["name"]).lower())

        for text in candidates:
            for keyword, provider in self._PROVIDER_KEYWORDS:
                if keyword in text:
                    return provider
        return "unknown"

    @staticmethod
    def _first_generation(response: LLMResult) -> Optional[Any]:
        """Return the first generation of the first prompt, or None."""
        if response.generations and len(response.generations[0]) > 0:
            return response.generations[0][0]
        return None

    def _extract_token_usage(self, response: LLMResult) -> tuple:
        """Extract (prompt_tokens, completion_tokens, total_tokens).

        Tries, in order: OpenAI-style llm_output["token_usage"],
        Anthropic-style llm_output["usage"], and Gemini-style
        generations[0][0].message.usage_metadata. Returns zeros if none apply.
        """
        llm_output = response.llm_output or {}

        if "token_usage" in llm_output:
            token_usage = llm_output["token_usage"]
            return (
                token_usage.get("prompt_tokens", 0),
                token_usage.get("completion_tokens", 0),
                token_usage.get("total_tokens", 0),
            )

        if "usage" in llm_output:
            usage = llm_output["usage"]
            prompt_tokens = usage.get("input_tokens", usage.get("prompt_tokens", 0))
            completion_tokens = usage.get("output_tokens", usage.get("completion_tokens", 0))
            return (prompt_tokens, completion_tokens, prompt_tokens + completion_tokens)

        gen = self._first_generation(response)
        if gen is not None and hasattr(gen, "message") and hasattr(gen.message, "usage_metadata"):
            usage_metadata = gen.message.usage_metadata
            return (
                getattr(usage_metadata, "input_tokens", 0),
                getattr(usage_metadata, "output_tokens", 0),
                getattr(usage_metadata, "total_tokens", 0),
            )

        return (0, 0, 0)

    def _extract_model_name(self, response: LLMResult) -> str:
        """Recover the model name from the response when metadata lacked it."""
        llm_output = response.llm_output or {}
        if "model_name" in llm_output:
            return llm_output["model_name"]
        gen = self._first_generation(response)
        if gen is not None and hasattr(gen, "message") and hasattr(gen.message, "response_metadata"):
            return gen.message.response_metadata.get("model_name", "unknown")
        return "unknown"

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Called when an LLM starts running; records per-run context."""
        try:
            provider = self._extract_provider(serialized)
            model_name = metadata.get("ls_model_name", "unknown") if metadata else "unknown"

            # Store the run info for use in on_llm_end. The prompts are kept
            # so the string-based fallback can report the actual input text
            # (previously the fallback mistakenly used the generated output
            # as the prompt string).
            self.run_info[run_id] = {
                "provider": provider,
                "model_name": model_name,
                "prompts": list(prompts or []),
            }
        except Exception as e:
            print(f"Error in on_llm_start: {e}")

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> None:
        """Called when an LLM ends running; sends usage data to Paygent."""
        try:
            info = self.run_info.get(run_id, {})
            provider = info.get("provider", "unknown")
            model_name = info.get("model_name", "unknown")

            prompt_tokens, completion_tokens, total_tokens = self._extract_token_usage(response)

            # Try to extract model name from response if not already set
            if model_name == "unknown":
                model_name = self._extract_model_name(response)

            if total_tokens > 0:
                # Primary path: token counts were available in the response.
                usage_data = UsageData(
                    service_provider=provider,
                    model=model_name,
                    prompt_tokens=prompt_tokens,
                    completion_tokens=completion_tokens,
                    total_tokens=total_tokens
                )
                self.paygent_client.send_usage(
                    self.external_agent_id,
                    self.external_customer_id,
                    self.indicator,
                    usage_data
                )
            else:
                # Fallback: string-based tracking. Input comes from the
                # prompts captured in on_llm_start; output is the joined text
                # of all generations for the first prompt.
                prompt_string = " ".join(info.get("prompts", []))

                output_string = ""
                if response.generations and len(response.generations[0]) > 0:
                    output_string = " ".join(gen.text for gen in response.generations[0])

                usage_data_with_strings = UsageDataWithStrings(
                    service_provider=provider,
                    model=model_name,
                    prompt_string=prompt_string,
                    output_string=output_string
                )
                self.paygent_client.send_usage_with_token_string(
                    self.external_agent_id,
                    self.external_customer_id,
                    self.indicator,
                    usage_data_with_strings
                )
        except Exception as e:
            print(f"Error tracking LangChain usage with Paygent: {e}")
        finally:
            # Clean up the run info whether or not tracking succeeded.
            self.run_info.pop(run_id, None)

    def on_llm_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> None:
        """Called when an LLM encounters an error; discards the run state."""
        print(f"LLM error: {error}")
        # Clean up the run info
        self.run_info.pop(run_id, None)
@@ -0,0 +1,128 @@
1
+ """
2
+ Mistral wrapper for automatic usage tracking with Paygent.
3
+ This wrapper intercepts Mistral API calls and automatically sends usage data to Paygent.
4
+ """
5
+
6
+ import json
7
+ from typing import Any
8
+
9
+ try:
10
+ from mistralai import Mistral
11
+ except ImportError:
12
+ raise ImportError(
13
+ "mistralai package is a peer-dependency. To use the Paygent wrapper around mistralai "
14
+ "you're assumed to already have mistralai package installed."
15
+ )
16
+
17
+ from ..client import Client
18
+ from ..models import UsageData, UsageDataWithStrings
19
+
20
+
21
class PaygentMistral:
    """Top-level Paygent wrapper around a Mistral client.

    Exposes the same sub-API surface as the underlying client (currently
    ``chat``), with every call instrumented to report usage to Paygent.
    """

    def __init__(self, mistral_client: Mistral, paygent_client: Client):
        """
        Create a new PaygentMistral wrapper.

        Args:
            mistral_client: The Mistral client instance
            paygent_client: The Paygent client instance for usage tracking
        """
        self.mistral = mistral_client
        self.paygent_client = paygent_client

    @property
    def chat(self) -> 'ChatWrapper':
        """Chat API, instrumented for automatic Paygent usage tracking."""
        wrapper = ChatWrapper(self.mistral, self.paygent_client)
        return wrapper
39
+
40
+
41
class ChatWrapper:
    """Wrapper for Mistral chat API that reports usage to Paygent."""

    def __init__(self, mistral_client: Mistral, paygent_client: Client):
        self.mistral = mistral_client
        self.paygent_client = paygent_client

    def complete(
        self,
        *,
        model: str,
        messages: list,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> Any:
        """
        Create a chat completion with automatic usage tracking.
        Note: Streaming is not supported with automatic tracking.

        Args:
            model: The model to use
            messages: The messages to send
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional Mistral parameters

        Returns:
            The chat completion response from Mistral
        """
        # Make the Mistral API call
        response = self.mistral.chat.complete(
            model=model,
            messages=messages,
            **kwargs
        )

        # Extract usage data from response with robust fallback mechanism
        usage = getattr(response, 'usage', None)
        has_valid_usage = bool(
            usage and
            usage.prompt_tokens > 0 and
            usage.completion_tokens > 0
        )

        if has_valid_usage:
            # Primary path: use token counts reported by the API.
            # NOTE: service_provider was previously passed the model name;
            # Paygent expects the provider name (cf. the LangChain callback,
            # which sends "mistral"/"openai"/... here).
            usage_data = UsageData(
                service_provider="mistral",
                model=model,
                prompt_tokens=usage.prompt_tokens,
                completion_tokens=usage.completion_tokens,
                total_tokens=usage.total_tokens
            )
            self.paygent_client.send_usage(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data
            )
        else:
            # Fallback path: let Paygent count tokens from the actual strings
            # so billing data is never lost if the response format changes.
            prompt_string = json.dumps(messages)
            output_string = ''
            choices = getattr(response, 'choices', None)
            if choices:
                first_choice = choices[0]
                if hasattr(first_choice, 'message') and hasattr(first_choice.message, 'content'):
                    output_string = first_choice.message.content or ''

            usage_data_with_strings = UsageDataWithStrings(
                service_provider="mistral",
                model=model,
                prompt_string=prompt_string,
                output_string=output_string
            )
            self.paygent_client.send_usage_with_token_string(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data_with_strings
            )

        return response
@@ -0,0 +1,308 @@
1
+ """
2
+ OpenAI wrapper for automatic usage tracking with Paygent.
3
+ This wrapper intercepts OpenAI API calls and automatically sends usage data to Paygent.
4
+ """
5
+
6
+ import json
7
+ from typing import Any, Optional
8
+
9
+ try:
10
+ from openai import OpenAI
11
+ except ImportError:
12
+ raise ImportError(
13
+ "openai package is a peer-dependency. To use the Paygent wrapper around openai "
14
+ "you're assumed to already have openai package installed."
15
+ )
16
+
17
+ from ..client import Client
18
+ from ..models import UsageData, UsageDataWithStrings
19
+
20
+
21
class PaygentOpenAI:
    """Top-level Paygent wrapper around an OpenAI client.

    Mirrors the OpenAI client's sub-API layout (``chat``, ``embeddings``,
    ``images``), with each call instrumented to report usage to Paygent.
    """

    def __init__(self, openai_client: OpenAI, paygent_client: Client):
        """
        Create a new PaygentOpenAI wrapper.

        Args:
            openai_client: The OpenAI client instance
            paygent_client: The Paygent client instance for usage tracking
        """
        self.openai = openai_client
        self.paygent_client = paygent_client

    @property
    def chat(self) -> 'ChatWrapper':
        """Chat API, instrumented for automatic Paygent usage tracking."""
        wrapper = ChatWrapper(self.openai, self.paygent_client)
        return wrapper

    @property
    def embeddings(self) -> 'EmbeddingsWrapper':
        """Embeddings API, instrumented for automatic Paygent usage tracking."""
        wrapper = EmbeddingsWrapper(self.openai, self.paygent_client)
        return wrapper

    @property
    def images(self) -> 'ImagesWrapper':
        """Images API, instrumented for automatic Paygent usage tracking."""
        wrapper = ImagesWrapper(self.openai, self.paygent_client)
        return wrapper
49
+
50
+
51
class ChatWrapper:
    """Namespace wrapper mirroring ``openai.chat`` for Paygent tracking."""

    def __init__(self, openai_client: OpenAI, paygent_client: Client):
        self.openai = openai_client
        self.paygent_client = paygent_client

    @property
    def completions(self) -> 'ChatCompletionsWrapper':
        """Chat completions API, instrumented for Paygent usage tracking."""
        wrapper = ChatCompletionsWrapper(self.openai, self.paygent_client)
        return wrapper
62
+
63
+
64
class ChatCompletionsWrapper:
    """Wrapper for OpenAI chat completions API with Paygent tracking."""

    def __init__(self, openai_client: OpenAI, paygent_client: Client):
        self.openai = openai_client
        self.paygent_client = paygent_client

    def create(
        self,
        *,
        model: str,
        messages: list,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> Any:
        """
        Create a chat completion with automatic usage tracking.
        Note: Streaming is not supported with automatic tracking.

        Args:
            model: The model to use
            messages: The messages to send
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional OpenAI parameters

        Returns:
            The chat completion response from OpenAI
        """
        # Ensure streaming is disabled for automatic tracking
        kwargs['stream'] = False

        # Make the OpenAI API call (non-streaming)
        response = self.openai.chat.completions.create(
            model=model,
            messages=messages,
            **kwargs
        )

        # Extract usage data from response with robust fallback mechanism
        usage = getattr(response, 'usage', None)
        has_valid_usage = bool(
            usage and
            usage.prompt_tokens > 0 and
            usage.completion_tokens > 0
        )

        if has_valid_usage:
            # Primary path: use token counts reported by the API.
            # NOTE: service_provider was previously passed the model name;
            # Paygent expects the provider name (cf. the LangChain callback,
            # which sends "openai"/"mistral"/... here).
            usage_data = UsageData(
                service_provider="openai",
                model=model,
                prompt_tokens=usage.prompt_tokens,
                completion_tokens=usage.completion_tokens,
                total_tokens=usage.total_tokens
            )
            self.paygent_client.send_usage(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data
            )
        else:
            # Fallback path: let Paygent count tokens from the actual strings.
            # This ensures we never lose billing data even if the API response
            # format changes.
            prompt_string = json.dumps(messages)
            output_string = ''
            choices = getattr(response, 'choices', None)
            if choices:
                first_choice = choices[0]
                if hasattr(first_choice, 'message') and hasattr(first_choice.message, 'content'):
                    output_string = first_choice.message.content or ''

            usage_data_with_strings = UsageDataWithStrings(
                service_provider="openai",
                model=model,
                prompt_string=prompt_string,
                output_string=output_string
            )
            self.paygent_client.send_usage_with_token_string(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data_with_strings
            )

        return response
154
+
155
+
156
class EmbeddingsWrapper:
    """Wrapper for OpenAI embeddings API with Paygent tracking."""

    def __init__(self, openai_client: OpenAI, paygent_client: Client):
        self.openai = openai_client
        self.paygent_client = paygent_client

    def create(
        self,
        *,
        model: str,
        input: Any,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> Any:
        """
        Create embeddings with automatic usage tracking.

        Args:
            model: The model to use
            input: The input text(s) to embed
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional OpenAI parameters

        Returns:
            The embeddings response from OpenAI
        """
        # Make the OpenAI API call
        response = self.openai.embeddings.create(
            model=model,
            input=input,
            **kwargs
        )

        # Extract usage data from response with robust fallback mechanism
        usage = getattr(response, 'usage', None)
        has_valid_usage = bool(usage and usage.prompt_tokens > 0)

        if has_valid_usage:
            # Primary path: use token counts reported by the API.
            # BUG FIX: completion_tokens was previously set to prompt_tokens,
            # double-counting usage; embeddings have no completion tokens.
            # Also pass the provider name ("openai"), not the model, as
            # service_provider (consistent with the LangChain callback).
            usage_data = UsageData(
                service_provider="openai",
                model=model,
                prompt_tokens=usage.prompt_tokens,
                completion_tokens=0,  # Embeddings don't have completion tokens
                total_tokens=usage.total_tokens
            )
            self.paygent_client.send_usage(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data
            )
        else:
            # Fallback path: let Paygent count tokens from the input text
            if isinstance(input, list):
                input_text = ' '.join(str(item) for item in input)
            else:
                input_text = str(input)

            usage_data_with_strings = UsageDataWithStrings(
                service_provider="openai",
                model=model,
                prompt_string=input_text,
                output_string=''  # Embeddings don't have output
            )
            self.paygent_client.send_usage_with_token_string(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data_with_strings
            )

        return response
240
+
241
+
242
class ImagesWrapper:
    """Wrapper for OpenAI images API with Paygent tracking."""

    def __init__(self, openai_client: OpenAI, paygent_client: Client):
        self.openai = openai_client
        self.paygent_client = paygent_client

    def generate(
        self,
        *,
        model: Optional[str] = None,
        prompt: str,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> Any:
        """
        Generate images with automatic usage tracking.
        Note: OpenAI's image generation API doesn't return token usage,
        so we track the request parameters instead.

        Args:
            model: The model to use (optional, defaults to dall-e-2)
            prompt: The prompt for image generation
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional OpenAI parameters

        Returns:
            The images response from OpenAI
        """
        # Make the OpenAI API call. When no model is given, let the API use
        # its default and record it as 'dall-e-2' for tracking purposes.
        if model:
            response = self.openai.images.generate(
                model=model,
                prompt=prompt,
                **kwargs
            )
        else:
            response = self.openai.images.generate(
                prompt=prompt,
                **kwargs
            )
            model = 'dall-e-2'  # Default model

        # For image generation, we'll use a simplified usage tracking since
        # OpenAI doesn't provide token counts for images.
        # NOTE: service_provider was previously passed the model name;
        # Paygent expects the provider name (cf. the LangChain callback).
        usage_data = UsageData(
            service_provider="openai",
            model=model,
            prompt_tokens=0,  # Images don't use traditional tokens
            completion_tokens=0,
            total_tokens=0
        )

        # Send usage data to Paygent
        # Note: Cost calculation for images should be handled by the pricing module
        self.paygent_client.send_usage(
            external_agent_id,
            external_customer_id,
            indicator,
            usage_data
        )

        return response
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: paygent-sdk
3
- Version: 1.0.0
3
+ Version: 2.0.0
4
4
  Summary: Official Python SDK for Paygent - Track AI usage and costs across multiple providers (OpenAI, Anthropic, Google, DeepSeek, etc.)
5
5
  Home-page: https://github.com/paygent/paygent-sdk-python
6
6
  Author: Paygent