paygent-sdk 1.0.0__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,334 @@
+ """
+ Google Gemini wrapper for automatic usage tracking with Paygent.
+ This wrapper intercepts Gemini API calls and automatically sends usage data to Paygent.
+
+ Usage is identical to the standard google-genai SDK, with only the addition of tracking parameters.
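+
+ A minimal usage sketch (the `paygent_sdk` import path and the identifiers below
+ are illustrative, not guaranteed by this package):
+
+     from google import genai
+     from paygent_sdk import Client, PaygentGemini
+
+     wrapper = PaygentGemini(genai.Client(api_key="..."), Client("your-api-key"))
+     response = wrapper.models.generate_content(
+         model="gemini-2.0-flash",
+         contents="Hello!",
+         indicator="chat_completion",
+         external_agent_id="agent-123",
+         external_customer_id="customer-456",
+     )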
+ """
+
+ import json
+ from typing import Any
+
+ try:
+     from google import genai
+ except ImportError:
+     raise ImportError(
+         "google-genai is a peer dependency of the Paygent Gemini wrapper. "
+         "Install it with: pip install google-genai"
+     )
+
+ from ..client import Client
+ from ..models import UsageData, UsageDataWithStrings
+
+
+ class PaygentGemini:
+     """Main wrapper class for Google Gemini that provides automatic usage tracking."""
+
+     def __init__(self, gemini_client: genai.Client, paygent_client: Client):
+         """
+         Create a new PaygentGemini wrapper.
+
+         Args:
+             gemini_client: The genai.Client instance from google-genai
+             paygent_client: The Paygent client instance for usage tracking
+         """
+         self.gemini = gemini_client
+         self.paygent_client = paygent_client
+
+     @property
+     def models(self) -> 'ModelsWrapper':
+         """Access to models API with automatic usage tracking."""
+         return ModelsWrapper(self.gemini, self.paygent_client)
+
+
+ class ModelsWrapper:
+     """Wrapper for Gemini models API."""
+
+     def __init__(self, gemini_client: genai.Client, paygent_client: Client):
+         self.gemini = gemini_client
+         self.paygent_client = paygent_client
+
+     def generate_content(
+         self,
+         *,
+         model: str,
+         contents: Any,
+         indicator: str,
+         external_agent_id: str,
+         external_customer_id: str,
+         **kwargs
+     ) -> Any:
+         """
+         Generate content with automatic usage tracking.
+
+         Args:
+             model: The model to use
+             contents: The content to generate from
+             indicator: Indicator for the usage event
+             external_agent_id: External agent identifier
+             external_customer_id: External customer identifier
+             **kwargs: Additional Gemini parameters
+
+         Returns:
+             The generation response from Gemini
+         """
+         # Make the Gemini API call using the standard SDK method
+         response = self.gemini.models.generate_content(
+             model=model,
+             contents=contents,
+             **kwargs
+         )
+
+         # Extract usage data from the response, with a robust fallback mechanism.
+         # Token counts can be None, so coerce them before comparing.
+         has_valid_usage = (
+             hasattr(response, 'usage_metadata') and
+             response.usage_metadata is not None and
+             ((response.usage_metadata.prompt_token_count or 0) > 0 or
+              (response.usage_metadata.candidates_token_count or 0) > 0)
+         )
+
+         if has_valid_usage:
+             # Primary path: use usage metadata from the API response
+             usage_data = UsageData(
+                 service_provider=model,
+                 model=model,
+                 prompt_tokens=response.usage_metadata.prompt_token_count or 0,
+                 completion_tokens=response.usage_metadata.candidates_token_count or 0,
+                 total_tokens=response.usage_metadata.total_token_count or 0
+             )
+
+             self.paygent_client.send_usage(
+                 external_agent_id,
+                 external_customer_id,
+                 indicator,
+                 usage_data
+             )
+         else:
+             # Fallback path: calculate tokens from the actual strings.
+             # default=str keeps json.dumps from raising on non-serializable SDK objects.
+             prompt_string = contents if isinstance(contents, str) else json.dumps(contents, default=str)
+             output_string = ''
+
+             if hasattr(response, 'candidates') and response.candidates:
+                 candidate = response.candidates[0]
+                 if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'):
+                     if candidate.content.parts:
+                         part = candidate.content.parts[0]
+                         if hasattr(part, 'text'):
+                             output_string = part.text or ''
+
+             usage_data_with_strings = UsageDataWithStrings(
+                 service_provider=model,
+                 model=model,
+                 prompt_string=prompt_string,
+                 output_string=output_string
+             )
+
+             self.paygent_client.send_usage_with_token_string(
+                 external_agent_id,
+                 external_customer_id,
+                 indicator,
+                 usage_data_with_strings
+             )
+
+         return response
+
+     def start_chat(
+         self,
+         *,
+         model: str,
+         indicator: str,
+         external_agent_id: str,
+         external_customer_id: str,
+         **kwargs
+     ) -> 'ChatSessionWrapper':
+         """
+         Start a chat session with automatic usage tracking.
+
+         Args:
+             model: The model to use
+             indicator: Indicator for the usage event
+             external_agent_id: External agent identifier
+             external_customer_id: External customer identifier
+             **kwargs: Additional chat configuration
+
+         Returns:
+             A wrapped chat session with automatic tracking
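+
+         Example (identifiers are illustrative):
+             chat = wrapper.models.start_chat(
+                 model="gemini-2.0-flash",
+                 indicator="chat_completion",
+                 external_agent_id="agent-123",
+                 external_customer_id="customer-456",
+             )
+             reply = chat.send_message("Hi!")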
+         """
+         # Start a chat session using the standard SDK; google-genai exposes
+         # chat sessions via client.chats.create
+         chat_session = self.gemini.chats.create(model=model, **kwargs)
+
+         return ChatSessionWrapper(
+             chat_session,
+             self.paygent_client,
+             model,
+             indicator,
+             external_agent_id,
+             external_customer_id
+         )
+
+     def generate_image(
+         self,
+         *,
+         model: str,
+         prompt: str,
+         indicator: str,
+         external_agent_id: str,
+         external_customer_id: str,
+         **kwargs
+     ) -> Any:
+         """
+         Generate images with automatic usage tracking.
+
+         Args:
+             model: The model to use (e.g., "imagen-3.0-generate-001")
+             prompt: The prompt for image generation
+             indicator: Indicator for the usage event
+             external_agent_id: External agent identifier
+             external_customer_id: External customer identifier
+             **kwargs: Additional image generation parameters
+
+         Returns:
+             The image generation response
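+
+         Example (identifiers are illustrative):
+             images = wrapper.models.generate_image(
+                 model="imagen-3.0-generate-001",
+                 prompt="A watercolor fox",
+                 indicator="image_generation",
+                 external_agent_id="agent-123",
+                 external_customer_id="customer-456",
+             )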
+         """
+         # Make the image generation API call. Note: this goes through
+         # generate_content; Imagen-only models may instead require the SDK's
+         # models.generate_images endpoint.
+         response = self.gemini.models.generate_content(
+             model=model,
+             contents=prompt,
+             **kwargs
+         )
+
+         # Extract usage data from the response, with a robust fallback mechanism.
+         # Token counts can be None, so coerce them before comparing.
+         has_valid_usage = (
+             hasattr(response, 'usage_metadata') and
+             response.usage_metadata is not None and
+             ((response.usage_metadata.prompt_token_count or 0) > 0 or
+              (response.usage_metadata.candidates_token_count or 0) > 0)
+         )
+
+         if has_valid_usage:
+             # Primary path: use usage metadata from the API response
+             usage_data = UsageData(
+                 service_provider=model,
+                 model=model,
+                 prompt_tokens=response.usage_metadata.prompt_token_count or 0,
+                 completion_tokens=response.usage_metadata.candidates_token_count or 0,
+                 total_tokens=response.usage_metadata.total_token_count or 0
+             )
+
+             self.paygent_client.send_usage(
+                 external_agent_id,
+                 external_customer_id,
+                 indicator,
+                 usage_data
+             )
+         else:
+             # Fallback path: calculate tokens from the prompt string
+             usage_data_with_strings = UsageDataWithStrings(
+                 service_provider=model,
+                 model=model,
+                 prompt_string=prompt,
+                 output_string=''  # Image generation doesn't have text output
+             )
+
+             self.paygent_client.send_usage_with_token_string(
+                 external_agent_id,
+                 external_customer_id,
+                 indicator,
+                 usage_data_with_strings
+             )
+
+         return response
+
+
+ class ChatSessionWrapper:
+     """Wrapper for Gemini ChatSession."""
+
+     def __init__(
+         self,
+         chat_session: Any,
+         paygent_client: Client,
+         model_name: str,
+         indicator: str,
+         external_agent_id: str,
+         external_customer_id: str
+     ):
+         self.chat_session = chat_session
+         self.paygent_client = paygent_client
+         self.model_name = model_name
+         self.indicator = indicator
+         self.external_agent_id = external_agent_id
+         self.external_customer_id = external_customer_id
+
+     def send_message(self, message: str) -> Any:
+         """
+         Send a message in the chat with automatic usage tracking.
+
+         Args:
+             message: The message to send
+
+         Returns:
+             The chat response from Gemini
+         """
+         # Make the Gemini API call
+         response = self.chat_session.send_message(message)
+
+         # Extract usage data from the response, with a robust fallback mechanism.
+         # Token counts can be None, so coerce them before comparing.
+         has_valid_usage = (
+             hasattr(response, 'usage_metadata') and
+             response.usage_metadata is not None and
+             ((response.usage_metadata.prompt_token_count or 0) > 0 or
+              (response.usage_metadata.candidates_token_count or 0) > 0)
+         )
+
+         if has_valid_usage:
+             # Primary path: use usage metadata from the API response
+             usage_data = UsageData(
+                 service_provider=self.model_name,
+                 model=self.model_name,
+                 prompt_tokens=response.usage_metadata.prompt_token_count or 0,
+                 completion_tokens=response.usage_metadata.candidates_token_count or 0,
+                 total_tokens=response.usage_metadata.total_token_count or 0
+             )
+
+             self.paygent_client.send_usage(
+                 self.external_agent_id,
+                 self.external_customer_id,
+                 self.indicator,
+                 usage_data
+             )
+         else:
+             # Fallback path: calculate tokens from the message and response strings
+             output_string = ''
+             if hasattr(response, 'candidates') and response.candidates:
+                 candidate = response.candidates[0]
+                 if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'):
+                     if candidate.content.parts:
+                         part = candidate.content.parts[0]
+                         if hasattr(part, 'text'):
+                             output_string = part.text or ''
+
+             usage_data_with_strings = UsageDataWithStrings(
+                 service_provider=self.model_name,
+                 model=self.model_name,
+                 prompt_string=message,
+                 output_string=output_string
+             )
+
+             self.paygent_client.send_usage_with_token_string(
+                 self.external_agent_id,
+                 self.external_customer_id,
+                 self.indicator,
+                 usage_data_with_strings
+             )
+
+         return response
+
+     def get_history(self) -> Any:
+         """
+         Get the chat history.
+
+         Returns:
+             The chat history
+         """
+         return self.chat_session.get_history()
@@ -0,0 +1,257 @@
+ """
+ LangChain callback handler for automatic usage tracking with Paygent.
+ This callback intercepts LangChain LLM calls and automatically sends usage data to Paygent.
+ """
+
+ from typing import Any, Dict, List, Optional
+ from uuid import UUID
+
+ try:
+     from langchain_core.callbacks import BaseCallbackHandler
+     from langchain_core.outputs import LLMResult
+ except ImportError:
+     raise ImportError(
+         "LangChain is required for this integration. "
+         "Install it with: pip install langchain-core"
+     )
+
+ from ..client import Client
+ from ..models import UsageData, UsageDataWithStrings
+
+
+ class PaygentLangChainCallback(BaseCallbackHandler):
+     """
+     LangChain callback handler that automatically tracks usage with Paygent.
+
+     Usage example:
+     ```python
+     from paygent_sdk import Client, PaygentLangChainCallback
+     from langchain_openai import ChatOpenAI
+
+     paygent_client = Client("your-api-key")
+     callback = PaygentLangChainCallback(
+         paygent_client,
+         indicator="chat_completion",
+         external_agent_id="agent-123",
+         external_customer_id="customer-456"
+     )
+
+     llm = ChatOpenAI(callbacks=[callback])
+     response = llm.invoke("Hello!")
+     # Usage automatically tracked!
+     ```
+     """
+
+     def __init__(
+         self,
+         paygent_client: Client,
+         indicator: str,
+         external_agent_id: str,
+         external_customer_id: str
+     ):
+         """
+         Create a new PaygentLangChainCallback.
+
+         Args:
+             paygent_client: The Paygent client instance for usage tracking
+             indicator: Indicator for the usage event (e.g., "chat_completion")
+             external_agent_id: External agent identifier
+             external_customer_id: External customer identifier
+         """
+         super().__init__()
+         self.paygent_client = paygent_client
+         self.indicator = indicator
+         self.external_agent_id = external_agent_id
+         self.external_customer_id = external_customer_id
+         self.run_info: Dict[UUID, Dict[str, str]] = {}
+
+     def _extract_provider(self, serialized: Dict[str, Any]) -> str:
+         """Extract the service provider from the serialized LLM data."""
+         if not serialized:
+             return "unknown"
+
+         # Check id field
+         if "id" in serialized:
+             id_list = serialized["id"]
+             if isinstance(id_list, list):
+                 id_str = " ".join(id_list).lower()
+             else:
+                 id_str = str(id_list).lower()
+
+             # Check azure before openai so AzureChatOpenAI is not misclassified
+             if "azure" in id_str:
+                 return "azure"
+             if "openai" in id_str:
+                 return "openai"
+             if "anthropic" in id_str:
+                 return "anthropic"
+             if "mistral" in id_str:
+                 return "mistral"
+             if "cohere" in id_str:
+                 return "cohere"
+             if "google" in id_str or "gemini" in id_str:
+                 return "gemini"
+             if "huggingface" in id_str:
+                 return "huggingface"
+
+         # Check name field
+         if "name" in serialized:
+             name = str(serialized["name"]).lower()
+             if "openai" in name:
+                 return "openai"
+             if "anthropic" in name:
+                 return "anthropic"
+             if "mistral" in name:
+                 return "mistral"
+             if "gemini" in name or "google" in name:
+                 return "gemini"
+
+         return "unknown"
+
+     def on_llm_start(
+         self,
+         serialized: Dict[str, Any],
+         prompts: List[str],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when an LLM starts running."""
+         try:
+             provider = self._extract_provider(serialized)
+             model_name = metadata.get("ls_model_name", "unknown") if metadata else "unknown"
+
+             # Store the run info for use in on_llm_end. The prompt text is kept
+             # so the string-based fallback can report the actual prompt.
+             self.run_info[run_id] = {
+                 "provider": provider,
+                 "model_name": model_name,
+                 "prompt": " ".join(prompts),
+             }
+         except Exception as e:
+             print(f"Error in on_llm_start: {e}")
+
+     def on_llm_end(
+         self,
+         response: LLMResult,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when an LLM ends running."""
+         try:
+             # Get the stored run info
+             info = self.run_info.get(run_id, {})
+             provider = info.get("provider", "unknown")
+             model_name = info.get("model_name", "unknown")
+
+             # Extract usage information from LangChain's LLMResult
+             llm_output = response.llm_output or {}
+
+             prompt_tokens = 0
+             completion_tokens = 0
+             total_tokens = 0
+
+             # OpenAI-style usage (in llm_output["token_usage"])
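+             # e.g. {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21}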
+             if "token_usage" in llm_output:
+                 token_usage = llm_output["token_usage"]
+                 prompt_tokens = token_usage.get("prompt_tokens", 0)
+                 completion_tokens = token_usage.get("completion_tokens", 0)
+                 total_tokens = token_usage.get("total_tokens", 0)
+
+             # Anthropic-style usage (in llm_output["usage"])
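+             # e.g. {"input_tokens": 9, "output_tokens": 12}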
+             elif "usage" in llm_output:
+                 usage = llm_output["usage"]
+                 prompt_tokens = usage.get("input_tokens", usage.get("prompt_tokens", 0))
+                 completion_tokens = usage.get("output_tokens", usage.get("completion_tokens", 0))
+                 total_tokens = prompt_tokens + completion_tokens
+
+             # Gemini-style usage (in generations[0][0].message.usage_metadata)
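+             # usage_metadata is a dict, e.g.
+             # {"input_tokens": 9, "output_tokens": 12, "total_tokens": 21}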
+             elif response.generations and len(response.generations[0]) > 0:
+                 gen = response.generations[0][0]
+                 if hasattr(gen, "message") and getattr(gen.message, "usage_metadata", None):
+                     # usage_metadata is a TypedDict in langchain_core, so read
+                     # it with .get rather than getattr
+                     usage_metadata = gen.message.usage_metadata
+                     prompt_tokens = usage_metadata.get("input_tokens", 0)
+                     completion_tokens = usage_metadata.get("output_tokens", 0)
+                     total_tokens = usage_metadata.get("total_tokens", 0)
+
+             # Try to extract the model name from the response if not already set
+             if model_name == "unknown":
+                 if "model_name" in llm_output:
+                     model_name = llm_output["model_name"]
+                 elif response.generations and len(response.generations[0]) > 0:
+                     gen = response.generations[0][0]
+                     if hasattr(gen, "message") and hasattr(gen.message, "response_metadata"):
+                         model_name = gen.message.response_metadata.get("model_name", "unknown")
+
+             # Send usage data if we have token information
+             if total_tokens > 0:
+                 usage_data = UsageData(
+                     service_provider=provider,
+                     model=model_name,
+                     prompt_tokens=prompt_tokens,
+                     completion_tokens=completion_tokens,
+                     total_tokens=total_tokens
+                 )
+
+                 self.paygent_client.send_usage(
+                     self.external_agent_id,
+                     self.external_customer_id,
+                     self.indicator,
+                     usage_data
+                 )
+             else:
+                 # Fallback: use string-based tracking with the prompt captured
+                 # in on_llm_start and the generated output text
+                 prompt_string = info.get("prompt", "")
+
+                 output_string = ""
+                 if response.generations and len(response.generations[0]) > 0:
+                     outputs = [gen.text for gen in response.generations[0]]
+                     output_string = " ".join(outputs)
+
+                 usage_data_with_strings = UsageDataWithStrings(
+                     service_provider=provider,
+                     model=model_name,
+                     prompt_string=prompt_string,
+                     output_string=output_string
+                 )
+
+                 self.paygent_client.send_usage_with_token_string(
+                     self.external_agent_id,
+                     self.external_customer_id,
+                     self.indicator,
+                     usage_data_with_strings
+                 )
+
+             # Clean up the run info
+             if run_id in self.run_info:
+                 del self.run_info[run_id]
+
+         except Exception as e:
+             print(f"Error tracking LangChain usage with Paygent: {e}")
+             if run_id in self.run_info:
+                 del self.run_info[run_id]
+
+     def on_llm_error(
+         self,
+         error: BaseException,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when an LLM encounters an error."""
+         print(f"LLM error: {error}")
+         # Clean up the run info
+         if run_id in self.run_info:
+             del self.run_info[run_id]