paygent-sdk 1.0.0__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paygent_sdk/__init__.py +54 -1
- paygent_sdk/client.py +16 -41
- paygent_sdk/constants.py +84 -0
- paygent_sdk/models.py +28 -0
- paygent_sdk/voice_client.py +263 -0
- paygent_sdk/wrappers/__init__.py +30 -0
- paygent_sdk/wrappers/anthropic_wrapper.py +132 -0
- paygent_sdk/wrappers/gemini_wrapper.py +334 -0
- paygent_sdk/wrappers/langchain_wrapper.py +257 -0
- paygent_sdk/wrappers/mistral_wrapper.py +128 -0
- paygent_sdk/wrappers/openai_wrapper.py +308 -0
- {paygent_sdk-1.0.0.dist-info → paygent_sdk-2.0.0.dist-info}/METADATA +1 -1
- paygent_sdk-2.0.0.dist-info/RECORD +22 -0
- paygent_sdk-1.0.0.dist-info/RECORD +0 -15
- {paygent_sdk-1.0.0.dist-info → paygent_sdk-2.0.0.dist-info}/WHEEL +0 -0
- {paygent_sdk-1.0.0.dist-info → paygent_sdk-2.0.0.dist-info}/licenses/LICENSE +0 -0
- {paygent_sdk-1.0.0.dist-info → paygent_sdk-2.0.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Anthropic wrapper for automatic usage tracking with Paygent.
|
|
3
|
+
This wrapper intercepts Anthropic API calls and automatically sends usage data to Paygent.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
try:
|
|
10
|
+
from anthropic import Anthropic
|
|
11
|
+
except ImportError:
|
|
12
|
+
raise ImportError(
|
|
13
|
+
"anthropic package is a peer-dependency. To use the Paygent wrapper around anthropic "
|
|
14
|
+
"you're assumed to already have anthropic package installed."
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
from ..client import Client
|
|
18
|
+
from ..models import UsageData, UsageDataWithStrings
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class PaygentAnthropic:
    """Top-level Anthropic wrapper adding automatic Paygent usage tracking."""

    def __init__(self, anthropic_client: Anthropic, paygent_client: Client):
        """
        Create a new PaygentAnthropic wrapper.

        Args:
            anthropic_client: The Anthropic client instance
            paygent_client: The Paygent client instance for usage tracking
        """
        self.anthropic = anthropic_client
        self.paygent_client = paygent_client

    @property
    def messages(self) -> 'MessagesWrapper':
        """Access to messages API with automatic usage tracking."""
        # A fresh wrapper per access; it is stateless beyond the two clients.
        wrapper = MessagesWrapper(self.anthropic, self.paygent_client)
        return wrapper
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class MessagesWrapper:
    """Wrapper for the Anthropic messages API that reports usage to Paygent."""

    def __init__(self, anthropic_client: Anthropic, paygent_client: Client):
        self.anthropic = anthropic_client
        self.paygent_client = paygent_client

    def create(
        self,
        *,
        model: str,
        messages: list,
        max_tokens: int,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> Any:
        """
        Create a message with automatic usage tracking.

        Note: Streaming is not supported with automatic tracking.

        Args:
            model: The model to use
            messages: The messages to send
            max_tokens: Maximum tokens to generate
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional Anthropic parameters

        Returns:
            The message response from Anthropic
        """
        # Ensure streaming is disabled for automatic tracking.
        kwargs['stream'] = False

        # Make the Anthropic API call (non-streaming).
        response = self.anthropic.messages.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            **kwargs
        )

        # Normalize token counts before comparing: they may be absent or None
        # on some responses, and `None > 0` would raise TypeError.
        usage = getattr(response, 'usage', None)
        input_tokens = (getattr(usage, 'input_tokens', None) or 0) if usage else 0
        output_tokens = (getattr(usage, 'output_tokens', None) or 0) if usage else 0

        # Treat ANY positive count as valid usage data. The original required
        # BOTH counts to be positive, which pushed legitimate zero-output
        # responses through the lossy string fallback; `or` also matches the
        # semantics used by the Gemini wrapper in this package.
        if input_tokens > 0 or output_tokens > 0:
            # Primary path: use usage data reported by the API.
            usage_data = UsageData(
                service_provider=model,
                model=model,
                prompt_tokens=input_tokens,
                completion_tokens=output_tokens,
                total_tokens=input_tokens + output_tokens
            )

            self.paygent_client.send_usage(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data
            )
        else:
            # Fallback path: have Paygent derive tokens from the raw strings.
            prompt_string = json.dumps(messages)
            # Join EVERY text block: the original read only content[0].text and
            # dropped text from multi-block responses.
            output_string = ''.join(
                getattr(block, 'text', None) or ''
                for block in (getattr(response, 'content', None) or [])
            )

            usage_data_with_strings = UsageDataWithStrings(
                service_provider=model,
                model=model,
                prompt_string=prompt_string,
                output_string=output_string
            )

            self.paygent_client.send_usage_with_token_string(
                external_agent_id,
                external_customer_id,
                indicator,
                usage_data_with_strings
            )

        return response
|
|
@@ -0,0 +1,334 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Google Gemini wrapper for automatic usage tracking with Paygent.
|
|
3
|
+
This wrapper intercepts Gemini API calls and automatically sends usage data to Paygent.
|
|
4
|
+
|
|
5
|
+
Usage is identical to the standard google-genai SDK, with only the addition of tracking parameters.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
try:
|
|
12
|
+
from google import genai
|
|
13
|
+
except ImportError:
|
|
14
|
+
raise ImportError(
|
|
15
|
+
"google-genai package is a peer-dependency. To use the Paygent wrapper around google-genai "
|
|
16
|
+
"you're assumed to already have google-genai package installed."
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
from ..client import Client
|
|
20
|
+
from ..models import UsageData, UsageDataWithStrings
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class PaygentGemini:
    """Top-level Google Gemini wrapper adding automatic Paygent usage tracking."""

    def __init__(self, gemini_client: genai.Client, paygent_client: Client):
        """
        Create a new PaygentGemini wrapper.

        Args:
            gemini_client: The GoogleGenAI client instance from google-genai
            paygent_client: The Paygent client instance for usage tracking
        """
        self.gemini = gemini_client
        self.paygent_client = paygent_client

    @property
    def models(self) -> 'ModelsWrapper':
        """Access to models API with automatic usage tracking."""
        # A fresh wrapper per access; it is stateless beyond the two clients.
        wrapper = ModelsWrapper(self.gemini, self.paygent_client)
        return wrapper
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class ModelsWrapper:
    """Wrapper for the Gemini models API that reports usage to Paygent."""

    def __init__(self, gemini_client: genai.Client, paygent_client: Client):
        self.gemini = gemini_client
        self.paygent_client = paygent_client

    @staticmethod
    def _token_counts(response) -> tuple:
        """Return (prompt, completion, total) token counts, normalizing to 0.

        google-genai can return None for individual counts (e.g. blocked or
        empty candidates); the original `count > 0` comparison raised
        TypeError in that case.
        """
        usage = getattr(response, 'usage_metadata', None)
        if not usage:
            return 0, 0, 0
        return (
            getattr(usage, 'prompt_token_count', None) or 0,
            getattr(usage, 'candidates_token_count', None) or 0,
            getattr(usage, 'total_token_count', None) or 0,
        )

    @staticmethod
    def _first_candidate_text(response) -> str:
        """Best-effort extraction of the first candidate's first text part."""
        candidates = getattr(response, 'candidates', None) or []
        if candidates:
            content = getattr(candidates[0], 'content', None)
            parts = getattr(content, 'parts', None) or []
            if parts:
                return getattr(parts[0], 'text', None) or ''
        return ''

    def _report_usage(
        self,
        response,
        model: str,
        prompt_string: str,
        output_string: str,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
    ) -> None:
        """Send usage for *response* to Paygent, preferring API-reported counts.

        Falls back to string-based token accounting when the response carries
        no usable usage metadata. Shared by generate_content/generate_image,
        which previously duplicated this whole branch.
        """
        prompt_tokens, completion_tokens, total_tokens = self._token_counts(response)

        if prompt_tokens > 0 or completion_tokens > 0:
            # Primary path: use usage metadata from the API response.
            self.paygent_client.send_usage(
                external_agent_id,
                external_customer_id,
                indicator,
                UsageData(
                    service_provider=model,
                    model=model,
                    prompt_tokens=prompt_tokens,
                    completion_tokens=completion_tokens,
                    total_tokens=total_tokens,
                ),
            )
        else:
            # Fallback path: let Paygent derive tokens from the raw strings.
            self.paygent_client.send_usage_with_token_string(
                external_agent_id,
                external_customer_id,
                indicator,
                UsageDataWithStrings(
                    service_provider=model,
                    model=model,
                    prompt_string=prompt_string,
                    output_string=output_string,
                ),
            )

    def generate_content(
        self,
        *,
        model: str,
        contents: Any,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> Any:
        """
        Generate content with automatic usage tracking.

        Args:
            model: The model to use
            contents: The content to generate from
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional Gemini parameters

        Returns:
            The generation response from Gemini
        """
        # Make the Gemini API call using the standard SDK method.
        response = self.gemini.models.generate_content(
            model=model,
            contents=contents,
            **kwargs
        )

        prompt_string = contents if isinstance(contents, str) else json.dumps(contents)
        self._report_usage(
            response,
            model,
            prompt_string,
            self._first_candidate_text(response),
            indicator,
            external_agent_id,
            external_customer_id,
        )

        return response

    def start_chat(
        self,
        *,
        model: str,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> 'ChatSessionWrapper':
        """
        Start a chat session with automatic usage tracking.

        Args:
            model: The model to use
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional chat configuration

        Returns:
            A wrapped chat session with automatic tracking
        """
        # BUGFIX: google-genai exposes chat sessions via client.chats.create();
        # the Models service has no start_chat() method, so the original call
        # to self.gemini.models.start_chat(...) raised AttributeError.
        chat_session = self.gemini.chats.create(model=model, **kwargs)

        return ChatSessionWrapper(
            chat_session,
            self.paygent_client,
            model,
            indicator,
            external_agent_id,
            external_customer_id
        )

    def generate_image(
        self,
        *,
        model: str,
        prompt: str,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str,
        **kwargs
    ) -> Any:
        """
        Generate images with automatic usage tracking.

        Args:
            model: The model to use (e.g., "imagen-3.0-generate-001")
            prompt: The prompt for image generation
            indicator: Indicator for the usage event
            external_agent_id: External agent identifier
            external_customer_id: External customer identifier
            **kwargs: Additional image generation parameters

        Returns:
            The image generation response
        """
        # Make the image generation API call.
        response = self.gemini.models.generate_content(
            model=model,
            contents=prompt,
            **kwargs
        )

        # Image generation has no text output; the fallback counts the prompt only.
        self._report_usage(
            response,
            model,
            prompt,
            '',
            indicator,
            external_agent_id,
            external_customer_id,
        )

        return response
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
class ChatSessionWrapper:
    """Wrapper for a Gemini chat session that reports usage to Paygent."""

    def __init__(
        self,
        chat_session: Any,
        paygent_client: Client,
        model_name: str,
        indicator: str,
        external_agent_id: str,
        external_customer_id: str
    ):
        self.chat_session = chat_session
        self.paygent_client = paygent_client
        self.model_name = model_name
        self.indicator = indicator
        self.external_agent_id = external_agent_id
        self.external_customer_id = external_customer_id

    def send_message(self, message: str) -> Any:
        """
        Send a message in the chat with automatic usage tracking.

        Args:
            message: The message to send

        Returns:
            The chat response from Gemini
        """
        # Make the Gemini API call.
        response = self.chat_session.send_message(message)

        # Normalize token counts before comparing: google-genai can return
        # None for individual counts (e.g. blocked or empty candidates), and
        # the original `count > 0` comparison raised TypeError in that case.
        usage = getattr(response, 'usage_metadata', None)
        prompt_tokens = (getattr(usage, 'prompt_token_count', None) or 0) if usage else 0
        completion_tokens = (getattr(usage, 'candidates_token_count', None) or 0) if usage else 0
        total_tokens = (getattr(usage, 'total_token_count', None) or 0) if usage else 0

        if prompt_tokens > 0 or completion_tokens > 0:
            # Primary path: use usage metadata from the API response.
            usage_data = UsageData(
                service_provider=self.model_name,
                model=self.model_name,
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            self.paygent_client.send_usage(
                self.external_agent_id,
                self.external_customer_id,
                self.indicator,
                usage_data
            )
        else:
            # Fallback path: let Paygent derive tokens from message and response.
            output_string = ''
            candidates = getattr(response, 'candidates', None) or []
            if candidates:
                content = getattr(candidates[0], 'content', None)
                parts = getattr(content, 'parts', None) or []
                if parts:
                    output_string = getattr(parts[0], 'text', None) or ''

            usage_data_with_strings = UsageDataWithStrings(
                service_provider=self.model_name,
                model=self.model_name,
                prompt_string=message,
                output_string=output_string
            )

            self.paygent_client.send_usage_with_token_string(
                self.external_agent_id,
                self.external_customer_id,
                self.indicator,
                usage_data_with_strings
            )

        return response

    def get_history(self) -> Any:
        """
        Get the chat history.

        Returns:
            The chat history
        """
        return self.chat_session.get_history()
|