tokenator 0.1.11__tar.gz → 0.1.13__tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: tokenator
-Version: 0.1.11
+Version: 0.1.13
 Summary: Token usage tracking wrapper for LLMs
 License: MIT
 Author: Ujjwal Maheshwari
@@ -171,6 +171,70 @@ print(usage.last_execution().model_dump_json(indent=4))
 }
 """
 ```
+
+### xAI
+
+You can use xAI models through the `openai` SDK and track usage with the `provider` parameter in `tokenator`.
+
+```python
+import os
+
+from openai import OpenAI
+from tokenator import tokenator_openai, usage
+
+xai_client = OpenAI(
+    api_key=os.getenv("XAI_API_KEY"),
+    base_url="https://api.x.ai/v1"
+)
+
+# Wrap it with Tokenator
+client = tokenator_openai(xai_client, provider="xai")
+
+# Use it exactly like the OpenAI client, but with xAI models
+response = client.chat.completions.create(
+    model="grok-2-latest",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+print(response)
+
+print(usage.last_execution())
+```
+
+### Other AI model providers through the openai SDK
+
+Today, a variety of AI companies have made their APIs compatible with the `openai` SDK.
+You can track usage of any such model using `tokenator`'s `provider` parameter.
+
+For example, here is how to track usage of `perplexity` tokens.
+
+```python
+import os
+
+from openai import OpenAI
+from tokenator import tokenator_openai, usage
+
+perplexity_client = OpenAI(
+    api_key=os.getenv("PERPLEXITY_API_KEY"),
+    base_url="https://api.perplexity.ai"
+)
+
+# Wrap it with Tokenator
+client = tokenator_openai(perplexity_client, provider="perplexity")
+
+# Use it exactly like the OpenAI client, but with perplexity models
+response = client.chat.completions.create(
+    model="llama-3.1-sonar-small-128k-online",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+print(response)
+
+print(usage.last_execution())
+
+print(usage.provider("perplexity"))
+```
+
 ---
 
 Most importantly, none of your data is ever sent to any server.
@@ -149,6 +149,70 @@ print(usage.last_execution().model_dump_json(indent=4))
 }
 """
 ```
+
+### xAI
+
+You can use xAI models through the `openai` SDK and track usage with the `provider` parameter in `tokenator`.
+
+```python
+import os
+
+from openai import OpenAI
+from tokenator import tokenator_openai, usage
+
+xai_client = OpenAI(
+    api_key=os.getenv("XAI_API_KEY"),
+    base_url="https://api.x.ai/v1"
+)
+
+# Wrap it with Tokenator
+client = tokenator_openai(xai_client, provider="xai")
+
+# Use it exactly like the OpenAI client, but with xAI models
+response = client.chat.completions.create(
+    model="grok-2-latest",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+print(response)
+
+print(usage.last_execution())
+```
+
+### Other AI model providers through the openai SDK
+
+Today, a variety of AI companies have made their APIs compatible with the `openai` SDK.
+You can track usage of any such model using `tokenator`'s `provider` parameter.
+
+For example, here is how to track usage of `perplexity` tokens.
+
+```python
+import os
+
+from openai import OpenAI
+from tokenator import tokenator_openai, usage
+
+perplexity_client = OpenAI(
+    api_key=os.getenv("PERPLEXITY_API_KEY"),
+    base_url="https://api.perplexity.ai"
+)
+
+# Wrap it with Tokenator
+client = tokenator_openai(perplexity_client, provider="perplexity")
+
+# Use it exactly like the OpenAI client, but with perplexity models
+response = client.chat.completions.create(
+    model="llama-3.1-sonar-small-128k-online",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+
+print(response)
+
+print(usage.last_execution())
+
+print(usage.provider("perplexity"))
+```
+
 ---
 
 Most importantly, none of your data is ever sent to any server.
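The point of the new `provider` tag is per-provider reporting. A minimal sketch of querying it, using the same `usage.provider(...)` accessor shown in the README example above; the provider names here are illustrative and must match whatever you passed to `tokenator_openai`:

```python
from tokenator import usage

# Aggregate usage per provider tag via the usage.provider(...) accessor
# from the README example. These names are illustrative; they only
# return data if you wrapped a client with the matching provider=.
for name in ("openai", "xai", "perplexity"):
    print(name, usage.provider(name))
```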
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "tokenator"
-version = "0.1.11"
+version = "0.1.13"
 description = "Token usage tracking wrapper for LLMs"
 authors = ["Ujjwal Maheshwari <your.email@example.com>"]
 readme = "README.md"
@@ -14,7 +14,9 @@ logger = logging.getLogger(__name__)
 
 
 class BaseOpenAIWrapper(BaseWrapper):
-    provider = "openai"
+    def __init__(self, client, db_path=None, provider: str = "openai"):
+        super().__init__(client, db_path)
+        self.provider = provider
 
     def _process_response_usage(
         self, response: ResponseType
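Moving `provider` from a class attribute to a constructor argument means each wrapper instance carries its own tag, so differently tagged clients can coexist in one process. A minimal sketch of the effect, assuming `OPENAI_API_KEY` and `XAI_API_KEY` are set in the environment (base URL taken from the README example):

```python
import os

from openai import OpenAI
from tokenator import tokenator_openai

# provider is now stored on the instance rather than shared on the class,
# so these two wrappers are tracked under different names side by side.
openai_client = tokenator_openai(OpenAI())  # provider defaults to "openai"
xai_client = tokenator_openai(
    OpenAI(api_key=os.getenv("XAI_API_KEY"), base_url="https://api.x.ai/v1"),
    provider="xai",
)

assert openai_client.provider == "openai"
assert xai_client.provider == "xai"
```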
@@ -134,6 +136,7 @@ class AsyncOpenAIWrapper(BaseOpenAIWrapper):
 def tokenator_openai(
     client: OpenAI,
     db_path: Optional[str] = None,
+    provider: str = "openai",
 ) -> OpenAIWrapper: ...
 
 
@@ -141,23 +144,26 @@ def tokenator_openai(
 def tokenator_openai(
     client: AsyncOpenAI,
     db_path: Optional[str] = None,
+    provider: str = "openai",
 ) -> AsyncOpenAIWrapper: ...
 
 
 def tokenator_openai(
     client: Union[OpenAI, AsyncOpenAI],
     db_path: Optional[str] = None,
+    provider: str = "openai",
 ) -> Union[OpenAIWrapper, AsyncOpenAIWrapper]:
     """Create a token-tracking wrapper for an OpenAI client.
 
     Args:
         client: OpenAI or AsyncOpenAI client instance
         db_path: Optional path to SQLite database for token tracking
+        provider: Provider name, defaults to "openai"
     """
     if isinstance(client, OpenAI):
-        return OpenAIWrapper(client=client, db_path=db_path)
+        return OpenAIWrapper(client=client, db_path=db_path, provider=provider)
 
     if isinstance(client, AsyncOpenAI):
-        return AsyncOpenAIWrapper(client=client, db_path=db_path)
+        return AsyncOpenAIWrapper(client=client, db_path=db_path, provider=provider)
 
     raise ValueError("Client must be an instance of OpenAI or AsyncOpenAI")
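The overloads above show that `provider` also applies to async clients, although the README only demonstrates the sync path. A hedged sketch, reusing the xAI settings from the README example and assuming the wrapper proxies the async client's methods unchanged, as the sync examples do:

```python
import asyncio
import os

from openai import AsyncOpenAI
from tokenator import tokenator_openai, usage


async def main():
    # tokenator_openai dispatches on the client type, so an AsyncOpenAI
    # instance comes back as an AsyncOpenAIWrapper with the same provider tag.
    client = tokenator_openai(
        AsyncOpenAI(api_key=os.getenv("XAI_API_KEY"), base_url="https://api.x.ai/v1"),
        provider="xai",
    )
    response = await client.chat.completions.create(
        model="grok-2-latest",  # model name taken from the README example
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)
    print(usage.last_execution())


asyncio.run(main())
```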