tokenator-0.1.12.tar.gz → tokenator-0.1.13.tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: tokenator
-Version: 0.1.12
+Version: 0.1.13
 Summary: Token usage tracking wrapper for LLMs
 License: MIT
 Author: Ujjwal Maheshwari
@@ -186,7 +186,7 @@ xai_client = OpenAI(
 )
 
 # Wrap it with Tokenator
-client = tokenator_openai(client, db_path=temp_db, provider="xai")
+client = tokenator_openai(xai_client, db_path=temp_db, provider="xai")
 
 # Use it exactly like the OpenAI client but with xAI models
 response = client.chat.completions.create(
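Assembled from this hunk and the README context around it, the corrected xAI snippet looks roughly like the sketch below. Only the `tokenator_openai(xai_client, ...)` call and the surrounding comments come from the diff; the imports, the `XAI_API_KEY` variable, the base URL, the `temp_db` path, and the `grok-2-latest` model name (taken from the old line of the perplexity hunk further down) are filled in here as plausible assumptions.

```python
import os

from openai import OpenAI
from tokenator import tokenator_openai

temp_db = "token_usage.db"  # hypothetical DB path standing in for the README's `temp_db`

# Plain OpenAI client pointed at xAI (env var name and base URL are assumptions)
xai_client = OpenAI(
    api_key=os.getenv("XAI_API_KEY"),
    base_url="https://api.x.ai/v1",
)

# Wrap it with Tokenator; the fix passes `xai_client` instead of the previously undefined `client`
client = tokenator_openai(xai_client, db_path=temp_db, provider="xai")

# Use it exactly like the OpenAI client but with xAI models
response = client.chat.completions.create(
    model="grok-2-latest",
    messages=[{"role": "user", "content": "Hello!"}],
)
```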
@@ -210,17 +210,17 @@ For example, let's see how we can track usage of `perplexity` tokens.
 from openai import OpenAI
 from tokenator import tokenator_openai
 
-xai_client = OpenAI(
+perplexity_client = OpenAI(
     api_key=os.getenv("PERPLEXITY_API_KEY"),
     base_url="https://api.perplexity.ai"
 )
 
 # Wrap it with Tokenator
-client = tokenator_openai(client, db_path=temp_db, provider="perplexity")
+client = tokenator_openai(perplexity_client, db_path=temp_db, provider="perplexity")
 
-# Use it exactly like the OpenAI client but with xAI models
+# Use it exactly like the OpenAI client but with perplexity models
 response = client.chat.completions.create(
-    model="grok-2-latest",
+    model="llama-3.1-sonar-small-128k-online",
     messages=[{"role": "user", "content": "Hello!"}]
 )
 
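For reference, the corrected perplexity example assembled from this hunk reads as follows; the `import os` line and the `temp_db` definition are not shown in the hunk and are added here as assumptions (the README appears to define them earlier).

```python
import os

from openai import OpenAI
from tokenator import tokenator_openai

temp_db = "token_usage.db"  # hypothetical DB path standing in for the README's `temp_db`

perplexity_client = OpenAI(
    api_key=os.getenv("PERPLEXITY_API_KEY"),
    base_url="https://api.perplexity.ai",
)

# Wrap it with Tokenator
client = tokenator_openai(perplexity_client, db_path=temp_db, provider="perplexity")

# Use it exactly like the OpenAI client but with perplexity models
response = client.chat.completions.create(
    model="llama-3.1-sonar-small-128k-online",
    messages=[{"role": "user", "content": "Hello!"}],
)
```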
@@ -164,7 +164,7 @@ xai_client = OpenAI(
 )
 
 # Wrap it with Tokenator
-client = tokenator_openai(client, db_path=temp_db, provider="xai")
+client = tokenator_openai(xai_client, db_path=temp_db, provider="xai")
 
 # Use it exactly like the OpenAI client but with xAI models
 response = client.chat.completions.create(
@@ -188,17 +188,17 @@ For example, let's see how we can track usage of `perplexity` tokens.
 from openai import OpenAI
 from tokenator import tokenator_openai
 
-xai_client = OpenAI(
+perplexity_client = OpenAI(
     api_key=os.getenv("PERPLEXITY_API_KEY"),
     base_url="https://api.perplexity.ai"
 )
 
 # Wrap it with Tokenator
-client = tokenator_openai(client, db_path=temp_db, provider="perplexity")
+client = tokenator_openai(perplexity_client, db_path=temp_db, provider="perplexity")
 
-# Use it exactly like the OpenAI client but with xAI models
+# Use it exactly like the OpenAI client but with perplexity models
 response = client.chat.completions.create(
-    model="grok-2-latest",
+    model="llama-3.1-sonar-small-128k-online",
     messages=[{"role": "user", "content": "Hello!"}]
 )
 
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "tokenator"
-version = "0.1.12"
+version = "0.1.13"
 description = "Token usage tracking wrapper for LLMs"
 authors = ["Ujjwal Maheshwari <your.email@example.com>"]
 readme = "README.md"