tokenator 0.1.15__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tokenator/utils.py CHANGED
@@ -8,19 +8,22 @@ from pathlib import Path
8
8
 
9
9
  logger = logging.getLogger(__name__)
10
10
 
11
+
11
12
  def is_notebook() -> bool:
12
13
  try:
13
- from IPython import get_ipython # type: ignore
14
+ from IPython import get_ipython # type: ignore
15
+
14
16
  shell = get_ipython().__class__.__name__
15
- if shell == 'ZMQInteractiveShell':
16
- return True # Jupyter notebook or qtconsole
17
- elif shell == 'TerminalInteractiveShell':
17
+ if shell == "ZMQInteractiveShell":
18
+ return True # Jupyter notebook or qtconsole
19
+ elif shell == "TerminalInteractiveShell":
18
20
  return False # Terminal running IPython
19
21
  else:
20
22
  return False # Other type (?)
21
23
  except NameError:
22
24
  return False
23
25
 
26
+
24
27
  def is_colab() -> bool:
25
28
  """Check if running in Google Colab."""
26
29
  try:
@@ -1,10 +1,10 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: tokenator
3
- Version: 0.1.15
3
+ Version: 0.2.0
4
4
  Summary: Token usage tracking wrapper for LLMs
5
5
  License: MIT
6
6
  Author: Ujjwal Maheshwari
7
- Author-email: your.email@example.com
7
+ Author-email: ujjwalm29@gmail.com
8
8
  Requires-Python: >=3.9,<4.0
9
9
  Classifier: License :: OSI Approved :: MIT License
10
10
  Classifier: Programming Language :: Python :: 3
@@ -15,23 +15,28 @@ Classifier: Programming Language :: Python :: 3.12
15
15
  Classifier: Programming Language :: Python :: 3.13
16
16
  Requires-Dist: alembic (>=1.13.0,<2.0.0)
17
17
  Requires-Dist: anthropic (>=0.43.0,<0.44.0)
18
+ Requires-Dist: google-genai (>=1.3.0,<2.0.0)
18
19
  Requires-Dist: ipython
19
20
  Requires-Dist: openai (>=1.59.0,<2.0.0)
20
21
  Requires-Dist: requests (>=2.32.3,<3.0.0)
21
22
  Requires-Dist: sqlalchemy (>=2.0.0,<3.0.0)
22
23
  Description-Content-Type: text/markdown
23
24
 
24
- # Tokenator : Track and analyze LLM token usage and cost
25
+ # Tokenator : Track, analyze, compare LLM token usage and costs
25
26
 
26
27
  Have you ever wondered :
27
28
  - How many tokens does your AI agent consume?
28
- - How much does it cost to do run a complex AI workflow with multiple LLM providers?
29
+ - How much does it cost to run a complex AI workflow with multiple LLM providers?
30
+ - Which LLM is more cost effective for my use case?
29
31
  - How much money/tokens did you spend today on developing with LLMs?
30
32
 
31
- Afraid not, tokenator is here! With tokenator's easy to use API, you can start tracking LLM usage in a matter of minutes.
33
+ Afraid not, tokenator is here! With tokenator's easy to use functions, you can start tracking LLM usage in a matter of minutes.
32
34
 
33
35
  Get started with just 3 lines of code!
34
36
 
37
+ Tokenator supports the official SDKs from openai, anthropic and google-genai (the new one).
38
+ LLM providers which use the openai SDK like perplexity, deepseek and xAI are also supported.
39
+
35
40
  ## Installation
36
41
 
37
42
  ```bash
@@ -114,6 +119,10 @@ print(cost.last_hour().model_dump_json(indent=4))
114
119
  }
115
120
  ```
116
121
 
122
+ ## Cookbooks
123
+
124
+ Want more code, example use cases and ideas? Check out our amazing [cookbooks](https://github.com/ujjwalm29/tokenator/tree/main/docs/cookbooks)!
125
+
117
126
  ## Features
118
127
 
119
128
  - Drop-in replacement for OpenAI, Anthropic client
@@ -173,6 +182,54 @@ print(usage.last_execution().model_dump_json(indent=4))
173
182
  """
174
183
  ```
175
184
 
185
+ ### Google (Gemini - through AI studio)
186
+
187
+ ```python
188
+ from google import genai
189
+ from tokenator import tokenator_gemini
190
+
191
+ gemini_client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
192
+
193
+ # Wrap it with Tokenator
194
+ client = tokenator_gemini(gemini_client)
195
+
196
+ # Use it exactly like the google-genai client
197
+ response = client.models.generate_content(
198
+ model="gemini-2.0-flash",
199
+ contents="hello how are you",
200
+ )
201
+
202
+ print(response)
203
+
204
+ print(usage.last_execution().model_dump_json(indent=4))
205
+ """
206
+ {
207
+ "total_cost": 0.0001,
208
+ "total_tokens": 23,
209
+ "prompt_tokens": 10,
210
+ "completion_tokens": 13,
211
+ "providers": [
212
+ {
213
+ "total_cost": 0.0001,
214
+ "total_tokens": 23,
215
+ "prompt_tokens": 10,
216
+ "completion_tokens": 13,
217
+ "provider": "gemini",
218
+ "models": [
219
+ {
220
+ "total_cost": 0.0004,
221
+ "total_tokens": 79,
222
+ "prompt_tokens": 52,
223
+ "completion_tokens": 27,
224
+ "model": "gemini-2.0-flash"
225
+ }
226
+ ]
227
+ }
228
+ ]
229
+ }
230
+ """
231
+ ```
232
+
176
233
  ### xAI
177
234
 
178
235
  You can use xAI models through the `openai` SDK and track usage using `provider` parameter in `tokenator`.
@@ -221,7 +278,7 @@ client = tokenator_openai(perplexity_client, db_path=temp_db, provider="perplexi
221
278
 
222
279
  # Use it exactly like the OpenAI client but with perplexity models
223
280
  response = client.chat.completions.create(
224
- model="llama-3.1-sonar-small-128k-online",
281
+ model="sonar",
225
282
  messages=[{"role": "user", "content": "Hello!"}]
226
283
  )
227
284
 
@@ -1,8 +1,11 @@
1
- tokenator/__init__.py,sha256=AEPE73UGB_TeNLhro3eY0hU8yy6T-_6AyDls8vWApnE,465
1
+ tokenator/__init__.py,sha256=NB2UOm5oDxj4KLabed4PTSGGzkXvEYUSolOo44ei7XQ,559
2
2
  tokenator/anthropic/client_anthropic.py,sha256=2oxTLb5-sPK_KL-OumCjE4wPVI8U_eFyRonn9XjGXJw,7196
3
3
  tokenator/anthropic/stream_interceptors.py,sha256=4VHC_-WkG3Pa10YizmFLrHcbz0Tm2MR_YB5-uohKp5A,5221
4
- tokenator/base_wrapper.py,sha256=EQ49xGduEp05-gj1xyZDasrck4RpComaoKslHxQTwuw,4956
4
+ tokenator/base_wrapper.py,sha256=Qhd7efdasNHyatR95uxIzbKKVgouT3OZ72DJ7ZrHcrQ,5015
5
5
  tokenator/create_migrations.py,sha256=k9IHiGK21dLTA8MYNsuhO0-kUVIcMSViMFYtY4WU2Rw,730
6
+ tokenator/gemini/__init__.py,sha256=XphFSP33w0j3j7oNn2PSHTwdjnqvAxitQs6qe6URDOY,132
7
+ tokenator/gemini/client_gemini.py,sha256=9dJxOG2HczLzDvqepekrU2hY89oHVMRtOQNrvrgV6sQ,8090
8
+ tokenator/gemini/stream_interceptors.py,sha256=i8DEWAsp1MlZ1xVJZGPzkiCjpX9o6oCJgdkL2k2dung,2266
6
9
  tokenator/migrations/env.py,sha256=JoF5MJ4ae0wJW5kdBHuFlG3ZqeCCDvbMcU8fNA_a6hM,1396
7
10
  tokenator/migrations/script.py.mako,sha256=nJL-tbLQE0Qy4P9S4r4ntNAcikPtoFUlvXe6xvm9ot8,635
8
11
  tokenator/migrations/versions/f028b8155fed_adding_detailed_input_and_output_token_.py,sha256=WIZN5HdNRXlRdfpUJpJFaPD4G1s-SgRdTMQl4WDB-hA,2189
@@ -13,9 +16,9 @@ tokenator/openai/client_openai.py,sha256=pbdJ-aZPuJs-7OT1VEv0DW36cCYbRAVKhSQEprx
13
16
  tokenator/openai/stream_interceptors.py,sha256=ez1MnjRZW_rEalv2SIPAvrU9oMD6OJoD9vht-057fDM,5243
14
17
  tokenator/schemas.py,sha256=kBmShqgpQ3W-ILAP1NuCaFgqFplQM4OH0MmJteLqrwI,2371
15
18
  tokenator/state.py,sha256=xdqDC-rlEA88-VgqQqHnAOXQ5pNTpnHcgOtohDIImPY,262
16
- tokenator/usage.py,sha256=QaudrO6uwnMNRn9aCYVPj9yiQHmbdoAVZ9-G4Q1B0fw,20511
17
- tokenator/utils.py,sha256=djoWmAhqH-O2Su3qIcuY-_3Vj1-qPwMcdzwq9IlwiDc,2435
18
- tokenator-0.1.15.dist-info/LICENSE,sha256=wdG-B6-ODk8RQ4jq5uXSn0w1UWTzCH_MMyvh7AwtGns,1074
19
- tokenator-0.1.15.dist-info/METADATA,sha256=dtws3Qwm2iZLCYZv0meqQP80Q49821HdyZgUmDeqDcg,6035
20
- tokenator-0.1.15.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
21
- tokenator-0.1.15.dist-info/RECORD,,
19
+ tokenator/usage.py,sha256=7UMh3dIXf588J0TSOk5CK2BqKolTK2Iu6j7MuyFGLBI,25236
20
+ tokenator/utils.py,sha256=sLC3UxnPWxTFoxuQjGROQHT_POcOKJ-32p8-E0B7hwo,2438
21
+ tokenator-0.2.0.dist-info/LICENSE,sha256=wdG-B6-ODk8RQ4jq5uXSn0w1UWTzCH_MMyvh7AwtGns,1074
22
+ tokenator-0.2.0.dist-info/METADATA,sha256=98IYObi9FhHSYr5jGmkaRdEDERllqdP96KRmxBS1l38,7553
23
+ tokenator-0.2.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
24
+ tokenator-0.2.0.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.0.1
2
+ Generator: poetry-core 2.1.1
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any