tokenator 0.1.10__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tokenator/anthropic/client_anthropic.py +0 -1
- tokenator/base_wrapper.py +1 -1
- {tokenator-0.1.10.dist-info → tokenator-0.1.11.dist-info}/METADATA +57 -4
- {tokenator-0.1.10.dist-info → tokenator-0.1.11.dist-info}/RECORD +6 -6
- {tokenator-0.1.10.dist-info → tokenator-0.1.11.dist-info}/WHEEL +1 -1
- {tokenator-0.1.10.dist-info → tokenator-0.1.11.dist-info}/LICENSE +0 -0
@@ -71,7 +71,6 @@ def _create_usage_callback(execution_id, log_usage_fn):
|
|
71
71
|
usage_data.usage.prompt_tokens += chunk.message.usage.input_tokens
|
72
72
|
usage_data.usage.completion_tokens += chunk.message.usage.output_tokens
|
73
73
|
elif isinstance(chunk, RawMessageDeltaEvent):
|
74
|
-
usage_data.usage.prompt_tokens += chunk.usage.input_tokens
|
75
74
|
usage_data.usage.completion_tokens += chunk.usage.output_tokens
|
76
75
|
|
77
76
|
usage_data.usage.total_tokens = usage_data.usage.prompt_tokens + usage_data.usage.completion_tokens
|
tokenator/base_wrapper.py
CHANGED
@@ -1,6 +1,6 @@
|
|
1
|
-
Metadata-Version: 2.
|
1
|
+
Metadata-Version: 2.3
|
2
2
|
Name: tokenator
|
3
|
-
Version: 0.1.
|
3
|
+
Version: 0.1.11
|
4
4
|
Summary: Token usage tracking wrapper for LLMs
|
5
5
|
License: MIT
|
6
6
|
Author: Ujjwal Maheshwari
|
@@ -20,12 +20,12 @@ Requires-Dist: requests (>=2.32.3,<3.0.0)
|
|
20
20
|
Requires-Dist: sqlalchemy (>=2.0.0,<3.0.0)
|
21
21
|
Description-Content-Type: text/markdown
|
22
22
|
|
23
|
-
# Tokenator :
|
23
|
+
# Tokenator : Track and analyze LLM token usage and cost
|
24
24
|
|
25
25
|
Have you ever wondered about :
|
26
26
|
- How many tokens does your AI agent consume?
|
27
27
|
- How much does it cost to run a complex AI workflow with multiple LLM providers?
|
28
|
-
- How much money did
|
28
|
+
- How much money/tokens did you spend today on developing with LLMs?
|
29
29
|
|
30
30
|
Fear not, tokenator is here! With tokenator's easy to use API, you can start tracking LLM usage in a matter of minutes.
|
31
31
|
|
@@ -57,6 +57,9 @@ response = client.chat.completions.create(
|
|
57
57
|
)
|
58
58
|
```
|
59
59
|
|
60
|
+
Works with AsyncOpenAI and `stream=True` as well!
|
61
|
+
Note : When streaming, don't forget to add `stream_options={"include_usage": True}` to the `create()` call!
|
62
|
+
|
60
63
|
### Cost Analysis
|
61
64
|
|
62
65
|
```python
|
@@ -120,6 +123,56 @@ print(cost.last_hour().model_dump_json(indent=4))
|
|
120
123
|
- Minimal memory footprint
|
121
124
|
- Minimal latency footprint
|
122
125
|
|
126
|
+
### Anthropic
|
127
|
+
|
128
|
+
```python
|
129
|
+
from anthropic import Anthropic, AsyncAnthropic
|
130
|
+
from tokenator import tokenator_anthropic
|
131
|
+
|
132
|
+
anthropic_client = AsyncAnthropic(api_key="your-api-key")
|
133
|
+
|
134
|
+
# Wrap it with Tokenator
|
135
|
+
client = tokenator_anthropic(anthropic_client)
|
136
|
+
|
137
|
+
# Use it exactly like the Anthropic client
|
138
|
+
response = await client.messages.create(
|
139
|
+
model="claude-3-5-haiku-20241022",
|
140
|
+
messages=[{"role": "user", "content": "hello how are you"}],
|
141
|
+
max_tokens=20,
|
142
|
+
)
|
143
|
+
|
144
|
+
print(response)
|
145
|
+
|
146
|
+
print(usage.last_execution().model_dump_json(indent=4))
|
147
|
+
"""
|
148
|
+
{
|
149
|
+
"total_cost": 0.0001,
|
150
|
+
"total_tokens": 23,
|
151
|
+
"prompt_tokens": 10,
|
152
|
+
"completion_tokens": 13,
|
153
|
+
"providers": [
|
154
|
+
{
|
155
|
+
"total_cost": 0.0001,
|
156
|
+
"total_tokens": 23,
|
157
|
+
"prompt_tokens": 10,
|
158
|
+
"completion_tokens": 13,
|
159
|
+
"provider": "anthropic",
|
160
|
+
"models": [
|
161
|
+
{
|
162
|
+
"total_cost": 0.0004,
|
163
|
+
"total_tokens": 79,
|
164
|
+
"prompt_tokens": 52,
|
165
|
+
"completion_tokens": 27,
|
166
|
+
"model": "claude-3-5-haiku-20241022"
|
167
|
+
}
|
168
|
+
]
|
169
|
+
}
|
170
|
+
]
|
171
|
+
}
|
172
|
+
"""
|
173
|
+
```
|
174
|
+
---
|
175
|
+
|
123
176
|
Most importantly, none of your data is ever sent to any server.
|
124
177
|
|
125
178
|
## License
|
@@ -1,7 +1,7 @@
|
|
1
1
|
tokenator/__init__.py,sha256=bIAPyGAvWreS2i_5tzxJEyX9JlZgAUNxzVk1iHNUhvU,593
|
2
|
-
tokenator/anthropic/client_anthropic.py,sha256=
|
2
|
+
tokenator/anthropic/client_anthropic.py,sha256=fnjWz_Kf8D0GUTudkZNeSmH9ueCGFLDSBDz1U8Jri3Y,5861
|
3
3
|
tokenator/anthropic/stream_interceptors.py,sha256=4VHC_-WkG3Pa10YizmFLrHcbz0Tm2MR_YB5-uohKp5A,5221
|
4
|
-
tokenator/base_wrapper.py,sha256=
|
4
|
+
tokenator/base_wrapper.py,sha256=IO344KWbRswQy4vG_pBxWPR7Wp7K-4mlgmS3SCYGep8,2467
|
5
5
|
tokenator/create_migrations.py,sha256=k9IHiGK21dLTA8MYNsuhO0-kUVIcMSViMFYtY4WU2Rw,730
|
6
6
|
tokenator/migrations/env.py,sha256=JoF5MJ4ae0wJW5kdBHuFlG3ZqeCCDvbMcU8fNA_a6hM,1396
|
7
7
|
tokenator/migrations/script.py.mako,sha256=nJL-tbLQE0Qy4P9S4r4ntNAcikPtoFUlvXe6xvm9ot8,635
|
@@ -13,7 +13,7 @@ tokenator/openai/stream_interceptors.py,sha256=ez1MnjRZW_rEalv2SIPAvrU9oMD6OJoD9
|
|
13
13
|
tokenator/schemas.py,sha256=Ye8hqZlrm3Gh2FyvOVX-hWCpKynWxS58QQRQMfDtIAQ,2114
|
14
14
|
tokenator/usage.py,sha256=eTWfcRrTLop-30FmwHpi7_GwCJxU6Qfji374hG1Qptw,8476
|
15
15
|
tokenator/utils.py,sha256=xg9l2GV1yJL1BlxKL1r8CboABWDslf3G5rGQEJSjFrE,1973
|
16
|
-
tokenator-0.1.
|
17
|
-
tokenator-0.1.
|
18
|
-
tokenator-0.1.
|
19
|
-
tokenator-0.1.
|
16
|
+
tokenator-0.1.11.dist-info/LICENSE,sha256=wdG-B6-ODk8RQ4jq5uXSn0w1UWTzCH_MMyvh7AwtGns,1074
|
17
|
+
tokenator-0.1.11.dist-info/METADATA,sha256=bXOp-V9bD3dI3mM5lSv1MruAo00RL-2bOZ0kjQUmTOQ,4446
|
18
|
+
tokenator-0.1.11.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
|
19
|
+
tokenator-0.1.11.dist-info/RECORD,,
|
File without changes
|