tokencostauto-0.1.306.tar.gz → tokencostauto-0.1.310.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tokencostauto-0.1.306/tokencostauto.egg-info → tokencostauto-0.1.310}/PKG-INFO +1 -1
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/pyproject.toml +1 -1
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto/model_prices.json +178 -2
- {tokencostauto-0.1.306 → tokencostauto-0.1.310/tokencostauto.egg-info}/PKG-INFO +1 -1
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/LICENSE +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/MANIFEST.in +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/README.md +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/setup.cfg +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tests/test_costs.py +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto/__init__.py +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto/constants.py +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto/costs.py +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto.egg-info/SOURCES.txt +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto.egg-info/dependency_links.txt +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto.egg-info/requires.txt +0 -0
- {tokencostauto-0.1.306 → tokencostauto-0.1.310}/tokencostauto.egg-info/top_level.txt +0 -0
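The bulk of this release is the model_prices.json refresh (+178 -2) shown below. Entries in that file store rates in USD per single token (or per image, second of video, or query for the non-chat modes), so a consumer turns token counts into dollars by straight multiplication. The following is a minimal standalone sketch of that arithmetic, assuming the JSON is read directly from the package; it does not go through tokencostauto's own helper functions, and the file path and token counts are illustrative.

```python
import json

# Minimal sketch (not tokencostauto's public API): read the bundled price table
# and turn token counts into USD. Path and token counts are illustrative.
with open("tokencostauto/model_prices.json") as f:
    prices = json.load(f)

entry = prices["vertex_ai/codestral-2"]           # one of the entries added in this release
prompt_tokens, completion_tokens = 12_000, 1_500  # assumed usage
usd = (prompt_tokens * entry["input_cost_per_token"]          # 12_000 * 3e-07
       + completion_tokens * entry["output_cost_per_token"])  # 1_500  * 9e-07
print(f"${usd:.5f}")  # -> $0.00495
```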
PKG-INFO
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.306
+Version: 0.1.310
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
```
tokencostauto/model_prices.json
```diff
@@ -15411,7 +15411,7 @@
         "tpm": 800000
     },
     "gemini-2.5-flash": {
-        "cache_read_input_token_cost":
+        "cache_read_input_token_cost": 3e-08,
         "input_cost_per_audio_token": 1e-06,
         "input_cost_per_token": 3e-07,
         "litellm_provider": "vertex_ai-language-models",
```
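In the gemini-2.5-flash entry, the newly populated cache_read_input_token_cost (3e-08) is one tenth of the regular input_cost_per_token (3e-07), so the cached portion of a prompt is billed at roughly 10% of the uncached rate. A small sketch of that split, with the token counts assumed for illustration:

```python
# gemini-2.5-flash rates from this hunk (USD per token).
input_rate, cache_read_rate = 3e-07, 3e-08

# Assumed request: 40,000 prompt tokens, 30,000 of them served from the context cache.
cached, uncached = 30_000, 10_000
prompt_cost = uncached * input_rate + cached * cache_read_rate
print(f"{prompt_cost:.4f}")  # 0.003 + 0.0009 = 0.0039
# For comparison, with no cache hits: 40_000 * 3e-07 = 0.012
```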
tokencostauto/model_prices.json
```diff
@@ -15678,7 +15678,8 @@
         "tpm": 250000
     },
     "gemini-2.5-pro": {
-        "cache_read_input_token_cost":
+        "cache_read_input_token_cost": 1.25e-07,
+        "cache_creation_input_token_cost_above_200k_tokens": 2.5e-07,
         "input_cost_per_token": 1.25e-06,
         "input_cost_per_token_above_200k_tokens": 2.5e-06,
         "litellm_provider": "vertex_ai-language-models",
```
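The gemini-2.5-pro entry now pairs the base input rate (1.25e-06) with input_cost_per_token_above_200k_tokens (2.5e-06) and a matching cache_creation_input_token_cost_above_200k_tokens (2.5e-07). The sketch below shows the resulting arithmetic under the assumption that a prompt crossing 200k tokens is billed entirely at the higher rate, which matches Google's published tiering; a consuming library may apply the threshold differently.

```python
# gemini-2.5-pro input rates from this hunk (USD per token).
BASE, ABOVE_200K = 1.25e-06, 2.5e-06

def prompt_cost(tokens: int) -> float:
    # Assumed tier semantics: a prompt that crosses 200k tokens is billed
    # entirely at the higher rate; libraries may split at the threshold instead.
    rate = ABOVE_200K if tokens > 200_000 else BASE
    return tokens * rate

print(f"{prompt_cost(150_000):.4f}")  # 0.1875
print(f"{prompt_cost(250_000):.4f}")  # 0.6250
```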
tokencostauto/model_prices.json
```diff
@@ -25027,5 +25028,180 @@
         "input_cost_per_query": 0.016,
         "litellm_provider": "tavily",
         "mode": "search"
+    },
+    "vertex_ai/mistralai/codestral-2@001": {
+        "input_cost_per_token": 3e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "max_tokens": 128000,
+        "mode": "chat",
+        "output_cost_per_token": 9e-07,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "vertex_ai/codestral-2": {
+        "input_cost_per_token": 3e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "max_tokens": 128000,
+        "mode": "chat",
+        "output_cost_per_token": 9e-07,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "vertex_ai/codestral-2@001": {
+        "input_cost_per_token": 3e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "max_tokens": 128000,
+        "mode": "chat",
+        "output_cost_per_token": 9e-07,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "vertex_ai/mistralai/codestral-2": {
+        "input_cost_per_token": 3e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "max_tokens": 128000,
+        "mode": "chat",
+        "output_cost_per_token": 9e-07,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "vertex_ai/mistral-medium-3": {
+        "input_cost_per_token": 4e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8191,
+        "max_tokens": 8191,
+        "mode": "chat",
+        "output_cost_per_token": 2e-06,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "vertex_ai/mistral-medium-3@001": {
+        "input_cost_per_token": 4e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8191,
+        "max_tokens": 8191,
+        "mode": "chat",
+        "output_cost_per_token": 2e-06,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "vertex_ai/mistralai/mistral-medium-3": {
+        "input_cost_per_token": 4e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8191,
+        "max_tokens": 8191,
+        "mode": "chat",
+        "output_cost_per_token": 2e-06,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "vertex_ai/mistralai/mistral-medium-3@001": {
+        "input_cost_per_token": 4e-07,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8191,
+        "max_tokens": 8191,
+        "mode": "chat",
+        "output_cost_per_token": 2e-06,
+        "supports_function_calling": true,
+        "supports_tool_choice": true
+    },
+    "openai/sora-2": {
+        "litellm_provider": "openai",
+        "mode": "video_generation",
+        "output_cost_per_video_per_second": 0.1,
+        "source": "https://platform.openai.com/docs/api-reference/videos",
+        "supported_modalities": [
+            "text"
+        ],
+        "supported_output_modalities": [
+            "video"
+        ],
+        "supported_resolutions": [
+            "720x1280",
+            "1280x720"
+        ]
+    },
+    "azure/sora-2": {
+        "litellm_provider": "azure",
+        "mode": "video_generation",
+        "output_cost_per_video_per_second": 0.1,
+        "source": "https://azure.microsoft.com/en-us/products/ai-services/video-generation",
+        "supported_modalities": [
+            "text"
+        ],
+        "supported_output_modalities": [
+            "video"
+        ],
+        "supported_resolutions": [
+            "720x1280",
+            "1280x720"
+        ]
+    },
+    "azure/sora-2-pro": {
+        "litellm_provider": "azure",
+        "mode": "video_generation",
+        "output_cost_per_video_per_second": 0.3,
+        "source": "https://azure.microsoft.com/en-us/products/ai-services/video-generation",
+        "supported_modalities": [
+            "text"
+        ],
+        "supported_output_modalities": [
+            "video"
+        ],
+        "supported_resolutions": [
+            "720x1280",
+            "1280x720"
+        ]
+    },
+    "azure/sora-2-pro-high-res": {
+        "litellm_provider": "azure",
+        "mode": "video_generation",
+        "output_cost_per_video_per_second": 0.5,
+        "source": "https://azure.microsoft.com/en-us/products/ai-services/video-generation",
+        "supported_modalities": [
+            "text"
+        ],
+        "supported_output_modalities": [
+            "video"
+        ],
+        "supported_resolutions": [
+            "1024x1792",
+            "1792x1024"
+        ]
+    },
+    "amazon.titan-image-generator-v1": {
+        "input_cost_per_image": 0.0,
+        "output_cost_per_image": 0.008,
+        "output_cost_per_image_premium_image": 0.01,
+        "output_cost_per_image_above_512_and_512_pixels": 0.01,
+        "output_cost_per_image_above_512_and_512_pixels_and_premium_image": 0.012,
+        "litellm_provider": "bedrock",
+        "mode": "image_generation"
+    },
+    "amazon.titan-image-generator-v2": {
+        "input_cost_per_image": 0.0,
+        "output_cost_per_image": 0.008,
+        "output_cost_per_image_premium_image": 0.01,
+        "output_cost_per_image_above_1024_and_1024_pixels": 0.01,
+        "output_cost_per_image_above_1024_and_1024_pixels_and_premium_image": 0.012,
+        "litellm_provider": "bedrock",
+        "mode": "image_generation"
+    },
+    "vertex_ai/search_api": {
+        "input_cost_per_query": 0.0015,
+        "litellm_provider": "vertex_ai",
+        "mode": "vector_store"
     }
 }
```
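The remaining additions are not priced per token at all: the sora-2 family is billed per second of generated video (output_cost_per_video_per_second), the Titan image generators per image, and vertex_ai/search_api per query. A quick sketch of that arithmetic, with the clip length, image count, and query volume assumed for illustration:

```python
# Rates from the entries added above; usage figures are assumed.
sora_2_per_second = 0.1             # "openai/sora-2": output_cost_per_video_per_second
sora_2_pro_hd_per_second = 0.5      # "azure/sora-2-pro-high-res"
titan_per_image = 0.008             # "amazon.titan-image-generator-v1": output_cost_per_image
search_per_query = 0.0015           # "vertex_ai/search_api": input_cost_per_query

print(f"{8 * sora_2_per_second:.2f}")         # 8-second clip on sora-2        -> 0.80
print(f"{8 * sora_2_pro_hd_per_second:.2f}")  # same clip at pro high-res      -> 4.00
print(f"{25 * titan_per_image:.2f}")          # 25 standard Titan images       -> 0.20
print(f"{1_000 * search_per_query:.2f}")      # 1,000 search queries           -> 1.50
```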
tokencostauto.egg-info/PKG-INFO
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.306
+Version: 0.1.310
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
```