tokencostauto-0.1.78-py3-none-any.whl → tokencostauto-0.1.80-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tokencostauto/model_prices.json +175 -8
- {tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/METADATA +1 -1
- tokencostauto-0.1.80.dist-info/RECORD +9 -0
- tokencostauto-0.1.78.dist-info/RECORD +0 -9
- {tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/WHEEL +0 -0
- {tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/licenses/LICENSE +0 -0
- {tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/top_level.txt +0 -0
tokencostauto/model_prices.json
CHANGED
@@ -11157,9 +11157,9 @@
     "max_tokens": 100000,
     "max_input_tokens": 200000,
     "max_output_tokens": 100000,
-    "input_cost_per_token":
-    "output_cost_per_token":
-    "cache_read_input_token_cost":
+    "input_cost_per_token": 2e-06,
+    "output_cost_per_token": 8e-06,
+    "cache_read_input_token_cost": 5e-07,
     "litellm_provider": "openai",
     "mode": "chat",
     "supports_function_calling": true,
@@ -11169,15 +11169,28 @@
     "supports_prompt_caching": true,
     "supports_response_schema": true,
     "supports_reasoning": true,
-    "supports_tool_choice": true
+    "supports_tool_choice": true,
+    "supported_endpoints": [
+      "/v1/responses",
+      "/v1/chat/completions",
+      "/v1/completions",
+      "/v1/batch"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text"
+    ]
   },
   "o3-2025-04-16": {
     "max_tokens": 100000,
     "max_input_tokens": 200000,
     "max_output_tokens": 100000,
-    "input_cost_per_token":
-    "output_cost_per_token":
-    "cache_read_input_token_cost":
+    "input_cost_per_token": 2e-06,
+    "output_cost_per_token": 8e-06,
+    "cache_read_input_token_cost": 5e-07,
     "litellm_provider": "openai",
     "mode": "chat",
     "supports_function_calling": true,
@@ -11187,7 +11200,20 @@
     "supports_prompt_caching": true,
     "supports_response_schema": true,
     "supports_reasoning": true,
-    "supports_tool_choice": true
+    "supports_tool_choice": true,
+    "supported_endpoints": [
+      "/v1/responses",
+      "/v1/chat/completions",
+      "/v1/completions",
+      "/v1/batch"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text"
+    ]
   },
   "o4-mini": {
     "max_tokens": 100000,
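The new o3 price fields above work out to $2 per million input tokens, $8 per million output tokens, and $0.50 per million cached input tokens. A minimal Python sketch of that arithmetic; the request token counts are illustrative, not taken from the diff:

```python
# Per-token prices from the "o3" / "o3-2025-04-16" entries added above.
input_cost_per_token = 2e-06
output_cost_per_token = 8e-06
cache_read_input_token_cost = 5e-07

print(input_cost_per_token * 1_000_000)   # 2.0 -> $2 per 1M input tokens
print(output_cost_per_token * 1_000_000)  # 8.0 -> $8 per 1M output tokens

# Illustrative request: 10,000 prompt tokens (2,000 served from cache),
# 1,500 completion tokens.
prompt_tokens, cached_tokens, completion_tokens = 10_000, 2_000, 1_500
cost = ((prompt_tokens - cached_tokens) * input_cost_per_token
        + cached_tokens * cache_read_input_token_cost
        + completion_tokens * output_cost_per_token)
print(f"${cost:.4f}")  # $0.0290
```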
@@ -12426,6 +12452,9 @@
     "supported_output_modalities": [
       "text"
     ],
+    "supported_regions": [
+      "global"
+    ],
     "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview",
     "supports_parallel_function_calling": true,
     "supports_web_search": true
@@ -14389,5 +14418,143 @@
     "supports_audio_output": true,
     "supports_system_messages": true,
     "supports_tool_choice": true
+  },
+  "o3-pro": {
+    "max_tokens": 100000,
+    "max_input_tokens": 200000,
+    "max_output_tokens": 100000,
+    "input_cost_per_token": 2e-05,
+    "input_cost_per_token_batches": 1e-05,
+    "output_cost_per_token_batches": 4e-05,
+    "output_cost_per_token": 8e-05,
+    "litellm_provider": "openai",
+    "mode": "chat",
+    "supports_function_calling": true,
+    "supports_parallel_function_calling": false,
+    "supports_vision": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_response_schema": true,
+    "supports_reasoning": true,
+    "supports_tool_choice": true,
+    "supported_endpoints": [
+      "/v1/responses",
+      "/v1/batch"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text"
+    ]
+  },
+  "o3-pro-2025-06-10": {
+    "max_tokens": 100000,
+    "max_input_tokens": 200000,
+    "max_output_tokens": 100000,
+    "input_cost_per_token": 2e-05,
+    "input_cost_per_token_batches": 1e-05,
+    "output_cost_per_token_batches": 4e-05,
+    "output_cost_per_token": 8e-05,
+    "litellm_provider": "openai",
+    "mode": "chat",
+    "supports_function_calling": true,
+    "supports_parallel_function_calling": false,
+    "supports_vision": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_response_schema": true,
+    "supports_reasoning": true,
+    "supports_tool_choice": true,
+    "supported_endpoints": [
+      "/v1/responses",
+      "/v1/batch"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text"
+    ]
+  },
+  "mistral/magistral-medium-2506": {
+    "max_tokens": 40000,
+    "max_input_tokens": 40000,
+    "max_output_tokens": 40000,
+    "input_cost_per_token": 2e-06,
+    "output_cost_per_token": 5e-06,
+    "litellm_provider": "mistral",
+    "mode": "chat",
+    "source": "https://mistral.ai/news/magistral",
+    "supports_function_calling": true,
+    "supports_assistant_prefill": true,
+    "supports_tool_choice": true
+  },
+  "mistral/magistral-small-2506": {
+    "max_tokens": 40000,
+    "max_input_tokens": 40000,
+    "max_output_tokens": 40000,
+    "input_cost_per_token": 0.0,
+    "output_cost_per_token": 0.0,
+    "litellm_provider": "mistral",
+    "mode": "chat",
+    "source": "https://mistral.ai/news/magistral",
+    "supports_function_calling": true,
+    "supports_assistant_prefill": true,
+    "supports_tool_choice": true
+  },
+  "vertex_ai/claude-opus-4": {
+    "max_tokens": 32000,
+    "max_input_tokens": 200000,
+    "max_output_tokens": 32000,
+    "input_cost_per_token": 1.5e-05,
+    "output_cost_per_token": 7.5e-05,
+    "search_context_cost_per_query": {
+      "search_context_size_low": 0.01,
+      "search_context_size_medium": 0.01,
+      "search_context_size_high": 0.01
+    },
+    "cache_creation_input_token_cost": 1.875e-05,
+    "cache_read_input_token_cost": 1.5e-06,
+    "litellm_provider": "vertex_ai-anthropic_models",
+    "mode": "chat",
+    "supports_function_calling": true,
+    "supports_vision": true,
+    "tool_use_system_prompt_tokens": 159,
+    "supports_assistant_prefill": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_response_schema": true,
+    "supports_tool_choice": true,
+    "supports_reasoning": true,
+    "supports_computer_use": true
+  },
+  "vertex_ai/claude-sonnet-4": {
+    "max_tokens": 64000,
+    "max_input_tokens": 200000,
+    "max_output_tokens": 64000,
+    "input_cost_per_token": 3e-06,
+    "output_cost_per_token": 1.5e-05,
+    "search_context_cost_per_query": {
+      "search_context_size_low": 0.01,
+      "search_context_size_medium": 0.01,
+      "search_context_size_high": 0.01
+    },
+    "cache_creation_input_token_cost": 3.75e-06,
+    "cache_read_input_token_cost": 3e-07,
+    "litellm_provider": "vertex_ai-anthropic_models",
+    "mode": "chat",
+    "supports_function_calling": true,
+    "supports_vision": true,
+    "tool_use_system_prompt_tokens": 159,
+    "supports_assistant_prefill": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_response_schema": true,
+    "supports_tool_choice": true,
+    "supports_reasoning": true,
+    "supports_computer_use": true
   }
 }
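The new o3-pro, magistral, and Claude 4 entries are plain JSON rows, so they can be costed out straight from the shipped model_prices.json. A hedged sketch using only the standard library and the field names visible above; the token counts and the use of importlib.resources to locate the bundled file are assumptions, not the package's documented API:

```python
import json
from importlib.resources import files

# Hedged sketch: load the bundled price table (listed in RECORD as
# tokencostauto/model_prices.json) and cost out a call to one of the
# models added in 0.1.80. Token counts are illustrative.
prices = json.loads(files("tokencostauto").joinpath("model_prices.json").read_text())

entry = prices["o3-pro"]
prompt_tokens, completion_tokens = 5_000, 800
cost = (prompt_tokens * entry["input_cost_per_token"]
        + completion_tokens * entry["output_cost_per_token"])
print(f"o3-pro: ${cost:.4f}")  # 5000*2e-05 + 800*8e-05 = $0.1640
```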
{tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.78
+Version: 0.1.80
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
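The only METADATA change is the version bump from 0.1.78 to 0.1.80. A quick standard-library check of which release is actually installed after upgrading:

```python
from importlib.metadata import version

# Confirm the installed release of tokencostauto after the upgrade.
print(version("tokencostauto"))  # expected: "0.1.80"
```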
tokencostauto-0.1.80.dist-info/RECORD
ADDED
@@ -0,0 +1,9 @@
+tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
+tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
+tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
+tokencostauto/model_prices.json,sha256=FzWkQt7VCW9R1-sexo3DF2oS1mQJnxKZM2axoCjDiWc,519974
+tokencostauto-0.1.80.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
+tokencostauto-0.1.80.dist-info/METADATA,sha256=0btJSoUWXSsKaIUhLo7WqA376aYAIqWT7E4wa4Yn0N4,204075
+tokencostauto-0.1.80.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tokencostauto-0.1.80.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
+tokencostauto-0.1.80.dist-info/RECORD,,
tokencostauto-0.1.78.dist-info/RECORD
DELETED
@@ -1,9 +0,0 @@
-tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
-tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
-tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
-tokencostauto/model_prices.json,sha256=lWT-d25jGHcf8YPnebNSulqzD-mJr5qdEO6E75jU6RQ,514472
-tokencostauto-0.1.78.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
-tokencostauto-0.1.78.dist-info/METADATA,sha256=43fXq7t9oBQ56GTYPtg5rN1HM7cSZQaFINL3eU0N3PQ,204075
-tokencostauto-0.1.78.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-tokencostauto-0.1.78.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
-tokencostauto-0.1.78.dist-info/RECORD,,
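Each RECORD row is `path,sha256=<urlsafe base64 digest, unpadded>,size`; because the dist-info directory name carries the version, the 0.1.78 RECORD is removed and a 0.1.80 RECORD is added even though only model_prices.json and METADATA actually changed hashes. A hedged sketch of checking one installed file against its new RECORD row (the site-packages path below is illustrative):

```python
import base64
import hashlib
from pathlib import Path

# Hedged sketch: recompute a RECORD-style hash (urlsafe base64 of the SHA-256
# digest, '=' padding stripped) and compare it to the 0.1.80 RECORD row for
# model_prices.json. The install path is illustrative.
expected_hash, expected_size = "sha256=FzWkQt7VCW9R1-sexo3DF2oS1mQJnxKZM2axoCjDiWc", 519974
data = Path("site-packages/tokencostauto/model_prices.json").read_bytes()
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
print(f"sha256={digest}" == expected_hash, len(data) == expected_size)
```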
{tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/WHEEL
File without changes
{tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/licenses/LICENSE
File without changes
{tokencostauto-0.1.78.dist-info → tokencostauto-0.1.80.dist-info}/top_level.txt
File without changes