tokencostauto 0.1.78.tar.gz → 0.1.80.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
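For orientation: every hunk below except the PKG-INFO and pyproject.toml version bumps touches the package's bundled model_prices.json, which maps a model name to per-token USD rates. A minimal sketch of how one of those entries can be turned into a dollar figure, reading the JSON directly with the standard library (the file path, the "o3" key, and the token counts are assumptions for illustration, not the package's own API; the repriced block in the first JSON hunk sits just above "o3-2025-04-16", so it is presumably the "o3" entry):

    import json

    # Load the pricing table shipped with the package (path is an assumption;
    # point it at wherever model_prices.json lives in your environment).
    with open("model_prices.json") as f:
        prices = json.load(f)

    entry = prices["o3"]  # assumed key; one of the models repriced in this release

    # Hypothetical token counts for a single chat call.
    prompt_tokens, completion_tokens = 1_200, 350

    cost_usd = (
        prompt_tokens * entry["input_cost_per_token"]
        + completion_tokens * entry["output_cost_per_token"]
    )
    print(f"${cost_usd:.6f}")  # 1200 * 2e-06 + 350 * 8e-06 = $0.0052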
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.78
+ Version: 0.1.80
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo
@@ -11,7 +11,7 @@ tokencostauto = ["model_prices.json"]
  [project]

  name = "tokencostauto"
- version = "0.1.78"
+ version = "0.1.80"

  authors = [
  { name = "Trisha Pan", email = "trishaepan@gmail.com" },
@@ -11157,9 +11157,9 @@
  "max_tokens": 100000,
  "max_input_tokens": 200000,
  "max_output_tokens": 100000,
- "input_cost_per_token": 1e-05,
- "output_cost_per_token": 4e-05,
- "cache_read_input_token_cost": 2.5e-06,
+ "input_cost_per_token": 2e-06,
+ "output_cost_per_token": 8e-06,
+ "cache_read_input_token_cost": 5e-07,
  "litellm_provider": "openai",
  "mode": "chat",
  "supports_function_calling": true,
@@ -11169,15 +11169,28 @@
  "supports_prompt_caching": true,
  "supports_response_schema": true,
  "supports_reasoning": true,
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supported_endpoints": [
+ "/v1/responses",
+ "/v1/chat/completions",
+ "/v1/completions",
+ "/v1/batch"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ]
  },
  "o3-2025-04-16": {
  "max_tokens": 100000,
  "max_input_tokens": 200000,
  "max_output_tokens": 100000,
- "input_cost_per_token": 1e-05,
- "output_cost_per_token": 4e-05,
- "cache_read_input_token_cost": 2.5e-06,
+ "input_cost_per_token": 2e-06,
+ "output_cost_per_token": 8e-06,
+ "cache_read_input_token_cost": 5e-07,
  "litellm_provider": "openai",
  "mode": "chat",
  "supports_function_calling": true,
@@ -11187,7 +11200,20 @@
  "supports_prompt_caching": true,
  "supports_response_schema": true,
  "supports_reasoning": true,
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supported_endpoints": [
+ "/v1/responses",
+ "/v1/chat/completions",
+ "/v1/completions",
+ "/v1/batch"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ]
  },
  "o4-mini": {
  "max_tokens": 100000,
@@ -12426,6 +12452,9 @@
  "supported_output_modalities": [
  "text"
  ],
+ "supported_regions": [
+ "global"
+ ],
  "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview",
  "supports_parallel_function_calling": true,
  "supports_web_search": true
@@ -14389,5 +14418,143 @@
  "supports_audio_output": true,
  "supports_system_messages": true,
  "supports_tool_choice": true
+ },
+ "o3-pro": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 2e-05,
+ "input_cost_per_token_batches": 1e-05,
+ "output_cost_per_token_batches": 4e-05,
+ "output_cost_per_token": 8e-05,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true,
+ "supported_endpoints": [
+ "/v1/responses",
+ "/v1/batch"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ]
+ },
+ "o3-pro-2025-06-10": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 2e-05,
+ "input_cost_per_token_batches": 1e-05,
+ "output_cost_per_token_batches": 4e-05,
+ "output_cost_per_token": 8e-05,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true,
+ "supported_endpoints": [
+ "/v1/responses",
+ "/v1/batch"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ]
+ },
+ "mistral/magistral-medium-2506": {
+ "max_tokens": 40000,
+ "max_input_tokens": 40000,
+ "max_output_tokens": 40000,
+ "input_cost_per_token": 2e-06,
+ "output_cost_per_token": 5e-06,
+ "litellm_provider": "mistral",
+ "mode": "chat",
+ "source": "https://mistral.ai/news/magistral",
+ "supports_function_calling": true,
+ "supports_assistant_prefill": true,
+ "supports_tool_choice": true
+ },
+ "mistral/magistral-small-2506": {
+ "max_tokens": 40000,
+ "max_input_tokens": 40000,
+ "max_output_tokens": 40000,
+ "input_cost_per_token": 0.0,
+ "output_cost_per_token": 0.0,
+ "litellm_provider": "mistral",
+ "mode": "chat",
+ "source": "https://mistral.ai/news/magistral",
+ "supports_function_calling": true,
+ "supports_assistant_prefill": true,
+ "supports_tool_choice": true
+ },
+ "vertex_ai/claude-opus-4": {
+ "max_tokens": 32000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 32000,
+ "input_cost_per_token": 1.5e-05,
+ "output_cost_per_token": 7.5e-05,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.01,
+ "search_context_size_medium": 0.01,
+ "search_context_size_high": 0.01
+ },
+ "cache_creation_input_token_cost": 1.875e-05,
+ "cache_read_input_token_cost": 1.5e-06,
+ "litellm_provider": "vertex_ai-anthropic_models",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "tool_use_system_prompt_tokens": 159,
+ "supports_assistant_prefill": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supports_reasoning": true,
+ "supports_computer_use": true
+ },
+ "vertex_ai/claude-sonnet-4": {
+ "max_tokens": 64000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 64000,
+ "input_cost_per_token": 3e-06,
+ "output_cost_per_token": 1.5e-05,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.01,
+ "search_context_size_medium": 0.01,
+ "search_context_size_high": 0.01
+ },
+ "cache_creation_input_token_cost": 3.75e-06,
+ "cache_read_input_token_cost": 3e-07,
+ "litellm_provider": "vertex_ai-anthropic_models",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "tool_use_system_prompt_tokens": 159,
+ "supports_assistant_prefill": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supports_reasoning": true,
+ "supports_computer_use": true
  }
  }
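The new vertex_ai/claude-opus-4 and vertex_ai/claude-sonnet-4 entries carry separate cache_creation_input_token_cost and cache_read_input_token_cost fields alongside the base input rate. A hedged sketch of how a cost calculation might split one call's tokens across those buckets (the rates are copied from the diff above; the split and token counts are illustrative assumptions, not the package's own accounting):

    entry = {  # vertex_ai/claude-sonnet-4 rates from the diff above
        "input_cost_per_token": 3e-06,
        "output_cost_per_token": 1.5e-05,
        "cache_creation_input_token_cost": 3.75e-06,
        "cache_read_input_token_cost": 3e-07,
    }

    # Illustrative breakdown for one call: freshly billed input tokens,
    # tokens written to the prompt cache, tokens served from the cache, output.
    fresh_in, cache_write, cache_hit, out = 800, 2_000, 6_000, 500

    cost_usd = (
        fresh_in * entry["input_cost_per_token"]
        + cache_write * entry["cache_creation_input_token_cost"]
        + cache_hit * entry["cache_read_input_token_cost"]
        + out * entry["output_cost_per_token"]
    )
    print(f"${cost_usd:.6f}")  # 0.0024 + 0.0075 + 0.0018 + 0.0075 = $0.0192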
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.78
+ Version: 0.1.80
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo