tokencostauto 0.1.391.tar.gz → 0.1.395.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
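
Most of the change below is in the bundled model_prices.json table that tokencostauto ships as package data: each entry records per-token USD rates (input_cost_per_token, output_cost_per_token, cache rates) plus context limits and capability flags, so the cost of a call is just those rates multiplied by token counts. A minimal sketch of that arithmetic, assuming the JSON is loaded from the installed package; the helper function and the example token counts are illustrative, not part of the package's public API:

    import json
    from importlib.resources import files

    # Load the bundled price table (shipped as package data per the pyproject entry below).
    prices = json.loads(files("tokencostauto").joinpath("model_prices.json").read_text())

    def estimate_cost_usd(model: str, prompt_tokens: int, completion_tokens: int) -> float:
        """Illustrative helper: per-token USD rates times token counts."""
        entry = prices[model]
        return (prompt_tokens * entry["input_cost_per_token"]
                + completion_tokens * entry["output_cost_per_token"])

    # With the corrected Bedrock Haiku 4.5 rates from this diff:
    # 1000 * 1e-06 + 500 * 5e-06 = 0.0035 USD
    print(estimate_cost_usd("global.anthropic.claude-haiku-4-5-20251001-v1:0", 1000, 500))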
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.391
+ Version: 0.1.395
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo
@@ -11,7 +11,7 @@ tokencostauto = ["model_prices.json"]
  [project]

  name = "tokencostauto"
- version = "0.1.391"
+ version = "0.1.395"

  authors = [
  { name = "Trisha Pan", email = "trishaepan@gmail.com" },
@@ -8758,7 +8758,6 @@
  "output_cost_per_token": 0.0
  },
  "voyage/rerank-2": {
- "input_cost_per_query": 5e-08,
  "input_cost_per_token": 5e-08,
  "litellm_provider": "voyage",
  "max_input_tokens": 16000,
@@ -8769,7 +8768,6 @@
  "output_cost_per_token": 0.0
  },
  "voyage/rerank-2-lite": {
- "input_cost_per_query": 2e-08,
  "input_cost_per_token": 2e-08,
  "litellm_provider": "voyage",
  "max_input_tokens": 8000,
@@ -24900,15 +24898,15 @@
  "supports_vision": false
  },
  "global.anthropic.claude-haiku-4-5-20251001-v1:0": {
- "cache_creation_input_token_cost": 1.375e-06,
- "cache_read_input_token_cost": 1.1e-07,
- "input_cost_per_token": 1.1e-06,
+ "cache_creation_input_token_cost": 1.25e-06,
+ "cache_read_input_token_cost": 1e-07,
+ "input_cost_per_token": 1e-06,
  "litellm_provider": "bedrock_converse",
  "max_input_tokens": 200000,
  "max_output_tokens": 64000,
  "max_tokens": 64000,
  "mode": "chat",
- "output_cost_per_token": 5.5e-06,
+ "output_cost_per_token": 5e-06,
  "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock",
  "supports_assistant_prefill": true,
  "supports_computer_use": true,
@@ -31473,5 +31471,342 @@
  "output_cost_per_token": 2e-07,
  "litellm_provider": "fireworks_ai",
  "mode": "chat"
+ },
+ "gemini/gemini-2.5-computer-use-preview-10-2025": {
+ "input_cost_per_token": 1.25e-06,
+ "input_cost_per_token_above_200k_tokens": 2.5e-06,
+ "litellm_provider": "gemini",
+ "max_images_per_prompt": 3000,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 64000,
+ "max_tokens": 64000,
+ "mode": "chat",
+ "output_cost_per_token": 1e-05,
+ "output_cost_per_token_above_200k_tokens": 1.5e-05,
+ "rpm": 2000,
+ "source": "https://ai.google.dev/gemini-api/docs/computer-use",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/completions"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_computer_use": true,
+ "supports_function_calling": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_vision": true,
+ "tpm": 800000
+ },
+ "vertex_ai/deepseek-ai/deepseek-v3.2-maas": {
+ "input_cost_per_token": 5.6e-07,
+ "input_cost_per_token_batches": 2.8e-07,
+ "litellm_provider": "vertex_ai-deepseek_models",
+ "max_input_tokens": 163840,
+ "max_output_tokens": 32768,
+ "max_tokens": 163840,
+ "mode": "chat",
+ "output_cost_per_token": 1.68e-06,
+ "output_cost_per_token_batches": 8.4e-07,
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models",
+ "supported_regions": [
+ "us-west2"
+ ],
+ "supports_assistant_prefill": true,
+ "supports_function_calling": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
+ "voyage/rerank-2.5": {
+ "input_cost_per_token": 5e-08,
+ "litellm_provider": "voyage",
+ "max_input_tokens": 32000,
+ "max_output_tokens": 32000,
+ "max_query_tokens": 32000,
+ "max_tokens": 32000,
+ "mode": "rerank",
+ "output_cost_per_token": 0.0
+ },
+ "voyage/rerank-2.5-lite": {
+ "input_cost_per_token": 2e-08,
+ "litellm_provider": "voyage",
+ "max_input_tokens": 32000,
+ "max_output_tokens": 32000,
+ "max_query_tokens": 32000,
+ "max_tokens": 32000,
+ "mode": "rerank",
+ "output_cost_per_token": 0.0
+ },
+ "azure_ai/claude-haiku-4-5": {
+ "input_cost_per_token": 1e-06,
+ "litellm_provider": "azure_ai",
+ "max_input_tokens": 200000,
+ "max_output_tokens": 64000,
+ "max_tokens": 64000,
+ "mode": "chat",
+ "output_cost_per_token": 5e-06,
+ "supports_assistant_prefill": true,
+ "supports_computer_use": true,
+ "supports_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supports_vision": true
+ },
+ "azure_ai/claude-opus-4-1": {
+ "input_cost_per_token": 1.5e-05,
+ "litellm_provider": "azure_ai",
+ "max_input_tokens": 200000,
+ "max_output_tokens": 32000,
+ "max_tokens": 32000,
+ "mode": "chat",
+ "output_cost_per_token": 7.5e-05,
+ "supports_assistant_prefill": true,
+ "supports_computer_use": true,
+ "supports_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supports_vision": true
+ },
+ "azure_ai/claude-sonnet-4-5": {
+ "input_cost_per_token": 3e-06,
+ "litellm_provider": "azure_ai",
+ "max_input_tokens": 200000,
+ "max_output_tokens": 64000,
+ "max_tokens": 64000,
+ "mode": "chat",
+ "output_cost_per_token": 1.5e-05,
+ "supports_assistant_prefill": true,
+ "supports_computer_use": true,
+ "supports_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supports_vision": true
+ },
+ "gpt-5.2": {
+ "cache_read_input_token_cost": 1.75e-07,
+ "cache_read_input_token_cost_priority": 3.5e-07,
+ "input_cost_per_token": 1.75e-06,
+ "input_cost_per_token_priority": 3.5e-06,
+ "litellm_provider": "openai",
+ "max_input_tokens": 400000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "chat",
+ "output_cost_per_token": 1.4e-05,
+ "output_cost_per_token_priority": 2.8e-05,
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text",
+ "image"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_service_tier": true,
+ "supports_vision": true
+ },
+ "gpt-5.2-2025-12-11": {
+ "cache_read_input_token_cost": 1.75e-07,
+ "cache_read_input_token_cost_priority": 3.5e-07,
+ "input_cost_per_token": 1.75e-06,
+ "input_cost_per_token_priority": 3.5e-06,
+ "litellm_provider": "openai",
+ "max_input_tokens": 400000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "chat",
+ "output_cost_per_token": 1.4e-05,
+ "output_cost_per_token_priority": 2.8e-05,
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text",
+ "image"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_service_tier": true,
+ "supports_vision": true
+ },
+ "gpt-5.2-chat-latest": {
+ "cache_read_input_token_cost": 1.75e-07,
+ "cache_read_input_token_cost_priority": 3.5e-07,
+ "input_cost_per_token": 1.75e-06,
+ "input_cost_per_token_priority": 3.5e-06,
+ "litellm_provider": "openai",
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "max_tokens": 16384,
+ "mode": "chat",
+ "output_cost_per_token": 1.4e-05,
+ "output_cost_per_token_priority": 2.8e-05,
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_vision": true
+ },
+ "gpt-5.2-pro": {
+ "input_cost_per_token": 2.1e-05,
+ "litellm_provider": "openai",
+ "max_input_tokens": 400000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "responses",
+ "output_cost_per_token": 0.000168,
+ "supported_endpoints": [
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_vision": true,
+ "supports_web_search": true
+ },
+ "gpt-5.2-pro-2025-12-11": {
+ "input_cost_per_token": 2.1e-05,
+ "litellm_provider": "openai",
+ "max_input_tokens": 400000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "responses",
+ "output_cost_per_token": 0.000168,
+ "supported_endpoints": [
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_vision": true,
+ "supports_web_search": true
+ },
+ "mistral/codestral-2508": {
+ "input_cost_per_token": 3e-07,
+ "litellm_provider": "mistral",
+ "max_input_tokens": 256000,
+ "max_output_tokens": 256000,
+ "max_tokens": 256000,
+ "mode": "chat",
+ "output_cost_per_token": 9e-07,
+ "source": "https://mistral.ai/news/codestral-25-08",
+ "supports_assistant_prefill": true,
+ "supports_function_calling": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true
+ },
+ "mistral/labs-devstral-small-2512": {
+ "input_cost_per_token": 1e-07,
+ "litellm_provider": "mistral",
+ "max_input_tokens": 256000,
+ "max_output_tokens": 256000,
+ "max_tokens": 256000,
+ "mode": "chat",
+ "output_cost_per_token": 3e-07,
+ "source": "https://docs.mistral.ai/models/devstral-small-2-25-12",
+ "supports_assistant_prefill": true,
+ "supports_function_calling": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true
+ },
+ "mistral/devstral-2512": {
+ "input_cost_per_token": 4e-07,
+ "litellm_provider": "mistral",
+ "max_input_tokens": 256000,
+ "max_output_tokens": 256000,
+ "max_tokens": 256000,
+ "mode": "chat",
+ "output_cost_per_token": 2e-06,
+ "source": "https://mistral.ai/news/devstral-2-vibe-cli",
+ "supports_assistant_prefill": true,
+ "supports_function_calling": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true
  }
  }
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.391
+ Version: 0.1.395
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo
File without changes