tokencostauto-0.1.327-py3-none-any.whl → tokencostauto-0.1.335-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15602,7 +15602,7 @@
  "tool_use_system_prompt_tokens": 159
  },
  "gemini/gemini-2.5-flash": {
- "cache_read_input_token_cost": 7.5e-08,
+ "cache_read_input_token_cost": 3e-08,
  "input_cost_per_audio_token": 1e-06,
  "input_cost_per_token": 3e-07,
  "litellm_provider": "gemini",
@@ -16267,7 +16267,6 @@
  "output_cost_per_token": 1.5e-05,
  "source": "https://docs.x.ai/docs/models",
  "supports_function_calling": true,
- "supports_reasoning": true,
  "supports_tool_choice": true,
  "supports_web_search": true
  },
@@ -16283,7 +16282,6 @@
  "output_cost_per_token_above_128k_tokens": 3e-05,
  "source": "https://docs.x.ai/docs/models",
  "supports_function_calling": true,
- "supports_reasoning": true,
  "supports_tool_choice": true,
  "supports_web_search": true
  },
@@ -16299,7 +16297,6 @@
  "output_cost_per_token_above_128k_tokens": 3e-05,
  "source": "https://docs.x.ai/docs/models",
  "supports_function_calling": true,
- "supports_reasoning": true,
  "supports_tool_choice": true,
  "supports_web_search": true
  },
@@ -22779,7 +22776,6 @@
  "cache_read_input_token_cost": 5e-08,
  "source": "https://docs.x.ai/docs/models",
  "supports_function_calling": true,
- "supports_reasoning": true,
  "supports_tool_choice": true,
  "supports_web_search": true
  },
@@ -23635,7 +23631,6 @@
  "output_cost_per_token": 2.75e-05,
  "source": "https://azure.microsoft.com/en-us/blog/grok-4-is-now-available-in-azure-ai-foundry-unlock-frontier-intelligence-and-business-ready-capabilities/",
  "supports_function_calling": true,
- "supports_reasoning": true,
  "supports_response_schema": true,
  "supports_tool_choice": true,
  "supports_web_search": true
@@ -23663,7 +23658,6 @@
  "mode": "chat",
  "source": "https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/announcing-the-grok-4-fast-models-from-xai-now-available-in-azure-ai-foundry/4456701",
  "supports_function_calling": true,
- "supports_reasoning": true,
  "supports_response_schema": true,
  "supports_tool_choice": true,
  "supports_web_search": true
@@ -24883,19 +24877,21 @@
  "supports_vision": false
  },
  "global.anthropic.claude-haiku-4-5-20251001-v1:0": {
- "cache_creation_input_token_cost": 1.25e-06,
- "cache_read_input_token_cost": 1e-07,
- "input_cost_per_token": 1e-06,
+ "cache_creation_input_token_cost": 1.375e-06,
+ "cache_read_input_token_cost": 1.1e-07,
+ "input_cost_per_token": 1.1e-06,
  "litellm_provider": "bedrock_converse",
  "max_input_tokens": 200000,
  "max_output_tokens": 8192,
  "max_tokens": 8192,
  "mode": "chat",
- "output_cost_per_token": 5e-06,
+ "output_cost_per_token": 5.5e-06,
+ "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock",
  "supports_assistant_prefill": true,
  "supports_function_calling": true,
  "supports_pdf_input": true,
  "supports_prompt_caching": true,
+ "supports_reasoning": true,
  "supports_response_schema": true,
  "supports_tool_choice": true,
  "supports_vision": true,
@@ -25361,12 +25357,12 @@
  "openai/container": {
  "code_interpreter_cost_per_session": 0.03,
  "litellm_provider": "openai",
- "mode": "container"
+ "mode": "chat"
  },
  "azure/container": {
  "code_interpreter_cost_per_session": 0.03,
  "litellm_provider": "azure",
- "mode": "container"
+ "mode": "chat"
  },
  "vertex_ai/mistral-ocr-2505": {
  "litellm_provider": "vertex_ai",
@@ -25466,6 +25462,7 @@
  "mode": "chat",
  "output_cost_per_audio_token": 1.2e-05,
  "output_cost_per_token": 2e-06,
+ "rpm": 100000,
  "source": "https://ai.google.dev/gemini-api/docs/pricing",
  "supported_endpoints": [
  "/v1/chat/completions",
@@ -25492,6 +25489,291 @@
  "supports_tool_choice": true,
  "supports_url_context": true,
  "supports_vision": true,
- "supports_web_search": true
+ "supports_web_search": true,
+ "tpm": 8000000
+ },
+ "azure/gpt-image-1-mini": {
+ "input_cost_per_pixel": 8.0566406e-09,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/low/1024-x-1024/gpt-image-1-mini": {
+ "input_cost_per_pixel": 2.0751953125e-09,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/low/1024-x-1536/gpt-image-1-mini": {
+ "input_cost_per_pixel": 2.0751953125e-09,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/low/1536-x-1024/gpt-image-1-mini": {
+ "input_cost_per_pixel": 2.0345052083e-09,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/medium/1024-x-1024/gpt-image-1-mini": {
+ "input_cost_per_pixel": 8.056640625e-09,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/medium/1024-x-1536/gpt-image-1-mini": {
+ "input_cost_per_pixel": 8.056640625e-09,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/medium/1536-x-1024/gpt-image-1-mini": {
+ "input_cost_per_pixel": 7.9752604167e-09,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/high/1024-x-1024/gpt-image-1-mini": {
+ "input_cost_per_pixel": 3.173828125e-08,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/high/1024-x-1536/gpt-image-1-mini": {
+ "input_cost_per_pixel": 3.173828125e-08,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "azure/high/1536-x-1024/gpt-image-1-mini": {
+ "input_cost_per_pixel": 3.1575520833e-08,
+ "litellm_provider": "azure",
+ "mode": "image_generation",
+ "output_cost_per_pixel": 0.0,
+ "supported_endpoints": [
+ "/v1/images/generations"
+ ]
+ },
+ "firecrawl/search": {
+ "litellm_provider": "firecrawl",
+ "mode": "search",
+ "tiered_pricing": [
+ {
+ "input_cost_per_query": 0.00166,
+ "max_results_range": [
+ 1,
+ 10
+ ]
+ },
+ {
+ "input_cost_per_query": 0.00332,
+ "max_results_range": [
+ 11,
+ 20
+ ]
+ },
+ {
+ "input_cost_per_query": 0.00498,
+ "max_results_range": [
+ 21,
+ 30
+ ]
+ },
+ {
+ "input_cost_per_query": 0.00664,
+ "max_results_range": [
+ 31,
+ 40
+ ]
+ },
+ {
+ "input_cost_per_query": 0.0083,
+ "max_results_range": [
+ 41,
+ 50
+ ]
+ },
+ {
+ "input_cost_per_query": 0.00996,
+ "max_results_range": [
+ 51,
+ 60
+ ]
+ },
+ {
+ "input_cost_per_query": 0.01162,
+ "max_results_range": [
+ 61,
+ 70
+ ]
+ },
+ {
+ "input_cost_per_query": 0.01328,
+ "max_results_range": [
+ 71,
+ 80
+ ]
+ },
+ {
+ "input_cost_per_query": 0.01494,
+ "max_results_range": [
+ 81,
+ 90
+ ]
+ },
+ {
+ "input_cost_per_query": 0.0166,
+ "max_results_range": [
+ 91,
+ 100
+ ]
+ }
+ ],
+ "metadata": {
+ "notes": "Firecrawl search pricing: $83 for 100,000 credits, 2 credits per 10 results. Cost = ceiling(limit/10) * 2 * $0.00083"
+ }
+ },
+ "searxng/search": {
+ "litellm_provider": "searxng",
+ "mode": "search",
+ "input_cost_per_query": 0.0,
+ "metadata": {
+ "notes": "SearXNG is an open-source metasearch engine. Free to use when self-hosted or using public instances."
+ }
+ },
+ "azure/gpt-5-pro": {
+ "input_cost_per_token": 1.5e-05,
+ "litellm_provider": "azure",
+ "max_input_tokens": 272000,
+ "max_output_tokens": 128000,
+ "max_tokens": 400000,
+ "mode": "responses",
+ "output_cost_per_token": 0.00012,
+ "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure?pivots=azure-openai&tabs=global-standard-aoai%2Cstandard-chat-completions%2Cglobal-standard#gpt-5",
+ "supported_endpoints": [
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_vision": true
+ },
+ "vertex_ai/minimaxai/minimax-m2-maas": {
+ "input_cost_per_token": 3e-07,
+ "litellm_provider": "vertex_ai-minimax_models",
+ "max_input_tokens": 196608,
+ "max_output_tokens": 196608,
+ "max_tokens": 196608,
+ "mode": "chat",
+ "output_cost_per_token": 1.2e-06,
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models",
+ "supports_function_calling": true,
+ "supports_tool_choice": true
+ },
+ "cohere/embed-v4.0": {
+ "input_cost_per_token": 1.2e-07,
+ "litellm_provider": "cohere",
+ "max_input_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "embedding",
+ "output_cost_per_token": 0.0,
+ "output_vector_size": 1536,
+ "supports_embedding_image_input": true
+ },
+ "gemini/veo-3.1-fast-generate-preview": {
+ "litellm_provider": "gemini",
+ "max_input_tokens": 1024,
+ "max_tokens": 1024,
+ "mode": "video_generation",
+ "output_cost_per_second": 0.15,
+ "source": "https://ai.google.dev/gemini-api/docs/video",
+ "supported_modalities": [
+ "text"
+ ],
+ "supported_output_modalities": [
+ "video"
+ ]
+ },
+ "gemini/veo-3.1-generate-preview": {
+ "litellm_provider": "gemini",
+ "max_input_tokens": 1024,
+ "max_tokens": 1024,
+ "mode": "video_generation",
+ "output_cost_per_second": 0.4,
+ "source": "https://ai.google.dev/gemini-api/docs/video",
+ "supported_modalities": [
+ "text"
+ ],
+ "supported_output_modalities": [
+ "video"
+ ]
+ },
+ "vertex_ai/veo-3.1-generate-preview": {
+ "litellm_provider": "vertex_ai-video-models",
+ "max_input_tokens": 1024,
+ "max_tokens": 1024,
+ "mode": "video_generation",
+ "output_cost_per_second": 0.4,
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo",
+ "supported_modalities": [
+ "text"
+ ],
+ "supported_output_modalities": [
+ "video"
+ ]
+ },
+ "vertex_ai/veo-3.1-fast-generate-preview": {
+ "litellm_provider": "vertex_ai-video-models",
+ "max_input_tokens": 1024,
+ "max_tokens": 1024,
+ "mode": "video_generation",
+ "output_cost_per_second": 0.15,
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo",
+ "supported_modalities": [
+ "text"
+ ],
+ "supported_output_modalities": [
+ "video"
+ ]
  }
  }
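Illustrative aside (not part of the published diff): the "firecrawl/search" entry added above encodes its cost rule twice, as a tiered_pricing table and as the formula quoted in its notes field (Cost = ceiling(limit/10) * 2 * $0.00083). A minimal sketch of how a consumer of model_prices.json might evaluate such an entry, assuming only the structure shown in the hunk; the helper name firecrawl_search_cost is hypothetical and is not an API of tokencostauto or litellm.

import math

# Tier table assumed from the "firecrawl/search" entry in the diff above
# (truncated to three tiers here; the real entry lists ten).
FIRECRAWL_TIERS = [
    {"input_cost_per_query": 0.00166, "max_results_range": [1, 10]},
    {"input_cost_per_query": 0.00332, "max_results_range": [11, 20]},
    {"input_cost_per_query": 0.0166, "max_results_range": [91, 100]},
]

def firecrawl_search_cost(tiers, num_results):
    """Hypothetical helper: pick the tier whose range covers num_results."""
    for tier in tiers:
        low, high = tier["max_results_range"]
        if low <= num_results <= high:
            return tier["input_cost_per_query"]
    # Otherwise fall back to the formula from the entry's notes field:
    # cost = ceiling(limit / 10) * 2 credits * $0.00083 per credit.
    return math.ceil(num_results / 10) * 2 * 0.00083

print(firecrawl_search_cost(FIRECRAWL_TIERS, 10))  # 0.00166, from the first tier
print(firecrawl_search_cost(FIRECRAWL_TIERS, 37))  # 0.00664 via the formula, matching the 31-40 tier

Both paths agree because each tier spans ten results and is priced at 2 credits times $0.00083 per credit.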
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.327
+ Version: 0.1.335
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo
@@ -0,0 +1,9 @@
+ tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
+ tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
+ tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
+ tokencostauto/model_prices.json,sha256=91qlOz1j-wg-HV61DEUFyQSa5I-sIN7pCV2JoiKlIaE,918499
+ tokencostauto-0.1.335.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
+ tokencostauto-0.1.335.dist-info/METADATA,sha256=5zSKa9mGTYTaojbxGj1dyqyirVJNCaDkiH6RtcF2QKE,204076
+ tokencostauto-0.1.335.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ tokencostauto-0.1.335.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
+ tokencostauto-0.1.335.dist-info/RECORD,,
@@ -1,9 +0,0 @@
- tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
- tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
- tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
- tokencostauto/model_prices.json,sha256=jzSQ-Uq1HzF2VNK0tdcOjG_Xtkf-n_3f7wZbz3e1EA8,909439
- tokencostauto-0.1.327.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
- tokencostauto-0.1.327.dist-info/METADATA,sha256=WQQE8kkw2XXhkCrjtSwkAgGwdqNEz8JTIi52K2j8Hdg,204076
- tokencostauto-0.1.327.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- tokencostauto-0.1.327.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
- tokencostauto-0.1.327.dist-info/RECORD,,