tokencostauto-0.1.108-py3-none-any.whl → tokencostauto-0.1.112-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15790,5 +15790,198 @@
  "supports_response_schema": true,
  "supports_reasoning": true,
  "supports_tool_choice": true
+ },
+ "o3-deep-research": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-05,
+ "output_cost_per_token": 4e-05,
+ "input_cost_per_token_batches": 5e-06,
+ "output_cost_per_token_batches": 2e-05,
+ "cache_read_input_token_cost": 2.5e-06,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "o3-deep-research-2025-06-26": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-05,
+ "output_cost_per_token": 4e-05,
+ "input_cost_per_token_batches": 5e-06,
+ "output_cost_per_token_batches": 2e-05,
+ "cache_read_input_token_cost": 2.5e-06,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "o4-mini-deep-research": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 2e-06,
+ "output_cost_per_token": 8e-06,
+ "input_cost_per_token_batches": 1e-06,
+ "output_cost_per_token_batches": 4e-06,
+ "cache_read_input_token_cost": 5e-07,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "o4-mini-deep-research-2025-06-26": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 2e-06,
+ "output_cost_per_token": 8e-06,
+ "input_cost_per_token_batches": 1e-06,
+ "output_cost_per_token_batches": 4e-06,
+ "cache_read_input_token_cost": 5e-07,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "deepseek/deepseek-r1": {
+ "max_tokens": 8192,
+ "max_input_tokens": 65536,
+ "max_output_tokens": 8192,
+ "input_cost_per_token": 5.5e-07,
+ "input_cost_per_token_cache_hit": 1.4e-07,
+ "output_cost_per_token": 2.19e-06,
+ "litellm_provider": "deepseek",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_assistant_prefill": true,
+ "supports_tool_choice": true,
+ "supports_reasoning": true,
+ "supports_prompt_caching": true
+ },
+ "deepseek/deepseek-v3": {
+ "max_tokens": 8192,
+ "max_input_tokens": 65536,
+ "max_output_tokens": 8192,
+ "input_cost_per_token": 2.7e-07,
+ "input_cost_per_token_cache_hit": 7e-08,
+ "cache_read_input_token_cost": 7e-08,
+ "cache_creation_input_token_cost": 0.0,
+ "output_cost_per_token": 1.1e-06,
+ "litellm_provider": "deepseek",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_assistant_prefill": true,
+ "supports_tool_choice": true,
+ "supports_prompt_caching": true
+ },
+ "elevenlabs/scribe_v1": {
+ "mode": "audio_transcription",
+ "input_cost_per_second": 6.11e-05,
+ "output_cost_per_second": 0.0,
+ "litellm_provider": "elevenlabs",
+ "supported_endpoints": [
+ "/v1/audio/transcriptions"
+ ],
+ "source": "https://elevenlabs.io/pricing",
+ "metadata": {
+ "original_pricing_per_hour": 0.22,
+ "calculation": "$0.22/hour = $0.00366/minute = $0.0000611 per second (enterprise pricing)",
+ "notes": "ElevenLabs Scribe v1 - state-of-the-art speech recognition model with 99 language support"
+ }
+ },
+ "elevenlabs/scribe_v1_experimental": {
+ "mode": "audio_transcription",
+ "input_cost_per_second": 6.11e-05,
+ "output_cost_per_second": 0.0,
+ "litellm_provider": "elevenlabs",
+ "supported_endpoints": [
+ "/v1/audio/transcriptions"
+ ],
+ "source": "https://elevenlabs.io/pricing",
+ "metadata": {
+ "original_pricing_per_hour": 0.22,
+ "calculation": "$0.22/hour = $0.00366/minute = $0.0000611 per second (enterprise pricing)",
+ "notes": "ElevenLabs Scribe v1 experimental - enhanced version of the main Scribe model"
+ }
  }
  }
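The new entries above follow the per-unit price schema used in tokencostauto's model_prices.json (note the litellm_provider field): the OpenAI deep-research models are priced per token, with separate batch and cache-read rates, while the ElevenLabs Scribe entries are priced per second of audio. A minimal sketch of how those per-unit prices turn into a USD estimate is shown below; the estimate_cost helper and the trimmed price table are illustrative assumptions, not tokencostauto's actual API, with prices copied from the diff.

```python
# Minimal sketch (assumed helper, not tokencostauto's API): turning the per-unit
# prices from the diff above into a USD estimate for a single call.
MODEL_PRICES = {
    "o3-deep-research": {"input_cost_per_token": 1e-05, "output_cost_per_token": 4e-05},
    "o4-mini-deep-research": {"input_cost_per_token": 2e-06, "output_cost_per_token": 8e-06},
    "elevenlabs/scribe_v1": {"input_cost_per_second": 6.11e-05},
}

def estimate_cost(model: str, *, input_tokens: int = 0, output_tokens: int = 0,
                  audio_seconds: float = 0.0) -> float:
    """Estimated USD cost for one call, given per-token or per-second prices."""
    p = MODEL_PRICES[model]
    return (
        input_tokens * p.get("input_cost_per_token", 0.0)
        + output_tokens * p.get("output_cost_per_token", 0.0)
        + audio_seconds * p.get("input_cost_per_second", 0.0)
    )

# o3-deep-research: 1,000 prompt tokens + 500 completion tokens
# = 1000 * 1e-05 + 500 * 4e-05 = $0.03
print(estimate_cost("o3-deep-research", input_tokens=1000, output_tokens=500))

# elevenlabs/scribe_v1: one hour of audio = 3600 s * 6.11e-05 ≈ $0.22,
# which matches the "$0.22/hour" figure in the entry's metadata.
print(estimate_cost("elevenlabs/scribe_v1", audio_seconds=3600))
```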
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.108
+ Version: 0.1.112
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo
@@ -0,0 +1,9 @@
+ tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
+ tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
+ tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
+ tokencostauto/model_prices.json,sha256=WBJ2cMeMptxY5JsuNY9HHHiTgME5GZSo9JoE_JGBITA,570069
+ tokencostauto-0.1.112.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
+ tokencostauto-0.1.112.dist-info/METADATA,sha256=vcYDGTz4CNao8LMfsEYoSjp3gXBj9DHeWj825BbF0I4,204076
+ tokencostauto-0.1.112.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ tokencostauto-0.1.112.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
+ tokencostauto-0.1.112.dist-info/RECORD,,
@@ -1,9 +0,0 @@
- tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
- tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
- tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
- tokencostauto/model_prices.json,sha256=ZVkyXyiI0fwxn19g1AZ55tAHTviVN3BAQpsjAW3Lqbw,563323
- tokencostauto-0.1.108.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
- tokencostauto-0.1.108.dist-info/METADATA,sha256=gkrIvS7uxM-37fsr34OFOPdXT3epIrxGu7Vb2gutoZ8,204076
- tokencostauto-0.1.108.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- tokencostauto-0.1.108.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
- tokencostauto-0.1.108.dist-info/RECORD,,