tokencostauto 0.1.108.tar.gz → 0.1.110.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.108
+ Version: 0.1.110
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo
@@ -11,7 +11,7 @@ tokencostauto = ["model_prices.json"]
  [project]

  name = "tokencostauto"
- version = "0.1.108"
+ version = "0.1.110"

  authors = [
  { name = "Trisha Pan", email = "trishaepan@gmail.com" },
@@ -15790,5 +15790,137 @@
  "supports_response_schema": true,
  "supports_reasoning": true,
  "supports_tool_choice": true
+ },
+ "o3-deep-research": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-05,
+ "output_cost_per_token": 4e-05,
+ "input_cost_per_token_batches": 5e-06,
+ "output_cost_per_token_batches": 2e-05,
+ "cache_read_input_token_cost": 2.5e-06,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "o3-deep-research-2025-06-26": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-05,
+ "output_cost_per_token": 4e-05,
+ "input_cost_per_token_batches": 5e-06,
+ "output_cost_per_token_batches": 2e-05,
+ "cache_read_input_token_cost": 2.5e-06,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "o4-mini-deep-research": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 2e-06,
+ "output_cost_per_token": 8e-06,
+ "input_cost_per_token_batches": 1e-06,
+ "output_cost_per_token_batches": 4e-06,
+ "cache_read_input_token_cost": 5e-07,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "o4-mini-deep-research-2025-06-26": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 2e-06,
+ "output_cost_per_token": 8e-06,
+ "input_cost_per_token_batches": 1e-06,
+ "output_cost_per_token_batches": 4e-06,
+ "cache_read_input_token_cost": 5e-07,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/batch",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_pdf_input": true,
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
  }
  }
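For reference, the per-token rates added above work out to $10 per million input tokens and $40 per million output tokens for o3-deep-research (and its dated alias), and $2 / $8 per million for the o4-mini-deep-research pair. Below is a minimal sketch of how a pricing entry like these can be turned into a USD estimate; the load_prices helper, the file path, and the call site are illustrative assumptions for this sketch, not tokencostauto's actual API.

```python
import json

def load_prices(path="tokencostauto/model_prices.json"):
    # Illustrative helper: read the bundled pricing table as a plain dict.
    # Path and function name are assumptions, not the package's public API.
    with open(path) as f:
        return json.load(f)

def estimate_cost_usd(prices, model, input_tokens, output_tokens):
    """Multiply token counts by the per-token USD rates from the pricing table."""
    entry = prices[model]
    return (input_tokens * entry["input_cost_per_token"]
            + output_tokens * entry["output_cost_per_token"])

if __name__ == "__main__":
    prices = load_prices()
    # Using the rates added in this release: 1e-05 in / 4e-05 out per token.
    cost = estimate_cost_usd(prices, "o3-deep-research",
                             input_tokens=50_000, output_tokens=10_000)
    # 50,000 * 1e-05 + 10,000 * 4e-05 = 0.50 + 0.40 = 0.90
    print(f"~${cost:.2f}")
```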
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.108
+ Version: 0.1.110
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo