tokencostauto-0.1.342-py3-none-any.whl → tokencostauto-0.1.344-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
@@ -9266,6 +9266,7 @@
  "supports_function_calling": false,
  "supports_parallel_function_calling": true,
  "supports_prompt_caching": true,
+ "supports_reasoning": true,
  "supports_response_schema": false,
  "supports_system_messages": true,
  "supports_tool_choice": true,
@@ -10442,6 +10443,7 @@
  "supports_audio_output": true,
  "supports_function_calling": true,
  "supports_prompt_caching": true,
+ "supports_reasoning": true,
  "supports_response_schema": true,
  "supports_system_messages": true,
  "supports_tool_choice": true,
@@ -24690,6 +24692,7 @@
  "litellm_provider": "vertex_ai-language-models",
  "max_audio_length_hours": 8.4,
  "max_audio_per_prompt": 1,
+ "supports_reasoning": false,
  "max_images_per_prompt": 3000,
  "max_input_tokens": 32768,
  "max_output_tokens": 32768,
@@ -25951,5 +25954,178 @@
  "1920x1080"
  ],
  "comment": "2 credits per image (any resolution) @ $0.01 per credit = $0.02 per image"
+ },
+ "gpt-5.1": {
+ "cache_read_input_token_cost": 1.25e-07,
+ "cache_read_input_token_cost_priority": 2.5e-07,
+ "input_cost_per_token": 1.25e-06,
+ "input_cost_per_token_priority": 2.5e-06,
+ "litellm_provider": "openai",
+ "max_input_tokens": 272000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "chat",
+ "output_cost_per_token": 1e-05,
+ "output_cost_per_token_priority": 2e-05,
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text",
+ "image"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_service_tier": true,
+ "supports_vision": true
+ },
+ "gpt-5.1-2025-11-13": {
+ "cache_read_input_token_cost": 1.25e-07,
+ "cache_read_input_token_cost_priority": 2.5e-07,
+ "input_cost_per_token": 1.25e-06,
+ "input_cost_per_token_priority": 2.5e-06,
+ "litellm_provider": "openai",
+ "max_input_tokens": 272000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "chat",
+ "output_cost_per_token": 1e-05,
+ "output_cost_per_token_priority": 2e-05,
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text",
+ "image"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_service_tier": true,
+ "supports_vision": true
+ },
+ "gpt-5.1-chat-latest": {
+ "cache_read_input_token_cost": 1.25e-07,
+ "cache_read_input_token_cost_priority": 2.5e-07,
+ "input_cost_per_token": 1.25e-06,
+ "input_cost_per_token_priority": 2.5e-06,
+ "litellm_provider": "openai",
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "max_tokens": 16384,
+ "mode": "chat",
+ "output_cost_per_token": 1e-05,
+ "output_cost_per_token_priority": 2e-05,
+ "supported_endpoints": [
+ "/v1/chat/completions",
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text",
+ "image"
+ ],
+ "supports_function_calling": false,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": false,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": false,
+ "supports_vision": true
+ },
+ "gpt-5.1-codex": {
+ "cache_read_input_token_cost": 1.25e-07,
+ "cache_read_input_token_cost_priority": 2.5e-07,
+ "input_cost_per_token": 1.25e-06,
+ "input_cost_per_token_priority": 2.5e-06,
+ "litellm_provider": "openai",
+ "max_input_tokens": 272000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "responses",
+ "output_cost_per_token": 1e-05,
+ "output_cost_per_token_priority": 2e-05,
+ "supported_endpoints": [
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": false,
+ "supports_tool_choice": true,
+ "supports_vision": true
+ },
+ "gpt-5.1-codex-mini": {
+ "cache_read_input_token_cost": 2.5e-08,
+ "cache_read_input_token_cost_priority": 4.5e-08,
+ "input_cost_per_token": 2.5e-07,
+ "input_cost_per_token_priority": 4.5e-07,
+ "litellm_provider": "openai",
+ "max_input_tokens": 272000,
+ "max_output_tokens": 128000,
+ "max_tokens": 128000,
+ "mode": "responses",
+ "output_cost_per_token": 2e-06,
+ "output_cost_per_token_priority": 3.6e-06,
+ "supported_endpoints": [
+ "/v1/responses"
+ ],
+ "supported_modalities": [
+ "text",
+ "image"
+ ],
+ "supported_output_modalities": [
+ "text"
+ ],
+ "supports_function_calling": true,
+ "supports_native_streaming": true,
+ "supports_parallel_function_calling": true,
+ "supports_pdf_input": true,
+ "supports_prompt_caching": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_system_messages": false,
+ "supports_tool_choice": true,
+ "supports_vision": true
  }
  }
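For orientation, the per-token prices in the new gpt-5.1 entries are easiest to read per million tokens ($1.25/M input, $0.125/M cached input, $10/M output). Below is a minimal sketch, in plain Python rather than the tokencostauto API, of how an entry shaped like the "gpt-5.1" block above could be turned into a USD estimate; the `estimate_cost` helper and the token counts are illustrative assumptions, not part of the package.

```python
# Sketch only: converting a model_prices.json-style entry into a USD estimate.
# The dict copies the new "gpt-5.1" cost fields from this diff; the helper is
# hypothetical and not tokencostauto's API.

GPT_5_1 = {
    "input_cost_per_token": 1.25e-06,         # $1.25 per 1M input tokens
    "cache_read_input_token_cost": 1.25e-07,  # $0.125 per 1M cached input tokens
    "output_cost_per_token": 1e-05,           # $10.00 per 1M output tokens
}

def estimate_cost(entry, input_tokens, output_tokens, cached_input_tokens=0):
    """Sum per-token prices for uncached input, cached input, and output."""
    uncached = input_tokens - cached_input_tokens
    return (
        uncached * entry["input_cost_per_token"]
        + cached_input_tokens * entry["cache_read_input_token_cost"]
        + output_tokens * entry["output_cost_per_token"]
    )

# Example: 10k input tokens (2k read from cache) plus 1k output tokens.
print(f"${estimate_cost(GPT_5_1, 10_000, 1_000, cached_input_tokens=2_000):.6f}")
# -> $0.020250  (8000*1.25e-06 + 2000*1.25e-07 + 1000*1e-05)
```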
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tokencostauto
- Version: 0.1.342
+ Version: 0.1.344
  Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
  Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
  Project-URL: Homepage, https://github.com/madpin/tokencostaudo
@@ -0,0 +1,9 @@
+ tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
+ tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
+ tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
+ tokencostauto/model_prices.json,sha256=7sWmlUw9Ak-UxJDdoA4kdL8cXjYl6_lJLK75bFqOTlQ,930467
+ tokencostauto-0.1.344.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
+ tokencostauto-0.1.344.dist-info/METADATA,sha256=suIeuXfW1Pmgo_4zM5MUKWFWfU8YzBO21Mso3qTxdzA,204076
+ tokencostauto-0.1.344.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ tokencostauto-0.1.344.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
+ tokencostauto-0.1.344.dist-info/RECORD,,
@@ -1,9 +0,0 @@
- tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
- tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
- tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
- tokencostauto/model_prices.json,sha256=NL71BolPmAxHcm1401fS_A4YKIMkf8ql3ViW3oP0S-A,924511
- tokencostauto-0.1.342.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
- tokencostauto-0.1.342.dist-info/METADATA,sha256=_LLcTHY9wGDgTcKZf16yMmo6ZJARFuAVe_iCOk-ui0E,204076
- tokencostauto-0.1.342.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- tokencostauto-0.1.342.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
- tokencostauto-0.1.342.dist-info/RECORD,,