tokencostauto 0.1.342__tar.gz → 0.1.346__tar.gz
This diff shows the changes between two publicly released versions of the package, as published to the supported registry. It is provided for informational purposes only and reflects the package contents as they appear in that registry.
- {tokencostauto-0.1.342/tokencostauto.egg-info → tokencostauto-0.1.346}/PKG-INFO +1 -1
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/pyproject.toml +1 -1
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto/model_prices.json +240 -5
- {tokencostauto-0.1.342 → tokencostauto-0.1.346/tokencostauto.egg-info}/PKG-INFO +1 -1
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/LICENSE +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/MANIFEST.in +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/README.md +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/setup.cfg +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tests/test_costs.py +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto/__init__.py +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto/constants.py +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto/costs.py +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto.egg-info/SOURCES.txt +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto.egg-info/dependency_links.txt +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto.egg-info/requires.txt +0 -0
- {tokencostauto-0.1.342 → tokencostauto-0.1.346}/tokencostauto.egg-info/top_level.txt +0 -0
PKG-INFO:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.342
+Version: 0.1.346
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
```
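The bulk of the release is in tokencostauto/model_prices.json (+240 −5), the bundled LiteLLM-style price table: a flat JSON object mapping model names to per-token (or per-image / per-character) costs and capability flags. All of the JSON hunks below come from that file. As a minimal sketch of how the table can be inspected, assuming the JSON ships as package data next to the installed tokencostauto module (the loading helper the library itself uses may differ):

```python
# Sketch: load the bundled price table and look at one entry.
# Assumes model_prices.json is installed as package data alongside the
# tokencostauto module, as the file list above suggests.
import json
from importlib import resources

with resources.files("tokencostauto").joinpath("model_prices.json").open() as f:
    prices = json.load(f)

entry = prices["gpt-5.1"]  # one of the entries added in 0.1.346
print(entry["input_cost_per_token"], entry["output_cost_per_token"])
```

tokencostauto/model_prices.json: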
```diff
@@ -9266,6 +9266,7 @@
     "supports_function_calling": false,
     "supports_parallel_function_calling": true,
     "supports_prompt_caching": true,
+    "supports_reasoning": true,
     "supports_response_schema": false,
     "supports_system_messages": true,
     "supports_tool_choice": true,
```
```diff
@@ -10442,6 +10443,7 @@
     "supports_audio_output": true,
     "supports_function_calling": true,
     "supports_prompt_caching": true,
+    "supports_reasoning": true,
     "supports_response_schema": true,
     "supports_system_messages": true,
     "supports_tool_choice": true,
```
```diff
@@ -24690,6 +24692,7 @@
     "litellm_provider": "vertex_ai-language-models",
     "max_audio_length_hours": 8.4,
     "max_audio_per_prompt": 1,
+    "supports_reasoning": false,
     "max_images_per_prompt": 3000,
     "max_input_tokens": 32768,
     "max_output_tokens": 32768,
```
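The three hunks above only add a supports_reasoning capability flag to existing entries (two set to true, one vertex_ai-language-models entry set to false). A hedged sketch of querying such flags from the table loaded earlier; the field name is taken from the diff, and `prices` is the hypothetical dict from the previous example:

```python
# Sketch: list model names whose entries advertise reasoning support.
reasoning_models = [
    name
    for name, entry in prices.items()
    if isinstance(entry, dict) and entry.get("supports_reasoning") is True
]
print(len(reasoning_models), "entries set supports_reasoning to true")
```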
```diff
@@ -25876,7 +25879,9 @@
       "1280x720",
       "720x1280"
     ],
-    "
+    "metadata": {
+      "comment": "5 credits per second @ $0.01 per credit = $0.05 per second"
+    }
   },
   "runwayml/gen4_aleph": {
     "litellm_provider": "runwayml",
```
```diff
@@ -25894,7 +25899,9 @@
       "1280x720",
       "720x1280"
     ],
-    "
+    "metadata": {
+      "comment": "15 credits per second @ $0.01 per credit = $0.15 per second"
+    }
   },
   "runwayml/gen3a_turbo": {
     "litellm_provider": "runwayml",
```
```diff
@@ -25912,7 +25919,9 @@
       "1280x720",
       "720x1280"
     ],
-    "
+    "metadata": {
+      "comment": "5 credits per second @ $0.01 per credit = $0.05 per second"
+    }
   },
   "runwayml/gen4_image": {
     "litellm_provider": "runwayml",
```
```diff
@@ -25931,7 +25940,9 @@
       "1280x720",
       "1920x1080"
     ],
-    "
+    "metadata": {
+      "comment": "5 credits per 720p image or 8 credits per 1080p image @ $0.01 per credit. Using 5 credits ($0.05) as base cost"
+    }
   },
   "runwayml/gen4_image_turbo": {
     "litellm_provider": "runwayml",
```
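The four hunks above attach a metadata.comment to the RunwayML video and image entries spelling out their credit-based pricing at $0.01 per credit. The quoted arithmetic works out as follows; the 10-second clip length is a made-up example:

```python
# Worked example of the credit arithmetic quoted in the metadata comments.
CREDIT_USD = 0.01  # every comment above prices a credit at $0.01

# Per-second video rates quoted above: 5 credits/s and 15 credits/s.
print(5 * CREDIT_USD)        # 0.05 -> $0.05 per second
print(15 * CREDIT_USD)       # 0.15 -> $0.15 per second
print(15 * CREDIT_USD * 10)  # 1.5  -> a hypothetical 10-second clip at the 15-credit rate

# Per-image rates quoted above: 5 credits (720p) or 8 credits (1080p).
print(5 * CREDIT_USD, 8 * CREDIT_USD)  # 0.05 0.08
```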
```diff
@@ -25950,6 +25961,230 @@
       "1280x720",
       "1920x1080"
     ],
-    "
+    "metadata": {
+      "comment": "2 credits per image (any resolution) @ $0.01 per credit = $0.02 per image"
+    }
+  },
+  "gpt-5.1": {
+    "cache_read_input_token_cost": 1.25e-07,
+    "cache_read_input_token_cost_priority": 2.5e-07,
+    "input_cost_per_token": 1.25e-06,
+    "input_cost_per_token_priority": 2.5e-06,
+    "litellm_provider": "openai",
+    "max_input_tokens": 272000,
+    "max_output_tokens": 128000,
+    "max_tokens": 128000,
+    "mode": "chat",
+    "output_cost_per_token": 1e-05,
+    "output_cost_per_token_priority": 2e-05,
+    "supported_endpoints": [
+      "/v1/chat/completions",
+      "/v1/responses"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text",
+      "image"
+    ],
+    "supports_function_calling": true,
+    "supports_native_streaming": true,
+    "supports_parallel_function_calling": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_reasoning": true,
+    "supports_response_schema": true,
+    "supports_system_messages": true,
+    "supports_tool_choice": true,
+    "supports_service_tier": true,
+    "supports_vision": true
+  },
+  "gpt-5.1-2025-11-13": {
+    "cache_read_input_token_cost": 1.25e-07,
+    "cache_read_input_token_cost_priority": 2.5e-07,
+    "input_cost_per_token": 1.25e-06,
+    "input_cost_per_token_priority": 2.5e-06,
+    "litellm_provider": "openai",
+    "max_input_tokens": 272000,
+    "max_output_tokens": 128000,
+    "max_tokens": 128000,
+    "mode": "chat",
+    "output_cost_per_token": 1e-05,
+    "output_cost_per_token_priority": 2e-05,
+    "supported_endpoints": [
+      "/v1/chat/completions",
+      "/v1/responses"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text",
+      "image"
+    ],
+    "supports_function_calling": true,
+    "supports_native_streaming": true,
+    "supports_parallel_function_calling": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_reasoning": true,
+    "supports_response_schema": true,
+    "supports_system_messages": true,
+    "supports_tool_choice": true,
+    "supports_service_tier": true,
+    "supports_vision": true
+  },
+  "gpt-5.1-chat-latest": {
+    "cache_read_input_token_cost": 1.25e-07,
+    "cache_read_input_token_cost_priority": 2.5e-07,
+    "input_cost_per_token": 1.25e-06,
+    "input_cost_per_token_priority": 2.5e-06,
+    "litellm_provider": "openai",
+    "max_input_tokens": 128000,
+    "max_output_tokens": 16384,
+    "max_tokens": 16384,
+    "mode": "chat",
+    "output_cost_per_token": 1e-05,
+    "output_cost_per_token_priority": 2e-05,
+    "supported_endpoints": [
+      "/v1/chat/completions",
+      "/v1/responses"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text",
+      "image"
+    ],
+    "supports_function_calling": false,
+    "supports_native_streaming": true,
+    "supports_parallel_function_calling": false,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_reasoning": true,
+    "supports_response_schema": true,
+    "supports_system_messages": true,
+    "supports_tool_choice": false,
+    "supports_vision": true
+  },
+  "gpt-5.1-codex": {
+    "cache_read_input_token_cost": 1.25e-07,
+    "cache_read_input_token_cost_priority": 2.5e-07,
+    "input_cost_per_token": 1.25e-06,
+    "input_cost_per_token_priority": 2.5e-06,
+    "litellm_provider": "openai",
+    "max_input_tokens": 272000,
+    "max_output_tokens": 128000,
+    "max_tokens": 128000,
+    "mode": "responses",
+    "output_cost_per_token": 1e-05,
+    "output_cost_per_token_priority": 2e-05,
+    "supported_endpoints": [
+      "/v1/responses"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text"
+    ],
+    "supports_function_calling": true,
+    "supports_native_streaming": true,
+    "supports_parallel_function_calling": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_reasoning": true,
+    "supports_response_schema": true,
+    "supports_system_messages": false,
+    "supports_tool_choice": true,
+    "supports_vision": true
+  },
+  "gpt-5.1-codex-mini": {
+    "cache_read_input_token_cost": 2.5e-08,
+    "cache_read_input_token_cost_priority": 4.5e-08,
+    "input_cost_per_token": 2.5e-07,
+    "input_cost_per_token_priority": 4.5e-07,
+    "litellm_provider": "openai",
+    "max_input_tokens": 272000,
+    "max_output_tokens": 128000,
+    "max_tokens": 128000,
+    "mode": "responses",
+    "output_cost_per_token": 2e-06,
+    "output_cost_per_token_priority": 3.6e-06,
+    "supported_endpoints": [
+      "/v1/responses"
+    ],
+    "supported_modalities": [
+      "text",
+      "image"
+    ],
+    "supported_output_modalities": [
+      "text"
+    ],
+    "supports_function_calling": true,
+    "supports_native_streaming": true,
+    "supports_parallel_function_calling": true,
+    "supports_pdf_input": true,
+    "supports_prompt_caching": true,
+    "supports_reasoning": true,
+    "supports_response_schema": true,
+    "supports_system_messages": false,
+    "supports_tool_choice": true,
+    "supports_vision": true
+  },
+  "fal_ai/fal-ai/flux/schnell": {
+    "litellm_provider": "fal_ai",
+    "mode": "image_generation",
+    "output_cost_per_image": 0.003,
+    "supported_endpoints": [
+      "/v1/images/generations"
+    ]
+  },
+  "fal_ai/fal-ai/imagen4/preview/fast": {
+    "litellm_provider": "fal_ai",
+    "mode": "image_generation",
+    "output_cost_per_image": 0.02,
+    "supported_endpoints": [
+      "/v1/images/generations"
+    ]
+  },
+  "fal_ai/fal-ai/imagen4/preview/ultra": {
+    "litellm_provider": "fal_ai",
+    "mode": "image_generation",
+    "output_cost_per_image": 0.06,
+    "supported_endpoints": [
+      "/v1/images/generations"
+    ]
+  },
+  "voyage/voyage-3.5": {
+    "input_cost_per_token": 6e-08,
+    "litellm_provider": "voyage",
+    "max_input_tokens": 32000,
+    "max_tokens": 32000,
+    "mode": "embedding",
+    "output_cost_per_token": 0.0
+  },
+  "voyage/voyage-3.5-lite": {
+    "input_cost_per_token": 2e-08,
+    "litellm_provider": "voyage",
+    "max_input_tokens": 32000,
+    "max_tokens": 32000,
+    "mode": "embedding",
+    "output_cost_per_token": 0.0
+  },
+  "runwayml/eleven_multilingual_v2": {
+    "litellm_provider": "runwayml",
+    "mode": "audio_speech",
+    "input_cost_per_character": 3e-07,
+    "source": "https://docs.dev.runwayml.com/guides/pricing/",
+    "metadata": {
+      "comment": "Estimated cost based on standard TTS pricing. RunwayML uses ElevenLabs models."
+    }
   }
 }
```
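The large hunk above adds the gpt-5.1 family (per-token chat/responses pricing with priority and cached-input tiers), three fal_ai image-generation entries priced per image, two voyage embedding entries priced per input token, and a RunwayML TTS entry priced per character. A worked sketch of how the gpt-5.1 per-token rates translate into per-call USD; the token counts are hypothetical, and it assumes cached prompt tokens are billed at the cache-read rate instead of the full input rate:

```python
# Sketch: USD cost of one hypothetical gpt-5.1 call, using the rates added above.
INPUT_RATE = 1.25e-06         # input_cost_per_token
CACHED_INPUT_RATE = 1.25e-07  # cache_read_input_token_cost
OUTPUT_RATE = 1e-05           # output_cost_per_token

prompt_tokens = 12_000        # hypothetical call
cached_tokens = 8_000         # hypothetical portion served from the prompt cache
completion_tokens = 1_500

cost = (
    (prompt_tokens - cached_tokens) * INPUT_RATE
    + cached_tokens * CACHED_INPUT_RATE
    + completion_tokens * OUTPUT_RATE
)
print(f"${cost:.4f}")  # $0.0210

# The voyage/voyage-3.5 embedding entry is input-only at 6e-08 per token:
print(f"${1_000_000 * 6e-08:.2f} per 1M embedding tokens")  # $0.06
```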
tokencostauto.egg-info/PKG-INFO:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.342
+Version: 0.1.346
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
```
The remaining 12 files (marked +0 −0 in the list above) are unchanged between the two versions.