tokencostauto-0.1.462-py3-none-any.whl → tokencostauto-0.1.464-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tokencostauto/model_prices.json +359 -4
- {tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/METADATA +1 -1
- tokencostauto-0.1.464.dist-info/RECORD +9 -0
- tokencostauto-0.1.462.dist-info/RECORD +0 -9
- {tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/WHEEL +0 -0
- {tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/licenses/LICENSE +0 -0
- {tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/top_level.txt +0 -0
tokencostauto/model_prices.json
CHANGED
@@ -37,14 +37,14 @@
         "supports_vision": true
     },
     "gpt-4o-audio-preview": {
-        "input_cost_per_audio_token":
+        "input_cost_per_audio_token": 4e-05,
         "input_cost_per_token": 2.5e-06,
         "litellm_provider": "openai",
         "max_input_tokens": 128000,
         "max_output_tokens": 16384,
         "max_tokens": 16384,
         "mode": "chat",
-        "output_cost_per_audio_token":
+        "output_cost_per_audio_token": 8e-05,
         "output_cost_per_token": 1e-05,
         "supports_audio_input": true,
         "supports_audio_output": true,
@@ -54,14 +54,14 @@
         "supports_tool_choice": true
     },
     "gpt-4o-audio-preview-2024-10-01": {
-        "input_cost_per_audio_token":
+        "input_cost_per_audio_token": 4e-05,
         "input_cost_per_token": 2.5e-06,
         "litellm_provider": "openai",
         "max_input_tokens": 128000,
         "max_output_tokens": 16384,
         "max_tokens": 16384,
         "mode": "chat",
-        "output_cost_per_audio_token":
+        "output_cost_per_audio_token": 8e-05,
         "output_cost_per_token": 1e-05,
         "supports_audio_input": true,
         "supports_audio_output": true,
@@ -35657,5 +35657,360 @@
         "mode": "chat",
         "output_cost_per_token": 0,
         "supports_reasoning": true
+    },
+    "gmi/anthropic/claude-opus-4.5": {
+        "input_cost_per_token": 5e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 409600,
+        "max_output_tokens": 32000,
+        "max_tokens": 32000,
+        "mode": "chat",
+        "output_cost_per_token": 2.5e-05,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/anthropic/claude-sonnet-4.5": {
+        "input_cost_per_token": 3e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 409600,
+        "max_output_tokens": 32000,
+        "max_tokens": 32000,
+        "mode": "chat",
+        "output_cost_per_token": 1.5e-05,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/anthropic/claude-sonnet-4": {
+        "input_cost_per_token": 3e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 409600,
+        "max_output_tokens": 32000,
+        "max_tokens": 32000,
+        "mode": "chat",
+        "output_cost_per_token": 1.5e-05,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/anthropic/claude-opus-4": {
+        "input_cost_per_token": 1.5e-05,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 409600,
+        "max_output_tokens": 32000,
+        "max_tokens": 32000,
+        "mode": "chat",
+        "output_cost_per_token": 7.5e-05,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/openai/gpt-5.2": {
+        "input_cost_per_token": 1.75e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 409600,
+        "max_output_tokens": 32000,
+        "max_tokens": 32000,
+        "mode": "chat",
+        "output_cost_per_token": 1.4e-05,
+        "supports_function_calling": true
+    },
+    "gmi/openai/gpt-5.1": {
+        "input_cost_per_token": 1.25e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 409600,
+        "max_output_tokens": 32000,
+        "max_tokens": 32000,
+        "mode": "chat",
+        "output_cost_per_token": 1e-05,
+        "supports_function_calling": true
+    },
+    "gmi/openai/gpt-5": {
+        "input_cost_per_token": 1.25e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 409600,
+        "max_output_tokens": 32000,
+        "max_tokens": 32000,
+        "mode": "chat",
+        "output_cost_per_token": 1e-05,
+        "supports_function_calling": true
+    },
+    "gmi/openai/gpt-4o": {
+        "input_cost_per_token": 2.5e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 131072,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 1e-05,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/openai/gpt-4o-mini": {
+        "input_cost_per_token": 1.5e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 131072,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 6e-07,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/deepseek-ai/DeepSeek-V3.2": {
+        "input_cost_per_token": 2.8e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 163840,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 4e-07,
+        "supports_function_calling": true
+    },
+    "gmi/deepseek-ai/DeepSeek-V3-0324": {
+        "input_cost_per_token": 2.8e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 163840,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 8.8e-07,
+        "supports_function_calling": true
+    },
+    "gmi/google/gemini-3-pro-preview": {
+        "input_cost_per_token": 2e-06,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 1048576,
+        "max_output_tokens": 65536,
+        "max_tokens": 65536,
+        "mode": "chat",
+        "output_cost_per_token": 1.2e-05,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/google/gemini-3-flash-preview": {
+        "input_cost_per_token": 5e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 1048576,
+        "max_output_tokens": 65536,
+        "max_tokens": 65536,
+        "mode": "chat",
+        "output_cost_per_token": 3e-06,
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "gmi/moonshotai/Kimi-K2-Thinking": {
+        "input_cost_per_token": 8e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 262144,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 1.2e-06
+    },
+    "gmi/MiniMaxAI/MiniMax-M2.1": {
+        "input_cost_per_token": 3e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 196608,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 1.2e-06
+    },
+    "gmi/Qwen/Qwen3-VL-235B-A22B-Instruct-FP8": {
+        "input_cost_per_token": 3e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 262144,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 1.4e-06,
+        "supports_vision": true
+    },
+    "gmi/zai-org/GLM-4.7-FP8": {
+        "input_cost_per_token": 4e-07,
+        "litellm_provider": "gmi",
+        "max_input_tokens": 202752,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_token": 2e-06
+    },
+    "gpt-audio": {
+        "input_cost_per_audio_token": 3.2e-05,
+        "input_cost_per_token": 2.5e-06,
+        "litellm_provider": "openai",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_audio_token": 6.4e-05,
+        "output_cost_per_token": 1e-05,
+        "supported_endpoints": [
+            "/v1/chat/completions",
+            "/v1/responses",
+            "/v1/realtime",
+            "/v1/batch"
+        ],
+        "supported_modalities": [
+            "text",
+            "audio"
+        ],
+        "supported_output_modalities": [
+            "text",
+            "audio"
+        ],
+        "supports_audio_input": true,
+        "supports_audio_output": true,
+        "supports_function_calling": true,
+        "supports_native_streaming": true,
+        "supports_parallel_function_calling": true,
+        "supports_prompt_caching": false,
+        "supports_reasoning": false,
+        "supports_response_schema": false,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_vision": false
+    },
+    "gpt-audio-2025-08-28": {
+        "input_cost_per_audio_token": 3.2e-05,
+        "input_cost_per_token": 2.5e-06,
+        "litellm_provider": "openai",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_audio_token": 6.4e-05,
+        "output_cost_per_token": 1e-05,
+        "supported_endpoints": [
+            "/v1/chat/completions",
+            "/v1/responses",
+            "/v1/realtime",
+            "/v1/batch"
+        ],
+        "supported_modalities": [
+            "text",
+            "audio"
+        ],
+        "supported_output_modalities": [
+            "text",
+            "audio"
+        ],
+        "supports_audio_input": true,
+        "supports_audio_output": true,
+        "supports_function_calling": true,
+        "supports_native_streaming": true,
+        "supports_parallel_function_calling": true,
+        "supports_prompt_caching": false,
+        "supports_reasoning": false,
+        "supports_response_schema": false,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_vision": false
+    },
+    "gpt-audio-mini": {
+        "input_cost_per_audio_token": 1e-05,
+        "input_cost_per_token": 6e-07,
+        "litellm_provider": "openai",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_audio_token": 2e-05,
+        "output_cost_per_token": 2.4e-06,
+        "supported_endpoints": [
+            "/v1/chat/completions",
+            "/v1/responses",
+            "/v1/realtime",
+            "/v1/batch"
+        ],
+        "supported_modalities": [
+            "text",
+            "audio"
+        ],
+        "supported_output_modalities": [
+            "text",
+            "audio"
+        ],
+        "supports_audio_input": true,
+        "supports_audio_output": true,
+        "supports_function_calling": true,
+        "supports_native_streaming": true,
+        "supports_parallel_function_calling": true,
+        "supports_prompt_caching": false,
+        "supports_reasoning": false,
+        "supports_response_schema": false,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_vision": false
+    },
+    "gpt-audio-mini-2025-10-06": {
+        "input_cost_per_audio_token": 1e-05,
+        "input_cost_per_token": 6e-07,
+        "litellm_provider": "openai",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_audio_token": 2e-05,
+        "output_cost_per_token": 2.4e-06,
+        "supported_endpoints": [
+            "/v1/chat/completions",
+            "/v1/responses",
+            "/v1/realtime",
+            "/v1/batch"
+        ],
+        "supported_modalities": [
+            "text",
+            "audio"
+        ],
+        "supported_output_modalities": [
+            "text",
+            "audio"
+        ],
+        "supports_audio_input": true,
+        "supports_audio_output": true,
+        "supports_function_calling": true,
+        "supports_native_streaming": true,
+        "supports_parallel_function_calling": true,
+        "supports_prompt_caching": false,
+        "supports_reasoning": false,
+        "supports_response_schema": false,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_vision": false
+    },
+    "gpt-audio-mini-2025-12-15": {
+        "input_cost_per_audio_token": 1e-05,
+        "input_cost_per_token": 6e-07,
+        "litellm_provider": "openai",
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "max_tokens": 16384,
+        "mode": "chat",
+        "output_cost_per_audio_token": 2e-05,
+        "output_cost_per_token": 2.4e-06,
+        "supported_endpoints": [
+            "/v1/chat/completions",
+            "/v1/responses",
+            "/v1/realtime",
+            "/v1/batch"
+        ],
+        "supported_modalities": [
+            "text",
+            "audio"
+        ],
+        "supported_output_modalities": [
+            "text",
+            "audio"
+        ],
+        "supports_audio_input": true,
+        "supports_audio_output": true,
+        "supports_function_calling": true,
+        "supports_native_streaming": true,
+        "supports_parallel_function_calling": true,
+        "supports_prompt_caching": false,
+        "supports_reasoning": false,
+        "supports_response_schema": false,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_vision": false
     }
 }
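All of the cost fields above are USD per single token, so turning an entry into a dollar figure is a straight multiply-and-sum over prompt, completion, and (where present) audio token counts. The sketch below is illustrative only: `estimate_cost_usd` is a hypothetical helper, not part of the tokencostauto API, and it assumes model_prices.json has been loaded from the path shipped in the wheel.

```python
# Minimal sketch (not the tokencostauto API): how the per-token fields in
# model_prices.json map to a USD estimate. Field names match the entries
# shown in the diff above; the helper itself is hypothetical.
import json


def estimate_cost_usd(prices: dict, model: str,
                      input_tokens: int, output_tokens: int,
                      audio_input_tokens: int = 0,
                      audio_output_tokens: int = 0) -> float:
    """Multiply token counts by the per-token rates stored for `model`."""
    entry = prices[model]
    cost = input_tokens * entry.get("input_cost_per_token", 0.0)
    cost += output_tokens * entry.get("output_cost_per_token", 0.0)
    # Audio-capable models such as "gpt-audio" carry separate audio rates.
    cost += audio_input_tokens * entry.get("input_cost_per_audio_token", 0.0)
    cost += audio_output_tokens * entry.get("output_cost_per_audio_token", 0.0)
    return cost


if __name__ == "__main__":
    with open("tokencostauto/model_prices.json") as f:
        prices = json.load(f)
    # e.g. gmi/openai/gpt-5: 1000 * 1.25e-06 + 500 * 1e-05 = 0.00625 USD
    print(estimate_cost_usd(prices, "gmi/openai/gpt-5", 1000, 500))
```

For example, 1,000 input tokens and 500 output tokens against the new `gmi/openai/gpt-5` entry come to 1000 × 1.25e-06 + 500 × 1e-05 = 0.00625 USD.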
{tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tokencostauto
-Version: 0.1.462
+Version: 0.1.464
 Summary: To calculate token and translated USD cost of string and message calls to OpenAI, for example when used by AI agents
 Author-email: Trisha Pan <trishaepan@gmail.com>, Alex Reibman <areibman@gmail.com>, Pratyush Shukla <ps4534@nyu.edu>, Thiago MadPin <madpin@gmail.com>
 Project-URL: Homepage, https://github.com/madpin/tokencostaudo
tokencostauto-0.1.464.dist-info/RECORD
ADDED
@@ -0,0 +1,9 @@
+tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
+tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
+tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
+tokencostauto/model_prices.json,sha256=FHD1JBClJh7I11Y5Aj5a7JeeXfo_AEPnPzD42nRvybQ,1277037
+tokencostauto-0.1.464.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
+tokencostauto-0.1.464.dist-info/METADATA,sha256=FkG18A-8TKIaAlwlZSS02rYRlFuk_AgT0DdHsFbCR_g,204076
+tokencostauto-0.1.464.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+tokencostauto-0.1.464.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
+tokencostauto-0.1.464.dist-info/RECORD,,
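Each RECORD line has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing padding stripped (per the wheel spec), so the new 1,277,037-byte model_prices.json can be re-checked against an unpacked wheel. A minimal sketch, assuming the wheel has already been extracted into the current directory:

```python
# Sketch: recompute one RECORD entry from an unpacked wheel. The hash notation
# (urlsafe base64 of the SHA-256 digest, '=' padding stripped) follows the
# wheel spec; the path below is just the example from the diff above.
import base64
import hashlib


def record_digest(path: str) -> tuple[str, int]:
    """Return (sha256 in RECORD notation, size in bytes) for a file."""
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return digest.decode("ascii"), len(data)


if __name__ == "__main__":
    # Expected to match:
    # tokencostauto/model_prices.json,sha256=FHD1JBClJh7I11Y5Aj5a7JeeXfo_AEPnPzD42nRvybQ,1277037
    print(record_digest("tokencostauto/model_prices.json"))
```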
tokencostauto-0.1.462.dist-info/RECORD
REMOVED
@@ -1,9 +0,0 @@
-tokencostauto/__init__.py,sha256=-4d_ryFH62SgNXPXA8vGPFZoAKtOBjnsg37EB_RkZG8,289
-tokencostauto/constants.py,sha256=_82MlTkTrdrwzyRosQD7d3JdgNP9KAUM-cZo8DE00P0,3395
-tokencostauto/costs.py,sha256=tXsgrTypq-dCHaHtoXcg2XepezWsAvZpl9gEsv_53iE,10679
-tokencostauto/model_prices.json,sha256=aDFZZR6fnd6N4Fdc-zL3m6tsqmR8KiCelIqRuvx8oas,1265556
-tokencostauto-0.1.462.dist-info/licenses/LICENSE,sha256=4PLv_CD6Ughnsvg_nM2XeTqGwVK6lQVR77kVWbPq-0U,1065
-tokencostauto-0.1.462.dist-info/METADATA,sha256=bLqFKmPlxZSRF2VHZpr91xFCtaAPO1hdtx9yMt8TEbg,204076
-tokencostauto-0.1.462.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-tokencostauto-0.1.462.dist-info/top_level.txt,sha256=szZQTUJRotfIaeZCDsOgvofIkLt2ak88RP13oI51-TU,14
-tokencostauto-0.1.462.dist-info/RECORD,,
{tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/WHEEL
File without changes
{tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/licenses/LICENSE
File without changes
{tokencostauto-0.1.462.dist-info → tokencostauto-0.1.464.dist-info}/top_level.txt
File without changes