model-library 0.1.3-py3-none-any.whl → 0.1.5-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- model_library/base/base.py +2 -0
- model_library/base/output.py +24 -9
- model_library/base/utils.py +27 -5
- model_library/config/README.md +169 -0
- model_library/config/ai21labs_models.yaml +11 -11
- model_library/config/alibaba_models.yaml +21 -22
- model_library/config/all_models.json +4623 -2599
- model_library/config/amazon_models.yaml +100 -102
- model_library/config/anthropic_models.yaml +43 -52
- model_library/config/cohere_models.yaml +25 -24
- model_library/config/deepseek_models.yaml +28 -25
- model_library/config/dummy_model.yaml +9 -7
- model_library/config/fireworks_models.yaml +86 -56
- model_library/config/google_models.yaml +146 -126
- model_library/config/inception_models.yaml +6 -6
- model_library/config/kimi_models.yaml +13 -14
- model_library/config/minimax_models.yaml +37 -0
- model_library/config/mistral_models.yaml +85 -29
- model_library/config/openai_models.yaml +192 -150
- model_library/config/perplexity_models.yaml +10 -23
- model_library/config/together_models.yaml +115 -104
- model_library/config/xai_models.yaml +47 -79
- model_library/config/zai_models.yaml +23 -15
- model_library/exceptions.py +7 -16
- model_library/providers/amazon.py +32 -17
- model_library/providers/minimax.py +33 -0
- model_library/providers/mistral.py +10 -1
- model_library/providers/openai.py +2 -6
- model_library/register_models.py +36 -36
- model_library/registry_utils.py +78 -16
- model_library/utils.py +2 -2
- {model_library-0.1.3.dist-info → model_library-0.1.5.dist-info}/METADATA +2 -2
- model_library-0.1.5.dist-info/RECORD +64 -0
- model_library-0.1.3.dist-info/RECORD +0 -61
- {model_library-0.1.3.dist-info → model_library-0.1.5.dist-info}/WHEEL +0 -0
- {model_library-0.1.3.dist-info → model_library-0.1.5.dist-info}/licenses/LICENSE +0 -0
- {model_library-0.1.3.dist-info → model_library-0.1.5.dist-info}/top_level.txt +0 -0
model_library/config/cohere_models.yaml
@@ -2,11 +2,12 @@ base-config:
   company: Cohere
   open_source: false
   documentation_url: https://docs.cohere.com/v2/docs/models
-
+  supports:
+    images: false
+    files: false
+    tools: true
+  metadata:
     available_as_evaluator: false
-    supports_images: false
-    supports_files: false
-    supports_tools: true
     available_for_everyone: true
     ignored_for_cost: false
   properties:
@@ -16,8 +17,8 @@ command-models:
   base-config:
     properties:
       context_window: 128000
-
-
+    supports:
+      temperature: true
     default_parameters:
       temperature: 0.3

@@ -30,7 +31,7 @@ command-models:
     release_date: 2025-03-13
     properties:
      context_window: 256000
-
+      max_tokens: 8000
     costs_per_million_token:
       input: 2.5
       output: 10.0
@@ -43,8 +44,8 @@ command-models:
     release_date: 2024-04-24
     properties:
       context_window: 4000
-
-
+      max_tokens: 4000
+    metadata:
       deprecated: true
     costs_per_million_token:
       input: 0.3
@@ -58,8 +59,8 @@ command-models:
     release_date: 2024-03-24
     properties:
       context_window: 4000
-
-
+      max_tokens: 4000
+    metadata:
       deprecated: true
     costs_per_million_token:
       input: 1.0
@@ -73,8 +74,8 @@ command-models:
     release_date: 2024-04-24
     properties:
       context_window: 128000
-
-
+      max_tokens: 4000
+    metadata:
       available_for_everyone: false
       deprecated: true
     costs_per_million_token:
@@ -90,8 +91,8 @@ command-models:
     release_date: 2024-03-24
     properties:
       context_window: 128000
-
-
+      max_tokens: 4000
+    metadata:
       available_for_everyone: false
       deprecated: true
     costs_per_million_token:
@@ -107,8 +108,8 @@ command-models:
     release_date: 2024-08-30
     properties:
       context_window: 128000
-
-
+      max_tokens: 4000
+    metadata:
       available_for_everyone: false
       deprecated: true
     costs_per_million_token:
@@ -124,8 +125,8 @@ command-models:
     release_date: 2024-04-24
     properties:
       context_window: 128000
-
-
+      max_tokens: 4000
+    metadata:
       deprecated: true
     costs_per_million_token:
       input: 2.5
@@ -140,8 +141,8 @@ command-models:
     release_date: 2024-04-24
     properties:
       context_window: 128000
-
-
+      max_tokens: 4000
+    metadata:
       deprecated: true
     costs_per_million_token:
       input: 2.5
@@ -155,8 +156,8 @@ command-models:
     release_date: 2024-08-30
     properties:
       context_window: 128000
-
-
+      max_tokens: 4000
+    metadata:
       deprecated: true
     costs_per_million_token:
       input: 2.5
@@ -171,7 +172,7 @@ command-models:
     release_date: 2024-12-13
     properties:
       context_window: 128000
-
+      max_tokens: 4000
     costs_per_million_token:
       input: 0.0375
       output: 0.15
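The pattern in the hunks above repeats across the other provider configs: flat per-capability booleans (`supports_images`, `supports_files`, `supports_tools`) move under a nested `supports:` block, bookkeeping flags such as `available_as_evaluator` and `deprecated` move under `metadata:`, and `max_tokens` is added alongside `context_window` in `properties:`. A minimal sketch of reading an entry in the new shape with PyYAML (illustrative only; `model_library`'s own loader in `register_models.py`/`registry_utils.py` may differ):

```python
import yaml  # PyYAML

# Illustrative only: a trimmed base-config in the new 0.1.5 shape,
# copied from the Cohere hunk above.
DOC = """
base-config:
  company: Cohere
  supports:
    images: false
    files: false
    tools: true
  metadata:
    available_as_evaluator: false
"""

cfg = yaml.safe_load(DOC)["base-config"]

# 0.1.3 exposed flat keys such as supports_tools; 0.1.5 nests them instead.
assert cfg["supports"]["tools"] is True
assert cfg["metadata"]["available_as_evaluator"] is False
```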
model_library/config/deepseek_models.yaml
@@ -2,48 +2,51 @@ base-config:
   company: DeepSeek
   documentation_url: https://api-docs.deepseek.com/
   open_source: true
-
-
+  supports:
+    images: false
+    files: false
+  metadata:
     available_as_evaluator: false
-    supports_files: false
     available_for_everyone: true
     ignored_for_cost: false
   properties:
     reasoning_model: false
-
-deepseek-
+
+deepseek-endpoints:
   base-config:
-
-
-
-
-
+    supports:
+      images: false
+      files: false
+      tools: true
+      batch: false
+      temperature: true
     default_parameters:
       temperature: 1
-    costs_per_million_token:
-      input: 0.28
-      output: 0.42
-      cache:
-        read_discount: 0.1

   deepseek/deepseek-chat:
-    label: DeepSeek
-    description: DeepSeek
+    label: DeepSeek Chat
+    description: DeepSeek's latest chat model
     release_date: 2025-09-29
     properties:
       context_window: 128_000
-
+      max_tokens: 8_000
       reasoning_model: false
-
-
+    costs_per_million_token:
+      input: 0.27
+      output: 1.10
+      cache:
+        read: 0.07

   deepseek/deepseek-reasoner:
-    label: DeepSeek
-    description: DeepSeek
+    label: DeepSeek Reasoner
+    description: DeepSeek's latest reasoning model
     release_date: 2025-09-29
     properties:
       context_window: 128_000
-
+      max_tokens: 64_000
       reasoning_model: true
-
-
+    costs_per_million_token:
+      input: 0.55
+      output: 2.19
+      cache:
+        read: 0.14
model_library/config/dummy_model.yaml
@@ -2,11 +2,11 @@ vals-models:
   base-config:
     company: Vals AI
     documentation_url: ""
-
-
+    supports:
+      batch: true
     properties:
       context_window: 128_000
-
+      max_tokens: 16_384
       training_cutoff: ""

   vals/dumbmar-5o-ultra-thinking:
@@ -14,8 +14,9 @@ vals-models:
     open_source: false
     description: Vals Dummy Model for testing
     release_date: null
-
-
+    supports:
+      images: false
+    metadata:
       available_as_evaluator: false
     costs_per_million_token:
       input: 0.15
@@ -28,8 +29,9 @@ vals-models:
     open_source: false
     description: Vals Dummy Model for evaluating
     release_date: null
-
-
+    supports:
+      images: false
+    metadata:
       available_as_evaluator: true
     costs_per_million_token:
       input: 0.15
model_library/config/fireworks_models.yaml
@@ -2,12 +2,13 @@ base-config:
   company: Fireworks
   documentation_url: https://fireworks.ai/models
   open_source: false
-
+  supports:
+    files: false
+    tools: true
+  metadata:
     available_as_evaluator: false
-    supports_files: false
     available_for_everyone: true
     ignored_for_cost: false
-    supports_tools: true
   properties:
     reasoning_model: false
   costs_per_million_token:
@@ -21,8 +22,8 @@ qwen-models:
   base-config:
     company: Alibaba
     open_source: true
-
-
+    supports:
+      temperature: true
     default_parameters:
       temperature: 0.7

@@ -32,11 +33,11 @@ qwen-models:
     release_date: 2025-04-28
     properties:
       context_window: 128_000
-
+      max_tokens: 32_768
       training_cutoff: "2024-08"
       reasoning_model: true
-
-
+    supports:
+      images: false
     costs_per_million_token:
       input: 0.22
       output: 0.88
@@ -52,13 +53,14 @@ llama-4-models:
     release_date: 2025-04-05
     properties:
       context_window: 1_000_000
-
+      max_tokens: 16_384
       training_cutoff: "2024-08"
     costs_per_million_token:
       input: 0.22
       output: 0.88
-
-
+    supports:
+      images: true
+    metadata:
       deprecated: true

   fireworks/llama4-scout-instruct-basic:
@@ -67,23 +69,23 @@ llama-4-models:
     release_date: 2025-04-05
     properties:
       context_window: 10_000_000
-
+      max_tokens: 16_384
       training_cutoff: "2024-08"
     costs_per_million_token:
-      input: 0.
-      output: 0.
-
-
+      input: 0.15
+      output: 0.6
+    supports:
+      images: true
+    metadata:
       deprecated: true

 deepseek-models:
   base-config:
     company: DeepSeek
     open_source: true
-
-
-
-    deprecated: true
+    supports:
+      images: false
+      temperature: true
     default_parameters:
       temperature: 1

@@ -93,12 +95,14 @@ deepseek-models:
     release_date: 2025-01-20
     properties:
       context_window: 163_840
-
+      max_tokens: 163_840
       training_cutoff: null
       reasoning_model: true
+    metadata:
+      deprecated: true
     costs_per_million_token:
-      input:
-      output:
+      input: 1.35
+      output: 5.4

   fireworks/deepseek-v3-0324:
     label: DeepSeek V3 (03/24/2025)
@@ -106,10 +110,12 @@ deepseek-models:
     release_date: 2025-03-24
     properties:
       context_window: 131_072
-
+      max_tokens: 131_072
+    metadata:
+      deprecated: true
     costs_per_million_token:
-      input:
-      output:
+      input: 0.9
+      output: 0.9

   fireworks/deepseek-v3:
     label: DeepSeek V3
@@ -117,10 +123,12 @@ deepseek-models:
     release_date: 2024-12-26
     properties:
       context_window: 131_072
-
+      max_tokens: 131_072
+    metadata:
+      deprecated: true
     costs_per_million_token:
-      input: 0.
-      output: 0.
+      input: 0.9
+      output: 0.9

   fireworks/deepseek-v3p1:
     label: DeepSeek V3.1
@@ -128,19 +136,39 @@ deepseek-models:
     release_date: 2025-08-21
     properties:
       context_window: 163_840
-
-      reasoning_model: false
-
+      max_tokens: 163_840
+      reasoning_model: false
+    costs_per_million_token:
+      input: 0.56
+      output: 1.68
+
+  fireworks/deepseek-v3p2:
+    label: DeepSeek V3.2 (Nonthinking)
+    description: ""
+    release_date: 2025-12-01
+    properties:
+      context_window: 160_000
+      max_tokens: 20_480
+      reasoning_model: false
     costs_per_million_token:
       input: 0.56
       output: 1.68
+      cache:
+        read: 0.28
+    alternative_keys:
+      - fireworks/deepseek-v3p2-thinking:
+          label: DeepSeek V3.2 (Thinking)
+          properties:
+            reasoning_model: true
+          default_parameters:
+            reasoning_effort: "high"

 openai-models:
   base-config:
     company: OpenAI
     open_source: true
-
-
+    supports:
+      images: false

   fireworks/gpt-oss-120b:
     label: GPT OSS 120B
@@ -148,7 +176,7 @@ openai-models:
     release_date: 2025-08-05
     properties:
       context_window: 128_000
-
+      max_tokens: 32_768
       training_cutoff: null
       reasoning_model: true
     costs_per_million_token:
@@ -161,20 +189,20 @@ openai-models:
     release_date: 2025-08-05
     properties:
       context_window: 128_000
-
+      max_tokens: 32_768
       training_cutoff: null
       reasoning_model: true
     costs_per_million_token:
-      input: 0.
-      output: 0.
+      input: 0.07
+      output: 0.3

 kimi-models:
   base-config:
     company: Kimi
     open_source: true
     documentation_url: https://www.kimi.com/
-
-
+    supports:
+      images: false

   fireworks/kimi-k2-instruct-0905:
     label: Kimi K2 Instruct 0905
@@ -182,11 +210,11 @@ kimi-models:
     release_date: 2025-09-04
     properties:
       context_window: 256_000
-
+      max_tokens: 256_000
       training_cutoff: null
       reasoning_model: false
-
-
+    supports:
+      images: false
     costs_per_million_token:
       input: 0.60
       output: 2.50
@@ -196,33 +224,35 @@ minimax-models:
     company: MiniMax AI
     documentation_url: https://platform.minimax.io/docs
     open_source: true
-
+    supports:
+      images: false
+      files: false
+      tools: true
+      temperature: true
+    metadata:
       available_as_evaluator: false
-      supports_images: false
-      supports_files: false
-      supports_tools: true
       available_for_everyone: true
       ignored_for_cost: false
-
-
+    default_parameters:
+      temperature: 1.0
+      top_p: 0.95
+      top_k: 40

   fireworks/minimax-m2:
     label: MiniMax-M2
     description: MiniMax-M2 is a cost-efficient open-source model optimized for agentic applications and coding in particular.
     release_date: 2025-10-26
     properties:
-      context_window: 204_800
-
+      context_window: 204_800
+      max_tokens: 131_000
       reasoning_model: true
       training_cutoff: null
-
-
-
+    supports:
+      tools: true
+      temperature: true
     default_parameters: # taken from https://huggingface.co/MiniMaxAI/MiniMax-M2#inference-parameters
       temperature: 1.0
       top_p: 0.95
-      top_k: 40
     costs_per_million_token:
       input: 0.30
       output: 1.20
-
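The new cost blocks also replace the 0.1.3 `cache: read_discount:` key with what appears to be an absolute `cache: read:` price. A hypothetical helper (not part of `model_library`'s API) that estimates a request cost from one of these blocks, assuming `cache: read` is the per-million price charged for cache-hit input tokens:

```python
# Hypothetical cost estimator built on the new costs_per_million_token schema.
# Assumption: `cache: read` is the per-million price for cache-hit input tokens.
def estimate_cost(costs: dict, input_tokens: int, output_tokens: int,
                  cached_input_tokens: int = 0) -> float:
    fresh_input = input_tokens - cached_input_tokens
    cache_read_price = costs.get("cache", {}).get("read", costs["input"])
    return (
        fresh_input / 1e6 * costs["input"]
        + cached_input_tokens / 1e6 * cache_read_price
        + output_tokens / 1e6 * costs["output"]
    )

# Prices for deepseek/deepseek-chat as added in deepseek_models.yaml above.
deepseek_chat = {"input": 0.27, "output": 1.10, "cache": {"read": 0.07}}
print(estimate_cost(deepseek_chat, input_tokens=120_000,
                    output_tokens=4_000, cached_input_tokens=100_000))
# 0.0054 (fresh input) + 0.007 (cached input) + 0.0044 (output) = 0.0168
```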