lm-deluge 0.0.35-py3-none-any.whl → 0.0.36-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lm-deluge might be problematic.

@@ -0,0 +1,74 @@
+ GROQ_MODELS = {
+     "llama-3.1-8b-groq": {
+         "id": "llama-3.1-8b-groq",
+         "name": "llama-3.1-8b-instant",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "llama-3.3-70b-groq": {
+         "id": "llama-3.3-70b-groq",
+         "name": "llama-3.3-70b-versatile",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "r1-llama-70b-groq": {
+         "id": "r1-llama-70b-groq",
+         "name": "deepseek-r1-distill-llama-70b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "llama-4-maverick-groq": {
+         "id": "llama-4-maverick-groq",
+         "name": "meta-llama/llama-4-maverick-17b-128e-instruct",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "llama-4-scout-groq": {
+         "id": "llama-4-scout-groq",
+         "name": "meta-llama/llama-4-scout-17b-16e-instruct",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "kimi-k2-groq": {
+         "id": "kimi-k2-groq",
+         "name": "moonshotai/kimi-k2-instruct",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "gpt-oss-120b-groq": {
+         "id": "gpt-oss-120b-groq",
+         "name": "openai/gpt-oss-120b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "gpt-oss-20b-groq": {
+         "id": "gpt-oss-20b-groq",
+         "name": "openai/gpt-oss-20b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "qwen-3-32b-groq": {
+         "id": "qwen-3-32b-groq",
+         "name": "qwen/qwen3-32b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+ }
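Each Groq entry is plain data: "name" is the provider-side model identifier, "api_base" the OpenAI-compatible endpoint, and "api_key_env_var" the environment variable the key is read from. A minimal sketch of how such an entry might be consumed follows; the chat_once helper and the use of requests are illustrative assumptions, not lm-deluge's actual (async) client.

```python
# Minimal sketch (not lm-deluge's real client): resolve a GROQ_MODELS entry
# and send one chat completion to the OpenAI-compatible endpoint it points at.
import os
import requests  # assumption: a simple synchronous HTTP client for illustration


def chat_once(registry: dict, model_key: str, prompt: str) -> str:
    cfg = registry[model_key]                      # e.g. "llama-3.3-70b-groq"
    api_key = os.environ[cfg["api_key_env_var"]]   # GROQ_API_KEY must be set
    resp = requests.post(
        f"{cfg['api_base']}/chat/completions",
        headers={"Authorization": f"Bearer {api_key}"},
        json={
            "model": cfg["name"],                  # provider-side model name
            "messages": [{"role": "user", "content": prompt}],
        },
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]
```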
@@ -0,0 +1,65 @@
+ META_MODELS = {
+     # `7MMM. ,MMF' mm
+     # MMMb dPMM MM
+     # M YM ,M MM .gP"Ya mmMMmm ,6"Yb.
+     # M Mb M' MM ,M' Yb MM 8) MM
+     # M YM.P' MM 8M"""""" MM ,pm9MM
+     # M `YM' MM YM. , MM 8M MM
+     # .JML. `' .JMML.`Mbmmd' `Mbmo`Moo9^Yo.
+     "llama-4-scout": {
+         "id": "llama-4-scout",
+         "name": "Llama-4-Scout-17B-16E-Instruct-FP8",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+     "llama-4-maverick": {
+         "id": "llama-4-maverick",
+         "name": "Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+     "llama-3.3-70b": {
+         "id": "llama-3.3-70b",
+         "name": "Llama-3.3-70B-Instruct",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+     "llama-3.3-8b": {
+         "id": "llama-3.3-8b",
+         "name": "Llama-3.3-8B-Instruct",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+ }
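The Meta entries add per-model "requests_per_minute" and "tokens_per_minute" defaults. A rough sketch of how such fields could drive a fixed-window throttle is below; this is illustrative only and not lm-deluge's actual scheduler, whose behavior may differ.

```python
# Rough sketch: a per-model fixed-window rate limiter fed by the registry's
# requests_per_minute / tokens_per_minute fields (hypothetical helper).
import asyncio
import time


class MinuteBucket:
    def __init__(self, cfg: dict):
        self.rpm = cfg.get("requests_per_minute", 1_000)
        self.tpm = cfg.get("tokens_per_minute", 100_000)
        self.window_start = time.monotonic()
        self.requests = 0
        self.tokens = 0

    async def acquire(self, est_tokens: int) -> None:
        while True:
            now = time.monotonic()
            if now - self.window_start >= 60:      # new minute: reset counters
                self.window_start, self.requests, self.tokens = now, 0, 0
            if self.requests + 1 <= self.rpm and self.tokens + est_tokens <= self.tpm:
                self.requests += 1
                self.tokens += est_tokens
                return
            await asyncio.sleep(0.25)              # wait for the window to roll over


# bucket = MinuteBucket(META_MODELS["llama-4-scout"]); await bucket.acquire(1_500)
```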
@@ -0,0 +1,110 @@
+ # ██████ ██████ ███ █████ ████
+ # ░░██████ ██████ ░░░ ░░███ ░░███
+ # ░███░█████░███ ████ █████ ███████ ████████ ██████ ░███
+ # ░███░░███ ░███ ░░███ ███░░ ░░░███░ ░░███░░███ ░░░░░███ ░███
+ # ░███ ░░░ ░███ ░███ ░░█████ ░███ ░███ ░░░ ███████ ░███
+ # ░███ ░███ ░███ ░░░░███ ░███ ███ ░███ ███░░███ ░███
+ # █████ █████ █████ ██████ ░░█████ █████ ░░████████ █████
+ # ░░░░░ ░░░░░ ░░░░░ ░░░░░░ ░░░░░ ░░░░░ ░░░░░░░░ ░░░░░
+ MISTRAL_MODELS = {
+     "mistral-medium": {
+         "id": "mistral-medium",
+         "name": "mistral-medium-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.4,
+         "output_cost": 2.0,
+     },
+     "mistral-large": {
+         "id": "mistral-large",
+         "name": "mistral-large-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 2.0,
+         "output_cost": 6.0,
+     },
+     "pixtral-large": {
+         "id": "pixtral-large",
+         "name": "pixtral-large-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 2.0,
+         "output_cost": 6.0,
+     },
+     "mistral-small": {
+         "id": "mistral-small",
+         "name": "mistral-small-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "devstral-small": {
+         "id": "devstral-small",
+         "name": "devstral-small-2505",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "codestral": {
+         "id": "codestral",
+         "name": "codestral-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.2,
+         "output_cost": 0.6,
+     },
+     "pixtral-12b": {
+         "id": "pixtral-12b",
+         "name": "pixtral-12b",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "mistral-nemo": {
+         "id": "mistral-nemo",
+         "name": "open-mistral-nemo",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "ministral-8b": {
+         "id": "ministral-8b",
+         "name": "ministral-8b-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.7,
+         "output_cost": 0.7,
+     },
+     "mixtral-8x22b": {
+         "id": "mixtral-8x22b",
+         "name": "open-mixtral-8x22b",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 2.0,
+         "output_cost": 6.0,
+     },
+ }
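The Mistral entries carry "input_cost" and "output_cost" pricing fields. A small sketch of a cost estimate built from them is below, assuming the figures are USD per million tokens (the usual convention for fields like these); the estimate_cost helper is hypothetical, not part of the package.

```python
# Sketch of a cost estimate from the registry's pricing fields, assuming
# input_cost/output_cost are USD per million tokens.
def estimate_cost(cfg: dict, input_tokens: int, output_tokens: int) -> float:
    return (
        input_tokens / 1_000_000 * cfg.get("input_cost", 0.0)
        + output_tokens / 1_000_000 * cfg.get("output_cost", 0.0)
    )


# e.g. estimate_cost(MISTRAL_MODELS["mistral-small"], 10_000, 2_000)
# -> 10_000/1e6 * 0.1 + 2_000/1e6 * 0.3 = 0.0016 USD
```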
@@ -0,0 +1,318 @@
+ OPENAI_MODELS = {
+     # ███████ █████████ █████
+     # ███░░░░░███ ███░░░░░███ ░░███
+     # ███ ░░███ ████████ ██████ ████████ ░███ ░███ ░███
+     # ░███ ░███░░███░░███ ███░░███░░███░░███ ░███████████ ░███
+     # ░███ ░███ ░███ ░███░███████ ░███ ░███ ░███░░░░░███ ░███
+     # ░░███ ███ ░███ ░███░███░░░ ░███ ░███ ░███ ░███ ░███
+     # ░░░███████░ ░███████ ░░██████ ████ █████ █████ █████ █████
+     # ░░░░░░░ ░███░░░ ░░░░░░ ░░░░ ░░░░░ ░░░░░ ░░░░░ ░░░░░
+     # ░███
+     # █████
+     # ░░░░░
+     "gpt-5": {
+         "id": "gpt-5",
+         "name": "gpt-5",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 1.25,
+         "cached_input_cost": 0.125,
+         "output_cost": 10.0,
+         "reasoning_model": True,
+     },
+     "gpt-5-chat": {
+         "id": "gpt-5-chat",
+         "name": "gpt-5-chat-latest",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 1.25,
+         "cached_input_cost": 0.125,
+         "output_cost": 10.0,
+         "reasoning_model": False,
+     },
+     "gpt-5-mini": {
+         "id": "gpt-5-mini",
+         "name": "gpt-5-mini",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 0.25,
+         "cached_input_cost": 0.025,
+         "output_cost": 2.0,
+         "reasoning_model": True,
+     },
+     "gpt-5-nano": {
+         "id": "gpt-5-nano",
+         "name": "gpt-5-nano",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 0.05,
+         "cached_input_cost": 0.005,
+         "output_cost": 0.40,
+         "reasoning_model": True,
+     },
+     "openai-computer-use-preview": {
+         "id": "openai-computer-use-preview",
+         "name": "computer-use-preview",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 2.0,
+         "output_cost": 8.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "o3": {
+         "id": "o3",
+         "name": "o3-2025-04-16",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 10.0,
+         "output_cost": 40.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "o4-mini": {
+         "id": "o4-mini",
+         "name": "o4-mini-2025-04-16",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 1.1,
+         "output_cost": 4.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "gpt-4.1": {
+         "id": "gpt-4.1",
+         "name": "gpt-4.1-2025-04-14",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 2.0,
+         "output_cost": 8.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "gpt-4.1-mini": {
+         "id": "gpt-4.1-mini",
+         "name": "gpt-4.1-mini-2025-04-14",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 0.4,
+         "output_cost": 1.6,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "gpt-4.1-nano": {
+         "id": "gpt-4.1-nano",
+         "name": "gpt-4.1-nano-2025-04-14",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "gpt-4.5": {
+         "id": "gpt-4.5",
+         "name": "gpt-4.5-preview-2025-02-27",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 75.0,
+         "output_cost": 150.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "o3-mini": {
+         "id": "o3-mini",
+         "name": "o3-mini-2025-01-31",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 1.1,
+         "output_cost": 4.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "o1": {
+         "id": "o1",
+         "name": "o1-2024-12-17",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 15.0,
+         "output_cost": 60.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "o1-preview": {
+         "id": "o1-preview",
+         "name": "o1-preview-2024-09-12",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 15.0,
+         "output_cost": 60.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "o1-mini": {
+         "id": "o1-mini",
+         "name": "o1-mini-2024-09-12",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 3.0,
+         "output_cost": 15.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "gpt-4o": {
+         "id": "gpt-4o",
+         "name": "gpt-4o-2024-08-06",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 5.0,
+         "output_cost": 15.0,
+         "requests_per_minute": 10_000,
+         "tokens_per_minute": 30_000_000,
+     },
+     "gpt-4o-mini": {
+         "id": "gpt-4o-mini",
+         "name": "gpt-4o-mini-2024-07-18",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 0.15,
+         "output_cost": 0.6,
+         "requests_per_minute": 60_000,
+         "tokens_per_minute": 250_000_000,
+     },
+     "gpt-3.5-turbo": {
+         "id": "gpt-3.5-turbo",
+         "name": "gpt-3.5-turbo-0125",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 0.5,
+         "output_cost": 1.5,
+         "requests_per_minute": 40_000,
+         "tokens_per_minute": 75_000_000,
+     },
+     "gpt-4-turbo": {
+         "id": "gpt-4-turbo",
+         "name": "gpt-4-turbo-2024-04-09",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 10.0,
+         "output_cost": 30.0,
+         "requests_per_minute": 10_000,
+         "tokens_per_minute": 1_500_000,
+     },
+     "gpt-4": {
+         "id": "gpt-4",
+         "name": "gpt-4-0613",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": False,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 30.0,
+         "output_cost": 60.0,
+         "requests_per_minute": 10_000,
+         "tokens_per_minute": 300_000,
+     },
+     "gpt-4-32k": {
+         "id": "gpt-4-32k",
+         "name": "gpt-4-32k-0613",
+         "api_base": "https://api.openai.com/v1",
+         "api_key_env_var": "OPENAI_API_KEY",
+         "supports_json": False,
+         "supports_logprobs": False,
+         "supports_responses": True,
+         "api_spec": "openai",
+         "input_cost": 60.0,
+         "output_cost": 120.0,
+         "requests_per_minute": 1_000,
+         "tokens_per_minute": 150_000,
+     },
+ }
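The OpenAI registry shows the fullest field set (capability flags, cached-input pricing, rate-limit defaults, a reasoning-model marker). A sketch of the shape these entries appear to share is below, inferred from the values in the diff; lm-deluge may well define its own model class rather than a TypedDict, and the pricing units are an assumption.

```python
# Sketch of the common entry shape, inferred from the registries above
# (hypothetical schema; not lm-deluge's actual type definitions).
from typing import TypedDict


class ModelEntry(TypedDict, total=False):
    id: str                    # registry key, repeated inside the entry
    name: str                  # provider-side model name sent on the wire
    api_base: str              # base URL of the provider API
    api_key_env_var: str       # env var holding the API key
    api_spec: str              # "openai" or "mistral" request/response shape
    supports_json: bool        # structured/JSON output supported
    supports_logprobs: bool    # logprobs supported
    supports_responses: bool   # usable via OpenAI's Responses API
    reasoning_model: bool      # reasoning model (o-series, gpt-5, ...)
    input_cost: float          # assumed USD per million input tokens
    cached_input_cost: float   # assumed USD per million cached input tokens
    output_cost: float         # assumed USD per million output tokens
    requests_per_minute: int   # default request rate limit
    tokens_per_minute: int     # default token rate limit
```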
@@ -0,0 +1 @@
+ OPENROUTER_MODELS = {}
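OPENROUTER_MODELS is introduced empty, presumably as a placeholder. Since the per-provider dicts live in separate modules, one plausible way to use them is to merge them into a single lookup keyed by the short model id, as in the sketch below; how lm-deluge actually assembles its registry may differ, and the resolve helper is illustrative only.

```python
# Sketch: merge the per-provider registries into one lookup table (assumes the
# dicts above are imported into scope); hypothetical helper, not the package API.
MODELS: dict[str, dict] = {
    **GROQ_MODELS,
    **META_MODELS,
    **MISTRAL_MODELS,
    **OPENAI_MODELS,
    **OPENROUTER_MODELS,
}


def resolve(model_id: str) -> dict:
    try:
        return MODELS[model_id]
    except KeyError:
        raise ValueError(f"unknown model {model_id!r}") from None


# resolve("gpt-4.1-mini")["name"]  ->  "gpt-4.1-mini-2025-04-14"
```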