nous_genai-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. nous/__init__.py +3 -0
  2. nous/genai/__init__.py +56 -0
  3. nous/genai/__main__.py +3 -0
  4. nous/genai/_internal/__init__.py +1 -0
  5. nous/genai/_internal/capability_rules.py +476 -0
  6. nous/genai/_internal/config.py +102 -0
  7. nous/genai/_internal/errors.py +63 -0
  8. nous/genai/_internal/http.py +951 -0
  9. nous/genai/_internal/json_schema.py +54 -0
  10. nous/genai/cli.py +1316 -0
  11. nous/genai/client.py +719 -0
  12. nous/genai/mcp_cli.py +275 -0
  13. nous/genai/mcp_server.py +1080 -0
  14. nous/genai/providers/__init__.py +15 -0
  15. nous/genai/providers/aliyun.py +535 -0
  16. nous/genai/providers/anthropic.py +483 -0
  17. nous/genai/providers/gemini.py +1606 -0
  18. nous/genai/providers/openai.py +1909 -0
  19. nous/genai/providers/tuzi.py +1158 -0
  20. nous/genai/providers/volcengine.py +273 -0
  21. nous/genai/reference/__init__.py +17 -0
  22. nous/genai/reference/catalog.py +206 -0
  23. nous/genai/reference/mappings.py +467 -0
  24. nous/genai/reference/mode_overrides.py +26 -0
  25. nous/genai/reference/model_catalog.py +82 -0
  26. nous/genai/reference/model_catalog_data/__init__.py +1 -0
  27. nous/genai/reference/model_catalog_data/aliyun.py +98 -0
  28. nous/genai/reference/model_catalog_data/anthropic.py +10 -0
  29. nous/genai/reference/model_catalog_data/google.py +45 -0
  30. nous/genai/reference/model_catalog_data/openai.py +44 -0
  31. nous/genai/reference/model_catalog_data/tuzi_anthropic.py +21 -0
  32. nous/genai/reference/model_catalog_data/tuzi_google.py +19 -0
  33. nous/genai/reference/model_catalog_data/tuzi_openai.py +75 -0
  34. nous/genai/reference/model_catalog_data/tuzi_web.py +136 -0
  35. nous/genai/reference/model_catalog_data/volcengine.py +107 -0
  36. nous/genai/tools/__init__.py +13 -0
  37. nous/genai/tools/output_parser.py +119 -0
  38. nous/genai/types.py +416 -0
  39. nous/py.typed +1 -0
  40. nous_genai-0.1.0.dist-info/METADATA +200 -0
  41. nous_genai-0.1.0.dist-info/RECORD +45 -0
  42. nous_genai-0.1.0.dist-info/WHEEL +5 -0
  43. nous_genai-0.1.0.dist-info/entry_points.txt +4 -0
  44. nous_genai-0.1.0.dist-info/licenses/LICENSE +190 -0
  45. nous_genai-0.1.0.dist-info/top_level.txt +1 -0
nous/genai/reference/mappings.py
@@ -0,0 +1,467 @@
+ from __future__ import annotations
+
+ import copy
+ from typing import Any
+
+
+ _MAPPINGS: list[dict[str, Any]] = [
+     # === OpenAI (chat_completions) ===
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "common",
+         "from": "params.idempotency_key",
+         "to": "header.Idempotency-Key",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "params.temperature",
+         "to": "body.temperature",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "params.top_p",
+         "to": "body.top_p",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "params.seed",
+         "to": "body.seed",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "params.reasoning.effort",
+         "to": "body.reasoning_effort",
+         "notes": "reasoning models (e.g. the gpt-5/o series)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "output.text.max_output_tokens | params.max_output_tokens",
+         "to": "body.max_completion_tokens",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "params.stop",
+         "to": "body.stop",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "provider_options['openai']",
+         "to": "body.* (merge)",
+         "notes": "explicit passthrough; must not override fields already set by the SDK",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "output.text.format/json_schema",
+         "to": "body.response_format",
+         "notes": "json_object/json_schema (text-only)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "input[].text",
+         "to": "messages[].content[].{type:text}",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "input[].image(bytes/path/url)",
+         "to": "messages[].content[].{type:image_url}",
+         "notes": "bytes/path are inlined as data:...;base64",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "input[].audio(bytes/path/url)",
+         "to": "messages[].content[].{type:input_audio}",
+         "notes": "bytes/path/url are converted to base64; format is inferred from the MIME type (wav/mp3/m4a)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "chat",
+         "from": "output.modalities includes audio + output.audio.voice/format",
+         "to": "body.modalities/body.audio",
+         "notes": "audio output is returned as message.audio (base64 data + transcript)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "images",
+         "from": "output.image.n",
+         "to": "body.n",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "images",
+         "from": "output.image.size",
+         "to": "body.size",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "images",
+         "from": "output.image.format",
+         "to": "body.response_format",
+         "notes": "url | b64_json (bytes/base64 end up as b64_json)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "images",
+         "from": "provider_options['openai']",
+         "to": "body.* (merge)",
+         "notes": "explicit passthrough of OpenAI-specific params (quality/style/background, etc.)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "embeddings",
+         "from": "input[].text",
+         "to": "body.input",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "embeddings",
+         "from": "output.embedding.dimensions",
+         "to": "body.dimensions",
+         "notes": "text-embedding-3-* only",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "embeddings",
+         "from": "provider_options['openai']",
+         "to": "body.* (merge)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "tts",
+         "from": "output.audio.voice",
+         "to": "body.voice",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "tts",
+         "from": "output.audio.format",
+         "to": "body.response_format",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "tts",
+         "from": "input[].text",
+         "to": "body.input",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "tts",
+         "from": "provider_options['openai']",
+         "to": "body.* (merge)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "transcription",
+         "from": "input[].audio",
+         "to": "multipart.file",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "transcription",
+         "from": "params.temperature",
+         "to": "multipart.temperature",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "transcription",
+         "from": "audio.meta.language",
+         "to": "multipart.language",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "transcription",
+         "from": "audio.meta.transcription_prompt | text(meta.transcription_prompt=true)",
+         "to": "multipart.prompt",
+         "notes": "arbitrary text parts are not collected automatically by default (to avoid unintentionally affecting the transcription)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "transcription",
+         "from": "provider_options['openai']",
+         "to": "multipart.* (merge)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "video",
+         "from": "output.video.duration_sec",
+         "to": "body.seconds",
+         "notes": "snapped to the nearest of 4/8/12 seconds",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "video",
+         "from": "output.video.aspect_ratio",
+         "to": "body.size",
+         "notes": "preset mapping for 16:9/9:16",
+     },
+     {
+         "provider": "openai",
+         "protocol": "chat_completions",
+         "operation": "video",
+         "from": "provider_options['openai']",
+         "to": "body.* (merge)",
+     },
+     # === OpenAI (responses) ===
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "common",
+         "from": "params.idempotency_key",
+         "to": "header.Idempotency-Key",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "params.temperature",
+         "to": "body.temperature",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "params.top_p",
+         "to": "body.top_p",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "params.reasoning.effort",
+         "to": "body.reasoning.effort",
+         "notes": "reasoning models (e.g. the gpt-5/o series)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "output.text.max_output_tokens | params.max_output_tokens",
+         "to": "body.max_output_tokens",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "output.text.format/json_schema",
+         "to": "body.text.format",
+         "notes": "json_object/json_schema (text-only)",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "input[].text",
+         "to": "body.input[].content[].{type:input_text}",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "input[].image(bytes/path/url)",
+         "to": "body.input[].content[].{type:input_image}",
+         "notes": "bytes/path are inlined as data:...;base64",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "params.seed / params.stop",
+         "to": "not mapped",
+         "notes": "OpenAI /responses currently returns unknown_parameter for these",
+     },
+     {
+         "provider": "openai",
+         "protocol": "responses",
+         "operation": "chat",
+         "from": "provider_options['openai']",
+         "to": "body.* (merge)",
+         "notes": "explicit passthrough; must not override fields already set by the SDK",
+     },
+     # === Google AI Studio (Gemini Developer API) ===
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "params.temperature",
+         "to": "generationConfig.temperature",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "params.top_p",
+         "to": "generationConfig.topP",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "params.seed",
+         "to": "generationConfig.seed",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "params.reasoning.effort",
+         "to": "generationConfig.thinkingConfig.thinkingLevel | thinkingBudget(fallback)",
+         "notes": "Gemini 3+ prefers thinkingLevel (3 Pro: low/high only; 3 Flash supports minimal/medium); Gemini 2.5 / Robotics-ER use thinkingBudget; ignored for other models",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "output.text.max_output_tokens | params.max_output_tokens",
+         "to": "generationConfig.maxOutputTokens",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "params.stop",
+         "to": "generationConfig.stopSequences",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "output.text.format/json_schema",
+         "to": "generationConfig.responseMimeType/responseSchema",
+         "notes": "application/json (text-only)",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "output.modalities",
+         "to": "generationConfig.responseModalities",
+         "notes": "TEXT/IMAGE/AUDIO (VIDEO output is not supported)",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "output.image.n",
+         "to": "generationConfig.candidateCount",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "output.image.size",
+         "to": "generationConfig.imageConfig.imageSize",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "output.audio.voice/language",
+         "to": "generationConfig.speechConfig",
+         "notes": "voiceName + languageCode",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "input[].(image/audio/video) path/url/bytes/ref",
+         "to": "inlineData | fileData(fileUri)",
+         "notes": "large files are uploaded through the Files API and waited on until ACTIVE",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "generate",
+         "from": "provider_options['google'|'gemini']",
+         "to": "body.* / generationConfig.* (merge)",
+         "notes": "explicit passthrough; must not override fields already set by the SDK",
+     },
+     {
+         "provider": "google",
+         "protocol": "gemini",
+         "operation": "embedding",
+         "from": "output.embedding.dimensions",
+         "to": "outputDimensionality",
+     },
+     # === Anthropic (messages) ===
+     {
+         "provider": "anthropic",
+         "protocol": "messages",
+         "operation": "chat",
+         "from": "params.reasoning.effort",
+         "to": "body.thinking",
+         "notes": "effort maps to thinking.enabled.budget_tokens (none sends no thinking field)",
+     },
+     # === OpenAI-compatible providers ===
+     {
+         "provider": "volcengine",
+         "protocol": "chat_completions",
+         "operation": "chat/embeddings/images",
+         "from": "same as openai/chat_completions (subset)",
+         "to": "same as openai/chat_completions (subset)",
+         "notes": "default base URL https://ark.cn-beijing.volces.com/api/v3; API key=VOLCENGINE_API_KEY",
+     },
+     {
+         "provider": "aliyun",
+         "protocol": "chat_completions",
+         "operation": "chat/embeddings",
+         "from": "same as openai/chat_completions (subset)",
+         "to": "same as openai/chat_completions (subset)",
+         "notes": "default base URL https://dashscope.aliyuncs.com/compatible-mode/v1; API key=ALIYUN_API_KEY",
+     },
+     {
+         "provider": "tuzi-openai",
+         "protocol": "chat_completions",
+         "operation": "chat/images/audio/embeddings",
+         "from": "same as openai/chat_completions (subset)",
+         "to": "same as openai/chat_completions (subset)",
+         "notes": "default base URL https://api.tu-zi.com/v1; API key=TUZI_OPENAI_API_KEY (TUZI_WEB_API_KEY for tuzi-web)",
+     },
+ ]
+
+
+ def get_parameter_mappings() -> list[dict[str, Any]]:
+     """
+     Return a JSON-friendly table of the current unified-params -> provider-params mapping.
+     """
+     return copy.deepcopy(_MAPPINGS)
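
The mapping table above is plain data, so it can be inspected like any list of dicts. A minimal usage sketch (illustrative only, not shipped in the wheel; the import path is assumed from the file layout and the filter values are just examples):

from nous.genai.reference.mappings import get_parameter_mappings

# Pull out only the OpenAI chat-operation rows and print each mapping pair.
chat_rows = [
    row
    for row in get_parameter_mappings()
    if row["provider"] == "openai" and row["operation"] == "chat"
]
for row in chat_rows:
    print(row["from"], "->", row["to"])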
nous/genai/reference/mode_overrides.py
@@ -0,0 +1,26 @@
+ from __future__ import annotations
+
+ from typing import TypedDict
+
+
+ class CapabilityOverride(TypedDict, total=False):
+     supports_stream: bool
+     supports_job: bool
+
+
+ CAPABILITY_OVERRIDES: dict[str, dict[str, CapabilityOverride]] = {
+     # NOTE: Keep this table small and explicit.
+     #
+     # Purpose:
+     # - Fix known capability inference mistakes from heuristic `Adapter.capabilities(model_id)`.
+     # - Provide a reviewable, auditable source of truth for edge models.
+     #
+     # Keying:
+     # - provider: normalized lower-case provider name (e.g. "tuzi-web")
+     # - model_id: exact catalog model id string (case-sensitive)
+     #
+     # Example:
+     # "tuzi-web": {
+     #     "some-model-id": {"supports_stream": False, "supports_job": True},
+     # },
+ }
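
A minimal sketch of how an override table like this could be layered on top of heuristically inferred capabilities (the lookup helper and the capabilities dict below are hypothetical, not part of the package):

from nous.genai.reference.mode_overrides import CAPABILITY_OVERRIDES, CapabilityOverride


def lookup_override(provider: str, model_id: str) -> CapabilityOverride:
    # Provider keys are normalized lower-case; model ids match case-sensitively,
    # following the keying rules described in the comments above.
    return CAPABILITY_OVERRIDES.get(provider.lower(), {}).get(model_id, {})


# Assumed heuristic result, then corrected by any explicit override entry.
capabilities = {"supports_stream": True, "supports_job": False}
capabilities.update(lookup_override("tuzi-web", "some-model-id"))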
nous/genai/reference/model_catalog.py
@@ -0,0 +1,82 @@
+ from __future__ import annotations
+
+ # Curated model catalog used by demos/CLI.
+ #
+ # Catalog only lists model ids per provider.
+ # Capabilities (and display categories) are inferred at runtime from Adapter.capabilities().
+
+ from .model_catalog_data.aliyun import MODELS as ALIYUN_MODELS
+ from .model_catalog_data.anthropic import MODELS as ANTHROPIC_MODELS
+ from .model_catalog_data.google import MODELS as GOOGLE_MODELS
+ from .model_catalog_data.openai import MODELS as OPENAI_MODELS
+ from .model_catalog_data.tuzi_anthropic import MODELS as TUZI_ANTHROPIC_MODELS
+ from .model_catalog_data.tuzi_google import MODELS as TUZI_GOOGLE_MODELS
+ from .model_catalog_data.tuzi_openai import MODELS as TUZI_OPENAI_MODELS
+ from .model_catalog_data.tuzi_web import MODELS as TUZI_WEB_MODELS
+ from .model_catalog_data.volcengine import MODELS as VOLCENGINE_MODELS
+
+
+ MODEL_CATALOG: dict[str, list[str]] = {
+     "openai": OPENAI_MODELS,
+     "anthropic": ANTHROPIC_MODELS,
+     "google": GOOGLE_MODELS,
+     "volcengine": VOLCENGINE_MODELS,
+     "aliyun": ALIYUN_MODELS,
+     "tuzi-web": TUZI_WEB_MODELS,
+     "tuzi-openai": TUZI_OPENAI_MODELS,
+     "tuzi-google": TUZI_GOOGLE_MODELS,
+     "tuzi-anthropic": TUZI_ANTHROPIC_MODELS,
+ }
+
+
+ SUPPORTED_TESTS: dict[str, list[str]] = {
+     "openai": [
+         "chat",
+         "tools",
+         "transcription",
+         "image",
+         "video",
+         "audio",
+         "embedding",
+     ],
+     "anthropic": ["chat", "tools"],
+     "google": [
+         "chat",
+         "tools",
+         "transcription",
+         "image",
+         "video",
+         "audio",
+         "embedding",
+     ],
+     "volcengine": ["chat", "image", "video", "embedding"],
+     "aliyun": ["chat", "transcription", "image", "video", "audio", "embedding"],
+     "tuzi-web": [
+         "chat",
+         "tools",
+         "transcription",
+         "image",
+         "video",
+         "audio",
+         "embedding",
+     ],
+     "tuzi-openai": [
+         "chat",
+         "tools",
+         "transcription",
+         "image",
+         "video",
+         "audio",
+         "embedding",
+     ],
+     "tuzi-google": [
+         "chat",
+         "tools",
+         "transcription",
+         "image",
+         "video",
+         "audio",
+         "embedding",
+     ],
+     "tuzi-anthropic": ["chat", "tools"],
+ }
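
For illustration (not shipped code), the two tables can be cross-referenced, for example to list the providers whose demo/CLI tests cover a given category together with the size of each provider's catalog:

from nous.genai.reference.model_catalog import MODEL_CATALOG, SUPPORTED_TESTS

# Providers wired up for the "embedding" test, with the number of catalog model ids each lists.
for provider, tests in SUPPORTED_TESTS.items():
    if "embedding" in tests:
        print(provider, len(MODEL_CATALOG[provider]))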
nous/genai/reference/model_catalog_data/__init__.py
@@ -0,0 +1 @@
+ from __future__ import annotations
nous/genai/reference/model_catalog_data/aliyun.py
@@ -0,0 +1,98 @@
+ from __future__ import annotations
+
+ MODELS: list[str] = [
+     "deepseek-v3",
+     "deepseek-v3.1",
+     "deepseek-v3.2",
+     "kimi-k2-thinking",
+     "qvq-max",
+     "qvq-plus",
+     "qwen-1.8b-chat",
+     "qwen-1.8b-longcontext-chat",
+     "qwen-14b-chat",
+     "qwen-72b-chat",
+     "qwen-7b-chat",
+     "qwen-coder-plus",
+     "qwen-coder-turbo",
+     "qwen-deep-search-planning",
+     "qwen-flash",
+     "qwen-flash-character",
+     "qwen-long",
+     "qwen-math-plus",
+     "qwen-math-turbo",
+     "qwen-max",
+     "qwen-max-longcontext",
+     "qwen-mt-flash",
+     "qwen-mt-lite",
+     "qwen-mt-plus",
+     "qwen-mt-turbo",
+     "qwen-omni-turbo",
+     "qwen-plus",
+     "qwen-turbo",
+     "qwen-vl-max",
+     "qwen-vl-ocr",
+     "qwen-vl-plus",
+     "qwen1.5-0.5b-chat",
+     "qwen1.5-1.8b-chat",
+     "qwen1.5-110b-chat",
+     "qwen1.5-14b-chat",
+     "qwen1.5-32b-chat",
+     "qwen1.5-72b-chat",
+     "qwen1.5-7b-chat",
+     "qwen2-0.5b-instruct",
+     "qwen2-1.5b-instruct",
+     "qwen2-57b-a14b-instruct",
+     "qwen2-7b-instruct",
+     "qwen2.5-0.5b-instruct",
+     "qwen2.5-1.5b-instruct",
+     "qwen2.5-14b-instruct",
+     "qwen2.5-14b-instruct-1m",
+     "qwen2.5-32b-instruct",
+     "qwen2.5-3b-instruct",
+     "qwen2.5-72b-instruct",
+     "qwen2.5-7b-instruct",
+     "qwen2.5-7b-instruct-1m",
+     "qwen2.5-coder-0.5b-instruct",
+     "qwen2.5-coder-14b-instruct",
+     "qwen2.5-coder-32b-instruct",
+     "qwen2.5-coder-3b-instruct",
+     "qwen2.5-coder-7b-instruct",
+     "qwen2.5-math-1.5b-instruct",
+     "qwen2.5-math-72b-instruct",
+     "qwen2.5-math-7b-instruct",
+     "qwen2.5-vl-32b-instruct",
+     "qwen3-0.6b",
+     "qwen3-1.7b",
+     "qwen3-14b",
+     "qwen3-235b-a22b",
+     "qwen3-235b-a22b-instruct-2507",
+     "qwen3-235b-a22b-thinking-2507",
+     "qwen3-30b-a3b",
+     "qwen3-30b-a3b-instruct-2507",
+     "qwen3-30b-a3b-thinking-2507",
+     "qwen3-32b",
+     "qwen3-4b",
+     "qwen3-8b",
+     "qwen3-coder-480b-a35b-instruct",
+     "qwen3-coder-flash",
+     "qwen3-coder-plus",
+     "qwen3-livetranslate-flash",
+     "qwen3-livetranslate-flash-realtime",
+     "qwen3-max-preview",
+     "qwen3-next-80b-a3b-instruct",
+     "qwen3-next-80b-a3b-thinking",
+     "qwen3-omni-flash",
+     "qwen3-omni-flash-realtime",
+     "qwen3-vl-flash",
+     "qwen3-vl-plus",
+     "qwq-plus",
+     "qwen3-asr-flash-realtime",
+     "qwen-image-edit-plus",
+     "qwen-image-max",
+     "z-image-turbo",
+     "qwen-tts",
+     "qwen3-tts-flash",
+     "qwen3-tts-flash-realtime",
+     "qwen3-tts-vc-realtime",
+     "qwen3-tts-vd-realtime",
+ ]
nous/genai/reference/model_catalog_data/anthropic.py
@@ -0,0 +1,10 @@
+ from __future__ import annotations
+
+ MODELS: list[str] = [
+     "claude-haiku-4",
+     "claude-haiku-4-5",
+     "claude-opus-4",
+     "claude-opus-4-5",
+     "claude-sonnet-4",
+     "claude-sonnet-4-5",
+ ]