claudish 3.2.0 → 3.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -61466,41 +61466,59 @@ class GeminiHandler {
61466
61466
  }));
61467
61467
  return [{ functionDeclarations }];
61468
61468
  }
61469
+ normalizeType(type) {
61470
+ if (!type)
61471
+ return "string";
61472
+ if (Array.isArray(type)) {
61473
+ const nonNullTypes = type.filter((t) => t !== "null");
61474
+ return nonNullTypes[0] || "string";
61475
+ }
61476
+ return type;
61477
+ }
61469
61478
  convertJsonSchemaToGemini(schema) {
61470
61479
  if (!schema)
61471
- return {};
61472
- const geminiSchema = {
61473
- type: schema.type || "object"
61474
- };
61475
- if (schema.properties) {
61476
- geminiSchema.properties = {};
61477
- for (const [key, prop] of Object.entries(schema.properties)) {
61478
- geminiSchema.properties[key] = this.convertPropertyToGemini(prop);
61479
- }
61480
+ return { type: "object" };
61481
+ return this.sanitizeSchemaForGemini(schema);
61482
+ }
61483
+ sanitizeSchemaForGemini(schema) {
61484
+ if (!schema || typeof schema !== "object") {
61485
+ return schema;
61480
61486
  }
61481
- if (schema.required) {
61482
- geminiSchema.required = schema.required;
61487
+ if (Array.isArray(schema)) {
61488
+ return schema.map((item) => this.sanitizeSchemaForGemini(item));
61483
61489
  }
61484
- return geminiSchema;
61485
- }
61486
- convertPropertyToGemini(prop) {
61487
- const result = {
61488
- type: prop.type || "string"
61489
- };
61490
- if (prop.description)
61491
- result.description = prop.description;
61492
- if (prop.enum)
61493
- result.enum = prop.enum;
61494
- if (prop.items)
61495
- result.items = this.convertPropertyToGemini(prop.items);
61496
- if (prop.properties) {
61490
+ const result = {};
61491
+ const normalizedType = this.normalizeType(schema.type);
61492
+ result.type = normalizedType;
61493
+ if (schema.description && typeof schema.description === "string") {
61494
+ result.description = schema.description;
61495
+ }
61496
+ if (Array.isArray(schema.enum)) {
61497
+ result.enum = schema.enum.filter((v) => typeof v === "string" || typeof v === "number" || typeof v === "boolean");
61498
+ }
61499
+ if (Array.isArray(schema.required)) {
61500
+ result.required = schema.required.filter((r) => typeof r === "string");
61501
+ }
61502
+ if (schema.properties && typeof schema.properties === "object") {
61497
61503
  result.properties = {};
61498
- for (const [k, v] of Object.entries(prop.properties)) {
61499
- result.properties[k] = this.convertPropertyToGemini(v);
61504
+ for (const [key, value] of Object.entries(schema.properties)) {
61505
+ if (value && typeof value === "object") {
61506
+ result.properties[key] = this.sanitizeSchemaForGemini(value);
61507
+ }
61508
+ }
61509
+ }
61510
+ if (schema.items) {
61511
+ if (typeof schema.items === "object" && !Array.isArray(schema.items)) {
61512
+ result.items = this.sanitizeSchemaForGemini(schema.items);
61513
+ } else if (Array.isArray(schema.items)) {
61514
+ result.items = this.sanitizeSchemaForGemini(schema.items[0]);
61500
61515
  }
61501
61516
  }
61502
61517
  return result;
61503
61518
  }
61519
+ convertPropertyToGemini(prop) {
61520
+ return this.sanitizeSchemaForGemini(prop);
61521
+ }
61504
61522
  buildGeminiPayload(claudeRequest) {
61505
61523
  const contents = this.convertToGeminiMessages(claudeRequest);
61506
61524
  const tools = this.convertToGeminiTools(claudeRequest);
@@ -61803,14 +61821,49 @@ data: ${JSON.stringify(d)}
61803
61821
  });
61804
61822
  const endpoint = this.getApiEndpoint();
61805
61823
  log(`[GeminiHandler] Calling API: ${endpoint}`);
61806
- const response = await fetch(endpoint, {
61807
- method: "POST",
61808
- headers: {
61809
- "Content-Type": "application/json",
61810
- "x-goog-api-key": this.apiKey
61811
- },
61812
- body: JSON.stringify(geminiPayload)
61813
- });
61824
+ const controller = new AbortController;
61825
+ const timeoutId = setTimeout(() => controller.abort(), 30000);
61826
+ let response;
61827
+ try {
61828
+ response = await fetch(endpoint, {
61829
+ method: "POST",
61830
+ headers: {
61831
+ "Content-Type": "application/json",
61832
+ "x-goog-api-key": this.apiKey
61833
+ },
61834
+ body: JSON.stringify(geminiPayload),
61835
+ signal: controller.signal
61836
+ });
61837
+ } catch (fetchError) {
61838
+ clearTimeout(timeoutId);
61839
+ if (fetchError.name === "AbortError") {
61840
+ log(`[GeminiHandler] Request timed out after 30s`);
61841
+ return c.json({
61842
+ error: {
61843
+ type: "timeout_error",
61844
+ message: "Request to Gemini API timed out. Check your network connection to generativelanguage.googleapis.com"
61845
+ }
61846
+ }, 504);
61847
+ }
61848
+ if (fetchError.cause?.code === "UND_ERR_CONNECT_TIMEOUT") {
61849
+ log(`[GeminiHandler] Connection timeout: ${fetchError.message}`);
61850
+ return c.json({
61851
+ error: {
61852
+ type: "connection_error",
61853
+ message: `Cannot connect to Gemini API (generativelanguage.googleapis.com). This may be due to: network/firewall blocking, VPN interference, or regional restrictions. Error: ${fetchError.cause?.code}`
61854
+ }
61855
+ }, 503);
61856
+ }
61857
+ log(`[GeminiHandler] Fetch error: ${fetchError.message}`);
61858
+ return c.json({
61859
+ error: {
61860
+ type: "network_error",
61861
+ message: `Failed to connect to Gemini API: ${fetchError.message}`
61862
+ }
61863
+ }, 503);
61864
+ } finally {
61865
+ clearTimeout(timeoutId);
61866
+ }
61814
61867
  log(`[GeminiHandler] Response status: ${response.status}`);
61815
61868
  if (!response.ok) {
61816
61869
  const errorText = await response.text();
@@ -61918,15 +61971,23 @@ class OpenAIHandler {
61918
61971
  const model = this.modelName.toLowerCase();
61919
61972
  return model.includes("o1") || model.includes("o3");
61920
61973
  }
61974
+ usesMaxCompletionTokens() {
61975
+ const model = this.modelName.toLowerCase();
61976
+ return model.includes("gpt-5") || model.includes("o1") || model.includes("o3") || model.includes("o4");
61977
+ }
61921
61978
  buildOpenAIPayload(claudeRequest, messages, tools) {
61922
61979
  const payload = {
61923
61980
  model: this.modelName,
61924
61981
  messages,
61925
61982
  temperature: claudeRequest.temperature ?? 1,
61926
61983
  stream: true,
61927
- max_tokens: claudeRequest.max_tokens,
61928
61984
  stream_options: { include_usage: true }
61929
61985
  };
61986
+ if (this.usesMaxCompletionTokens()) {
61987
+ payload.max_completion_tokens = claudeRequest.max_tokens;
61988
+ } else {
61989
+ payload.max_tokens = claudeRequest.max_tokens;
61990
+ }
61930
61991
  if (tools.length > 0) {
61931
61992
  payload.tools = tools;
61932
61993
  }
@@ -61989,14 +62050,49 @@ class OpenAIHandler {
61989
62050
  });
61990
62051
  const endpoint = this.getApiEndpoint();
61991
62052
  log(`[OpenAIHandler] Calling API: ${endpoint}`);
61992
- const response = await fetch(endpoint, {
61993
- method: "POST",
61994
- headers: {
61995
- "Content-Type": "application/json",
61996
- Authorization: `Bearer ${this.apiKey}`
61997
- },
61998
- body: JSON.stringify(openAIPayload)
61999
- });
62053
+ const controller = new AbortController;
62054
+ const timeoutId = setTimeout(() => controller.abort(), 30000);
62055
+ let response;
62056
+ try {
62057
+ response = await fetch(endpoint, {
62058
+ method: "POST",
62059
+ headers: {
62060
+ "Content-Type": "application/json",
62061
+ Authorization: `Bearer ${this.apiKey}`
62062
+ },
62063
+ body: JSON.stringify(openAIPayload),
62064
+ signal: controller.signal
62065
+ });
62066
+ } catch (fetchError) {
62067
+ clearTimeout(timeoutId);
62068
+ if (fetchError.name === "AbortError") {
62069
+ log(`[OpenAIHandler] Request timed out after 30s`);
62070
+ return c.json({
62071
+ error: {
62072
+ type: "timeout_error",
62073
+ message: "Request to OpenAI API timed out. Check your network connection to api.openai.com"
62074
+ }
62075
+ }, 504);
62076
+ }
62077
+ if (fetchError.cause?.code === "UND_ERR_CONNECT_TIMEOUT") {
62078
+ log(`[OpenAIHandler] Connection timeout: ${fetchError.message}`);
62079
+ return c.json({
62080
+ error: {
62081
+ type: "connection_error",
62082
+ message: `Cannot connect to OpenAI API (api.openai.com). This may be due to: network/firewall blocking, VPN interference, or regional restrictions. Error: ${fetchError.cause?.code}`
62083
+ }
62084
+ }, 503);
62085
+ }
62086
+ log(`[OpenAIHandler] Fetch error: ${fetchError.message}`);
62087
+ return c.json({
62088
+ error: {
62089
+ type: "network_error",
62090
+ message: `Failed to connect to OpenAI API: ${fetchError.message}`
62091
+ }
62092
+ }, 503);
62093
+ } finally {
62094
+ clearTimeout(timeoutId);
62095
+ }
62000
62096
  log(`[OpenAIHandler] Response status: ${response.status}`);
62001
62097
  if (!response.ok) {
62002
62098
  const errorText = await response.text();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claudish",
3
- "version": "3.2.0",
3
+ "version": "3.2.2",
4
4
  "description": "Run Claude Code with any model - OpenRouter, Ollama, LM Studio & local models",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",
@@ -1,15 +1,78 @@
1
1
  {
2
2
  "version": "1.1.5",
3
- "lastUpdated": "2026-01-04",
3
+ "lastUpdated": "2026-01-10",
4
4
  "source": "https://openrouter.ai/models?categories=programming&fmt=cards&order=top-weekly",
5
5
  "models": [
6
+ {
7
+ "id": "x-ai/grok-code-fast-1",
8
+ "name": "xAI: Grok Code Fast 1",
9
+ "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality work flows.",
10
+ "provider": "X-ai",
11
+ "category": "reasoning",
12
+ "priority": 1,
13
+ "pricing": {
14
+ "input": "$0.20/1M",
15
+ "output": "$1.50/1M",
16
+ "average": "$0.85/1M"
17
+ },
18
+ "context": "256K",
19
+ "maxOutputTokens": 10000,
20
+ "modality": "text->text",
21
+ "supportsTools": true,
22
+ "supportsReasoning": true,
23
+ "supportsVision": false,
24
+ "isModerated": false,
25
+ "recommended": true
26
+ },
27
+ {
28
+ "id": "minimax/minimax-m2.1",
29
+ "name": "MiniMax: MiniMax M2.1",
30
+ "description": "MiniMax-M2.1 is a lightweight, state-of-the-art large language model optimized for coding, agentic workflows, and modern application development. With only 10 billion activated parameters, it delivers a major jump in real-world capability while maintaining exceptional latency, scalability, and cost efficiency.\n\nCompared to its predecessor, M2.1 delivers cleaner, more concise outputs and faster perceived response times. It shows leading multilingual coding performance across major systems and application languages, achieving 49.4% on Multi-SWE-Bench and 72.5% on SWE-Bench Multilingual, and serves as a versatile agent “brain” for IDEs, coding tools, and general-purpose assistance.\n\nTo avoid degrading this model's performance, MiniMax highly recommends preserving reasoning between turns. Learn more about using reasoning_details to pass back reasoning in our [docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks).",
31
+ "provider": "Minimax",
32
+ "category": "reasoning",
33
+ "priority": 2,
34
+ "pricing": {
35
+ "input": "$0.28/1M",
36
+ "output": "$1.20/1M",
37
+ "average": "$0.74/1M"
38
+ },
39
+ "context": "196K",
40
+ "maxOutputTokens": null,
41
+ "modality": "text->text",
42
+ "supportsTools": true,
43
+ "supportsReasoning": true,
44
+ "supportsVision": false,
45
+ "isModerated": false,
46
+ "recommended": true
47
+ },
48
+ {
49
+ "id": "z-ai/glm-4.7",
50
+ "name": "Z.AI: GLM 4.7",
51
+ "description": "GLM-4.7 is Z.AI’s latest flagship model, featuring upgrades in two key areas: enhanced programming capabilities and more stable multi-step reasoning/execution. It demonstrates significant improvements in executing complex agent tasks while delivering more natural conversational experiences and superior front-end aesthetics.",
52
+ "provider": "Z-ai",
53
+ "category": "reasoning",
54
+ "priority": 3,
55
+ "pricing": {
56
+ "input": "$0.40/1M",
57
+ "output": "$1.50/1M",
58
+ "average": "$0.95/1M"
59
+ },
60
+ "context": "202K",
61
+ "maxOutputTokens": 65535,
62
+ "modality": "text->text",
63
+ "supportsTools": true,
64
+ "supportsReasoning": true,
65
+ "supportsVision": false,
66
+ "isModerated": false,
67
+ "recommended": true
68
+ },
6
69
  {
7
70
  "id": "google/gemini-3-pro-preview",
8
71
  "name": "Google: Gemini 3 Pro Preview",
9
72
  "description": "Gemini 3 Pro is Google’s flagship frontier model for high-precision multimodal reasoning, combining strong performance across text, image, video, audio, and code with a 1M-token context window. Reasoning Details must be preserved when using multi-turn tool calling, see our docs here: https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks. It delivers state-of-the-art benchmark results in general reasoning, STEM problem solving, factual QA, and multimodal understanding, including leading scores on LMArena, GPQA Diamond, MathArena Apex, MMMU-Pro, and Video-MMMU. Interactions emphasize depth and interpretability: the model is designed to infer intent with minimal prompting and produce direct, insight-focused responses.\n\nBuilt for advanced development and agentic workflows, Gemini 3 Pro provides robust tool-calling, long-horizon planning stability, and strong zero-shot generation for complex UI, visualization, and coding tasks. It excels at agentic coding (SWE-Bench Verified, Terminal-Bench 2.0), multimodal analysis, and structured long-form tasks such as research synthesis, planning, and interactive learning experiences. Suitable applications include autonomous agents, coding assistants, multimodal analytics, scientific reasoning, and high-context information processing.",
10
73
  "provider": "Google",
11
74
  "category": "vision",
12
- "priority": 1,
75
+ "priority": 4,
13
76
  "pricing": {
14
77
  "input": "$2.00/1M",
15
78
  "output": "$12.00/1M",
@@ -25,16 +88,16 @@
25
88
  "recommended": true
26
89
  },
27
90
  {
28
- "id": "openai/gpt-5.1-codex",
29
- "name": "OpenAI: GPT-5.1-Codex",
30
- "description": "GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks. The model supports building projects from scratch, feature development, debugging, large-scale refactoring, and code review. Compared to GPT-5.1, Codex is more steerable, adheres closely to developer instructions, and produces cleaner, higher-quality code outputs. Reasoning effort can be adjusted with the `reasoning.effort` parameter. Read the [docs here](https://openrouter.ai/docs/use-cases/reasoning-tokens#reasoning-effort-level)\n\nCodex integrates into developer environments including the CLI, IDE extensions, GitHub, and cloud tasks. It adapts reasoning effort dynamically—providing fast responses for small tasks while sustaining extended multi-hour runs for large projects. The model is trained to perform structured code reviews, catching critical flaws by reasoning over dependencies and validating behavior against tests. It also supports multimodal inputs such as images or screenshots for UI development and integrates tool use for search, dependency installation, and environment setup. Codex is intended specifically for agentic coding applications.",
91
+ "id": "openai/gpt-5.2",
92
+ "name": "OpenAI: GPT-5.2",
93
+ "description": "GPT-5.2 is the latest frontier-grade model in the GPT-5 series, offering stronger agentic and long context performance compared to GPT-5.1. It uses adaptive reasoning to allocate computation dynamically, responding quickly to simple queries while spending more depth on complex tasks.\n\nBuilt for broad task coverage, GPT-5.2 delivers consistent gains across math, coding, science, and tool calling workloads, with more coherent long-form answers and improved tool-use reliability.",
31
94
  "provider": "Openai",
32
- "category": "vision",
33
- "priority": 2,
95
+ "category": "reasoning",
96
+ "priority": 5,
34
97
  "pricing": {
35
- "input": "$1.25/1M",
36
- "output": "$10.00/1M",
37
- "average": "$5.63/1M"
98
+ "input": "$1.75/1M",
99
+ "output": "$14.00/1M",
100
+ "average": "$7.88/1M"
38
101
  },
39
102
  "context": "400K",
40
103
  "maxOutputTokens": 128000,
@@ -46,40 +109,19 @@
46
109
  "recommended": true
47
110
  },
48
111
  {
49
- "id": "x-ai/grok-code-fast-1",
50
- "name": "xAI: Grok Code Fast 1",
51
- "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality work flows.",
52
- "provider": "X-ai",
112
+ "id": "moonshotai/kimi-k2-thinking",
113
+ "name": "MoonshotAI: Kimi K2 Thinking",
114
+ "description": "Kimi K2 Thinking is Moonshot AI’s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning. Built on the trillion-parameter Mixture-of-Experts (MoE) architecture introduced in Kimi K2, it activates 32 billion parameters per forward pass and supports 256 k-token context windows. The model is optimized for persistent step-by-step thought, dynamic tool invocation, and complex reasoning workflows that span hundreds of turns. It interleaves step-by-step reasoning with tool use, enabling autonomous research, coding, and writing that can persist for hundreds of sequential actions without drift.\n\nIt sets new open-source benchmarks on HLE, BrowseComp, SWE-Multilingual, and LiveCodeBench, while maintaining stable multi-agent behavior through 200–300 tool calls. Built on a large-scale MoE architecture with MuonClip optimization, it combines strong reasoning depth with high inference efficiency for demanding agentic and analytical tasks.",
115
+ "provider": "Moonshotai",
53
116
  "category": "reasoning",
54
- "priority": 3,
55
- "pricing": {
56
- "input": "$0.20/1M",
57
- "output": "$1.50/1M",
58
- "average": "$0.85/1M"
59
- },
60
- "context": "256K",
61
- "maxOutputTokens": 10000,
62
- "modality": "text->text",
63
- "supportsTools": true,
64
- "supportsReasoning": true,
65
- "supportsVision": false,
66
- "isModerated": false,
67
- "recommended": true
68
- },
69
- {
70
- "id": "minimax/minimax-m2",
71
- "name": "MiniMax: MiniMax M2",
72
- "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning, tool use, and multi-step task execution while maintaining low latency and deployment efficiency.\n\nThe model excels in code generation, multi-file editing, compile-run-fix loops, and test-validated repair, showing strong results on SWE-Bench Verified, Multi-SWE-Bench, and Terminal-Bench. It also performs competitively in agentic evaluations such as BrowseComp and GAIA, effectively handling long-horizon planning, retrieval, and recovery from execution errors.\n\nBenchmarked by [Artificial Analysis](https://artificialanalysis.ai/models/minimax-m2), MiniMax-M2 ranks among the top open-source models for composite intelligence, spanning mathematics, science, and instruction-following. Its small activation footprint enables fast inference, high concurrency, and improved unit economics, making it well-suited for large-scale agents, developer assistants, and reasoning-driven applications that require responsiveness and cost efficiency.\n\nTo avoid degrading this model's performance, MiniMax highly recommends preserving reasoning between turns. Learn more about using reasoning_details to pass back reasoning in our [docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks).",
73
- "provider": "Minimax",
74
- "category": "reasoning",
75
- "priority": 4,
117
+ "priority": 6,
76
118
  "pricing": {
77
- "input": "$0.20/1M",
78
- "output": "$1.00/1M",
79
- "average": "$0.60/1M"
119
+ "input": "$0.40/1M",
120
+ "output": "$1.75/1M",
121
+ "average": "$1.07/1M"
80
122
  },
81
- "context": "196K",
82
- "maxOutputTokens": 65536,
123
+ "context": "262K",
124
+ "maxOutputTokens": 65535,
83
125
  "modality": "text->text",
84
126
  "supportsTools": true,
85
127
  "supportsReasoning": true,
@@ -88,18 +130,18 @@
88
130
  "recommended": true
89
131
  },
90
132
  {
91
- "id": "z-ai/glm-4.6",
92
- "name": "Z.AI: GLM 4.6",
93
- "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code、Cline、Roo Code and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.",
94
- "provider": "Z-ai",
133
+ "id": "deepseek/deepseek-v3.2",
134
+ "name": "DeepSeek: DeepSeek V3.2",
135
+ "description": "DeepSeek-V3.2 is a large language model designed to harmonize high computational efficiency with strong reasoning and agentic tool-use performance. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism that reduces training and inference cost while preserving quality in long-context scenarios. A scalable reinforcement learning post-training framework further improves reasoning, with reported performance in the GPT-5 class, and the model has demonstrated gold-medal results on the 2025 IMO and IOI. V3.2 also uses a large-scale agentic task synthesis pipeline to better integrate reasoning into tool-use settings, boosting compliance and generalization in interactive environments.\n\nUsers can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)",
136
+ "provider": "Deepseek",
95
137
  "category": "reasoning",
96
- "priority": 5,
138
+ "priority": 7,
97
139
  "pricing": {
98
- "input": "$0.35/1M",
99
- "output": "$1.50/1M",
100
- "average": "$0.93/1M"
140
+ "input": "$0.25/1M",
141
+ "output": "$0.38/1M",
142
+ "average": "$0.32/1M"
101
143
  },
102
- "context": "202K",
144
+ "context": "163K",
103
145
  "maxOutputTokens": 65536,
104
146
  "modality": "text->text",
105
147
  "supportsTools": true,
@@ -109,22 +151,22 @@
109
151
  "recommended": true
110
152
  },
111
153
  {
112
- "id": "qwen/qwen3-vl-235b-a22b-instruct",
113
- "name": "Qwen: Qwen3 VL 235B A22B Instruct",
114
- "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. The Instruct model targets general vision-language use (VQA, document parsing, chart/table extraction, multilingual OCR). The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows—turning sketches or mockups into code and assisting with UI debugging—while maintaining strong text-only performance comparable to the flagship Qwen3 language models. This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.",
154
+ "id": "qwen/qwen3-vl-235b-a22b-thinking",
155
+ "name": "Qwen: Qwen3 VL 235B A22B Thinking",
156
+ "description": "Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video. The Thinking model is optimized for multimodal reasoning in STEM and math. The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows, turning sketches or mockups into code and assisting with UI debugging, while maintaining strong text-only performance comparable to the flagship Qwen3 language models. This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.",
115
157
  "provider": "Qwen",
116
158
  "category": "vision",
117
- "priority": 6,
159
+ "priority": 8,
118
160
  "pricing": {
119
- "input": "$0.20/1M",
120
- "output": "$1.20/1M",
121
- "average": "$0.70/1M"
161
+ "input": "$0.45/1M",
162
+ "output": "$3.50/1M",
163
+ "average": "$1.98/1M"
122
164
  },
123
165
  "context": "262K",
124
- "maxOutputTokens": null,
166
+ "maxOutputTokens": 262144,
125
167
  "modality": "text+image->text",
126
168
  "supportsTools": true,
127
- "supportsReasoning": false,
169
+ "supportsReasoning": true,
128
170
  "supportsVision": true,
129
171
  "isModerated": false,
130
172
  "recommended": true
@@ -9,94 +9,92 @@ import { readFileSync, writeFileSync } from "node:fs";
9
9
  import { join } from "node:path";
10
10
 
11
11
  interface ModelInfo {
12
- name: string;
13
- description: string;
14
- priority: number;
15
- provider: string;
12
+ name: string;
13
+ description: string;
14
+ priority: number;
15
+ provider: string;
16
16
  }
17
17
 
18
18
  interface ExtractedModels {
19
- [key: string]: ModelInfo;
19
+ [key: string]: ModelInfo;
20
20
  }
21
21
 
22
22
  function extractModels(markdownContent: string): ExtractedModels {
23
- const models: ExtractedModels = {};
24
- let priority = 1;
25
-
26
- // Extract from Quick Reference section (lines 11-30)
27
- const quickRefMatch = markdownContent.match(
28
- /## Quick Reference - Model IDs Only\n\n([\s\S]*?)\n---/,
29
- );
30
- if (!quickRefMatch) {
31
- throw new Error("Could not find Quick Reference section");
32
- }
33
-
34
- const quickRef = quickRefMatch[1];
35
- const lines = quickRef.split("\n");
36
-
37
- for (const line of lines) {
38
- // Match pattern: - `model-id` - Description (may contain commas), $price/1M or FREE, contextK/M [⭐]
39
- // Use non-greedy match and look for $ or FREE to find the price section
40
- const match = line.match(
41
- /^- `([^`]+)` - (.+?), (?:\$[\d.]+\/1M|FREE), ([\dKM]+)(?: ⭐)?$/,
42
- );
43
- if (match) {
44
- const [, modelId, description] = match;
45
-
46
- // Determine provider from model ID
47
- let provider = "Unknown";
48
- if (modelId.startsWith("x-ai/")) provider = "xAI";
49
- else if (modelId.startsWith("minimax/")) provider = "MiniMax";
50
- else if (modelId.startsWith("z-ai/")) provider = "Zhipu AI";
51
- else if (modelId.startsWith("openai/")) provider = "OpenAI";
52
- else if (modelId.startsWith("google/")) provider = "Google";
53
- else if (modelId.startsWith("qwen/")) provider = "Alibaba";
54
- else if (modelId.startsWith("deepseek/")) provider = "DeepSeek";
55
- else if (modelId.startsWith("tngtech/")) provider = "TNG Tech";
56
- else if (modelId.startsWith("openrouter/")) provider = "OpenRouter";
57
- else if (modelId.startsWith("anthropic/")) provider = "Anthropic";
58
-
59
- // Extract short name from description
60
- const name = description.trim();
61
-
62
- models[modelId] = {
63
- name,
64
- description: description.trim(),
65
- priority: priority++,
66
- provider,
67
- };
68
- }
69
- }
70
-
71
- // Add custom option
72
- models.custom = {
73
- name: "Custom Model",
74
- description: "Enter any OpenRouter model ID manually",
75
- priority: 999,
76
- provider: "Custom",
77
- };
78
-
79
- return models;
23
+ const models: ExtractedModels = {};
24
+ let priority = 1;
25
+
26
+ // Extract from Quick Reference section (lines 11-30)
27
+ const quickRefMatch = markdownContent.match(
28
+ /## Quick Reference - Model IDs Only\n\n([\s\S]*?)\n---/
29
+ );
30
+ if (!quickRefMatch) {
31
+ throw new Error("Could not find Quick Reference section");
32
+ }
33
+
34
+ const quickRef = quickRefMatch[1];
35
+ const lines = quickRef.split("\n");
36
+
37
+ for (const line of lines) {
38
+ // Match pattern: - `model-id` - Description (may contain commas), $price/1M or FREE, contextK/M [⭐]
39
+ // Use non-greedy match and look for $ or FREE to find the price section
40
+ const match = line.match(/^- `([^`]+)` - (.+?), (?:\$[\d.]+\/1M|FREE), ([\dKM]+)(?: ⭐)?$/);
41
+ if (match) {
42
+ const [, modelId, description] = match;
43
+
44
+ // Determine provider from model ID
45
+ let provider = "Unknown";
46
+ if (modelId.startsWith("x-ai/")) provider = "xAI";
47
+ else if (modelId.startsWith("minimax/")) provider = "MiniMax";
48
+ else if (modelId.startsWith("z-ai/")) provider = "Zhipu AI";
49
+ else if (modelId.startsWith("openai/")) provider = "OpenAI";
50
+ else if (modelId.startsWith("google/")) provider = "Google";
51
+ else if (modelId.startsWith("qwen/")) provider = "Alibaba";
52
+ else if (modelId.startsWith("deepseek/")) provider = "DeepSeek";
53
+ else if (modelId.startsWith("tngtech/")) provider = "TNG Tech";
54
+ else if (modelId.startsWith("openrouter/")) provider = "OpenRouter";
55
+ else if (modelId.startsWith("anthropic/")) provider = "Anthropic";
56
+
57
+ // Extract short name from description
58
+ const name = description.trim();
59
+
60
+ models[modelId] = {
61
+ name,
62
+ description: description.trim(),
63
+ priority: priority++,
64
+ provider,
65
+ };
66
+ }
67
+ }
68
+
69
+ // Add custom option
70
+ models.custom = {
71
+ name: "Custom Model",
72
+ description: "Enter any OpenRouter model ID manually",
73
+ priority: 999,
74
+ provider: "Custom",
75
+ };
76
+
77
+ return models;
80
78
  }
81
79
 
82
80
  function generateTypeScript(models: ExtractedModels): string {
83
- const modelIds = Object.keys(models)
84
- .filter((id) => id !== "custom")
85
- .map((id) => ` | "${id}"`)
86
- .join("\n");
87
-
88
- const modelInfo = Object.entries(models)
89
- .map(([id, info]) => {
90
- return ` "${id}": {
81
+ const modelIds = Object.keys(models)
82
+ .filter((id) => id !== "custom")
83
+ .map((id) => ` | "${id}"`)
84
+ .join("\n");
85
+
86
+ const modelInfo = Object.entries(models)
87
+ .map(([id, info]) => {
88
+ return ` "${id}": {
91
89
  name: "${info.name}",
92
90
  description: "${info.description}",
93
91
  priority: ${info.priority},
94
92
  provider: "${info.provider}",
95
93
  }`;
96
- })
97
- .join(",\n");
94
+ })
95
+ .join(",\n");
98
96
 
99
- return `// AUTO-GENERATED from shared/recommended-models.md
97
+ return `// AUTO-GENERATED from shared/recommended-models.md
100
98
  // DO NOT EDIT MANUALLY - Run 'bun run extract-models' to regenerate
101
99
 
102
100
  import type { OpenRouterModel } from "./types.js";
@@ -142,12 +140,12 @@ export const OPENROUTER_HEADERS = {
142
140
  }
143
141
 
144
142
  function generateTypes(models: ExtractedModels): string {
145
- const modelIds = Object.keys(models)
146
- .filter((id) => id !== "custom")
147
- .map((id) => ` "${id}"`)
148
- .join(",\n");
143
+ const modelIds = Object.keys(models)
144
+ .filter((id) => id !== "custom")
145
+ .map((id) => ` "${id}"`)
146
+ .join(",\n");
149
147
 
150
- return `// AUTO-GENERATED from shared/recommended-models.md
148
+ return `// AUTO-GENERATED from shared/recommended-models.md
151
149
  // DO NOT EDIT MANUALLY - Run 'bun run extract-models' to regenerate
152
150
 
153
151
  // OpenRouter Models - Top Recommended for Development (Priority Order)
@@ -162,58 +160,55 @@ export type OpenRouterModel = (typeof OPENROUTER_MODELS)[number];
162
160
 
163
161
  // Main execution
164
162
  try {
165
- const sharedModelsPath = join(
166
- import.meta.dir,
167
- "../../../shared/recommended-models.md",
168
- );
169
- const configPath = join(import.meta.dir, "../src/config.ts");
170
- const typesPath = join(import.meta.dir, "../src/types.ts");
171
-
172
- console.log("📖 Reading shared/recommended-models.md...");
173
- const markdownContent = readFileSync(sharedModelsPath, "utf-8");
174
-
175
- console.log("🔍 Extracting model information...");
176
- const models = extractModels(markdownContent);
177
-
178
- console.log(`✅ Found ${Object.keys(models).length - 1} models + custom option`);
179
-
180
- console.log("📝 Generating config.ts...");
181
- const configCode = generateTypeScript(models);
182
- writeFileSync(configPath, configCode);
183
-
184
- console.log("📝 Generating types.ts...");
185
- const typesCode = generateTypes(models);
186
- const existingTypes = readFileSync(typesPath, "utf-8");
187
-
188
- // Replace OPENROUTER_MODELS array and OpenRouterModel type, keep other types
189
- // Handle both auto-generated and manual versions
190
- let updatedTypes = existingTypes;
191
-
192
- // Try to replace auto-generated section first
193
- if (existingTypes.includes("// AUTO-GENERATED")) {
194
- updatedTypes = existingTypes.replace(
195
- /\/\/ AUTO-GENERATED[\s\S]*?export type OpenRouterModel = \(typeof OPENROUTER_MODELS\)\[number\];/,
196
- typesCode.trim(),
197
- );
198
- } else {
199
- // First time - replace manual OPENROUTER_MODELS section
200
- updatedTypes = existingTypes.replace(
201
- /\/\/ OpenRouter Models[\s\S]*?export type OpenRouterModel = \(typeof OPENROUTER_MODELS\)\[number\];/,
202
- typesCode.trim(),
203
- );
204
- }
205
-
206
- writeFileSync(typesPath, updatedTypes);
207
-
208
- console.log("✅ Successfully generated TypeScript files");
209
- console.log("");
210
- console.log("Models:");
211
- for (const [id, info] of Object.entries(models)) {
212
- if (id !== "custom") {
213
- console.log(` • ${id} - ${info.name} (${info.provider})`);
214
- }
215
- }
163
+ const sharedModelsPath = join(import.meta.dir, "../../../shared/recommended-models.md");
164
+ const configPath = join(import.meta.dir, "../src/config.ts");
165
+ const typesPath = join(import.meta.dir, "../src/types.ts");
166
+
167
+ console.log("📖 Reading shared/recommended-models.md...");
168
+ const markdownContent = readFileSync(sharedModelsPath, "utf-8");
169
+
170
+ console.log("🔍 Extracting model information...");
171
+ const models = extractModels(markdownContent);
172
+
173
+ console.log(`✅ Found ${Object.keys(models).length - 1} models + custom option`);
174
+
175
+ console.log("📝 Generating config.ts...");
176
+ const configCode = generateTypeScript(models);
177
+ writeFileSync(configPath, configCode);
178
+
179
+ console.log("📝 Generating types.ts...");
180
+ const typesCode = generateTypes(models);
181
+ const existingTypes = readFileSync(typesPath, "utf-8");
182
+
183
+ // Replace OPENROUTER_MODELS array and OpenRouterModel type, keep other types
184
+ // Handle both auto-generated and manual versions
185
+ let updatedTypes = existingTypes;
186
+
187
+ // Try to replace auto-generated section first
188
+ if (existingTypes.includes("// AUTO-GENERATED")) {
189
+ updatedTypes = existingTypes.replace(
190
+ /\/\/ AUTO-GENERATED[\s\S]*?export type OpenRouterModel = \(typeof OPENROUTER_MODELS\)\[number\];/,
191
+ typesCode.trim()
192
+ );
193
+ } else {
194
+ // First time - replace manual OPENROUTER_MODELS section
195
+ updatedTypes = existingTypes.replace(
196
+ /\/\/ OpenRouter Models[\s\S]*?export type OpenRouterModel = \(typeof OPENROUTER_MODELS\)\[number\];/,
197
+ typesCode.trim()
198
+ );
199
+ }
200
+
201
+ writeFileSync(typesPath, updatedTypes);
202
+
203
+ console.log("✅ Successfully generated TypeScript files");
204
+ console.log("");
205
+ console.log("Models:");
206
+ for (const [id, info] of Object.entries(models)) {
207
+ if (id !== "custom") {
208
+ console.log(` • ${id} - ${info.name} (${info.provider})`);
209
+ }
210
+ }
216
211
  } catch (error) {
217
- console.error("❌ Error:", error);
218
- process.exit(1);
212
+ console.error("❌ Error:", error);
213
+ process.exit(1);
219
214
  }
@@ -1,13 +1,13 @@
1
1
  #!/usr/bin/env node
2
2
 
3
- console.log('\x1b[32m✓ Claudish installed successfully!\x1b[0m');
4
- console.log('');
5
- console.log('\x1b[1mUsage:\x1b[0m');
3
+ console.log("\x1b[32m✓ Claudish installed successfully!\x1b[0m");
4
+ console.log("");
5
+ console.log("\x1b[1mUsage:\x1b[0m");
6
6
  console.log(' claudish --model x-ai/grok-code-fast-1 "your prompt"');
7
- console.log(' claudish --interactive # Interactive model selection');
8
- console.log(' claudish --list-models # List all available models');
9
- console.log('');
10
- console.log('\x1b[1mGet started:\x1b[0m');
11
- console.log(' 1. Set OPENROUTER_API_KEY environment variable');
12
- console.log(' 2. Run: claudish --interactive');
13
- console.log('');
7
+ console.log(" claudish --interactive # Interactive model selection");
8
+ console.log(" claudish --list-models # List all available models");
9
+ console.log("");
10
+ console.log("\x1b[1mGet started:\x1b[0m");
11
+ console.log(" 1. Set OPENROUTER_API_KEY environment variable");
12
+ console.log(" 2. Run: claudish --interactive");
13
+ console.log("");