claudish 2.2.1 → 2.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +83 -4
- package/dist/index.js +1988 -1160
- package/package.json +21 -20
- package/recommended-models.json +28 -112
- package/scripts/extract-models.ts +10 -0
- package/scripts/postinstall.cjs +0 -0
- package/skills/claudish-usage/SKILL.md +43 -24
package/package.json
CHANGED

@@ -1,31 +1,15 @@
 {
   "name": "claudish",
-  "version": "2.2.1",
+  "version": "2.5.0",
   "description": "CLI tool to run Claude Code with any OpenRouter model (Grok, GPT-5, MiniMax, etc.) via local Anthropic API-compatible proxy",
   "type": "module",
   "main": "./dist/index.js",
   "bin": {
     "claudish": "dist/index.js"
   },
-  "scripts": {
-    "dev": "bun run src/index.ts",
-    "dev:grok": "bun run src/index.ts --interactive --model x-ai/grok-code-fast-1",
-    "dev:grok:debug": "bun run src/index.ts --interactive --debug --log-level info --model x-ai/grok-code-fast-1",
-    "dev:info": "bun run src/index.ts --interactive --monitor",
-    "extract-models": "bun run scripts/extract-models.ts",
-    "build": "bun run extract-models && bun build src/index.ts --outdir dist --target node && chmod +x dist/index.js",
-    "link": "npm link",
-    "unlink": "npm unlink -g claudish",
-    "install-global": "bun run build && npm link",
-    "kill-all": "pkill -f 'bun.*claudish' || pkill -f 'claude.*claudish-settings' || echo 'No claudish processes found'",
-    "test": "bun test ./tests/comprehensive-model-test.ts",
-    "typecheck": "tsc --noEmit",
-    "lint": "biome check .",
-    "format": "biome format --write .",
-    "postinstall": "node scripts/postinstall.cjs"
-  },
   "dependencies": {
     "@hono/node-server": "^1.19.6",
+    "dotenv": "^17.2.3",
     "hono": "^4.10.6"
   },
   "devDependencies": {
@@ -54,5 +38,22 @@
     "ai"
   ],
   "author": "Jack Rudenko <i@madappgang.com>",
-  "license": "MIT"
-}
+  "license": "MIT",
+  "scripts": {
+    "dev": "bun run src/index.ts",
+    "dev:grok": "bun run src/index.ts --interactive --model x-ai/grok-code-fast-1",
+    "dev:grok:debug": "bun run src/index.ts --interactive --debug --log-level info --model x-ai/grok-code-fast-1",
+    "dev:info": "bun run src/index.ts --interactive --monitor",
+    "extract-models": "bun run scripts/extract-models.ts",
+    "build": "bun run extract-models && bun build src/index.ts --outdir dist --target node && chmod +x dist/index.js",
+    "link": "npm link",
+    "unlink": "npm unlink -g claudish",
+    "install-global": "bun run build && npm link",
+    "kill-all": "pkill -f 'bun.*claudish' || pkill -f 'claude.*claudish-settings' || echo 'No claudish processes found'",
+    "test": "bun test ./tests/comprehensive-model-test.ts",
+    "typecheck": "tsc --noEmit",
+    "lint": "biome check .",
+    "format": "biome format --write .",
+    "postinstall": "node scripts/postinstall.cjs"
+  }
+}
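The only dependency change is the new runtime dependency on dotenv. A minimal sketch of a plausible use, hedged: neither the call site nor the variable name appears in this diff.

```typescript
// Hypothetical sketch of why dotenv@17 becomes a runtime dependency: loading a
// .env file before the proxy starts. The actual call site would be inside
// dist/index.js, which this diff does not show, and OPENROUTER_API_KEY is an
// assumption about which variable claudish reads.
import "dotenv/config"; // side effect: merges .env entries into process.env

if (!process.env.OPENROUTER_API_KEY) {
  console.error("OPENROUTER_API_KEY is not set (checked environment and .env)");
  process.exit(1);
}
```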
package/recommended-models.json
CHANGED

@@ -1,26 +1,26 @@
 {
   "version": "2.1.0",
-  "lastUpdated": "2025-11-
+  "lastUpdated": "2025-11-24",
   "source": "https://openrouter.ai/models?categories=programming&fmt=cards&order=top-weekly",
   "models": [
     {
-      "id": "
-      "name": "
-      "description": "
-      "provider": "
-      "category": "
+      "id": "google/gemini-3-pro-preview",
+      "name": "Google: Gemini 3 Pro Preview",
+      "description": "Gemini 3 Pro is Google’s flagship frontier model for high-precision multimodal reasoning, combining strong performance across text, image, video, audio, and code with a 1M-token context window. Reasoning Details must be preserved when using multi-turn tool calling, see our docs here: https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks. It delivers state-of-the-art benchmark results in general reasoning, STEM problem solving, factual QA, and multimodal understanding, including leading scores on LMArena, GPQA Diamond, MathArena Apex, MMMU-Pro, and Video-MMMU. Interactions emphasize depth and interpretability: the model is designed to infer intent with minimal prompting and produce direct, insight-focused responses.\n\nBuilt for advanced development and agentic workflows, Gemini 3 Pro provides robust tool-calling, long-horizon planning stability, and strong zero-shot generation for complex UI, visualization, and coding tasks. It excels at agentic coding (SWE-Bench Verified, Terminal-Bench 2.0), multimodal analysis, and structured long-form tasks such as research synthesis, planning, and interactive learning experiences. Suitable applications include autonomous agents, coding assistants, multimodal analytics, scientific reasoning, and high-context information processing.",
+      "provider": "Google",
+      "category": "vision",
       "priority": 1,
       "pricing": {
-        "input": "$
-        "output": "$
-        "average": "$
+        "input": "$2.00/1M",
+        "output": "$12.00/1M",
+        "average": "$7.00/1M"
       },
-      "context": "
-      "maxOutputTokens":
-      "modality": "text->text",
+      "context": "1048K",
+      "maxOutputTokens": 65536,
+      "modality": "text+image->text",
       "supportsTools": true,
       "supportsReasoning": true,
-      "supportsVision":
+      "supportsVision": true,
       "isModerated": false,
       "recommended": true
     },
@@ -46,19 +46,19 @@
       "recommended": true
     },
     {
-      "id": "
-      "name": "
-      "description": "
-      "provider": "
+      "id": "x-ai/grok-code-fast-1",
+      "name": "xAI: Grok Code Fast 1",
+      "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality work flows.",
+      "provider": "X-ai",
       "category": "reasoning",
       "priority": 3,
       "pricing": {
-        "input": "$0.
-        "output": "$
-        "average": "$
+        "input": "$0.20/1M",
+        "output": "$1.50/1M",
+        "average": "$0.85/1M"
       },
-      "context": "
-      "maxOutputTokens":
+      "context": "256K",
+      "maxOutputTokens": 10000,
       "modality": "text->text",
       "supportsTools": true,
       "supportsReasoning": true,
@@ -66,38 +66,17 @@
       "isModerated": false,
       "recommended": true
     },
-    {
-      "id": "google/gemini-2.5-flash",
-      "name": "Google: Gemini 2.5 Flash",
-      "description": "Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater accuracy and nuanced context handling. \n\nAdditionally, Gemini 2.5 Flash is configurable through the \"max tokens for reasoning\" parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).",
-      "provider": "Google",
-      "category": "reasoning",
-      "priority": 4,
-      "pricing": {
-        "input": "$0.30/1M",
-        "output": "$2.50/1M",
-        "average": "$1.40/1M"
-      },
-      "context": "1048K",
-      "maxOutputTokens": 65535,
-      "modality": "text+image->text",
-      "supportsTools": true,
-      "supportsReasoning": true,
-      "supportsVision": true,
-      "isModerated": false,
-      "recommended": true
-    },
     {
       "id": "minimax/minimax-m2",
       "name": "MiniMax: MiniMax M2",
       "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning, tool use, and multi-step task execution while maintaining low latency and deployment efficiency.\n\nThe model excels in code generation, multi-file editing, compile-run-fix loops, and test-validated repair, showing strong results on SWE-Bench Verified, Multi-SWE-Bench, and Terminal-Bench. It also performs competitively in agentic evaluations such as BrowseComp and GAIA, effectively handling long-horizon planning, retrieval, and recovery from execution errors.\n\nBenchmarked by [Artificial Analysis](https://artificialanalysis.ai/models/minimax-m2), MiniMax-M2 ranks among the top open-source models for composite intelligence, spanning mathematics, science, and instruction-following. Its small activation footprint enables fast inference, high concurrency, and improved unit economics, making it well-suited for large-scale agents, developer assistants, and reasoning-driven applications that require responsiveness and cost efficiency.\n\nTo avoid degrading this model's performance, MiniMax highly recommends preserving reasoning between turns. Learn more about using reasoning_details to pass back reasoning in our [docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks).",
       "provider": "Minimax",
       "category": "reasoning",
-      "priority":
+      "priority": 4,
       "pricing": {
-        "input": "$0.
-        "output": "$
-        "average": "$0.
+        "input": "$0.24/1M",
+        "output": "$0.96/1M",
+        "average": "$0.60/1M"
       },
       "context": "204K",
       "maxOutputTokens": 131072,
@@ -114,7 +93,7 @@
       "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code、Cline、Roo Code and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.",
       "provider": "Z-ai",
       "category": "reasoning",
-      "priority":
+      "priority": 5,
       "pricing": {
         "input": "$0.40/1M",
         "output": "$1.75/1M",
@@ -129,55 +108,13 @@
       "isModerated": false,
       "recommended": true
     },
-    {
-      "id": "openai/gpt-5",
-      "name": "OpenAI: GPT-5",
-      "description": "GPT-5 is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy in high-stakes use cases. It supports test-time routing features and advanced prompt understanding, including user-specified intent like \"think hard about this.\" Improvements include reductions in hallucination, sycophancy, and better performance in coding, writing, and health-related tasks.",
-      "provider": "Openai",
-      "category": "reasoning",
-      "priority": 7,
-      "pricing": {
-        "input": "$1.25/1M",
-        "output": "$10.00/1M",
-        "average": "$5.63/1M"
-      },
-      "context": "400K",
-      "maxOutputTokens": 128000,
-      "modality": "text+image->text",
-      "supportsTools": true,
-      "supportsReasoning": true,
-      "supportsVision": true,
-      "isModerated": true,
-      "recommended": true
-    },
-    {
-      "id": "google/gemini-3-pro-preview",
-      "name": "Google: Gemini 3 Pro Preview",
-      "description": "Gemini 3 Pro is Google’s flagship frontier model for high-precision multimodal reasoning, combining strong performance across text, image, video, audio, and code with a 1M-token context window. Reasoning Details must be preserved when using multi-turn tool calling, see our docs here: https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks. It delivers state-of-the-art benchmark results in general reasoning, STEM problem solving, factual QA, and multimodal understanding, including leading scores on LMArena, GPQA Diamond, MathArena Apex, MMMU-Pro, and Video-MMMU. Interactions emphasize depth and interpretability: the model is designed to infer intent with minimal prompting and produce direct, insight-focused responses.\n\nBuilt for advanced development and agentic workflows, Gemini 3 Pro provides robust tool-calling, long-horizon planning stability, and strong zero-shot generation for complex UI, visualization, and coding tasks. It excels at agentic coding (SWE-Bench Verified, Terminal-Bench 2.0), multimodal analysis, and structured long-form tasks such as research synthesis, planning, and interactive learning experiences. Suitable applications include autonomous agents, coding assistants, multimodal analytics, scientific reasoning, and high-context information processing.",
-      "provider": "Google",
-      "category": "vision",
-      "priority": 8,
-      "pricing": {
-        "input": "$2.00/1M",
-        "output": "$12.00/1M",
-        "average": "$7.00/1M"
-      },
-      "context": "1048K",
-      "maxOutputTokens": 65536,
-      "modality": "text+image->text",
-      "supportsTools": true,
-      "supportsReasoning": true,
-      "supportsVision": true,
-      "isModerated": false,
-      "recommended": true
-    },
     {
       "id": "qwen/qwen3-vl-235b-a22b-instruct",
       "name": "Qwen: Qwen3 VL 235B A22B Instruct",
       "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. The Instruct model targets general vision-language use (VQA, document parsing, chart/table extraction, multilingual OCR). The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows—turning sketches or mockups into code and assisting with UI debugging—while maintaining strong text-only performance comparable to the flagship Qwen3 language models. This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.",
       "provider": "Qwen",
       "category": "vision",
-      "priority":
+      "priority": 6,
       "pricing": {
         "input": "$0.21/1M",
         "output": "$1.90/1M",
@@ -191,27 +128,6 @@
       "supportsVision": true,
       "isModerated": false,
       "recommended": true
-    },
-    {
-      "id": "openrouter/polaris-alpha",
-      "name": "Polaris Alpha",
-      "description": "openrouter/polaris-alpha (metadata pending - not yet available in API)",
-      "provider": "Openrouter",
-      "category": "programming",
-      "priority": 10,
-      "pricing": {
-        "input": "N/A",
-        "output": "N/A",
-        "average": "N/A"
-      },
-      "context": "N/A",
-      "maxOutputTokens": null,
-      "modality": "text->text",
-      "supportsTools": false,
-      "supportsReasoning": false,
-      "supportsVision": false,
-      "isModerated": false,
-      "recommended": true
     }
   ]
 }
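A pattern worth noting in the entries above: wherever all three pricing fields are visible, `average` equals the arithmetic mean of `input` and `output` (Grok: (0.20 + 1.50) / 2 = 0.85; MiniMax: (0.24 + 0.96) / 2 = 0.60; Gemini 3 Pro: (2.00 + 12.00) / 2 = 7.00). A short TypeScript sketch that recomputes the field under that assumption; the file path and `perMillion` helper are illustrative, not claudish code:

```typescript
// Sanity-check sketch: recompute "average" as the mean of "input" and "output".
import { readFileSync } from "node:fs";

interface Pricing {
  input: string;
  output: string;
  average: string;
}

// "$2.00/1M" -> 2.0; throws on anything that doesn't match the format.
function perMillion(price: string): number {
  const m = price.match(/^\$([\d.]+)\/1M$/);
  if (!m) throw new Error(`unexpected price format: ${price}`);
  return Number(m[1]);
}

const doc = JSON.parse(readFileSync("recommended-models.json", "utf8"));
for (const model of doc.models) {
  const p: Pricing = model.pricing;
  const mean = (perMillion(p.input) + perMillion(p.output)) / 2;
  // e.g. x-ai/grok-code-fast-1: (0.20 + 1.50) / 2 = 0.85 -> "$0.85/1M"
  console.log(model.id, `$${mean.toFixed(2)}/1M`, "vs", p.average);
}
```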
package/scripts/extract-models.ts
CHANGED

@@ -120,6 +120,16 @@ export const ENV = {
   CLAUDISH_ACTIVE_MODEL_NAME: "CLAUDISH_ACTIVE_MODEL_NAME", // Set by claudish to show active model in status line
   ANTHROPIC_MODEL: "ANTHROPIC_MODEL", // Claude Code standard env var for model selection
   ANTHROPIC_SMALL_FAST_MODEL: "ANTHROPIC_SMALL_FAST_MODEL", // Claude Code standard env var for fast model
+  // Claudish model mapping overrides (highest priority)
+  CLAUDISH_MODEL_OPUS: "CLAUDISH_MODEL_OPUS",
+  CLAUDISH_MODEL_SONNET: "CLAUDISH_MODEL_SONNET",
+  CLAUDISH_MODEL_HAIKU: "CLAUDISH_MODEL_HAIKU",
+  CLAUDISH_MODEL_SUBAGENT: "CLAUDISH_MODEL_SUBAGENT",
+  // Claude Code standard model configuration (fallback if CLAUDISH_* not set)
+  ANTHROPIC_DEFAULT_OPUS_MODEL: "ANTHROPIC_DEFAULT_OPUS_MODEL",
+  ANTHROPIC_DEFAULT_SONNET_MODEL: "ANTHROPIC_DEFAULT_SONNET_MODEL",
+  ANTHROPIC_DEFAULT_HAIKU_MODEL: "ANTHROPIC_DEFAULT_HAIKU_MODEL",
+  CLAUDE_CODE_SUBAGENT_MODEL: "CLAUDE_CODE_SUBAGENT_MODEL",
 } as const;
 
 // OpenRouter API Configuration
package/scripts/postinstall.cjs
CHANGED
File without changes
package/skills/claudish-usage/SKILL.md
CHANGED

@@ -5,7 +5,7 @@ description: CRITICAL - Guide for using Claudish CLI ONLY through sub-agents to
 
 # Claudish Usage Skill
 
-**Version:** 1.
+**Version:** 1.1.0
 **Purpose:** Guide AI agents on how to use Claudish CLI to run Claude Code with OpenRouter models
 **Status:** Production Ready
 
@@ -214,14 +214,22 @@ claudish --version
 ### Step 2: Get Available Models
 
 ```bash
-# List
-claudish --
+# List ALL OpenRouter models grouped by provider
+claudish --models
+
+# Fuzzy search models by name, ID, or description
+claudish --models gemini
+claudish --models "grok code"
+
+# Show top recommended programming models (curated list)
+claudish --top-models
 
 # JSON output for parsing
-claudish --
+claudish --models --json
+claudish --top-models --json
 
 # Force update from OpenRouter API
-claudish --
+claudish --models --force-update
 ```
 
 ### Step 3: Run Claudish
@@ -275,11 +283,18 @@ git diff | claudish --stdin --model openai/gpt-5-codex "Review these changes"
 
 **Get Latest Models:**
 ```bash
-#
-claudish --
+# List all models (auto-updates every 2 days)
+claudish --models
+
+# Search for specific models
+claudish --models grok
+claudish --models "gemini flash"
+
+# Show curated top models
+claudish --top-models
 
 # Force immediate update
-claudish --
+claudish --models --force-update
 ```
 
 ## NEW: Direct Agent Selection (v2.1.0)
@@ -503,8 +518,8 @@ Use Claudish CLI to implement this feature with Grok model:
 ${featureDescription}
 
 INSTRUCTIONS:
-1.
-   claudish --
+1. Search for available models:
+   claudish --models grok
 
 2. Run implementation with Grok:
    claudish --model x-ai/grok-code-fast-1 "${featureDescription}"
@@ -682,7 +697,8 @@ done
 |------|-------------|---------|
 | `--model <model>` | OpenRouter model to use | `--model x-ai/grok-code-fast-1` |
 | `--stdin` | Read prompt from stdin | `git diff \| claudish --stdin --model grok` |
-| `--
+| `--models` | List all models or search | `claudish --models` or `claudish --models gemini` |
+| `--top-models` | Show top recommended models | `claudish --top-models` |
 | `--json` | JSON output (implies --quiet) | `claudish --json "task"` |
 | `--help-ai` | Print AI agent usage guide | `claudish --help-ai` |
 
@@ -781,7 +797,7 @@ Model 'invalid/model' not found
 **Fix:**
 ```bash
 # List available models
-claudish --
+claudish --models
 
 # Use valid model ID
 claudish --model x-ai/grok-code-fast-1 "task"
@@ -869,10 +885,13 @@ await Task({
 **How:**
 ```bash
 # Auto-updates every 2 days
-claudish --
+claudish --models
+
+# Search for specific models
+claudish --models deepseek
 
 # Force update now
-claudish --
+claudish --models --force-update
 ```
 
 ### 6. ✅ Use --stdin for Large Prompts
@@ -982,13 +1001,13 @@ const MODELS = ["x-ai/grok-code-fast-1", "openai/gpt-5"];
 **Right:**
 ```typescript
 // Query dynamically
-const { stdout } = await Bash("claudish --
+const { stdout } = await Bash("claudish --models --json");
 const models = JSON.parse(stdout).models.map(m => m.id);
 ```
 
 ### ✅ Do Accept Custom Models From Users
 
-**Problem:** User provides a custom model ID that's not in --
+**Problem:** User provides a custom model ID that's not in --top-models
 
 **Wrong (rejecting custom models):**
 ```typescript
@@ -1002,7 +1021,7 @@ if (!availableModels.includes(userModel)) {
 
 **Right (accept any valid model ID):**
 ```typescript
-// Claudish accepts ANY valid OpenRouter model ID, even if not in --
+// Claudish accepts ANY valid OpenRouter model ID, even if not in --top-models
 const userModel = "custom/provider/model-123";
 
 // Validate it's a non-empty string with provider format
@@ -1056,7 +1075,7 @@ const model = prefs.preferredModel || defaultModel;
 ```typescript
 // In a multi-step workflow, ask once
 if (!process.env.CLAUDISH_MODEL) {
-  const { stdout } = await Bash("claudish --
+  const { stdout } = await Bash("claudish --models --json");
   const models = JSON.parse(stdout).models;
 
   const response = await AskUserQuestion({
@@ -1086,7 +1105,7 @@ await Bash(`claudish --model ${model} "task 2"`);
 1. ✅ **Accept any model ID** user provides (unless obviously malformed)
 2. ✅ **Don't filter** based on your "shortlist" - let Claudish handle validation
 3. ✅ **Offer to set CLAUDISH_MODEL** environment variable for session persistence
-4. ✅ **Explain** that --
+4. ✅ **Explain** that --top-models shows curated recommendations, --models shows all
 5. ✅ **Validate format** (should contain "/") but not restrict to known models
 6. ❌ **Never reject** a user's custom model with "not in my shortlist"
 
@@ -1163,7 +1182,7 @@ async function reviewCodeWithMultipleModels(files: string[]) {
 */
 async function implementWithModel(featureDescription: string) {
   // Step 1: Get available models
-  const { stdout } = await Bash("claudish --
+  const { stdout } = await Bash("claudish --models --json");
   const models = JSON.parse(stdout).models;
 
   // Step 2: Let user select model
@@ -1226,7 +1245,7 @@ Include:
 **Symptoms:** Unexpected API costs
 
 **Solutions:**
-1. Use budget-friendly models (check pricing with `--
+1. Use budget-friendly models (check pricing with `--models` or `--top-models`)
 2. Enable cost tracking: `--cost-tracker`
 3. Use --json to monitor costs: `claudish --json "task" | jq '.total_cost_usd'`
 
@@ -1244,7 +1263,7 @@ Include:
 **Symptoms:** "Model not found" error
 
 **Solutions:**
-1. Update model cache: `claudish --
+1. Update model cache: `claudish --models --force-update`
 2. Check OpenRouter website for model availability
 3. Use alternative model from same category
 
@@ -1275,5 +1294,5 @@ claudish --help-ai # AI agent usage guide
 ---
 
 **Maintained by:** MadAppGang
-**Last Updated:** November
-**Skill Version:** 1.
+**Last Updated:** November 25, 2025
+**Skill Version:** 1.1.0