claudish 3.0.4 → 3.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -34402,7 +34402,7 @@ function createTempSettingsFile(modelDisplay, port) {
  const DIM2 = "\\033[2m";
  const RESET2 = "\\033[0m";
  const BOLD2 = "\\033[1m";
- statusCommand = `JSON=$(cat) && DIR=$(basename "$(pwd)") && [ \${#DIR} -gt 15 ] && DIR="\${DIR:0:12}..." || true && CTX=100 && COST="0" && if [ -f "${tokenFilePath}" ]; then TOKENS=$(cat "${tokenFilePath}" 2>/dev/null) && REAL_COST=$(echo "$TOKENS" | grep -o '"total_cost":[0-9.]*' | cut -d: -f2) && REAL_CTX=$(echo "$TOKENS" | grep -o '"context_left_percent":[0-9]*' | grep -o '[0-9]*') && if [ ! -z "$REAL_COST" ]; then COST="$REAL_COST"; else COST=$(echo "$JSON" | grep -o '"total_cost_usd":[0-9.]*' | cut -d: -f2); fi && if [ ! -z "$REAL_CTX" ]; then CTX="$REAL_CTX"; fi; else COST=$(echo "$JSON" | grep -o '"total_cost_usd":[0-9.]*' | cut -d: -f2); fi && [ -z "$COST" ] && COST="0" || true && if [ "$CLAUDISH_IS_LOCAL" = "true" ]; then COST_DISPLAY="LOCAL"; else COST_DISPLAY=$(printf "\\$%.3f" "$COST"); fi && printf "${CYAN2}${BOLD2}%s${RESET2} ${DIM2}•${RESET2} ${YELLOW2}%s${RESET2} ${DIM2}•${RESET2} ${GREEN2}%s${RESET2} ${DIM2}•${RESET2} ${MAGENTA}%s%%${RESET2}\\n" "$DIR" "$CLAUDISH_ACTIVE_MODEL_NAME" "$COST_DISPLAY" "$CTX"`;
+ statusCommand = `JSON=$(cat) && DIR=$(basename "$(pwd)") && [ \${#DIR} -gt 15 ] && DIR="\${DIR:0:12}..." || true && CTX=100 && COST="0" && if [ -f "${tokenFilePath}" ]; then TOKENS=$(cat "${tokenFilePath}" 2>/dev/null) && REAL_CTX=$(echo "$TOKENS" | grep -o '"context_left_percent":[0-9]*' | grep -o '[0-9]*') && if [ ! -z "$REAL_CTX" ]; then CTX="$REAL_CTX"; fi; fi && COST=$(echo "$JSON" | grep -o '"total_cost_usd":[0-9.]*' | cut -d: -f2) && [ -z "$COST" ] && COST="0" || true && if [ "$CLAUDISH_IS_LOCAL" = "true" ]; then COST_DISPLAY="LOCAL"; else COST_DISPLAY=$(printf "\\$%.3f" "$COST"); fi && printf "${CYAN2}${BOLD2}%s${RESET2} ${DIM2}•${RESET2} ${YELLOW2}%s${RESET2} ${DIM2}•${RESET2} ${GREEN2}%s${RESET2} ${DIM2}•${RESET2} ${MAGENTA}%s%%${RESET2}\\n" "$DIR" "$CLAUDISH_ACTIVE_MODEL_NAME" "$COST_DISPLAY" "$CTX"`;
  }
  const settings = {
  statusLine: {
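The status-line change above simplifies where the two displayed numbers come from: the per-port token file is now read only for context_left_percent, and the dropped total_cost branch means cost is always taken from the total_cost_usd field in the JSON that Claude Code pipes to the status command. A minimal plain-Node restatement of that flow (illustrative only; the shipped implementation is the inline bash command above, and the helper name here is invented):

import { readFileSync } from "node:fs";

// Hypothetical restatement of the 3.0.6 status-line logic in JavaScript.
function statusFields(stdinJson, tokenFilePath) {
  let ctx = 100; // default when the token file is missing or unreadable
  try {
    // Token file: context percentage only (the old total_cost lookup is gone).
    const tokens = JSON.parse(readFileSync(tokenFilePath, "utf-8"));
    if (typeof tokens.context_left_percent === "number") ctx = tokens.context_left_percent;
  } catch {}
  // Cost: always from the session JSON Claude Code writes to stdin.
  const cost = JSON.parse(stdinJson).total_cost_usd ?? 0;
  return { ctx, cost };
}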
@@ -39898,8 +39898,8 @@ var init_openai_compat = __esm(() => {
  });

  // src/handlers/openrouter-handler.ts
- import { writeFileSync as writeFileSync8 } from "node:fs";
- import { tmpdir as tmpdir2 } from "node:os";
+ import { writeFileSync as writeFileSync8, mkdirSync as mkdirSync5 } from "node:fs";
+ import { homedir as homedir2 } from "node:os";
  import { join as join8 } from "node:path";

  class OpenRouterHandler {
@@ -39947,7 +39947,9 @@ class OpenRouterHandler {
  context_left_percent: leftPct,
  updated_at: Date.now()
  };
- writeFileSync8(join8(tmpdir2(), `claudish-tokens-${this.port}.json`), JSON.stringify(data), "utf-8");
+ const claudishDir = join8(homedir2(), ".claudish");
+ mkdirSync5(claudishDir, { recursive: true });
+ writeFileSync8(join8(claudishDir, `tokens-${this.port}.json`), JSON.stringify(data), "utf-8");
  } catch (e) {}
  }
  async handle(c, payload) {
@@ -60599,8 +60601,8 @@ var init_undici = __esm(() => {
  });

  // src/handlers/local-provider-handler.ts
- import { writeFileSync as writeFileSync9 } from "node:fs";
- import { tmpdir as tmpdir3 } from "node:os";
+ import { writeFileSync as writeFileSync9, mkdirSync as mkdirSync6 } from "node:fs";
+ import { homedir as homedir3 } from "node:os";
  import { join as join9 } from "node:path";

  class LocalProviderHandler {
@@ -60767,7 +60769,9 @@ class LocalProviderHandler {
  context_left_percent: leftPct,
  updated_at: Date.now()
  };
- writeFileSync9(join9(tmpdir3(), `claudish-tokens-${this.port}.json`), JSON.stringify(data), "utf-8");
+ const claudishDir = join9(homedir3(), ".claudish");
+ mkdirSync6(claudishDir, { recursive: true });
+ writeFileSync9(join9(claudishDir, `tokens-${this.port}.json`), JSON.stringify(data), "utf-8");
  } catch (e) {}
  }
  async handle(c, payload) {
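Both handlers make the same move: the per-port token file leaves os.tmpdir(), where tmp-cleaners and reboots can delete it out from under the status line, for a stable ~/.claudish directory created on demand. The tokenFilePath interpolated into the status-line command above presumably resolves to this same path, keeping writer and reader in sync. A consolidated sketch of the new write path (the helper is illustrative; in the bundle this logic is inlined in each handler):

import { writeFileSync, mkdirSync } from "node:fs";
import { homedir } from "node:os";
import { join } from "node:path";

// Illustrative helper; OpenRouterHandler and LocalProviderHandler each inline this.
function writeTokenFile(port, data) {
  const claudishDir = join(homedir(), ".claudish");
  // { recursive: true } makes this a no-op when the directory already exists.
  mkdirSync(claudishDir, { recursive: true });
  // The file name also changes: claudish-tokens-<port>.json becomes tokens-<port>.json.
  writeFileSync(join(claudishDir, `tokens-${port}.json`), JSON.stringify(data), "utf-8");
}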
@@ -61289,24 +61293,24 @@ __export(exports_update_checker, {
  });
  import { execSync } from "node:child_process";
  import { createInterface as createInterface2 } from "node:readline";
- import { existsSync as existsSync7, readFileSync as readFileSync6, writeFileSync as writeFileSync10, mkdirSync as mkdirSync5, unlinkSync as unlinkSync2 } from "node:fs";
+ import { existsSync as existsSync7, readFileSync as readFileSync6, writeFileSync as writeFileSync10, mkdirSync as mkdirSync7, unlinkSync as unlinkSync2 } from "node:fs";
  import { join as join10 } from "node:path";
- import { tmpdir as tmpdir4, homedir as homedir2, platform } from "node:os";
+ import { tmpdir as tmpdir2, homedir as homedir4, platform } from "node:os";
  function getCacheFilePath() {
  let cacheDir;
  if (isWindows2) {
- const localAppData = process.env.LOCALAPPDATA || join10(homedir2(), "AppData", "Local");
+ const localAppData = process.env.LOCALAPPDATA || join10(homedir4(), "AppData", "Local");
  cacheDir = join10(localAppData, "claudish");
  } else {
- cacheDir = join10(homedir2(), ".cache", "claudish");
+ cacheDir = join10(homedir4(), ".cache", "claudish");
  }
  try {
  if (!existsSync7(cacheDir)) {
- mkdirSync5(cacheDir, { recursive: true });
+ mkdirSync7(cacheDir, { recursive: true });
  }
  return join10(cacheDir, "update-check.json");
  } catch {
- return join10(tmpdir4(), "claudish-update-check.json");
+ return join10(tmpdir2(), "claudish-update-check.json");
  }
  }
  function readCache() {
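The update-checker hunk appears purely mechanical: adding mkdirSync imports in the two handlers shifted the bundler's alias counters, so mkdirSync5 becomes mkdirSync7, tmpdir4 becomes tmpdir2, and homedir2 becomes homedir4. The cache-path logic itself is unchanged.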
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "claudish",
- "version": "3.0.4",
+ "version": "3.0.6",
  "description": "Run Claude Code with any model - OpenRouter, Ollama, LM Studio & local models",
  "type": "module",
  "main": "./dist/index.js",
@@ -0,0 +1,133 @@
+ {
+ "version": "1.1.5",
+ "lastUpdated": "2025-12-28",
+ "source": "https://openrouter.ai/models?categories=programming&fmt=cards&order=top-weekly",
+ "models": [
+ {
+ "id": "google/gemini-3-pro-preview",
+ "name": "Google: Gemini 3 Pro Preview",
+ "description": "Gemini 3 Pro is Google’s flagship frontier model for high-precision multimodal reasoning, combining strong performance across text, image, video, audio, and code with a 1M-token context window. Reasoning Details must be preserved when using multi-turn tool calling, see our docs here: https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks. It delivers state-of-the-art benchmark results in general reasoning, STEM problem solving, factual QA, and multimodal understanding, including leading scores on LMArena, GPQA Diamond, MathArena Apex, MMMU-Pro, and Video-MMMU. Interactions emphasize depth and interpretability: the model is designed to infer intent with minimal prompting and produce direct, insight-focused responses.\n\nBuilt for advanced development and agentic workflows, Gemini 3 Pro provides robust tool-calling, long-horizon planning stability, and strong zero-shot generation for complex UI, visualization, and coding tasks. It excels at agentic coding (SWE-Bench Verified, Terminal-Bench 2.0), multimodal analysis, and structured long-form tasks such as research synthesis, planning, and interactive learning experiences. Suitable applications include autonomous agents, coding assistants, multimodal analytics, scientific reasoning, and high-context information processing.",
+ "provider": "Google",
+ "category": "vision",
+ "priority": 1,
+ "pricing": {
+ "input": "$2.00/1M",
+ "output": "$12.00/1M",
+ "average": "$7.00/1M"
+ },
+ "context": "1048K",
+ "maxOutputTokens": 65536,
+ "modality": "text+image->text",
+ "supportsTools": true,
+ "supportsReasoning": true,
+ "supportsVision": true,
+ "isModerated": false,
+ "recommended": true
+ },
+ {
+ "id": "openai/gpt-5.1-codex",
+ "name": "OpenAI: GPT-5.1-Codex",
+ "description": "GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks. The model supports building projects from scratch, feature development, debugging, large-scale refactoring, and code review. Compared to GPT-5.1, Codex is more steerable, adheres closely to developer instructions, and produces cleaner, higher-quality code outputs. Reasoning effort can be adjusted with the `reasoning.effort` parameter. Read the [docs here](https://openrouter.ai/docs/use-cases/reasoning-tokens#reasoning-effort-level)\n\nCodex integrates into developer environments including the CLI, IDE extensions, GitHub, and cloud tasks. It adapts reasoning effort dynamically—providing fast responses for small tasks while sustaining extended multi-hour runs for large projects. The model is trained to perform structured code reviews, catching critical flaws by reasoning over dependencies and validating behavior against tests. It also supports multimodal inputs such as images or screenshots for UI development and integrates tool use for search, dependency installation, and environment setup. Codex is intended specifically for agentic coding applications.",
+ "provider": "Openai",
+ "category": "vision",
+ "priority": 2,
+ "pricing": {
+ "input": "$1.25/1M",
+ "output": "$10.00/1M",
+ "average": "$5.63/1M"
+ },
+ "context": "400K",
+ "maxOutputTokens": 128000,
+ "modality": "text+image->text",
+ "supportsTools": true,
+ "supportsReasoning": true,
+ "supportsVision": true,
+ "isModerated": true,
+ "recommended": true
+ },
+ {
+ "id": "x-ai/grok-code-fast-1",
+ "name": "xAI: Grok Code Fast 1",
+ "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality work flows.",
+ "provider": "X-ai",
+ "category": "reasoning",
+ "priority": 3,
+ "pricing": {
+ "input": "$0.20/1M",
+ "output": "$1.50/1M",
+ "average": "$0.85/1M"
+ },
+ "context": "256K",
+ "maxOutputTokens": 10000,
+ "modality": "text->text",
+ "supportsTools": true,
+ "supportsReasoning": true,
+ "supportsVision": false,
+ "isModerated": false,
+ "recommended": true
+ },
+ {
+ "id": "minimax/minimax-m2",
+ "name": "MiniMax: MiniMax M2",
+ "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning, tool use, and multi-step task execution while maintaining low latency and deployment efficiency.\n\nThe model excels in code generation, multi-file editing, compile-run-fix loops, and test-validated repair, showing strong results on SWE-Bench Verified, Multi-SWE-Bench, and Terminal-Bench. It also performs competitively in agentic evaluations such as BrowseComp and GAIA, effectively handling long-horizon planning, retrieval, and recovery from execution errors.\n\nBenchmarked by [Artificial Analysis](https://artificialanalysis.ai/models/minimax-m2), MiniMax-M2 ranks among the top open-source models for composite intelligence, spanning mathematics, science, and instruction-following. Its small activation footprint enables fast inference, high concurrency, and improved unit economics, making it well-suited for large-scale agents, developer assistants, and reasoning-driven applications that require responsiveness and cost efficiency.\n\nTo avoid degrading this model's performance, MiniMax highly recommends preserving reasoning between turns. Learn more about using reasoning_details to pass back reasoning in our [docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks).",
+ "provider": "Minimax",
+ "category": "reasoning",
+ "priority": 4,
+ "pricing": {
+ "input": "$0.20/1M",
+ "output": "$1.00/1M",
+ "average": "$0.60/1M"
+ },
+ "context": "196K",
+ "maxOutputTokens": 131072,
+ "modality": "text->text",
+ "supportsTools": true,
+ "supportsReasoning": true,
+ "supportsVision": false,
+ "isModerated": false,
+ "recommended": true
+ },
+ {
+ "id": "z-ai/glm-4.6",
+ "name": "Z.AI: GLM 4.6",
+ "description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code、Cline、Roo Code and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.",
+ "provider": "Z-ai",
+ "category": "reasoning",
+ "priority": 5,
+ "pricing": {
+ "input": "$0.39/1M",
+ "output": "$1.90/1M",
+ "average": "$1.15/1M"
+ },
+ "context": "204K",
+ "maxOutputTokens": 204800,
+ "modality": "text->text",
+ "supportsTools": true,
+ "supportsReasoning": true,
+ "supportsVision": false,
+ "isModerated": false,
+ "recommended": true
+ },
+ {
+ "id": "qwen/qwen3-vl-235b-a22b-instruct",
+ "name": "Qwen: Qwen3 VL 235B A22B Instruct",
+ "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. The Instruct model targets general vision-language use (VQA, document parsing, chart/table extraction, multilingual OCR). The series emphasizes robust perception (recognition of diverse real-world and synthetic categories), spatial understanding (2D/3D grounding), and long-form visual comprehension, with competitive results on public multimodal benchmarks for both perception and reasoning.\n\nBeyond analysis, Qwen3-VL supports agentic interaction and tool use: it can follow complex instructions over multi-image, multi-turn dialogues; align text to video timelines for precise temporal queries; and operate GUI elements for automation tasks. The models also enable visual coding workflows—turning sketches or mockups into code and assisting with UI debugging—while maintaining strong text-only performance comparable to the flagship Qwen3 language models. This makes Qwen3-VL suitable for production scenarios spanning document AI, multilingual OCR, software/UI assistance, spatial/embodied tasks, and research on vision-language agents.",
+ "provider": "Qwen",
+ "category": "vision",
+ "priority": 6,
+ "pricing": {
+ "input": "$0.20/1M",
+ "output": "$1.20/1M",
+ "average": "$0.70/1M"
+ },
+ "context": "262K",
+ "maxOutputTokens": null,
+ "modality": "text+image->text",
+ "supportsTools": true,
+ "supportsReasoning": false,
+ "supportsVision": true,
+ "isModerated": false,
+ "recommended": true
+ }
+ ]
+ }
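The new-file hunk ships a refreshed OpenRouter model catalog (version 1.1.5) as static JSON; this diff view omits the new file's path. A hypothetical consumer, assuming the catalog sits next to the script as models.json:

import { readFileSync } from "node:fs";

// The file name is an assumption; the diff does not show where the catalog lives.
const catalog = JSON.parse(readFileSync("models.json", "utf-8"));
for (const m of catalog.models.filter((m) => m.recommended)) {
  console.log(`${m.priority}. ${m.id} (${m.context} context, avg ${m.pricing.average})`);
}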