@akiojin/gwt 2.3.0 → 2.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/README.ja.md +5 -3
  2. package/README.md +5 -3
  3. package/dist/claude.d.ts +1 -0
  4. package/dist/claude.d.ts.map +1 -1
  5. package/dist/claude.js +6 -3
  6. package/dist/claude.js.map +1 -1
  7. package/dist/cli/ui/components/App.d.ts +3 -1
  8. package/dist/cli/ui/components/App.d.ts.map +1 -1
  9. package/dist/cli/ui/components/App.js +47 -2
  10. package/dist/cli/ui/components/App.js.map +1 -1
  11. package/dist/cli/ui/components/screens/AIToolSelectorScreen.d.ts +1 -1
  12. package/dist/cli/ui/components/screens/AIToolSelectorScreen.d.ts.map +1 -1
  13. package/dist/cli/ui/components/screens/AIToolSelectorScreen.js.map +1 -1
  14. package/dist/cli/ui/components/screens/BranchListScreen.d.ts.map +1 -1
  15. package/dist/cli/ui/components/screens/BranchListScreen.js +4 -0
  16. package/dist/cli/ui/components/screens/BranchListScreen.js.map +1 -1
  17. package/dist/cli/ui/components/screens/ModelSelectorScreen.d.ts +18 -0
  18. package/dist/cli/ui/components/screens/ModelSelectorScreen.d.ts.map +1 -0
  19. package/dist/cli/ui/components/screens/ModelSelectorScreen.js +201 -0
  20. package/dist/cli/ui/components/screens/ModelSelectorScreen.js.map +1 -0
  21. package/dist/cli/ui/types.d.ts +11 -1
  22. package/dist/cli/ui/types.d.ts.map +1 -1
  23. package/dist/cli/ui/utils/modelOptions.d.ts +6 -0
  24. package/dist/cli/ui/utils/modelOptions.d.ts.map +1 -0
  25. package/dist/cli/ui/utils/modelOptions.js +111 -0
  26. package/dist/cli/ui/utils/modelOptions.js.map +1 -0
  27. package/dist/codex.d.ts +6 -0
  28. package/dist/codex.d.ts.map +1 -1
  29. package/dist/codex.js +11 -4
  30. package/dist/codex.js.map +1 -1
  31. package/dist/gemini.d.ts +1 -0
  32. package/dist/gemini.d.ts.map +1 -1
  33. package/dist/gemini.js +6 -3
  34. package/dist/gemini.js.map +1 -1
  35. package/dist/index.d.ts.map +1 -1
  36. package/dist/index.js +59 -13
  37. package/dist/index.js.map +1 -1
  38. package/dist/qwen.d.ts +1 -0
  39. package/dist/qwen.d.ts.map +1 -1
  40. package/dist/qwen.js +6 -3
  41. package/dist/qwen.js.map +1 -1
  42. package/package.json +1 -1
  43. package/src/claude.ts +8 -3
  44. package/src/cli/ui/__tests__/components/ModelSelectorScreen.initial.test.tsx +81 -0
  45. package/src/cli/ui/__tests__/components/common/LoadingIndicator.test.tsx +28 -14
  46. package/src/cli/ui/__tests__/components/screens/BranchListScreen.test.tsx +10 -21
  47. package/src/cli/ui/components/App.tsx +84 -4
  48. package/src/cli/ui/components/screens/AIToolSelectorScreen.tsx +1 -2
  49. package/src/cli/ui/components/screens/BranchListScreen.tsx +5 -0
  50. package/src/cli/ui/components/screens/ModelSelectorScreen.tsx +320 -0
  51. package/src/cli/ui/types.ts +13 -0
  52. package/src/cli/ui/utils/modelOptions.test.ts +51 -0
  53. package/src/cli/ui/utils/modelOptions.ts +125 -0
  54. package/src/codex.ts +23 -4
  55. package/src/gemini.ts +8 -3
  56. package/src/index.ts +90 -12
  57. package/src/qwen.ts +8 -3
package/src/cli/ui/utils/modelOptions.ts ADDED
@@ -0,0 +1,125 @@
+ import type { AITool, InferenceLevel, ModelOption } from "../types.js";
+
+ const CODEX_BASE_LEVELS: InferenceLevel[] = ["high", "medium", "low"];
+ const CODEX_MAX_LEVELS: InferenceLevel[] = ["xhigh", "high", "medium", "low"];
+
+ const MODEL_OPTIONS: Record<string, ModelOption[]> = {
+   "claude-code": [
+     {
+       id: "default",
+       label: "Default (recommended) — Sonnet 4.5",
+       description:
+         "Official default alias. Tracks the recommended Claude Code model (currently Sonnet 4.5) and shows as a standard model in /model.",
+       isDefault: true,
+     },
+     {
+       id: "opus",
+       label: "Opus 4.1",
+       description:
+         "Official Opus alias for Claude Code (non-custom, matches /model option).",
+     },
+     {
+       id: "haiku",
+       label: "Haiku 4.5",
+       description:
+         "Official Haiku alias for Claude Code (fastest model, non-custom).",
+     },
+   ],
+   "codex-cli": [
+     {
+       id: "gpt-5.1-codex",
+       label: "gpt-5.1-codex",
+       description: "Standard Codex model",
+       inferenceLevels: CODEX_BASE_LEVELS,
+       defaultInference: "high",
+       isDefault: true,
+     },
+     {
+       id: "gpt-5.1-codex-max",
+       label: "gpt-5.1-codex-max",
+       description: "Max performance (xhigh available)",
+       inferenceLevels: CODEX_MAX_LEVELS,
+       defaultInference: "medium",
+     },
+     {
+       id: "gpt-5.1-codex-mini",
+       label: "gpt-5.1-codex-mini",
+       description: "Lightweight / cost-saving",
+       inferenceLevels: CODEX_BASE_LEVELS,
+       defaultInference: "medium",
+     },
+     {
+       id: "gpt-5.1",
+       label: "gpt-5.1",
+       description: "General-purpose GPT-5.1",
+       inferenceLevels: CODEX_BASE_LEVELS,
+       defaultInference: "high",
+     },
+   ],
+   "gemini-cli": [
+     {
+       id: "gemini-3-pro-preview",
+       label: "Pro (gemini-3-pro-preview)",
+       description:
+         "Default Pro. Falls back to gemini-2.5-pro when preview is unavailable.",
+       isDefault: true,
+     },
+     {
+       id: "gemini-2.5-pro",
+       label: "Pro (gemini-2.5-pro)",
+       description: "Stable Pro model for deep reasoning and creativity",
+     },
+     {
+       id: "gemini-2.5-flash",
+       label: "Flash (gemini-2.5-flash)",
+       description: "Balance of speed and reasoning",
+     },
+     {
+       id: "gemini-2.5-flash-lite",
+       label: "Flash-Lite (gemini-2.5-flash-lite)",
+       description: "Fastest for simple tasks",
+     },
+   ],
+   "qwen-cli": [
+     {
+       id: "coder-model",
+       label: "Coder Model",
+       description:
+         "Latest Qwen Coder model (qwen3-coder-plus-2025-09-23) from Alibaba Cloud ModelStudio",
+       isDefault: true,
+     },
+     {
+       id: "vision-model",
+       label: "Vision Model",
+       description:
+         "Latest Qwen Vision model (qwen3-vl-plus-2025-09-23) from Alibaba Cloud ModelStudio",
+     },
+   ],
+ };
+
+ export function getModelOptions(tool: AITool): ModelOption[] {
+   return MODEL_OPTIONS[tool] ?? [];
+ }
+
+ export function getDefaultModelOption(tool: AITool): ModelOption | undefined {
+   const options = getModelOptions(tool);
+   return options.find((opt) => opt.isDefault) ?? options[0];
+ }
+
+ export function getInferenceLevelsForModel(
+   model?: ModelOption,
+ ): InferenceLevel[] {
+   if (!model?.inferenceLevels || model.inferenceLevels.length === 0) {
+     return [];
+   }
+   return model.inferenceLevels;
+ }
+
+ export function getDefaultInferenceForModel(
+   model?: ModelOption,
+ ): InferenceLevel | undefined {
+   if (!model) return undefined;
+   if (model.defaultInference) return model.defaultInference;
+   const levels = getInferenceLevelsForModel(model);
+   return levels[0];
+ }
package/src/codex.ts CHANGED
@@ -5,14 +5,23 @@ import { existsSync } from "fs";
  import { createChildStdio, getTerminalStreams } from "./utils/terminal.js";

  const CODEX_CLI_PACKAGE = "@openai/codex@latest";
- const DEFAULT_CODEX_ARGS = [
+
+ export type CodexReasoningEffort = "low" | "medium" | "high" | "xhigh";
+
+ export const DEFAULT_CODEX_MODEL = "gpt-5.1-codex";
+ export const DEFAULT_CODEX_REASONING_EFFORT: CodexReasoningEffort = "high";
+
+ export const buildDefaultCodexArgs = (
+   model: string = DEFAULT_CODEX_MODEL,
+   reasoningEffort: CodexReasoningEffort = DEFAULT_CODEX_REASONING_EFFORT,
+ ): string[] => [
    "--enable",
    "web_search_request",
-   "--model=gpt-5.1-codex",
+   `--model=${model}`,
    "--sandbox",
    "workspace-write",
    "-c",
-   "model_reasoning_effort=high",
+   `model_reasoning_effort=${reasoningEffort}`,
    "-c",
    "model_reasoning_summaries=detailed",
    "-c",
@@ -42,6 +51,8 @@ export async function launchCodexCLI(
      extraArgs?: string[];
      bypassApprovals?: boolean;
      envOverrides?: Record<string, string>;
+     model?: string;
+     reasoningEffort?: CodexReasoningEffort;
    } = {},
  ): Promise<void> {
    const terminal = getTerminalStreams();
@@ -55,6 +66,12 @@ export async function launchCodexCLI(
    console.log(chalk.gray(` Working directory: ${worktreePath}`));

    const args: string[] = [];
+   const model = options.model ?? DEFAULT_CODEX_MODEL;
+   const reasoningEffort =
+     options.reasoningEffort ?? DEFAULT_CODEX_REASONING_EFFORT;
+
+   console.log(chalk.green(` šŸŽÆ Model: ${model}`));
+   console.log(chalk.green(` 🧠 Reasoning: ${reasoningEffort}`));

    switch (options.mode) {
      case "continue":
@@ -80,7 +97,9 @@
      args.push(...options.extraArgs);
    }

-   args.push(...DEFAULT_CODEX_ARGS);
+   const codexArgs = buildDefaultCodexArgs(model, reasoningEffort);
+
+   args.push(...codexArgs);

    terminal.exitRawMode();

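Net effect: the frozen DEFAULT_CODEX_ARGS array becomes a parameterized builder in which only --model and model_reasoning_effort vary, so calling it with no arguments reproduces the 2.3.0 flags exactly. A sketch of the observable output; it asserts only the flags visible in the hunk above, since the diff truncates the tail of the array:

import { buildDefaultCodexArgs } from "./codex.js";

// No arguments: identical to the old hard-coded defaults.
const defaults = buildDefaultCodexArgs();
// ["--enable", "web_search_request", "--model=gpt-5.1-codex",
//  "--sandbox", "workspace-write", "-c", "model_reasoning_effort=high", ...]

// Overridden: the max model at its exclusive "xhigh" effort level.
const maxed = buildDefaultCodexArgs("gpt-5.1-codex-max", "xhigh");
// [..., "--model=gpt-5.1-codex-max", ..., "model_reasoning_effort=xhigh", ...]

console.log(defaults.length === maxed.length); // true; only the two values differ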
package/src/gemini.ts CHANGED
@@ -1,6 +1,5 @@
  import { execa } from "execa";
  import chalk from "chalk";
- import { platform } from "os";
  import { existsSync } from "fs";
  import { createChildStdio, getTerminalStreams } from "./utils/terminal.js";

@@ -23,6 +22,7 @@ export async function launchGeminiCLI(
      mode?: "normal" | "continue" | "resume";
      extraArgs?: string[];
      envOverrides?: Record<string, string>;
+     model?: string;
    } = {},
  ): Promise<void> {
    const terminal = getTerminalStreams();
@@ -38,6 +38,11 @@

    const args: string[] = [];

+   if (options.model) {
+     args.push("--model", options.model);
+     console.log(chalk.green(` šŸŽÆ Model: ${options.model}`));
+   }
+
    // Handle execution mode
    switch (options.mode) {
      case "continue":
@@ -135,7 +140,7 @@
      errorMessage = `Failed to launch Gemini CLI: ${error.message || "Unknown error"}`;
    }

-   if (platform() === "win32") {
+   if (process.platform === "win32") {
      console.error(chalk.red("\nšŸ’” Windows troubleshooting tips:"));
      if (hasLocalGemini) {
        console.error(
@@ -175,7 +180,7 @@
   */
  async function isGeminiCommandAvailable(): Promise<boolean> {
    try {
-     const command = platform() === "win32" ? "where" : "which";
+     const command = process.platform === "win32" ? "where" : "which";
      await execa(command, ["gemini"], { shell: true });
      return true;
    } catch {
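Gemini takes the lighter path here (qwen.ts below mirrors it line for line): the selected model is forwarded as a plain --model flag only when one is present, so omitting it keeps the CLI's own default and the 2.3.0 behavior. A hypothetical call sketch; the worktree path and the rendered command line are illustrative, not taken from the package:

import { launchGeminiCLI } from "./gemini.js";

// No model selected: args unchanged, Gemini CLI picks its default.
await launchGeminiCLI("/repos/app/.worktrees/feature-x");

// Model chosen in the new ModelSelectorScreen: the flag is appended
// before mode handling, roughly `gemini --model gemini-2.5-flash`.
await launchGeminiCLI("/repos/app/.worktrees/feature-x", {
  model: "gemini-2.5-flash",
});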
package/src/index.ts CHANGED
@@ -10,7 +10,11 @@ import {
    GitError,
  } from "./git.js";
  import { launchClaudeCode } from "./claude.js";
- import { launchCodexCLI, CodexError } from "./codex.js";
+ import {
+   launchCodexCLI,
+   CodexError,
+   type CodexReasoningEffort,
+ } from "./codex.js";
  import { launchGeminiCLI, GeminiError } from "./gemini.js";
  import { launchQwenCLI, QwenError } from "./qwen.js";
  import {
@@ -183,17 +187,43 @@ async function runDependencyInstallStep<T extends DependencyInstallResult>(

  async function waitForEnter(promptMessage: string): Promise<void> {
    if (!process.stdin.isTTY) {
+     // For non-interactive environments, resolve immediately.
      return;
    }

+   // Ensure stdin is resumed and not in raw mode before using readline.
+   // This is crucial for environments where stdin might be paused or in raw mode
+   // by other libraries (like Ink.js).
+   if (typeof process.stdin.resume === "function") {
+     process.stdin.resume();
+   }
+   if (process.stdin.isRaw) {
+     process.stdin.setRawMode(false);
+   }
+
    await new Promise<void>((resolve) => {
      const rl = readline.createInterface({
        input: process.stdin,
        output: process.stdout,
      });

+     // Handle Ctrl+C to gracefully exit.
+     rl.on("SIGINT", () => {
+       rl.close();
+       // Restore stdin to a paused state before exiting.
+       if (typeof process.stdin.pause === "function") {
+         process.stdin.pause();
+       }
+       process.exit(0);
+     });
+
      rl.question(`${promptMessage}\n`, () => {
        rl.close();
+       // Pause stdin again to allow other parts of the application
+       // to take control if needed.
+       if (typeof process.stdin.pause === "function") {
+         process.stdin.pause();
+       }
        resolve();
      });
    });
@@ -292,11 +322,17 @@ export async function handleAIToolWorkflow(
      tool,
      mode,
      skipPermissions,
+     model,
+     inferenceLevel,
    } = selectionResult;

    const branchLabel = displayName ?? branch;
+   const modelInfo =
+     model || inferenceLevel
+       ? `, model=${model ?? "default"}${inferenceLevel ? `/${inferenceLevel}` : ""}`
+       : "";
    printInfo(
-     `Selected: ${branchLabel} with ${tool} (${mode} mode, skipPermissions: ${skipPermissions})`,
+     `Selected: ${branchLabel} with ${tool} (${mode} mode${modelInfo}, skipPermissions: ${skipPermissions})`,
    );

    try {
@@ -503,9 +539,11 @@
      printWarning(
        "Resolve these divergences (e.g., rebase or merge) before launching to avoid conflicts.",
      );
-     await waitForEnter("Press Enter to continue.");
+     await waitForEnter(
+       "Press Enter to return to the main menu and resolve these issues manually.",
+     );
      printWarning(
-       "Skipping AI tool launch until divergences are resolved. Returning to main menu...",
+       "AI tool launch has been cancelled until divergences are resolved.",
      );
      return;
    } else if (fastForwardError) {
@@ -528,7 +566,12 @@
    // Builtin tools use their dedicated launch functions
    // Custom tools use the generic launchCustomAITool function
    if (tool === "claude-code") {
-     await launchClaudeCode(worktreePath, {
+     const launchOptions: {
+       mode?: "normal" | "continue" | "resume";
+       skipPermissions?: boolean;
+       envOverrides?: Record<string, string>;
+       model?: string;
+     } = {
        mode:
          mode === "resume"
            ? "resume"
@@ -537,9 +580,19 @@
            : "normal",
        skipPermissions,
        envOverrides: sharedEnv,
-     });
+     };
+     if (model) {
+       launchOptions.model = model;
+     }
+     await launchClaudeCode(worktreePath, launchOptions);
    } else if (tool === "codex-cli") {
-     await launchCodexCLI(worktreePath, {
+     const launchOptions: {
+       mode?: "normal" | "continue" | "resume";
+       bypassApprovals?: boolean;
+       envOverrides?: Record<string, string>;
+       model?: string;
+       reasoningEffort?: CodexReasoningEffort;
+     } = {
        mode:
          mode === "resume"
            ? "resume"
@@ -548,9 +601,21 @@
            : "normal",
        bypassApprovals: skipPermissions,
        envOverrides: sharedEnv,
-     });
+     };
+     if (model) {
+       launchOptions.model = model;
+     }
+     if (inferenceLevel) {
+       launchOptions.reasoningEffort = inferenceLevel as CodexReasoningEffort;
+     }
+     await launchCodexCLI(worktreePath, launchOptions);
    } else if (tool === "gemini-cli") {
-     await launchGeminiCLI(worktreePath, {
+     const launchOptions: {
+       mode?: "normal" | "continue" | "resume";
+       skipPermissions?: boolean;
+       envOverrides?: Record<string, string>;
+       model?: string;
+     } = {
        mode:
          mode === "resume"
            ? "resume"
@@ -559,9 +624,18 @@
            : "normal",
        skipPermissions,
        envOverrides: sharedEnv,
-     });
+     };
+     if (model) {
+       launchOptions.model = model;
+     }
+     await launchGeminiCLI(worktreePath, launchOptions);
    } else if (tool === "qwen-cli") {
-     await launchQwenCLI(worktreePath, {
+     const launchOptions: {
+       mode?: "normal" | "continue" | "resume";
+       skipPermissions?: boolean;
+       envOverrides?: Record<string, string>;
+       model?: string;
+     } = {
        mode:
          mode === "resume"
            ? "resume"
@@ -570,7 +644,11 @@
            : "normal",
        skipPermissions,
        envOverrides: sharedEnv,
-     });
+     };
+     if (model) {
+       launchOptions.model = model;
+     }
+     await launchQwenCLI(worktreePath, launchOptions);
    } else {
      // Custom tool
      printInfo(`Launching custom tool: ${toolConfig.displayName}`);
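Because modelInfo guards on `model || inferenceLevel`, the "Selected:" log line can take four shapes. Restating the diff's own expression standalone for clarity (extracted here for illustration; not an export of the package):

const fmt = (model?: string, inferenceLevel?: string): string =>
  model || inferenceLevel
    ? `, model=${model ?? "default"}${inferenceLevel ? `/${inferenceLevel}` : ""}`
    : "";

fmt();                         // ""  (neither set: log unchanged from 2.3.0)
fmt("opus");                   // ", model=opus"
fmt("gpt-5.1-codex", "xhigh"); // ", model=gpt-5.1-codex/xhigh"
fmt(undefined, "high");        // ", model=default/high"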
package/src/qwen.ts CHANGED
@@ -1,6 +1,5 @@
  import { execa } from "execa";
  import chalk from "chalk";
- import { platform } from "os";
  import { existsSync } from "fs";
  import { createChildStdio, getTerminalStreams } from "./utils/terminal.js";

@@ -23,6 +22,7 @@ export async function launchQwenCLI(
      mode?: "normal" | "continue" | "resume";
      extraArgs?: string[];
      envOverrides?: Record<string, string>;
+     model?: string;
    } = {},
  ): Promise<void> {
    const terminal = getTerminalStreams();
@@ -38,6 +38,11 @@

    const args: string[] = ["--checkpointing"];

+   if (options.model) {
+     args.push("--model", options.model);
+     console.log(chalk.green(` šŸŽÆ Model: ${options.model}`));
+   }
+
    // Handle execution mode
    // Note: Qwen CLI doesn't have explicit continue/resume CLI options at startup.
    // Session management is done via /chat commands during interactive sessions.
@@ -141,7 +146,7 @@
      errorMessage = `Failed to launch Qwen CLI: ${error.message || "Unknown error"}`;
    }

-   if (platform() === "win32") {
+   if (process.platform === "win32") {
      console.error(chalk.red("\nšŸ’” Windows troubleshooting tips:"));
      if (hasLocalQwen) {
        console.error(
@@ -181,7 +186,7 @@
   */
  async function isQwenCommandAvailable(): Promise<boolean> {
    try {
-     const command = platform() === "win32" ? "where" : "which";
+     const command = process.platform === "win32" ? "where" : "which";
      await execa(command, ["qwen"], { shell: true });
      return true;
    } catch {