@akiojin/gwt 2.3.0 → 2.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.ja.md +5 -3
- package/README.md +5 -3
- package/dist/claude.d.ts +1 -0
- package/dist/claude.d.ts.map +1 -1
- package/dist/claude.js +6 -3
- package/dist/claude.js.map +1 -1
- package/dist/cli/ui/components/App.d.ts +3 -1
- package/dist/cli/ui/components/App.d.ts.map +1 -1
- package/dist/cli/ui/components/App.js +47 -2
- package/dist/cli/ui/components/App.js.map +1 -1
- package/dist/cli/ui/components/screens/AIToolSelectorScreen.d.ts +1 -1
- package/dist/cli/ui/components/screens/AIToolSelectorScreen.d.ts.map +1 -1
- package/dist/cli/ui/components/screens/AIToolSelectorScreen.js.map +1 -1
- package/dist/cli/ui/components/screens/BranchListScreen.d.ts.map +1 -1
- package/dist/cli/ui/components/screens/BranchListScreen.js +4 -0
- package/dist/cli/ui/components/screens/BranchListScreen.js.map +1 -1
- package/dist/cli/ui/components/screens/ModelSelectorScreen.d.ts +18 -0
- package/dist/cli/ui/components/screens/ModelSelectorScreen.d.ts.map +1 -0
- package/dist/cli/ui/components/screens/ModelSelectorScreen.js +201 -0
- package/dist/cli/ui/components/screens/ModelSelectorScreen.js.map +1 -0
- package/dist/cli/ui/types.d.ts +11 -1
- package/dist/cli/ui/types.d.ts.map +1 -1
- package/dist/cli/ui/utils/modelOptions.d.ts +6 -0
- package/dist/cli/ui/utils/modelOptions.d.ts.map +1 -0
- package/dist/cli/ui/utils/modelOptions.js +111 -0
- package/dist/cli/ui/utils/modelOptions.js.map +1 -0
- package/dist/codex.d.ts +6 -0
- package/dist/codex.d.ts.map +1 -1
- package/dist/codex.js +11 -4
- package/dist/codex.js.map +1 -1
- package/dist/gemini.d.ts +1 -0
- package/dist/gemini.d.ts.map +1 -1
- package/dist/gemini.js +6 -3
- package/dist/gemini.js.map +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +59 -13
- package/dist/index.js.map +1 -1
- package/dist/qwen.d.ts +1 -0
- package/dist/qwen.d.ts.map +1 -1
- package/dist/qwen.js +6 -3
- package/dist/qwen.js.map +1 -1
- package/package.json +1 -1
- package/src/claude.ts +8 -3
- package/src/cli/ui/__tests__/components/ModelSelectorScreen.initial.test.tsx +81 -0
- package/src/cli/ui/__tests__/components/common/LoadingIndicator.test.tsx +28 -14
- package/src/cli/ui/__tests__/components/screens/BranchListScreen.test.tsx +10 -21
- package/src/cli/ui/components/App.tsx +84 -4
- package/src/cli/ui/components/screens/AIToolSelectorScreen.tsx +1 -2
- package/src/cli/ui/components/screens/BranchListScreen.tsx +5 -0
- package/src/cli/ui/components/screens/ModelSelectorScreen.tsx +320 -0
- package/src/cli/ui/types.ts +13 -0
- package/src/cli/ui/utils/modelOptions.test.ts +51 -0
- package/src/cli/ui/utils/modelOptions.ts +125 -0
- package/src/codex.ts +23 -4
- package/src/gemini.ts +8 -3
- package/src/index.ts +90 -12
- package/src/qwen.ts +8 -3

package/src/cli/ui/components/App.tsx
CHANGED
@@ -14,14 +14,23 @@ import { BranchActionSelectorScreen } from "../screens/BranchActionSelectorScree
 import { AIToolSelectorScreen } from "./screens/AIToolSelectorScreen.js";
 import { SessionSelectorScreen } from "./screens/SessionSelectorScreen.js";
 import { ExecutionModeSelectorScreen } from "./screens/ExecutionModeSelectorScreen.js";
-import type { AITool } from "./screens/AIToolSelectorScreen.js";
 import type { ExecutionMode } from "./screens/ExecutionModeSelectorScreen.js";
+import {
+  ModelSelectorScreen,
+  type ModelSelectionResult,
+} from "./screens/ModelSelectorScreen.js";
 import type { WorktreeItem } from "./screens/WorktreeManagerScreen.js";
 import { useGitData } from "../hooks/useGitData.js";
 import { useScreenState } from "../hooks/useScreenState.js";
 import { formatBranchItems } from "../utils/branchFormatter.js";
 import { calculateStatistics } from "../utils/statisticsCalculator.js";
-import type {
+import type {
+  AITool,
+  BranchInfo,
+  BranchItem,
+  InferenceLevel,
+  SelectedBranchState,
+} from "../types.js";
 import { getRepositoryRoot, deleteBranch } from "../../../git.js";
 import {
   createWorktree,
@@ -36,6 +45,10 @@ import {
   resolveBaseBranchLabel,
   resolveBaseBranchRef,
 } from "../utils/baseBranch.js";
+import {
+  getDefaultInferenceForModel,
+  getDefaultModelOption,
+} from "../utils/modelOptions.js";
 
 const SPINNER_FRAMES = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧"];
 const COMPLETION_HOLD_DURATION_MS = 3000;
@@ -58,6 +71,8 @@ export interface SelectionResult {
   tool: AITool;
   mode: ExecutionMode;
   skipPermissions: boolean;
+  model?: string | null;
+  inferenceLevel?: InferenceLevel;
 }
 
 export interface AppProps {
@@ -90,6 +105,11 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
   const [creationSourceBranch, setCreationSourceBranch] =
     useState<SelectedBranchState | null>(null);
   const [selectedTool, setSelectedTool] = useState<AITool | null>(null);
+  const [selectedModel, setSelectedModel] =
+    useState<ModelSelectionResult | null>(null);
+  const [lastModelByTool, setLastModelByTool] = useState<
+    Record<AITool, ModelSelectionResult | undefined>
+  >({});
 
   // PR cleanup feedback
   const [cleanupIndicators, setCleanupIndicators] = useState<
@@ -365,6 +385,7 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
 
       setSelectedBranch(selection);
       setSelectedTool(null);
+      setSelectedModel(null);
      setCreationSourceBranch(null);
 
      if (protectedSelected) {
@@ -405,6 +426,7 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
        branchCategory: inferBranchCategory(worktree.branch),
      });
      setSelectedTool(null);
+     setSelectedModel(null);
      setCreationSourceBranch(null);
      setCleanupFooterMessage(null);
      navigateTo("ai-tool-selector");
@@ -518,6 +540,7 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
        branchCategory: inferBranchCategory(branchName),
      });
      setSelectedTool(null);
+     setSelectedModel(null);
      setCleanupFooterMessage(null);
 
      navigateTo("ai-tool-selector");
@@ -726,9 +749,22 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
   const handleToolSelect = useCallback(
     (tool: AITool) => {
       setSelectedTool(tool);
+      setSelectedModel(lastModelByTool[tool] ?? null);
+      navigateTo("model-selector");
+    },
+    [lastModelByTool, navigateTo],
+  );
+
+  const handleModelSelect = useCallback(
+    (selection: ModelSelectionResult) => {
+      setSelectedModel(selection);
+      setLastModelByTool((prev) => ({
+        ...prev,
+        ...(selectedTool ? { [selectedTool]: selection } : {}),
+      }));
       navigateTo("execution-mode-selector");
     },
-    [navigateTo],
+    [navigateTo, selectedTool],
   );
 
   // Handle session selection
@@ -746,6 +782,23 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
     (result: { mode: ExecutionMode; skipPermissions: boolean }) => {
       // All selections complete - exit with result
       if (selectedBranch && selectedTool) {
+        const defaultModel = getDefaultModelOption(selectedTool);
+        const resolvedModel =
+          selectedModel?.model ?? defaultModel?.id ?? null;
+        const resolvedInference =
+          selectedModel?.inferenceLevel ??
+          getDefaultInferenceForModel(defaultModel ?? undefined);
+
+        // Claude Code is most stable with no explicit model, so omit --model for default/sonnet/the current recommended ID
+        const isClaudeDefaultModel =
+          selectedTool === "claude-code" &&
+          (!resolvedModel ||
+            resolvedModel === "default" ||
+            resolvedModel === "sonnet" ||
+            resolvedModel === "claude-sonnet-4-5-20250929");
+
+        const modelForPayload = isClaudeDefaultModel ? undefined : resolvedModel;
+
         const payload: SelectionResult = {
           branch: selectedBranch.name,
           displayName: selectedBranch.displayName,
@@ -753,6 +806,10 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
           tool: selectedTool,
           mode: result.mode,
           skipPermissions: result.skipPermissions,
+          ...(modelForPayload !== undefined ? { model: modelForPayload } : {}),
+          ...(resolvedInference !== undefined
+            ? { inferenceLevel: resolvedInference }
+            : {}),
           ...(selectedBranch.remoteBranch
             ? { remoteBranch: selectedBranch.remoteBranch }
             : {}),
@@ -762,7 +819,15 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
         exit();
       }
     },
-    [
+    [
+      selectedBranch,
+      selectedTool,
+      selectedModel,
+      onExit,
+      exit,
+      getDefaultModelOption,
+      getDefaultInferenceForModel,
+    ],
   );
 
   // Render screen based on currentScreen
@@ -846,6 +911,21 @@ export function App({ onExit, loadingIndicatorDelay = 300 }: AppProps) {
         />
       );
 
+    case "model-selector":
+      if (!selectedTool) {
+        goBack();
+        return null;
+      }
+      return (
+        <ModelSelectorScreen
+          tool={selectedTool}
+          onBack={goBack}
+          onSelect={handleModelSelect}
+          version={version}
+          initialSelection={selectedModel}
+        />
+      );
+
     case "session-selector":
       // TODO: Implement session data fetching
       return (
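
For reference, the payload the App now hands back gains two optional fields, model and inferenceLevel. One possible SelectionResult-shaped payload is sketched below; the branch name, mode value, and model ID are purely illustrative, and per the hunks above model is left out entirely when the Claude Code default (default/sonnet/current recommended ID) applies.

// Illustrative SelectionResult-shaped payload (values are not taken from this diff):
const examplePayload = {
  branch: "feature/model-selector",
  displayName: "feature/model-selector",
  tool: "codex-cli",
  mode: "normal",               // stand-in for whichever ExecutionMode was chosen
  skipPermissions: false,
  model: "gpt-5.1-codex-max",   // omitted when the Claude Code default applies
  inferenceLevel: "medium",     // omitted when the model exposes no reasoning levels
};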

package/src/cli/ui/components/screens/AIToolSelectorScreen.tsx
CHANGED
@@ -6,8 +6,7 @@ import { Select } from "../common/Select.js";
 import { useTerminalSize } from "../../hooks/useTerminalSize.js";
 import { getAllTools } from "../../../../config/tools.js";
 import type { AIToolConfig } from "../../../../types/tools.js";
-
-export type AITool = string;
+import type { AITool } from "../../types.js";
 
 export interface AIToolItem {
   label: string;

package/src/cli/ui/components/screens/BranchListScreen.tsx
CHANGED
@@ -147,6 +147,11 @@ export function BranchListScreen({
       return;
     }
 
+    // Disable global shortcuts while in filter mode
+    if (filterMode) {
+      return;
+    }
+
     // Global shortcuts (blocked by Input component when typing in filter mode)
     if (input === "m" && onNavigate) {
       onNavigate("worktree-manager");

package/src/cli/ui/components/screens/ModelSelectorScreen.tsx
ADDED
@@ -0,0 +1,320 @@
+import React, { useEffect, useMemo, useState } from "react";
+import { Box, Text, useInput } from "ink";
+import { Header } from "../parts/Header.js";
+import { Footer } from "../parts/Footer.js";
+import { Select, type SelectItem } from "../common/Select.js";
+import { useTerminalSize } from "../../hooks/useTerminalSize.js";
+import type { AITool, InferenceLevel, ModelOption } from "../../types.js";
+import {
+  getDefaultInferenceForModel,
+  getDefaultModelOption,
+  getInferenceLevelsForModel,
+  getModelOptions,
+} from "../../utils/modelOptions.js";
+
+export interface ModelSelectionResult {
+  model: string | null;
+  inferenceLevel?: InferenceLevel;
+}
+
+interface ModelSelectItem extends SelectItem {
+  description?: string;
+}
+
+interface InferenceSelectItem extends SelectItem {
+  hint?: string;
+}
+
+export interface ModelSelectorScreenProps {
+  tool: AITool;
+  onBack: () => void;
+  onSelect: (selection: ModelSelectionResult) => void;
+  version?: string | null;
+  initialSelection?: ModelSelectionResult | null;
+}
+
+const TOOL_LABELS: Record<string, string> = {
+  "claude-code": "Claude Code",
+  "codex-cli": "Codex",
+  "gemini-cli": "Gemini",
+  "qwen-cli": "Qwen",
+};
+
+const INFERENCE_LABELS: Record<InferenceLevel, string> = {
+  low: "Low (lighter reasoning)",
+  medium: "Medium (balanced reasoning)",
+  high: "High (deeper reasoning)",
+  xhigh: "Extra high (maximum reasoning)",
+};
+
+/**
+ * Screen for selecting a model and, when needed, an inference level
+ */
+export function ModelSelectorScreen({
+  tool,
+  onBack,
+  onSelect,
+  version,
+  initialSelection,
+}: ModelSelectorScreenProps) {
+  const { rows } = useTerminalSize();
+
+  const [step, setStep] = useState<"model" | "inference">("model");
+  const [modelOptions, setModelOptions] = useState<ModelOption[]>([]);
+  const [selectedModel, setSelectedModel] = useState<ModelOption | null>(null);
+
+  // Load the model candidates for the selected tool
+  useEffect(() => {
+    const options = getModelOptions(tool);
+    setModelOptions(options);
+    // Keep the initial selection if it is still valid
+    if (initialSelection?.model) {
+      const found = options.find((opt) => opt.id === initialSelection.model);
+      if (found) {
+        setSelectedModel(found);
+        setStep("model");
+        return;
+      }
+    }
+    setSelectedModel(null);
+    setStep("model");
+  }, [tool, initialSelection?.model]);
+
+  const modelItems: ModelSelectItem[] = useMemo(
+    () =>
+      modelOptions.map((option) => ({
+        label: option.label,
+        value: option.id,
+        ...(option.description ? { description: option.description } : {}),
+      })),
+    [modelOptions],
+  );
+
+  const defaultModelIndex = useMemo(() => {
+    const initial = initialSelection?.model
+      ? modelOptions.findIndex((opt) => opt.id === initialSelection.model)
+      : -1;
+    if (initial !== -1) return initial;
+    const defaultOption = getDefaultModelOption(tool);
+    if (!defaultOption) return 0;
+    const index = modelOptions.findIndex((opt) => opt.id === defaultOption.id);
+    return index >= 0 ? index : 0;
+  }, [initialSelection?.model, modelOptions, tool]);
+
+  const inferenceOptions = useMemo(
+    () => getInferenceLevelsForModel(selectedModel ?? undefined),
+    [selectedModel],
+  );
+
+  const inferenceItems: InferenceSelectItem[] = useMemo(
+    () => {
+      return inferenceOptions.map((level) => {
+        if (selectedModel?.id === "gpt-5.1-codex-max") {
+          if (level === "low") {
+            return {
+              label: "Low",
+              value: level,
+              hint: "Fast responses with lighter reasoning",
+            };
+          }
+          if (level === "medium") {
+            return {
+              label: "Medium (default)",
+              value: level,
+              hint: "Balances speed and reasoning depth for everyday tasks",
+            };
+          }
+          if (level === "high") {
+            return {
+              label: "High",
+              value: level,
+              hint: "Maximizes reasoning depth for complex problems",
+            };
+          }
+          if (level === "xhigh") {
+            return {
+              label: "Extra high",
+              value: level,
+              hint:
+                "Extra high reasoning depth; may quickly consume Plus plan rate limits.",
+            };
+          }
+        }
+
+        return {
+          label: INFERENCE_LABELS[level],
+          value: level,
+        };
+      });
+    },
+    [inferenceOptions, selectedModel?.id],
+  );
+
+  const defaultInferenceIndex = useMemo(() => {
+    const initialLevel = initialSelection?.inferenceLevel;
+    if (initialLevel && inferenceOptions.includes(initialLevel)) {
+      return inferenceOptions.findIndex((lvl) => lvl === initialLevel);
+    }
+    const defaultLevel = getDefaultInferenceForModel(selectedModel ?? undefined);
+    if (!defaultLevel) return 0;
+    const index = inferenceOptions.findIndex((lvl) => lvl === defaultLevel);
+    return index >= 0 ? index : 0;
+  }, [initialSelection?.inferenceLevel, inferenceOptions, selectedModel]);
+
+  useInput((_input, key) => {
+    if (key.escape) {
+      if (step === "inference") {
+        setStep("model");
+        return;
+      }
+      onBack();
+    }
+  });
+
+  const handleModelSelect = (item: ModelSelectItem) => {
+    const option =
+      modelOptions.find((opt) => opt.id === item.value) ?? modelOptions[0];
+
+    if (!option) {
+      onSelect({ model: null });
+      return;
+    }
+
+    setSelectedModel(option);
+
+    const levels = getInferenceLevelsForModel(option);
+    if (levels.length > 0) {
+      setStep("inference");
+    } else {
+      onSelect({ model: option.id });
+    }
+  };
+
+  const handleInferenceSelect = (item: InferenceSelectItem) => {
+    if (!selectedModel) {
+      setStep("model");
+      return;
+    }
+
+    onSelect({
+      model: selectedModel.id,
+      inferenceLevel: item.value as InferenceLevel,
+    });
+  };
+
+  const footerActions =
+    step === "model"
+      ? [
+          { key: "enter", description: "Select" },
+          { key: "esc", description: "Back" },
+        ]
+      : [
+          { key: "enter", description: "Select" },
+          { key: "esc", description: "Back to model" },
+        ];
+
+  const toolLabel = TOOL_LABELS[tool] ?? tool;
+
+  const renderModelItem = (
+    item: ModelSelectItem,
+    isSelected: boolean,
+  ): React.ReactNode => (
+    <Box flexDirection="column">
+      {isSelected ? (
+        <Text color="cyan">➤ {item.label}</Text>
+      ) : (
+        <Text>  {item.label}</Text>
+      )}
+      {item.description ? (
+        <Text color="gray">    {item.description}</Text>
+      ) : null}
+    </Box>
+  );
+
+  return (
+    <Box flexDirection="column" height={rows}>
+      <Header
+        title={step === "model" ? "Model Selection" : "Inference Level"}
+        titleColor="blue"
+        version={version}
+      />
+
+      <Box flexDirection="column" flexGrow={1} marginTop={1}>
+        {step === "model" ? (
+          <>
+            {tool === "gemini-cli" ? (
+              <Box marginBottom={1} flexDirection="column">
+                <Text>Gemini 3 preview is enabled.</Text>
+                <Text>
+                  Selecting Pro uses gemini-3-pro-preview and falls back to
+                  gemini-2.5-pro if unavailable.
+                </Text>
+                <Text>Use --model to pin a specific Gemini model.</Text>
+              </Box>
+            ) : null}
+
+            <Box marginBottom={1}>
+              <Text>
+                Select a model for {toolLabel}
+                {modelOptions.length === 0 ? " (no options)" : ""}
+              </Text>
+            </Box>
+            {tool === "qwen-cli" ? (
+              <Box marginBottom={1} flexDirection="column">
+                <Text>Latest Qwen models from Alibaba Cloud ModelStudio:</Text>
+                <Text>• coder-model (qwen3-coder-plus-2025-09-23)</Text>
+                <Text>• vision-model (qwen3-vl-plus-2025-09-23)</Text>
+              </Box>
+            ) : null}
+
+            {modelItems.length === 0 ? (
+              <Select
+                items={[
+                  {
+                    label: "No model selection required. Press Enter to continue.",
+                    value: "__continue__",
+                  },
+                ]}
+                onSelect={() => onSelect({ model: null })}
+              />
+            ) : (
+              <Select
+                items={modelItems}
+                onSelect={handleModelSelect}
+                initialIndex={defaultModelIndex}
+                renderItem={renderModelItem}
+              />
+            )}
+          </>
+        ) : (
+          <>
+            <Box marginBottom={1}>
+              <Text>
+                Select reasoning level for {selectedModel?.label ?? "model"}
+              </Text>
+            </Box>
+            <Select
+              items={inferenceItems}
+              onSelect={handleInferenceSelect}
+              initialIndex={defaultInferenceIndex}
+              renderItem={(item, isSelected) => (
+                <Box flexDirection="column">
+                  {isSelected ? (
+                    <Text color="cyan">➤ {item.label}</Text>
+                  ) : (
+                    <Text>  {item.label}</Text>
+                  )}
+                  {"hint" in item && item.hint ? (
+                    <Text color="gray">    {item.hint}</Text>
+                  ) : null}
+                </Box>
+              )}
+            />
+          </>
+        )}
+      </Box>
+
+      <Footer actions={footerActions} />
+    </Box>
+  );
+}
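
A quick way to exercise the new screen outside the App flow is to mount it directly with ink's render. The harness below is hypothetical (it is not part of the package) and simply logs whatever the screen reports through onSelect; the tool ID is one of the values handled above.

import React from "react";
import { render } from "ink";
import { ModelSelectorScreen } from "./ModelSelectorScreen.js";

// Hypothetical manual-test harness: print the selection, then unmount.
const app = render(
  <ModelSelectorScreen
    tool="codex-cli"
    onBack={() => app.unmount()}
    onSelect={(selection) => {
      console.log(selection); // e.g. { model: "gpt-5.1-codex-max", inferenceLevel: "medium" }
      app.unmount();
    }}
  />,
);
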
package/src/cli/ui/types.ts
CHANGED
@@ -5,6 +5,18 @@ export interface WorktreeInfo {
   isAccessible?: boolean;
 }
 
+export type AITool = string;
+export type InferenceLevel = "low" | "medium" | "high" | "xhigh";
+
+export interface ModelOption {
+  id: string;
+  label: string;
+  description?: string;
+  inferenceLevels?: InferenceLevel[];
+  defaultInference?: InferenceLevel;
+  isDefault?: boolean;
+}
+
 export interface BranchInfo {
   name: string;
   type: "local" | "remote";
@@ -161,6 +173,7 @@ export type ScreenType =
   | "branch-creator"
   | "branch-action-selector"
   | "ai-tool-selector"
+  | "model-selector"
   | "session-selector"
   | "execution-mode-selector"
   | "batch-merge-progress"
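
Concretely, a ModelOption entry that fits these fields and the Codex expectations in the test file below might look like the following sketch. The label and description strings are assumptions, since the modelOptions.ts tables themselves are not reproduced in this extract; the inference levels and the medium default for gpt-5.1-codex-max follow the selector hints and tests.

import type { ModelOption } from "./types.js"; // path assumes a module sitting next to types.ts

const codexMax: ModelOption = {
  id: "gpt-5.1-codex-max",
  label: "gpt-5.1-codex-max",                          // display label assumed
  description: "Deeper-reasoning Codex model",         // assumed
  inferenceLevels: ["low", "medium", "high", "xhigh"], // per the ModelSelectorScreen hints
  defaultInference: "medium",                          // pinned by modelOptions.test.ts
};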

package/src/cli/ui/utils/modelOptions.test.ts
ADDED
@@ -0,0 +1,51 @@
+import { describe, it, expect } from "vitest";
+import {
+  getModelOptions,
+  getDefaultInferenceForModel,
+  getDefaultModelOption,
+} from "./modelOptions.js";
+
+const byId = (tool: string) => getModelOptions(tool).map((m) => m.id);
+
+describe("modelOptions", () => {
+  it("lists Claude official aliases and keeps default as recommended Sonnet 4.5", () => {
+    const ids = byId("claude-code");
+    expect(ids).toEqual([
+      "default",
+      "opus",
+      "haiku",
+    ]);
+    const defaultModel = getDefaultModelOption("claude-code");
+    expect(defaultModel?.id).toBe("default");
+  });
+
+  it("has unique Codex models", () => {
+    const ids = byId("codex-cli");
+    const unique = new Set(ids);
+    expect(unique.size).toBe(ids.length);
+    expect(ids).toEqual([
+      "gpt-5.1-codex",
+      "gpt-5.1-codex-max",
+      "gpt-5.1-codex-mini",
+      "gpt-5.1",
+    ]);
+  });
+
+  it("uses medium as default reasoning for codex-max", () => {
+    const codexMax = getModelOptions("codex-cli").find((m) => m.id === "gpt-5.1-codex-max");
+    expect(getDefaultInferenceForModel(codexMax)).toBe("medium");
+  });
+
+  it("lists expected Gemini models", () => {
+    expect(byId("gemini-cli")).toEqual([
+      "gemini-3-pro-preview",
+      "gemini-2.5-pro",
+      "gemini-2.5-flash",
+      "gemini-2.5-flash-lite",
+    ]);
+  });
+
+  it("lists expected Qwen models", () => {
+    expect(byId("qwen-cli")).toEqual(["coder-model", "vision-model"]);
+  });
+});
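
The modelOptions.ts module these tests exercise (+125 lines) is not reproduced in this extract. A minimal sketch that would satisfy the ModelOption type and the assertions above is given below; the labels, which entries are flagged as defaults for Codex/Gemini/Qwen, and the inference levels of the non-max Codex models are all assumptions, so treat it as an illustration rather than the package's actual tables.

import type { InferenceLevel, ModelOption } from "../types.js";

// Per-tool model tables. IDs and ordering mirror the test expectations;
// labels and most default flags are placeholders.
const MODELS: Record<string, ModelOption[]> = {
  "claude-code": [
    { id: "default", label: "Default (recommended Sonnet 4.5)", isDefault: true },
    { id: "opus", label: "Opus" },
    { id: "haiku", label: "Haiku" },
  ],
  "codex-cli": [
    { id: "gpt-5.1-codex", label: "gpt-5.1-codex", inferenceLevels: ["low", "medium", "high"], defaultInference: "medium" },
    {
      id: "gpt-5.1-codex-max",
      label: "gpt-5.1-codex-max",
      inferenceLevels: ["low", "medium", "high", "xhigh"],
      defaultInference: "medium",
    },
    { id: "gpt-5.1-codex-mini", label: "gpt-5.1-codex-mini", inferenceLevels: ["low", "medium", "high"], defaultInference: "medium" },
    { id: "gpt-5.1", label: "gpt-5.1", inferenceLevels: ["low", "medium", "high"], defaultInference: "medium" },
  ],
  "gemini-cli": [
    { id: "gemini-3-pro-preview", label: "gemini-3-pro-preview" },
    { id: "gemini-2.5-pro", label: "gemini-2.5-pro" },
    { id: "gemini-2.5-flash", label: "gemini-2.5-flash" },
    { id: "gemini-2.5-flash-lite", label: "gemini-2.5-flash-lite" },
  ],
  "qwen-cli": [
    { id: "coder-model", label: "coder-model (qwen3-coder-plus-2025-09-23)" },
    { id: "vision-model", label: "vision-model (qwen3-vl-plus-2025-09-23)" },
  ],
};

export function getModelOptions(tool: string): ModelOption[] {
  return MODELS[tool] ?? [];
}

export function getDefaultModelOption(tool: string): ModelOption | undefined {
  const options = getModelOptions(tool);
  return options.find((opt) => opt.isDefault) ?? options[0];
}

export function getInferenceLevelsForModel(option?: ModelOption): InferenceLevel[] {
  return option?.inferenceLevels ?? [];
}

export function getDefaultInferenceForModel(
  option?: ModelOption,
): InferenceLevel | undefined {
  if (!option) return undefined;
  return option.defaultInference ?? option.inferenceLevels?.[0];
}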