claude-evolve 1.6.30 → 1.6.32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/ai-cli.sh +29 -19
- package/lib/config.sh +2 -2
- package/package.json +1 -1
package/lib/ai-cli.sh
CHANGED
|
@@ -17,9 +17,14 @@ call_ai_model_configured() {
|
|
|
17
17
|
|
|
18
18
|
# Build command directly based on model
|
|
19
19
|
case "$model_name" in
|
|
20
|
-
opus)
|
|
20
|
+
opus)
|
|
21
21
|
local ai_output
|
|
22
|
-
ai_output=$(timeout 300 claude --dangerously-skip-permissions --mcp-config '' --model
|
|
22
|
+
ai_output=$(timeout 300 claude --dangerously-skip-permissions --mcp-config '' --model opus -p "$prompt" 2>&1)
|
|
23
|
+
local ai_exit_code=$?
|
|
24
|
+
;;
|
|
25
|
+
sonnet)
|
|
26
|
+
local ai_output
|
|
27
|
+
ai_output=$(timeout 300 claude --dangerously-skip-permissions --mcp-config '' --model sonnet -p "$prompt" 2>&1)
|
|
23
28
|
local ai_exit_code=$?
|
|
24
29
|
;;
|
|
25
30
|
sonnet-think)
|
|
@@ -42,17 +47,17 @@ $prompt"
|
|
|
42
47
|
;;
|
|
43
48
|
gpt5high)
|
|
44
49
|
local ai_output
|
|
45
|
-
ai_output=$(timeout
|
|
50
|
+
ai_output=$(timeout 600 codex exec -m gpt-5 -c model_reasoning_effort="high" --dangerously-bypass-approvals-and-sandbox "$prompt" 2>&1)
|
|
46
51
|
local ai_exit_code=$?
|
|
47
52
|
;;
|
|
48
53
|
gpt5)
|
|
49
54
|
local ai_output
|
|
50
|
-
ai_output=$(timeout
|
|
55
|
+
ai_output=$(timeout 600 codex exec -m gpt-5 --dangerously-bypass-approvals-and-sandbox "$prompt" 2>&1)
|
|
51
56
|
local ai_exit_code=$?
|
|
52
57
|
;;
|
|
53
58
|
o3high)
|
|
54
59
|
local ai_output
|
|
55
|
-
ai_output=$(timeout
|
|
60
|
+
ai_output=$(timeout 600 codex exec -m o3-mini -c model_reasoning_effort="high" --dangerously-bypass-approvals-and-sandbox "$prompt" 2>&1)
|
|
56
61
|
local ai_exit_code=$?
|
|
57
62
|
;;
|
|
58
63
|
gemini-pro)
|
|
@@ -69,47 +74,52 @@ $prompt"
|
|
|
69
74
|
;;
|
|
70
75
|
cursor-sonnet)
|
|
71
76
|
local ai_output
|
|
72
|
-
ai_output=$(timeout
|
|
77
|
+
ai_output=$(timeout 600 cursor-agent sonnet-4.5 -p "$prompt" 2>&1)
|
|
73
78
|
local ai_exit_code=$?
|
|
74
79
|
;;
|
|
75
80
|
cursor-opus)
|
|
76
81
|
local ai_output
|
|
77
|
-
ai_output=$(timeout
|
|
82
|
+
ai_output=$(timeout 600 cursor-agent opus -p "$prompt" 2>&1)
|
|
83
|
+
local ai_exit_code=$?
|
|
84
|
+
;;
|
|
85
|
+
glm-openrouter)
|
|
86
|
+
local ai_output
|
|
87
|
+
ai_output=$(timeout 600 opencode -m openrouter/z-ai/glm-4.6 run "$prompt" 2>&1)
|
|
78
88
|
local ai_exit_code=$?
|
|
79
89
|
;;
|
|
80
|
-
glm)
|
|
90
|
+
glm-zai)
|
|
81
91
|
local ai_output
|
|
82
|
-
ai_output=$(timeout
|
|
92
|
+
ai_output=$(timeout 600 opencode -m zai-coding-plan/glm-4.6 run "$prompt" 2>&1)
|
|
83
93
|
local ai_exit_code=$?
|
|
84
94
|
;;
|
|
85
|
-
deepseek-
|
|
95
|
+
deepseek-openrouter)
|
|
86
96
|
local ai_output
|
|
87
|
-
ai_output=$(timeout
|
|
97
|
+
ai_output=$(timeout 600 opencode -m openrouter/deepseek/deepseek-v3.1-terminus run "$prompt" 2>&1)
|
|
88
98
|
local ai_exit_code=$?
|
|
89
99
|
;;
|
|
90
|
-
grok-code-fast)
|
|
100
|
+
grok-code-fast-openrouter)
|
|
91
101
|
local ai_output
|
|
92
|
-
ai_output=$(timeout
|
|
102
|
+
ai_output=$(timeout 600 opencode -m openrouter/x-ai/grok-code-fast-1 run "$prompt" 2>&1)
|
|
93
103
|
local ai_exit_code=$?
|
|
94
104
|
;;
|
|
95
|
-
grok-4)
|
|
105
|
+
grok-4-openrouter)
|
|
96
106
|
local ai_output
|
|
97
|
-
ai_output=$(timeout
|
|
107
|
+
ai_output=$(timeout 600 opencode -m openrouter/x-ai/grok-4 run "$prompt" 2>&1)
|
|
98
108
|
local ai_exit_code=$?
|
|
99
109
|
;;
|
|
100
|
-
codex-oss)
|
|
110
|
+
codex-oss-local)
|
|
101
111
|
# Codex-OSS via Codex CLI with Ollama backend
|
|
102
112
|
local ai_output
|
|
103
|
-
ai_output=$(timeout
|
|
113
|
+
ai_output=$(timeout 2400 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss "$prompt" 2>&1)
|
|
104
114
|
local ai_exit_code=$?
|
|
105
115
|
;;
|
|
106
|
-
kimi-k2-
|
|
116
|
+
kimi-k2-llamacloud)
|
|
107
117
|
# Kimi K2 via Codex CLI with Ollama cloud backend
|
|
108
118
|
local ai_output
|
|
109
119
|
ai_output=$(timeout 600 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss -m kimi-k2:1t-cloud "$prompt" 2>&1)
|
|
110
120
|
local ai_exit_code=$?
|
|
111
121
|
;;
|
|
112
|
-
deepseek-v3-
|
|
122
|
+
deepseek-v3-llamacloud)
|
|
113
123
|
# Deepseek via Codex CLI with Ollama cloud backend
|
|
114
124
|
local ai_output
|
|
115
125
|
ai_output=$(timeout 600 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss -m deepseek-v3.1:671b-cloud "$prompt" 2>&1)
|
package/lib/config.sh
CHANGED
|
@@ -54,9 +54,9 @@ DEFAULT_MAX_RETRIES=3
|
|
|
54
54
|
DEFAULT_MEMORY_LIMIT_MB=12288
|
|
55
55
|
|
|
56
56
|
# Default LLM CLI configuration
|
|
57
|
-
DEFAULT_LLM_RUN="codex-oss gemini-flash"
|
|
57
|
+
DEFAULT_LLM_RUN="glm-zai glm-zai glm-zai glm-zai glm-zai codex-oss-local gemini-flash"
|
|
58
58
|
# Ideate: Commercial models for idea generation + local fallback
|
|
59
|
-
DEFAULT_LLM_IDEATE="gemini-pro sonnet-think gpt5high glm grok-4 deepseek-
|
|
59
|
+
DEFAULT_LLM_IDEATE="gemini-pro sonnet-think gpt5high glm-openrouter grok-4-openrouter deepseek-openrouter glm-zai codex-oss-local"
|
|
60
60
|
|
|
61
61
|
# Load configuration from a YAML file and update variables
|
|
62
62
|
_load_yaml_config() {
|