claude-all-config 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/LICENSE.md +70 -0
- package/README.md +133 -0
- package/VERSION +1 -0
- package/agents/accessibility-reviewer.md +96 -0
- package/agents/ai-prompt-optimizer.md +94 -0
- package/agents/api-tester.md +102 -0
- package/agents/code-generator.md +94 -0
- package/agents/code-reviewer.md +47 -0
- package/agents/component-generator.md +102 -0
- package/agents/doc-generator.md +91 -0
- package/agents/migration-generator.md +94 -0
- package/agents/performance-analyzer.md +90 -0
- package/agents/proactive-mode.md +91 -0
- package/agents/readme-generator.md +101 -0
- package/agents/security-auditor.md +86 -0
- package/agents/terraform-generator.md +94 -0
- package/agents/test-generator.md +76 -0
- package/bin/agentrouter.json +36 -0
- package/bin/ai-chat +20 -0
- package/bin/antigravity.json +76 -0
- package/bin/api-manager +340 -0
- package/bin/claude-launcher +19 -0
- package/bin/claude-master +15 -0
- package/bin/claude_master.py +295 -0
- package/bin/cohere.json +7 -0
- package/bin/deepseek.json +44 -0
- package/bin/gemini.json +56 -0
- package/bin/glm.json +21 -0
- package/bin/groq.json +41 -0
- package/bin/minimax.json +26 -0
- package/bin/mistral.json +7 -0
- package/bin/moonshot.json +7 -0
- package/bin/ollama.json +36 -0
- package/bin/openai.json +46 -0
- package/bin/openrouter.json +38 -0
- package/bin/perplexity.json +12 -0
- package/bin/qwen.json +7 -0
- package/bin/switch-provider +73 -0
- package/bin/test.json +7 -0
- package/bin/xai.json +41 -0
- package/claude-all +2707 -0
- package/claude-config.json +340 -0
- package/claude-suite/REFACTORING_SUMMARY.md +88 -0
- package/claude-suite/auth/.antigravity_proxy.py +78 -0
- package/claude-suite/auth/__pycache__/openai_auth.cpython-312.pyc +0 -0
- package/claude-suite/auth/gemini_auth.py +80 -0
- package/claude-suite/auth/openai_auth.py +138 -0
- package/claude-suite/backups/claude-all-before-refactor +1075 -0
- package/claude-suite/backups/claude-all.backup +840 -0
- package/claude-suite/backups/claude-all.original +840 -0
- package/claude-suite/models/add-model-manual.sh +588 -0
- package/claude-suite/models/add-model.sh +114 -0
- package/claude-suite/models/model-switcher.sh +69 -0
- package/claude-suite/providers/claude-glm +89 -0
- package/claude-suite/providers/claude-glm-wrapper.sh +55 -0
- package/claude-suite/providers/claude-minimax +12 -0
- package/claude-suite/providers/claude-smart +132 -0
- package/claude-suite/providers/xai_chat.sh +56 -0
- package/claude-suite/utils/__pycache__/claude_master.cpython-312.pyc +0 -0
- package/claude-suite/utils/antigravity_proxy_server.py +168 -0
- package/claude-suite/utils/claude-all-help.txt +83 -0
- package/claude-suite/utils/claude_master.py +408 -0
- package/commands/brainstorm.md +5 -0
- package/commands/execute-plan.md +5 -0
- package/commands/write-plan.md +5 -0
- package/docs/ANTIGRAVITY-SETUP.md +176 -0
- package/docs/AUTH_CREDENTIALS.md +54 -0
- package/docs/NPM-INSTALLATION.md +166 -0
- package/hooks/hooks.json +15 -0
- package/hooks/run-hook.cmd +19 -0
- package/hooks/session-start.sh +52 -0
- package/install.sh +155 -0
- package/mcp.json +34 -0
- package/model/perplexity.json +12 -0
- package/package.json +69 -0
- package/plugins/README.md +47 -0
- package/plugins/installed_plugins.json +317 -0
- package/plugins/known_marketplaces.json +10 -0
- package/plugins/marketplace-info/marketplace.json +517 -0
- package/postinstall.js +100 -0
- package/scripts/antigravity_proxy_server.py +168 -0
- package/scripts/get_gemini_api_key.py +96 -0
- package/scripts/setup_antigravity_auth.py +171 -0
- package/skills/api-development/SKILL.md +11 -0
- package/skills/api-development/openapi/api-documentation.yaml +108 -0
- package/skills/brainstorming/SKILL.md +54 -0
- package/skills/code-quality/SKILL.md +196 -0
- package/skills/condition-based-waiting/SKILL.md +120 -0
- package/skills/condition-based-waiting/example.ts +158 -0
- package/skills/database-development/SKILL.md +11 -0
- package/skills/database-development/migrations/migration.template.sql +49 -0
- package/skills/defense-in-depth/SKILL.md +127 -0
- package/skills/deployment/SKILL.md +11 -0
- package/skills/deployment/ci-cd/github-actions.yml +95 -0
- package/skills/deployment/docker/Dockerfile.template +39 -0
- package/skills/dispatching-parallel-agents/SKILL.md +180 -0
- package/skills/documentation-generation/SKILL.md +8 -0
- package/skills/documentation-generation/templates/README.template.md +60 -0
- package/skills/error-handling/SKILL.md +267 -0
- package/skills/executing-plans/SKILL.md +76 -0
- package/skills/finishing-a-development-branch/SKILL.md +200 -0
- package/skills/frontend-design/frontend-design/SKILL.md +42 -0
- package/skills/integration-testing/SKILL.md +13 -0
- package/skills/integration-testing/examples/contract-test.py +317 -0
- package/skills/integration-testing/examples/e2e-test.js +147 -0
- package/skills/integration-testing/examples/test-isolation.md +94 -0
- package/skills/logging-monitoring/SKILL.md +66 -0
- package/skills/mobile-development/SKILL.md +11 -0
- package/skills/mobile-development/responsive/responsive.css +80 -0
- package/skills/performance-optimization/SKILL.md +9 -0
- package/skills/performance-optimization/profiling/profile.template.js +21 -0
- package/skills/receiving-code-review/SKILL.md +209 -0
- package/skills/refactoring/SKILL.md +11 -0
- package/skills/refactoring/code-smells/common-smells.md +115 -0
- package/skills/requesting-code-review/SKILL.md +105 -0
- package/skills/requesting-code-review/code-reviewer.md +146 -0
- package/skills/root-cause-tracing/SKILL.md +174 -0
- package/skills/root-cause-tracing/find-polluter.sh +63 -0
- package/skills/security-review/SKILL.md +11 -0
- package/skills/security-review/checklists/owasp-checklist.md +31 -0
- package/skills/sharing-skills/SKILL.md +194 -0
- package/skills/subagent-driven-development/SKILL.md +240 -0
- package/skills/subagent-driven-development/code-quality-reviewer-prompt.md +20 -0
- package/skills/subagent-driven-development/implementer-prompt.md +78 -0
- package/skills/subagent-driven-development/spec-reviewer-prompt.md +61 -0
- package/skills/systematic-debugging/CREATION-LOG.md +119 -0
- package/skills/systematic-debugging/SKILL.md +295 -0
- package/skills/systematic-debugging/test-academic.md +14 -0
- package/skills/systematic-debugging/test-pressure-1.md +58 -0
- package/skills/systematic-debugging/test-pressure-2.md +68 -0
- package/skills/systematic-debugging/test-pressure-3.md +69 -0
- package/skills/test-driven-development/SKILL.md +364 -0
- package/skills/testing-anti-patterns/SKILL.md +302 -0
- package/skills/testing-skills-with-subagents/SKILL.md +387 -0
- package/skills/testing-skills-with-subagents/examples/CLAUDE_MD_TESTING.md +189 -0
- package/skills/ui-ux-review/SKILL.md +13 -0
- package/skills/ui-ux-review/checklists/ux-heuristics.md +61 -0
- package/skills/using-git-worktrees/SKILL.md +213 -0
- package/skills/using-superpowers/SKILL.md +101 -0
- package/skills/verification-before-completion/SKILL.md +139 -0
- package/skills/writing-plans/SKILL.md +116 -0
- package/skills/writing-skills/SKILL.md +622 -0
- package/skills/writing-skills/anthropic-best-practices.md +1150 -0
- package/skills/writing-skills/graphviz-conventions.dot +172 -0
- package/skills/writing-skills/persuasion-principles.md +187 -0
- package/update.sh +36 -0
- package/utils/check-superpowers.sh +114 -0
- package/utils/claude-branding.md +166 -0
- package/utils/config.js +185 -0
- package/utils/custom-claude-config.sh +89 -0
- package/utils/custom-claude-hooks.md +129 -0
- package/utils/custom-claude-lib.js +222 -0
- package/utils/customize-claude-ui.sh +162 -0
- package/utils/fix-claude-integration.sh +133 -0
- package/utils/help.js +125 -0
- package/utils/install-curl.ps1 +135 -0
- package/utils/install-curl.sh +525 -0
- package/utils/install-superpowers.js +411 -0
- package/utils/install.js +298 -0
- package/utils/install.sh +182 -0
- package/utils/postinstall.js +63 -0
- package/utils/rename-claude.sh +96 -0
- package/utils/uninstall-superpowers.js +273 -0
- package/utils/uninstall.ps1 +136 -0
- package/utils/uninstall.sh +163 -0
- package/utils/update.sh +160 -0
@@ -0,0 +1,1075 @@
+#!/usr/bin/env bash
+
+# Cross-platform Claude-All Launcher
+# Supports: Linux, Termux, macOS, Windows (Git Bash/WSL)
+
+set -e
+
+# Platform detection
+detect_platform() {
+    case "$(uname -s)" in
+        Linux*) echo "Linux";;
+        Darwin*) echo "macOS";;
+        CYGWIN*|MINGW*|MSYS*) echo "Windows";;
+    esac
+}
+
+PLATFORM=$(detect_platform)
+
+# Colors - auto-detect and install if needed
+setup_colors() {
+    # Ensure tput is available for colors
+    if ! command -v tput &> /dev/null; then
+        if command -v pkg &> /dev/null && [[ "$PLATFORM" != "Windows" ]]; then
+            # Install ncurses-utils silently in background
+            pkg install -y ncurses-utils &>/dev/null &
+        fi
+    fi
+
+    if command -v tput &> /dev/null; then
+        GREEN=$(tput setaf 2 2>/dev/null || echo "")
+        BLUE=$(tput setaf 4 2>/dev/null || echo "")
+        RED=$(tput setaf 1 2>/dev/null || echo "")
+        YELLOW=$(tput setaf 3 2>/dev/null || echo "")
+        NC=$(tput sgr0 2>/dev/null || echo "")
+    else
+        # Fallback to plain text
+        GREEN=''
+        BLUE=''
+        RED=''
+        YELLOW=''
+        NC=''
+    fi
+}
+
+setup_colors
+
+# Portable home directory
+if [[ -n "$HOME" ]]; then
+    USER_HOME="$HOME"
+elif [[ -n "$USERPROFILE" ]]; then
+    # Windows
+    USER_HOME="$USERPROFILE"
+else
+    USER_HOME="$HOME"
+fi
+
+# API Key Files - Use user home directory
+GLM_API_KEY_FILE="$USER_HOME/.glm_api_key"
+MINIMAX_API_KEY_FILE="$USER_HOME/.minimax_api_key"
+
+# Configuration
+LITELLM_PORT=8555
+MODEL_OVERRIDE=""
+SELECTED_MODEL=""
+LITELLM_HOST="http://127.0.0.1:$LITELLM_PORT"
+
+# Function to get custom models
+get_custom_models() {
+    local custom_models=()
+    local model_dir="$SCRIPT_DIR/model"
+
+    # Ensure model directory exists
+    if [[ ! -d "$model_dir" ]]; then
+        mkdir -p "$model_dir" 2>/dev/null || return 0
+    fi
+
+    # Find JSON files using simple for loop (most compatible)
+    for json_file in "$model_dir"/*.json; do
+        if [[ -f "$json_file" ]]; then
+            local filename=$(basename "$json_file" .json)
+
+            # Skip default model files
+            case "$filename" in
+                "glm"|"groq"|"minimax"|"openai"|"gemini"|"xai"|"ollama")
+                    continue
+                    ;;
+            esac
+
+            # Parse JSON for model info
+            if command -v jq &> /dev/null; then
+                local provider_name=$(jq -r '.provider_name // "Unknown"' "$json_file" 2>/dev/null)
+                local description=$(jq -r '.description // "Custom Provider"' "$json_file" 2>/dev/null)
+                # Ensure we got valid values
+                [[ "$provider_name" == "null" || -z "$provider_name" ]] && provider_name="Unknown"
+                [[ "$description" == "null" || -z "$description" ]] && description="Custom Provider"
+                printf '%s:%s:%s\n' "$filename" "$provider_name" "$description"
+            else
+                # Fallback parsing without jq
+                local provider_name=$(grep '"provider_name"' "$json_file" | sed 's/.*"provider_name"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/' 2>/dev/null)
+                [[ -z "$provider_name" ]] && provider_name="$filename"
+                local description=$(grep '"description"' "$json_file" | sed 's/.*"description"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/' 2>/dev/null)
+                [[ -z "$description" ]] && description="Custom Provider"
+                printf '%s:%s:%s\n' "$filename" "$provider_name" "$description"
+            fi
+        fi
+    done
+}
+
+# Function to handle custom model
+handle_custom_model() {
+    local model_name="$1"
+    local model_file="$SCRIPT_DIR/model/${model_name}.json"
+
+    if [[ ! -f "$model_file" ]]; then
+        echo -e "${RED}Error: Model configuration not found: $model_file${NC}"
+        exit 1
+    fi
+
+    # Extract configuration
+    if command -v jq &> /dev/null; then
+        local api_base=$(jq -r '.api_base // ""' "$model_file" 2>/dev/null)
+        local api_key=$(jq -r '.api_key // ""' "$model_file" 2>/dev/null)
+        local model=$(jq -r '.model // ""' "$model_file" 2>/dev/null)
+        local provider_name=$(jq -r '.provider_name // ""' "$model_file" 2>/dev/null)
+    else
+        echo -e "${RED}Error: jq is required for custom models. Install with: pkg install jq${NC}"
+        exit 1
+    fi
+
+    # Get API key if not provided
+    if [[ -z "$api_key" || "$api_key" == "your-api-key-here" ]]; then
+        # Try to get from environment based on provider name
+        case "${model_name,,}" in
+            "qwen"|"qwen2")
+                if [[ -n "$DASHSCOPE_API_KEY" ]]; then
+                    api_key="$DASHSCOPE_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from DASHSCOPE_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter Qwen API Key (https://bailian.console.aliyun.com/):${NC}"
+                    read -s api_key
+                fi
+                ;;
+            "deepseek")
+                if [[ -n "$DEEPSEEK_API_KEY" ]]; then
+                    api_key="$DEEPSEEK_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from DEEPSEEK_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter Deepseek API Key:${NC}"
+                    read -s api_key
+                fi
+                ;;
+            "moonshot")
+                if [[ -n "$MOONSHOT_API_KEY" ]]; then
+                    api_key="$MOONSHOT_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from MOONSHOT_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter Moonshot API Key:${NC}"
+                    read -s api_key
+                fi
+                ;;
+            "perplexity")
+                if [[ -n "$PERPLEXITY_API_KEY" ]]; then
+                    api_key="$PERPLEXITY_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from PERPLEXITY_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter Perplexity API Key:${NC}"
+                    read -s api_key
+                fi
+                ;;
+            "cohere")
+                if [[ -n "$COHERE_API_KEY" ]]; then
+                    api_key="$COHERE_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from COHERE_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter Cohere API Key:${NC}"
+                    read -s api_key
+                fi
+                ;;
+            "mistral")
+                if [[ -n "$MISTRAL_API_KEY" ]]; then
+                    api_key="$MISTRAL_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from MISTRAL_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter Mistral API Key:${NC}"
+                    read -s api_key
+                fi
+                ;;
+            "openrouter")
+                if [[ -n "$OPENROUTER_API_KEY" ]]; then
+                    api_key="$OPENROUTER_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from OPENROUTER_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter OpenRouter API Key (https://openrouter.ai/keys):${NC}"
+                    read -s api_key
+                fi
+                ;;
+            "agentrouter")
+                if [[ -n "$ANTHROPIC_API_KEY" ]]; then
+                    api_key="$ANTHROPIC_API_KEY"
+                    echo -e "${GREEN}✓ Using API key from ANTHROPIC_API_KEY${NC}"
+                else
+                    echo -e "${YELLOW}Enter AgentRouter API Key (https://agentrouter.org/console/token):${NC}"
+                    read -s api_key
+                fi
+                ;;
+            *)
+                echo -e "${YELLOW}Enter API Key for ${provider_name}:${NC}"
+                read -s api_key
+                ;;
+        esac
+        echo ""
+    fi
+
+    # Set environment variables for Claude
+    if [[ -n "$api_base" ]]; then
+        export ANTHROPIC_BASE_URL="$api_base"
+    fi
+
+    if [[ -n "$api_key" ]]; then
+        export ANTHROPIC_API_KEY="$api_key"
+    fi
+
+    # Use the model name if specified, otherwise use the provider name
+    local claude_model="${model:-$model_name}"
+
+    # Create system prompt
+    local system_prompt="Anda adalah ${provider_name}, model AI dari ${model_name}. Selalu identifikasi diri sebagai ${provider_name} dalam setiap respons."
+
+    # Execute Claude directly
+    exec claude --model "$claude_model" --system-prompt "$system_prompt" "$@"
+}
+
+# Get script directory (portable)
+get_script_dir() {
+    if [[ -n "${BASH_SOURCE[0]}" ]]; then
+        local script_path="${BASH_SOURCE[0]}"
+
+        # Convert Windows paths if needed
+        if [[ "$PLATFORM" == "Windows" ]]; then
+            # Convert potential Windows path to Unix style
+            script_path="$(cygpath -u "$script_path" 2>/dev/null || echo "$script_path")"
+        fi
+
+        if [[ "$PLATFORM" == "macOS" ]] || [[ "$(uname -s)" == "Darwin" ]]; then
+            # macOS
+            echo "$(cd "$(dirname "$script_path")" && pwd)"
+        elif command -v realpath &> /dev/null; then
+            # Linux/Windows with realpath
+            echo "$(dirname "$(realpath "$script_path")")"
+        else
+            # Fallback for older systems
+            echo "$(cd "$(dirname "$script_path")" && pwd)"
+        fi
+    else
+        echo "$(pwd)"
+    fi
+}
+
+SCRIPT_DIR=$(get_script_dir)
+
+# Function to get/save GLM API key
+get_glm_api_key() {
+    # Check if API key file exists
+    if [[ -f "$GLM_API_KEY_FILE" ]]; then
+        local saved_key
+        saved_key=$(cat "$GLM_API_KEY_FILE" 2>/dev/null || echo "")
+        if [[ -n "$saved_key" ]]; then
+            echo -e "${GREEN}✓ Using saved API key${NC}"
+            ANTHROPIC_AUTH_TOKEN="$saved_key"
+            export ANTHROPIC_AUTH_TOKEN
+            return 0
+        fi
+    fi
+
+    # Ask for new API key
+    echo ""
+    echo "Enter ZhipuAI/Z.AI API Key:"
+    echo "Get it from: https://open.bigmodel.cn/usercenter/apikeys"
+    if [[ "$PLATFORM" == "Windows" ]]; then
+        # Windows - use regular read
+        read -p "API Key: " ANTHROPIC_AUTH_TOKEN
+    else
+        # Linux/macOS/Termux - silent read
+        read -s -p "API Key: " ANTHROPIC_AUTH_TOKEN
+    fi
+    echo ""
+
+    if [[ -n "$ANTHROPIC_AUTH_TOKEN" ]]; then
+        # Save for next time
+        echo "$ANTHROPIC_AUTH_TOKEN" > "$GLM_API_KEY_FILE" 2>/dev/null || true
+        chmod 600 "$GLM_API_KEY_FILE" 2>/dev/null || true
+        echo -e "${GREEN}✓ API key saved for next time${NC}"
+        export ANTHROPIC_AUTH_TOKEN
+    else
+        echo -e "${RED}No API key provided${NC}"
+        exit 1
+    fi
+}
+
+# Function to get/save MiniMax API key
+get_minimax_api_key() {
+    local api_key=""
+
+    # Check if API key file exists
+    if [[ -f "$MINIMAX_API_KEY_FILE" ]]; then
+        local saved_key
+        saved_key=$(cat "$MINIMAX_API_KEY_FILE" 2>/dev/null || echo "")
+        if [[ -n "$saved_key" ]]; then
+            echo -e "${GREEN}✓ Using saved API key${NC}"
+            api_key="$saved_key"
+        fi
+    fi
+
+    # Ask for new API key if not found
+    if [[ -z "$api_key" ]]; then
+        echo ""
+        echo "Enter MiniMax API Key:"
+        echo "Get it from: https://platform.minimax.io/"
+        if [[ "$PLATFORM" == "Windows" ]]; then
+            read -p "API Key: " api_key
+        else
+            read -s -p "API Key: " api_key
+        fi
+        echo ""
+
+        if [[ -n "$api_key" ]]; then
+            # Save for next time
+            echo "$api_key" > "$MINIMAX_API_KEY_FILE" 2>/dev/null || true
+            chmod 600 "$MINIMAX_API_KEY_FILE" 2>/dev/null || true
+            echo -e "${GREEN}✓ API key saved for next time${NC}"
+        else
+            echo -e "${RED}No API key provided${NC}"
+            exit 1
+        fi
+    fi
+
+    # Export BOTH variables for compatibility
+    # MiniMax endpoint uses Authorization header via ANTHROPIC_API_KEY
+    export ANTHROPIC_API_KEY="$api_key"
+    export ANTHROPIC_AUTH_TOKEN="$api_key"
+}
+
+check_dependencies() {
+    echo -e "${BLUE}Checking dependencies...${NC}"
+
+    # Check for python3
+    if ! command -v python3 &> /dev/null; then
+        echo -e "${RED}Error: python3 is not installed.${NC}"
+        echo "Please install Python 3 first."
+        exit 1
+    fi
+
+    # Check for claude CLI (OPTIONAL - won't exit if not found)
+    if ! command -v claude &> /dev/null; then
+        echo -e "${YELLOW}⚠️ 'claude' command not found.${NC}"
+        echo -e "${YELLOW} For direct providers (GLM, MiniMax, OpenAI), you don't need it!${NC}"
+        echo -e "${YELLOW} For LiteLLM providers (Gemini, Groq, Ollama), install with:${NC}"
+        echo -e "${YELLOW} npm install -g @anthropic-ai/claude-code${NC}"
+        echo ""
+        read -p "Continue without claude CLI? (Y/n): " continue_without
+        if [[ "$continue_without" =~ ^[Nn]$ ]]; then
+            echo "Installing @anthropic-ai/claude-code..."
+            npm install -g @anthropic-ai/claude-code || {
+                echo -e "${YELLOW}Failed to install claude CLI. Continuing anyway...${NC}"
+            }
+        else
+            echo -e "${GREEN}✓ Skipping claude CLI installation${NC}"
+        fi
+    else
+        echo -e "${GREEN}✓ claude CLI found${NC}"
+    fi
+
+    # Check for npm (only needed for claude CLI installation)
+    if command -v claude &> /dev/null; then
+        if ! command -v npm &> /dev/null; then
+            echo -e "${RED}Error: npm is not installed (needed for claude CLI).${NC}"
+            echo "Please install npm first."
+            exit 1
+        fi
+    fi
+
+    # Check for litellm (only needed for some providers)
+    if ! python3 -m pip show litellm &> /dev/null; then
+        echo -e "${YELLOW}'litellm' not found. Installing via pip...${NC}"
+        python3 -m pip install litellm[proxy] || {
+            echo -e "${YELLOW}Failed to install litellm. Some features may not work.${NC}"
+        }
+    fi
+}
+
+check_gemini_oauth() {
+    # Check if we have ADC credentials
+    local adc_path
+    if [[ -n "$HOME" ]]; then
+        adc_path="$HOME/.config/gcloud/application_default_credentials.json"
+    else
+        adc_path="$USERPROFILE/.config/gcloud/application_default_credentials.json"
+    fi
+
+    if [[ -f "$adc_path" ]]; then
+        return 0
+    fi
+
+    # If not, check if gcloud is installed
+    if command -v gcloud &> /dev/null; then
+        echo -e "${BLUE}gcloud found. Attempting login...${NC}"
+        gcloud auth application-default login
+        return
+    fi
+
+    # Fallback to custom python script
+    echo -e "${YELLOW}gcloud not found. Using lightweight Python Auth helper...${NC}"
+
+    # Install dependency
+    if ! python3 -m pip show google-auth-oauthlib &> /dev/null; then
+        echo "Installing google-auth-oauthlib..."
+        python3 -m pip install google-auth-oauthlib || true
+    fi
+
+    # Run helper script
+    local auth_script="$SCRIPT_DIR/gemini_auth.py"
+    if [[ ! -f "$auth_script" ]]; then
+        curl -fsSL https://raw.githubusercontent.com/zesbe/CliAllModel/main/gemini_auth.py -o "$SCRIPT_DIR/gemini_auth.py" 2>/dev/null || true
+        auth_script="$SCRIPT_DIR/gemini_auth.py"
+    fi
+
+    if [[ -f "$auth_script" ]]; then
+        python3 "$auth_script"
+    fi
+
+    if [[ ! -f "$adc_path" ]]; then
+        echo -e "${RED}Authentication failed or cancelled.${NC}"
+        exit 1
+    fi
+}
+
+check_openai_oauth() {
+    local cred_path
+    if [[ -n "$HOME" ]]; then
+        cred_path="$HOME/.config/openai/credentials.json"
+    else
+        cred_path="$USERPROFILE/.config/openai/credentials.json"
+    fi
+
+    if [[ ! -f "$cred_path" ]]; then
+        echo -e "${YELLOW}No cached OpenAI OAuth token found. Launching helper...${NC}"
+
+        # Install dependency
+        python3 -m pip install requests > /dev/null 2>&1 || true
+
+        local auth_script="$SCRIPT_DIR/openai_auth.py"
+        if [[ ! -f "$auth_script" ]]; then
+            curl -fsSL https://raw.githubusercontent.com/zesbe/CliAllModel/main/openai_auth.py -o "$SCRIPT_DIR/openai_auth.py" 2>/dev/null || true
+            auth_script="$SCRIPT_DIR/openai_auth.py"
+        fi
+
+        if [[ -f "$auth_script" ]]; then
+            python3 "$auth_script"
+        fi
+
+        if [[ ! -f "$cred_path" ]]; then
+            echo -e "${RED}OpenAI OAuth failed.${NC}"
+            exit 1
+        fi
+    fi
+
+    # Extract access token
+    OPENAI_ACCESS_TOKEN=$(python3 -c "import json, os; print(json.load(open(os.path.expanduser('$cred_path')))['access_token'])" 2>/dev/null || echo "")
+    export OPENAI_API_KEY="$OPENAI_ACCESS_TOKEN"
+}
+
+start_litellm_proxy() {
+    local model=$1
+    echo -e "${BLUE}Starting LiteLLM proxy for model: $model...${NC}"
+
+    # Kill any existing litellm on this port (use pkill for portability)
+    if [[ "$PLATFORM" == "Darwin" ]]; then
+        # macOS
+        lsof -ti:$LITELLM_PORT | xargs kill -9 2>/dev/null || true
+    elif [[ "$PLATFORM" == "Windows" ]]; then
+        # Windows - try netstat
+        netstat -ano 2>/dev/null | grep ":$LITELLM_PORT" | awk '{print $5}' | while read pid; do
+            kill -f $pid 2>/dev/null || true
+        done
+    else
+        # Linux/Termux
+        fuser -k $LITELLM_PORT/tcp &> /dev/null || true
+    fi
+
+    # Start litellm in background
+    python3 -m litellm --model "$model" --port $LITELLM_PORT --drop_params &> /tmp/litellm.log &
+    LITELLM_PID=$!
+
+    # Wait for it to start
+    echo -n "Waiting for proxy to start..."
+    for i in {1..10}; do
+        if curl -s $LITELLM_HOST/health &> /dev/null; then
+            echo -e " ${GREEN}Ready!${NC}"
+            return 0
+        fi
+        sleep 1
+        echo -n "."
+    done
+
+    echo -e "\n${RED}Failed to start LiteLLM proxy. Check logs:${NC}"
+    cat /tmp/litellm.log 2>/dev/null || echo "No log file found"
+    kill $LITELLM_PID 2>/dev/null || true
+    exit 1
+}
+
+cleanup() {
+    if [[ -n "$LITELLM_PID" ]]; then
+        echo -e "\n${BLUE}Stopping LiteLLM proxy...${NC}"
+        kill $LITELLM_PID 2>/dev/null || true
+    fi
+}
+
+trap cleanup EXIT
+
+# Interactive model selection
+interactive_model_select() {
+    local provider=$1
+    local model_file="$SCRIPT_DIR/model/${provider}.json"
+
+    if [[ ! -f "$model_file" ]]; then
+        echo -e "${RED}Config file not found: $model_file${NC}"
+        return 1
+    fi
+
+    # Display models using Python (flush output to stderr)
+    python3 << EOF >&2
+import json
+import sys
+
+with open('$model_file', 'r') as f:
+    data = json.load(f)
+
+print('')
+print('=== Select Model ===')
+for i, model in enumerate(data['models'], 1):
+    name = model['name']
+    desc = model['description']
+    print(f'{i}) {name} - {desc}')
+print('')
+print('Available Models:')
+for i, model in enumerate(data['models'], 1):
+    name = model['name']
+    desc = model['description']
+    print(f' {i}. {name} - {desc}')
+print('')
+sys.stderr.flush()
+EOF
+
+    # Save model list to temp file and get count
+    model_count=$(python3 << EOF
+import json
+with open('$model_file', 'r') as f:
+    data = json.load(f)
+models = [m['id'] for m in data['models']]
+with open('$SCRIPT_DIR/.${provider}_models.tmp', 'w') as f:
+    for m in models:
+        f.write(m + '\n')
+print(len(models))
+EOF
+)
+
+    # Wait for user input
+    echo -n "Select model [1-$model_count]: "
+    read choice
+
+    # Validate choice
+    if [[ -z "$choice" ]] || ! [[ "$choice" =~ ^[0-9]+$ ]]; then
+        echo -e "${RED}Please enter a number (1-$model_count)${NC}"
+        return 1
+    fi
+
+    # Read model ID from temp file
+    if [[ -f "$SCRIPT_DIR/.${provider}_models.tmp" ]]; then
+        local model_ids
+        model_ids=($(cat "$SCRIPT_DIR/.${provider}_models.tmp"))
+        local idx=$((choice - 1))
+
+        if [[ $idx -ge 0 ]] && [[ $idx -lt ${#model_ids[@]} ]]; then
+            local selected="${model_ids[$idx]}"
+            echo -e "${GREEN}✓ Selected: $selected${NC}"
+            echo "$selected"
+            rm -f "$SCRIPT_DIR/.${provider}_models.tmp"
+            return 0
+        else
+            echo -e "${RED}Invalid choice: $choice${NC}"
+            echo "Please select 1-${#model_ids[@]}"
+        fi
+    else
+        echo -e "${RED}Model list not found${NC}"
+    fi
+
+    return 1
+}
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        -m|--model)
+            MODEL_OVERRIDE="$2"
+            shift 2
+            ;;
+        *)
+            if [[ -z "$choice" ]]; then
+                choice="$1"
+            fi
+            shift
+            ;;
+    esac
+done
+
+# Handle direct argument for custom models
+if [[ -n "$1" ]] && [[ "$1" =~ ^[0-9]+$ ]] && [[ "$1" -ge 12 ]]; then
+    choice="$1"
+    export CHOICE="$choice"
+
+    # Check if it's model manager - get actual number
+    if [[ -f "$SCRIPT_DIR/add-model-manual.sh" ]]; then
+        # Count custom models
+        custom_count=0
+        while IFS= read -r model_info; do
+            if [[ -n "$model_info" ]]; then
+                ((custom_count++))
+            fi
+        done < <(get_custom_models)
+
+        model_manager_num=$((12 + custom_count))
+        if [[ $choice -eq $model_manager_num ]]; then
+            exec "$SCRIPT_DIR/add-model-manual.sh"
+        fi
+    fi
+
+    # Handle custom model selection
+    custom_index=$((choice - 12))
+    count=0
+    while IFS= read -r model_info; do
+        if [[ -n "$model_info" ]]; then
+            if [[ $count -eq $custom_index ]]; then
+                IFS=':' read -r filename provider_name description <<< "$model_info"
+                echo -e "${BLUE}Using ${provider_name}...${NC}"
+                handle_custom_model "$filename" "${@:2}"
+                exit 0
+            fi
+            ((count++))
+        fi
+    done < <(get_custom_models)
+    echo -e "${RED}Invalid custom model selection${NC}"
+    exit 1
+fi
+
+# Handle direct CHOICE (for environment variable or argument)
+if [[ -n "$CHOICE" ]]; then
+    choice="$CHOICE"
+fi
+
+# Check dependencies
+check_dependencies() {
+    # Check for required commands
+    local missing_deps=()
+
+    if ! command -v jq &> /dev/null; then
+        echo -e "${YELLOW}Warning: jq not found. JSON parsing will be limited.${NC}"
+        echo -e "${YELLOW}Install jq: pkg install jq (Termux) or apt-get install jq${NC}"
+        echo ""
+    fi
+
+    if [[ "$PLATFORM" == "Windows" ]] && ! command -v nano &> /dev/null && ! command -v vim &> /dev/null; then
+        echo -e "${YELLOW}Warning: No text editor found. Install nano or vim.${NC}"
+        echo ""
+    fi
+}
+
+# Main Menu
+if [[ -z "$choice" ]]; then
+    clear
+    echo -e "${GREEN}=====================================${NC}"
+    echo -e "${GREEN} Claude Code Multi-Model Launcher ${NC}"
+    echo -e "${GREEN}=====================================${NC}"
+    echo "Select your AI Provider:"
+    echo "1) MiniMax (Direct Anthropic API)"
+    echo "2) Google Gemini (API Key - AI Studio)"
+    echo "3) Google Gemini (OAuth - Vertex AI)"
+    echo "4) OpenAI (API Key)"
+    echo "5) OpenAI (OAuth - Experimental)"
+    echo "6) xAI / Grok (API Key)"
+    echo "7) ZhipuAI / GLM (API Key)"
+    echo "8) Groq (API Key)"
+    echo "9) Ollama (Local Models)"
+    echo "10) 🔑 API Key Manager (Update/Edit keys)"
+    echo "11) Custom / Other"
+
+    # Display custom models and track model manager number
+    next_num=12
+    has_custom=false
+
+    # Get all custom models into array first
+    custom_models_array=()
+    while IFS= read -r model_info; do
+        if [[ -n "$model_info" ]]; then
+            has_custom=true
+            custom_models_array+=("$model_info")
+        fi
+    done < <(get_custom_models)
+
+    # Display custom models
+    for model_info in "${custom_models_array[@]}"; do
+        IFS=':' read -r filename provider_name description <<< "$model_info"
+        echo "${next_num}) ${provider_name} (${description})"
+        ((next_num++))
+    done
+
+    # Add model manager
+    model_manager_num=$next_num
+    echo "${next_num}) ➕ Add/Edit/Delete Models"
+    max_choice=$next_num
+    export model_manager_num # Export for later use
+
+    echo -e "${GREEN}=====================================${NC}"
+    read -p "Enter choice [1-$max_choice]: " choice
+fi
+
+# Model name will be set dynamically based on provider selection
+
+case $choice in
+    1)
+        # MiniMax Direct
+        echo -e "${BLUE}Configuring for MiniMax...${NC}"
+
+        # Get API key (with auto-save feature)
+        get_minimax_api_key
+
+        export ANTHROPIC_BASE_URL="https://api.minimax.io/anthropic"
+        echo -e "${YELLOW}Note: MiniMax maps model names automatically.${NC}"
+
+        # Check if model was specified in arguments
+        if [[ -n "$MODEL_OVERRIDE" ]]; then
+            exec claude --model "$MODEL_OVERRIDE" --system-prompt "Anda adalah MiniMax, model AI dari perusahaan MiniMax. Selalu identifikasi diri sebagai MiniMax dalam setiap respons." "$@"
+        else
+            # No model specified, use default
+            echo -e "${YELLOW}No model specified, using default: claude-3-5-sonnet-20241022${NC}"
+            exec claude --model "claude-3-5-sonnet-20241022" --system-prompt "Anda adalah MiniMax, model AI dari perusahaan MiniMax. Selalu identifikasi diri sebagai MiniMax dalam setiap respons." "$@"
+        fi
+        ;;
+    2)
+        # Gemini API Key - Direct Integration
+        echo -e "${BLUE}Configuring for Gemini (AI Studio)...${NC}"
+        if [[ -z "$GEMINI_API_KEY" ]]; then
+            echo "Get Key: https://aistudio.google.com/app/apikey"
+            read -s -p "Enter Gemini API Key: " GEMINI_API_KEY
+            echo ""
+            export GEMINI_API_KEY
+        fi
+
+        # Interactive model selection
+        if interactive_model_select "gemini"; then
+            MODEL_NAME="$selected_model"
+        else
+            # Default to Gemini 1.5 Flash
+            MODEL_NAME="gemini-1.5-flash"
+            echo -e "${YELLOW}Using default model: $MODEL_NAME${NC}"
+            read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
+            [[ -n "$input_model" ]] && MODEL_NAME=$input_model
+        fi
+
+        # Map model names to Gemini API format
+        case "$MODEL_NAME" in
+            "gemini-3.0-flash"|"gemini-3.0-pro"|"gemini-3.0-ultra")
+                # Direct mapping for 3.0 models
+                CLAUDE_MODEL="$MODEL_NAME"
+                ;;
+            "gemini-2.0-flash-exp")
+                CLAUDE_MODEL="gemini-2.0-flash-exp"
+                ;;
+            "gemini/gemini-1.5-pro"|"gemini-1.5-pro")
+                CLAUDE_MODEL="gemini-1.5-pro"
+                ;;
+            "gemini/gemini-1.5-pro-latest")
+                CLAUDE_MODEL="gemini-1.5-pro-latest"
+                ;;
+            "gemini/gemini-1.5-flash"|"gemini-1.5-flash")
+                CLAUDE_MODEL="gemini-1.5-flash"
+                ;;
+            "gemini/gemini-1.5-flash-8b"|"gemini-1.5-flash-8b")
+                CLAUDE_MODEL="gemini-1.5-flash-8b"
+                ;;
+            "gemini/gemini-1.5-flash-latest")
+                CLAUDE_MODEL="gemini-1.5-flash-latest"
+                ;;
+            "gemini/gemini-1.0-pro"|"gemini-1.0-pro")
+                CLAUDE_MODEL="gemini-1.0-pro"
+                ;;
+            *)
+                CLAUDE_MODEL="$MODEL_NAME"
+                ;;
+        esac
+
+        echo -e "${GREEN}✓ Using model: $CLAUDE_MODEL${NC}"
+
+        # Configure Gemini direct API endpoint
+        export ANTHROPIC_BASE_URL="https://generativelanguage.googleapis.com/v1beta/anthropic"
+        export ANTHROPIC_API_KEY="$GEMINI_API_KEY"
+
+        # Execute Claude with Gemini model and system prompt
+        exec claude --model "$CLAUDE_MODEL" --system-prompt "Anda adalah Gemini, model AI dari Google. Selalu identifikasi diri sebagai Gemini dalam setiap respons." "$@"
+        ;;
+    3)
+        # AntiGravity (Google Internal) - Direct Connection
+        echo -e "${BLUE}Configuring for AntiGravity (Google Internal)...${NC}"
+
+        # Check for authentication file
+        AUTH_FILE="$HOME/.config/claude-all/antigravity/google_internal_auth.json"
+        if [[ ! -f "$AUTH_FILE" ]]; then
+            echo -e "${RED}❌ AntiGravity authentication not found!${NC}"
+            echo ""
+            echo -e "${YELLOW}Please run setup first:${NC}"
+            echo " python3 setup_antigravity_auth.py"
+            echo ""
+            echo -e "${YELLOW}Note: You need to be on Google network/VPN${NC}"
+            exit 1
+        fi
+
+        # Load authentication
+        if command -v jq &> /dev/null; then
+            ACCESS_TOKEN=$(jq -r '.access_token // empty' "$AUTH_FILE" 2>/dev/null)
+            REFRESH_TOKEN=$(jq -r '.refresh_token // empty' "$AUTH_FILE" 2>/dev/null)
+        else
+            echo -e "${RED}❌ jq is required for authentication${NC}"
+            exit 1
+        fi
+
+        if [[ -z "$ACCESS_TOKEN" ]]; then
+            echo -e "${RED}❌ No access token found!${NC}"
+            echo -e "${YELLOW}Please run setup again:${NC}"
+            echo " python3 setup_antigravity_auth.py"
+            exit 1
+        fi
+
+        echo -e "${GREEN}✓ Authentication loaded${NC}"
+
+        # Interactive model selection using antigravity.json
+        if interactive_model_select "antigravity"; then
+            MODEL_NAME="$selected_model"
+        else
+            # Default to latest
+            MODEL_NAME="gemini-2.5-flash"
+            echo -e "${YELLOW}Using default model: $MODEL_NAME${NC}"
+        fi
+
+        echo -e "${GREEN}✓ Using model: $MODEL_NAME${NC}"
+        echo -e "${BLUE}Connecting to AntiGravity internal API...${NC}"
+
+        # Set environment for AntiGravity
+        export ANTHROPIC_API_KEY="$ACCESS_TOKEN"
+        export ANTHROPIC_BASE_URL="https://antigravity.corp.google.com/v1"
+        export ANTHROPIC_AUTH_TOKEN="$ACCESS_TOKEN"
+
+        # Add refresh token if available
+        if [[ -n "$REFRESH_TOKEN" ]]; then
+            export ANTIGRAVITY_REFRESH_TOKEN="$REFRESH_TOKEN"
+        fi
+
+        # Execute Claude directly with AntiGravity
+        exec claude --model "$MODEL_NAME" --system-prompt "Anda adalah Gemini dari Google Internal AntiGravity. Selalu identifikasi diri sebagai Gemini dari Google Internal." "$@"
+        ;;
+    4)
+        # OpenAI API Key
+        echo -e "${BLUE}Configuring for OpenAI...${NC}"
+        if [[ -z "$OPENAI_API_KEY" ]]; then
+            echo "Get Key: https://platform.openai.com/api-keys"
+            read -p "Enter OpenAI API Key: " OPENAI_API_KEY
+            export OPENAI_API_KEY
+        fi
+
+        # Get model selection
+        selected_model=$(interactive_model_select "openai")
+        if [[ $? -eq 0 ]]; then
+            # Success - got model from menu
+            MODEL_NAME="$selected_model"
+        else
+            # Fallback to manual input
+            MODEL_NAME="gpt-4o"
+            read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
+            [[ -n "$input_model" ]] && MODEL_NAME=$input_model
+        fi
+
+        export ANTHROPIC_BASE_URL="https://api.openai.com/v1/"
+        export ANTHROPIC_API_KEY="$OPENAI_API_KEY"
+        exec claude --model "$MODEL_NAME" "$@"
+        ;;
+    5)
+        # OpenAI OAuth (Experimental)
+        echo -e "${BLUE}Configuring for OpenAI (OAuth Experimental)...${NC}"
+
+        check_openai_oauth
+
+        selected_model=$(interactive_model_select "openai")
+        if [[ $? -eq 0 ]]; then
+            # Success - got model from menu
+            MODEL_NAME="$selected_model"
+        else
+            # Fallback to manual input
+            MODEL_NAME="gpt-4o"
+            read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
+            [[ -n "$input_model" ]] && MODEL_NAME=$input_model
+        fi
+
+        export ANTHROPIC_BASE_URL="https://api.openai.com/v1/"
+        export ANTHROPIC_API_KEY="$OPENAI_ACCESS_TOKEN"
+        exec claude --model "$MODEL_NAME" "$@"
+        ;;
+    6)
+        # xAI
+        echo -e "${BLUE}Configuring for xAI (Grok)...${NC}"
+        if [[ -z "$XAI_API_KEY" ]]; then
+            echo "Get Key: https://console.x.ai/"
+            read -p "Enter xAI API Key: " XAI_API_KEY
+            export XAI_API_KEY
+        fi
+
+        # Get model selection
+        selected_model=$(interactive_model_select "xai")
+        if [[ $? -eq 0 ]]; then
+            # Success - got model from menu
+            MODEL_NAME="${selected_model#xai/}"
+        else
+            # Fallback to manual input
+            MODEL_NAME="grok-beta"
+            read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
+            [[ -n "$input_model" ]] && MODEL_NAME=$input_model
+        fi
+
+        export ANTHROPIC_BASE_URL="https://api.x.ai/v1/"
+        export ANTHROPIC_API_KEY="$XAI_API_KEY"
+        exec claude --model "$MODEL_NAME" "$@"
+        ;;
+    7)
+        # ZhipuAI
+        echo -e "${BLUE}Configuring for ZhipuAI (GLM)...${NC}"
+
+        # Get API key (with auto-save feature)
+        get_glm_api_key
+
+        # Check if model config exists
+        model_file="$SCRIPT_DIR/model/glm.json"
+        if [[ -f "$model_file" ]]; then
+            # Interactive model selection
+            selected_model=$(interactive_model_select "glm")
+            if [[ $? -eq 0 ]] && [[ -n "$selected_model" ]]; then
+                # Keep full model name with zhipu/ prefix
+                MODEL_NAME="$selected_model"
+            else
+                MODEL_NAME="claude-3-5-sonnet-20241022"
+            fi
+        else
+            # No model config, use default
+            echo -e "${YELLOW}Model config not found, using default: claude-3-5-sonnet-20241022${NC}"
+            MODEL_NAME="claude-3-5-sonnet-20241022"
+        fi
+
+        export ANTHROPIC_BASE_URL="https://api.z.ai/api/anthropic"
+        export ANTHROPIC_API_KEY="$ANTHROPIC_AUTH_TOKEN"
+        export CLAUDE_MODEL="$MODEL_NAME"
+        exec claude --model "$MODEL_NAME" --system-prompt "Anda adalah GLM, model AI dari ZhipuAI. Selalu identifikasi diri sebagai GLM dalam setiap respons." "$@"
+        ;;
+    8)
+        # Groq
+        echo -e "${BLUE}Configuring for Groq...${NC}"
+        if [[ -z "$GROQ_API_KEY" ]]; then
+            echo "Get Key: https://console.groq.com/keys"
+            read -p "Enter Groq API Key: " GROQ_API_KEY
+            export GROQ_API_KEY
+        fi
+
+        # Get model selection
+        selected_model=$(interactive_model_select "groq")
+        if [[ $? -eq 0 ]]; then
+            # Success - got model from menu
+            MODEL_NAME="${selected_model#groq/}"
+        else
+            # Fallback to manual input
+            MODEL_NAME="llama-3.1-70b-versatile"
+            read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
+            [[ -n "$input_model" ]] && MODEL_NAME=$input_model
+        fi
+
+        export ANTHROPIC_BASE_URL="https://api.groq.com/openai/v1/"
+        export ANTHROPIC_API_KEY="$GROQ_API_KEY"
+        exec claude --model "$MODEL_NAME" "$@"
+        ;;
+    9)
+        # Ollama
+        check_dependencies
+        echo -e "${BLUE}Configuring for Ollama...${NC}"
+        if [[ -z "$OLLAMA_HOST" ]]; then
+            read -p "Enter Ollama Host [default: http://localhost:11434]: " OLLAMA_HOST
+            [[ -z "$OLLAMA_HOST" ]] && OLLAMA_HOST="http://localhost:11434"
+            export OLLAMA_HOST
+        fi
+
+        if interactive_model_select "ollama"; then
+            MODEL_NAME="$selected_model"
+            start_litellm_proxy "$MODEL_NAME"
+        else
+            MODEL_NAME="ollama/llama3"
+            read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
+            [[ -n "$input_model" ]] && MODEL_NAME=$input_model
+            start_litellm_proxy "$MODEL_NAME"
+        fi
+
+        export ANTHROPIC_BASE_URL="$LITELLM_HOST"
+        export ANTHROPIC_API_KEY="sk-litellm"
+        exec claude "$@"
+        ;;
+    10)
+        # API Key Manager
+        echo -e "${BLUE}Opening API Key Manager...${NC}"
+        # Check if api-manager exists in the same directory
+        if [[ -f "$SCRIPT_DIR/api-manager" ]]; then
+            "$SCRIPT_DIR/api-manager"
+        else
+            echo -e "${YELLOW}API Manager not found. Please install or check path.${NC}"
+        fi
+        echo ""
+        echo -e "${YELLOW}Press Enter to return to main menu...${NC}"
+        read
+        # Restart the menu
+        exec "$0"
+        ;;
+    11)
+        # Custom
+        check_dependencies
+        echo -e "${BLUE}Configuring Custom LiteLLM Model...${NC}"
+        read -p "Enter LiteLLM Model String: " MODEL_NAME
+        read -p "Press Enter to continue..."
+
+        start_litellm_proxy "$MODEL_NAME"
+        export ANTHROPIC_BASE_URL="$LITELLM_HOST"
+        export ANTHROPIC_API_KEY="sk-litellm"
+        exec claude "$@"
+        ;;
+esac
+
+# Handle custom models (12+) but exclude model manager
+if [[ "$choice" =~ ^[0-9]+$ ]] && [[ $choice -ge 12 ]] && [[ -n "$model_manager_num" ]] && [[ $choice -lt $model_manager_num ]]; then
+    custom_index=$((choice - 12))
+    count=0
+    while IFS= read -r model_info; do
+        if [[ -n "$model_info" ]]; then
+            if [[ $count -eq $custom_index ]]; then
+                IFS=':' read -r filename provider_name description <<< "$model_info"
+                echo -e "${BLUE}Using ${provider_name}...${NC}"
+                handle_custom_model "$filename" "$@"
+                exit 0
+            fi
+            ((count++))
+        fi
+    done < <(get_custom_models)
+fi
+
+# Handle model manager - check dynamic number
+if [[ "$choice" -eq "$model_manager_num" ]]; then
+    if [[ -f "$SCRIPT_DIR/add-model-manual.sh" ]]; then
+        exec "$SCRIPT_DIR/add-model-manual.sh"
+    else
+        echo -e "${RED}Error: add-model-manual.sh not found${NC}"
+        exit 1
+    fi
+fi
+
+echo "Invalid choice"
+exit 1