claude-all-config 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/LICENSE.md +70 -0
- package/README.md +133 -0
- package/VERSION +1 -0
- package/agents/accessibility-reviewer.md +96 -0
- package/agents/ai-prompt-optimizer.md +94 -0
- package/agents/api-tester.md +102 -0
- package/agents/code-generator.md +94 -0
- package/agents/code-reviewer.md +47 -0
- package/agents/component-generator.md +102 -0
- package/agents/doc-generator.md +91 -0
- package/agents/migration-generator.md +94 -0
- package/agents/performance-analyzer.md +90 -0
- package/agents/proactive-mode.md +91 -0
- package/agents/readme-generator.md +101 -0
- package/agents/security-auditor.md +86 -0
- package/agents/terraform-generator.md +94 -0
- package/agents/test-generator.md +76 -0
- package/bin/agentrouter.json +36 -0
- package/bin/ai-chat +20 -0
- package/bin/antigravity.json +76 -0
- package/bin/api-manager +340 -0
- package/bin/claude-launcher +19 -0
- package/bin/claude-master +15 -0
- package/bin/claude_master.py +295 -0
- package/bin/cohere.json +7 -0
- package/bin/deepseek.json +44 -0
- package/bin/gemini.json +56 -0
- package/bin/glm.json +21 -0
- package/bin/groq.json +41 -0
- package/bin/minimax.json +26 -0
- package/bin/mistral.json +7 -0
- package/bin/moonshot.json +7 -0
- package/bin/ollama.json +36 -0
- package/bin/openai.json +46 -0
- package/bin/openrouter.json +38 -0
- package/bin/perplexity.json +12 -0
- package/bin/qwen.json +7 -0
- package/bin/switch-provider +73 -0
- package/bin/test.json +7 -0
- package/bin/xai.json +41 -0
- package/claude-all +2707 -0
- package/claude-config.json +340 -0
- package/claude-suite/REFACTORING_SUMMARY.md +88 -0
- package/claude-suite/auth/.antigravity_proxy.py +78 -0
- package/claude-suite/auth/__pycache__/openai_auth.cpython-312.pyc +0 -0
- package/claude-suite/auth/gemini_auth.py +80 -0
- package/claude-suite/auth/openai_auth.py +138 -0
- package/claude-suite/backups/claude-all-before-refactor +1075 -0
- package/claude-suite/backups/claude-all.backup +840 -0
- package/claude-suite/backups/claude-all.original +840 -0
- package/claude-suite/models/add-model-manual.sh +588 -0
- package/claude-suite/models/add-model.sh +114 -0
- package/claude-suite/models/model-switcher.sh +69 -0
- package/claude-suite/providers/claude-glm +89 -0
- package/claude-suite/providers/claude-glm-wrapper.sh +55 -0
- package/claude-suite/providers/claude-minimax +12 -0
- package/claude-suite/providers/claude-smart +132 -0
- package/claude-suite/providers/xai_chat.sh +56 -0
- package/claude-suite/utils/__pycache__/claude_master.cpython-312.pyc +0 -0
- package/claude-suite/utils/antigravity_proxy_server.py +168 -0
- package/claude-suite/utils/claude-all-help.txt +83 -0
- package/claude-suite/utils/claude_master.py +408 -0
- package/commands/brainstorm.md +5 -0
- package/commands/execute-plan.md +5 -0
- package/commands/write-plan.md +5 -0
- package/docs/ANTIGRAVITY-SETUP.md +176 -0
- package/docs/AUTH_CREDENTIALS.md +54 -0
- package/docs/NPM-INSTALLATION.md +166 -0
- package/hooks/hooks.json +15 -0
- package/hooks/run-hook.cmd +19 -0
- package/hooks/session-start.sh +52 -0
- package/install.sh +155 -0
- package/mcp.json +34 -0
- package/model/perplexity.json +12 -0
- package/package.json +69 -0
- package/plugins/README.md +47 -0
- package/plugins/installed_plugins.json +317 -0
- package/plugins/known_marketplaces.json +10 -0
- package/plugins/marketplace-info/marketplace.json +517 -0
- package/postinstall.js +100 -0
- package/scripts/antigravity_proxy_server.py +168 -0
- package/scripts/get_gemini_api_key.py +96 -0
- package/scripts/setup_antigravity_auth.py +171 -0
- package/skills/api-development/SKILL.md +11 -0
- package/skills/api-development/openapi/api-documentation.yaml +108 -0
- package/skills/brainstorming/SKILL.md +54 -0
- package/skills/code-quality/SKILL.md +196 -0
- package/skills/condition-based-waiting/SKILL.md +120 -0
- package/skills/condition-based-waiting/example.ts +158 -0
- package/skills/database-development/SKILL.md +11 -0
- package/skills/database-development/migrations/migration.template.sql +49 -0
- package/skills/defense-in-depth/SKILL.md +127 -0
- package/skills/deployment/SKILL.md +11 -0
- package/skills/deployment/ci-cd/github-actions.yml +95 -0
- package/skills/deployment/docker/Dockerfile.template +39 -0
- package/skills/dispatching-parallel-agents/SKILL.md +180 -0
- package/skills/documentation-generation/SKILL.md +8 -0
- package/skills/documentation-generation/templates/README.template.md +60 -0
- package/skills/error-handling/SKILL.md +267 -0
- package/skills/executing-plans/SKILL.md +76 -0
- package/skills/finishing-a-development-branch/SKILL.md +200 -0
- package/skills/frontend-design/frontend-design/SKILL.md +42 -0
- package/skills/integration-testing/SKILL.md +13 -0
- package/skills/integration-testing/examples/contract-test.py +317 -0
- package/skills/integration-testing/examples/e2e-test.js +147 -0
- package/skills/integration-testing/examples/test-isolation.md +94 -0
- package/skills/logging-monitoring/SKILL.md +66 -0
- package/skills/mobile-development/SKILL.md +11 -0
- package/skills/mobile-development/responsive/responsive.css +80 -0
- package/skills/performance-optimization/SKILL.md +9 -0
- package/skills/performance-optimization/profiling/profile.template.js +21 -0
- package/skills/receiving-code-review/SKILL.md +209 -0
- package/skills/refactoring/SKILL.md +11 -0
- package/skills/refactoring/code-smells/common-smells.md +115 -0
- package/skills/requesting-code-review/SKILL.md +105 -0
- package/skills/requesting-code-review/code-reviewer.md +146 -0
- package/skills/root-cause-tracing/SKILL.md +174 -0
- package/skills/root-cause-tracing/find-polluter.sh +63 -0
- package/skills/security-review/SKILL.md +11 -0
- package/skills/security-review/checklists/owasp-checklist.md +31 -0
- package/skills/sharing-skills/SKILL.md +194 -0
- package/skills/subagent-driven-development/SKILL.md +240 -0
- package/skills/subagent-driven-development/code-quality-reviewer-prompt.md +20 -0
- package/skills/subagent-driven-development/implementer-prompt.md +78 -0
- package/skills/subagent-driven-development/spec-reviewer-prompt.md +61 -0
- package/skills/systematic-debugging/CREATION-LOG.md +119 -0
- package/skills/systematic-debugging/SKILL.md +295 -0
- package/skills/systematic-debugging/test-academic.md +14 -0
- package/skills/systematic-debugging/test-pressure-1.md +58 -0
- package/skills/systematic-debugging/test-pressure-2.md +68 -0
- package/skills/systematic-debugging/test-pressure-3.md +69 -0
- package/skills/test-driven-development/SKILL.md +364 -0
- package/skills/testing-anti-patterns/SKILL.md +302 -0
- package/skills/testing-skills-with-subagents/SKILL.md +387 -0
- package/skills/testing-skills-with-subagents/examples/CLAUDE_MD_TESTING.md +189 -0
- package/skills/ui-ux-review/SKILL.md +13 -0
- package/skills/ui-ux-review/checklists/ux-heuristics.md +61 -0
- package/skills/using-git-worktrees/SKILL.md +213 -0
- package/skills/using-superpowers/SKILL.md +101 -0
- package/skills/verification-before-completion/SKILL.md +139 -0
- package/skills/writing-plans/SKILL.md +116 -0
- package/skills/writing-skills/SKILL.md +622 -0
- package/skills/writing-skills/anthropic-best-practices.md +1150 -0
- package/skills/writing-skills/graphviz-conventions.dot +172 -0
- package/skills/writing-skills/persuasion-principles.md +187 -0
- package/update.sh +36 -0
- package/utils/check-superpowers.sh +114 -0
- package/utils/claude-branding.md +166 -0
- package/utils/config.js +185 -0
- package/utils/custom-claude-config.sh +89 -0
- package/utils/custom-claude-hooks.md +129 -0
- package/utils/custom-claude-lib.js +222 -0
- package/utils/customize-claude-ui.sh +162 -0
- package/utils/fix-claude-integration.sh +133 -0
- package/utils/help.js +125 -0
- package/utils/install-curl.ps1 +135 -0
- package/utils/install-curl.sh +525 -0
- package/utils/install-superpowers.js +411 -0
- package/utils/install.js +298 -0
- package/utils/install.sh +182 -0
- package/utils/postinstall.js +63 -0
- package/utils/rename-claude.sh +96 -0
- package/utils/uninstall-superpowers.js +273 -0
- package/utils/uninstall.ps1 +136 -0
- package/utils/uninstall.sh +163 -0
- package/utils/update.sh +160 -0
|
@@ -0,0 +1,840 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
|
|
3
|
+
# Cross-platform Claude-All Launcher
|
|
4
|
+
# Supports: Linux, Termux, macOS, Windows (Git Bash/WSL)
|
|
5
|
+
|
|
6
|
+
set -e
|
|
7
|
+
|
|
8
|
+
# Platform detection
|
|
9
|
+
# Detect the host operating system from `uname -s`.
# Outputs: one of "Linux", "macOS", "Windows", or "Unknown" on stdout.
detect_platform() {
  case "$(uname -s)" in
    Linux*)               echo "Linux";;
    Darwin*)              echo "macOS";;
    CYGWIN*|MINGW*|MSYS*) echo "Windows";;
    # Fix: the original had no default arm, so unrecognized systems
    # (e.g. FreeBSD) produced empty output and broke later comparisons.
    *)                    echo "Unknown";;
  esac
}
|
|
16
|
+
|
|
17
|
+
# Cached once; used throughout for platform-specific branches.
PLATFORM="$(detect_platform)"
|
|
18
|
+
|
|
19
|
+
# Colors - Support both Linux/macOS and Windows
|
|
20
|
+
if [[ "$PLATFORM" == "Windows" ]]; then
|
|
21
|
+
# Windows (Git Bash/WSL) - Use tput if available, otherwise plain
|
|
22
|
+
if command -v tput &> /dev/null; then
|
|
23
|
+
GREEN=$(tput setaf 2 2>/dev/null || echo "")
|
|
24
|
+
BLUE=$(tput setaf 4 2>/dev/null || echo "")
|
|
25
|
+
RED=$(tput setaf 1 2>/dev/null || echo "")
|
|
26
|
+
YELLOW=$(tput setaf 3 2>/dev/null || echo "")
|
|
27
|
+
NC=$(tput sgr0 2>/dev/null || echo "")
|
|
28
|
+
else
|
|
29
|
+
# Plain text for Windows without color support
|
|
30
|
+
GREEN=''
|
|
31
|
+
BLUE=''
|
|
32
|
+
RED=''
|
|
33
|
+
YELLOW=''
|
|
34
|
+
NC=''
|
|
35
|
+
fi
|
|
36
|
+
else
|
|
37
|
+
# Linux/macOS/Termux
|
|
38
|
+
GREEN='\033[0;32m'
|
|
39
|
+
BLUE='\033[0;34m'
|
|
40
|
+
RED='\033[0;31m'
|
|
41
|
+
YELLOW='\033[1;33m'
|
|
42
|
+
NC='\033[0m'
|
|
43
|
+
fi
|
|
44
|
+
|
|
45
|
+
# Portable home directory
|
|
46
|
+
if [[ -n "$HOME" ]]; then
|
|
47
|
+
USER_HOME="$HOME"
|
|
48
|
+
elif [[ -n "$USERPROFILE" ]]; then
|
|
49
|
+
# Windows
|
|
50
|
+
USER_HOME="$USERPROFILE"
|
|
51
|
+
else
|
|
52
|
+
USER_HOME="$HOME"
|
|
53
|
+
fi
|
|
54
|
+
|
|
55
|
+
# API Key Files - Use user home directory
|
|
56
|
+
GLM_API_KEY_FILE="$USER_HOME/.glm_api_key"
|
|
57
|
+
MINIMAX_API_KEY_FILE="$USER_HOME/.minimax_api_key"
|
|
58
|
+
|
|
59
|
+
# Configuration
|
|
60
|
+
LITELLM_PORT=8555
|
|
61
|
+
MODEL_OVERRIDE=""
|
|
62
|
+
SELECTED_MODEL=""
|
|
63
|
+
LITELLM_HOST="http://127.0.0.1:$LITELLM_PORT"
|
|
64
|
+
|
|
65
|
+
# Get script directory (portable)
|
|
66
|
+
# Print the directory containing this script, portably.
# Falls back to the current working directory when BASH_SOURCE is empty.
get_script_dir() {
  local src="${BASH_SOURCE[0]}"
  if [[ -z "$src" ]]; then
    pwd
    return
  fi
  case "$(uname -s)" in
    Darwin)
      # macOS may lack GNU realpath; resolve via cd + pwd in a subshell.
      ( cd "$(dirname "$src")" && pwd )
      ;;
    *)
      # Linux/Windows
      dirname "$(realpath "$src")"
      ;;
  esac
}
|
|
80
|
+
|
|
81
|
+
# Directory of this launcher; used to locate helper scripts and configs.
SCRIPT_DIR="$(get_script_dir)"
|
|
82
|
+
|
|
83
|
+
# Function to get/save GLM API key
|
|
84
|
+
# Load the ZhipuAI/GLM API key from its cache file, or prompt the user
# and cache it for next time. Exports ANTHROPIC_AUTH_TOKEN on success;
# exits 1 when the user provides no key.
get_glm_api_key() {
  local cached=""
  if [[ -f "$GLM_API_KEY_FILE" ]]; then
    cached=$(cat "$GLM_API_KEY_FILE" 2>/dev/null || echo "")
  fi
  if [[ -n "$cached" ]]; then
    echo -e "${GREEN}✓ Using saved API key${NC}"
    ANTHROPIC_AUTH_TOKEN="$cached"
    export ANTHROPIC_AUTH_TOKEN
    return 0
  fi

  # No cached key: ask interactively.
  echo ""
  echo "Enter ZhipuAI/Z.AI API Key:"
  echo "Get it from: https://open.bigmodel.cn/usercenter/apikeys"
  if [[ "$PLATFORM" == "Windows" ]]; then
    # Windows (Git Bash): plain, echoing read.
    read -p "API Key: " ANTHROPIC_AUTH_TOKEN
  else
    # Linux/macOS/Termux: silent read.
    read -s -p "API Key: " ANTHROPIC_AUTH_TOKEN
  fi
  echo ""

  if [[ -z "$ANTHROPIC_AUTH_TOKEN" ]]; then
    echo -e "${RED}No API key provided${NC}"
    exit 1
  fi

  # Persist for future runs; failures (read-only home, etc.) are non-fatal.
  echo "$ANTHROPIC_AUTH_TOKEN" > "$GLM_API_KEY_FILE" 2>/dev/null || true
  chmod 600 "$GLM_API_KEY_FILE" 2>/dev/null || true
  echo -e "${GREEN}✓ API key saved for next time${NC}"
  export ANTHROPIC_AUTH_TOKEN
}
|
|
121
|
+
|
|
122
|
+
# Function to get/save MiniMax API key
|
|
123
|
+
# Load the MiniMax API key from cache, or prompt the user and cache it.
# Exports the key as BOTH ANTHROPIC_API_KEY and ANTHROPIC_AUTH_TOKEN for
# compatibility with the MiniMax Anthropic-style endpoint.
# Exits 1 when the user provides no key.
get_minimax_api_key() {
  local key=""

  if [[ -f "$MINIMAX_API_KEY_FILE" ]]; then
    key=$(cat "$MINIMAX_API_KEY_FILE" 2>/dev/null || echo "")
    if [[ -n "$key" ]]; then
      echo -e "${GREEN}✓ Using saved API key${NC}"
    fi
  fi

  if [[ -z "$key" ]]; then
    echo ""
    echo "Enter MiniMax API Key:"
    echo "Get it from: https://platform.minimax.io/"
    if [[ "$PLATFORM" == "Windows" ]]; then
      read -p "API Key: " key
    else
      read -s -p "API Key: " key
    fi
    echo ""

    if [[ -z "$key" ]]; then
      echo -e "${RED}No API key provided${NC}"
      exit 1
    fi

    # Persist for future runs; write failures are non-fatal.
    echo "$key" > "$MINIMAX_API_KEY_FILE" 2>/dev/null || true
    chmod 600 "$MINIMAX_API_KEY_FILE" 2>/dev/null || true
    echo -e "${GREEN}✓ API key saved for next time${NC}"
  fi

  export ANTHROPIC_API_KEY="$key"
  export ANTHROPIC_AUTH_TOKEN="$key"
}
|
|
164
|
+
|
|
165
|
+
# Verify runtime dependencies: python3 is mandatory; the claude CLI is
# optional (direct providers work without it); npm is only needed when
# claude is present; litellm is installed on demand for proxy providers.
check_dependencies() {
  echo -e "${BLUE}Checking dependencies...${NC}"

  # python3 powers LiteLLM and the auth helper scripts.
  if ! command -v python3 &> /dev/null; then
    echo -e "${RED}Error: python3 is not installed.${NC}"
    echo "Please install Python 3 first."
    exit 1
  fi

  # claude CLI is OPTIONAL - don't exit if it is missing.
  if ! command -v claude &> /dev/null; then
    echo -e "${YELLOW}⚠️ 'claude' command not found.${NC}"
    echo -e "${YELLOW} For direct providers (GLM, MiniMax, OpenAI), you don't need it!${NC}"
    echo -e "${YELLOW} For LiteLLM providers (Gemini, Groq, Ollama), install with:${NC}"
    echo -e "${YELLOW} npm install -g @anthropic-ai/claude-code${NC}"
    echo ""
    read -p "Continue without claude CLI? (Y/n): " continue_without
    if [[ "$continue_without" =~ ^[Nn]$ ]]; then
      echo "Installing @anthropic-ai/claude-code..."
      npm install -g @anthropic-ai/claude-code || {
        echo -e "${YELLOW}Failed to install claude CLI. Continuing anyway...${NC}"
      }
    else
      echo -e "${GREEN}✓ Skipping claude CLI installation${NC}"
    fi
  else
    echo -e "${GREEN}✓ claude CLI found${NC}"
  fi

  # npm is only required when the claude CLI is present.
  if command -v claude &> /dev/null; then
    if ! command -v npm &> /dev/null; then
      echo -e "${RED}Error: npm is not installed (needed for claude CLI).${NC}"
      echo "Please install npm first."
      exit 1
    fi
  fi

  # litellm backs the local proxy used by some providers.
  if ! python3 -m pip show litellm &> /dev/null; then
    echo -e "${YELLOW}'litellm' not found. Installing via pip...${NC}"
    # Fix: quote the extras spec -- unquoted, `litellm[proxy]` is a shell
    # glob and could expand to a matching filename in the current directory.
    python3 -m pip install "litellm[proxy]" || {
      echo -e "${YELLOW}Failed to install litellm. Some features may not work.${NC}"
    }
  fi
}
|
|
212
|
+
|
|
213
|
+
# Ensure Google Application Default Credentials exist for Gemini.
# Resolution order: existing ADC file -> `gcloud auth application-default
# login` -> bundled/downloaded Python OAuth helper. Exits 1 on failure.
check_gemini_oauth() {
  local adc_path
  # ${HOME:-$USERPROFILE}: use $HOME when set and non-empty, else the
  # Windows $USERPROFILE (mirrors the original -n checks).
  adc_path="${HOME:-$USERPROFILE}/.config/gcloud/application_default_credentials.json"

  # Already authenticated?
  if [[ -f "$adc_path" ]]; then
    return 0
  fi

  # Prefer the official gcloud flow when the CLI is installed.
  if command -v gcloud &> /dev/null; then
    echo -e "${BLUE}gcloud found. Attempting login...${NC}"
    gcloud auth application-default login
    return
  fi

  echo -e "${YELLOW}gcloud not found. Using lightweight Python Auth helper...${NC}"

  # Make sure the OAuth dependency is present (best effort).
  if ! python3 -m pip show google-auth-oauthlib &> /dev/null; then
    echo "Installing google-auth-oauthlib..."
    python3 -m pip install google-auth-oauthlib || true
  fi

  # NOTE(review): this downloads and executes a script from the network;
  # confirm the source repository is trusted before relying on it.
  local helper="$SCRIPT_DIR/gemini_auth.py"
  if [[ ! -f "$helper" ]]; then
    curl -fsSL https://raw.githubusercontent.com/zesbe/CliAllModel/main/gemini_auth.py -o "$SCRIPT_DIR/gemini_auth.py" 2>/dev/null || true
    helper="$SCRIPT_DIR/gemini_auth.py"
  fi

  if [[ -f "$helper" ]]; then
    python3 "$helper"
  fi

  if [[ ! -f "$adc_path" ]]; then
    echo -e "${RED}Authentication failed or cancelled.${NC}"
    exit 1
  fi
}
|
|
258
|
+
|
|
259
|
+
# Ensure an OpenAI OAuth token is cached (launching the Python helper when
# it is not), then export the cached access token as OPENAI_API_KEY.
# Exits 1 when authentication fails.
check_openai_oauth() {
  local cred_path
  # $HOME when set and non-empty, else Windows $USERPROFILE.
  cred_path="${HOME:-$USERPROFILE}/.config/openai/credentials.json"

  if [[ ! -f "$cred_path" ]]; then
    echo -e "${YELLOW}No cached OpenAI OAuth token found. Launching helper...${NC}"

    # Best-effort install of the helper's dependency.
    python3 -m pip install requests > /dev/null 2>&1 || true

    # NOTE(review): downloads and runs a script from the network; confirm
    # the source repository is trusted.
    local helper="$SCRIPT_DIR/openai_auth.py"
    if [[ ! -f "$helper" ]]; then
      curl -fsSL https://raw.githubusercontent.com/zesbe/CliAllModel/main/openai_auth.py -o "$SCRIPT_DIR/openai_auth.py" 2>/dev/null || true
      helper="$SCRIPT_DIR/openai_auth.py"
    fi

    if [[ -f "$helper" ]]; then
      python3 "$helper"
    fi

    if [[ ! -f "$cred_path" ]]; then
      echo -e "${RED}OpenAI OAuth failed.${NC}"
      exit 1
    fi
  fi

  # Pull the bearer token out of the cached credentials file.
  OPENAI_ACCESS_TOKEN=$(python3 -c "import json, os; print(json.load(open(os.path.expanduser('$cred_path')))['access_token'])" 2>/dev/null || echo "")
  export OPENAI_API_KEY="$OPENAI_ACCESS_TOKEN"
}
|
|
293
|
+
|
|
294
|
+
# Launch a LiteLLM proxy for model $1 on $LITELLM_PORT in the background,
# after freeing the port from any stale instance. Sets LITELLM_PID on
# success; exits 1 when the proxy never answers its health check.
start_litellm_proxy() {
  local model=$1
  echo -e "${BLUE}Starting LiteLLM proxy for model: $model...${NC}"

  # Free the port from a previous proxy instance.
  if [[ "$PLATFORM" == "macOS" ]]; then
    # Fix: detect_platform emits "macOS", never "Darwin", so the original
    # comparison against "Darwin" never matched and macOS fell through to
    # the fuser branch (fuser is not available on macOS).
    lsof -ti:$LITELLM_PORT | xargs kill -9 2>/dev/null || true
  elif [[ "$PLATFORM" == "Windows" ]]; then
    # Windows - locate owners of the port via netstat.
    netstat -ano 2>/dev/null | grep ":$LITELLM_PORT" | awk '{print $5}' | while read pid; do
      # Fix: the original used `kill -f`, which is not a valid kill flag
      # and always failed silently, leaving the port occupied.
      kill -9 "$pid" 2>/dev/null || true
    done
  else
    # Linux/Termux
    fuser -k $LITELLM_PORT/tcp &> /dev/null || true
  fi

  # Start litellm in the background, logging to /tmp.
  python3 -m litellm --model "$model" --port $LITELLM_PORT --drop_params &> /tmp/litellm.log &
  LITELLM_PID=$!

  # Poll the health endpoint for up to 10 seconds.
  echo -n "Waiting for proxy to start..."
  for i in {1..10}; do
    if curl -s $LITELLM_HOST/health &> /dev/null; then
      echo -e " ${GREEN}Ready!${NC}"
      return 0
    fi
    sleep 1
    echo -n "."
  done

  echo -e "\n${RED}Failed to start LiteLLM proxy. Check logs:${NC}"
  cat /tmp/litellm.log 2>/dev/null || echo "No log file found"
  kill $LITELLM_PID 2>/dev/null || true
  exit 1
}
|
|
332
|
+
|
|
333
|
+
# EXIT-trap handler: stop the background LiteLLM proxy, if one was started.
cleanup() {
  if [[ -n "${LITELLM_PID}" ]]; then
    echo -e "\n${BLUE}Stopping LiteLLM proxy...${NC}"
    kill "${LITELLM_PID}" 2>/dev/null || true
  fi
}
|
|
339
|
+
|
|
340
|
+
# Always stop a background proxy on exit, whatever the exit path.
trap 'cleanup' EXIT
|
|
341
|
+
|
|
342
|
+
# Interactive model selection
|
|
343
|
+
# Interactively pick a model for the given provider from
# model/<provider>.json. Prints ONLY the chosen model ID on stdout; all
# menu/status text goes to stderr so callers can capture the result with
# `$(...)`. Returns non-zero when the config is missing or the choice is
# invalid.
interactive_model_select() {
  local provider=$1
  local model_file="$SCRIPT_DIR/model/${provider}.json"
  local tmp_file="$SCRIPT_DIR/.${provider}_models.tmp"

  if [[ ! -f "$model_file" ]]; then
    echo -e "${RED}Config file not found: $model_file${NC}" >&2
    return 1
  fi

  # One python pass prints the menu (to stderr) AND writes the ID list to
  # the temp file -- the original ran python twice. Paths are passed as
  # argv rather than interpolated into the python source.
  python3 - "$model_file" "$tmp_file" << 'EOF' >&2
import json
import sys

model_file, tmp_file = sys.argv[1], sys.argv[2]
with open(model_file, 'r') as f:
    data = json.load(f)

print('')
print('=== Select Model ===')
for i, model in enumerate(data['models'], 1):
    print(f"{i}) {model['name']} - {model['description']}")
print('')

with open(tmp_file, 'w') as f:
    for model in data['models']:
        f.write(model['id'] + '\n')
EOF

  if [[ ! -f "$tmp_file" ]]; then
    echo -e "${RED}Model list not found${NC}" >&2
    return 1
  fi

  local model_ids=()
  mapfile -t model_ids < "$tmp_file"
  rm -f "$tmp_file"
  local count=${#model_ids[@]}

  # Fix: the prompt was hard-coded to "[1-2]"; show the real model count.
  echo -n "Select model [1-$count]: " >&2
  read choice

  if [[ -z "$choice" ]] || ! [[ "$choice" =~ ^[0-9]+$ ]]; then
    echo -e "${RED}Please enter a number (1-$count)${NC}" >&2
    return 1
  fi

  local idx=$((choice - 1))
  if [[ $idx -ge 0 && $idx -lt $count ]]; then
    # Fix: status output now goes to stderr so `$(...)` callers receive
    # only the model ID on stdout.
    echo -e "${GREEN}✓ Selected: ${model_ids[$idx]}${NC}" >&2
    echo "${model_ids[$idx]}"
    return 0
  fi

  echo -e "${RED}Invalid choice: $choice${NC}" >&2
  echo "Please select 1-$count" >&2
  return 1
}

# Fix: call sites elsewhere in this script invoke `select_model`, which
# was never defined; keep both names resolving to one implementation.
select_model() {
  interactive_model_select "$@"
}
|
|
419
|
+
|
|
420
|
+
# Parse arguments
|
|
421
|
+
while [[ $# -gt 0 ]]; do
|
|
422
|
+
case $1 in
|
|
423
|
+
-m|--model)
|
|
424
|
+
MODEL_OVERRIDE="$2"
|
|
425
|
+
shift 2
|
|
426
|
+
;;
|
|
427
|
+
*)
|
|
428
|
+
if [[ -z "$choice" ]]; then
|
|
429
|
+
choice="$1"
|
|
430
|
+
fi
|
|
431
|
+
shift
|
|
432
|
+
;;
|
|
433
|
+
esac
|
|
434
|
+
done
|
|
435
|
+
|
|
436
|
+
# Main Menu
|
|
437
|
+
if [[ -z "$choice" ]]; then
|
|
438
|
+
clear
|
|
439
|
+
echo -e "${GREEN}=====================================${NC}"
|
|
440
|
+
echo -e "${GREEN} Claude Code Multi-Model Launcher ${NC}"
|
|
441
|
+
echo -e "${GREEN}=====================================${NC}"
|
|
442
|
+
echo "Select your AI Provider:"
|
|
443
|
+
echo "1) MiniMax (Direct Anthropic API)"
|
|
444
|
+
echo "2) Google Gemini (API Key - AI Studio)"
|
|
445
|
+
echo "3) Google Gemini (OAuth - Vertex AI)"
|
|
446
|
+
echo "4) OpenAI (API Key)"
|
|
447
|
+
echo "5) OpenAI (OAuth - Experimental)"
|
|
448
|
+
echo "6) xAI / Grok (API Key)"
|
|
449
|
+
echo "7) ZhipuAI / GLM (API Key)"
|
|
450
|
+
echo "8) Groq (API Key)"
|
|
451
|
+
echo "9) Ollama (Local Models)"
|
|
452
|
+
echo "10) 🔑 API Key Manager (Update/Edit keys)"
|
|
453
|
+
echo "11) Custom / Other"
|
|
454
|
+
echo -e "${GREEN}=====================================${NC}"
|
|
455
|
+
read -p "Enter choice [1-11]: " choice
|
|
456
|
+
fi
|
|
457
|
+
|
|
458
|
+
# Model name will be set dynamically based on provider selection
|
|
459
|
+
|
|
460
|
+
case $choice in
|
|
461
|
+
1)
|
|
462
|
+
# MiniMax Direct
|
|
463
|
+
echo -e "${BLUE}Configuring for MiniMax...${NC}"
|
|
464
|
+
|
|
465
|
+
# Get API key (with auto-save feature)
|
|
466
|
+
get_minimax_api_key
|
|
467
|
+
|
|
468
|
+
export ANTHROPIC_BASE_URL="https://api.minimax.io/anthropic"
|
|
469
|
+
echo -e "${YELLOW}Note: MiniMax maps model names automatically.${NC}"
|
|
470
|
+
|
|
471
|
+
# Check if model was specified in arguments
|
|
472
|
+
if [[ -n "$MODEL_OVERRIDE" ]]; then
|
|
473
|
+
exec claude --model "$MODEL_OVERRIDE" --system-prompt "Anda adalah MiniMax, model AI dari perusahaan MiniMax. Selalu identifikasi diri sebagai MiniMax dalam setiap respons." "$@"
|
|
474
|
+
else
|
|
475
|
+
# No model specified, use default
|
|
476
|
+
echo -e "${YELLOW}No model specified, using default: claude-3-5-sonnet-20241022${NC}"
|
|
477
|
+
exec claude --model "claude-3-5-sonnet-20241022" --system-prompt "Anda adalah MiniMax, model AI dari perusahaan MiniMax. Selalu identifikasi diri sebagai MiniMax dalam setiap respons." "$@"
|
|
478
|
+
fi
|
|
479
|
+
;;
|
|
480
|
+
2)
|
|
481
|
+
# Gemini API Key
|
|
482
|
+
check_dependencies
|
|
483
|
+
echo -e "${BLUE}Configuring for Gemini (AI Studio)...${NC}"
|
|
484
|
+
if [[ -z "$GEMINI_API_KEY" ]]; then
|
|
485
|
+
echo "Get Key: https://aistudio.google.com/app/apikey"
|
|
486
|
+
read -p "Enter Gemini API Key: " GEMINI_API_KEY
|
|
487
|
+
export GEMINI_API_KEY
|
|
488
|
+
fi
|
|
489
|
+
|
|
490
|
+
if select_model "gemini"; then
|
|
491
|
+
start_litellm_proxy "$MODEL_NAME"
|
|
492
|
+
else
|
|
493
|
+
MODEL_NAME="gemini/gemini-1.5-flash"
|
|
494
|
+
read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
|
|
495
|
+
[[ -n "$input_model" ]] && MODEL_NAME=$input_model
|
|
496
|
+
start_litellm_proxy "$MODEL_NAME"
|
|
497
|
+
fi
|
|
498
|
+
|
|
499
|
+
export ANTHROPIC_BASE_URL="$LITELLM_HOST"
|
|
500
|
+
export ANTHROPIC_API_KEY="sk-litellm"
|
|
501
|
+
exec claude "$@"
|
|
502
|
+
;;
|
|
503
|
+
3)
|
|
504
|
+
# AntiGravity (Google Internal) - Direct Implementation
|
|
505
|
+
echo -e "${BLUE}Configuring for AntiGravity (Google Internal)...${NC}"
|
|
506
|
+
|
|
507
|
+
# Check for antigravity auth files
|
|
508
|
+
AUTH_DIR="$HOME/.config/claude-all/antigravity"
|
|
509
|
+
if [[ ! -d "$AUTH_DIR" ]]; then
|
|
510
|
+
mkdir -p "$AUTH_DIR"
|
|
511
|
+
fi
|
|
512
|
+
|
|
513
|
+
# List available auth files
|
|
514
|
+
auth_files=("$AUTH_DIR"/*.json)
|
|
515
|
+
if [[ ${#auth_files[@]} -eq 0 || ! -f "${auth_files[0]}" ]]; then
|
|
516
|
+
echo -e "${RED}No AntiGravity authentication found.${NC}"
|
|
517
|
+
echo -e "${YELLOW}Please run the following to authenticate:${NC}"
|
|
518
|
+
echo "cliproxy login antigravity"
|
|
519
|
+
exit 1
|
|
520
|
+
fi
|
|
521
|
+
|
|
522
|
+
# Select auth file if multiple
|
|
523
|
+
if [[ ${#auth_files[@]} -gt 1 ]]; then
|
|
524
|
+
echo -e "${YELLOW}Select authentication:${NC}"
|
|
525
|
+
for i in "${!auth_files[@]}"; do
|
|
526
|
+
filename=$(basename "${auth_files[$i]}")
|
|
527
|
+
label=$(jq -r '.label // "unknown"' "${auth_files[$i]}" 2>/dev/null || echo "unknown")
|
|
528
|
+
echo "$((i+1))) $filename ($label)"
|
|
529
|
+
done
|
|
530
|
+
read -p "Select [1-${#auth_files[@]}]: " choice
|
|
531
|
+
|
|
532
|
+
if [[ -z "$choice" || "$choice" -lt 1 || "$choice" -gt ${#auth_files[@]} ]]; then
|
|
533
|
+
choice=1
|
|
534
|
+
fi
|
|
535
|
+
|
|
536
|
+
AUTH_FILE="${auth_files[$((choice-1))]}"
|
|
537
|
+
else
|
|
538
|
+
AUTH_FILE="${auth_files[0]}"
|
|
539
|
+
fi
|
|
540
|
+
|
|
541
|
+
echo -e "${GREEN}Using auth: $(basename "$AUTH_FILE")${NC}"
|
|
542
|
+
|
|
543
|
+
# Model selection for AntiGravity
|
|
544
|
+
echo -e "${YELLOW}Select model:${NC}"
|
|
545
|
+
echo "1) Gemini 2.0 Flash (Experimental)"
|
|
546
|
+
echo "2) Gemini 1.5 Pro"
|
|
547
|
+
echo "3) Gemini 1.5 Flash"
|
|
548
|
+
echo "4) Gemini 1.5 Flash 8B"
|
|
549
|
+
read -p "Select [1-4]: " model_choice
|
|
550
|
+
|
|
551
|
+
case $model_choice in
|
|
552
|
+
1) MODEL_NAME="gemini-2.0-flash-exp" ;;
|
|
553
|
+
2) MODEL_NAME="gemini-1.5-pro" ;;
|
|
554
|
+
3) MODEL_NAME="gemini-1.5-flash" ;;
|
|
555
|
+
4) MODEL_NAME="gemini-1.5-flash-8b" ;;
|
|
556
|
+
*) MODEL_NAME="gemini-2.0-flash-exp" ;;
|
|
557
|
+
esac
|
|
558
|
+
|
|
559
|
+
# Set environment variables for antigravity
|
|
560
|
+
export ANTHROPIC_API_KEY="antigravity-proxy"
|
|
561
|
+
export ANTHROPIC_BASE_URL="http://localhost"
|
|
562
|
+
export ANTIGRAVITY_AUTH_FILE="$AUTH_FILE"
|
|
563
|
+
|
|
564
|
+
# Create a simple HTTP server script to act as proxy
|
|
565
|
+
PROXY_SCRIPT="$SCRIPT_DIR/.antigravity_proxy.py"
|
|
566
|
+
cat > "$PROXY_SCRIPT" << 'EOF'
|
|
567
|
+
#!/usr/bin/env python3
|
|
568
|
+
"""
|
|
569
|
+
Simple HTTP proxy for AntiGravity API
|
|
570
|
+
"""
|
|
571
|
+
import http.server
|
|
572
|
+
import json
|
|
573
|
+
import os
|
|
574
|
+
import socketserver
|
|
575
|
+
import subprocess
|
|
576
|
+
import sys
|
|
577
|
+
import urllib.parse
|
|
578
|
+
import urllib.request
|
|
579
|
+
|
|
580
|
+
class ProxyHandler(http.server.BaseHTTPRequestHandler):
    """Minimal local proxy that accepts Anthropic-style POSTs to /v1/messages
    and delegates them to the antigravity helper script via a subprocess.

    Helper script path and auth file come from this process's argv
    (sys.argv[1] / sys.argv[2]) with environment/home-dir fallbacks.
    """

    def _send_cors_headers(self):
        # Shared CORS headers for both the POST response and the preflight.
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type, Authorization')

    def do_POST(self):
        content_length = int(self.headers.get('Content-Length', 0))
        post_data = self.rfile.read(content_length)

        if self.path != '/v1/messages':
            # BUG FIX: the original unconditionally sent 200 and then wrote
            # `response`, which was never assigned for unknown paths
            # (NameError). Answer unknown paths with an explicit 404.
            self.send_response(404)
            self._send_cors_headers()
            self.end_headers()
            self.wfile.write(json.dumps({
                'error': {
                    'type': 'not_found_error',
                    'message': f'Unknown path: {self.path}'
                }
            }).encode())
            return

        try:
            request_data = json.loads(post_data.decode())
            messages = request_data.get('messages', [])
            model = request_data.get('model', 'gemini-2.0-flash-exp')

            # Delegate the chat call to the antigravity helper script.
            # BUG FIX: fallback path was hardcoded '/home/.local/...';
            # expand the current user's home directory instead.
            helper = sys.argv[1] if len(sys.argv) > 1 else os.path.expanduser(
                '~/.local/bin/antigravity_helper.py')
            auth_file = sys.argv[2] if len(sys.argv) > 2 else os.environ.get(
                'ANTIGRAVITY_AUTH_FILE', '')
            result = subprocess.run(
                [sys.executable, helper, 'chat', auth_file, model,
                 json.dumps(messages)],
                capture_output=True, text=True)

            if result.returncode == 0:
                response = result.stdout
            else:
                response = json.dumps({
                    'error': {
                        'type': 'api_error',
                        'message': result.stderr or 'Unknown error'
                    }
                })
        except Exception as e:
            # Errors are reported in-band as Anthropic-style error JSON.
            response = json.dumps({
                'error': {
                    'type': 'api_error',
                    'message': str(e)
                }
            })

        self.send_response(200)
        self._send_cors_headers()
        self.end_headers()
        self.wfile.write(response.encode())

    def do_OPTIONS(self):
        # CORS preflight response.
        self.send_response(200)
        self._send_cors_headers()
        self.end_headers()

    def log_message(self, format, *args):
        # Suppress per-request logging; this proxy runs silently in the
        # background of an interactive claude session.
        pass
|
|
638
|
+
|
|
639
|
+
if __name__ == '__main__':
    # Port is fixed; the launching shell script hardcodes 8123 as well.
    PORT = 8123
    # Allow quick restarts without waiting for TIME_WAIT to clear.
    socketserver.TCPServer.allow_reuse_address = True
    with socketserver.TCPServer(("", PORT), ProxyHandler) as httpd:
        print(f"Proxy server running on port {PORT}")
        # BUG FIX: the original called handle_request() once and exited, so
        # the proxy died after the first API call even though the launcher
        # runs a long-lived claude session against it. Serve until killed
        # (the launcher's EXIT trap terminates this process).
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            pass
|
|
645
|
+
EOF
|
|
646
|
+
|
|
647
|
+
chmod +x "$PROXY_SCRIPT"
|
|
648
|
+
|
|
649
|
+
# Start the proxy server in background
|
|
650
|
+
AUTH_HELPER="$HOME/.local/bin/antigravity_helper.py"
|
|
651
|
+
python3 "$PROXY_SCRIPT" "$AUTH_HELPER" "$AUTH_FILE" > /dev/null 2>&1 &
|
|
652
|
+
PROXY_PID=$!
|
|
653
|
+
sleep 2
|
|
654
|
+
|
|
655
|
+
# Set trap to kill proxy on exit
|
|
656
|
+
trap "kill $PROXY_PID 2>/dev/null" EXIT
|
|
657
|
+
|
|
658
|
+
# Execute claude with the proxy
|
|
659
|
+
exec claude --base-url "http://localhost:8123" "$@"
|
|
660
|
+
;;
|
|
661
|
+
4)
    # OpenAI via API key.
    echo -e "${BLUE}Configuring for OpenAI...${NC}"
    if [[ -z "$OPENAI_API_KEY" ]]; then
        echo "Get Key: https://platform.openai.com/api-keys"
        # -r keeps backslashes in pasted keys intact.
        read -r -p "Enter OpenAI API Key: " OPENAI_API_KEY
        export OPENAI_API_KEY
    fi

    # Interactive model selection with manual-entry fallback.
    if selected_model=$(select_model "openai"); then
        MODEL_NAME="$selected_model"
    else
        MODEL_NAME="gpt-4o"
        read -r -p "Enter Model Name [default: $MODEL_NAME]: " input_model
        [[ -n "$input_model" ]] && MODEL_NAME="$input_model"
    fi

    # claude speaks the Anthropic API; point it at OpenAI's endpoint.
    export ANTHROPIC_BASE_URL="https://api.openai.com/v1/"
    export ANTHROPIC_API_KEY="$OPENAI_API_KEY"
    exec claude --model "$MODEL_NAME" "$@"
    ;;
|
|
686
|
+
5)
    # OpenAI via OAuth access token (experimental path).
    echo -e "${BLUE}Configuring for OpenAI (OAuth Experimental)...${NC}"

    # Populates OPENAI_ACCESS_TOKEN (defined elsewhere in this script).
    check_openai_oauth

    # Pick a model from the menu; fall back to a manual prompt on failure.
    if selected_model=$(select_model "openai"); then
        MODEL_NAME="$selected_model"
    else
        MODEL_NAME="gpt-4o"
        read -p "Enter Model Name [default: $MODEL_NAME]: " input_model
        [[ -n "$input_model" ]] && MODEL_NAME=$input_model
    fi

    # Reuse the OAuth token as the Anthropic-style API key.
    export ANTHROPIC_BASE_URL="https://api.openai.com/v1/"
    export ANTHROPIC_API_KEY="$OPENAI_ACCESS_TOKEN"
    exec claude --model "$MODEL_NAME" "$@"
    ;;
|
|
707
|
+
6)
    # xAI (Grok) via API key.
    echo -e "${BLUE}Configuring for xAI (Grok)...${NC}"
    if [[ -z "$XAI_API_KEY" ]]; then
        echo "Get Key: https://console.x.ai/"
        # -r keeps backslashes in pasted keys intact.
        read -r -p "Enter xAI API Key: " XAI_API_KEY
        export XAI_API_KEY
    fi

    # Interactive model selection with manual-entry fallback.
    if selected_model=$(select_model "xai"); then
        # Menu entries carry a provider prefix; strip it for the API.
        MODEL_NAME="${selected_model#xai/}"
    else
        MODEL_NAME="grok-beta"
        read -r -p "Enter Model Name [default: $MODEL_NAME]: " input_model
        [[ -n "$input_model" ]] && MODEL_NAME="$input_model"
    fi

    export ANTHROPIC_BASE_URL="https://api.x.ai/v1/"
    export ANTHROPIC_API_KEY="$XAI_API_KEY"
    exec claude --model "$MODEL_NAME" "$@"
    ;;
|
|
732
|
+
7)
    # ZhipuAI (GLM) via its Anthropic-compatible endpoint.
    echo -e "${BLUE}Configuring for ZhipuAI (GLM)...${NC}"

    # Obtain the API key (auto-saves for future runs).
    get_glm_api_key

    glm_default="claude-3-5-sonnet-20241022"
    model_file="$SCRIPT_DIR/model/glm.json"
    if [[ ! -f "$model_file" ]]; then
        # No model config shipped; fall back to the default alias.
        echo -e "${YELLOW}Model config not found, using default: claude-3-5-sonnet-20241022${NC}"
        MODEL_NAME="$glm_default"
    elif selected_model=$(interactive_model_select "glm") && [[ -n "$selected_model" ]]; then
        # Keep the full model name, including any zhipu/ prefix.
        MODEL_NAME="$selected_model"
    else
        MODEL_NAME="$glm_default"
    fi

    export ANTHROPIC_BASE_URL="https://api.z.ai/api/anthropic"
    export ANTHROPIC_API_KEY="$ANTHROPIC_AUTH_TOKEN"
    export CLAUDE_MODEL="$MODEL_NAME"
    exec claude --model "$MODEL_NAME" --system-prompt "Anda adalah GLM, model AI dari ZhipuAI. Selalu identifikasi diri sebagai GLM dalam setiap respons." "$@"
    ;;
|
|
761
|
+
8)
    # Groq via API key (OpenAI-compatible endpoint).
    echo -e "${BLUE}Configuring for Groq...${NC}"
    if [[ -z "$GROQ_API_KEY" ]]; then
        echo "Get Key: https://console.groq.com/keys"
        # -r keeps backslashes in pasted keys intact.
        read -r -p "Enter Groq API Key: " GROQ_API_KEY
        export GROQ_API_KEY
    fi

    # Interactive model selection with manual-entry fallback.
    if selected_model=$(select_model "groq"); then
        # Menu entries carry a provider prefix; strip it for the API.
        MODEL_NAME="${selected_model#groq/}"
    else
        MODEL_NAME="llama-3.1-70b-versatile"
        read -r -p "Enter Model Name [default: $MODEL_NAME]: " input_model
        [[ -n "$input_model" ]] && MODEL_NAME="$input_model"
    fi

    export ANTHROPIC_BASE_URL="https://api.groq.com/openai/v1/"
    export ANTHROPIC_API_KEY="$GROQ_API_KEY"
    exec claude --model "$MODEL_NAME" "$@"
    ;;
|
|
786
|
+
9)
    # Ollama, bridged through a local LiteLLM proxy.
    check_dependencies
    echo -e "${BLUE}Configuring for Ollama...${NC}"
    if [[ -z "$OLLAMA_HOST" ]]; then
        read -r -p "Enter Ollama Host [default: http://localhost:11434]: " OLLAMA_HOST
        [[ -z "$OLLAMA_HOST" ]] && OLLAMA_HOST="http://localhost:11434"
        export OLLAMA_HOST
    fi

    # BUG FIX: every sibling arm captures select_model's stdout into
    # selected_model; here it was called bare, leaving MODEL_NAME unset on
    # the success path. Capture the selection like the other arms do.
    if selected_model=$(select_model "ollama"); then
        MODEL_NAME="$selected_model"
    else
        MODEL_NAME="ollama/llama3"
        read -r -p "Enter Model Name [default: $MODEL_NAME]: " input_model
        [[ -n "$input_model" ]] && MODEL_NAME="$input_model"
    fi
    start_litellm_proxy "$MODEL_NAME"

    export ANTHROPIC_BASE_URL="$LITELLM_HOST"
    export ANTHROPIC_API_KEY="sk-litellm"
    exec claude "$@"
    ;;
|
|
809
|
+
10)
    # API Key Manager (external helper script shipped alongside this one).
    echo -e "${BLUE}Opening API Key Manager...${NC}"
    if [[ -f "$SCRIPT_DIR/api-manager" ]]; then
        "$SCRIPT_DIR/api-manager"
    else
        echo -e "${YELLOW}API Manager not found. Please install or check path.${NC}"
    fi
    echo ""
    echo -e "${YELLOW}Press Enter to return to main menu...${NC}"
    read -r
    # BUG FIX: re-exec with the original arguments instead of dropping them
    # on menu restart.
    exec "$0" "$@"
    ;;
|
|
824
|
+
11)
    # Custom LiteLLM model string, bridged through a local LiteLLM proxy.
    check_dependencies
    echo -e "${BLUE}Configuring Custom LiteLLM Model...${NC}"
    # -r preserves backslashes; model strings may contain special chars.
    read -r -p "Enter LiteLLM Model String: " MODEL_NAME
    read -r -p "Press Enter to continue..."

    start_litellm_proxy "$MODEL_NAME"
    export ANTHROPIC_BASE_URL="$LITELLM_HOST"
    export ANTHROPIC_API_KEY="sk-litellm"
    exec claude "$@"
    ;;
|
|
836
|
+
*)
    # Unrecognized menu selection: diagnostics belong on stderr.
    echo "Invalid choice" >&2
    exit 1
    ;;
esac
|