claude-all-config 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. package/LICENSE +21 -0
  2. package/LICENSE.md +70 -0
  3. package/README.md +133 -0
  4. package/VERSION +1 -0
  5. package/agents/accessibility-reviewer.md +96 -0
  6. package/agents/ai-prompt-optimizer.md +94 -0
  7. package/agents/api-tester.md +102 -0
  8. package/agents/code-generator.md +94 -0
  9. package/agents/code-reviewer.md +47 -0
  10. package/agents/component-generator.md +102 -0
  11. package/agents/doc-generator.md +91 -0
  12. package/agents/migration-generator.md +94 -0
  13. package/agents/performance-analyzer.md +90 -0
  14. package/agents/proactive-mode.md +91 -0
  15. package/agents/readme-generator.md +101 -0
  16. package/agents/security-auditor.md +86 -0
  17. package/agents/terraform-generator.md +94 -0
  18. package/agents/test-generator.md +76 -0
  19. package/bin/agentrouter.json +36 -0
  20. package/bin/ai-chat +20 -0
  21. package/bin/antigravity.json +76 -0
  22. package/bin/api-manager +340 -0
  23. package/bin/claude-launcher +19 -0
  24. package/bin/claude-master +15 -0
  25. package/bin/claude_master.py +295 -0
  26. package/bin/cohere.json +7 -0
  27. package/bin/deepseek.json +44 -0
  28. package/bin/gemini.json +56 -0
  29. package/bin/glm.json +21 -0
  30. package/bin/groq.json +41 -0
  31. package/bin/minimax.json +26 -0
  32. package/bin/mistral.json +7 -0
  33. package/bin/moonshot.json +7 -0
  34. package/bin/ollama.json +36 -0
  35. package/bin/openai.json +46 -0
  36. package/bin/openrouter.json +38 -0
  37. package/bin/perplexity.json +12 -0
  38. package/bin/qwen.json +7 -0
  39. package/bin/switch-provider +73 -0
  40. package/bin/test.json +7 -0
  41. package/bin/xai.json +41 -0
  42. package/claude-all +2707 -0
  43. package/claude-config.json +340 -0
  44. package/claude-suite/REFACTORING_SUMMARY.md +88 -0
  45. package/claude-suite/auth/.antigravity_proxy.py +78 -0
  46. package/claude-suite/auth/__pycache__/openai_auth.cpython-312.pyc +0 -0
  47. package/claude-suite/auth/gemini_auth.py +80 -0
  48. package/claude-suite/auth/openai_auth.py +138 -0
  49. package/claude-suite/backups/claude-all-before-refactor +1075 -0
  50. package/claude-suite/backups/claude-all.backup +840 -0
  51. package/claude-suite/backups/claude-all.original +840 -0
  52. package/claude-suite/models/add-model-manual.sh +588 -0
  53. package/claude-suite/models/add-model.sh +114 -0
  54. package/claude-suite/models/model-switcher.sh +69 -0
  55. package/claude-suite/providers/claude-glm +89 -0
  56. package/claude-suite/providers/claude-glm-wrapper.sh +55 -0
  57. package/claude-suite/providers/claude-minimax +12 -0
  58. package/claude-suite/providers/claude-smart +132 -0
  59. package/claude-suite/providers/xai_chat.sh +56 -0
  60. package/claude-suite/utils/__pycache__/claude_master.cpython-312.pyc +0 -0
  61. package/claude-suite/utils/antigravity_proxy_server.py +168 -0
  62. package/claude-suite/utils/claude-all-help.txt +83 -0
  63. package/claude-suite/utils/claude_master.py +408 -0
  64. package/commands/brainstorm.md +5 -0
  65. package/commands/execute-plan.md +5 -0
  66. package/commands/write-plan.md +5 -0
  67. package/docs/ANTIGRAVITY-SETUP.md +176 -0
  68. package/docs/AUTH_CREDENTIALS.md +54 -0
  69. package/docs/NPM-INSTALLATION.md +166 -0
  70. package/hooks/hooks.json +15 -0
  71. package/hooks/run-hook.cmd +19 -0
  72. package/hooks/session-start.sh +52 -0
  73. package/install.sh +155 -0
  74. package/mcp.json +34 -0
  75. package/model/perplexity.json +12 -0
  76. package/package.json +69 -0
  77. package/plugins/README.md +47 -0
  78. package/plugins/installed_plugins.json +317 -0
  79. package/plugins/known_marketplaces.json +10 -0
  80. package/plugins/marketplace-info/marketplace.json +517 -0
  81. package/postinstall.js +100 -0
  82. package/scripts/antigravity_proxy_server.py +168 -0
  83. package/scripts/get_gemini_api_key.py +96 -0
  84. package/scripts/setup_antigravity_auth.py +171 -0
  85. package/skills/api-development/SKILL.md +11 -0
  86. package/skills/api-development/openapi/api-documentation.yaml +108 -0
  87. package/skills/brainstorming/SKILL.md +54 -0
  88. package/skills/code-quality/SKILL.md +196 -0
  89. package/skills/condition-based-waiting/SKILL.md +120 -0
  90. package/skills/condition-based-waiting/example.ts +158 -0
  91. package/skills/database-development/SKILL.md +11 -0
  92. package/skills/database-development/migrations/migration.template.sql +49 -0
  93. package/skills/defense-in-depth/SKILL.md +127 -0
  94. package/skills/deployment/SKILL.md +11 -0
  95. package/skills/deployment/ci-cd/github-actions.yml +95 -0
  96. package/skills/deployment/docker/Dockerfile.template +39 -0
  97. package/skills/dispatching-parallel-agents/SKILL.md +180 -0
  98. package/skills/documentation-generation/SKILL.md +8 -0
  99. package/skills/documentation-generation/templates/README.template.md +60 -0
  100. package/skills/error-handling/SKILL.md +267 -0
  101. package/skills/executing-plans/SKILL.md +76 -0
  102. package/skills/finishing-a-development-branch/SKILL.md +200 -0
  103. package/skills/frontend-design/frontend-design/SKILL.md +42 -0
  104. package/skills/integration-testing/SKILL.md +13 -0
  105. package/skills/integration-testing/examples/contract-test.py +317 -0
  106. package/skills/integration-testing/examples/e2e-test.js +147 -0
  107. package/skills/integration-testing/examples/test-isolation.md +94 -0
  108. package/skills/logging-monitoring/SKILL.md +66 -0
  109. package/skills/mobile-development/SKILL.md +11 -0
  110. package/skills/mobile-development/responsive/responsive.css +80 -0
  111. package/skills/performance-optimization/SKILL.md +9 -0
  112. package/skills/performance-optimization/profiling/profile.template.js +21 -0
  113. package/skills/receiving-code-review/SKILL.md +209 -0
  114. package/skills/refactoring/SKILL.md +11 -0
  115. package/skills/refactoring/code-smells/common-smells.md +115 -0
  116. package/skills/requesting-code-review/SKILL.md +105 -0
  117. package/skills/requesting-code-review/code-reviewer.md +146 -0
  118. package/skills/root-cause-tracing/SKILL.md +174 -0
  119. package/skills/root-cause-tracing/find-polluter.sh +63 -0
  120. package/skills/security-review/SKILL.md +11 -0
  121. package/skills/security-review/checklists/owasp-checklist.md +31 -0
  122. package/skills/sharing-skills/SKILL.md +194 -0
  123. package/skills/subagent-driven-development/SKILL.md +240 -0
  124. package/skills/subagent-driven-development/code-quality-reviewer-prompt.md +20 -0
  125. package/skills/subagent-driven-development/implementer-prompt.md +78 -0
  126. package/skills/subagent-driven-development/spec-reviewer-prompt.md +61 -0
  127. package/skills/systematic-debugging/CREATION-LOG.md +119 -0
  128. package/skills/systematic-debugging/SKILL.md +295 -0
  129. package/skills/systematic-debugging/test-academic.md +14 -0
  130. package/skills/systematic-debugging/test-pressure-1.md +58 -0
  131. package/skills/systematic-debugging/test-pressure-2.md +68 -0
  132. package/skills/systematic-debugging/test-pressure-3.md +69 -0
  133. package/skills/test-driven-development/SKILL.md +364 -0
  134. package/skills/testing-anti-patterns/SKILL.md +302 -0
  135. package/skills/testing-skills-with-subagents/SKILL.md +387 -0
  136. package/skills/testing-skills-with-subagents/examples/CLAUDE_MD_TESTING.md +189 -0
  137. package/skills/ui-ux-review/SKILL.md +13 -0
  138. package/skills/ui-ux-review/checklists/ux-heuristics.md +61 -0
  139. package/skills/using-git-worktrees/SKILL.md +213 -0
  140. package/skills/using-superpowers/SKILL.md +101 -0
  141. package/skills/verification-before-completion/SKILL.md +139 -0
  142. package/skills/writing-plans/SKILL.md +116 -0
  143. package/skills/writing-skills/SKILL.md +622 -0
  144. package/skills/writing-skills/anthropic-best-practices.md +1150 -0
  145. package/skills/writing-skills/graphviz-conventions.dot +172 -0
  146. package/skills/writing-skills/persuasion-principles.md +187 -0
  147. package/update.sh +36 -0
  148. package/utils/check-superpowers.sh +114 -0
  149. package/utils/claude-branding.md +166 -0
  150. package/utils/config.js +185 -0
  151. package/utils/custom-claude-config.sh +89 -0
  152. package/utils/custom-claude-hooks.md +129 -0
  153. package/utils/custom-claude-lib.js +222 -0
  154. package/utils/customize-claude-ui.sh +162 -0
  155. package/utils/fix-claude-integration.sh +133 -0
  156. package/utils/help.js +125 -0
  157. package/utils/install-curl.ps1 +135 -0
  158. package/utils/install-curl.sh +525 -0
  159. package/utils/install-superpowers.js +411 -0
  160. package/utils/install.js +298 -0
  161. package/utils/install.sh +182 -0
  162. package/utils/postinstall.js +63 -0
  163. package/utils/rename-claude.sh +96 -0
  164. package/utils/uninstall-superpowers.js +273 -0
  165. package/utils/uninstall.ps1 +136 -0
  166. package/utils/uninstall.sh +163 -0
  167. package/utils/update.sh +160 -0
package/claude-suite/models/model-switcher.sh
@@ -0,0 +1,69 @@
+ #!/bin/bash
+
+ # Model switcher for chat mode
+ # Usage: pipe stdin to this script
+
+ SCRIPT_DIR="$(dirname "$(realpath "$0")")"
+ MODEL_FILE="$SCRIPT_DIR/model/glm.json"
+
+ # Colors used by the status messages below
+ GREEN='\033[0;32m'
+ RED='\033[0;31m'
+ YELLOW='\033[1;33m'
+ NC='\033[0m'
+
+ # Read stdin line by line
+ while IFS= read -r line; do
+     # Check if line starts with /model
+     if [[ "$line" =~ ^/model ]]; then
+         # Extract model name if provided
+         if [[ "$line" =~ ^/model[[:space:]]+([a-zA-Z0-9._-]+) ]]; then
+             new_model="${BASH_REMATCH[1]}"
+             # Prepend zhipu/ if not present
+             if [[ ! "$new_model" == zhipu/* ]]; then
+                 new_model="zhipu/$new_model"
+             fi
+             echo -e "${YELLOW}Switching to model: $new_model${NC}" >&2
+             # Output the model switch command
+             echo "/model_switch:$new_model"
+         else
+             # Show model selection menu
+             echo "" >&2
+             echo "=== Select Model ===" >&2
+             python3 -c "
+ import json
+
+ try:
+     with open('$MODEL_FILE', 'r') as f:
+         data = json.load(f)
+     for i, model in enumerate(data['models'], 1):
+         print(f'{i}) {model[\"name\"]} - {model[\"description\"]}')
+     print('')
+ except Exception as e:
+     print(f'Error: {e}')
+ " >&2
+
+             echo -n "Select model [1-9]: " >&2
+             read -t 5 choice 2>/dev/null
+
+             if [ -n "$choice" ] && [[ "$choice" =~ ^[0-9]+$ ]]; then
+                 model_id=$(python3 -c "
+ import json
+ with open('$MODEL_FILE', 'r') as f:
+     data = json.load(f)
+ try:
+     idx = int('$choice') - 1
+     if 0 <= idx < len(data['models']):
+         print(data['models'][idx]['id'])
+ except Exception:
+     pass
+ ")
+                 if [ -n "$model_id" ]; then
+                     echo -e "${GREEN}✓ Selected: $model_id${NC}" >&2
+                     echo "/model_switch:$model_id"
+                 else
+                     echo -e "${RED}Invalid selection${NC}" >&2
+                 fi
+             else
+                 echo -e "${YELLOW}No selection, keeping current model${NC}" >&2
+             fi
+         fi
+     else
+         # Regular line, output as is
+         echo "$line"
+     fi
+ done
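For reference, a minimal sketch of how the switcher is meant to be driven, assuming it is invoked at claude-suite/models/model-switcher.sh relative to the package root and that a downstream consumer interprets the emitted /model_switch: lines (both are assumptions, not confirmed by the package):

    # Hypothetical invocation: pipe a chat transcript through the switcher.
    # Plain lines pass through; "/model glm-4.6" becomes "/model_switch:zhipu/glm-4.6".
    printf '%s\n' "Hello" "/model glm-4.6" "How are you?" \
        | bash claude-suite/models/model-switcher.sh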
package/claude-suite/providers/claude-glm
@@ -0,0 +1,89 @@
+ #!/usr/bin/env bash
+
+ # Direct GLM chat launcher with model switching support
+ # Cross-platform: Linux, Termux, macOS, Windows (Git Bash/WSL)
+ # Usage: claude-glm [model_name]
+
+ # Platform detection
+ detect_platform() {
+     case "$(uname -s)" in
+         Linux*)                echo "Linux";;
+         Darwin*)               echo "macOS";;
+         CYGWIN*|MINGW*|MSYS*)  echo "Windows";;
+         *)                     echo "Linux";;
+     esac
+ }
+
+ PLATFORM=$(detect_platform)
+
+ # Colors - Support both Linux/macOS and Windows
+ if [[ "$PLATFORM" == "Windows" ]]; then
+     if command -v tput &> /dev/null; then
+         GREEN=$(tput setaf 2 2>/dev/null || echo "")
+         BLUE=$(tput setaf 4 2>/dev/null || echo "")
+         RED=$(tput setaf 1 2>/dev/null || echo "")
+         YELLOW=$(tput setaf 3 2>/dev/null || echo "")
+         NC=$(tput sgr0 2>/dev/null || echo "")
+     else
+         GREEN=''
+         BLUE=''
+         RED=''
+         YELLOW=''
+         NC=''
+     fi
+ else
+     GREEN='\033[0;32m'
+     BLUE='\033[0;34m'
+     RED='\033[0;31m'
+     YELLOW='\033[1;33m'
+     NC='\033[0m'
+ fi
+
+ # Portable home directory
+ if [[ -n "$HOME" ]]; then
+     USER_HOME="$HOME"
+ elif [[ -n "$USERPROFILE" ]]; then
+     USER_HOME="$USERPROFILE"
+ else
+     echo -e "${RED}Unable to determine home directory (HOME and USERPROFILE are unset).${NC}"
+     exit 1
+ fi
+
+ GLM_API_KEY_FILE="$USER_HOME/.glm_api_key"
+
+ # Get API key
+ if [[ -f "$GLM_API_KEY_FILE" ]]; then
+     API_KEY=$(cat "$GLM_API_KEY_FILE" 2>/dev/null)
+ else
+     echo -e "${RED}No API key found. Run 'claude-all 7' first.${NC}"
+     exit 1
+ fi
+
+ # Parse arguments
+ MODEL_NAME="$1"
+ if [[ -z "$MODEL_NAME" ]]; then
+     # No model specified, show menu
+     echo -e "${BLUE}=== GLM Model Selection ===${NC}"
+     echo ""
+     echo "1) GLM-4.7 (Latest flagship - Smartest)"
+     echo "2) GLM-4.6 (Previous flagship)"
+     echo "3) GLM-4.5 Air (Fast variant - Quick)"
+     echo ""
+     read -p "Select model [1-3]: " choice
+
+     case $choice in
+         1) MODEL_NAME="glm-4.7" ;;
+         2) MODEL_NAME="glm-4.6" ;;
+         3) MODEL_NAME="glm-4.5-air" ;;
+         *) MODEL_NAME="glm-4.7" ;;
+     esac
+ fi
+
+ # Start chat with selected model
+ echo -e "${GREEN}✓ Starting chat with $MODEL_NAME${NC}"
+ echo ""
+
+ export ANTHROPIC_BASE_URL="https://api.z.ai/api/anthropic"
+ export ANTHROPIC_API_KEY="$API_KEY"
+ export ANTHROPIC_AUTH_TOKEN="$API_KEY"
+
+ exec claude --model "$MODEL_NAME" "${@:2}"
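A usage sketch for the launcher above; the key file path and model names come from the script itself, and the relative path is only illustrative:

    # Assumes ~/.glm_api_key already exists (created by 'claude-all 7').
    # Argument 1 selects the model; any further arguments are forwarded to claude.
    claude-suite/providers/claude-glm glm-4.5-air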
package/claude-suite/providers/claude-glm-wrapper.sh
@@ -0,0 +1,55 @@
+ #!/bin/bash
+
+ # Wrapper for claude CLI with GLM model support
+ # This enables /model command to work with GLM
+
+ GLM_API_KEY_FILE="$HOME/.glm_api_key"
+ SCRIPT_DIR="$(dirname "$(realpath "$0")")"
+ GREEN='\033[0;32m'
+ BLUE='\033[0;34m'
+ RED='\033[0;31m'
+ NC='\033[0m'
+
+ # Function to load API key with error handling
+ load_api_key() {
+     local key_file="$1"
+     local provider_name="$2"
+
+     if [ ! -f "$key_file" ]; then
+         echo -e "${RED}Error: No $provider_name API key found${NC}" >&2
+         echo "Please run 'claude-all 7' first to set up your API key." >&2
+         return 1
+     fi
+
+     local api_key
+     api_key=$(cat "$key_file" 2>/dev/null)
+     if [ -z "$api_key" ]; then
+         echo -e "${RED}Error: $provider_name API key file is empty${NC}" >&2
+         echo "Please run 'claude-all 7' again to set up your API key." >&2
+         return 1
+     fi
+
+     echo "$api_key"
+     return 0
+ }
+
+ # Get API key with proper error handling
+ API_KEY=$(load_api_key "$GLM_API_KEY_FILE" "GLM")
+ if [ $? -ne 0 ]; then
+     exit 1
+ fi
+
+ # Set environment variables
+ export ANTHROPIC_AUTH_TOKEN="$API_KEY"
+ export ANTHROPIC_BASE_URL="https://api.z.ai/api/anthropic"
+ export ANTHROPIC_API_KEY="$API_KEY"
+
+ # Model name from argument or default
+ MODEL_NAME="${1:-glm-4.7}"
+
+ echo -e "${BLUE}Starting GLM Chat${NC}"
+ echo -e "${GREEN}Model: $MODEL_NAME${NC}"
+ echo ""
+
+ # Start claude with model
+ exec claude --model "$MODEL_NAME" "${@:2}"
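A sketch of how the wrapper might be invoked; the relative path is illustrative, and the defaulting behavior shown is taken from the script:

    # Default model (glm-4.7), no arguments:
    claude-suite/providers/claude-glm-wrapper.sh
    # Explicit model; anything after the model name is forwarded to claude:
    claude-suite/providers/claude-glm-wrapper.sh glm-4.6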
package/claude-suite/providers/claude-minimax
@@ -0,0 +1,12 @@
+ #!/bin/bash
+
+ # Configuration for the MiniMax API (Anthropic-compatible)
+ export ANTHROPIC_BASE_URL="https://api.minimax.io/anthropic"
+ export ANTHROPIC_API_KEY="eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJZdWRpIEhhcnlhbnRvIChDb2FjaCBZSCkiLCJVc2VyTmFtZSI6Ill1ZGkgSGFyeWFudG8gKENvYWNoIFlIKSIsIkFjY291bnQiOiIiLCJTdWJqZWN0SUQiOiIxOTg2MDI2NDgyMzE2NjE2NjQwIiwiUGhvbmUiOiIiLCJHcm91cElEIjoiMTk4NjAyNjQ4MjMxMjQxODI0MCIsIlBhZ2VOYW1lIjoiIiwiTWFpbCI6Inl1ZGloYXJ5YW50bzQxQGdtYWlsLmNvbSIsIkNyZWF0ZVRpbWUiOiIyMDI1LTEyLTExIDE2OjU1OjMyIiwiVG9rZW5UeXBlIjoxLCJpc3MiOiJtaW5pbWF4In0.GKf3IhLmsqhSl5UdNsDEJCyPSjxVJ1CnwgQMCYb1VwUC0pi6CU9b0QmN9TLiwBGBnlJhY1ZZQTda_oDfE-wjn2txJqq_2d99s_yqfdh1GN_qrqGSUDZMiRHXmfj8TG7xR2JCZ0WqADziAMxXwn0M9YnfPEKZaPv8_sY2KmDtlvC_JJbCq_chr6I9N5TRHARN9cae_jy8RLvRAp9j7fGohzMa8GZdP8ALtNf3IYIGPz9dPiBIUAw61O6kH_wsC86c3rumww3sO0zxpztEarKXE8fXvwtN3RdoM8aDCWyhZaMO3NhHEQq2Or1fsvVrhKyFJKd_z9Loi1MM6fsLz1UdjA"
+
+ # Set the default model to MiniMax-M2 (or another model supported by MiniMax)
+ export ANTHROPIC_MODEL="MiniMax-M2"
+
+ # Run claude with the given arguments
+ echo "🚀 Launching Claude Code with the MiniMax API..."
+ exec claude "$@"
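The key above is hardcoded in the published file; a variant that reads it at runtime, sketched under the assumption that a ~/.minimax_api_key file would be provisioned the same way the GLM launchers in this package use ~/.glm_api_key, would look like this:

    # Hypothetical: load the MiniMax key from a file instead of hardcoding it.
    MINIMAX_API_KEY_FILE="$HOME/.minimax_api_key"
    if [[ -f "$MINIMAX_API_KEY_FILE" ]]; then
        export ANTHROPIC_API_KEY="$(cat "$MINIMAX_API_KEY_FILE")"
    else
        echo "No MiniMax API key found at $MINIMAX_API_KEY_FILE" >&2
        exit 1
    fi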
package/claude-suite/providers/claude-smart
@@ -0,0 +1,132 @@
+ #!/usr/bin/env bash
+
+ # Smart Claude Launcher with GLM Auto-Switch
+ # Cross-platform: Linux, Termux, macOS, Windows (Git Bash/WSL)
+ # Usage: claude-smart
+
+ # Platform detection
+ detect_platform() {
+     case "$(uname -s)" in
+         Linux*)                echo "Linux";;
+         Darwin*)               echo "macOS";;
+         CYGWIN*|MINGW*|MSYS*)  echo "Windows";;
+         *)                     echo "Linux";;
+     esac
+ }
+
+ PLATFORM=$(detect_platform)
+
+ # Colors - Support both Linux/macOS and Windows
+ if [[ "$PLATFORM" == "Windows" ]]; then
+     if command -v tput &> /dev/null; then
+         GREEN=$(tput setaf 2 2>/dev/null || echo "")
+         BLUE=$(tput setaf 4 2>/dev/null || echo "")
+         RED=$(tput setaf 1 2>/dev/null || echo "")
+         YELLOW=$(tput setaf 3 2>/dev/null || echo "")
+         NC=$(tput sgr0 2>/dev/null || echo "")
+     else
+         GREEN=''
+         BLUE=''
+         RED=''
+         YELLOW=''
+         NC=''
+     fi
+ else
+     GREEN='\033[0;32m'
+     BLUE='\033[0;34m'
+     RED='\033[0;31m'
+     YELLOW='\033[1;33m'
+     NC='\033[0m'
+ fi
+
+ # Portable home directory
+ if [[ -n "$HOME" ]]; then
+     USER_HOME="$HOME"
+ elif [[ -n "$USERPROFILE" ]]; then
+     USER_HOME="$USERPROFILE"
+ else
+     echo -e "${RED}Unable to determine home directory (HOME and USERPROFILE are unset).${NC}"
+     exit 1
+ fi
+
+ GLM_API_KEY_FILE="$USER_HOME/.glm_api_key"
+
+ # Default to Anthropic
+ CURRENT_PROVIDER="anthropic"
+ MODEL_NAME=""
+
+ # Parse arguments to detect model
+ while [[ $# -gt 0 ]]; do
+     case $1 in
+         --model)
+             MODEL_NAME="$2"
+             # Check if it's a GLM model
+             if [[ "$MODEL_NAME" == *"glm"* ]]; then
+                 CURRENT_PROVIDER="glm"
+             fi
+             shift 2
+             ;;
+         *)
+             shift
+             ;;
+     esac
+ done
+
+ # If no model specified, start interactive selection
+ if [[ -z "$MODEL_NAME" ]]; then
+     echo -e "${BLUE}=== AI Model Selection ===${NC}"
+     echo ""
+     echo "ANTHROPIC MODELS:"
+     echo "1) claude-3-5-sonnet-latest (Smartest)"
+     echo "2) claude-3-haiku-latest (Fast)"
+     echo ""
+     echo "GLM MODELS:"
+     echo "3) glm-4.7 (GLM Latest Flagship)"
+     echo "4) glm-4.6 (GLM Previous Flagship)"
+     echo "5) glm-4.5-air (GLM Fast)"
+     echo ""
+     read -p "Select model [1-5]: " choice
+
+     case $choice in
+         1) MODEL_NAME="claude-3-5-sonnet-latest"; CURRENT_PROVIDER="anthropic" ;;
+         2) MODEL_NAME="claude-3-haiku-latest"; CURRENT_PROVIDER="anthropic" ;;
+         3) MODEL_NAME="glm-4.7"; CURRENT_PROVIDER="glm" ;;
+         4) MODEL_NAME="glm-4.6"; CURRENT_PROVIDER="glm" ;;
+         5) MODEL_NAME="glm-4.5-air"; CURRENT_PROVIDER="glm" ;;
+         *) MODEL_NAME="claude-3-5-sonnet-latest"; CURRENT_PROVIDER="anthropic" ;;
+     esac
+ fi
+
+ # Configure based on provider
+ if [[ "$CURRENT_PROVIDER" == "glm" ]]; then
+     # GLM Mode
+     if [[ -f "$GLM_API_KEY_FILE" ]]; then
+         API_KEY=$(cat "$GLM_API_KEY_FILE" 2>/dev/null)
+         export ANTHROPIC_BASE_URL="https://api.z.ai/api/anthropic"
+         export ANTHROPIC_API_KEY="$API_KEY"
+         export ANTHROPIC_AUTH_TOKEN="$API_KEY"
+         echo -e "${GREEN}✓ Using GLM endpoint: $MODEL_NAME${NC}"
+     else
+         echo -e "${RED}No GLM API key found. Run 'claude-all 7' first.${NC}"
+         exit 1
+     fi
+ else
+     # Anthropic Mode
+     if [[ -z "$ANTHROPIC_API_KEY" ]]; then
+         echo "Enter Anthropic API Key:"
+         if [[ "$PLATFORM" == "Windows" ]]; then
+             read ANTHROPIC_API_KEY
+         else
+             read -s ANTHROPIC_API_KEY
+         fi
+         export ANTHROPIC_API_KEY
+     fi
+     export ANTHROPIC_BASE_URL="https://api.anthropic.com"
+     echo -e "${GREEN}✓ Using Anthropic endpoint: $MODEL_NAME${NC}"
+ fi
+
+ echo ""
+ echo "Starting chat..."
+ echo ""
+
+ # Launch claude
+ exec claude --model "$MODEL_NAME"
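Two usage sketches for the smart launcher; the relative paths are illustrative, and the switching behavior shown follows the script's own argument parsing:

    # Interactive menu (no --model given):
    claude-suite/providers/claude-smart
    # Non-interactive: a model name containing "glm" routes to the z.ai endpoint.
    claude-suite/providers/claude-smart --model glm-4.6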
package/claude-suite/providers/xai_chat.sh
@@ -0,0 +1,56 @@
+ #!/bin/bash
+ # xAI Chat Wrapper for Claude-All
+ # Uses OpenAI-compatible API
+
+ XAI_API_KEY="$1"
+ MODEL="${2:-grok-4-latest}"
+ shift 2
+
+ # Build messages array
+ messages="["
+ add_message=true
+
+ while [[ $# -gt 0 ]]; do
+     if [[ "$1" == "--system" && -n "$2" ]]; then
+         if [[ "$messages" != "[" ]]; then
+             messages+=","
+         fi
+         messages+="{\"role\":\"system\",\"content\":\"$2\"}"
+         add_message=true
+         shift 2
+     elif [[ "$1" == "--user" && -n "$2" ]]; then
+         if [[ "$messages" != "[" ]]; then
+             messages+=","
+         fi
+         messages+="{\"role\":\"user\",\"content\":\"$2\"}"
+         add_message=true
+         shift 2
+     elif [[ "$1" == "--assistant" && -n "$2" ]]; then
+         if [[ "$messages" != "[" ]]; then
+             messages+=","
+         fi
+         messages+="{\"role\":\"assistant\",\"content\":\"$2\"}"
+         add_message=true
+         shift 2
+     else
+         # Default as user message
+         if [[ "$messages" != "[" ]]; then
+             messages+=","
+         fi
+         messages+="{\"role\":\"user\",\"content\":\"$1\"}"
+         shift
+     fi
+ done
+
+ messages+="]"
+
+ # API call
+ curl -s "https://api.x.ai/v1/chat/completions" \
+     -H "Content-Type: application/json" \
+     -H "Authorization: Bearer $XAI_API_KEY" \
+     -d "{
+         \"messages\": $messages,
+         \"model\": \"$MODEL\",
+         \"stream\": false,
+         \"temperature\": 0
+     }" | jq -r '.choices[0].message.content'
package/claude-suite/utils/antigravity_proxy_server.py
@@ -0,0 +1,168 @@
+ #!/usr/bin/env python3
+ """
+ AntiGravity Proxy Server
+ Simple proxy to handle Google internal authentication
+ """
+
+ import json
+ import os
+ import sys
+ import http.server
+ import socketserver
+ import urllib.parse
+ import urllib.request
+ from datetime import datetime
+
+ class AntiGravityProxyHandler(http.server.BaseHTTPRequestHandler):
+     def do_GET(self):
+         self.send_response(200)
+         self.send_header('Content-type', 'application/json')
+         self.end_headers()
+
+         if self.path == '/health':
+             response = {"status": "ok", "service": "antigravity-proxy"}
+             self.wfile.write(json.dumps(response).encode())
+         elif self.path == '/':
+             # Simple UI
+             html = """
+             <!DOCTYPE html>
+             <html>
+             <head>
+                 <title>AntiGravity Proxy</title>
+                 <style>
+                     body { font-family: Arial, sans-serif; padding: 20px; }
+                     .container { max-width: 800px; margin: 0 auto; }
+                     .auth-section { background: #f5f5f5; padding: 20px; margin: 20px 0; }
+                     input { padding: 10px; margin: 5px; }
+                     button { padding: 10px 20px; background: #4285f4; color: white; border: none; cursor: pointer; }
+                 </style>
+             </head>
+             <body>
+                 <div class="container">
+                     <h1>AntiGravity Proxy Server</h1>
+                     <div class="auth-section">
+                         <h2>Google Authentication</h2>
+                         <p>This proxy handles authentication for Google Internal AI models.</p>
+                         <p>Status: <span id="status">Not authenticated</span></p>
+                     </div>
+                     <div class="auth-section">
+                         <h2>Usage</h2>
+                         <p>API Endpoint: <code>http://localhost:8080/v1</code></p>
+                         <p>Compatible with OpenAI/Anthropic API format.</p>
+                     </div>
+                 </div>
+             </body>
+             </html>
+             """
+             self.wfile.write(html.encode())
+         else:
+             self.send_error(404)
+
+     def do_POST(self):
+         if self.path.startswith('/v1/'):
+             self.handle_anthropic_api()
+         else:
+             self.send_error(404)
+
+     def do_OPTIONS(self):
+         self.send_response(200)
+         self.send_header('Access-Control-Allow-Origin', '*')
+         self.send_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
+         self.send_header('Access-Control-Allow-Headers', 'Content-Type, Authorization')
+         self.end_headers()
+
+     def handle_anthropic_api(self):
+         # Handle Anthropic-compatible API
+         content_length = int(self.headers.get('Content-Length', 0))
+         post_data = self.rfile.read(content_length)
+
+         try:
+             request_data = json.loads(post_data.decode())
+             messages = request_data.get('messages', [])
+             model = request_data.get('model', 'gemini-2.0-flash')
+
+             # Set CORS headers
+             self.send_response(200)
+             self.send_header('Access-Control-Allow-Origin', '*')
+             self.send_header('Content-Type', 'application/json')
+             self.end_headers()
+
+             # For now, return a mock response
+             # In production, this would call the actual AntiGravity API
+             response = {
+                 "id": f"msg_{datetime.now().timestamp()}",
+                 "type": "message",
+                 "role": "assistant",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": f"[AntiGravity Proxy] Received request for model: {model}\n\nThis is a proxy response. The actual implementation would:\n1. Authenticate with Google internal systems\n2. Route to AntiGravity API\n3. Return the real model response\n\nYour message: {messages[-1]['content'] if messages else 'No message'}"
+                     }
+                 ],
+                 "model": model,
+                 "stop_reason": "end_turn",
+                 "stop_sequence": None,
+                 "usage": {
+                     "input_tokens": 10,
+                     "output_tokens": 50
+                 }
+             }
+
+             self.wfile.write(json.dumps(response).encode())
+
+         except Exception as e:
+             self.send_response(500)
+             self.send_header('Content-Type', 'application/json')
+             self.end_headers()
+             error_response = {
+                 "error": {
+                     "type": "api_error",
+                     "message": str(e)
+                 }
+             }
+             self.wfile.write(json.dumps(error_response).encode())
+
+     def log_message(self, format, *args):
+         # Suppress logs for cleaner output
+         pass
+
+ def main():
+     PORT = 8080
+
+     # Check if auth exists
+     auth_dir = os.path.expanduser("~/.config/claude-all/antigravity")
+     auth_files = []
+     if os.path.exists(auth_dir):
+         auth_files = [f for f in os.listdir(auth_dir) if f.endswith('.json')]
+
+     print("=" * 60)
+     print("AntiGravity Proxy Server")
+     print("=" * 60)
+     print(f"Port: {PORT}")
+     print(f"Auth files found: {len(auth_files)}")
+
+     if auth_files:
+         print("✓ Authentication files available")
+         for f in auth_files:
+             print(f" - {f}")
+     else:
+         print("⚠️ No authentication files found")
+         print(" Run: python3 setup_google_internal_auth.py")
+
+     print()
+     print("Starting server...")
+     print(f"URL: http://localhost:{PORT}")
+     print(f"API: http://localhost:{PORT}/v1")
+     print()
+     print("Press Ctrl+C to stop")
+     print("=" * 60)
+
+     with socketserver.TCPServer(("", PORT), AntiGravityProxyHandler) as httpd:
+         try:
+             httpd.serve_forever()
+         except KeyboardInterrupt:
+             print("\nShutting down...")
+             httpd.shutdown()
+
+ if __name__ == "__main__":
+     main()
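A quick smoke test against the proxy above, assuming it is running locally on its default port 8080; the handler accepts any POST under /v1/ and currently returns the mock message built in handle_anthropic_api:

    # Health check:
    curl -s http://localhost:8080/health
    # Anthropic-style request (mock response for now):
    curl -s http://localhost:8080/v1/messages \
        -H "Content-Type: application/json" \
        -d '{"model": "gemini-2.0-flash", "messages": [{"role": "user", "content": "ping"}]}'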
package/claude-suite/utils/claude-all-help.txt
@@ -0,0 +1,83 @@
+ Claude Code Multi-Model Launcher - Help
+ =======================================
+
+ Usage:
+     claude-all <provider> [options] [message]
+     claude-all --help
+
+ Provider Options (1-10):
+     1) MiniMax (Direct Anthropic API)
+     2) Google Gemini (API Key - AI Studio)
+     3) Google Gemini (OAuth - Vertex AI)
+     4) OpenAI (API Key)
+     5) OpenAI (OAuth - Experimental)
+     6) xAI / Grok (API Key)
+     7) ZhipuAI / GLM (API Key)
+     8) Groq (API Key)
+     9) Ollama (Local Models)
+     10) Custom / Other
+
+ Options:
+     -m, --model <model_name>    Specify exact model to use
+     -h, --help                  Show this help message
+
+ Examples:
+     # Use default model (auto-selects first model)
+     claude-all 7
+     echo "Hello" | claude-all 7
+
+     # Use specific model
+     claude-all 7 --model glm-4.5-air
+     claude-all 4 -m o1-preview
+     claude-all 8 -m llama-3.3-70b-versatile
+
+     # Direct message (no need for /model prefix)
+     echo "Write a function" | claude-all 7 --model glm-4.7
+
+ Available Models by Provider:
+ -----------------------------
+
+ GLM (10):
+     - glm-4.7 (default)
+     - glm-4.6
+     - glm-4.6-flash
+     - glm-4.6-plus
+     - glm-4.5-air
+     - glm-4.5-pro
+     - glm-4-plus
+     - glm-4-air
+     - glm-4-airx
+     - glm-4
+
+ OpenAI (8):
+     - o1-preview
+     - o1-mini
+     - gpt-4o (default)
+     - gpt-4o-mini
+     - gpt-4-turbo
+     - gpt-4o-2024-08-06
+     - gpt-4
+     - gpt-3.5-turbo
+
+ Groq (7):
+     - llama-3.3-70b-versatile (default)
+     - llama-3.1-70b-versatile
+     - llama-3.1-8b-instant
+     - llama3-70b-8192
+     - llama3-8b-8192
+     - mixtral-8x7b-32768
+     - gemma2-9b-it
+
+ Gemini (7):
+     - gemini-2.0-flash-exp
+     - gemini-1.5-pro
+     - gemini-1.5-pro-latest
+     - gemini-1.5-flash
+     - gemini-1.5-flash-8b
+     - gemini-1.5-flash-latest
+     - gemini-1.0-pro
+
+ Tips:
+     - Use --model to avoid auto-selection
+     - You can also set API key via environment variable
+     - No need to type /model prefix when piping messages
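As a worked example of the last tip, a minimal sketch that assumes the environment-variable convention used by the bundled claude-smart launcher (ANTHROPIC_API_KEY); whether claude-all itself reads the same variable is not confirmed here:

    # Export the key once, then launch without being prompted for it.
    export ANTHROPIC_API_KEY="sk-ant-your-key-here"
    claude-suite/providers/claude-smart --model claude-3-5-sonnet-latest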