jekyll-theme-zer0 0.22.20 → 0.22.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +74 -4
- data/README.md +325 -40
- data/_data/README.md +1 -0
- data/_data/roadmap.yml +215 -0
- data/scripts/bin/install +717 -0
- data/scripts/bin/test +45 -2
- data/scripts/generate-roadmap.rb +200 -0
- data/scripts/generate-roadmap.sh +21 -0
- data/scripts/lib/install/README.md +63 -0
- data/scripts/lib/install/agents.sh +166 -0
- data/scripts/lib/install/ai/diagnose.sh +199 -0
- data/scripts/lib/install/ai/openai.sh +233 -0
- data/scripts/lib/install/ai/suggest.sh +182 -0
- data/scripts/lib/install/ai/wizard.sh +160 -0
- data/scripts/lib/install/config.sh +56 -0
- data/scripts/lib/install/deploy/README.md +52 -0
- data/scripts/lib/install/deploy/azure-swa.sh +50 -0
- data/scripts/lib/install/deploy/docker-prod.sh +71 -0
- data/scripts/lib/install/deploy/github-pages.sh +44 -0
- data/scripts/lib/install/deploy/registry.sh +190 -0
- data/scripts/lib/install/doctor.sh +301 -0
- data/scripts/lib/install/fs.sh +52 -0
- data/scripts/lib/install/logging.sh +33 -0
- data/scripts/lib/install/pages.sh +255 -0
- data/scripts/lib/install/platform.sh +71 -0
- data/scripts/lib/install/profile.sh +113 -0
- data/scripts/lib/install/template.sh +137 -0
- data/scripts/lib/install/upgrade.sh +184 -0
- data/scripts/lib/install/wizard_interactive.sh +189 -0
- metadata +27 -2
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# scripts/lib/install/ai/diagnose.sh
|
|
3
|
+
#
|
|
4
|
+
# `install diagnose [--ai]` — Jekyll build / runtime error analysis.
|
|
5
|
+
#
|
|
6
|
+
# Two modes:
|
|
7
|
+
#
|
|
8
|
+
# 1. Rule-based (default, no network). Pattern-matches a curated list of
|
|
9
|
+
# known errors against a build log and prints structured fixes.
|
|
10
|
+
#
|
|
11
|
+
# 2. AI-assisted (`--ai`). Sends a sanitized error log + the most relevant
|
|
12
|
+
# config files to OpenAI, returns a unified diff for user review.
|
|
13
|
+
#
|
|
14
|
+
# Public API:
|
|
15
|
+
# diagnose_run <target_dir> <repo_root> [--log <file>] [--ai] [--auto-accept]
|
|
16
|
+
#
|
|
17
|
+
# If --log is not provided, runs `jekyll build` once and captures output.
|
|
18
|
+
|
|
19
|
+
# Library version marker — unused in this file; exposed for sourcing scripts.
# shellcheck disable=SC2034
AI_DIAGNOSE_LIB_VERSION="1.0.0"
|
|
21
|
+
|
|
22
|
+
# ----- Rule-based pattern table --------------------------------------------
|
|
23
|
+
# Each rule: [pattern_regex] [short_label] [explanation+fix]
|
|
24
|
+
# Order matters — first match wins. Keep patterns specific.
|
|
25
|
+
# Emit the curated rule table, one rule per line:
#   <pattern_regex>|<short_label>|<explanation and suggested fix>
# First match wins downstream; the fix text may itself contain '|' — consumers
# must split only on the first two separators.
_diagnose_rules() {
  # Quoted 'EOF' delimiter: contents are literal, no $-expansion or backticks.
  cat <<'EOF'
theme could not be found|MISSING_THEME|The Jekyll theme gem is not installed. Run `bundle install` (or `bundle update jekyll-theme-zer0`) and confirm the gem name in _config.yml matches your Gemfile entry.
Address already in use|PORT_IN_USE|Port 4000 is already bound. Either stop the existing process (`lsof -ti :4000 | xargs kill`) or run on a different port (`bundle exec jekyll serve --port 4001`).
You have requested:.*Could not find compatible versions|GEM_VERSION_CONFLICT|A gem version constraint cannot be satisfied. Run `bundle update` to refresh the lockfile, or pin to compatible versions in your Gemfile.
Liquid Exception:.*Could not locate the included file|MISSING_INCLUDE|A `{% include %}` tag references a file that doesn't exist. Verify the path inside `_includes/` and check for typos.
Liquid Exception|LIQUID_ERROR|A Liquid template raised an error. Check the file path printed above the exception and look for unmatched tags ({% if %} without {% endif %}, etc.).
SassC::SyntaxError|SASS_SYNTAX|Sass compilation failed. Review the file/line printed in the error and check for missing semicolons, unclosed braces, or invalid @import paths.
No such file or directory @ rb_sysopen|MISSING_FILE|Jekyll tried to open a file that doesn't exist. Common causes: deleted but still referenced in _config.yml or front matter; typo in include path.
incompatible character encodings|ENCODING_ISSUE|A file has mixed encodings. Re-save the offending file as UTF-8 without BOM.
EOF
}
|
|
37
|
+
|
|
38
|
+
# Match the captured build log against the rule table from _diagnose_rules
# and print the label plus suggested fix for every matching rule.
#
# Arguments:
#   $1 - path to the build log file
# Returns:
#   0 if at least one rule matched, 1 otherwise.
_diagnose_rule_based() {
  local log_file="$1"
  # BUGFIX: 'pattern' is now declared local (it previously leaked as a
  # global); the unused 'line'/'key' locals were removed.
  local matched=0 pattern label fix
  # The fix text may contain '|': read assigns the remainder of the line
  # (separators included) to the last variable, so 'fix' stays intact.
  while IFS='|' read -r pattern label fix; do
    [[ -z "$pattern" ]] && continue
    if grep -qE "$pattern" "$log_file" 2>/dev/null; then
      log_info "Matched rule: $label"
      echo " ↳ $fix"
      echo
      matched=$((matched+1))
    fi
  done < <(_diagnose_rules)

  if [[ "$matched" = "0" ]]; then
    log_warning "No known patterns matched. Re-run with --ai for AI analysis (requires OPENAI_API_KEY)."
    return 1
  fi
  log_success "Matched $matched rule(s)."
  return 0
}
|
|
58
|
+
|
|
59
|
+
# Run a one-shot `jekyll build` inside the target directory and capture its
# combined stdout/stderr into the given log file. A failing build is expected
# here — the caller only wants the captured output, so the exit status is
# deliberately swallowed.
#
# Arguments:
#   $1 - site directory to build in
#   $2 - file that receives the build output
_diagnose_capture_build_log() {
  local site_dir="$1"
  local capture_file="$2"
  log_info "Running 'jekyll build' to capture errors ..."
  # Subshell keeps the cd scoped; '|| true' swallows any build failure.
  (
    cd "$site_dir" || exit 1
    if [[ -f Gemfile ]] && command -v bundle >/dev/null 2>&1; then
      bundle exec jekyll build 2>&1 | tee "$capture_file"
    elif command -v jekyll >/dev/null 2>&1; then
      jekyll build 2>&1 | tee "$capture_file"
    else
      echo "neither bundler nor jekyll is installed" > "$capture_file"
      return 1
    fi
  ) || true
}
|
|
74
|
+
|
|
75
|
+
# AI-assisted diagnosis: send a sanitized build log plus the most relevant
# config files to OpenAI and print the model's analysis (possibly a unified
# diff) for the user to review and apply manually.
#
# Arguments:
#   $1 - log_file     path to the captured build log
#   $2 - target_dir   site directory (read for _config.yml / Gemfile)
#   $3 - repo_root    root containing templates/ai/prompts/
#   $4 - auto_accept  "1" skips the pre-call confirmation prompt
# Returns:
#   0 on a successful API round-trip, 1 on any refusal or failure.
_diagnose_ai() {
  local log_file="$1" target_dir="$2" repo_root="$3" auto_accept="$4"

  # Honor the global kill-switch and the API-key requirement up front.
  if ! ai_enabled; then
    log_warning "AI is disabled (ZER0_NO_AI=1) — using rule-based mode only."
    return 1
  fi
  if ! ai_require_key; then
    return 1
  fi

  local sys_prompt_file="$repo_root/templates/ai/prompts/diagnose-system.md"
  if [[ ! -f "$sys_prompt_file" ]]; then
    log_error "System prompt missing: $sys_prompt_file"
    return 1
  fi
  local system_prompt
  system_prompt="$(cat "$sys_prompt_file")"

  # Build sanitized context (last 80 lines of log + _config.yml + Gemfile);
  # ai_sanitize_text strips keys/emails/home paths before anything leaves
  # this host.
  local sanitized_log sanitized_cfg sanitized_gem
  sanitized_log="$(tail -n 80 "$log_file" | ai_sanitize_text)"
  if [[ -f "$target_dir/_config.yml" ]]; then
    sanitized_cfg="$(ai_sanitize_text < "$target_dir/_config.yml")"
  else
    sanitized_cfg="(missing)"
  fi
  if [[ -f "$target_dir/Gemfile" ]]; then
    sanitized_gem="$(ai_sanitize_text < "$target_dir/Gemfile")"
  else
    sanitized_gem="(missing)"
  fi

  # NOTE: continuation lines are intentionally unindented — they are inside
  # the quoted string, and any indent would end up in the prompt itself.
  local user_prompt="===== BUILD LOG (last 80 lines) =====
${sanitized_log}

===== _config.yml =====
${sanitized_cfg}

===== Gemfile =====
${sanitized_gem}

Diagnose the failure and propose a minimal fix. If a file change is needed, output a unified diff. Be concise."

  local model
  model="$(ai_default_model diagnose)"
  # Show a rough spend estimate before asking permission to make the call.
  local in_chars=$(( ${#system_prompt} + ${#user_prompt} ))
  log_info "About to call OpenAI:"
  ai_estimate_cost "$model" "$in_chars" 600

  if [[ "$auto_accept" != "1" ]]; then
    printf "Proceed with API call? [y/N] "
    local go
    read -r go
    if [[ ! "$go" =~ ^[Yy]$ ]]; then
      log_warning "Aborted by user."
      return 1
    fi
  fi

  log_info "Calling $model ..."
  local resp
  # 800-token output cap, low temperature for a focused, deterministic reply.
  if ! resp="$(ai_call_chat "$model" "$system_prompt" "$user_prompt" 800 0.2)"; then
    log_error "OpenAI call failed."
    return 1
  fi

  echo
  log_info "AI diagnosis:"
  echo "─────────────────────────────────────────────────────────────"
  printf '%s\n' "$resp"
  echo "─────────────────────────────────────────────────────────────"
  log_info "If the response includes a unified diff, save it to a file and apply with: patch -p0 < fix.diff"
  return 0
}
|
|
150
|
+
|
|
151
|
+
# Entry point for `install diagnose [--ai]`.
#
# Arguments:
#   $1 - target_dir  site directory to diagnose
#   $2 - repo_root   repository root (forwarded to the AI helper)
# Flags (after the two positionals):
#   --log <file>   analyze an existing log instead of running a fresh build
#   --ai           additionally run the AI-assisted analysis
#   --auto-accept  skip the AI confirmation prompt
# Returns the rule-based verdict: 0 if any rule matched, 1 otherwise.
diagnose_run() {
  local target_dir="$1" repo_root="$2"
  shift 2 || true

  local build_log=""
  local want_ai=0
  local skip_confirm=0
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --log)
        build_log="${2:-}"
        shift
        ;;
      --ai)
        want_ai=1
        ;;
      --auto-accept)
        skip_confirm=1
        ;;
      *)
        log_warning "diagnose_run: ignoring unknown flag: $1"
        ;;
    esac
    shift
  done

  if [[ ! -d "$target_dir" ]]; then
    log_error "Target directory does not exist: $target_dir"
    return 1
  fi

  # No log supplied → run a build ourselves into a temp file that we own
  # (and therefore clean up).
  local own_log=0
  if [[ -z "$build_log" ]]; then
    build_log="$(mktemp)"
    own_log=1
    _diagnose_capture_build_log "$target_dir" "$build_log"
  fi

  if [[ ! -f "$build_log" ]]; then
    log_error "Log file not found: $build_log"
    [[ "$own_log" = "1" ]] && rm -f "$build_log"
    return 1
  fi

  log_info "Diagnosing build log ($(wc -l < "$build_log" | tr -d ' ') lines) ..."
  echo

  # The rule-based pass always runs; its outcome is this function's verdict.
  local verdict=0
  _diagnose_rule_based "$build_log" || verdict=1

  # Optional AI pass — advisory only, never changes the verdict.
  if [[ "$want_ai" = "1" ]]; then
    echo
    _diagnose_ai "$build_log" "$target_dir" "$repo_root" "$skip_confirm" || true
  fi

  [[ "$own_log" = "1" ]] && rm -f "$build_log"
  return $verdict
}
|
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# scripts/lib/install/ai/openai.sh
|
|
3
|
+
#
|
|
4
|
+
# Shared OpenAI client + safety primitives for Phase 5 AI features.
|
|
5
|
+
#
|
|
6
|
+
# All AI features (wizard, diagnose, suggest) source this module. It provides:
|
|
7
|
+
# - ai_enabled — global kill-switch check (ZER0_NO_AI=1 disables)
|
|
8
|
+
# - ai_require_key — verify OPENAI_API_KEY is present
|
|
9
|
+
# - ai_default_model — model selection (env override: ZER0_AI_MODEL)
|
|
10
|
+
# - ai_sanitize_text — strip secrets/PII before sending to API
|
|
11
|
+
# - ai_estimate_cost — print rough token + USD estimate
|
|
12
|
+
# - ai_call_chat — POST to /v1/chat/completions (curl, 30s timeout)
|
|
13
|
+
# - ai_show_diff_confirm — diff-then-confirm gate for any AI-generated file
|
|
14
|
+
#
|
|
15
|
+
# All network I/O is curl-based — no SDK dependency. All keys read from env
|
|
16
|
+
# only — never persisted, never logged. All calls timeout at 30s with graceful
|
|
17
|
+
# fallback (callers should check the return code and degrade to non-AI path).
|
|
18
|
+
|
|
19
|
+
# Library version marker — unused in this file; exposed for sourcing scripts.
# shellcheck disable=SC2034
AI_OPENAI_LIB_VERSION="1.0.0"
|
|
21
|
+
|
|
22
|
+
# Chat-completions endpoint; override AI_OPENAI_ENDPOINT in the environment
# to target a proxy or a mock server in tests.
AI_OPENAI_ENDPOINT="${AI_OPENAI_ENDPOINT:-https://api.openai.com/v1/chat/completions}"
# Hard cap (seconds) applied to every curl call via --max-time.
AI_OPENAI_TIMEOUT_SECS="${AI_OPENAI_TIMEOUT_SECS:-30}"
|
|
24
|
+
|
|
25
|
+
# Default model per use-case. gpt-4o-mini is cheap; gpt-4o for harder tasks.
|
|
26
|
+
# Pick the default model for a given use-case ("wizard" when omitted).
# The ZER0_AI_MODEL environment variable always overrides the table:
# diagnose gets the stronger gpt-4o, everything else the cheap gpt-4o-mini.
ai_default_model() {
  local use_case="${1:-wizard}"

  # Explicit env override wins unconditionally.
  if [[ -n "${ZER0_AI_MODEL:-}" ]]; then
    printf '%s\n' "$ZER0_AI_MODEL"
    return
  fi

  if [[ "$use_case" == "diagnose" ]]; then
    printf '%s\n' "gpt-4o"
  else
    printf '%s\n' "gpt-4o-mini"
  fi
}
|
|
37
|
+
|
|
38
|
+
# Returns 0 if AI is enabled (no kill-switch), 1 if disabled.
|
|
39
|
+
# Global kill-switch check: returns 0 (enabled) unless ZER0_NO_AI=1.
ai_enabled() {
  # The [[ ]] test's own status is the function's return value.
  [[ "${ZER0_NO_AI:-0}" != "1" ]]
}
|
|
45
|
+
|
|
46
|
+
# Verify OPENAI_API_KEY is present. Prints install hint on failure.
|
|
47
|
+
# Verify OPENAI_API_KEY is set and non-empty; print a setup hint otherwise.
# Returns 0 when the key is present, 1 (after the hint) when it is not.
ai_require_key() {
  # Guard-clause style: bail out early on success.
  [[ -n "${OPENAI_API_KEY:-}" ]] && return 0

  log_error "OPENAI_API_KEY environment variable is not set."
  log_info "Get a key at https://platform.openai.com/api-keys then:"
  log_info " export OPENAI_API_KEY='sk-...'"
  log_info "Or run without --ai for the rule-based fallback."
  return 1
}
|
|
57
|
+
|
|
58
|
+
# Strip secrets and PII from a string before sending to the API.
|
|
59
|
+
# Reads from stdin, writes sanitized text to stdout.
|
|
60
|
+
#
|
|
61
|
+
# Strips:
|
|
62
|
+
# - OPENAI_API_KEY / RUBYGEMS_API_KEY / GITHUB_TOKEN values
|
|
63
|
+
# - sk-... API key patterns
|
|
64
|
+
# - email addresses
|
|
65
|
+
# - $HOME absolute paths (replaced with ~)
|
|
66
|
+
# - 40+ char hex strings (likely tokens)
|
|
67
|
+
# Strip secrets and PII from text before it is sent to the API.
# Reads from stdin, writes sanitized text to stdout.
#
# Redacts:
#   - sk-... API key patterns
#   - rubygems_... / ghp_... / github_pat_... tokens
#   - email addresses
#   - $HOME absolute paths (replaced with ~)
#   - 40+ char hex runs (likely commit hashes / tokens)
ai_sanitize_text() {
  local home_esc
  # BUGFIX: escape all sed BRE metacharacters in $HOME, not just '/'.
  # ('^' mid-pattern is already literal in BRE, so it is not in the set.)
  home_esc="$(printf '%s' "${HOME:-/Users/none}" | sed 's:[][\.*$/]:\\&:g')"

  # BUGFIX: the hex-run rule previously required a non-hex char on BOTH
  # sides (missing runs at line start/end) and replaced those boundary
  # chars with spaces, mangling the surrounding text. Four anchored rules
  # now cover middle / start / end / whole-line and keep the boundaries.
  sed \
    -e "s/${home_esc}/~/g" \
    -e 's/sk-[A-Za-z0-9_-]\{20,\}/[REDACTED_API_KEY]/g' \
    -e 's/rubygems_[A-Za-z0-9]\{40,\}/[REDACTED_RUBYGEMS_KEY]/g' \
    -e 's/ghp_[A-Za-z0-9]\{20,\}/[REDACTED_GITHUB_TOKEN]/g' \
    -e 's/github_pat_[A-Za-z0-9_]\{20,\}/[REDACTED_GITHUB_PAT]/g' \
    -e 's/[A-Za-z0-9._%+-]\{1,\}@[A-Za-z0-9.-]\{1,\}\.[A-Za-z]\{2,\}/[REDACTED_EMAIL]/g' \
    -e 's/\([^A-Fa-f0-9]\)[A-Fa-f0-9]\{40,\}\([^A-Fa-f0-9]\)/\1[REDACTED_HASH]\2/g' \
    -e 's/^[A-Fa-f0-9]\{40,\}\([^A-Fa-f0-9]\)/[REDACTED_HASH]\1/' \
    -e 's/\([^A-Fa-f0-9]\)[A-Fa-f0-9]\{40,\}$/\1[REDACTED_HASH]/' \
    -e 's/^[A-Fa-f0-9]\{40,\}$/[REDACTED_HASH]/'
}
|
|
81
|
+
|
|
82
|
+
# Print a rough token + cost estimate so the user knows what they're spending
|
|
83
|
+
# before the call goes out. Writes to stderr so caller can capture stdout.
|
|
84
|
+
#
|
|
85
|
+
# Args: <model> <input_chars> <expected_output_tokens>
|
|
86
|
+
# Print a rough token + cost estimate so the user knows what they're spending
# before the call goes out. Writes to stderr so the caller can capture stdout.
#
# Args: <model> <input_chars> <expected_output_tokens (default 500)>
ai_estimate_cost() {
  local model="$1" input_chars="$2" out_tokens="${3:-500}"
  # ~4 chars per token (English text average)
  local in_tokens=$(( input_chars / 4 + 1 ))
  local in_cents out_cents total_cents
  case "$model" in
    gpt-4o-mini)
      # $0.15/1M input, $0.60/1M output (approx, Nov 2024); ceil to whole cents
      in_cents=$(( (in_tokens * 15 + 999999) / 1000000 ))
      out_cents=$(( (out_tokens * 60 + 999999) / 1000000 ))
      ;;
    gpt-4o)
      # $2.50/1M input, $10.00/1M output
      in_cents=$(( (in_tokens * 250 + 999999) / 1000000 ))
      out_cents=$(( (out_tokens * 1000 + 999999) / 1000000 ))
      ;;
    *)
      # Unknown model — no pricing data; report zero rather than guessing.
      in_cents=0; out_cents=0 ;;
  esac
  total_cents=$(( in_cents + out_cents ))
  # BUGFIX: the old format hard-coded "$0.0<cents>", so 2¢ rendered as
  # "$0.002" and anything >= 100¢ was wrong. Split into dollars.cents.
  local usd
  usd="$(printf '%d.%02d' $(( total_cents / 100 )) $(( total_cents % 100 )))"
  {
    echo " Model: $model"
    echo " Input: ~${in_tokens} tokens (${input_chars} chars)"
    echo " Output: ~${out_tokens} tokens (cap)"
    echo " Est. cost: ≤ \$${usd} (≤ ${total_cents}¢)"
  } >&2
}
|
|
113
|
+
|
|
114
|
+
# Make a chat completion call. Returns the assistant message content on stdout.
|
|
115
|
+
# Non-zero return on failure (network, auth, rate-limit, timeout).
|
|
116
|
+
#
|
|
117
|
+
# Args:
|
|
118
|
+
# $1 = model
|
|
119
|
+
# $2 = system prompt (string)
|
|
120
|
+
# $3 = user prompt (string)
|
|
121
|
+
# $4 = max_tokens (default 1024)
|
|
122
|
+
# $5 = temperature (default 0.3)
|
|
123
|
+
# Make a chat completion call. Returns the assistant message content on stdout.
# Non-zero return on failure (network, auth, rate-limit, timeout).
#
# Args:
#   $1 = model
#   $2 = system prompt (string)
#   $3 = user prompt (string)
#   $4 = max_tokens (default 1024)
#   $5 = temperature (default 0.3)
ai_call_chat() {
  local model="$1" system="$2" user="$3"
  local max_tokens="${4:-1024}" temp="${5:-0.3}"

  if ! ai_require_key; then
    return 1
  fi

  # Build the JSON payload via python3 — it handles quoting/escaping safely
  # and is present on macOS and most Linux distros.
  local payload
  if command -v python3 >/dev/null 2>&1; then
    payload="$(python3 - "$model" "$system" "$user" "$max_tokens" "$temp" <<'PY'
import json, sys
model, system, user, max_tokens, temp = sys.argv[1:]
print(json.dumps({
    "model": model,
    "messages": [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ],
    "max_tokens": int(max_tokens),
    "temperature": float(temp),
}))
PY
)"
  else
    log_error "python3 not found — required for safe JSON payload construction."
    return 1
  fi

  local resp http_code tmpfile curl_rc=0
  tmpfile="$(mktemp)"
  # Body goes to tmpfile, HTTP status to stdout, so we can branch on the code.
  # BUGFIX: capture curl's exit status instead of '|| echo "000"', which could
  # append a second line after a partially written -w output.
  http_code="$(curl -sS -o "$tmpfile" -w '%{http_code}' \
    --max-time "$AI_OPENAI_TIMEOUT_SECS" \
    -X POST "$AI_OPENAI_ENDPOINT" \
    -H "Authorization: Bearer $OPENAI_API_KEY" \
    -H 'Content-Type: application/json' \
    --data-binary "$payload" 2>/dev/null)" || curl_rc=$?
  if [[ "$curl_rc" != "0" ]]; then
    http_code="000"
  fi

  resp="$(cat "$tmpfile")"
  rm -f "$tmpfile"

  if [[ "$http_code" != "200" ]]; then
    log_error "OpenAI API call failed (HTTP $http_code)"
    # Print first ~200 chars of the response body for context.
    # BUGFIX: printf, not echo — the body is arbitrary data that may start
    # with '-' or contain backslash escapes echo could mangle.
    printf '%s' "$resp" | head -c 200 >&2
    echo >&2
    return 1
  fi

  # Extract the assistant message content from the JSON response.
  if command -v python3 >/dev/null 2>&1; then
    printf '%s\n' "$resp" | python3 -c '
import json, sys
try:
    d = json.load(sys.stdin)
    print(d["choices"][0]["message"]["content"])
except Exception as e:
    print(f"[parse error: {e}]", file=sys.stderr)
    sys.exit(1)
'
  else
    # Last-resort grep extraction (fragile; warn)
    log_warning "python3 unavailable — falling back to fragile JSON extraction"
    printf '%s\n' "$resp" | sed -n 's/.*"content":"\([^"]*\)".*/\1/p' | head -1
  fi
}
|
|
193
|
+
|
|
194
|
+
# Show a unified diff between an existing file and a proposed new content,
|
|
195
|
+
# and prompt the user to accept. Auto-accepts if --auto-accept (CI mode).
|
|
196
|
+
#
|
|
197
|
+
# Args: <existing_file_or_/dev/null> <proposed_content_string> <description> [auto_accept]
|
|
198
|
+
# Returns 0 if user accepts (or auto-accept), 1 if rejected.
|
|
199
|
+
# Show a unified diff between an existing file and proposed new content,
# then prompt the user to accept. Auto-accepts when auto_accept=1 (CI mode).
#
# Args: <existing_file_or_/dev/null> <proposed_content_string> <description> [auto_accept]
# Outputs: on acceptance, stdout carries ONLY the temp-file path holding the
#   proposed content (caller reads it, then deletes it). All presentation
#   goes to stderr.
# Returns 0 if user accepts (or auto-accept), 1 if rejected.
ai_show_diff_confirm() {
  local existing="$1" proposed="$2" desc="$3" auto="${4:-0}"
  local tmp
  tmp="$(mktemp)"
  printf '%s\n' "$proposed" > "$tmp"

  # BUGFIX: the diff display used to go to stdout, polluting the $(...)
  # capture that callers use to read the temp path. Route it to stderr.
  {
    echo
    log_info "Proposed change: $desc"
    echo "─────────────────────────── diff ───────────────────────────"
    if [[ -f "$existing" ]]; then
      diff -u "$existing" "$tmp" || true
    else
      echo "(new file)"
      diff -u /dev/null "$tmp" || true
    fi
    echo "────────────────────────────────────────────────────────────"
    echo
  } >&2

  if [[ "$auto" = "1" ]]; then
    log_info "Auto-accept enabled — applying change." >&2
    echo "$tmp" # caller reads this path then deletes
    return 0
  fi

  printf "Apply this change? [y/N] " >&2
  local reply
  read -r reply
  if [[ "$reply" =~ ^[Yy]$ ]]; then
    echo "$tmp"
    return 0
  fi
  rm -f "$tmp"
  log_warning "Rejected by user — no changes written."
  return 1
}
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# scripts/lib/install/ai/suggest.sh
|
|
3
|
+
#
|
|
4
|
+
# `install deploy --ai-suggest` — recommend a deploy target.
|
|
5
|
+
#
|
|
6
|
+
# Two modes:
|
|
7
|
+
# - Rule-based (default): inspects target dir for signals and picks a slug.
|
|
8
|
+
# - AI-assisted (--ai): sends sanitized site stats to OpenAI for rationale.
|
|
9
|
+
#
|
|
10
|
+
# Public API:
|
|
11
|
+
# suggest_deploy_target <target_dir> <repo_root> [--ai] [--auto-accept]
|
|
12
|
+
#
|
|
13
|
+
# Prints recommended slug to stdout (last line). All other output to stderr.
|
|
14
|
+
|
|
15
|
+
# Library version marker — unused in this file; exposed for sourcing scripts.
# shellcheck disable=SC2034
AI_SUGGEST_LIB_VERSION="1.0.0"
|
|
17
|
+
|
|
18
|
+
# Inspect the target dir and emit a human-readable summary on stdout.
|
|
19
|
+
# Inspect the target dir and emit a "key: value" signal summary on stdout,
# consumed by the rule-based and AI recommenders.
#
# Arguments:
#   $1 - site directory to inspect
_suggest_collect_signals() {
  local target_dir="$1"
  local total_size site_files has_dockerfile has_api has_cname has_workflows

  if command -v du >/dev/null 2>&1; then
    total_size="$(du -sh "$target_dir" 2>/dev/null | awk '{print $1}')"
  else
    total_size="?"
  fi

  site_files="$(find "$target_dir" -type f \( -name '*.md' -o -name '*.html' \) 2>/dev/null | wc -l | tr -d ' ')"

  # BUGFIX(idiom): the original chained `[[ ]] || [[ ]] && var=yes`, which
  # only did the right thing by accident of bash's left-to-right && / ||
  # associativity. An explicit if makes the intent unambiguous.
  has_dockerfile=no
  if [[ -f "$target_dir/Dockerfile" || -f "$target_dir/docker/Dockerfile.prod" ]]; then
    has_dockerfile=yes
  fi

  has_api=no
  if find "$target_dir" -type d \( -name 'api' -o -name 'functions' \) 2>/dev/null | grep -q .; then
    has_api=yes
  fi

  has_cname=no
  [[ -f "$target_dir/CNAME" ]] && has_cname=yes
  has_workflows=no
  [[ -d "$target_dir/.github/workflows" ]] && has_workflows=yes

  cat <<EOF
size: $total_size
content_files: $site_files
has_dockerfile: $has_dockerfile
has_api_or_functions: $has_api
has_cname: $has_cname
has_workflows: $has_workflows
EOF
}
|
|
45
|
+
|
|
46
|
+
# Pure rule-based scoring. Deterministic.
|
|
47
|
+
# Deterministic deploy-target recommendation from the collected signals.
# The rationale is printed to stderr; the chosen slug is the only stdout line.
_suggest_rule_based() {
  local signal_block="$1"
  local api_flag docker_flag cname_flag
  # Single awk per field instead of grep|awk pipelines.
  api_flag="$(awk '$1 == "has_api_or_functions:" { print $2 }' <<<"$signal_block")"
  docker_flag="$(awk '$1 == "has_dockerfile:" { print $2 }' <<<"$signal_block")"
  cname_flag="$(awk '$1 == "has_cname:" { print $2 }' <<<"$signal_block")"

  local choice reason
  if [[ "$api_flag" = "yes" ]]; then
    choice="azure-swa"
    reason="API/functions directory present → Azure Static Web Apps integrates serverless functions out of the box."
  elif [[ "$docker_flag" = "yes" && "$cname_flag" = "yes" ]]; then
    choice="docker-prod"
    reason="Existing Dockerfile + custom domain (CNAME) → self-hosted Docker gives full control."
  else
    choice="github-pages"
    reason="No API code, no custom Docker build → GitHub Pages is the simplest, cheapest target."
  fi

  {
    echo "Rule-based recommendation:"
    echo " Target: $choice"
    echo " Rationale: $reason"
  } >&2
  printf '%s\n' "$choice"
}
|
|
72
|
+
|
|
73
|
+
# AI-assisted recommendation. Returns slug on stdout.
|
|
74
|
+
# AI-assisted deploy-target recommendation. Prints the chosen slug on stdout;
# all progress/diagnostic output is routed to stderr so callers can capture
# the slug with $(...).
#
# Arguments:
#   $1 - signals      multi-line "key: value" block from _suggest_collect_signals
#   $2 - repo_root    root containing templates/ai/prompts/
#   $3 - auto_accept  "1" skips the pre-call confirmation prompt
# Returns 0 with the slug on stdout, 1 on any refusal or failure.
_suggest_ai() {
  local signals="$1" repo_root="$2" auto_accept="$3"

  # Honor the global kill-switch and the API-key requirement up front.
  if ! ai_enabled; then
    log_warning "AI is disabled (ZER0_NO_AI=1) — using rule-based only."
    return 1
  fi
  if ! ai_require_key; then
    return 1
  fi

  local sys_prompt_file="$repo_root/templates/ai/prompts/suggest-system.md"
  if [[ ! -f "$sys_prompt_file" ]]; then
    log_error "System prompt missing: $sys_prompt_file"
    return 1
  fi
  local system_prompt
  system_prompt="$(cat "$sys_prompt_file")"

  # NOTE: continuation lines are intentionally unindented — they are inside
  # the quoted string, and any indent would end up in the prompt itself.
  local user_prompt="Site signals:
${signals}

Available deploy targets:
- github-pages: GitHub Pages with peaceiris/actions-gh-pages
- azure-swa: Azure Static Web Apps (supports serverless functions)
- docker-prod: Self-hosted Ruby builder + nginx:alpine container

Recommend exactly one target. Respond with two lines:
TARGET: <slug>
RATIONALE: <one sentence>"

  local model
  model="$(ai_default_model wizard)"
  local in_chars=$(( ${#system_prompt} + ${#user_prompt} ))
  # Cost preview goes to stderr — stdout is reserved for the slug.
  {
    log_info "About to call OpenAI:"
    ai_estimate_cost "$model" "$in_chars" 200
  } >&2

  if [[ "$auto_accept" != "1" ]]; then
    printf "Proceed with API call? [y/N] " >&2
    local go
    read -r go
    if [[ ! "$go" =~ ^[Yy]$ ]]; then
      log_warning "Aborted by user."
      return 1
    fi
  fi

  log_info "Calling $model ..." >&2
  local resp
  # 200-token cap is plenty for the two-line TARGET/RATIONALE reply.
  if ! resp="$(ai_call_chat "$model" "$system_prompt" "$user_prompt" 200 0.2)"; then
    log_error "OpenAI call failed."
    return 1
  fi

  {
    echo "AI recommendation:"
    printf '%s\n' "$resp" | sed 's/^/ /'
  } >&2

  # Extract slug from the "TARGET: <slug>" line of the reply.
  local slug
  slug="$(printf '%s\n' "$resp" | sed -n 's/^TARGET:[[:space:]]*\([a-z-]*\).*/\1/p' | head -1)"
  if [[ -z "$slug" ]]; then
    log_error "Could not parse TARGET: line from AI response."
    return 1
  fi
  printf '%s\n' "$slug"
}
|
|
144
|
+
|
|
145
|
+
# Public entry point: recommend a deploy target for <target_dir>.
#
# Arguments:
#   $1 - target_dir  site directory to inspect
#   $2 - repo_root   repository root (forwarded to the AI helper)
# Flags: --ai (try AI first, fall back to rules), --auto-accept (skip prompts)
# Prints the chosen slug on stdout; everything else goes to stderr.
suggest_deploy_target() {
  local target_dir="$1" repo_root="$2"
  shift 2 || true

  local use_ai=0 auto_accept=0
  local arg
  # No flag here takes a value, so a simple for-loop suffices.
  for arg in "$@"; do
    case "$arg" in
      --ai)          use_ai=1 ;;
      --auto-accept) auto_accept=1 ;;
      *) log_warning "suggest_deploy_target: ignoring unknown flag: $arg" ;;
    esac
  done

  if [[ ! -d "$target_dir" ]]; then
    log_error "Target directory does not exist: $target_dir"
    return 1
  fi

  log_info "Inspecting target site for deploy signals ..." >&2
  local signals
  signals="$(_suggest_collect_signals "$target_dir")"
  {
    echo "Site signals:"
    printf '%s\n' "$signals" | sed 's/^/ /'
    echo
  } >&2

  local slug
  if [[ "$use_ai" = "1" ]]; then
    # AI path first; any failure degrades gracefully to the rule engine.
    if slug="$(_suggest_ai "$signals" "$repo_root" "$auto_accept")"; then
      printf '%s\n' "$slug"
      return 0
    fi
    log_warning "AI suggestion failed — falling back to rule-based."
  fi
  _suggest_rule_based "$signals"
}
|