jekyll-theme-zer0 0.22.21 → 0.22.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,233 @@
1
+ #!/usr/bin/env bash
2
+ # scripts/lib/install/ai/openai.sh
3
+ #
4
+ # Shared OpenAI client + safety primitives for Phase 5 AI features.
5
+ #
6
+ # All AI features (wizard, diagnose, suggest) source this module. It provides:
7
+ # - ai_enabled — global kill-switch check (ZER0_NO_AI=1 disables)
8
+ # - ai_require_key — verify OPENAI_API_KEY is present
9
+ # - ai_default_model — model selection (env override: ZER0_AI_MODEL)
10
+ # - ai_sanitize_text — strip secrets/PII before sending to API
11
+ # - ai_estimate_cost — print rough token + USD estimate
12
+ # - ai_call_chat — POST to /v1/chat/completions (curl, 30s timeout)
13
+ # - ai_show_diff_confirm — diff-then-confirm gate for any AI-generated file
14
+ #
15
+ # All network I/O is curl-based — no SDK dependency. All keys read from env
16
+ # only — never persisted, never logged. All calls timeout at 30s with graceful
17
+ # fallback (callers should check the return code and degrade to non-AI path).
18
+
19
+ # shellcheck disable=SC2034
20
+ AI_OPENAI_LIB_VERSION="1.0.0"
21
+
22
+ AI_OPENAI_ENDPOINT="${AI_OPENAI_ENDPOINT:-https://api.openai.com/v1/chat/completions}"
23
+ AI_OPENAI_TIMEOUT_SECS="${AI_OPENAI_TIMEOUT_SECS:-30}"
24
+
25
# Default model per use-case. gpt-4o-mini is cheap; gpt-4o for harder tasks.
# $1 = use-case ("wizard" by default; "diagnose" picks the stronger model).
# ZER0_AI_MODEL, when set, overrides everything.
ai_default_model() {
  local use_case="${1:-wizard}"

  # Explicit env override always wins, regardless of use-case.
  local override="${ZER0_AI_MODEL:-}"
  if [[ -n "$override" ]]; then
    echo "$override"
    return
  fi

  case "$use_case" in
    diagnose)
      echo "gpt-4o"
      ;;
    *)
      echo "gpt-4o-mini"
      ;;
  esac
}
37
+
38
# Returns 0 if AI is enabled (no kill-switch), 1 if disabled.
# ZER0_NO_AI=1 is the global opt-out honored by every AI feature.
ai_enabled() {
  [[ "${ZER0_NO_AI:-0}" != "1" ]]
}
45
+
46
# Verify OPENAI_API_KEY is present. Prints install hint on failure.
# The key is only ever read from the environment — never persisted or logged.
ai_require_key() {
  [[ -n "${OPENAI_API_KEY:-}" ]] && return 0

  log_error "OPENAI_API_KEY environment variable is not set."
  log_info "Get a key at https://platform.openai.com/api-keys then:"
  log_info " export OPENAI_API_KEY='sk-...'"
  log_info "Or run without --ai for the rule-based fallback."
  return 1
}
57
+
58
# Strip secrets and PII from a string before sending to the API.
# Reads from stdin, writes sanitized text to stdout.
#
# Redacted patterns (applied in order):
#   - $HOME absolute paths (replaced with ~)
#   - sk-... API key patterns
#   - rubygems_... / ghp_... / github_pat_... token patterns
#   - email addresses
#   - 40+ char hex strings (likely tokens)
ai_sanitize_text() {
  # Escape the slashes in $HOME so it is safe inside an s/…/…/ pattern.
  local escaped_home
  escaped_home="$(printf '%s' "${HOME:-/Users/none}" | sed 's:/:\\/:g')"

  sed \
    -e "s/${escaped_home}/~/g" \
    -e 's/sk-[A-Za-z0-9_-]\{20,\}/[REDACTED_API_KEY]/g' \
    -e 's/rubygems_[A-Za-z0-9]\{40,\}/[REDACTED_RUBYGEMS_KEY]/g' \
    -e 's/ghp_[A-Za-z0-9]\{20,\}/[REDACTED_GITHUB_TOKEN]/g' \
    -e 's/github_pat_[A-Za-z0-9_]\{20,\}/[REDACTED_GITHUB_PAT]/g' \
    -e 's/[A-Za-z0-9._%+-]\{1,\}@[A-Za-z0-9.-]\{1,\}\.[A-Za-z]\{2,\}/[REDACTED_EMAIL]/g' \
    -e 's/[^A-Fa-f0-9]\([A-Fa-f0-9]\{40,\}\)[^A-Fa-f0-9]/ [REDACTED_HASH] /g'
}
81
+
82
# Print a rough token + cost estimate so the user knows what they're spending
# before the call goes out. Writes to stderr so caller can capture stdout.
#
# Args: <model> <input_chars> <expected_output_tokens (default 500)>
# Pricing is hard-coded (approx, Nov 2024); unknown models estimate as $0.
ai_estimate_cost() {
  local model="$1" input_chars="$2" out_tokens="${3:-500}"
  # ~4 chars per token (English text avg)
  local in_tokens=$(( input_chars / 4 + 1 ))
  local in_cents out_cents total_cents
  case "$model" in
    gpt-4o-mini)
      # $0.15/1M input, $0.60/1M output — ceil to whole cents
      in_cents=$(( (in_tokens * 15 + 999999) / 1000000 ))
      out_cents=$(( (out_tokens * 60 + 999999) / 1000000 ))
      ;;
    gpt-4o)
      # $2.50/1M input, $10.00/1M output
      in_cents=$(( (in_tokens * 250 + 999999) / 1000000 ))
      out_cents=$(( (out_tokens * 1000 + 999999) / 1000000 ))
      ;;
    *)
      in_cents=0; out_cents=0 ;;
  esac
  total_cents=$(( in_cents + out_cents ))
  # BUGFIX: the previous format was "\$0.0<cents>", which printed 2 cents as
  # "$0.002" (off by 10x). Format cents as real dollars: 2 → $0.02, 105 → $1.05.
  local usd_dollars=$(( total_cents / 100 ))
  local usd_cents=$(( total_cents % 100 ))
  {
    echo " Model: $model"
    echo " Input: ~${in_tokens} tokens (${input_chars} chars)"
    echo " Output: ~${out_tokens} tokens (cap)"
    echo " Est. cost: ≤ \$${usd_dollars}.$(printf '%02d' "$usd_cents") (≤ ${total_cents}¢)"
  } >&2
}
113
+
114
# Make a chat completion call. Returns the assistant message content on stdout.
# Non-zero return on failure (network, auth, rate-limit, timeout).
#
# Args:
#   $1 = model
#   $2 = system prompt (string)
#   $3 = user prompt (string)
#   $4 = max_tokens (default 1024)
#   $5 = temperature (default 0.3)
#
# Uses $AI_OPENAI_ENDPOINT and $AI_OPENAI_TIMEOUT_SECS; reads $OPENAI_API_KEY
# from the environment only. Requires python3 for payload construction.
ai_call_chat() {
  local model="$1" system="$2" user="$3"
  local max_tokens="${4:-1024}" temp="${5:-0.3}"

  # Refuse early: the curl below would just 401 without a key.
  if ! ai_require_key; then
    return 1
  fi

  # Build JSON payload via python (handles escaping safely; available on
  # all macOS + most Linux distros). Falls back to a naive heredoc if
  # python is missing — caller is expected to keep prompts ASCII-safe.
  local payload
  if command -v python3 >/dev/null 2>&1; then
    payload="$(python3 - "$model" "$system" "$user" "$max_tokens" "$temp" <<'PY'
import json, sys
model, system, user, max_tokens, temp = sys.argv[1:]
print(json.dumps({
    "model": model,
    "messages": [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ],
    "max_tokens": int(max_tokens),
    "temperature": float(temp),
}))
PY
)"
  else
    log_error "python3 not found — required for safe JSON payload construction."
    return 1
  fi

  local resp http_code tmpfile
  tmpfile="$(mktemp)"
  # Capture body to tmpfile, status code to stdout, so we can branch on HTTP code.
  # On any curl failure (network/timeout), "000" is used as the status code.
  http_code="$(curl -sS -o "$tmpfile" -w '%{http_code}' \
    --max-time "$AI_OPENAI_TIMEOUT_SECS" \
    -X POST "$AI_OPENAI_ENDPOINT" \
    -H "Authorization: Bearer $OPENAI_API_KEY" \
    -H 'Content-Type: application/json' \
    --data-binary "$payload" 2>/dev/null || echo "000")"

  resp="$(cat "$tmpfile")"
  rm -f "$tmpfile"

  if [[ "$http_code" != "200" ]]; then
    log_error "OpenAI API call failed (HTTP $http_code)"
    # Print first ~200 chars of response (already from API, no secrets)
    echo "$resp" | head -c 200 >&2
    echo >&2
    return 1
  fi

  # Extract message content
  # NOTE(review): because the python3 check above already returned 1 when
  # python3 is absent, the else-branch below is only reachable if python3
  # disappears mid-call — confirm whether the fallback is still wanted.
  if command -v python3 >/dev/null 2>&1; then
    echo "$resp" | python3 -c '
import json, sys
try:
    d = json.load(sys.stdin)
    print(d["choices"][0]["message"]["content"])
except Exception as e:
    print(f"[parse error: {e}]", file=sys.stderr)
    sys.exit(1)
'
  else
    # Last-resort grep extraction (fragile; warn)
    log_warning "python3 unavailable — falling back to fragile JSON extraction"
    echo "$resp" | sed -n 's/.*"content":"\([^"]*\)".*/\1/p' | head -1
  fi
}
193
+
194
# Show a unified diff between an existing file and a proposed new content,
# and prompt the user to accept. Auto-accepts if --auto-accept (CI mode).
#
# Args: <existing_file_or_/dev/null> <proposed_content_string> <description> [auto_accept]
# Returns 0 if user accepts (or auto-accept), 1 if rejected.
#
# On acceptance, prints ONLY the path of a temp file holding the proposed
# content to stdout — callers capture it with $( ) and must mv/delete it.
# BUGFIX: all human-facing output (diff, separators, prompt) now goes to
# stderr; previously it went to stdout and polluted the captured path,
# breaking callers like wizard_ai_run's `mv "$accepted_path" ...`.
ai_show_diff_confirm() {
  local existing="$1" proposed="$2" desc="$3" auto="${4:-0}"
  local tmp
  tmp="$(mktemp)"
  printf '%s\n' "$proposed" > "$tmp"

  # Display block → stderr; stdout is reserved for the accepted temp path.
  {
    echo
    log_info "Proposed change: $desc"
    echo "─────────────────────────── diff ───────────────────────────"
    if [[ -f "$existing" ]]; then
      diff -u "$existing" "$tmp" || true
    else
      echo "(new file)"
      diff -u /dev/null "$tmp" || true
    fi
    echo "────────────────────────────────────────────────────────────"
    echo
  } >&2

  if [[ "$auto" = "1" ]]; then
    log_info "Auto-accept enabled — applying change." >&2
    echo "$tmp" # caller reads this path then deletes
    return 0
  fi

  printf "Apply this change? [y/N] " >&2
  local reply
  read -r reply
  if [[ "$reply" =~ ^[Yy]$ ]]; then
    echo "$tmp"
    return 0
  fi
  rm -f "$tmp"
  log_warning "Rejected by user — no changes written." >&2
  return 1
}
@@ -0,0 +1,182 @@
1
+ #!/usr/bin/env bash
2
+ # scripts/lib/install/ai/suggest.sh
3
+ #
4
+ # `install deploy --ai-suggest` — recommend a deploy target.
5
+ #
6
+ # Two modes:
7
+ # - Rule-based (default): inspects target dir for signals and picks a slug.
8
+ # - AI-assisted (--ai): sends sanitized site stats to OpenAI for rationale.
9
+ #
10
+ # Public API:
11
+ # suggest_deploy_target <target_dir> <repo_root> [--ai] [--auto-accept]
12
+ #
13
+ # Prints recommended slug to stdout (last line). All other output to stderr.
14
+
15
+ # shellcheck disable=SC2034
16
+ AI_SUGGEST_LIB_VERSION="1.0.0"
17
+
18
# Inspect the target dir and emit a human-readable summary on stdout.
# Output is a fixed set of unindented "key: value" lines consumed by
# _suggest_rule_based and included verbatim in AI prompts.
_suggest_collect_signals() {
  local dir="$1"

  # Human-readable tree size ("?" when du is unavailable).
  local size="?"
  if command -v du >/dev/null 2>&1; then
    size="$(du -sh "$dir" 2>/dev/null | awk '{print $1}')"
  fi

  # Count of markdown/HTML content files anywhere under the tree.
  local content_count
  content_count="$(find "$dir" -type f \( -name '*.md' -o -name '*.html' \) 2>/dev/null | wc -l | tr -d ' ')"

  local dockerfile=no api=no cname=no workflows=no
  if [[ -f "$dir/Dockerfile" || -f "$dir/docker/Dockerfile.prod" ]]; then
    dockerfile=yes
  fi
  if find "$dir" -type d \( -name 'api' -o -name 'functions' \) 2>/dev/null | grep -q .; then
    api=yes
  fi
  if [[ -f "$dir/CNAME" ]]; then
    cname=yes
  fi
  if [[ -d "$dir/.github/workflows" ]]; then
    workflows=yes
  fi

  printf '%s\n' \
    "size: $size" \
    "content_files: $content_count" \
    "has_dockerfile: $dockerfile" \
    "has_api_or_functions: $api" \
    "has_cname: $cname" \
    "has_workflows: $workflows"
}
45
+
46
# Pure rule-based scoring. Deterministic.
# $1 = the "key: value" signal block from _suggest_collect_signals.
# Prints the chosen slug on stdout; explanation goes to stderr.
_suggest_rule_based() {
  local signal_block="$1"

  # Parse the three decision-relevant yes/no flags out of the block.
  local api="" dockerfile="" cname="" line
  while IFS= read -r line; do
    case "$line" in
      has_api_or_functions:*) api="${line#*: }" ;;
      has_dockerfile:*)       dockerfile="${line#*: }" ;;
      has_cname:*)            cname="${line#*: }" ;;
    esac
  done <<<"$signal_block"

  local choice why
  if [[ "$api" = "yes" ]]; then
    choice="azure-swa"
    why="API/functions directory present → Azure Static Web Apps integrates serverless functions out of the box."
  elif [[ "$dockerfile" = "yes" && "$cname" = "yes" ]]; then
    choice="docker-prod"
    why="Existing Dockerfile + custom domain (CNAME) → self-hosted Docker gives full control."
  else
    choice="github-pages"
    why="No API code, no custom Docker build → GitHub Pages is the simplest, cheapest target."
  fi

  {
    echo "Rule-based recommendation:"
    echo " Target: $choice"
    echo " Rationale: $why"
  } >&2
  printf '%s\n' "$choice"
}
72
+
73
# AI-assisted recommendation. Returns slug on stdout.
#
# Args:
#   $1 = signal block from _suggest_collect_signals
#   $2 = repo root (holds templates/ai/prompts/suggest-system.md)
#   $3 = auto-accept flag ("1" skips the confirm prompt)
#
# Returns non-zero on any gate failure (kill-switch, missing key, missing
# prompt file, user abort, API failure, unparseable response) — callers
# fall back to _suggest_rule_based.
_suggest_ai() {
  local signals="$1" repo_root="$2" auto_accept="$3"

  # Global kill-switch and key checks come first — no prompting otherwise.
  if ! ai_enabled; then
    log_warning "AI is disabled (ZER0_NO_AI=1) — using rule-based only."
    return 1
  fi
  if ! ai_require_key; then
    return 1
  fi

  local sys_prompt_file="$repo_root/templates/ai/prompts/suggest-system.md"
  if [[ ! -f "$sys_prompt_file" ]]; then
    log_error "System prompt missing: $sys_prompt_file"
    return 1
  fi
  local system_prompt
  system_prompt="$(cat "$sys_prompt_file")"

  # Embed the raw signal block plus the fixed target menu in the user prompt.
  local user_prompt="Site signals:
${signals}

Available deploy targets:
- github-pages: GitHub Pages with peaceiris/actions-gh-pages
- azure-swa: Azure Static Web Apps (supports serverless functions)
- docker-prod: Self-hosted Ruby builder + nginx:alpine container

Recommend exactly one target. Respond with two lines:
TARGET: <slug>
RATIONALE: <one sentence>"

  local model
  model="$(ai_default_model wizard)"
  # Rough cost preview before asking permission to spend money.
  local in_chars=$(( ${#system_prompt} + ${#user_prompt} ))
  {
    log_info "About to call OpenAI:"
    ai_estimate_cost "$model" "$in_chars" 200
  } >&2

  # Interactive spend-confirmation unless running with --auto-accept.
  if [[ "$auto_accept" != "1" ]]; then
    printf "Proceed with API call? [y/N] " >&2
    local go
    read -r go
    if [[ ! "$go" =~ ^[Yy]$ ]]; then
      log_warning "Aborted by user."
      return 1
    fi
  fi

  log_info "Calling $model ..." >&2
  local resp
  if ! resp="$(ai_call_chat "$model" "$system_prompt" "$user_prompt" 200 0.2)"; then
    log_error "OpenAI call failed."
    return 1
  fi

  # Show the full model answer (indented) on stderr for transparency.
  {
    echo "AI recommendation:"
    printf '%s\n' "$resp" | sed 's/^/ /'
  } >&2

  # Extract slug from "TARGET: <slug>" line
  local slug
  slug="$(printf '%s\n' "$resp" | sed -n 's/^TARGET:[[:space:]]*\([a-z-]*\).*/\1/p' | head -1)"
  if [[ -z "$slug" ]]; then
    log_error "Could not parse TARGET: line from AI response."
    return 1
  fi
  printf '%s\n' "$slug"
}
144
+
145
# Public entry point: recommend a deploy target for a site directory.
#
# Usage: suggest_deploy_target <target_dir> <repo_root> [--ai] [--auto-accept]
# Prints the recommended slug to stdout (last line); all other output is
# sent to stderr. Falls back to the rule-based path when AI is unavailable
# or fails.
suggest_deploy_target() {
  local target_dir="$1" repo_root="$2"
  shift 2 || true

  # Parse optional flags; unknown ones are warned about and skipped.
  local want_ai=0 yes_to_all=0 flag
  for flag in "$@"; do
    case "$flag" in
      --ai)          want_ai=1 ;;
      --auto-accept) yes_to_all=1 ;;
      *) log_warning "suggest_deploy_target: ignoring unknown flag: $flag" ;;
    esac
  done

  if [[ ! -d "$target_dir" ]]; then
    log_error "Target directory does not exist: $target_dir"
    return 1
  fi

  log_info "Inspecting target site for deploy signals ..." >&2
  local signals
  signals="$(_suggest_collect_signals "$target_dir")"
  {
    echo "Site signals:"
    printf '%s\n' "$signals" | sed 's/^/ /'
    echo
  } >&2

  local picked
  if [[ "$want_ai" = "1" ]]; then
    if picked="$(_suggest_ai "$signals" "$repo_root" "$yes_to_all")"; then
      printf '%s\n' "$picked"
      return 0
    fi
    log_warning "AI suggestion failed — falling back to rule-based."
  fi
  _suggest_rule_based "$signals"
}
@@ -0,0 +1,160 @@
1
+ #!/usr/bin/env bash
2
+ # scripts/lib/install/ai/wizard.sh
3
+ #
4
+ # `install wizard --ai` — opt-in OpenAI-powered config generation.
5
+ #
6
+ # Flow:
7
+ # 1. ai_enabled() check (ZER0_NO_AI kill-switch)
8
+ # 2. ai_require_key() check (OPENAI_API_KEY)
9
+ # 3. Prompt user for: site description, target audience, deploy preference
10
+ # 4. Read system prompt from templates/ai/prompts/wizard-system.md
11
+ # 5. ai_estimate_cost() → user sees price estimate
12
+ # 6. ai_call_chat() → returns JSON: {title, description, tagline, navigation,
13
+ # welcome_post_outline, suggested_deploy_target}
14
+ # 7. ai_show_diff_confirm() per generated file (_config.yml, navigation,
15
+ # welcome post)
16
+ # 8. On any failure → fall back to non-AI wizard (delegated to caller).
17
+ #
18
+ # Public API:
19
+ # wizard_ai_run <target_dir> <repo_root> [--auto-accept]
20
+ #
21
+ # Returns 0 on success, 1 on failure (caller should fall back).
22
+
23
+ # shellcheck disable=SC2034
24
+ AI_WIZARD_LIB_VERSION="1.0.0"
25
+
26
# `install wizard --ai` — interactive OpenAI-backed config generation.
#
# Args:
#   $1 = target_dir  — site directory where _config.yml may be created
#   $2 = repo_root   — theme repo root (holds templates/ai/prompts/)
#   --auto-accept    — skip every confirmation prompt (CI mode)
#
# Returns 0 on success, 1 on any failure (caller falls back to the
# non-AI wizard). Requires python3 for JSON parsing and reads input
# interactively from stdin unless --auto-accept is set.
wizard_ai_run() {
  local target_dir="$1" repo_root="$2"
  shift 2 || true

  # Parse optional flags; unknown flags are warned about and ignored.
  local auto_accept=0
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --auto-accept) auto_accept=1 ;;
      *) log_warning "wizard_ai_run: ignoring unknown flag: $1" ;;
    esac
    shift
  done

  # Hard gates: kill-switch and API key must pass before any prompting.
  if ! ai_enabled; then
    log_warning "AI is disabled (ZER0_NO_AI=1) — cannot run --ai wizard."
    return 1
  fi
  if ! ai_require_key; then
    return 1
  fi

  # 1. Gather user inputs (description is mandatory; the rest default).
  log_info "AI Wizard — describe your site in a sentence or two."
  log_info "Examples: 'Personal blog about Rust performance' or 'Docs site for an open-source CLI'."
  printf "Site description: "
  local site_desc audience deploy_pref
  read -r site_desc
  if [[ -z "$site_desc" ]]; then
    log_error "Empty description — aborting AI wizard."
    return 1
  fi
  printf "Target audience (e.g., 'developers', 'data scientists', 'general'): "
  read -r audience
  [[ -z "$audience" ]] && audience="developers"
  printf "Deploy preference (github-pages | azure-swa | docker-prod | unsure): "
  read -r deploy_pref
  [[ -z "$deploy_pref" ]] && deploy_pref="unsure"

  # 2. Load system prompt
  local sys_prompt_file="$repo_root/templates/ai/prompts/wizard-system.md"
  if [[ ! -f "$sys_prompt_file" ]]; then
    log_error "System prompt missing: $sys_prompt_file"
    return 1
  fi
  local system_prompt
  system_prompt="$(cat "$sys_prompt_file")"

  # 3. Build user prompt
  local user_prompt
  user_prompt="Site description: ${site_desc}
Target audience: ${audience}
Deploy preference: ${deploy_pref}

Return ONLY a JSON object with keys: title, description, tagline, suggested_deploy_target, navigation (array of {label, url}), welcome_post_outline (string of 3-5 bullet points)."

  # Sanitize (paranoid — user input could contain pasted secrets)
  user_prompt="$(printf '%s' "$user_prompt" | ai_sanitize_text)"

  # 4. Cost estimate + confirm before spending money.
  local model
  model="$(ai_default_model wizard)"
  local in_chars=$(( ${#system_prompt} + ${#user_prompt} ))
  log_info "About to call OpenAI:"
  ai_estimate_cost "$model" "$in_chars" 800
  if [[ "$auto_accept" != "1" ]]; then
    printf "Proceed with API call? [y/N] "
    local go
    read -r go
    if [[ ! "$go" =~ ^[Yy]$ ]]; then
      log_warning "Aborted by user."
      return 1
    fi
  fi

  # 5. Call API
  log_info "Calling $model ..."
  local raw
  if ! raw="$(ai_call_chat "$model" "$system_prompt" "$user_prompt" 1024 0.4)"; then
    log_error "OpenAI call failed."
    return 1
  fi

  # 6. Parse JSON (strip code-fence if present)
  local json
  json="$(printf '%s' "$raw" | sed -E '/^```(json)?$/d')"
  if ! command -v python3 >/dev/null 2>&1; then
    log_error "python3 required to parse wizard response."
    return 1
  fi

  # Extract fields safely
  # NOTE(review): the payload is re-parsed once per field; a single python3
  # pass would be cheaper, though behavior is identical.
  local title description tagline suggested_deploy
  title="$(printf '%s' "$json" | python3 -c 'import json,sys;d=json.load(sys.stdin);print(d.get("title",""))' 2>/dev/null || echo "")"
  description="$(printf '%s' "$json" | python3 -c 'import json,sys;d=json.load(sys.stdin);print(d.get("description",""))' 2>/dev/null || echo "")"
  tagline="$(printf '%s' "$json" | python3 -c 'import json,sys;d=json.load(sys.stdin);print(d.get("tagline",""))' 2>/dev/null || echo "")"
  suggested_deploy="$(printf '%s' "$json" | python3 -c 'import json,sys;d=json.load(sys.stdin);print(d.get("suggested_deploy_target",""))' 2>/dev/null || echo "")"

  # "title" doubles as the parse-success sentinel.
  if [[ -z "$title" ]]; then
    log_error "Failed to parse AI response (missing 'title' field)."
    log_info "Raw response: $raw"
    return 1
  fi

  # 7. Build proposed _config.yml fragment + diff
  local cfg_file="$target_dir/_config.yml"
  local proposed
  proposed="$(cat <<EOF
# Generated by install wizard --ai
title: "$title"
description: "$description"
tagline: "$tagline"
EOF
)"
  log_info "AI suggests deploy target: ${suggested_deploy:-(none)}"
  if [[ -f "$cfg_file" ]]; then
    # Never overwrite or merge into an existing config automatically.
    log_warning "_config.yml already exists. The AI suggestions above won't be merged automatically."
    log_info "Recommended: cp the values you like into _config.yml manually."
  else
    local accepted_path
    # NOTE(review): this capture assumes ai_show_diff_confirm prints ONLY the
    # temp-file path on stdout (display output on stderr) — confirm.
    if accepted_path="$(ai_show_diff_confirm "$cfg_file" "$proposed" "Create _config.yml" "$auto_accept")"; then
      mv "$accepted_path" "$cfg_file"
      log_success "Wrote $cfg_file"
    fi
  fi

  # 8. Print full JSON for user reference (so welcome post outline + nav
  # aren't lost). User can manually convert to files.
  echo
  log_info "Full AI response (use these to flesh out navigation + a welcome post):"
  echo "─────────────────────────────────────────────────────────────"
  printf '%s\n' "$json"
  echo "─────────────────────────────────────────────────────────────"

  return 0
}
@@ -0,0 +1,56 @@
1
+ #!/bin/bash
2
+ # =========================================================================
3
+ # scripts/lib/install/config.sh
4
+ # =========================================================================
5
+ # Configuration loader for install.sh.
6
+ #
7
+ # load_install_config <SCRIPT_DIR> [<SOURCE_DIR>]
8
+ # Searches for templates/config/install.conf in either of the supplied
9
+ # directories. On success: sources it, exports TEMPLATES_DIR, returns 0.
10
+ # On failure: applies hard-coded fallback defaults and returns 1.
11
+ #
12
+ # This function is identical in behaviour to the previous private
13
+ # `_load_install_config` inside install.sh; the only change is location +
14
+ # accepting the search roots as parameters (so the function is testable
15
+ # without globals).
16
+ # =========================================================================
17
+
18
# Load installer configuration.
#
# load_install_config <SCRIPT_DIR> [<SOURCE_DIR>]
#   Searches for templates/config/install.conf under either supplied
#   directory. On success: sources it, exports TEMPLATES_DIR, returns 0.
#   On failure: applies hard-coded fallback defaults and returns 1.
load_install_config() {
  local script_dir="${1:-${SCRIPT_DIR:-$(pwd)}}"
  local source_dir="${2:-${SOURCE_DIR:-$script_dir}}"

  local config_paths=(
    "$script_dir/templates/config/install.conf"
    "$source_dir/templates/config/install.conf"
  )

  local config_path templates_dir
  for config_path in "${config_paths[@]}"; do
    if [[ -f "$config_path" ]]; then
      # shellcheck source=/dev/null
      source "$config_path"
      # Split assignment from export so a dirname failure is not masked
      # by export's own (always-zero) exit status (SC2155).
      templates_dir="$(dirname "$(dirname "$config_path")")"
      export TEMPLATES_DIR="$templates_dir"
      return 0
    fi
  done

  # Fallback defaults when templates not available (remote install
  # without bundled templates/, or stripped distribution).
  export THEME_NAME="${THEME_NAME:-zer0-mistakes}"
  export THEME_GEM_NAME="${THEME_GEM_NAME:-jekyll-theme-zer0}"
  export THEME_DISPLAY_NAME="${THEME_DISPLAY_NAME:-Zer0-Mistakes Jekyll Theme}"
  export GITHUB_USER="${GITHUB_USER:-bamr87}"
  export GITHUB_REPO="${GITHUB_REPO:-bamr87/zer0-mistakes}"
  export GITHUB_URL="${GITHUB_URL:-https://github.com/bamr87/zer0-mistakes}"
  export GITHUB_RAW_URL="${GITHUB_RAW_URL:-https://raw.githubusercontent.com/bamr87/zer0-mistakes/main}"
  export DEFAULT_PORT="${DEFAULT_PORT:-4000}"
  export DEFAULT_URL="${DEFAULT_URL:-http://localhost:4000}"
  export JEKYLL_VERSION="${JEKYLL_VERSION:-~> 4.3}"
  export FFI_VERSION="${FFI_VERSION:-~> 1.17.0}"
  export WEBRICK_VERSION="${WEBRICK_VERSION:-~> 1.7}"
  export COMMONMARKER_VERSION="${COMMONMARKER_VERSION:-0.23.10}"
  export GITHUB_PAGES_MAX_VERSION="${GITHUB_PAGES_MAX_VERSION:-232}"
  export COMMONMARKER_MACOS_VERSION="${COMMONMARKER_MACOS_VERSION:-~> 0.23}"
  export RUBY_MIN_VERSION_MACOS="${RUBY_MIN_VERSION_MACOS:-2.6.0}"
  return 1
}
@@ -0,0 +1,52 @@
1
+ # scripts/lib/install/deploy/
2
+
3
+ Pluggable deploy-target modules consumed by `scripts/bin/install deploy`
4
+ (Phase 4 of the installer refactor). Each module configures one target;
5
+ the registry coordinates discovery, dispatch, and verification.
6
+
7
+ ## Files
8
+
9
+ | File | Role |
10
+ | ----------------- | ------------------------------------------------------------------- |
11
+ | `registry.sh` | Module discovery, dispatch, shared `deploy_render` / `deploy_copy`. |
12
+ | `github-pages.sh` | Actions workflow that publishes `_site/` to `gh-pages`. |
13
+ | `azure-swa.sh` | Azure SWA workflow + `staticwebapp.config.json`. |
14
+ | `docker-prod.sh` | Multi-stage Docker build + production compose + nginx config. |
15
+
16
+ ## Module contract
17
+
18
+ Every module must define:
19
+
20
+ | Symbol | Purpose |
21
+ | --------------------------------------- | -------------------------------------------------------- |
22
+ | `DEPLOY_<SLUG_UPPER>_TITLE` | One-line display name shown by `install list-targets`. |
23
+ | `DEPLOY_<SLUG_UPPER>_SUMMARY` | One-line description shown by `install list-targets`. |
24
+ | `deploy_<slug>_check_prereqs <dir>` | Print warnings; return non-zero only on hard blockers. |
25
+ | `deploy_<slug>_install <dir>` | Idempotent file install (uses `deploy_render_if_absent`).|
26
+ | `deploy_<slug>_verify <dir>` | Confirm expected files exist + look correct. |
27
+ | `deploy_<slug>_doc_url` | Print the canonical upstream documentation URL. |
28
+
29
+ Modules use the lightweight `deploy_render` placeholder set
30
+ (`{{RUBY_VERSION}}`, `{{DEFAULT_BRANCH}}`, `{{GITHUB_USER}}`,
31
+ `{{SITE_NAME}}`) so they can run without the full install.sh global
32
+ environment.
33
+
34
+ ## Adding a target
35
+
36
+ 1. Add `templates/deploy/<slug>/` with the assets (workflow YAML,
37
+ Dockerfile, README, etc.). Use `*.template` for files that need
38
+ variable substitution.
39
+ 2. Create `scripts/lib/install/deploy/<slug>.sh` defining the four
41
+ hooks plus the two `DEPLOY_<SLUG_UPPER>_*` metadata variables above.
41
+ 3. Add `<slug>` to `DEPLOY_TARGETS_LIST` in `registry.sh` (alphabetical).
42
+ 4. Optionally reference `<slug>` under `deploy_targets:` in
43
+ `templates/profiles/*.yml` so the profile can suggest it.
44
+ 5. Update `templates/deploy/README.md` with the new target row.
45
+
46
+ ## CLI integration
47
+
48
+ ```bash
49
+ ./scripts/bin/install list-targets
50
+ ./scripts/bin/install deploy github-pages /path/to/site
51
+ ./scripts/bin/install deploy azure-swa,docker-prod /path/to/site
52
+ ```