@meshxdata/fops 0.1.44 → 0.1.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +183 -0
- package/package.json +1 -1
- package/src/commands/lifecycle.js +101 -5
- package/src/commands/setup.js +45 -4
- package/src/plugins/bundled/fops-plugin-azure/index.js +29 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-core.js +1185 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-flux.js +1180 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-ingress.js +393 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-naming.js +104 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-network.js +296 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-postgres.js +768 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-reconcilers.js +538 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-secrets.js +849 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-stacks.js +643 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-state.js +145 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-storage.js +496 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-terraform.js +1032 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks.js +155 -4245
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-keyvault.js +186 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops.js +29 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-results.js +78 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure.js +1 -1
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/infra-cmds.js +758 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/registry-cmds.js +250 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/test-cmds.js +52 -1
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/vm-cmds.js +10 -0
- package/src/plugins/bundled/fops-plugin-foundation/lib/apply.js +3 -2
- package/src/plugins/bundled/fops-plugin-foundation/lib/helpers.js +21 -0
- package/src/plugins/bundled/fops-plugin-foundation/lib/tools-read.js +3 -5
- package/src/ui/tui/App.js +13 -13
- package/src/web/dist/assets/index-NXC8Hvnp.css +1 -0
- package/src/web/dist/assets/index-QH1N4ejK.js +112 -0
- package/src/web/dist/index.html +2 -2
- package/src/web/server.js +4 -4
- package/src/web/dist/assets/index-BphVaAUd.css +0 -1
- package/src/web/dist/assets/index-CSckLzuG.js +0 -129
|
@@ -0,0 +1,1180 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* azure-aks-flux.js - Flux provisioning, bootstrap, and commands
|
|
3
|
+
*
|
|
4
|
+
* Depends on: azure-aks-naming.js, azure-aks-state.js
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import fs from "node:fs";
|
|
8
|
+
import os from "node:os";
|
|
9
|
+
import path from "node:path";
|
|
10
|
+
import { fileURLToPath } from "node:url";
|
|
11
|
+
import {
|
|
12
|
+
OK, WARN, ERR, DIM, LABEL,
|
|
13
|
+
banner, hint, kvLine, subArgs,
|
|
14
|
+
lazyExeca, ensureAzCli, ensureAzAuth,
|
|
15
|
+
resolveGithubToken, readState,
|
|
16
|
+
} from "./azure.js";
|
|
17
|
+
import { AKS_DEFAULTS } from "./azure-aks-naming.js";
|
|
18
|
+
import {
|
|
19
|
+
readClusterState, writeClusterState, resolveFluxConfig, requireCluster,
|
|
20
|
+
} from "./azure-aks-state.js";
|
|
21
|
+
|
|
22
|
+
// ── Template defaults ─────────────────────────────────────────────────────────
|
|
23
|
+
|
|
24
|
+
// Default GitHub location of the Flux cluster template consumed by
// provisionFluxFromTemplate when no --template-* overrides are supplied.
export const TEMPLATE_DEFAULTS = {
  templateOwner: "meshxdata",              // GitHub org hosting the template repo
  templateRepo: "platform-flux-template",  // repo containing the cluster template
  templateBranch: "main",                  // branch to clone/render from
  templatePath: "clusters/_template",      // directory inside the repo that gets rendered
};
|
|
30
|
+
|
|
31
|
+
// Kustomizations that fops manages directly — suspend them so Flux doesn't revert patches.
// Names must match Kustomization objects in the flux-system namespace
// (consumed by suspendManagedKustomizations below).
export const FOPS_MANAGED_KUSTOMIZATIONS = [
  "foundation-backend",
  "foundation-processor",
  "foundation-scheduler",
  "foundation-watcher",
  "foundation-storage-engine",
  "istio-controlplane",
];
|
|
40
|
+
|
|
41
|
+
// DAI workload Kustomizations. Per the targets ternary in
// suspendManagedKustomizations, these are suspended together with the
// fops-managed set when opts.dai is NOT set.
export const DAI_KUSTOMIZATIONS = [
  "dai-backend",
  "dai-trino",
];
|
|
45
|
+
|
|
46
|
+
// ── CLI helpers ───────────────────────────────────────────────────────────────
|
|
47
|
+
|
|
48
|
+
/**
 * Verify the Flux CLI is on PATH by running `flux --version`.
 * On failure, prints install instructions and terminates the process
 * with exit code 1 (never returns in that case).
 *
 * @param {Function} execa - execa-compatible command runner
 */
export async function ensureFluxCli(execa) {
  let cliAvailable = true;
  try {
    await execa("flux", ["--version"], { timeout: 10000 });
  } catch {
    cliAvailable = false;
  }
  if (cliAvailable) return;

  console.error(ERR("\n Flux CLI is not installed."));
  hint("Install: brew install fluxcd/tap/flux");
  hint("Or: curl -s https://fluxcd.io/install.sh | sudo bash\n");
  process.exit(1);
}
|
|
58
|
+
|
|
59
|
+
// ── Template submodule resolver ───────────────────────────────────────────────
|
|
60
|
+
|
|
61
|
+
/**
 * Locate a local checkout of the platform-flux-template submodule relative
 * to this file (used when developing fops from source, so no clone is needed).
 *
 * @returns {string|null} absolute path to the submodule root, or null when
 *   the expected "clusters/_template" marker directory is absent.
 */
function resolveTemplateSubmodule() {
  const here = path.dirname(fileURLToPath(import.meta.url));
  const candidate = path.resolve(here, "../../../../../../platform-flux-template");
  const marker = path.join(candidate, "clusters/_template");
  return fs.existsSync(marker) ? candidate : null;
}
|
|
69
|
+
|
|
70
|
+
// ── Clone or update repo ──────────────────────────────────────────────────────
|
|
71
|
+
|
|
72
|
+
/**
 * Clone a GitHub repo into the shared fops cache (~/.fops/repos/<cacheKey>)
 * or fast-forward the cached checkout to origin/<branch> if it exists.
 *
 * @param {Function} execa - execa-compatible command runner
 * @param {object} opts
 * @param {string} opts.repo - repository name
 * @param {string} opts.owner - GitHub owner/org
 * @param {string} [opts.branch="main"] - branch to clone/track
 * @param {string} [opts.githubToken] - token embedded in the HTTPS remote URL
 * @param {string} opts.cacheKey - subdirectory name under ~/.fops/repos
 * @returns {Promise<string>} absolute path of the cached checkout
 */
export async function cloneOrUpdateRepo(execa, { repo, owner, branch, githubToken, cacheKey }) {
  const cacheDir = path.join(os.homedir(), ".fops", "repos", cacheKey);
  const ref = branch || "main";
  const repoUrl = githubToken
    ? `https://x-access-token:${githubToken}@github.com/${owner}/${repo}.git`
    : `https://github.com/${owner}/${repo}.git`;

  if (fs.existsSync(path.join(cacheDir, ".git"))) {
    hint(`Updating cached ${owner}/${repo}…`);
    // Refresh the remote URL so a new/rotated token takes effect: the URL
    // stored at clone time may embed a stale credential (or none at all),
    // which made subsequent fetches fail on private repos.
    await execa("git", ["remote", "set-url", "origin", repoUrl], { cwd: cacheDir, timeout: 10000 });
    await execa("git", ["fetch", "origin", ref], { cwd: cacheDir, timeout: 60000 });
    // Hard reset so local state always mirrors the remote branch exactly.
    await execa("git", ["reset", "--hard", `origin/${ref}`], { cwd: cacheDir, timeout: 30000 });
  } else {
    hint(`Cloning ${owner}/${repo}…`);
    fs.mkdirSync(cacheDir, { recursive: true });
    // Shallow clone — only the tip of the branch is needed.
    await execa("git", ["clone", "--depth", "1", "--branch", ref, repoUrl, cacheDir], { timeout: 120000 });
  }
  return cacheDir;
}
|
|
89
|
+
|
|
90
|
+
// ── Template rendering ────────────────────────────────────────────────────────
|
|
91
|
+
|
|
92
|
+
/**
 * Render the Flux cluster template directory into outputDir, substituting
 * `{{VAR}}` placeholders with values derived from `vars`.
 *
 * All files are read/written as UTF-8 text. Substitution is literal:
 * replacement values are inserted verbatim (no `$&`/`$'` replacement-pattern
 * expansion) and null/undefined values render as empty strings instead of
 * the text "undefined".
 *
 * @param {string} templateDir - source directory containing template files
 * @param {string} outputDir - destination directory (created if absent)
 * @param {object} vars - template variables; clusterName drives several
 *   derived defaults (overlay, tailscale hostname, flux path)
 * @returns {object} the placeholder → value map that was applied
 */
export function renderTemplate(templateDir, outputDir, vars) {
  const varMap = {
    "{{CLUSTER_NAME}}": vars.clusterName,
    "{{OVERLAY}}": vars.overlay || `${vars.clusterName}-azure`,
    "{{ENVIRONMENT}}": vars.environment || "demo",
    "{{CLOUD}}": "azure",
    "{{REGION}}": vars.region || "uaenorth",
    "{{AZURE_KEYVAULT_URL}}": vars.keyvaultUrl || "",
    "{{AZURE_IDENTITY_ID}}": vars.azureIdentityId || "",
    "{{AZURE_TENANT_ID}}": vars.azureTenantId || "",
    "{{ACR_USERNAME}}": vars.acrUsername || "",
    "{{ACR_PASSWORD}}": vars.acrPassword || "",
    "{{TAILSCALE_CLIENT_ID}}": vars.tailscaleClientId || "",
    "{{TAILSCALE_CLIENT_SECRET}}": vars.tailscaleClientSecret || "",
    "{{TAILSCALE_HOSTNAME}}": vars.tailscaleHostname || vars.clusterName,
    "{{VNET_CIDR}}": vars.vnetCidr || "10.50.0.0/16",
    "{{PROMETHEUS_REMOTE_WRITE_URL}}": vars.prometheusRemoteWriteUrl || "",
    "{{DOMAIN}}": vars.domain || "",
    "{{NAMESPACE}}": vars.namespace || "foundation",
    "{{POSTGRES_HOST}}": vars.postgresHost || "",
    "{{FLUX_PATH}}": vars.fluxPath || `clusters/${vars.clusterName}`,
    "{{AUTH0_DOMAIN}}": vars.auth0Domain || "meshx.eu.auth0.com",
  };

  // split/join performs a literal replacement of every occurrence.
  // The previous replaceAll(string, string) interpreted `$&`/`$'` patterns
  // in the replacement, corrupting output whenever a value (e.g. a
  // generated ACR password) contained `$`; it also stringified undefined
  // values to "undefined".
  function applyVars(content) {
    let out = content;
    for (const [k, v] of Object.entries(varMap)) {
      out = out.split(k).join(v ?? "");
    }
    return out;
  }

  // Recursively mirror the template tree, rendering every regular file.
  function copyDir(src, dest) {
    fs.mkdirSync(dest, { recursive: true });
    for (const entry of fs.readdirSync(src, { withFileTypes: true })) {
      const srcPath = path.join(src, entry.name);
      const destPath = path.join(dest, entry.name);
      if (entry.isDirectory()) {
        copyDir(srcPath, destPath);
      } else {
        const content = fs.readFileSync(srcPath, "utf8");
        fs.writeFileSync(destPath, applyVars(content));
      }
    }
  }

  copyDir(templateDir, outputDir);
  return varMap;
}
|
|
141
|
+
|
|
142
|
+
// ── Recursive copy with variable substitution ─────────────────────────────────
|
|
143
|
+
|
|
144
|
+
/**
 * Recursively copy srcDir into destDir, substituting `{{KEY}}` placeholders
 * with the corresponding value from `vars`.
 *
 * Substitution is literal — replacement values are inserted verbatim — and
 * null/undefined values become empty strings. Entries that are neither a
 * regular file nor a directory (symlinks etc.) are skipped, as before.
 *
 * @param {string} srcDir - template source directory
 * @param {string} destDir - destination directory (created if absent)
 * @param {Record<string, string>} vars - placeholder key → value map
 */
export function copyAndRenderTemplateDir(srcDir, destDir, vars) {
  fs.mkdirSync(destDir, { recursive: true });
  for (const entry of fs.readdirSync(srcDir, { withFileTypes: true })) {
    const srcPath = path.join(srcDir, entry.name);
    const destPath = path.join(destDir, entry.name);
    if (entry.isDirectory()) {
      copyAndRenderTemplateDir(srcPath, destPath, vars);
    } else if (entry.isFile()) {
      let content = fs.readFileSync(srcPath, "utf8");
      for (const [key, val] of Object.entries(vars)) {
        // split/join replaces every literal occurrence. The previous
        // `content.replace(new RegExp(...), val)` expanded `$&`/`$'`
        // replacement patterns when a value contained `$` (e.g. generated
        // passwords), silently corrupting rendered manifests.
        content = content.split(`{{${key}}}`).join(val ?? "");
      }
      fs.writeFileSync(destPath, content);
    }
  }
}
|
|
160
|
+
|
|
161
|
+
// ── Commit cluster to Flux repo ───────────────────────────────────────────────
|
|
162
|
+
|
|
163
|
+
/**
 * Copy a rendered cluster directory into the flux repo checkout, stage it
 * (plus app overlays when present), then commit and push if anything changed.
 *
 * @param {Function} execa - execa-compatible command runner
 * @param {object} opts
 * @param {string} opts.fluxDir - local checkout of the flux repo
 * @param {string} opts.clusterName - cluster name (target: clusters/<name>)
 * @param {string} opts.clusterDir - rendered manifests to copy in
 * @param {string} [opts.overlayName] - when set, also stage apps/ overlays
 * @param {boolean} [opts.dryRun] - log what would happen, don't commit
 * @param {string} [opts.branch="main"] - remote branch to push to; previously
 *   hard-coded to "main", which broke flux repos checked out on another branch
 * @returns {Promise<boolean>} true when a commit was pushed, false otherwise
 */
export async function commitClusterToFlux(execa, { fluxDir, clusterName, clusterDir, overlayName, dryRun, branch = "main" }) {
  const targetDir = path.join(fluxDir, "clusters", clusterName);

  // Replace the cluster directory wholesale so files deleted from the
  // rendered output don't linger in the repo.
  if (fs.existsSync(targetDir)) {
    hint(`Cluster directory exists in flux repo — updating…`);
    fs.rmSync(targetDir, { recursive: true, force: true });
  }

  fs.cpSync(clusterDir, targetDir, { recursive: true });

  await execa("git", ["add", `clusters/${clusterName}`], { cwd: fluxDir, timeout: 30000 });

  // Overlay rendering (done by the caller) writes under apps/ — stage it too.
  if (overlayName) {
    const appsDir = path.join(fluxDir, "apps");
    if (fs.existsSync(appsDir)) {
      await execa("git", ["add", "apps/"], { cwd: fluxDir, timeout: 30000 });
    }
  }

  const { stdout: status } = await execa("git", ["status", "--porcelain"], { cwd: fluxDir, timeout: 10000 });
  if (!status.trim()) {
    hint("No changes to commit (manifests already up to date)");
    return false;
  }

  if (dryRun) {
    console.log(OK(` ✓ Would commit cluster manifests (dry-run)`));
    return false;
  }

  await execa("git", ["commit", "-m", `Add cluster ${clusterName}\n\nAuto-generated by fops azure aks up`], { cwd: fluxDir, timeout: 30000 });
  console.log(OK(` ✓ Committed cluster manifests`));

  hint("Pushing to flux repo…");
  await execa("git", ["push", "origin", branch], { cwd: fluxDir, timeout: 60000 });
  console.log(OK(` ✓ Pushed to flux repo`));
  return true;
}
|
|
201
|
+
|
|
202
|
+
// ── Full template-based Flux provisioning ─────────────────────────────────────
|
|
203
|
+
|
|
204
|
+
/**
 * Full template-based Flux provisioning: render the cluster template,
 * generate per-app overlays inside the flux repo, then commit and push.
 *
 * Steps:
 *  1. Resolve the template source (local submodule if present, else clone).
 *  2. Render "clusters/_template" into a temp dir with {{VAR}} substitution.
 *  3. Clone/update the flux repo and render apps/foundation overlays.
 *  4. Commit + push via commitClusterToFlux (honors dryRun).
 *
 * @param {Function} execa - execa-compatible command runner
 * @param {object} opts - see destructuring below; fluxRepo/fluxOwner are the
 *   target GitOps repo, template* default to TEMPLATE_DEFAULTS.
 * @returns {Promise<false|{renderedDir, varsUsed, committed, fluxDir?}>}
 *   NOTE(review): returns `false` when the template dir is missing but an
 *   object otherwise — callers must handle both shapes.
 */
export async function provisionFluxFromTemplate(execa, opts) {
  const {
    clusterName,
    region,
    domain,
    keyvaultUrl,
    environment,
    azureIdentityId,
    azureTenantId,
    acrUsername,
    acrPassword,
    tailscaleClientId,
    tailscaleClientSecret,
    vnetCidr,
    prometheusRemoteWriteUrl,
    auth0Domain,
    githubToken,
    fluxRepo,
    fluxOwner,
    fluxBranch,
    templateRepo = TEMPLATE_DEFAULTS.templateRepo,
    templateOwner = TEMPLATE_DEFAULTS.templateOwner,
    templateBranch = TEMPLATE_DEFAULTS.templateBranch,
    templatePath = TEMPLATE_DEFAULTS.templatePath,
    dryRun = false,
    noCommit = false,
  } = opts;

  banner("Flux Template Provisioning");
  kvLine("Cluster", clusterName);
  kvLine("Target", DIM(`${fluxOwner}/${fluxRepo}`));

  // Prefer a local submodule checkout (source-tree development) over cloning.
  let templateDir;
  const submodulePath = resolveTemplateSubmodule();
  if (submodulePath) {
    templateDir = submodulePath;
    kvLine("Template", DIM(`submodule (${path.basename(submodulePath)})`));
  } else {
    kvLine("Template", DIM(`${templateOwner}/${templateRepo}`));
    templateDir = await cloneOrUpdateRepo(execa, {
      repo: templateRepo,
      owner: templateOwner,
      branch: templateBranch,
      githubToken,
      cacheKey: `${templateOwner}-${templateRepo}`,
    });
  }

  const templateSourceDir = path.join(templateDir, templatePath);
  if (!fs.existsSync(templateSourceDir)) {
    console.log(WARN(`\n ⚠ Template directory not found: ${templatePath}`));
    hint("Falling back to legacy Flux bootstrap (no template rendering)");
    return false;
  }

  // Render into a unique temp dir; removed at the end of this function.
  const renderedDir = path.join(os.tmpdir(), `flux-${clusterName}-${Date.now()}`);
  hint("Rendering template…");
  const varsUsed = renderTemplate(templateSourceDir, renderedDir, {
    clusterName,
    overlay: `${clusterName}-azure`,
    region,
    domain,
    keyvaultUrl,
    environment,
    azureIdentityId,
    azureTenantId,
    acrUsername,
    acrPassword,
    tailscaleClientId,
    tailscaleClientSecret,
    tailscaleHostname: clusterName,
    vnetCidr,
    prometheusRemoteWriteUrl,
    auth0Domain,
    fluxPath: `clusters/${clusterName}`,
  });
  console.log(OK(` ✓ Template rendered to ${renderedDir}`));

  // --no-commit: leave the rendered output in the temp dir for inspection.
  if (noCommit) {
    console.log(OK(`\n ✓ Template rendered (--no-commit). Output: ${renderedDir}`));
    return { renderedDir, varsUsed, committed: false };
  }

  const fluxDir = await cloneOrUpdateRepo(execa, {
    repo: fluxRepo,
    owner: fluxOwner,
    branch: fluxBranch,
    githubToken,
    cacheKey: `${fluxOwner}-${fluxRepo}`,
  });

  // Variables for per-app overlay rendering (distinct key style from
  // renderTemplate: bare names, braces added by copyAndRenderTemplateDir).
  const overlayName = `${clusterName}-azure`;
  const overlayVars = {
    CLUSTER_NAME: clusterName,
    CLUSTER_DOMAIN: `${clusterName}.meshx.app`,
    POSTGRES_HOST: `fops-${clusterName}-psql.postgres.database.azure.com`,
    STORAGE_ACCOUNT: `fops${clusterName.replace(/-/g, "")}`,
    NATS_PREFIX: `meshx-${clusterName}`,
    OVERLAY_NAME: overlayName,
    HELM_REPO_NAMESPACE: "flux-system",
    AUTH0_DOMAIN: auth0Domain || "meshx.eu.auth0.com",
  };

  // For each app that ships an overlays/_template, (re)create the
  // cluster-specific overlay from scratch.
  const appsDir = path.join(fluxDir, "apps", "foundation");
  if (fs.existsSync(appsDir)) {
    let createdCount = 0;
    for (const app of fs.readdirSync(appsDir)) {
      const appDir = path.join(appsDir, app);
      if (!fs.statSync(appDir).isDirectory()) continue;
      const templateOverlay = path.join(appDir, "overlays", "_template");
      const clusterOverlay = path.join(appDir, "overlays", overlayName);

      if (fs.existsSync(templateOverlay)) {
        if (fs.existsSync(clusterOverlay)) {
          fs.rmSync(clusterOverlay, { recursive: true, force: true });
        }
        copyAndRenderTemplateDir(templateOverlay, clusterOverlay, overlayVars);
        createdCount++;
      }
    }
    if (createdCount > 0) {
      hint(` Created ${createdCount} overlays: ${overlayName}`);
    }
  }

  const committed = await commitClusterToFlux(execa, {
    fluxDir,
    clusterName,
    clusterDir: renderedDir,
    overlayName,
    dryRun,
  });

  // Best-effort cleanup of the temp render dir.
  try { fs.rmSync(renderedDir, { recursive: true, force: true }); } catch {}

  return { renderedDir, varsUsed, committed, fluxDir };
}
|
|
341
|
+
|
|
342
|
+
// ── Bootstrap Flux (Azure GitOps extension) ───────────────────────────────────
|
|
343
|
+
|
|
344
|
+
/**
 * Bootstrap Flux on an AKS cluster via the Azure-managed GitOps extension
 * (microsoft.flux) — read-only: unlike `flux bootstrap`, nothing is pushed
 * to the repo.
 *
 * Installs the k8s-extension, then creates a cluster-scoped GitOps
 * configuration pointing at <owner>/<repo>:<branch> with a single
 * kustomization at ./<fluxPath>.
 *
 * @param {Function} execa - execa-compatible command runner
 * @param {object} opts - clusterName, rg (resource group), sub (subscription,
 *   expanded via subArgs), githubToken (required — exits 1 if absent),
 *   repo/owner/branch (default to AKS_DEFAULTS), path (repo-relative flux path)
 * @throws rethrows az CLI failures (with a hint for the known rpds bug)
 */
export async function bootstrapFlux(execa, { clusterName, rg, sub, githubToken, repo, owner, path: fluxPath, branch }) {
  repo = repo || AKS_DEFAULTS.fluxRepo;
  owner = owner || AKS_DEFAULTS.fluxOwner;
  branch = branch || AKS_DEFAULTS.fluxBranch;

  if (!githubToken) {
    console.error(ERR("\n GitHub token required for Flux bootstrap."));
    hint("Authenticate: gh auth login (writes to ~/.netrc)");
    hint("Or: export GITHUB_TOKEN=<token>");
    hint("Or: --github-token <token>\n");
    process.exit(1);
  }

  const repoUrl = `https://github.com/${owner}/${repo}`;
  const configName = "flux-system";

  banner("Flux Bootstrap (Azure GitOps)");
  kvLine("Repo", DIM(`${owner}/${repo}`));
  kvLine("Path", DIM(fluxPath));
  kvLine("Branch", DIM(branch));
  kvLine("Mode", DIM("Azure-managed extension (read-only)"));
  hint("This takes 2–5 minutes…\n");

  hint("Installing Flux extension…");
  try {
    await execa("az", [
      "k8s-extension", "create",
      "--resource-group", rg,
      "--cluster-name", clusterName,
      "--cluster-type", "managedClusters",
      "--name", "flux",
      "--extension-type", "microsoft.flux",
      "--scope", "cluster",
      "--output", "none",
      ...subArgs(sub),
    ], { timeout: 600000 });
    console.log(OK(" ✓ Flux extension installed"));
  } catch (err) {
    const msg = (err.stderr || err.message || "").toString();
    // Known Azure CLI packaging bug: the k8s-extension extension ships a
    // broken vendored rpds module. Surface the documented workaround.
    if (/rpds|No module named|ModuleNotFoundError/.test(msg)) {
      console.error(ERR("\n Azure k8s-extension failed (broken vendored rpds — known Azure CLI bug)."));
      hint("Workaround (macOS Homebrew): install rpds-py into Azure CLI's Python, then remove the extension's vendored rpds:");
      hint(" $(brew --prefix azure-cli)/libexec/bin/pip install rpds-py");
      hint(" rm -rf ~/.azure/cliextensions/k8s-extension/rpds");
      hint("Then re-run. See: https://github.com/Azure/azure-cli/issues/32709\n");
      throw err;
    }
    throw err;
  }

  hint("Creating GitOps configuration…");
  await execa("az", [
    "k8s-configuration", "flux", "create",
    "--resource-group", rg,
    "--cluster-name", clusterName,
    "--cluster-type", "managedClusters",
    "--name", configName,
    "--namespace", "flux-system",
    "--scope", "cluster",
    "--url", repoUrl,
    "--branch", branch,
    "--https-user", "x-access-token",
    "--https-key", githubToken,
    "--kustomization", `name=${configName}`, `path=./${fluxPath}`, "prune=true",
    "--output", "none",
    ...subArgs(sub),
  ], { timeout: 300000 });
  console.log(OK(" ✓ GitOps configuration created (read-only — no push to repo)"));
}
|
|
413
|
+
|
|
414
|
+
// ── Reconcile Flux (idempotent install/update) ────────────────────────────────
|
|
415
|
+
|
|
416
|
+
/**
 * Idempotent install/update of the Azure-managed Flux setup:
 *  1. Install the microsoft.flux extension if absent.
 *  2. Create the "flux-system" GitOps configuration if absent.
 *  3. If it exists but its repo URL or kustomization path diverges from the
 *     desired values, delete and recreate it (the az CLI cannot update the
 *     kustomization path in place).
 *
 * Unlike bootstrapFlux, most failures here are reported as warnings rather
 * than thrown, so the surrounding reconciler loop can continue.
 *
 * @param {Function} execa - execa-compatible command runner
 * @param {object} opts - clusterName, rg, sub, githubToken,
 *   repo/owner/branch (default to AKS_DEFAULTS), path (repo-relative)
 */
export async function reconcileFlux(execa, { clusterName, rg, sub, githubToken, repo, owner, path: fluxPath, branch }) {
  repo = repo || AKS_DEFAULTS.fluxRepo;
  owner = owner || AKS_DEFAULTS.fluxOwner;
  branch = branch || AKS_DEFAULTS.fluxBranch;
  const repoUrl = `https://github.com/${owner}/${repo}`;
  const configName = "flux-system";

  // Probe for the extension; reject:false turns "not found" into a non-zero
  // exit code instead of a thrown error.
  const { exitCode: extExists } = await execa("az", [
    "k8s-extension", "show",
    "--resource-group", rg,
    "--cluster-name", clusterName,
    "--cluster-type", "managedClusters",
    "--name", "flux",
    "--output", "none",
    ...subArgs(sub),
  ], { reject: false, timeout: 30000 });

  if (extExists !== 0) {
    hint("Installing Flux extension…");
    try {
      await execa("az", [
        "k8s-extension", "create",
        "--resource-group", rg,
        "--cluster-name", clusterName,
        "--cluster-type", "managedClusters",
        "--name", "flux",
        "--extension-type", "microsoft.flux",
        "--scope", "cluster",
        "--output", "none",
        ...subArgs(sub),
      ], { timeout: 600000 });
      console.log(OK(" ✓ Flux extension installed"));
    } catch (err) {
      const msg = (err.stderr || err.message || "").toString();
      // Known Azure CLI packaging bug (broken vendored rpds) — print the
      // documented workaround before rethrowing.
      if (/rpds|No module named|ModuleNotFoundError/.test(msg)) {
        console.error(ERR("\n Azure k8s-extension failed (broken vendored rpds — known Azure CLI bug)."));
        hint("Workaround (macOS Homebrew): install rpds-py into Azure CLI's Python, then remove the extension's vendored rpds:");
        hint(" $(brew --prefix azure-cli)/libexec/bin/pip install rpds-py");
        hint(" rm -rf ~/.azure/cliextensions/k8s-extension/rpds");
        hint("Then re-run. See: https://github.com/Azure/azure-cli/issues/32709\n");
        throw err;
      }
      throw err;
    }
  } else {
    console.log(OK(" ✓ Flux extension already installed"));
  }

  // Probe for the GitOps configuration.
  const { exitCode: cfgExists } = await execa("az", [
    "k8s-configuration", "flux", "show",
    "--resource-group", rg,
    "--cluster-name", clusterName,
    "--cluster-type", "managedClusters",
    "--name", configName,
    "--output", "none",
    ...subArgs(sub),
  ], { reject: false, timeout: 30000 });

  if (cfgExists !== 0) {
    hint("Creating GitOps configuration…");
    try {
      await execa("az", [
        "k8s-configuration", "flux", "create",
        "--resource-group", rg,
        "--cluster-name", clusterName,
        "--cluster-type", "managedClusters",
        "--name", configName,
        "--namespace", "flux-system",
        "--scope", "cluster",
        "--url", repoUrl,
        "--branch", branch,
        "--https-user", "x-access-token",
        "--https-key", githubToken,
        "--kustomization", `name=${configName}`, `path=./${fluxPath}`, "prune=true",
        "--output", "none",
        ...subArgs(sub),
      ], { timeout: 300000 });
      console.log(OK(" ✓ GitOps configuration created"));
    } catch (err) {
      // Non-fatal: warn with the first line of the az error and move on.
      const azErr = (err.stderr || err.message || "").replace(/^.*ERROR:\s*/m, "").split("\n")[0];
      console.log(WARN(` ⚠ GitOps configuration failed: ${azErr}`));
      hint("Create manually: az k8s-configuration flux create -g " + rg + " -c " + clusterName + " -t managedClusters -n " + configName + " ...");
    }
  } else {
    // Configuration exists — compare its repo URL and kustomization path
    // against the desired values and recreate on drift.
    const { stdout: cfgJson } = await execa("az", [
      "k8s-configuration", "flux", "show",
      "--resource-group", rg,
      "--cluster-name", clusterName,
      "--cluster-type", "managedClusters",
      "--name", configName,
      "--output", "json",
      ...subArgs(sub),
    ], { reject: false, timeout: 30000 });
    let currentUrl = "";
    let currentPath = "";
    if (cfgJson) {
      try {
        const cfg = JSON.parse(cfgJson);
        currentUrl = (cfg.gitRepository && cfg.gitRepository.url) || "";
        const ks = cfg.kustomizations && cfg.kustomizations[configName];
        currentPath = (ks && ks.path) || "";
      } catch { /* ignore */ }
    }
    const desiredPath = fluxPath.startsWith("./") ? fluxPath : `./${fluxPath}`;
    // Empty current values (failed show/parse) are treated as "no mismatch".
    const urlMismatch = currentUrl && currentUrl !== repoUrl;
    const pathMismatch = currentPath && currentPath !== desiredPath;
    if (urlMismatch || pathMismatch) {
      hint(`Flux path mismatch: ${currentPath} → ${desiredPath}. Recreating…`);
      try {
        await execa("az", [
          "k8s-configuration", "flux", "delete",
          "--resource-group", rg,
          "--cluster-name", clusterName,
          "--cluster-type", "managedClusters",
          "--name", configName,
          "--yes",
          "--output", "none",
          ...subArgs(sub),
        ], { timeout: 300000 });
        hint(" Deleted old configuration…");
        await execa("az", [
          "k8s-configuration", "flux", "create",
          "--resource-group", rg,
          "--cluster-name", clusterName,
          "--cluster-type", "managedClusters",
          "--name", configName,
          "--namespace", "flux-system",
          "--scope", "cluster",
          "--url", repoUrl,
          "--branch", branch,
          "--https-user", "x-access-token",
          "--https-key", githubToken,
          "--kustomization", `name=${configName}`, `path=${desiredPath}`, "prune=true",
          "--output", "none",
          ...subArgs(sub),
        ], { timeout: 300000 });
        console.log(OK(` ✓ GitOps configuration recreated with path ${desiredPath}`));
      } catch (err) {
        const azErr = (err.stderr || err.message || "").replace(/^.*ERROR:\s*/m, "").split("\n")[0];
        console.log(WARN(` ⚠ Flux recreate failed: ${azErr}`));
      }
    } else {
      console.log(OK(" ✓ GitOps configuration already exists"));
    }
  }
}
|
|
562
|
+
|
|
563
|
+
// ── Reconcile Flux step (for reconciler array) ────────────────────────────────
|
|
564
|
+
|
|
565
|
+
/**
 * Reconciler step: ensure the Flux extension + GitOps configuration exist
 * for the cluster, then persist the flux settings to local cluster state.
 * No-op when --no-flux is set; skipped with a warning when no GitHub token
 * can be resolved.
 *
 * @param {object} ctx - reconciler context { execa, clusterName, rg, sub, opts }
 */
export async function reconcileFluxStep(ctx) {
  const { execa, clusterName, rg, sub, opts } = ctx;
  if (opts.noFlux) return;

  const githubToken = resolveGithubToken(opts);
  if (!githubToken) {
    console.log(WARN(" ⚠ Skipping Flux — no GitHub token found."));
    hint("Authenticate with: gh auth login");
    return;
  }

  const fluxCfg = resolveFluxConfig(clusterName, opts);
  const fluxSettings = {
    repo: fluxCfg.fluxRepo,
    owner: fluxCfg.fluxOwner,
    path: fluxCfg.fluxPath,
    branch: fluxCfg.fluxBranch,
  };

  await reconcileFlux(execa, { clusterName, rg, sub, githubToken, ...fluxSettings });

  writeClusterState(clusterName, { flux: fluxSettings });
}
|
|
588
|
+
|
|
589
|
+
// ── Suspend managed Kustomizations ────────────────────────────────────────────
|
|
590
|
+
|
|
591
|
+
/**
 * Suspend Flux Kustomizations that fops patches directly, so Flux does not
 * revert those patches on its next reconcile.
 *
 * Each target is checked for existence and current suspend state before
 * patching; missing or already-suspended Kustomizations are skipped.
 * kubectl calls use reject:false, so individual failures do not throw.
 *
 * @param {object} ctx - reconciler context { execa, clusterName, opts }
 */
export async function suspendManagedKustomizations(ctx) {
  const { execa, clusterName, opts } = ctx;
  const kubectl = (args, o = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 15000, reject: false, ...o });

  // NOTE(review): this looks inverted — when opts.dai is set, the DAI
  // kustomizations are EXCLUDED from suspension (only the foundation set is
  // suspended), and included otherwise. Confirm whether `dai: true` is meant
  // to suspend the DAI apps too before changing.
  const targets = opts?.dai
    ? FOPS_MANAGED_KUSTOMIZATIONS
    : [...FOPS_MANAGED_KUSTOMIZATIONS, ...DAI_KUSTOMIZATIONS];

  let suspended = 0;
  for (const name of targets) {
    // Skip Kustomizations that don't exist on this cluster.
    const { exitCode } = await kubectl([
      "get", "kustomization", name, "-n", "flux-system",
    ]);
    if (exitCode !== 0) continue;

    // Skip ones that are already suspended (idempotence).
    const { stdout: isSuspended } = await kubectl([
      "get", "kustomization", name, "-n", "flux-system",
      "-o", "jsonpath={.spec.suspend}",
    ]);
    if (isSuspended === "true") continue;

    await kubectl([
      "patch", "kustomization", name, "-n", "flux-system",
      "--type", "merge", "-p", '{"spec":{"suspend":true}}',
    ]);
    suspended++;
  }
  if (suspended > 0) {
    console.log(OK(` ✓ Suspended ${suspended} Flux Kustomization(s) to prevent revert`));
  } else {
    console.log(OK(" ✓ Managed Kustomizations already suspended"));
  }
}
|
|
625
|
+
|
|
626
|
+
// ── Flux prerequisites reconciler ─────────────────────────────────────────────

/**
 * Best-effort preparation of a cluster so a Flux-managed deployment can
 * reconcile cleanly. No-op unless the cluster's tracked state records a Flux
 * bootstrap (`tracked.flux`). Steps, each independently best-effort:
 *   1. Remove the legacy acr-cache-system namespace and its mutating webhooks.
 *   2. Pre-install CRDs (external-secrets bundle, Istio base chart) that Flux
 *      manifests reference before their operators deploy.
 *   3. Re-enable the v1beta1 API on the external-secrets CRDs if the operator
 *      disabled serving it.
 *   4. Annotate every flux-system Kustomization to trigger a reconciliation.
 *
 * Fix: the v1beta1 enable logic was duplicated inline for the ExternalSecret
 * and SecretStore CRDs; it is now a single local helper applied to both.
 *
 * @param {{ execa: Function, clusterName: string }} ctx - injected execa and
 *   the kubectl context name of the target cluster.
 * @returns {Promise<void>}
 */
export async function reconcileFluxPrereqs(ctx) {
  const { execa, clusterName } = ctx;
  const tracked = readClusterState(clusterName);
  if (!tracked?.flux) return;

  banner("Flux Prerequisites");

  // All kubectl calls are non-throwing (reject: false); callers inspect
  // exitCode/stdout instead of catching.
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { reject: false, timeout: 60000, ...opts });

  // 1. Clean up legacy acr-cache-system if present
  try {
    const { exitCode } = await kubectl(["get", "namespace", "acr-cache-system"]);
    if (exitCode === 0) {
      hint("Cleaning up legacy acr-cache-system namespace…");
      // Webhooks must go first or the namespace delete can hang on admission.
      for (const wh of ["acr-pod-webhook", "acr-helm-webhook"]) {
        await kubectl(["delete", "mutatingwebhookconfiguration", wh, "--ignore-not-found"]);
      }
      await kubectl(["delete", "namespace", "acr-cache-system", "--ignore-not-found"]);
      console.log(OK(" ✓ Legacy acr-cache-system removed"));
    }
  } catch { /* not present */ }

  // 2. Pre-install CRDs that Flux manifests reference before their operators deploy
  const crdBundles = [
    {
      name: "external-secrets",
      check: "externalsecrets.external-secrets.io",
      url: "https://raw.githubusercontent.com/external-secrets/external-secrets/main/deploy/crds/bundle.yaml",
      // The bundle's annotations exceed the client-side apply size limit.
      serverSide: true,
    },
    {
      name: "Istio",
      check: "virtualservices.networking.istio.io",
      helm: { repo: "https://istio-release.storage.googleapis.com/charts", chart: "base", release: "istio-base", namespace: "istio-system" },
    },
  ];

  for (const bundle of crdBundles) {
    // Presence of one representative CRD is treated as "bundle installed".
    const { exitCode } = await kubectl(["get", "crd", bundle.check], { timeout: 10000 });
    if (exitCode === 0) {
      console.log(OK(` ✓ ${bundle.name} CRDs present`));
      continue;
    }

    hint(`Installing ${bundle.name} CRDs…`);
    try {
      if (bundle.helm) {
        await execa("helm", ["repo", "add", bundle.name, bundle.helm.repo, "--force-update"], { reject: false, timeout: 30000 });
        await execa("helm", [
          "--kube-context", clusterName,
          "install", bundle.helm.release, `${bundle.name}/${bundle.helm.chart}`,
          "-n", bundle.helm.namespace, "--create-namespace",
          "--wait", "--timeout", "90s",
        ], { timeout: 120000 });
      } else {
        const applyArgs = ["apply", "-f", bundle.url];
        if (bundle.serverSide) applyArgs.push("--server-side");
        await kubectl(applyArgs, { timeout: 120000 });
      }
      console.log(OK(` ✓ ${bundle.name} CRDs installed`));
    } catch (err) {
      // Non-fatal: Flux will retry once its operators are up.
      console.log(WARN(` ⚠ ${bundle.name} CRDs failed: ${(err.message || "").split("\n")[0]}`));
    }
  }

  // 3. Enable v1beta1 on the external-secrets CRDs if the operator disabled it
  // Fetches the full CRD, flips `served` on the v1beta1 version entry, and
  // re-applies it server-side. Used for both ExternalSecret and SecretStore.
  const enableV1Beta1 = async (crdName) => {
    const { stdout } = await kubectl(["get", "crd", crdName, "-o", "json"], { timeout: 10000 });
    const crd = JSON.parse(stdout);
    for (const v of crd.spec.versions) {
      if (v.name === "v1beta1") v.served = true;
    }
    await execa("kubectl", [
      "--context", clusterName, "apply", "--server-side", "-f", "-",
    ], { input: JSON.stringify(crd), timeout: 30000, reject: false });
  };

  try {
    // Probe only the ExternalSecret CRD; both CRDs are managed by the same
    // operator, so its state stands in for the pair.
    const { stdout: crdJson } = await kubectl([
      "get", "crd", "externalsecrets.external-secrets.io",
      "-o", "jsonpath={.spec.versions[?(@.name==\"v1beta1\")].served}",
    ], { timeout: 10000 });
    if (crdJson === "false") {
      hint("Enabling v1beta1 on ExternalSecret CRD…");
      await enableV1Beta1("externalsecrets.external-secrets.io");
      await enableV1Beta1("secretstores.external-secrets.io");
      console.log(OK(" ✓ v1beta1 enabled on ExternalSecret CRDs"));
    }
  } catch { /* CRD not installed yet */ }

  // 4. Trigger a full Flux reconciliation
  try {
    hint("Triggering Flux reconciliation…");
    const { stdout: ksList } = await kubectl([
      "get", "kustomization", "-n", "flux-system", "--no-headers",
      "-o", "custom-columns=NAME:.metadata.name",
    ]);
    const names = ksList.trim().split("\n").map(n => n.trim()).filter(Boolean);
    const ts = String(Date.now());
    for (const ks of names) {
      // Unique requestedAt per kustomization so every one re-reconciles even
      // within the same millisecond.
      await kubectl([
        "annotate", "kustomization", ks, "-n", "flux-system",
        `reconcile.fluxcd.io/requestedAt=${ts}-${ks}`, "--overwrite",
      ], { timeout: 10000 });
    }
    console.log(OK(` ✓ Reconciliation triggered for ${names.length} kustomizations`));
  } catch {
    hint("Could not trigger reconciliation — Flux may not be ready yet");
  }
}
|
|
748
|
+
|
|
749
|
+
// ── CLI module resolver helper ────────────────────────────────────────────────

/**
 * Resolve a module path inside the fops CLI source tree to an importable
 * specifier. Tries, in order: the source checkout relative to this file, the
 * installed CLI next to the running binary (following symlinks), and finally a
 * bare relative specifier as a last resort.
 */
function resolveCliSrc(relPath) {
  const hereDir = path.dirname(fileURLToPath(import.meta.url));

  const candidateFromSource = path.resolve(hereDir, "../../../..", relPath);
  if (fs.existsSync(candidateFromSource)) {
    return new URL(`file://${candidateFromSource}`).href;
  }

  const binPath = process.argv[1];
  if (binPath) {
    try {
      const installedRoot = path.dirname(fs.realpathSync(binPath));
      const candidateFromCli = path.resolve(installedRoot, "src", relPath);
      if (fs.existsSync(candidateFromCli)) {
        return new URL(`file://${candidateFromCli}`).href;
      }
    } catch { /* fall through */ }
  }

  return "../../../../" + relPath;
}
|
|
765
|
+
|
|
766
|
+
// ── aks flux init ─────────────────────────────────────────────────────────────

/**
 * Scaffold Flux cluster manifests for a new cluster inside a local flux repo
 * clone. Copies a template directory (from the project's flux-templates/ or a
 * bundled foundation-flux checkout) into `<fluxRepo>/clusters/<name>`,
 * substituting `{{…}}` placeholders, then prints any placeholders that still
 * need manual values.
 *
 * Exits the process (code 1) on any precondition failure: missing cluster
 * name, missing --flux-repo, target not a flux repo, cluster dir already
 * present, or template dir missing.
 *
 * @param {object} [opts] - clusterName, fluxRepo (required), overlay,
 *   namespace, fluxPath, plus optional substitution values (postgresHost,
 *   accessKeyId, azureSpClientId, azureSpClientSecret, azureIdentityId,
 *   azureTenantId, azureKeyvaultUrl).
 * @returns {Promise<void>}
 */
export async function aksFluxInit(opts = {}) {
  const clusterName = opts.clusterName;
  if (!clusterName) {
    console.error(ERR("\n Cluster name is required."));
    hint("Usage: fops azure aks flux init <name> --flux-repo <path>\n");
    process.exit(1);
  }

  const overlay = opts.overlay || "demo-azure";
  const namespace = opts.namespace || "foundation";
  const fluxRepoPath = opts.fluxRepo;

  if (!fluxRepoPath) {
    console.error(ERR("\n --flux-repo <path> is required (path to your local flux repo clone)."));
    hint("Example: fops azure aks flux init alessio --flux-repo ../flux\n");
    process.exit(1);
  }

  // A clusters/ dir is the minimal marker of a flux repo layout.
  const fluxRepo = path.resolve(fluxRepoPath);
  if (!fs.existsSync(path.join(fluxRepo, "clusters"))) {
    console.error(ERR(`\n "${fluxRepo}" does not look like a flux repo (no clusters/ dir).`));
    process.exit(1);
  }

  // Refuse to overwrite an existing cluster directory.
  const clusterDir = path.join(fluxRepo, "clusters", clusterName);
  if (fs.existsSync(clusterDir)) {
    console.error(ERR(`\n Cluster directory already exists: ${clusterDir}`));
    hint("Remove it first or use a different name.\n");
    process.exit(1);
  }

  // Template source: project-local flux-templates/cluster wins over the
  // foundation-flux checkout bundled alongside the CLI source.
  const state = readState();
  const projectRoot = state.azure?.projectRoot || state.projectRoot;
  const thisDir = path.dirname(fileURLToPath(import.meta.url));
  let templateRoot = projectRoot && fs.existsSync(path.join(projectRoot, "flux-templates", "cluster"))
    ? path.join(projectRoot, "flux-templates", "cluster")
    : path.resolve(thisDir, "../../../../foundation-flux/cluster");
  if (!fs.existsSync(templateRoot)) {
    console.error(ERR("\n Template directory not found."));
    hint(`Expected at: ${templateRoot}`);
    hint("Create flux-templates/cluster/ in your project or install foundation-flux.\n");
    process.exit(1);
  }

  // Placeholder → value table. Values the caller did not provide map back to
  // the placeholder itself, so the {{…}} marker survives in the output and is
  // reported in the "Placeholders to fill in" list below.
  const vars = {
    "{{CLUSTER_NAME}}": clusterName,
    "{{OVERLAY}}": overlay,
    "{{NAMESPACE}}": namespace,
    "{{FLUX_PATH}}": opts.fluxPath || `clusters/${clusterName}`,
    "{{POSTGRES_HOST}}": opts.postgresHost || `{{POSTGRES_HOST}}`,
    "{{ACCESS_KEY_ID}}": opts.accessKeyId || `{{ACCESS_KEY_ID}}`,
    "{{AZURE_SP_CLIENT_ID_B64}}": opts.azureSpClientId || `{{AZURE_SP_CLIENT_ID_B64}}`,
    "{{AZURE_SP_CLIENT_SECRET_B64}}": opts.azureSpClientSecret || `{{AZURE_SP_CLIENT_SECRET_B64}}`,
    "{{AZURE_IDENTITY_ID}}": opts.azureIdentityId || `{{AZURE_IDENTITY_ID}}`,
    "{{AZURE_TENANT_ID}}": opts.azureTenantId || `{{AZURE_TENANT_ID}}`,
    "{{AZURE_KEYVAULT_URL}}": opts.azureKeyvaultUrl || `{{AZURE_KEYVAULT_URL}}`,
  };

  // Substitute every placeholder occurrence in a file's content.
  function applyVars(content) {
    let out = content;
    for (const [k, v] of Object.entries(vars)) {
      out = out.replaceAll(k, v);
    }
    return out;
  }

  // Recursively copy the template tree, applying substitutions to each file.
  // NOTE(review): files are read as utf8 — assumes the template contains only
  // text files.
  function copyDir(src, dest) {
    fs.mkdirSync(dest, { recursive: true });
    for (const entry of fs.readdirSync(src, { withFileTypes: true })) {
      const srcPath = path.join(src, entry.name);
      const destPath = path.join(dest, entry.name);
      if (entry.isDirectory()) {
        copyDir(srcPath, destPath);
      } else {
        const content = fs.readFileSync(srcPath, "utf8");
        fs.writeFileSync(destPath, applyVars(content));
      }
    }
  }

  banner(`Flux Init: ${clusterName}`);
  kvLine("Cluster", clusterName);
  kvLine("Overlay", overlay);
  kvLine("Namespace", namespace);
  kvLine("Output", DIM(clusterDir));
  console.log("");

  hint("Scaffolding cluster manifests…");
  copyDir(templateRoot, clusterDir);
  console.log(OK(" ✓ Cluster directory created"));

  // Any var still starting with "{{" never received a real value; surface it.
  const remaining = Object.entries(vars)
    .filter(([, v]) => v.startsWith("{{"))
    .map(([k]) => k);

  if (remaining.length) {
    console.log(`\n ${WARN("Placeholders to fill in:")}`);
    for (const p of remaining) {
      hint(` ${p}`);
    }
  }

  // Warn when the requested overlay does not exist in the repo's apps tree —
  // the generated Kustomizations would reference a missing path.
  const sampleOverlay = path.join(fluxRepo, "apps/foundation/backend/overlays/meshx", overlay);
  if (!fs.existsSync(sampleOverlay)) {
    console.log(WARN(`\n ⚠ Overlay "${overlay}" not found in apps/ — app Kustomizations will fail until it exists.`));
    hint(`Create overlays or use an existing one: --overlay demo-azure`);
  }

  console.log("");
  hint("Next steps:");
  hint(` 1. Fill in remaining {{…}} placeholders in clusters/${clusterName}/`);
  hint(` 2. Commit and push to the flux repo`);
  hint(` 3. Bootstrap Flux: fops azure aks flux bootstrap ${clusterName}`);
  console.log("");
}
|
|
883
|
+
|
|
884
|
+
// ── aks flux bootstrap ────────────────────────────────────────────────────────

/**
 * Bootstrap (or re-point) Flux on a tracked AKS cluster: fetch kube
 * credentials, resolve a GitHub token and flux repo config, ensure the GHCR
 * pull secret, run the Flux reconcile, and record the flux settings in the
 * cluster's tracked state. Exits the process when no GitHub token can be
 * resolved.
 */
export async function aksFluxBootstrap(opts = {}) {
  const execa = await lazyExeca();
  const subscription = opts.profile;
  await ensureAzCli(execa);
  await ensureAzAuth(execa, { subscription });
  const cluster = requireCluster(opts.clusterName);

  // Single dynamic import; the module is cached so both helpers come from
  // the same load.
  const { getCredentials, ensureGhcrPullSecret } = await import("./azure-aks-core.js");
  await getCredentials(execa, {
    clusterName: cluster.clusterName,
    rg: cluster.resourceGroup,
    sub: subscription,
  });

  const githubToken = resolveGithubToken(opts);
  if (!githubToken) {
    console.error(ERR("\n GitHub token required for Flux bootstrap."));
    hint("Authenticate: gh auth login or set GITHUB_TOKEN\n");
    process.exit(1);
  }

  // CLI flags override whatever resolveFluxConfig derives for this cluster.
  const { fluxRepo, fluxOwner, fluxPath, fluxBranch } = resolveFluxConfig(cluster.clusterName, {
    fluxRepo: opts.repo,
    fluxOwner: opts.owner,
    fluxPath: opts.path,
    fluxBranch: opts.branch,
  });

  await ensureGhcrPullSecret(execa, { clusterName: cluster.clusterName, githubToken });

  await reconcileFlux(execa, {
    clusterName: cluster.clusterName,
    rg: cluster.resourceGroup,
    sub: subscription,
    githubToken,
    repo: fluxRepo,
    owner: fluxOwner,
    path: fluxPath,
    branch: fluxBranch,
  });

  writeClusterState(cluster.clusterName, {
    flux: { repo: fluxRepo, owner: fluxOwner, path: fluxPath, branch: fluxBranch },
  });
  console.log(OK(` ✓ Flux now using ${fluxOwner}/${fluxRepo}\n`));
}
|
|
934
|
+
|
|
935
|
+
// ── aks flux status ───────────────────────────────────────────────────────────

/**
 * Print Flux sources, kustomizations, and HelmReleases for a tracked cluster
 * by shelling out to the flux CLI against the cluster's kube context.
 */
export async function aksFluxStatus(opts = {}) {
  const execa = await lazyExeca();
  await ensureFluxCli(execa);
  const cluster = requireCluster(opts.clusterName);

  banner(`Flux Status: ${cluster.clusterName}`);

  const sections = [
    { label: "Sources", args: ["get", "sources", "all"] },
    { label: "Kustomizations", args: ["get", "kustomizations"] },
    { label: "Helm Releases", args: ["get", "helmreleases", "--all-namespaces"] },
  ];

  for (const { label, args } of sections) {
    console.log(`\n ${LABEL(label)}`);
    // reject: false — a failing flux call just yields an empty section.
    const { stdout } = await execa("flux", [...args, "--context", cluster.clusterName], {
      timeout: 30000,
      reject: false,
    });
    const body = stdout?.trim();
    if (body) {
      body.split("\n").forEach((line) => console.log(` ${DIM(line)}`));
    } else {
      hint(` No ${label.toLowerCase()} found.`);
    }
  }

  console.log("");
}
|
|
966
|
+
|
|
967
|
+
// ── aks flux reconcile ────────────────────────────────────────────────────────

/**
 * Trigger an immediate Flux reconciliation on a tracked cluster: first the
 * git source (default "flux-system"), then the root kustomization — whose
 * name differs between bootstrap styles, so two candidates are tried.
 */
export async function aksFluxReconcile(opts = {}) {
  const execa = await lazyExeca();
  await ensureFluxCli(execa);
  const cluster = requireCluster(opts.clusterName);

  banner(`Flux Reconcile: ${cluster.clusterName}`);

  const source = opts.source || "flux-system";
  hint(`Triggering reconcile for source "${source}"…\n`);

  await execa("flux", [
    "reconcile", "source", "git", source,
    "--context", cluster.clusterName,
  ], { timeout: 60000, stdio: "inherit" });

  // Try each known root-kustomization name until one reconciles.
  let reconciled = false;
  for (const candidate of ["flux-system-flux-system", "flux-system"]) {
    const { exitCode } = await execa("flux", [
      "reconcile", "kustomization", candidate,
      "--context", cluster.clusterName,
    ], { timeout: 60000, stdio: "inherit", reject: false });
    if (exitCode === 0) {
      reconciled = true;
      break;
    }
  }

  if (reconciled) {
    console.log(OK("\n ✓ Reconciliation triggered.\n"));
  } else {
    console.error(ERR(`\n ✗ Could not reconcile kustomization. Check: kubectl --context ${cluster.clusterName} get kustomizations -n flux-system\n`));
  }
}
|
|
1000
|
+
|
|
1001
|
+
// ── aks data bootstrap ────────────────────────────────────────────────────────
|
|
1002
|
+
|
|
1003
|
+
/**
 * Locate the foundation-compose repo root — the directory containing
 * scripts/bootstrap_foundation.py. Checks, in order: the FOUNDATION_ROOT env
 * var, the projectRoot recorded in ~/.fops.json, then each ancestor of the
 * current working directory. Returns null when nothing matches.
 */
function findBootstrapRepoRoot() {
  const marker = "scripts/bootstrap_foundation.py";
  const hasMarker = (root) => fs.existsSync(path.join(root, marker));

  const envRoot = process.env.FOUNDATION_ROOT;
  if (envRoot && hasMarker(envRoot)) return path.resolve(envRoot);

  try {
    const cfg = JSON.parse(fs.readFileSync(path.join(os.homedir(), ".fops.json"), "utf8"));
    const configuredRoot = cfg?.projectRoot;
    if (configuredRoot && hasMarker(configuredRoot)) return path.resolve(configuredRoot);
  } catch { /* no usable ~/.fops.json — keep searching */ }

  // Walk up from cwd to the filesystem root looking for the marker.
  let current = path.resolve(process.cwd());
  while (true) {
    if (hasMarker(current)) return current;
    const parent = path.dirname(current);
    if (parent === current) return null;
    current = parent;
  }
}
|
|
1022
|
+
|
|
1023
|
+
/**
 * Best-effort discovery of the Foundation backend API URL: list every ingress
 * host on the cluster and take the first one, appending "/api". Returns null
 * when kubectl fails or no ingress host exists.
 */
async function discoverFoundationApiUrlFromCluster(execa, clusterName) {
  try {
    const { stdout } = await execa("kubectl", [
      "get", "ingress", "-A",
      "-o", "jsonpath={.items[*].spec.rules[*].host}",
      "--context", clusterName,
    ], { timeout: 15000 });
    const hosts = (stdout || "").trim().split(/\s+/).filter(Boolean);
    return hosts.length ? `https://${hosts[0]}/api` : null;
  } catch {
    // kubectl missing, wrong context, or timeout — caller handles null.
    return null;
  }
}
|
|
1035
|
+
|
|
1036
|
+
/**
 * Run the foundation demo-data bootstrap script against a tracked AKS
 * cluster's backend. Resolves the backend API URL (flag → tracked state →
 * cluster ingress discovery), finds the foundation-compose repo, assembles
 * credentials (flag → env/.env → ~/.fops.json → interactive prompt unless
 * --yes), provisions a scripts/.venv if missing, then streams
 * scripts/bootstrap_foundation.py output to the terminal.
 *
 * Exits the process (code 1) on missing API URL, missing repo root, missing
 * credentials in --yes mode, or a failing bootstrap script.
 *
 * @param {object} [opts] - clusterName, profile (subscription), apiUrl,
 *   bearerToken, yes.
 * @returns {Promise<void>}
 */
export async function aksDataBootstrap(opts = {}) {
  const execa = await lazyExeca();
  await ensureAzCli(execa);
  await ensureAzAuth(execa, { subscription: opts.profile });
  const cl = requireCluster(opts.clusterName);

  // Make sure kubeconfig has this cluster's context before any kubectl use.
  const { getCredentials } = await import("./azure-aks-core.js");
  await getCredentials(execa, {
    clusterName: cl.clusterName,
    rg: cl.resourceGroup,
    sub: opts.profile,
  });

  // API URL resolution: explicit flag → tracked state → ingress discovery.
  let apiUrl = opts.apiUrl?.trim() || cl.foundationApiUrl?.trim();
  if (!apiUrl) {
    apiUrl = await discoverFoundationApiUrlFromCluster(execa, cl.clusterName);
    if (apiUrl) {
      hint(`Using API URL from cluster ingress: ${apiUrl}`);
      // Persist the discovered URL so future runs skip discovery.
      writeClusterState(cl.clusterName, { foundationApiUrl: apiUrl });
    }
  }
  if (!apiUrl) {
    console.error(ERR("\n Foundation backend API URL is required."));
    hint("Pass the backend API base URL (e.g. https://foundation.example.com/api):");
    hint(" fops azure aks bootstrap " + cl.clusterName + " --api-url https://your-foundation-host/api\n");
    process.exit(1);
  }
  // Strip trailing slashes; warn (but proceed) if the URL lacks the usual /api suffix.
  const normalized = apiUrl.replace(/\/+$/, "");
  if (!normalized.endsWith("/api")) {
    console.log(WARN(" API URL should usually end with /api (e.g. https://host/api). Using as-is."));
  }

  const root = findBootstrapRepoRoot();
  if (!root) {
    console.error(ERR("\n Could not find foundation-compose root (scripts/bootstrap_foundation.py)."));
    hint("Run from the foundation-compose directory, or set FOUNDATION_ROOT.\n");
    process.exit(1);
  }

  // Child environment: process env, overlaid with the repo's .env, plus
  // unbuffered Python output so streaming is live.
  const { loadEnvFromFile } = await import("./azure-helpers.js");
  const projectEnv = loadEnvFromFile(path.join(root, ".env"));
  let bootstrapEnv = {
    ...process.env,
    ...projectEnv,
    PYTHONUNBUFFERED: "1",
    API_URL: normalized,
  };

  // CLI --bearer-token takes precedence
  if (opts.bearerToken?.trim()) {
    bootstrapEnv.BEARER_TOKEN = opts.bearerToken.trim();
  }

  // Credentials are either a bearer token or a username/password pair.
  let hasCreds = !!(bootstrapEnv.BEARER_TOKEN?.trim()
    || (bootstrapEnv.QA_USERNAME?.trim() && bootstrapEnv.QA_PASSWORD != null));
  if (!hasCreds) {
    // Fall back to the foundation plugin's config in ~/.fops.json.
    try {
      const fopsPath = path.join(os.homedir(), ".fops.json");
      const raw = JSON.parse(fs.readFileSync(fopsPath, "utf8"));
      const cfg = raw?.plugins?.entries?.["fops-plugin-foundation"]?.config || {};
      if (cfg.bearerToken?.trim()) {
        bootstrapEnv.BEARER_TOKEN = cfg.bearerToken.trim();
        hasCreds = true;
      } else if (cfg.user?.trim() && cfg.password) {
        bootstrapEnv.QA_USERNAME = cfg.user.trim();
        bootstrapEnv.QA_PASSWORD = cfg.password;
        hasCreds = true;
      }
    } catch {}
  }
  // Interactive prompt as last resort — skipped in non-interactive --yes mode.
  if (!hasCreds && !opts.yes) {
    console.log(WARN(" No Foundation credentials in env or ~/.fops.json."));
    const { getInquirer } = await import(resolveCliSrc("lazy.js"));
    const inquirer = await getInquirer();
    const { authMethod } = await inquirer.prompt([{
      type: "list",
      name: "authMethod",
      message: "Authentication method:",
      choices: [
        { name: "Username / password", value: "password" },
        { name: "Bearer token (JWT)", value: "jwt" },
      ],
    }]);
    if (authMethod === "jwt") {
      const { token } = await inquirer.prompt([{ type: "input", name: "token", message: "Bearer token:", validate: (v) => v?.trim() ? true : "Token required" }]);
      bootstrapEnv.BEARER_TOKEN = token.trim();
    } else {
      const { user } = await inquirer.prompt([{ type: "input", name: "user", message: "Username (email):", validate: (v) => v?.trim() ? true : "Username required" }]);
      const { password } = await inquirer.prompt([{ type: "password", name: "password", message: "Password:", mask: "*", validate: (v) => v ? true : "Password required" }]);
      bootstrapEnv.QA_USERNAME = user.trim();
      bootstrapEnv.QA_PASSWORD = password;
    }
    hasCreds = true;
  }
  if (!hasCreds) {
    console.error(ERR(" Set BEARER_TOKEN or QA_USERNAME+QA_PASSWORD (env or ~/.fops.json), or run without --yes.\n"));
    process.exit(1);
  }

  banner("Bootstrap demo data (AKS)");
  kvLine("Cluster", cl.clusterName);
  kvLine("API URL", normalized);
  hint("Same as fops bootstrap — creates demo data mesh via backend API.\n");

  // Provision scripts/.venv on first use (venv, pip if absent, requirements).
  const scriptsDir = path.join(root, "scripts");
  const venvPython = path.join(scriptsDir, ".venv", "bin", "python");
  const scriptPath = path.join(scriptsDir, "bootstrap_foundation.py");
  if (!fs.existsSync(venvPython)) {
    hint("Creating scripts/.venv…");
    await execa("python3", ["-m", "venv", path.join(scriptsDir, ".venv")], { cwd: root, timeout: 30000 });
    const pip = path.join(scriptsDir, ".venv", "bin", "pip");
    if (!fs.existsSync(pip)) {
      // Some python3 builds create venvs without pip; bootstrap it via get-pip.
      hint("Installing pip into venv…");
      await execa("sh", ["-c", `curl -sS https://bootstrap.pypa.io/get-pip.py | ${venvPython}`], { cwd: root, timeout: 60000 });
    }
    const reqPath = path.join(scriptsDir, "requirements.txt");
    if (fs.existsSync(reqPath)) {
      await execa(venvPython, ["-m", "pip", "install", "--quiet", "-r", reqPath], { cwd: root, timeout: 120000 });
    }
  }

  // Run the bootstrap script, mirroring its output live while also capturing
  // it so failure hints can inspect the combined output afterwards.
  let captured = "";
  const proc = execa(venvPython, ["-u", scriptPath], {
    cwd: root,
    timeout: 600_000,
    env: bootstrapEnv,
    reject: false,
  });
  proc.stdout?.on("data", (chunk) => { const s = chunk.toString(); captured += s; process.stdout.write(s); });
  proc.stderr?.on("data", (chunk) => { const s = chunk.toString(); captured += s; process.stderr.write(s); });
  const result = await proc;

  if (result.exitCode === 0) {
    console.log(OK("\n ✓ Bootstrap complete! Demo data mesh created on the cluster backend."));
    writeClusterState(cl.clusterName, { foundationApiUrl: normalized });
    return;
  }
  // Auth-looking failures get an extra hint about Foundation Admin rights.
  if ((captured || "").includes("401") || (captured || "").includes("Insufficient permissions")) {
    hint("\n If the user needs Foundation Admin, grant it in the UI or via your IdP, then retry.");
  }
  // Normalize signal/overflow exit codes (255 / -1) to a plain failure code.
  const code = result.exitCode === 255 || result.exitCode === -1 ? 1 : result.exitCode;
  console.error(ERR(`\n Bootstrap failed (exit code ${code}).`));
  hint("Ensure the AKS backend is up and reachable at " + normalized);
  process.exit(1);
}
|