@meshxdata/fops 0.1.49 → 0.1.51
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +368 -0
- package/package.json +1 -1
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-core.js +347 -6
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-data-bootstrap.js +421 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-flux.js +5 -179
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-naming.js +14 -4
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-postgres.js +171 -4
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-storage.js +303 -8
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks.js +2 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-auth.js +1 -1
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-fleet-swarm.js +936 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-fleet.js +10 -918
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-helpers.js +5 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-keyvault-keys.js +413 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-keyvault.js +14 -399
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops-config.js +754 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops-knock.js +527 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops-ssh.js +427 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops.js +99 -1686
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-provision-health.js +279 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-provision-init.js +186 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-provision.js +66 -444
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-results.js +11 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-vm-lifecycle.js +5 -540
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-vm-terraform.js +544 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/infra-cmds.js +75 -3
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/test-cmds.js +227 -11
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/vm-cmds.js +2 -1
- package/src/plugins/bundled/fops-plugin-azure/lib/pytest-parse.js +21 -0
- package/src/plugins/bundled/fops-plugin-foundation/index.js +371 -44
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Azure AKS — Data bootstrap operations
|
|
3
|
+
* Extracted from azure-aks-flux.js for maintainability
|
|
4
|
+
*
|
|
5
|
+
* Runs bootstrap_foundation.py script to create demo data mesh on AKS cluster.
|
|
6
|
+
*/
|
|
7
|
+
import fs from "node:fs";
|
|
8
|
+
import os from "node:os";
|
|
9
|
+
import path from "node:path";
|
|
10
|
+
import {
|
|
11
|
+
OK, WARN, ERR, DIM,
|
|
12
|
+
banner, hint, kvLine,
|
|
13
|
+
lazyExeca, ensureAzCli, ensureAzAuth,
|
|
14
|
+
resolveCliSrc,
|
|
15
|
+
} from "./azure-helpers.js";
|
|
16
|
+
import { writeClusterState, requireCluster } from "./azure-aks-state.js";
|
|
17
|
+
|
|
18
|
+
// ── aks data bootstrap ────────────────────────────────────────────────────────
|
|
19
|
+
|
|
20
|
+
/**
 * Locate the foundation-compose repository root — the directory that
 * contains scripts/bootstrap_foundation.py.
 *
 * Search order: FOUNDATION_ROOT env var, `projectRoot` from ~/.fops.json,
 * then every ancestor of the current working directory.
 *
 * @returns {string|null} Absolute repo root path, or null when not found.
 */
function findBootstrapRepoRoot() {
  const marker = "scripts/bootstrap_foundation.py";
  const hasMarker = (candidate) => fs.existsSync(path.join(candidate, marker));

  // 1) Explicit override via environment variable.
  const fromEnv = process.env.FOUNDATION_ROOT;
  if (fromEnv && hasMarker(fromEnv)) return path.resolve(fromEnv);

  // 2) projectRoot recorded in ~/.fops.json (best-effort: a missing or
  //    unparsable file simply falls through to the cwd walk).
  try {
    const cfg = JSON.parse(fs.readFileSync(path.join(os.homedir(), ".fops.json"), "utf8"));
    const configured = cfg?.projectRoot;
    if (configured && hasMarker(configured)) return path.resolve(configured);
  } catch {}

  // 3) Walk upward from cwd until the filesystem root.
  let current = path.resolve(process.cwd());
  while (true) {
    if (hasMarker(current)) return current;
    const up = path.dirname(current);
    if (up === current) return null; // reached the filesystem root
    current = up;
  }
}
|
|
39
|
+
|
|
40
|
+
/**
 * Best-effort discovery of the Foundation backend API URL: take the first
 * ingress host defined anywhere on the cluster and append /api.
 *
 * @param {Function} execa - execa-compatible command runner.
 * @param {string} clusterName - kubectl context name for the target cluster.
 * @returns {Promise<string|null>} `https://<host>/api`, or null when no
 *   ingress host exists or kubectl fails/times out.
 */
async function discoverFoundationApiUrlFromCluster(execa, clusterName) {
  const kubectlArgs = [
    "get", "ingress", "-A",
    "-o", "jsonpath={.items[*].spec.rules[*].host}",
    "--context", clusterName,
  ];
  try {
    const result = await execa("kubectl", kubectlArgs, { timeout: 15000 });
    const hosts = (result.stdout || "").trim().split(/\s+/).filter(Boolean);
    return hosts.length > 0 ? `https://${hosts[0]}/api` : null;
  } catch {
    // kubectl missing, unknown context, timeout, … — discovery is optional.
    return null;
  }
}
|
|
52
|
+
|
|
53
|
+
/**
 * Upload the repo's CSV sample files into the cluster's MinIO-compatible
 * storage under the foundation bucket (spark/samples/ prefix).
 *
 * Mechanism: pack the CSVs into a ConfigMap (base64 via binaryData), run a
 * short-lived Job that mounts it and copies each file with `mc`, wait for
 * completion, then delete the ConfigMap. Every failure path degrades to a
 * warning — callers treat this upload as best-effort.
 *
 * NOTE(review): ConfigMaps are capped at ~1 MiB total, so this assumes the
 * sample set is small — confirm sample sizes stay under the limit.
 *
 * @param {Function} execa - execa-compatible command runner.
 * @param {string} clusterName - kubectl context of the target cluster.
 * @param {string} repoRoot - foundation-compose repo root (holds storage-data/).
 * @param {Object} env - environment map that may carry S3/MinIO credentials.
 */
async function uploadSampleDataToStorage(execa, clusterName, repoRoot, env) {
  const samplesDir = path.join(repoRoot, "storage-data", "spark", "samples");
  if (!fs.existsSync(samplesDir)) {
    console.log(WARN(" ⚠ No storage-data/spark/samples directory found, skipping sample upload"));
    return;
  }

  const sampleFiles = fs.readdirSync(samplesDir).filter(f => f.endsWith(".csv"));
  if (sampleFiles.length === 0) {
    console.log(WARN(" ⚠ No CSV sample files found in storage-data/spark/samples"));
    return;
  }

  hint("Uploading sample data files to MinIO storage…");

  const s3Access = env.S3_ACCESS_KEY || env.MINIO_ACCESS_KEY || "minio";
  const s3Secret = env.S3_SECRET_KEY || env.MINIO_SECRET_KEY || "minio123";

  // Create ConfigMap with sample files (base64 encoded)
  const configMapData = {};
  for (const file of sampleFiles) {
    const content = fs.readFileSync(path.join(samplesDir, file));
    configMapData[file] = content.toString("base64");
  }

  const configMapManifest = JSON.stringify({
    apiVersion: "v1",
    kind: "ConfigMap",
    metadata: { name: "bootstrap-sample-data", namespace: "foundation" },
    binaryData: configMapData,
  });

  // Apply ConfigMap
  const { exitCode: cmExitCode } = await execa("kubectl", [
    "--context", clusterName, "apply", "-f", "-",
  ], { input: configMapManifest, reject: false, timeout: 30000 });

  if (cmExitCode !== 0) {
    console.log(WARN(" ⚠ Could not create sample data ConfigMap"));
    return;
  }

  // Create Job to upload files from ConfigMap to MinIO.
  // FIX: credentials are passed through the container environment and
  // expanded by the shell at runtime, never interpolated into the script
  // text. Interpolation leaked them via the pod's command line (visible in
  // `kubectl describe pod`) and broke — or allowed shell injection — when a
  // credential contained quotes or `$`.
  const uploadScript = `
set -e
mc alias set minio http://foundation-storage-engine:8080 "$S3_ACCESS_KEY" "$S3_SECRET_KEY"
mc mb minio/foundation --ignore-existing
mc mb minio/foundation/spark --ignore-existing
mc mb minio/foundation/spark/samples --ignore-existing
for f in /samples/*.csv; do
  [ -f "$f" ] && mc cp "$f" minio/foundation/spark/samples/
done
echo "Sample files uploaded successfully"
`.trim();

  const jobManifest = JSON.stringify({
    apiVersion: "batch/v1",
    kind: "Job",
    metadata: { name: "bootstrap-sample-upload", namespace: "foundation" },
    spec: {
      backoffLimit: 3,
      ttlSecondsAfterFinished: 120,
      template: {
        spec: {
          restartPolicy: "Never",
          containers: [{
            name: "mc",
            image: "minio/mc:latest",
            command: ["sh", "-c", uploadScript],
            env: [
              { name: "S3_ACCESS_KEY", value: s3Access },
              { name: "S3_SECRET_KEY", value: s3Secret },
            ],
            volumeMounts: [{ name: "samples", mountPath: "/samples" }],
          }],
          volumes: [{
            name: "samples",
            configMap: { name: "bootstrap-sample-data" },
          }],
        },
      },
    },
  });

  // Delete old job if exists, then create new one (Job specs are immutable).
  await execa("kubectl", [
    "--context", clusterName, "delete", "job", "bootstrap-sample-upload", "-n", "foundation", "--ignore-not-found",
  ], { reject: false, timeout: 15000 });

  const { exitCode: jobExitCode } = await execa("kubectl", [
    "--context", clusterName, "apply", "-f", "-",
  ], { input: jobManifest, reject: false, timeout: 15000 });

  if (jobExitCode !== 0) {
    console.log(WARN(" ⚠ Could not create sample upload Job"));
    return;
  }

  // Wait for Job completion
  const { exitCode: waitExitCode } = await execa("kubectl", [
    "--context", clusterName, "wait", "--for=condition=complete",
    "job/bootstrap-sample-upload", "-n", "foundation", "--timeout=120s",
  ], { reject: false, timeout: 130000 });

  if (waitExitCode === 0) {
    console.log(OK(` ✓ Uploaded ${sampleFiles.length} sample file(s) to MinIO storage`));
  } else {
    // Surface a snippet of the job logs to help diagnose the failure.
    const { stdout: jobLogs } = await execa("kubectl", [
      "--context", clusterName, "logs", "job/bootstrap-sample-upload", "-n", "foundation",
    ], { reject: false, timeout: 15000 });
    console.log(WARN(" ⚠ Sample upload Job did not complete"));
    if (jobLogs) hint(jobLogs.slice(0, 300));
  }

  // Cleanup ConfigMap (the Job removes itself via ttlSecondsAfterFinished).
  await execa("kubectl", [
    "--context", clusterName, "delete", "configmap", "bootstrap-sample-data", "-n", "foundation", "--ignore-not-found",
  ], { reject: false, timeout: 15000 });
}
|
|
170
|
+
|
|
171
|
+
/**
 * Bootstrap the demo data mesh on an AKS cluster.
 *
 * Runs scripts/bootstrap_foundation.py from the foundation-compose repo
 * against the cluster's Foundation backend API, after resolving the API URL,
 * Foundation credentials, S3/storage credentials, and a Python venv.
 *
 * @param {Object} [opts]
 * @param {string} [opts.clusterName] - target cluster (must be in saved state).
 * @param {string} [opts.profile] - Azure subscription for az auth.
 * @param {string} [opts.apiUrl] - backend API base URL; falls back to saved
 *   state, then to discovery via the cluster's first ingress host.
 * @param {string} [opts.bearerToken] - Foundation bearer token; takes
 *   precedence over every other credential source.
 * @param {boolean} [opts.yes] - non-interactive; never prompt for credentials.
 * @param {boolean} [opts.skipSampleUpload] - skip the MinIO sample upload.
 *
 * Side effects: may prompt interactively, writes cluster state, and calls
 * process.exit(1) on unrecoverable errors (missing API URL, missing repo
 * root, missing credentials with --yes, bootstrap script failure).
 */
export async function aksDataBootstrap(opts = {}) {
  // Preconditions: az CLI installed and logged in, cluster known in state.
  const execa = await lazyExeca();
  await ensureAzCli(execa);
  await ensureAzAuth(execa, { subscription: opts.profile });
  const cl = requireCluster(opts.clusterName);

  // Merge AKS credentials into kubeconfig so the kubectl calls below can use
  // the cluster context (dynamic import — presumably to defer module load or
  // avoid an import cycle; confirm).
  const { getCredentials } = await import("./azure-aks-core.js");
  await getCredentials(execa, {
    clusterName: cl.clusterName,
    rg: cl.resourceGroup,
    sub: opts.profile,
  });

  // Resolve the backend API URL: CLI flag → saved state → ingress discovery.
  let apiUrl = opts.apiUrl?.trim() || cl.foundationApiUrl?.trim();
  if (!apiUrl) {
    apiUrl = await discoverFoundationApiUrlFromCluster(execa, cl.clusterName);
    if (apiUrl) {
      hint(`Using API URL from cluster ingress: ${apiUrl}`);
      // Persist the discovered URL so later runs skip discovery.
      writeClusterState(cl.clusterName, { foundationApiUrl: apiUrl });
    }
  }
  if (!apiUrl) {
    console.error(ERR("\n Foundation backend API URL is required."));
    hint("Pass the backend API base URL (e.g. https://foundation.example.com/api):");
    hint(" fops azure aks bootstrap " + cl.clusterName + " --api-url https://your-foundation-host/api\n");
    process.exit(1);
  }
  // Strip trailing slashes; warn (but proceed) when the URL lacks /api.
  const normalized = apiUrl.replace(/\/+$/, "");
  if (!normalized.endsWith("/api")) {
    console.log(WARN(" API URL should usually end with /api (e.g. https://host/api). Using as-is."));
  }

  // Locate the foundation-compose checkout holding the bootstrap script.
  const root = findBootstrapRepoRoot();
  if (!root) {
    console.error(ERR("\n Could not find foundation-compose root (scripts/bootstrap_foundation.py)."));
    hint("Run from the foundation-compose directory, or set FOUNDATION_ROOT.\n");
    process.exit(1);
  }

  // Build the environment for the Python script: process env, then the
  // repo's .env overrides, then fixed settings.
  const { loadEnvFromFile } = await import("./azure-helpers.js");
  const projectEnv = loadEnvFromFile(path.join(root, ".env"));
  let bootstrapEnv = {
    ...process.env,
    ...projectEnv,
    PYTHONUNBUFFERED: "1",
    API_URL: normalized,
  };

  // CLI --bearer-token takes precedence
  if (opts.bearerToken?.trim()) {
    bootstrapEnv.BEARER_TOKEN = opts.bearerToken.trim();
  }

  // Credential resolution: env (BEARER_TOKEN or QA_USERNAME/QA_PASSWORD) →
  // ~/.fops.json plugin config → interactive prompt (unless --yes).
  let hasCreds = !!(bootstrapEnv.BEARER_TOKEN?.trim()
    || (bootstrapEnv.QA_USERNAME?.trim() && bootstrapEnv.QA_PASSWORD != null));
  if (!hasCreds) {
    // Best-effort read of the foundation plugin's stored credentials; a
    // missing or unparsable config simply falls through to the prompt.
    try {
      const fopsPath = path.join(os.homedir(), ".fops.json");
      const raw = JSON.parse(fs.readFileSync(fopsPath, "utf8"));
      const cfg = raw?.plugins?.entries?.["fops-plugin-foundation"]?.config || {};
      if (cfg.bearerToken?.trim()) {
        bootstrapEnv.BEARER_TOKEN = cfg.bearerToken.trim();
        hasCreds = true;
      } else if (cfg.user?.trim() && cfg.password) {
        bootstrapEnv.QA_USERNAME = cfg.user.trim();
        bootstrapEnv.QA_PASSWORD = cfg.password;
        hasCreds = true;
      }
    } catch {}
  }
  if (!hasCreds && !opts.yes) {
    console.log(WARN(" No Foundation credentials in env or ~/.fops.json."));
    const { getInquirer } = await import(resolveCliSrc("lazy.js"));
    const inquirer = await getInquirer();
    const { authMethod } = await inquirer.prompt([{
      type: "list",
      name: "authMethod",
      message: "Authentication method:",
      choices: [
        { name: "Username / password", value: "password" },
        { name: "Bearer token (JWT)", value: "jwt" },
      ],
    }]);
    if (authMethod === "jwt") {
      const { token } = await inquirer.prompt([{ type: "input", name: "token", message: "Bearer token:", validate: (v) => v?.trim() ? true : "Token required" }]);
      bootstrapEnv.BEARER_TOKEN = token.trim();
    } else {
      const { user } = await inquirer.prompt([{ type: "input", name: "user", message: "Username (email):", validate: (v) => v?.trim() ? true : "Username required" }]);
      const { password } = await inquirer.prompt([{ type: "password", name: "password", message: "Password:", mask: "*", validate: (v) => v ? true : "Password required" }]);
      bootstrapEnv.QA_USERNAME = user.trim();
      bootstrapEnv.QA_PASSWORD = password;
    }
    hasCreds = true;
  }
  if (!hasCreds) {
    console.error(ERR(" Set BEARER_TOKEN or QA_USERNAME+QA_PASSWORD (env or ~/.fops.json), or run without --yes.\n"));
    process.exit(1);
  }

  // Fetch S3/storage credentials from cluster secret
  // (best-effort: failure only produces a warning and the run continues).
  try {
    const s3SecretResult = await execa("kubectl", [
      "get", "secret", "-n", "foundation", "storage-engine-auth-credentials",
      "-o", "jsonpath={.data.AUTH_IDENTITY},{.data.AUTH_CREDENTIAL}",
    ], { timeout: 15000 });
    const [identityB64, credentialB64] = s3SecretResult.stdout.trim().split(",");
    if (identityB64 && credentialB64) {
      const s3AccessKey = Buffer.from(identityB64, "base64").toString("utf8");
      const s3SecretKey = Buffer.from(credentialB64, "base64").toString("utf8");
      // Exported under every alias downstream tooling might read
      // (S3_*, MINIO_*, MY_S3_*, AWS_*).
      bootstrapEnv.S3_ACCESS_KEY = s3AccessKey;
      bootstrapEnv.S3_SECRET_KEY = s3SecretKey;
      bootstrapEnv.MINIO_ACCESS_KEY = s3AccessKey;
      bootstrapEnv.MINIO_SECRET_KEY = s3SecretKey;
      bootstrapEnv.MY_S3_ACCESS = s3AccessKey;
      bootstrapEnv.MY_S3_SECRET = s3SecretKey;
      bootstrapEnv.AWS_ACCESS_KEY_ID = s3AccessKey;
      bootstrapEnv.AWS_SECRET_ACCESS_KEY = s3SecretKey;
      console.log(OK(" ✓ Retrieved S3 credentials from cluster"));
    }
  } catch {
    console.log(WARN(" ⚠ Could not fetch S3 credentials from cluster (storage-engine-auth-credentials)"));
  }

  // Fetch CF Access credentials from ~/.fops.json
  try {
    const fopsPath = path.join(os.homedir(), ".fops.json");
    const raw = JSON.parse(fs.readFileSync(fopsPath, "utf8"));
    if (raw?.cloudflare?.accessClientId) {
      bootstrapEnv.CF_ACCESS_CLIENT_ID = raw.cloudflare.accessClientId;
      bootstrapEnv.CF_ACCESS_CLIENT_SECRET = raw.cloudflare.accessClientSecret || "";
    }
  } catch { /* no fops.json */ }

  // Upload sample data files to MinIO storage
  if (!opts.skipSampleUpload) {
    await uploadSampleDataToStorage(execa, cl.clusterName, root, bootstrapEnv);
  }

  banner("Bootstrap demo data (AKS)");
  kvLine("Cluster", cl.clusterName);
  kvLine("API URL", normalized);
  hint("Same as fops bootstrap — creates demo data mesh via backend API.\n");

  // Ensure a Python venv with the script's requirements exists under scripts/.
  const scriptsDir = path.join(root, "scripts");
  const venvPython = path.join(scriptsDir, ".venv", "bin", "python");
  const scriptPath = path.join(scriptsDir, "bootstrap_foundation.py");
  if (!fs.existsSync(venvPython)) {
    hint("Creating scripts/.venv…");
    await execa("python3", ["-m", "venv", path.join(scriptsDir, ".venv")], { cwd: root, timeout: 30000 });
    const pip = path.join(scriptsDir, ".venv", "bin", "pip");
    if (!fs.existsSync(pip)) {
      // Some distros ship venv without pip; bootstrap it via get-pip.py.
      hint("Installing pip into venv…");
      await execa("sh", ["-c", `curl -sS https://bootstrap.pypa.io/get-pip.py | ${venvPython}`], { cwd: root, timeout: 60000 });
    }
    const reqPath = path.join(scriptsDir, "requirements.txt");
    if (fs.existsSync(reqPath)) {
      await execa(venvPython, ["-m", "pip", "install", "--quiet", "-r", reqPath], { cwd: root, timeout: 120000 });
    }
  }

  // Ensure bootstrap user exists in cluster postgres (for QA_USERNAME/QA_PASSWORD auth)
  if (bootstrapEnv.QA_USERNAME?.trim() && bootstrapEnv.QA_PASSWORD) {
    hint("Ensuring bootstrap user exists in cluster postgres…");
    const ensureUserScript = path.join(scriptsDir, "ensure_bootstrap_user_sql.py");
    if (fs.existsSync(ensureUserScript)) {
      try {
        // The helper script prints the SQL for creating/updating the user;
        // nothing is executed unless it produces output.
        const sqlResult = await execa(venvPython, [ensureUserScript], {
          cwd: root,
          timeout: 30000,
          env: { ...process.env, QA_USERNAME: bootstrapEnv.QA_USERNAME, QA_PASSWORD: bootstrapEnv.QA_PASSWORD },
        });
        const sql = sqlResult.stdout?.trim();
        if (sql) {
          // Get postgres password from cluster secret
          let pgPassword = "";
          try {
            const secretResult = await execa("kubectl", [
              "get", "secret", "-n", "foundation", "postgres",
              "-o", "jsonpath={.data.password}",
            ], { timeout: 15000 });
            pgPassword = Buffer.from(secretResult.stdout.trim(), "base64").toString("utf8");
          } catch { /* ignore */ }

          // Get postgres host from backend deployment config
          let pgHost = "foundation-postgres"; // default for in-cluster
          try {
            const hostResult = await execa("kubectl", [
              "get", "helmrelease", "-n", "foundation", "foundation-backend",
              "-o", "jsonpath={.spec.values.config.postgres.host}",
            ], { timeout: 15000 });
            if (hostResult.stdout?.trim()) pgHost = hostResult.stdout.trim();
          } catch { /* ignore */ }

          let kubectlResult;
          const isAzurePg = pgHost.includes(".postgres.database.azure.com");

          if (isAzurePg) {
            // Azure PostgreSQL - run psql via temporary pod
            // NOTE(review): the password ends up in the pod's command line
            // (visible via `kubectl describe pod`) — consider PGPASSWORD.
            const podName = `bootstrap-psql-${Date.now()}`;
            const pgUser = "foundation";
            const connStr = `postgresql://${pgUser}:${pgPassword}@${pgHost}:5432/foundation?sslmode=require`;
            kubectlResult = await execa("kubectl", [
              "run", podName, "-n", "foundation", "--rm", "-i", "--restart=Never",
              "--image=postgres:15-alpine", "--",
              "psql", connStr, "-c", sql,
            ], { timeout: 60000, reject: false });
          } else {
            // In-cluster postgres - kubectl exec
            kubectlResult = await execa("kubectl", [
              "exec", "-n", "foundation", "deploy/foundation-postgres", "--",
              "psql", "-U", "foundation", "-d", "foundation", "-c", sql,
            ], { timeout: 30000, reject: false });
          }

          if (kubectlResult.exitCode === 0) {
            console.log(OK(" ✓ Bootstrap user ready in cluster postgres"));
          } else {
            console.log(WARN(" ⚠ Could not create bootstrap user (may already exist or postgres not ready)"));
            if (kubectlResult.stderr) hint(kubectlResult.stderr.slice(0, 200));
          }
        }
      } catch (e) {
        // User provisioning is advisory — the bootstrap run below is what
        // decides overall success/failure.
        console.log(WARN(` ⚠ ensure-bootstrap-user skipped: ${e.message}`));
      }
    }
  }

  // Run the bootstrap script, streaming output live while also capturing it
  // so the 401/permissions hint below can inspect what was printed.
  let captured = "";
  const proc = execa(venvPython, ["-u", scriptPath], {
    cwd: root,
    timeout: 600_000,
    env: bootstrapEnv,
    reject: false,
  });
  proc.stdout?.on("data", (chunk) => { const s = chunk.toString(); captured += s; process.stdout.write(s); });
  proc.stderr?.on("data", (chunk) => { const s = chunk.toString(); captured += s; process.stderr.write(s); });
  const result = await proc;

  if (result.exitCode === 0) {
    console.log(OK("\n ✓ Bootstrap complete! Demo data mesh created on the cluster backend."));
    // Remember the working API URL for subsequent commands.
    writeClusterState(cl.clusterName, { foundationApiUrl: normalized });
    return;
  }
  if ((captured || "").includes("401") || (captured || "").includes("Insufficient permissions")) {
    hint("\n If the user needs Foundation Admin, grant it in the UI or via your IdP, then retry.");
  }
  // Normalize sentinel exit codes (255 / -1) to a plain 1 for reporting.
  const code = result.exitCode === 255 || result.exitCode === -1 ? 1 : result.exitCode;
  console.error(ERR(`\n Bootstrap failed (exit code ${code}).`));
  hint("Ensure the AKS backend is up and reachable at " + normalized);
  process.exit(1);
}
|
|
@@ -18,6 +18,10 @@ import { AKS_DEFAULTS } from "./azure-aks-naming.js";
|
|
|
18
18
|
import {
|
|
19
19
|
readClusterState, writeClusterState, resolveFluxConfig, requireCluster,
|
|
20
20
|
} from "./azure-aks-state.js";
|
|
21
|
+
import { aksDataBootstrap } from "./azure-aks-data-bootstrap.js";
|
|
22
|
+
|
|
23
|
+
// Re-export for backwards compatibility
|
|
24
|
+
export { aksDataBootstrap } from "./azure-aks-data-bootstrap.js";
|
|
21
25
|
|
|
22
26
|
// ── Template defaults ─────────────────────────────────────────────────────────
|
|
23
27
|
|
|
@@ -998,183 +1002,5 @@ export async function aksFluxReconcile(opts = {}) {
|
|
|
998
1002
|
}
|
|
999
1003
|
}
|
|
1000
1004
|
|
|
1001
|
-
// ── aks data bootstrap ────────────────────────────────────────────────────────
|
|
1002
|
-
|
|
1003
|
-
function findBootstrapRepoRoot() {
|
|
1004
|
-
const scriptPath = "scripts/bootstrap_foundation.py";
|
|
1005
|
-
const envRoot = process.env.FOUNDATION_ROOT;
|
|
1006
|
-
if (envRoot && fs.existsSync(path.join(envRoot, scriptPath))) return path.resolve(envRoot);
|
|
1007
|
-
try {
|
|
1008
|
-
const fopsPath = path.join(os.homedir(), ".fops.json");
|
|
1009
|
-
const raw = JSON.parse(fs.readFileSync(fopsPath, "utf8"));
|
|
1010
|
-
const root = raw?.projectRoot;
|
|
1011
|
-
if (root && fs.existsSync(path.join(root, scriptPath))) return path.resolve(root);
|
|
1012
|
-
} catch {}
|
|
1013
|
-
let dir = path.resolve(process.cwd());
|
|
1014
|
-
for (;;) {
|
|
1015
|
-
if (fs.existsSync(path.join(dir, scriptPath))) return dir;
|
|
1016
|
-
const parent = path.dirname(dir);
|
|
1017
|
-
if (parent === dir) break;
|
|
1018
|
-
dir = parent;
|
|
1019
|
-
}
|
|
1020
|
-
return null;
|
|
1021
|
-
}
|
|
1022
|
-
|
|
1023
|
-
async function discoverFoundationApiUrlFromCluster(execa, clusterName) {
|
|
1024
|
-
try {
|
|
1025
|
-
const { stdout } = await execa("kubectl", [
|
|
1026
|
-
"get", "ingress", "-A",
|
|
1027
|
-
"-o", "jsonpath={.items[*].spec.rules[*].host}",
|
|
1028
|
-
"--context", clusterName,
|
|
1029
|
-
], { timeout: 15000 });
|
|
1030
|
-
const first = (stdout || "").trim().split(/\s+/).filter(Boolean)[0];
|
|
1031
|
-
if (first) return `https://${first}/api`;
|
|
1032
|
-
} catch {}
|
|
1033
|
-
return null;
|
|
1034
|
-
}
|
|
1035
|
-
|
|
1036
|
-
export async function aksDataBootstrap(opts = {}) {
|
|
1037
|
-
const execa = await lazyExeca();
|
|
1038
|
-
await ensureAzCli(execa);
|
|
1039
|
-
await ensureAzAuth(execa, { subscription: opts.profile });
|
|
1040
|
-
const cl = requireCluster(opts.clusterName);
|
|
1041
|
-
|
|
1042
|
-
const { getCredentials } = await import("./azure-aks-core.js");
|
|
1043
|
-
await getCredentials(execa, {
|
|
1044
|
-
clusterName: cl.clusterName,
|
|
1045
|
-
rg: cl.resourceGroup,
|
|
1046
|
-
sub: opts.profile,
|
|
1047
|
-
});
|
|
1048
|
-
|
|
1049
|
-
let apiUrl = opts.apiUrl?.trim() || cl.foundationApiUrl?.trim();
|
|
1050
|
-
if (!apiUrl) {
|
|
1051
|
-
apiUrl = await discoverFoundationApiUrlFromCluster(execa, cl.clusterName);
|
|
1052
|
-
if (apiUrl) {
|
|
1053
|
-
hint(`Using API URL from cluster ingress: ${apiUrl}`);
|
|
1054
|
-
writeClusterState(cl.clusterName, { foundationApiUrl: apiUrl });
|
|
1055
|
-
}
|
|
1056
|
-
}
|
|
1057
|
-
if (!apiUrl) {
|
|
1058
|
-
console.error(ERR("\n Foundation backend API URL is required."));
|
|
1059
|
-
hint("Pass the backend API base URL (e.g. https://foundation.example.com/api):");
|
|
1060
|
-
hint(" fops azure aks bootstrap " + cl.clusterName + " --api-url https://your-foundation-host/api\n");
|
|
1061
|
-
process.exit(1);
|
|
1062
|
-
}
|
|
1063
|
-
const normalized = apiUrl.replace(/\/+$/, "");
|
|
1064
|
-
if (!normalized.endsWith("/api")) {
|
|
1065
|
-
console.log(WARN(" API URL should usually end with /api (e.g. https://host/api). Using as-is."));
|
|
1066
|
-
}
|
|
1067
|
-
|
|
1068
|
-
const root = findBootstrapRepoRoot();
|
|
1069
|
-
if (!root) {
|
|
1070
|
-
console.error(ERR("\n Could not find foundation-compose root (scripts/bootstrap_foundation.py)."));
|
|
1071
|
-
hint("Run from the foundation-compose directory, or set FOUNDATION_ROOT.\n");
|
|
1072
|
-
process.exit(1);
|
|
1073
|
-
}
|
|
1074
|
-
|
|
1075
|
-
const { loadEnvFromFile } = await import("./azure-helpers.js");
|
|
1076
|
-
const projectEnv = loadEnvFromFile(path.join(root, ".env"));
|
|
1077
|
-
let bootstrapEnv = {
|
|
1078
|
-
...process.env,
|
|
1079
|
-
...projectEnv,
|
|
1080
|
-
PYTHONUNBUFFERED: "1",
|
|
1081
|
-
API_URL: normalized,
|
|
1082
|
-
};
|
|
1083
|
-
|
|
1084
|
-
// CLI --bearer-token takes precedence
|
|
1085
|
-
if (opts.bearerToken?.trim()) {
|
|
1086
|
-
bootstrapEnv.BEARER_TOKEN = opts.bearerToken.trim();
|
|
1087
|
-
}
|
|
1088
|
-
|
|
1089
|
-
let hasCreds = !!(bootstrapEnv.BEARER_TOKEN?.trim()
|
|
1090
|
-
|| (bootstrapEnv.QA_USERNAME?.trim() && bootstrapEnv.QA_PASSWORD != null));
|
|
1091
|
-
if (!hasCreds) {
|
|
1092
|
-
try {
|
|
1093
|
-
const fopsPath = path.join(os.homedir(), ".fops.json");
|
|
1094
|
-
const raw = JSON.parse(fs.readFileSync(fopsPath, "utf8"));
|
|
1095
|
-
const cfg = raw?.plugins?.entries?.["fops-plugin-foundation"]?.config || {};
|
|
1096
|
-
if (cfg.bearerToken?.trim()) {
|
|
1097
|
-
bootstrapEnv.BEARER_TOKEN = cfg.bearerToken.trim();
|
|
1098
|
-
hasCreds = true;
|
|
1099
|
-
} else if (cfg.user?.trim() && cfg.password) {
|
|
1100
|
-
bootstrapEnv.QA_USERNAME = cfg.user.trim();
|
|
1101
|
-
bootstrapEnv.QA_PASSWORD = cfg.password;
|
|
1102
|
-
hasCreds = true;
|
|
1103
|
-
}
|
|
1104
|
-
} catch {}
|
|
1105
|
-
}
|
|
1106
|
-
if (!hasCreds && !opts.yes) {
|
|
1107
|
-
console.log(WARN(" No Foundation credentials in env or ~/.fops.json."));
|
|
1108
|
-
const { getInquirer } = await import(resolveCliSrc("lazy.js"));
|
|
1109
|
-
const inquirer = await getInquirer();
|
|
1110
|
-
const { authMethod } = await inquirer.prompt([{
|
|
1111
|
-
type: "list",
|
|
1112
|
-
name: "authMethod",
|
|
1113
|
-
message: "Authentication method:",
|
|
1114
|
-
choices: [
|
|
1115
|
-
{ name: "Username / password", value: "password" },
|
|
1116
|
-
{ name: "Bearer token (JWT)", value: "jwt" },
|
|
1117
|
-
],
|
|
1118
|
-
}]);
|
|
1119
|
-
if (authMethod === "jwt") {
|
|
1120
|
-
const { token } = await inquirer.prompt([{ type: "input", name: "token", message: "Bearer token:", validate: (v) => v?.trim() ? true : "Token required" }]);
|
|
1121
|
-
bootstrapEnv.BEARER_TOKEN = token.trim();
|
|
1122
|
-
} else {
|
|
1123
|
-
const { user } = await inquirer.prompt([{ type: "input", name: "user", message: "Username (email):", validate: (v) => v?.trim() ? true : "Username required" }]);
|
|
1124
|
-
const { password } = await inquirer.prompt([{ type: "password", name: "password", message: "Password:", mask: "*", validate: (v) => v ? true : "Password required" }]);
|
|
1125
|
-
bootstrapEnv.QA_USERNAME = user.trim();
|
|
1126
|
-
bootstrapEnv.QA_PASSWORD = password;
|
|
1127
|
-
}
|
|
1128
|
-
hasCreds = true;
|
|
1129
|
-
}
|
|
1130
|
-
if (!hasCreds) {
|
|
1131
|
-
console.error(ERR(" Set BEARER_TOKEN or QA_USERNAME+QA_PASSWORD (env or ~/.fops.json), or run without --yes.\n"));
|
|
1132
|
-
process.exit(1);
|
|
1133
|
-
}
|
|
1134
|
-
|
|
1135
|
-
banner("Bootstrap demo data (AKS)");
|
|
1136
|
-
kvLine("Cluster", cl.clusterName);
|
|
1137
|
-
kvLine("API URL", normalized);
|
|
1138
|
-
hint("Same as fops bootstrap — creates demo data mesh via backend API.\n");
|
|
1139
|
-
|
|
1140
|
-
const scriptsDir = path.join(root, "scripts");
|
|
1141
|
-
const venvPython = path.join(scriptsDir, ".venv", "bin", "python");
|
|
1142
|
-
const scriptPath = path.join(scriptsDir, "bootstrap_foundation.py");
|
|
1143
|
-
if (!fs.existsSync(venvPython)) {
|
|
1144
|
-
hint("Creating scripts/.venv…");
|
|
1145
|
-
await execa("python3", ["-m", "venv", path.join(scriptsDir, ".venv")], { cwd: root, timeout: 30000 });
|
|
1146
|
-
const pip = path.join(scriptsDir, ".venv", "bin", "pip");
|
|
1147
|
-
if (!fs.existsSync(pip)) {
|
|
1148
|
-
hint("Installing pip into venv…");
|
|
1149
|
-
await execa("sh", ["-c", `curl -sS https://bootstrap.pypa.io/get-pip.py | ${venvPython}`], { cwd: root, timeout: 60000 });
|
|
1150
|
-
}
|
|
1151
|
-
const reqPath = path.join(scriptsDir, "requirements.txt");
|
|
1152
|
-
if (fs.existsSync(reqPath)) {
|
|
1153
|
-
await execa(venvPython, ["-m", "pip", "install", "--quiet", "-r", reqPath], { cwd: root, timeout: 120000 });
|
|
1154
|
-
}
|
|
1155
|
-
}
|
|
1156
1005
|
|
|
1157
|
-
|
|
1158
|
-
const proc = execa(venvPython, ["-u", scriptPath], {
|
|
1159
|
-
cwd: root,
|
|
1160
|
-
timeout: 600_000,
|
|
1161
|
-
env: bootstrapEnv,
|
|
1162
|
-
reject: false,
|
|
1163
|
-
});
|
|
1164
|
-
proc.stdout?.on("data", (chunk) => { const s = chunk.toString(); captured += s; process.stdout.write(s); });
|
|
1165
|
-
proc.stderr?.on("data", (chunk) => { const s = chunk.toString(); captured += s; process.stderr.write(s); });
|
|
1166
|
-
const result = await proc;
|
|
1167
|
-
|
|
1168
|
-
if (result.exitCode === 0) {
|
|
1169
|
-
console.log(OK("\n ✓ Bootstrap complete! Demo data mesh created on the cluster backend."));
|
|
1170
|
-
writeClusterState(cl.clusterName, { foundationApiUrl: normalized });
|
|
1171
|
-
return;
|
|
1172
|
-
}
|
|
1173
|
-
if ((captured || "").includes("401") || (captured || "").includes("Insufficient permissions")) {
|
|
1174
|
-
hint("\n If the user needs Foundation Admin, grant it in the UI or via your IdP, then retry.");
|
|
1175
|
-
}
|
|
1176
|
-
const code = result.exitCode === 255 || result.exitCode === -1 ? 1 : result.exitCode;
|
|
1177
|
-
console.error(ERR(`\n Bootstrap failed (exit code ${code}).`));
|
|
1178
|
-
hint("Ensure the AKS backend is up and reachable at " + normalized);
|
|
1179
|
-
process.exit(1);
|
|
1180
|
-
}
|
|
1006
|
+
// aksDataBootstrap is now in azure-aks-data-bootstrap.js and re-exported above for backwards compatibility
|
|
@@ -28,17 +28,27 @@ export const AKS_DEFAULTS = {
|
|
|
28
28
|
|
|
29
29
|
// Region pairs for Postgres geo-replication
// (primary region → region where the read replica is placed).
export const PG_REPLICA_REGIONS = {
  uaenorth: "northeurope", // UAE → EU (North Europe)
  westeurope: "uaenorth", // EU → UAE
  northeurope: "uaenorth", // EU → UAE
  eastus: "northeurope", // US East → EU
  westus2: "eastus", // US West → US East
};
|
|
36
37
|
|
|
38
|
+
// HA replica regions for storage object replication (cross-region)
// (primary region → region hosting the replicated storage).
// NOTE(review): regions absent from this map presumably get no cross-region
// replica — confirm against the storage provisioning code.
export const HA_REPLICA_REGIONS = {
  uaenorth: "northeurope", // UAE North → North Europe
  northeurope: "uaenorth", // North Europe → UAE North
  westeurope: "northeurope", // West Europe → North Europe
  eastus: "northeurope", // US East → North Europe
  westus2: "northeurope", // US West → North Europe
};
|
|
46
|
+
|
|
37
47
|
// ── Postgres Defaults ─────────────────────────────────────────────────────────
|
|
38
48
|
|
|
39
49
|
export const PG_DEFAULTS = {
|
|
40
|
-
sku: "
|
|
41
|
-
tier: "
|
|
50
|
+
sku: "Standard_D2ds_v4",
|
|
51
|
+
tier: "GeneralPurpose",
|
|
42
52
|
version: "15",
|
|
43
53
|
storageSizeGb: 32,
|
|
44
54
|
adminUser: "foundation",
|