@meshxdata/fops 0.1.45 → 0.1.47
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +202 -17
- package/package.json +1 -1
- package/src/commands/lifecycle.js +81 -5
- package/src/commands/setup.js +45 -4
- package/src/plugins/bundled/fops-plugin-azure/index.js +29 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-core.js +1185 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-flux.js +1180 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-ingress.js +393 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-naming.js +104 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-network.js +296 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-postgres.js +768 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-reconcilers.js +538 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-secrets.js +849 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-stacks.js +643 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-state.js +145 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-storage.js +496 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-terraform.js +1032 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks.js +155 -4245
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-keyvault.js +186 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/azure-results.js +5 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/infra-cmds.js +758 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/registry-cmds.js +250 -0
- package/src/plugins/bundled/fops-plugin-azure/lib/commands/test-cmds.js +2 -1
- package/src/plugins/bundled/fops-plugin-foundation/lib/apply.js +3 -2
- package/src/plugins/bundled/fops-plugin-foundation/lib/helpers.js +21 -0
- package/src/plugins/bundled/fops-plugin-foundation/lib/tools-read.js +3 -5
- package/src/ui/tui/App.js +13 -13
- package/src/web/dist/assets/index-NXC8Hvnp.css +1 -0
- package/src/web/dist/assets/index-QH1N4ejK.js +112 -0
- package/src/web/dist/index.html +2 -2
- package/src/web/server.js +4 -4
- package/src/web/dist/assets/index-BphVaAUd.css +0 -1
- package/src/web/dist/assets/index-CSckLzuG.js +0 -129
|
@@ -0,0 +1,643 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* azure-aks-stacks.js – Stack lifecycle management for AKS clusters
|
|
3
|
+
*
|
|
4
|
+
* Dependencies: azure.js, azure-aks-naming.js, azure-aks-state.js,
|
|
5
|
+
* azure-aks-secrets.js, azure-aks-storage.js, azure-aks-ingress.js
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import {
|
|
9
|
+
lazyExeca,
|
|
10
|
+
subArgs,
|
|
11
|
+
ensureAzCli,
|
|
12
|
+
ensureAzAuth,
|
|
13
|
+
banner,
|
|
14
|
+
kvLine,
|
|
15
|
+
hint,
|
|
16
|
+
runReconcilers,
|
|
17
|
+
OK, ERR, WARN, DIM, LABEL,
|
|
18
|
+
} from "./azure.js";
|
|
19
|
+
|
|
20
|
+
import { pgServerName, kvName, timeSince } from "./azure-aks-naming.js";
|
|
21
|
+
import {
|
|
22
|
+
readClusterState,
|
|
23
|
+
readStackState,
|
|
24
|
+
writeStackState,
|
|
25
|
+
deleteStackState,
|
|
26
|
+
listStacks,
|
|
27
|
+
requireCluster,
|
|
28
|
+
} from "./azure-aks-state.js";
|
|
29
|
+
import { SECRET_STORE_NAME, detectEsApiVersion } from "./azure-aks-secrets.js";
|
|
30
|
+
import { reconcileHelmRepos } from "./azure-aks-storage.js";
|
|
31
|
+
import { clusterDomain } from "./azure-aks-ingress.js";
|
|
32
|
+
|
|
33
|
+
// ── Helpers ──────────────────────────────────────────────────────────────────
|
|
34
|
+
|
|
35
|
+
export function printClusterInfo(cl) {
  // Render a summary card for a tracked cluster: identity, sizing, the
  // optional Flux binding, and copy-pasteable follow-up commands.
  console.log(`\n ${LABEL("Cluster Info")}`);

  const rows = [
    ["Name", cl.clusterName],
    ["RG", cl.resourceGroup],
    ["FQDN", cl.fqdn || "—"],
    ["K8s", cl.kubernetesVersion || "—"],
    ["Nodes", `${cl.nodeCount || "?"} x ${cl.nodeVmSize || "?"}`],
  ];
  for (const [label, value] of rows) {
    kvLine(label, value, { pad: 12 });
  }
  if (cl.flux) {
    kvLine("Flux", `${cl.flux.owner}/${cl.flux.repo}`, { pad: 12 });
  }

  hint("");
  hint(`kubectl: kubectl --context ${cl.clusterName} get nodes`);
  hint(`status: fops azure aks status ${cl.clusterName}`);
  if (!cl.flux) {
    hint(`flux: fops azure aks flux bootstrap ${cl.clusterName} --flux-owner <org> --flux-repo <repo>`);
  }
  console.log("");
}
|
|
53
|
+
|
|
54
|
+
export function stackDomain(clusterName, namespace) {
  // The "foundation" stack owns the cluster's base domain; every other
  // stack is served from a namespace-prefixed subdomain of it.
  const base = clusterDomain(clusterName);
  return namespace === "foundation" ? base : `${namespace}.${base}`;
}
|
|
59
|
+
|
|
60
|
+
/**
 * Derive a Postgres database/role name from a stack namespace.
 *
 * Hyphens are not valid in unquoted Postgres identifiers, so every "-" is
 * replaced with "_" (e.g. "my-app" -> "my_app").
 *
 * @param {string} namespace - Kubernetes namespace of the stack.
 * @returns {string} Identifier safe for use as an unquoted DB/role name.
 */
export function pgDatabaseName(namespace) {
  // replaceAll is clearer than replace(/-/g, …) for a literal substitution.
  return namespace.replaceAll("-", "_");
}
|
|
63
|
+
|
|
64
|
+
// ── Stack Reconcilers ────────────────────────────────────────────────────────
|
|
65
|
+
|
|
66
|
+
// Ensure the stack's Kubernetes namespace exists (idempotent).
async function reconcileStackNamespace(ctx) {
  const { execa, clusterName, namespace } = ctx;
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 30000, reject: false, ...opts });

  const probe = await kubectl(["get", "namespace", namespace]);
  if (probe.exitCode === 0) {
    console.log(OK(` ✓ Namespace "${namespace}" exists`));
    return;
  }
  await kubectl(["create", "namespace", namespace]);
  console.log(OK(` ✓ Namespace "${namespace}" created`));
}
|
|
79
|
+
|
|
80
|
+
/**
 * Provision a per-stack Postgres database and role on the cluster's Azure
 * Flexible Server, then sync connection credentials into the stack namespace.
 *
 * Flow: read the admin password from the "postgres" secret in the foundation
 * namespace → run a short-lived psql Job (postgres:16-alpine) that creates the
 * DB/role if missing → apply a "postgres" secret into the stack namespace with
 * host/user/password keys (plus mlflow-specific aliases).
 *
 * Skips silently (with a warning) when the foundation postgres secret is absent.
 */
async function reconcileStackPgDatabase(ctx) {
  const { execa, clusterName, namespace, rg, sub } = ctx;
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 60000, reject: false, ...opts });

  // Server name is derived deterministically from the cluster name; the host
  // follows the Azure Flexible Server DNS convention.
  const pgServer = pgServerName(clusterName);
  const pgHost = `${pgServer}.postgres.database.azure.com`;
  const dbName = pgDatabaseName(namespace);

  // Read admin password from foundation namespace postgres secret
  const { stdout: pwB64 } = await kubectl([
    "get", "secret", "postgres", "-n", "foundation",
    "-o", "jsonpath={.data.password}",
  ]);
  if (!pwB64) {
    console.log(WARN(" ⚠ No postgres secret found in foundation — skipping DB setup"));
    return;
  }
  // Secret data is base64-encoded by Kubernetes; decode to the raw password.
  const pgPass = Buffer.from(pwB64, "base64").toString();

  // Create database and role for this namespace
  // NOTE(review): Postgres does not allow CREATE DATABASE inside a transaction
  // block, and a DO body runs in one — the EXECUTE 'CREATE DATABASE …' branch
  // likely errors when the DB is missing. Confirm the Job actually creates new
  // databases (the "DONE" sentinel below may mask a partial failure).
  // NOTE(review): dbName and pgPass are interpolated directly into SQL and into
  // a `sh -c` double-quoted script; a password containing quotes, `$`, or
  // backticks would break quoting and is an injection vector — consider psql
  // variables or env-based auth instead.
  const sqlStatements = [
    `DO $$ BEGIN IF NOT EXISTS (SELECT FROM pg_database WHERE datname = '${dbName}') THEN EXECUTE 'CREATE DATABASE ${dbName}'; END IF; IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '${dbName}') THEN CREATE ROLE ${dbName} LOGIN PASSWORD '${pgPass}'; END IF; END $$;`,
    `GRANT ALL ON DATABASE ${dbName} TO ${dbName};`,
  ];

  // Chain statements with && so the DONE sentinel only prints on full success.
  const script = sqlStatements.map(s => `psql -c "${s}"`).join(" && ") + " && echo DONE";
  const jobName = `fops-pg-setup-${namespace}`;

  // One-shot Job in the foundation namespace; TTL cleans it up after 60s.
  // NOTE(review): the admin password is exposed as a plain env var on the pod
  // spec (visible via `kubectl describe`) — acceptable only if cluster access
  // already implies secret access; confirm.
  const jobManifest = JSON.stringify({
    apiVersion: "batch/v1", kind: "Job",
    metadata: { name: jobName, namespace: "foundation" },
    spec: {
      backoffLimit: 2, ttlSecondsAfterFinished: 60,
      template: {
        spec: {
          restartPolicy: "Never",
          containers: [{
            name: "psql",
            image: "postgres:16-alpine",
            env: [
              { name: "PGHOST", value: pgHost },
              { name: "PGUSER", value: "foundation" },
              { name: "PGDATABASE", value: "postgres" },
              { name: "PGPASSWORD", value: pgPass },
              { name: "PGSSLMODE", value: "require" },
            ],
            command: ["sh", "-c", script],
          }],
        },
      },
    },
  });

  // Remove any leftover Job from a previous run, then give the API server a
  // moment to finish the delete before re-applying the same name.
  await kubectl(["delete", "job", jobName, "-n", "foundation", "--ignore-not-found"]);
  await new Promise(r => setTimeout(r, 1000));

  const { exitCode, stderr } = await kubectl(["apply", "-f", "-"], { input: jobManifest });
  if (exitCode !== 0) {
    console.log(WARN(` ⚠ pg-setup job failed: ${(stderr || "").split("\n")[0]}`));
    return;
  }

  // Block (up to 60s) until the Job reports completion.
  const { exitCode: waitCode } = await execa("kubectl", [
    "--context", clusterName,
    "wait", "--for=condition=complete", `job/${jobName}`,
    "-n", "foundation", "--timeout=60s",
  ], { timeout: 70000, reject: false });

  if (waitCode === 0) {
    console.log(OK(` ✓ Postgres database "${dbName}" ready`));
  } else {
    // The wait can time out even when the pod finished; fall back to checking
    // the logs for the DONE sentinel before declaring failure.
    const { stdout: logs } = await kubectl([
      "logs", `job/${jobName}`, "-n", "foundation", "--tail=5",
    ]);
    if (logs?.includes("DONE")) {
      console.log(OK(` ✓ Postgres database "${dbName}" ready`));
    } else {
      console.log(WARN(` ⚠ pg-setup job didn't complete — check: kubectl logs job/${jobName} -n foundation`));
    }
  }

  // Best-effort cleanup (the TTL would also remove it eventually).
  await kubectl(["delete", "job", jobName, "-n", "foundation", "--ignore-not-found"]);

  // Create postgres secret in the stack namespace
  // Rendered client-side with --dry-run then applied, so re-runs update the
  // existing secret instead of failing on "already exists".
  // NOTE(review): the same admin password is reused for the per-stack role and
  // all aliases (superUserPassword, postgres-password, mlflow-password) —
  // presumably intentional for bootstrap, but verify before hardening.
  const secretYaml = await kubectl([
    "create", "secret", "generic", "postgres", "-n", namespace,
    "--from-literal", `host=${pgHost}`,
    "--from-literal", `user=${dbName}`,
    "--from-literal", `password=${pgPass}`,
    "--from-literal", `superUserPassword=${pgPass}`,
    "--from-literal", `postgres-password=${pgPass}`,
    "--from-literal", `mlflow-username=mlflow`,
    "--from-literal", `mlflow-password=${pgPass}`,
    "--dry-run=client", "-o", "yaml",
  ]);
  if (secretYaml.stdout) {
    await kubectl(["apply", "-f", "-"], { input: secretYaml.stdout });
    console.log(OK(` ✓ Postgres secret synced to ${namespace} namespace`));
  }
}
|
|
181
|
+
|
|
182
|
+
/**
 * Ensure an External Secrets SecretStore backed by the cluster's Azure Key
 * Vault exists in the stack namespace, replicating the service-principal
 * credentials secret from the foundation namespace when necessary.
 *
 * Bails with a warning if the foundation "azure-secret-sp" secret is missing,
 * since the SecretStore cannot authenticate without it.
 */
async function reconcileStackSecretStore(ctx) {
  // NOTE(review): `rg` is destructured but unused here — presumably kept for
  // ctx-shape symmetry with the other reconcilers.
  const { execa, clusterName, namespace, rg, sub } = ctx;
  const vaultName = kvName(clusterName);

  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 30000, reject: false, ...opts });

  // Check if SP secret exists in foundation, replicate to this namespace
  const { stdout: spSecretJson } = await kubectl([
    "get", "secret", "azure-secret-sp", "-n", "foundation", "-o", "json",
  ]);
  const spSecret = spSecretJson ? JSON.parse(spSecretJson) : null;

  if (!spSecret || !spSecret.data?.ClientID) {
    console.log(WARN(" ⚠ Secret 'azure-secret-sp' not found in foundation — SecretStore needs SP credentials"));
    return;
  }

  // Replicate to this namespace if not foundation
  if (namespace !== "foundation") {
    const { exitCode: spExists } = await kubectl(["get", "secret", "azure-secret-sp", "-n", namespace]);
    if (spExists !== 0) {
      // Secret data comes back base64-encoded; decode so kubectl re-encodes it.
      await kubectl([
        "create", "secret", "generic", "azure-secret-sp",
        "-n", namespace,
        "--from-literal", `ClientID=${Buffer.from(spSecret.data.ClientID, "base64").toString()}`,
        "--from-literal", `ClientSecret=${Buffer.from(spSecret.data.ClientSecret, "base64").toString()}`,
      ]);
      console.log(OK(` ✓ Replicated azure-secret-sp to ${namespace}`));
    }
  }

  // Get tenant ID
  // Needed for the SecretStore's azurekv provider config; scoped to the
  // subscription selected by ensureAzAuth via subArgs.
  const { stdout: tenantId } = await execa("az", [
    "account", "show", "--query", "tenantId", "-o", "tsv",
    ...subArgs(sub),
  ], { reject: false, timeout: 15000 });

  // Create SecretStore
  // Idempotence: an existing SecretStore is left untouched (not re-applied).
  const { exitCode: ssExists } = await kubectl([
    "get", "secretstore", SECRET_STORE_NAME, "-n", namespace,
  ]);
  if (ssExists === 0) {
    console.log(OK(` ✓ SecretStore exists in ${namespace}`));
    return;
  }

  // The external-secrets API group version varies by installed version, so
  // probe the cluster rather than hardcoding it.
  const apiVersion = await detectEsApiVersion(kubectl);
  const manifest = `apiVersion: ${apiVersion}
kind: SecretStore
metadata:
  name: ${SECRET_STORE_NAME}
  namespace: ${namespace}
spec:
  provider:
    azurekv:
      authType: ServicePrincipal
      vaultUrl: https://${vaultName}.vault.azure.net
      tenantId: ${(tenantId || "").trim()}
      authSecretRef:
        clientId:
          name: azure-secret-sp
          key: ClientID
        clientSecret:
          name: azure-secret-sp
          key: ClientSecret
`;
  const { exitCode: applyCode, stderr } = await kubectl(
    ["apply", "-f", "-"],
    { input: manifest },
  );
  if (applyCode === 0) {
    console.log(OK(` ✓ SecretStore "${SECRET_STORE_NAME}" created in ${namespace}`));
  } else {
    console.log(WARN(` ⚠ SecretStore creation failed in ${namespace}: ${(stderr || "").split("\n")[0]}`));
  }
}
|
|
259
|
+
|
|
260
|
+
// Ensure the OPA keypair secret exists in the stack namespace, copying it
// from the foundation namespace when present (missing keys fall back to
// "placeholder" values).
async function reconcileStackK8sSecrets(ctx) {
  const { execa, clusterName, namespace } = ctx;
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 30000, reject: false, ...opts });

  // Already present in this namespace? Nothing to do.
  const existing = await kubectl([
    "get", "secret", "foundation-opa-keypair", "-n", namespace,
  ]);
  if (existing.exitCode === 0) {
    console.log(OK(` ✓ OPA keypair secret exists in ${namespace}`));
    return;
  }

  // Try to copy from foundation namespace.
  const { stdout: foundationOpa } = await kubectl([
    "get", "secret", "foundation-opa-keypair", "-n", "foundation", "-o", "json",
  ]);
  if (!foundationOpa) return;

  try {
    const source = JSON.parse(foundationOpa);
    const decode = (b64) => (b64 ? Buffer.from(b64, "base64").toString() : "placeholder");
    const accessKey = decode(source.data?.OPA_ACCESS_KEY_ID);
    const secretKey = decode(source.data?.OPA_SECRET_ACCESS_KEY);

    // Render client-side, then apply so re-runs stay idempotent.
    const { stdout: opaYaml } = await kubectl([
      "create", "secret", "generic", "foundation-opa-keypair", "-n", namespace,
      "--from-literal", `OPA_ACCESS_KEY_ID=${accessKey}`,
      "--from-literal", `OPA_SECRET_ACCESS_KEY=${secretKey}`,
      "--dry-run=client", "-o", "yaml",
    ]);
    if (opaYaml) {
      await kubectl(["apply", "-f", "-"], { input: opaYaml });
      console.log(OK(` ✓ OPA keypair secret replicated to ${namespace}`));
    }
  } catch {
    console.log(WARN(` ⚠ Could not replicate OPA secret to ${namespace}`));
  }
}
|
|
303
|
+
|
|
304
|
+
/**
 * Ensure an S3-compatible storage engine (MinIO) is running in the stack
 * namespace. Deploys one replica plus two Services: the canonical
 * "foundation-storage-engine" Service and a "minio" alias.
 *
 * Idempotent: if the Deployment already exists, nothing is touched.
 */
async function reconcileStackStorageEngine(ctx) {
  const { execa, clusterName, namespace } = ctx;
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 30000, reject: false, ...opts });

  // Check if deployment already exists
  const { exitCode } = await kubectl([
    "get", "deployment", "foundation-storage-engine", "-n", namespace,
  ]);
  if (exitCode === 0) {
    console.log(OK(` ✓ Storage engine deployment exists in ${namespace}`));
    return;
  }

  hint(`Creating foundation-storage-engine deployment in ${namespace}…`);
  // NOTE(review): root credentials are hardcoded ("minio"/"minio123") and the
  // data volume is an emptyDir (contents lost on pod restart) — this looks
  // like a dev/bootstrap default; confirm before relying on it in production.
  const manifest = JSON.stringify({
    apiVersion: "apps/v1", kind: "Deployment",
    metadata: { name: "foundation-storage-engine", namespace, labels: { app: "foundation-storage-engine" } },
    spec: {
      replicas: 1,
      selector: { matchLabels: { app: "foundation-storage-engine" } },
      template: {
        metadata: { labels: { app: "foundation-storage-engine" } },
        spec: {
          containers: [{
            name: "storage-engine",
            image: "minio/minio:RELEASE.2024-11-07T00-52-20Z",
            args: ["server", "/data", "--console-address", ":9001"],
            env: [
              { name: "MINIO_ROOT_USER", value: "minio" },
              { name: "MINIO_ROOT_PASSWORD", value: "minio123" },
            ],
            ports: [
              { containerPort: 9000, name: "api" },      // S3 API
              { containerPort: 9001, name: "console" },  // web console
            ],
            volumeMounts: [{ name: "data", mountPath: "/data" }],
            resources: { requests: { cpu: "100m", memory: "256Mi" }, limits: { cpu: "500m", memory: "512Mi" } },
            readinessProbe: { httpGet: { path: "/minio/health/ready", port: 9000 }, initialDelaySeconds: 5, periodSeconds: 10 },
          }],
          volumes: [{ name: "data", emptyDir: {} }],
        },
      },
    },
  });
  // NOTE(review): apply results below are not checked (reject:false), so a
  // failed apply still reports success at the end of this function.
  await kubectl(["apply", "-f", "-"], { input: manifest });

  // Service
  // Ports 8080 and 9000 both forward to the MinIO API port (9000).
  const svcManifest = JSON.stringify({
    apiVersion: "v1", kind: "Service",
    metadata: { name: "foundation-storage-engine", namespace },
    spec: {
      selector: { app: "foundation-storage-engine" },
      ports: [
        { port: 8080, targetPort: 9000, name: "api" },
        { port: 9000, targetPort: 9000, name: "s3" },
      ],
    },
  });
  await kubectl(["apply", "-f", "-"], { input: svcManifest });

  // Minio alias service
  // Same selector, extra port 80 — presumably for consumers that expect a
  // Service literally named "minio"; verify against the charts deployed here.
  const minioSvcManifest = JSON.stringify({
    apiVersion: "v1", kind: "Service",
    metadata: { name: "minio", namespace },
    spec: {
      selector: { app: "foundation-storage-engine" },
      ports: [
        { port: 80, targetPort: 9000, name: "http" },
        { port: 9000, targetPort: 9000, name: "s3" },
        { port: 8080, targetPort: 9000, name: "api" },
      ],
    },
  });
  await kubectl(["apply", "-f", "-"], { input: minioSvcManifest });

  console.log(OK(` ✓ Storage engine deployed to ${namespace}`));
}
|
|
382
|
+
|
|
383
|
+
/**
 * Create (or update) the Istio Gateway exposing this stack's domain on
 * HTTP/80 and HTTPS/443, then record the domain in local stack state.
 *
 * Fix: the `kubectl apply` result was previously ignored (kubectl runs with
 * reject:false), so a failed apply still printed success and recorded the
 * domain. The exit code is now checked, matching the other reconcilers, and
 * state is only written after a successful apply.
 */
async function reconcileStackIngress(ctx) {
  const { execa, clusterName, namespace } = ctx;
  const domain = stackDomain(clusterName, namespace);

  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 15000, reject: false, ...opts });

  // Gateway serving the bare domain plus a wildcard for sub-hosts; TLS uses
  // the shared istio-ingressgateway-certs credential.
  const gwYaml = `apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: ${namespace}-gateway
  namespace: ${namespace}
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "${domain}"
        - "*.${domain}"
    - port:
        number: 443
        name: https
        protocol: HTTPS
      tls:
        mode: SIMPLE
        credentialName: istio-ingressgateway-certs
      hosts:
        - "${domain}"
        - "*.${domain}"
`;

  const { exitCode, stderr } = await kubectl(["apply", "-f", "-"], { input: gwYaml });
  if (exitCode !== 0) {
    console.log(WARN(` ⚠ Gateway apply failed for ${domain}: ${(stderr || "").split("\n")[0]}`));
    return;
  }
  console.log(OK(` ✓ Gateway created for ${domain}`));

  // Update state with domain
  writeStackState(clusterName, namespace, { domain });
}
|
|
425
|
+
|
|
426
|
+
// ── Stack Reconciler Array ───────────────────────────────────────────────────
|
|
427
|
+
|
|
428
|
+
// Ordered pipeline executed by aksStackUp via runReconcilers. The namespace
// step runs first because every later step creates resources inside it; the
// ingress step runs last and records the stack's domain in local state.
export const STACK_RECONCILERS = [
  { name: "namespace", fn: reconcileStackNamespace },          // ensure k8s namespace exists
  { name: "pg-database", fn: reconcileStackPgDatabase },       // per-stack Postgres DB/role + secret
  { name: "secret-store", fn: reconcileStackSecretStore },     // External Secrets SecretStore + SP creds
  { name: "k8s-secrets", fn: reconcileStackK8sSecrets },       // replicate OPA keypair secret
  { name: "storage-engine", fn: reconcileStackStorageEngine }, // MinIO deployment + services
  { name: "helm-repos", fn: reconcileHelmRepos },              // shared reconciler from azure-aks-storage.js
  { name: "ingress", fn: reconcileStackIngress },              // Istio Gateway; writes domain to state
];
|
|
437
|
+
|
|
438
|
+
// ── CLI Commands ─────────────────────────────────────────────────────────────
|
|
439
|
+
|
|
440
|
+
/**
 * Deploy (or converge) a stack into its own namespace on the tracked AKS
 * cluster by running the STACK_RECONCILERS pipeline.
 *
 * @param {object} opts - { namespace, clusterName, profile, ... } (forwarded to reconcilers).
 */
export async function aksStackUp(opts = {}) {
  const { namespace, clusterName: explicitCluster, profile } = opts;

  if (!namespace) {
    console.error(ERR("\n Namespace is required for stack deployment"));
    hint("Usage: fops azure aks stack up <namespace> [--cluster <name>]");
    process.exit(1);
  }

  const cluster = requireCluster(explicitCluster);
  const { clusterName, resourceGroup: rg } = cluster;
  const domain = stackDomain(clusterName, namespace);
  const dbName = pgDatabaseName(namespace);

  banner(`Stack: ${namespace}`);
  kvLine("Cluster", clusterName);
  kvLine("Namespace", namespace);
  kvLine("Domain", domain);

  const execa = lazyExeca();
  await ensureAzCli(execa);
  const sub = await ensureAzAuth(execa, profile);

  // Record intent up front so an interrupted run leaves a "deploying" marker.
  writeStackState(clusterName, namespace, {
    status: "deploying",
    domain,
    postgres: { database: dbName, role: dbName },
  });

  await runReconcilers(STACK_RECONCILERS, { execa, clusterName, namespace, rg, sub, opts });

  writeStackState(clusterName, namespace, { status: "deployed" });

  console.log(OK(`\n ✓ Stack "${namespace}" deployed to ${clusterName}`));
  hint(` Domain: https://${domain}`);
  hint(` kubectl: kubectl --context ${clusterName} -n ${namespace} get pods`);
  console.log("");
}
|
|
486
|
+
|
|
487
|
+
/**
 * Tear down a stack: confirm (unless --yes), delete its namespace (the
 * cascade removes everything inside), and drop it from local state.
 * The built-in "foundation" stack is protected.
 */
export async function aksStackDown(opts = {}) {
  const { namespace, clusterName: explicitCluster, profile, yes } = opts;

  if (!namespace) {
    console.error(ERR("\n Namespace is required"));
    hint("Usage: fops azure aks stack down <namespace> [--cluster <name>]");
    process.exit(1);
  }
  if (namespace === "foundation") {
    console.error(ERR("\n Cannot remove the default 'foundation' stack"));
    hint("Use 'fops azure aks down' to destroy the entire cluster");
    process.exit(1);
  }

  const { clusterName } = requireCluster(explicitCluster);

  if (!readStackState(clusterName, namespace)) {
    console.error(ERR(`\n Stack "${namespace}" not found on cluster ${clusterName}`));
    process.exit(1);
  }

  banner(`Remove Stack: ${namespace}`);
  kvLine("Cluster", clusterName);
  kvLine("Namespace", namespace);

  // Interactive confirmation unless --yes was passed.
  if (!yes) {
    const readline = await import("node:readline");
    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
    const answer = await new Promise(resolve => {
      rl.question(` Delete stack "${namespace}" from ${clusterName}? [y/N] `, resolve);
    });
    rl.close();
    if (answer.toLowerCase() !== "y") {
      console.log(DIM(" Cancelled.\n"));
      return;
    }
  }

  const execa = lazyExeca();
  await ensureAzCli(execa);
  await ensureAzAuth(execa, profile);

  const kubectl = (args, o = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 60000, reject: false, ...o });

  // Delete namespace (cascades to all resources); --wait=false returns
  // immediately while Kubernetes finalizes in the background.
  hint(`Deleting namespace ${namespace}…`);
  const result = await kubectl(["delete", "namespace", namespace, "--wait=false"]);
  console.log(result.exitCode === 0
    ? OK(` ✓ Namespace "${namespace}" deletion initiated`)
    : WARN(` ⚠ Could not delete namespace — may already be deleted`));

  // Remove from local state regardless — the namespace is gone or going.
  deleteStackState(clusterName, namespace);
  console.log(OK(`\n ✓ Stack "${namespace}" removed from ${clusterName}\n`));
}
|
|
548
|
+
|
|
549
|
+
/**
 * List every stack tracked for the current (or explicitly named) cluster,
 * showing status, domain, age, and the backing Postgres database.
 */
export async function aksStackList(opts = {}) {
  const { clusterName: explicitCluster } = opts;

  const cl = readClusterState(explicitCluster);
  if (!cl) {
    console.error(ERR("\n No AKS cluster tracked"));
    hint("Create one: fops azure aks up <name>");
    process.exit(1);
  }

  const { clusterName } = cl;
  const stacks = listStacks(clusterName);

  banner(`Stacks: ${clusterName}`);

  if (stacks.length === 0) {
    console.log(DIM(" No stacks deployed.\n"));
    hint("Deploy one: fops azure aks stack up <namespace>");
    console.log("");
    return;
  }

  console.log("");
  for (const stack of stacks) {
    const status = stack.status || "unknown";
    const age = stack.createdAt ? timeSince(stack.createdAt) : "—";
    const paint = status === "deployed" ? OK : WARN;
    console.log(` ${LABEL(stack.namespace.padEnd(20))} ${paint(status.padEnd(12))} ${DIM(stack.domain || "—")}`);
    console.log(` ${DIM(" Created:")} ${age} ago ${DIM("DB:")} ${stack.postgres?.database || "—"}`);
    console.log("");
  }
}
|
|
581
|
+
|
|
582
|
+
/**
 * Show a stack's recorded state plus a live kubectl snapshot (namespace
 * presence, pod counts, service counts).
 */
export async function aksStackStatus(opts = {}) {
  const { namespace, clusterName: explicitCluster } = opts;

  if (!namespace) {
    console.error(ERR("\n Namespace is required"));
    hint("Usage: fops azure aks stack status <namespace> [--cluster <name>]");
    process.exit(1);
  }

  const { clusterName } = requireCluster(explicitCluster);

  const stack = readStackState(clusterName, namespace);
  if (!stack) {
    console.error(ERR(`\n Stack "${namespace}" not found on cluster ${clusterName}`));
    hint(`Deploy it: fops azure aks stack up ${namespace}`);
    process.exit(1);
  }

  banner(`Stack Status: ${namespace}`);
  const rows = [
    ["Cluster", clusterName],
    ["Namespace", namespace],
    ["Domain", stack.domain || "—"],
    ["Status", stack.status || "unknown"],
    ["Database", stack.postgres?.database || "—"],
    ["Created", stack.createdAt ? timeSince(stack.createdAt) + " ago" : "—"],
  ];
  for (const [label, value] of rows) {
    kvLine(label, value, { pad: 12 });
  }

  const execa = lazyExeca();
  const kubectl = (args, o = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 15000, reject: false, ...o });

  // Check namespace exists before probing workloads.
  const ns = await kubectl(["get", "namespace", namespace]);
  if (ns.exitCode !== 0) {
    console.log(WARN(`\n ⚠ Namespace "${namespace}" does not exist in cluster`));
    return;
  }

  // Pod summary.
  const { stdout: podJson } = await kubectl([
    "get", "pods", "-n", namespace, "-o", "json",
  ]);
  if (podJson) {
    const pods = JSON.parse(podJson).items || [];
    const running = pods.filter(p => p.status?.phase === "Running").length;
    console.log(OK(` Pods: ${running}/${pods.length} running`));
  }

  // Service summary.
  const { stdout: svcJson } = await kubectl([
    "get", "services", "-n", namespace, "-o", "json",
  ]);
  if (svcJson) {
    const svcs = JSON.parse(svcJson).items || [];
    console.log(OK(` Services: ${svcs.length}`));
  }

  console.log("");
  hint(`kubectl: kubectl --context ${clusterName} -n ${namespace} get all`);
  console.log("");
}
|