@meshxdata/fops 0.1.49 → 0.1.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/CHANGELOG.md +182 -0
  2. package/package.json +1 -1
  3. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-core.js +347 -6
  4. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-data-bootstrap.js +421 -0
  5. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-flux.js +5 -179
  6. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-naming.js +14 -4
  7. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-postgres.js +171 -4
  8. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-storage.js +303 -8
  9. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks.js +2 -0
  10. package/src/plugins/bundled/fops-plugin-azure/lib/azure-auth.js +1 -1
  11. package/src/plugins/bundled/fops-plugin-azure/lib/azure-fleet-swarm.js +936 -0
  12. package/src/plugins/bundled/fops-plugin-azure/lib/azure-fleet.js +10 -918
  13. package/src/plugins/bundled/fops-plugin-azure/lib/azure-helpers.js +5 -0
  14. package/src/plugins/bundled/fops-plugin-azure/lib/azure-keyvault-keys.js +413 -0
  15. package/src/plugins/bundled/fops-plugin-azure/lib/azure-keyvault.js +14 -399
  16. package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops-config.js +754 -0
  17. package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops-knock.js +527 -0
  18. package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops-ssh.js +427 -0
  19. package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops.js +99 -1686
  20. package/src/plugins/bundled/fops-plugin-azure/lib/azure-provision-health.js +279 -0
  21. package/src/plugins/bundled/fops-plugin-azure/lib/azure-provision-init.js +186 -0
  22. package/src/plugins/bundled/fops-plugin-azure/lib/azure-provision.js +66 -444
  23. package/src/plugins/bundled/fops-plugin-azure/lib/azure-results.js +11 -0
  24. package/src/plugins/bundled/fops-plugin-azure/lib/azure-vm-lifecycle.js +5 -540
  25. package/src/plugins/bundled/fops-plugin-azure/lib/azure-vm-terraform.js +544 -0
  26. package/src/plugins/bundled/fops-plugin-azure/lib/commands/infra-cmds.js +75 -3
  27. package/src/plugins/bundled/fops-plugin-azure/lib/commands/test-cmds.js +227 -11
  28. package/src/plugins/bundled/fops-plugin-azure/lib/commands/vm-cmds.js +2 -1
  29. package/src/plugins/bundled/fops-plugin-azure/lib/pytest-parse.js +21 -0
  30. package/src/plugins/bundled/fops-plugin-foundation/index.js +309 -44
@@ -6,7 +6,7 @@
6
6
 
7
7
  import chalk from "chalk";
8
8
  import { DEFAULTS, OK, WARN, ERR, DIM, hint, banner, kvLine, subArgs } from "./azure.js";
9
- import { pgServerName, generatePassword, PG_DEFAULTS, PG_REPLICA_REGIONS, EH_DEFAULTS, ehNamespaceName } from "./azure-aks-naming.js";
9
+ import { pgServerName, generatePassword, PG_DEFAULTS, PG_REPLICA_REGIONS, EH_DEFAULTS, ehNamespaceName, cidrOverlaps } from "./azure-aks-naming.js";
10
10
  import { readClusterState, writeClusterState, requireCluster } from "./azure-aks-state.js";
11
11
  import { findAvailableSubnetCidr, findAksVnet } from "./azure-aks-network.js";
12
12
 
@@ -358,6 +358,15 @@ export async function aksPostgresReplicaCreate(opts = {}) {
358
358
 
359
359
  const source = JSON.parse(sourceJson);
360
360
  const sourceId = source.id;
361
+ const sourceTier = source.sku?.tier;
362
+
363
+ // Burstable tier doesn't support read replicas
364
+ if (sourceTier === "Burstable") {
365
+ console.error(ERR(`\n ✗ Read replicas require General Purpose or Memory Optimized tier`));
366
+ hint(` Current tier: ${sourceTier} (${source.sku?.name})`);
367
+ hint(` Upgrade with: az postgres flexible-server update -g ${rg} -n ${sourceServer} --tier GeneralPurpose --sku-name Standard_D2ds_v4`);
368
+ process.exit(1);
369
+ }
361
370
 
362
371
  // Check if replica already exists
363
372
  const { exitCode: replicaExists } = await execa("az", [
@@ -371,16 +380,174 @@ export async function aksPostgresReplicaCreate(opts = {}) {
371
380
  return;
372
381
  }
373
382
 
383
+ // Cross-region replica needs VNet + private DNS in target region
384
+ const sourceLocation = (source.location || "").toLowerCase().replace(/\s/g, "");
385
+ const isCrossRegion = sourceLocation !== targetRegion.toLowerCase().replace(/\s/g, "");
386
+ let replicaSubnetId = null;
387
+ let replicaDnsZone = null;
388
+
389
+ if (isCrossRegion) {
390
+ hint("Setting up cross-region networking…\n");
391
+
392
+ const vnetName = `fops-${clusterName}-vnet-${targetRegion.replace(/[^a-z]/g, "")}`;
393
+ const subnetName = "postgres-subnet";
394
+
395
+ // Get source VNet CIDR to avoid overlap
396
+ const sourceSubnetId = source.network?.delegatedSubnetResourceId;
397
+ let sourceVnetCidrs = [];
398
+ if (sourceSubnetId) {
399
+ const sourceVnetMatch = sourceSubnetId.match(/resourceGroups\/([^/]+)\/providers\/Microsoft.Network\/virtualNetworks\/([^/]+)/i);
400
+ if (sourceVnetMatch) {
401
+ const { stdout: srcCidrs } = await execa("az", [
402
+ "network", "vnet", "show", "-g", sourceVnetMatch[1], "-n", sourceVnetMatch[2],
403
+ "--query", "addressSpace.addressPrefixes", "-o", "tsv", ...subArgs(sub),
404
+ ], { reject: false, timeout: 15000 });
405
+ sourceVnetCidrs = (srcCidrs || "").split("\n").filter(Boolean);
406
+ }
407
+ }
408
+
409
+ // Pick a non-overlapping CIDR (try 10.250, 10.251, 10.252...)
410
+ let vnetCidr = "10.250.0.0/16";
411
+ let subnetCidr = "10.250.1.0/24";
412
+ for (let i = 250; i < 255; i++) {
413
+ const testCidr = `10.${i}.0.0/16`;
414
+ const overlaps = sourceVnetCidrs.some(src => cidrOverlaps(testCidr, [src]));
415
+ if (!overlaps) {
416
+ vnetCidr = testCidr;
417
+ subnetCidr = `10.${i}.1.0/24`;
418
+ break;
419
+ }
420
+ }
421
+
422
+ // Create VNet in target region if not exists
423
+ const { exitCode: vnetExists } = await execa("az", [
424
+ "network", "vnet", "show", "-g", rg, "-n", vnetName, "--output", "none", ...subArgs(sub),
425
+ ], { reject: false, timeout: 15000 });
426
+
427
+ if (vnetExists !== 0) {
428
+ hint(`Creating VNet "${vnetName}" in ${targetRegion}…`);
429
+ await execa("az", [
430
+ "network", "vnet", "create",
431
+ "-g", rg, "-n", vnetName, "--location", targetRegion,
432
+ "--address-prefix", vnetCidr, "--output", "none", ...subArgs(sub),
433
+ ], { timeout: 60000 });
434
+ console.log(OK(` ✓ VNet "${vnetName}" created`));
435
+ } else {
436
+ console.log(OK(` ✓ VNet "${vnetName}" exists`));
437
+ }
438
+
439
+ // Create postgres subnet with delegation
440
+ const { exitCode: subnetExists } = await execa("az", [
441
+ "network", "vnet", "subnet", "show",
442
+ "-g", rg, "--vnet-name", vnetName, "-n", subnetName, "--output", "none", ...subArgs(sub),
443
+ ], { reject: false, timeout: 15000 });
444
+
445
+ if (subnetExists !== 0) {
446
+ hint(`Creating subnet "${subnetName}"…`);
447
+ await execa("az", [
448
+ "network", "vnet", "subnet", "create",
449
+ "-g", rg, "--vnet-name", vnetName, "-n", subnetName,
450
+ "--address-prefixes", subnetCidr,
451
+ "--delegations", "Microsoft.DBforPostgreSQL/flexibleServers",
452
+ "--output", "none", ...subArgs(sub),
453
+ ], { timeout: 60000 });
454
+ console.log(OK(` ✓ Subnet "${subnetName}" created`));
455
+ }
456
+
457
+ // Get subnet ID
458
+ const { stdout: subnetJson } = await execa("az", [
459
+ "network", "vnet", "subnet", "show",
460
+ "-g", rg, "--vnet-name", vnetName, "-n", subnetName, "--output", "json", ...subArgs(sub),
461
+ ], { timeout: 15000 });
462
+ replicaSubnetId = JSON.parse(subnetJson).id;
463
+
464
+ // Create private DNS zone for replica
465
+ replicaDnsZone = `${replicaName}.private.postgres.database.azure.com`;
466
+ const { exitCode: dnsExists } = await execa("az", [
467
+ "network", "private-dns", "zone", "show", "-g", rg, "-n", replicaDnsZone, "--output", "none", ...subArgs(sub),
468
+ ], { reject: false, timeout: 15000 });
469
+
470
+ if (dnsExists !== 0) {
471
+ hint(`Creating private DNS zone…`);
472
+ await execa("az", [
473
+ "network", "private-dns", "zone", "create", "-g", rg, "-n", replicaDnsZone, "--output", "none", ...subArgs(sub),
474
+ ], { timeout: 60000 });
475
+
476
+ // Link DNS zone to the replica VNet
477
+ await execa("az", [
478
+ "network", "private-dns", "link", "vnet", "create",
479
+ "-g", rg, "--zone-name", replicaDnsZone, "--name", `${vnetName}-link`,
480
+ "--virtual-network", vnetName, "--registration-enabled", "false", "--output", "none", ...subArgs(sub),
481
+ ], { timeout: 60000 });
482
+ console.log(OK(` ✓ Private DNS zone configured`));
483
+ }
484
+
485
+ // Set up VNet peering between source and replica VNets
486
+ const sourceVnetMatch = sourceSubnetId?.match(/resourceGroups\/([^/]+)\/providers\/Microsoft.Network\/virtualNetworks\/([^/]+)/i);
487
+ if (sourceVnetMatch) {
488
+ const sourceVnetRg = sourceVnetMatch[1];
489
+ const sourceVnetName = sourceVnetMatch[2];
490
+
491
+ // Get full VNet IDs
492
+ const { stdout: sourceVnetJson } = await execa("az", [
493
+ "network", "vnet", "show", "-g", sourceVnetRg, "-n", sourceVnetName, "--query", "id", "-o", "tsv", ...subArgs(sub),
494
+ ], { reject: false, timeout: 15000 });
495
+ const sourceVnetId = (sourceVnetJson || "").trim();
496
+
497
+ const { stdout: replicaVnetJson } = await execa("az", [
498
+ "network", "vnet", "show", "-g", rg, "-n", vnetName, "--query", "id", "-o", "tsv", ...subArgs(sub),
499
+ ], { reject: false, timeout: 15000 });
500
+ const replicaVnetId = (replicaVnetJson || "").trim();
501
+
502
+ if (sourceVnetId && replicaVnetId) {
503
+ const peeringName1 = `${sourceVnetName}-to-${vnetName}`;
504
+ const peeringName2 = `${vnetName}-to-${sourceVnetName}`;
505
+
506
+ // Check if peering already exists
507
+ const { exitCode: peering1Exists } = await execa("az", [
508
+ "network", "vnet", "peering", "show",
509
+ "-g", sourceVnetRg, "--vnet-name", sourceVnetName, "-n", peeringName1, "--output", "none", ...subArgs(sub),
510
+ ], { reject: false, timeout: 15000 });
511
+
512
+ if (peering1Exists !== 0) {
513
+ hint(`Creating VNet peering…`);
514
+ // Source -> Replica peering
515
+ await execa("az", [
516
+ "network", "vnet", "peering", "create",
517
+ "-g", sourceVnetRg, "--vnet-name", sourceVnetName, "-n", peeringName1,
518
+ "--remote-vnet", replicaVnetId, "--allow-vnet-access", "--output", "none", ...subArgs(sub),
519
+ ], { timeout: 60000 });
520
+
521
+ // Replica -> Source peering
522
+ await execa("az", [
523
+ "network", "vnet", "peering", "create",
524
+ "-g", rg, "--vnet-name", vnetName, "-n", peeringName2,
525
+ "--remote-vnet", sourceVnetId, "--allow-vnet-access", "--output", "none", ...subArgs(sub),
526
+ ], { timeout: 60000 });
527
+ console.log(OK(` ✓ VNet peering configured`));
528
+ } else {
529
+ console.log(OK(` ✓ VNet peering exists`));
530
+ }
531
+ }
532
+ }
533
+
534
+ console.log("");
535
+ }
536
+
374
537
  hint("Creating read replica (this takes 10-15 minutes)…\n");
375
538
 
376
- const { exitCode, stderr } = await execa("az", [
539
+ const replicaArgs = [
377
540
  "postgres", "flexible-server", "replica", "create",
378
541
  "--replica-name", replicaName,
379
542
  "--resource-group", rg,
380
543
  "--source-server", sourceId,
381
544
  "--location", targetRegion,
382
- "--output", "json", ...subArgs(sub),
383
- ], { timeout: 900000, reject: false });
545
+ ];
546
+ if (replicaSubnetId) replicaArgs.push("--subnet", replicaSubnetId);
547
+ if (replicaDnsZone) replicaArgs.push("--private-dns-zone", replicaDnsZone);
548
+ replicaArgs.push("--output", "json", ...subArgs(sub));
549
+
550
+ const { exitCode, stderr } = await execa("az", replicaArgs, { timeout: 900000, reject: false });
384
551
 
385
552
  if (exitCode !== 0) {
386
553
  const errMsg = (stderr || "").split("\n").find(l => l.includes("ERROR")) || stderr;
@@ -6,8 +6,8 @@
6
6
 
7
7
  import crypto from "node:crypto";
8
8
  import { OK, WARN, hint, subArgs } from "./azure.js";
9
- import { pgServerName } from "./azure-aks-naming.js";
10
- import { readClusterState } from "./azure-aks-state.js";
9
+ import { pgServerName, HA_REPLICA_REGIONS } from "./azure-aks-naming.js";
10
+ import { readClusterState, writeClusterState } from "./azure-aks-state.js";
11
11
 
12
12
  // ── Helm Repositories ─────────────────────────────────────────────────────────
13
13
 
@@ -51,14 +51,17 @@ export async function reconcileStorageAccount(ctx) {
51
51
  ...subArgs(sub),
52
52
  ], { reject: false, timeout: 30000 });
53
53
 
54
+ // HA SKU preference: RAGZRS (zone + geo redundant) > RAGRS (geo redundant)
55
+ const haSku = "Standard_RAGZRS";
56
+ const fallbackSku = "Standard_RAGRS";
57
+
54
58
  if (saExists !== 0) {
55
- // Create storage account
56
- hint(`Creating Storage Account "${storageAccountName}"…`);
57
- const { exitCode, stderr } = await execa("az", [
59
+ hint(`Creating Storage Account "${storageAccountName}" with cross-region HA (${haSku})…`);
60
+ let { exitCode, stderr } = await execa("az", [
58
61
  "storage", "account", "create",
59
62
  "--name", storageAccountName,
60
63
  "--resource-group", rg,
61
- "--sku", "Standard_LRS",
64
+ "--sku", haSku,
62
65
  "--kind", "StorageV2",
63
66
  "--https-only", "true",
64
67
  "--min-tls-version", "TLS1_2",
@@ -67,13 +70,72 @@ export async function reconcileStorageAccount(ctx) {
67
70
  ...subArgs(sub),
68
71
  ], { reject: false, timeout: 120000 });
69
72
 
73
+ // Fallback to RAGRS if RAGZRS not available in region
74
+ if (exitCode !== 0 && (stderr || "").includes("not supported")) {
75
+ hint(`RAGZRS not available, falling back to ${fallbackSku}…`);
76
+ ({ exitCode, stderr } = await execa("az", [
77
+ "storage", "account", "create",
78
+ "--name", storageAccountName,
79
+ "--resource-group", rg,
80
+ "--sku", fallbackSku,
81
+ "--kind", "StorageV2",
82
+ "--https-only", "true",
83
+ "--min-tls-version", "TLS1_2",
84
+ "--allow-blob-public-access", "false",
85
+ "--output", "none",
86
+ ...subArgs(sub),
87
+ ], { reject: false, timeout: 120000 }));
88
+ }
89
+
70
90
  if (exitCode !== 0) {
71
91
  console.log(WARN(` ⚠ Storage Account creation failed: ${(stderr || "").split("\n")[0]}`));
72
92
  return;
73
93
  }
74
- console.log(OK(` ✓ Storage Account "${storageAccountName}" created`));
94
+ console.log(OK(` ✓ Storage Account "${storageAccountName}" created (cross-region HA)`));
75
95
  } else {
76
- console.log(OK(` ✓ Storage Account "${storageAccountName}" exists`));
96
+ const { stdout: skuJson } = await execa("az", [
97
+ "storage", "account", "show",
98
+ "--name", storageAccountName,
99
+ "--resource-group", rg,
100
+ "--query", "sku.name",
101
+ "-o", "tsv",
102
+ ...subArgs(sub),
103
+ ], { reject: false, timeout: 30000 });
104
+ const currentSku = (skuJson || "").trim();
105
+
106
+ // Upgrade path: LRS → RAGZRS, ZRS → RAGZRS, RAGRS → RAGZRS
107
+ const upgradeNeeded = ["Standard_LRS", "Standard_ZRS", "Standard_RAGRS"].includes(currentSku);
108
+ if (upgradeNeeded) {
109
+ hint(`Upgrading Storage Account from ${currentSku} to ${haSku}…`);
110
+ let { exitCode } = await execa("az", [
111
+ "storage", "account", "update",
112
+ "--name", storageAccountName,
113
+ "--resource-group", rg,
114
+ "--sku", haSku,
115
+ "--output", "none",
116
+ ...subArgs(sub),
117
+ ], { reject: false, timeout: 120000 });
118
+
119
+ // Fallback if RAGZRS upgrade fails (region limitation or LRS→RAGZRS not allowed)
120
+ if (exitCode !== 0 && currentSku === "Standard_LRS") {
121
+ ({ exitCode } = await execa("az", [
122
+ "storage", "account", "update",
123
+ "--name", storageAccountName,
124
+ "--resource-group", rg,
125
+ "--sku", fallbackSku,
126
+ "--output", "none",
127
+ ...subArgs(sub),
128
+ ], { reject: false, timeout: 120000 }));
129
+ }
130
+
131
+ if (exitCode === 0) {
132
+ console.log(OK(` ✓ Storage Account "${storageAccountName}" upgraded to cross-region HA`));
133
+ } else {
134
+ console.log(WARN(` ⚠ Could not upgrade Storage Account SKU (current: ${currentSku})`));
135
+ }
136
+ } else {
137
+ console.log(OK(` ✓ Storage Account "${storageAccountName}" exists (${currentSku})`));
138
+ }
77
139
  }
78
140
 
79
141
  // 2. Get storage account key
@@ -494,3 +556,236 @@ export async function reconcileAcrWebhooks(ctx) {
494
556
  }
495
557
  }
496
558
  }
559
+
560
+ // ── Cross-region storage object replication (HA) ──────────────────────────────
561
+
562
+ export async function reconcileStorageReplication(ctx) {
563
+ const { execa, clusterName, rg, sub, cluster } = ctx;
564
+ const location = (cluster?.location || "uaenorth").toLowerCase().replace(/\s/g, "");
565
+ const replicaRegion = ctx.opts?.haRegion || HA_REPLICA_REGIONS[location];
566
+
567
+ if (!replicaRegion) {
568
+ console.log(WARN(` ⚠ No HA replica region configured for ${location}`));
569
+ return;
570
+ }
571
+
572
+ const sourceAccountName = `fops${clusterName.replace(/-/g, "")}`.toLowerCase().slice(0, 24);
573
+ const destAccountName = `fops${clusterName.replace(/-/g, "")}ha`.toLowerCase().slice(0, 24);
574
+ const containers = ["foundation", "vault"];
575
+
576
+ hint(`Setting up cross-region storage replication (${location} → ${replicaRegion})…`);
577
+
578
+ // 1. Check if source account exists
579
+ const { exitCode: srcExists } = await execa("az", [
580
+ "storage", "account", "show",
581
+ "--name", sourceAccountName,
582
+ "--resource-group", rg,
583
+ "--output", "none",
584
+ ...subArgs(sub),
585
+ ], { reject: false, timeout: 30000 });
586
+
587
+ if (srcExists !== 0) {
588
+ console.log(WARN(` ⚠ Source storage account "${sourceAccountName}" not found — skipping replication`));
589
+ return;
590
+ }
591
+
592
+ // 2. Create destination storage account in replica region
593
+ const { exitCode: destExists } = await execa("az", [
594
+ "storage", "account", "show",
595
+ "--name", destAccountName,
596
+ "--resource-group", rg,
597
+ "--output", "none",
598
+ ...subArgs(sub),
599
+ ], { reject: false, timeout: 30000 });
600
+
601
+ if (destExists !== 0) {
602
+ hint(`Creating HA storage account "${destAccountName}" in ${replicaRegion}…`);
603
+ // Try RAGZRS first, fallback to RAGRS
604
+ let { exitCode, stderr } = await execa("az", [
605
+ "storage", "account", "create",
606
+ "--name", destAccountName,
607
+ "--resource-group", rg,
608
+ "--location", replicaRegion,
609
+ "--sku", "Standard_RAGZRS",
610
+ "--kind", "StorageV2",
611
+ "--https-only", "true",
612
+ "--min-tls-version", "TLS1_2",
613
+ "--allow-blob-public-access", "false",
614
+ "--output", "none",
615
+ ...subArgs(sub),
616
+ ], { reject: false, timeout: 120000 });
617
+
618
+ if (exitCode !== 0 && (stderr || "").includes("not supported")) {
619
+ ({ exitCode, stderr } = await execa("az", [
620
+ "storage", "account", "create",
621
+ "--name", destAccountName,
622
+ "--resource-group", rg,
623
+ "--location", replicaRegion,
624
+ "--sku", "Standard_RAGRS",
625
+ "--kind", "StorageV2",
626
+ "--https-only", "true",
627
+ "--min-tls-version", "TLS1_2",
628
+ "--allow-blob-public-access", "false",
629
+ "--output", "none",
630
+ ...subArgs(sub),
631
+ ], { reject: false, timeout: 120000 }));
632
+ }
633
+
634
+ if (exitCode !== 0) {
635
+ console.log(WARN(` ⚠ HA storage account creation failed: ${(stderr || "").split("\n")[0]}`));
636
+ return;
637
+ }
638
+ console.log(OK(` ✓ HA storage account "${destAccountName}" created in ${replicaRegion}`));
639
+ } else {
640
+ console.log(OK(` ✓ HA storage account "${destAccountName}" exists`));
641
+ }
642
+
643
+ // 3. Enable blob versioning on both accounts (required for object replication)
644
+ for (const account of [sourceAccountName, destAccountName]) {
645
+ await execa("az", [
646
+ "storage", "account", "blob-service-properties", "update",
647
+ "--account-name", account,
648
+ "--resource-group", rg,
649
+ "--enable-versioning", "true",
650
+ "--output", "none",
651
+ ...subArgs(sub),
652
+ ], { reject: false, timeout: 60000 });
653
+ }
654
+ console.log(OK(" ✓ Blob versioning enabled on both accounts"));
655
+
656
+ // 4. Get destination account key and create containers
657
+ const { stdout: destKeyJson } = await execa("az", [
658
+ "storage", "account", "keys", "list",
659
+ "--account-name", destAccountName,
660
+ "--resource-group", rg,
661
+ "--query", "[0].value",
662
+ "--output", "tsv",
663
+ ...subArgs(sub),
664
+ ], { timeout: 30000 });
665
+ const destKey = destKeyJson?.trim();
666
+
667
+ for (const container of containers) {
668
+ const { exitCode: containerExists } = await execa("az", [
669
+ "storage", "container", "show",
670
+ "--name", container,
671
+ "--account-name", destAccountName,
672
+ "--account-key", destKey,
673
+ "--output", "none",
674
+ ], { reject: false, timeout: 30000 });
675
+
676
+ if (containerExists !== 0) {
677
+ await execa("az", [
678
+ "storage", "container", "create",
679
+ "--name", container,
680
+ "--account-name", destAccountName,
681
+ "--account-key", destKey,
682
+ "--output", "none",
683
+ ], { reject: false, timeout: 30000 });
684
+ console.log(OK(` ✓ Container "${container}" created in HA account`));
685
+ }
686
+ }
687
+
688
+ // 5. Set up object replication policy
689
+ const { stdout: srcIdJson } = await execa("az", [
690
+ "storage", "account", "show",
691
+ "--name", sourceAccountName,
692
+ "--resource-group", rg,
693
+ "--query", "id",
694
+ "-o", "tsv",
695
+ ...subArgs(sub),
696
+ ], { timeout: 15000 });
697
+ const srcId = srcIdJson?.trim();
698
+
699
+ const { stdout: destIdJson } = await execa("az", [
700
+ "storage", "account", "show",
701
+ "--name", destAccountName,
702
+ "--resource-group", rg,
703
+ "--query", "id",
704
+ "-o", "tsv",
705
+ ...subArgs(sub),
706
+ ], { timeout: 15000 });
707
+ const destId = destIdJson?.trim();
708
+
709
+ // Check if policy already exists
710
+ const { stdout: policiesJson } = await execa("az", [
711
+ "storage", "account", "or-policy", "list",
712
+ "--account-name", sourceAccountName,
713
+ "--resource-group", rg,
714
+ "--output", "json",
715
+ ...subArgs(sub),
716
+ ], { reject: false, timeout: 30000 });
717
+
718
+ const policies = JSON.parse(policiesJson || "[]");
719
+ const existingPolicy = policies.find(p => p.destinationAccount === destId);
720
+
721
+ if (!existingPolicy) {
722
+ hint("Creating object replication policy…");
723
+ // Build rules for each container
724
+ const rules = containers.map(c => ({
725
+ sourceContainer: c,
726
+ destinationContainer: c,
727
+ filters: { minCreationTime: new Date().toISOString() },
728
+ }));
729
+
730
+ const policyJson = JSON.stringify({
731
+ sourceAccount: srcId,
732
+ destinationAccount: destId,
733
+ rules,
734
+ });
735
+
736
+ const tmpFile = `/tmp/fops-or-policy-${clusterName}.json`;
737
+ const { writeFileSync, unlinkSync } = await import("node:fs");
738
+ writeFileSync(tmpFile, policyJson);
739
+
740
+ const { exitCode: policyCode, stderr: policyErr } = await execa("az", [
741
+ "storage", "account", "or-policy", "create",
742
+ "--account-name", destAccountName,
743
+ "--resource-group", rg,
744
+ "--source-account", srcId,
745
+ "--destination-account", destId,
746
+ "--policy", "@" + tmpFile,
747
+ "--output", "none",
748
+ ...subArgs(sub),
749
+ ], { reject: false, timeout: 60000 });
750
+
751
+ try { unlinkSync(tmpFile); } catch {}
752
+
753
+ if (policyCode === 0) {
754
+ console.log(OK(` ✓ Object replication policy created (${sourceAccountName} → ${destAccountName})`));
755
+ } else {
756
+ // Fallback: create policy without file
757
+ const { exitCode: fallbackCode } = await execa("az", [
758
+ "storage", "account", "or-policy", "create",
759
+ "--account-name", destAccountName,
760
+ "--resource-group", rg,
761
+ "--source-account", srcId,
762
+ "--destination-account", destId,
763
+ "--source-container", containers[0],
764
+ "--destination-container", containers[0],
765
+ "--output", "none",
766
+ ...subArgs(sub),
767
+ ], { reject: false, timeout: 60000 });
768
+
769
+ if (fallbackCode === 0) {
770
+ console.log(OK(` ✓ Object replication policy created for "${containers[0]}" container`));
771
+ } else {
772
+ console.log(WARN(` ⚠ Object replication policy failed: ${(policyErr || "").split("\n")[0]}`));
773
+ hint(` Create manually: az storage account or-policy create --account-name ${destAccountName} ...`);
774
+ }
775
+ }
776
+ } else {
777
+ console.log(OK(` ✓ Object replication policy already exists`));
778
+ }
779
+
780
+ // Save HA storage info to state
781
+ writeClusterState(clusterName, {
782
+ storageHA: {
783
+ sourceAccount: sourceAccountName,
784
+ destAccount: destAccountName,
785
+ destRegion: replicaRegion,
786
+ configuredAt: new Date().toISOString(),
787
+ },
788
+ });
789
+
790
+ console.log(OK(` ✓ Storage HA configured: ${location} → ${replicaRegion}`));
791
+ }
@@ -11,6 +11,7 @@ export {
11
11
  PG_DEFAULTS,
12
12
  EH_DEFAULTS,
13
13
  PG_REPLICA_REGIONS,
14
+ HA_REPLICA_REGIONS,
14
15
  pgServerName,
15
16
  kvName,
16
17
  ehNamespaceName,
@@ -66,6 +67,7 @@ export {
66
67
  OLD_PG_HOSTS,
67
68
  reconcileStorageAccount,
68
69
  reconcileStorageEngine,
70
+ reconcileStorageReplication,
69
71
  reconcileHelmRepos,
70
72
  reconcileHelmValues,
71
73
  reconcileAcrWebhooks,
@@ -78,7 +78,7 @@ export function resolveAuth0Config() {
78
78
  };
79
79
  const domain = get("MX_AUTH0_DOMAIN") || get("AUTH0_DOMAIN");
80
80
  const clientId = get("MX_AUTH0_CLIENT_ID") || get("AUTH0_CLIENT_ID");
81
- const clientSecret = get("MX_AUTH0_CLIENT_SECRET") || get("AUTH0_CLIENT_SECRET");
81
+ const clientSecret = get("MX_AUTH0_CLIENT_SECRET") || get("AUTH0_CLIENT_SECRET") || get("AUTH0_SECRET");
82
82
  const audience = get("MX_AUTH0_AUDIENCE") || get("AUTH0_AUDIENCE");
83
83
  if (domain && clientId) return { domain, clientId, clientSecret, audience };
84
84
  } catch { /* try next */ }