@njdamstra/appwrite-utils-cli 1.10.0 → 1.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,724 @@
1
+ import { Query } from "node-appwrite";
2
+ import yaml from "js-yaml";
3
+ import fs from "node:fs";
4
+ import path from "node:path";
5
+ import inquirer from "inquirer";
6
+ import chalk from "chalk";
7
+ import { MessageFormatter, tryAwaitWithRetry, } from "@njdamstra/appwrite-utils-helpers";
8
+ import { ProgressManager } from "../shared/progressManager.js";
9
+ import { MigrationPlanSchema, MigrationCheckpointSchema, suggestTargetType, generateBackupKey, } from "./migrateStringsTypes.js";
10
+ // ────────────────────────────────────────────────────────
11
+ // Phase 1: Analyze — offline, reads local YAML configs
12
+ // ────────────────────────────────────────────────────────
13
/**
 * Phase 1 (offline): scan the local config for string attributes and write
 * an editable YAML migration plan.
 *
 * @param {object} config - Loaded appwrite-utils config (databases, collections/tables).
 * @param {object} [options] - Optional settings; `outputPath` overrides the plan location.
 * @returns {object} The generated plan object (also written to disk as YAML).
 */
export function analyzeStringAttributes(config, options = {}) {
  // Collections and tables are treated uniformly here.
  const allCollections = [
    ...(config.collections || []),
    ...(config.tables || []),
  ];
  const allDatabases = config.databases || [];
  const entries = [];
  const visited = new Set(); // "dbId:collId" pairs already analyzed

  for (const db of allDatabases) {
    // A collection belongs to this database when it names it explicitly via
    // databaseId/databaseIds, or names no database at all (= applies to every db).
    const scoped = allCollections.filter((candidate) => {
      if (candidate.databaseId) {
        return candidate.databaseId === db.$id;
      }
      if (candidate.databaseIds?.length) {
        return candidate.databaseIds.includes(db.$id);
      }
      return true;
    });

    for (const coll of scoped) {
      const collId = coll.$id || coll.name;
      const dedupKey = `${db.$id}:${collId}`;
      if (visited.has(dedupKey)) {
        continue;
      }
      visited.add(dedupKey);

      const indexes = coll.indexes || [];
      for (const attr of coll.attributes || []) {
        if (attr.type !== "string") {
          continue;
        }
        const size = attr.size || 50; // fall back to a conservative default
        const isEncrypted = Boolean(attr.encrypt);
        const isRequired = Boolean(attr.required);
        const isArray = Boolean(attr.array);
        const hasDefault = attr.xdefault !== undefined && attr.xdefault !== null;

        // Indexes referencing this attribute must be dropped/recreated during
        // migration, so record their keys up front.
        const affectedIndexes = indexes
          .filter((idx) => idx.attributes.includes(attr.key))
          .map((idx) => idx.key);
        const hasIndex = affectedIndexes.length > 0;
        const suggested = suggestTargetType(size, hasIndex);

        // NOTE: key order matters — yaml.dump is invoked with sortKeys:false,
        // so insertion order is the plan-file order.
        const entry = {
          databaseId: db.$id,
          databaseName: db.name,
          collectionId: collId,
          collectionName: coll.name,
          attributeKey: attr.key,
          currentType: "string",
          currentSize: size,
          isRequired,
          isArray,
          isEncrypted,
          hasDefault,
          defaultValue: hasDefault ? attr.xdefault : undefined,
          suggestedType: suggested,
          targetType: suggested,
          targetSize: suggested === "varchar" ? size : undefined,
          action: isEncrypted ? "skip" : "migrate",
          skipReason: isEncrypted ? "encrypted" : undefined,
          indexesAffected: affectedIndexes,
        };

        // Indexed attributes stay varchar regardless of the suggestion —
        // other target types cannot carry the existing index.
        if (hasIndex && suggested !== "varchar" && !isEncrypted) {
          entry.targetType = "varchar";
          entry.targetSize = size;
        }
        entries.push(entry);
      }
    }
  }

  const toMigrate = entries.filter((e) => e.action === "migrate").length;
  const toSkip = entries.filter((e) => e.action === "skip").length;
  const uniqueDbs = new Set(entries.map((e) => e.databaseId));
  const uniqueColls = new Set(entries.map((e) => `${e.databaseId}:${e.collectionId}`));

  const plan = {
    version: 1,
    generatedAt: new Date().toISOString(),
    appwriteEndpoint: config.appwriteEndpoint,
    appwriteProject: config.appwriteProject,
    summary: {
      totalStringAttributes: entries.length,
      toMigrate,
      toSkip,
      databaseCount: uniqueDbs.size,
      collectionCount: uniqueColls.size,
    },
    entries,
  };

  // Persist as YAML so the user can tweak targetType/action before executing.
  const outputPath = options.outputPath || path.join(process.cwd(), "migrate-strings-plan.yaml");
  const yamlContent = yaml.dump(plan, { lineWidth: 120, noRefs: true, sortKeys: false });
  fs.writeFileSync(outputPath, yamlContent, "utf8");

  MessageFormatter.info(`Migration plan written to ${outputPath}`, {
    prefix: "Analyze",
  });
  printPlanSummary(plan);
  return plan;
}
119
/**
 * Pretty-prints the analyze-phase summary: counts, target-type distribution,
 * and skip reasons. Console output only; no side effects on the plan.
 */
function printPlanSummary(plan) {
  const { summary } = plan;
  console.log("");
  console.log(chalk.bold("String Attribute Migration Plan Summary"));
  console.log(chalk.gray("─".repeat(50)));
  console.log(` Total string attributes: ${chalk.yellow(summary.totalStringAttributes)}`);
  console.log(` To migrate: ${chalk.green(summary.toMigrate)}`);
  console.log(` To skip: ${chalk.red(summary.toSkip)}`);
  console.log(` Databases: ${summary.databaseCount}`);
  console.log(` Collections: ${summary.collectionCount}`);
  console.log("");

  // Tally chosen target types across entries that will actually migrate.
  const typeDistribution = {};
  for (const entry of plan.entries) {
    if (entry.action !== "migrate") {
      continue;
    }
    typeDistribution[entry.targetType] = (typeDistribution[entry.targetType] || 0) + 1;
  }
  if (Object.keys(typeDistribution).length > 0) {
    console.log(chalk.bold(" Target type distribution:"));
    for (const [type, count] of Object.entries(typeDistribution)) {
      console.log(` ${type}: ${count}`);
    }
    console.log("");
  }

  // Tally skip reasons (e.g. "encrypted") across skipped entries.
  const skipReasons = {};
  for (const entry of plan.entries) {
    if (entry.action === "skip" && entry.skipReason) {
      skipReasons[entry.skipReason] = (skipReasons[entry.skipReason] || 0) + 1;
    }
  }
  if (Object.keys(skipReasons).length > 0) {
    console.log(chalk.bold(" Skip reasons:"));
    for (const [reason, count] of Object.entries(skipReasons)) {
      console.log(` ${reason}: ${count}`);
    }
    console.log("");
  }

  console.log(chalk.dim(" Edit the YAML plan file to change targetType or action before executing."));
  console.log("");
}
163
+ // ────────────────────────────────────────────────────────
164
+ // Phase 2: Execute — server connection required
165
+ // ────────────────────────────────────────────────────────
166
/**
 * Phase 2 (online): apply a previously generated migration plan against the
 * server, collection by collection, with per-collection confirmation prompts
 * and a resumable JSON checkpoint written next to the plan file.
 *
 * @param {object} adapter - Database adapter providing attribute/row/index operations.
 * @param {object} options - `planPath` (required), plus optional `checkpointPath`,
 *   `batchSize`, `batchDelayMs`, `dryRun`, `keepBackups` (defaults to true).
 * @returns {Promise<{succeeded: number, failed: number, skipped: number}>}
 */
export async function executeMigrationPlan(adapter, options) {
  // Load and schema-validate the YAML plan.
  const planRaw = yaml.load(fs.readFileSync(options.planPath, "utf8"));
  const plan = MigrationPlanSchema.parse(planRaw);

  const migrateEntries = plan.entries.filter((e) => e.action === "migrate");
  if (migrateEntries.length === 0) {
    MessageFormatter.info("No attributes to migrate in plan.", {
      prefix: "Execute",
    });
    return { succeeded: 0, failed: 0, skipped: plan.entries.length };
  }

  // Checkpoint lives next to the plan unless an explicit path was supplied.
  const checkpointPath = options.checkpointPath ||
    options.planPath.replace(/\.ya?ml$/, ".checkpoint.json");
  const checkpoint = loadOrCreateCheckpoint(checkpointPath, options.planPath);
  const batchSize = options.batchSize || 100;
  const batchDelayMs = options.batchDelayMs || 50;

  // Group migratable entries per database/collection pair.
  const groups = new Map();
  for (const entry of migrateEntries) {
    const key = `${entry.databaseId}:${entry.collectionId}`;
    const bucket = groups.get(key);
    if (bucket) {
      bucket.push(entry);
    } else {
      groups.set(key, [entry]);
    }
  }

  let succeeded = 0;
  let failed = 0;
  let skipped = plan.entries.filter((e) => e.action === "skip").length;

  MessageFormatter.info(`Executing migration: ${migrateEntries.length} attributes across ${groups.size} collections`, { prefix: "Execute" });

  if (options.dryRun) {
    MessageFormatter.info("DRY RUN — no changes will be made", {
      prefix: "Execute",
    });
    printDryRunSummary(plan);
    return { succeeded: 0, failed: 0, skipped: plan.entries.length };
  }

  for (const [, groupEntries] of groups) {
    const first = groupEntries[0];
    console.log("");
    console.log(chalk.bold(`Collection: ${first.collectionName} (${first.databaseName}/${first.collectionId})`));
    console.log(` Attributes to migrate: ${groupEntries.map((e) => e.attributeKey).join(", ")}`);

    // Ask before touching each collection.
    const { proceed } = await inquirer.prompt([
      {
        type: "list",
        name: "proceed",
        message: `Migrate ${groupEntries.length} attribute(s) in ${first.collectionName}?`,
        choices: [
          { name: "Yes, proceed", value: "yes" },
          { name: "Skip this collection", value: "skip" },
          { name: "Abort entire migration", value: "abort" },
        ],
      },
    ]);
    if (proceed === "abort") {
      MessageFormatter.info("Migration aborted by user.", {
        prefix: "Execute",
      });
      break;
    }
    if (proceed === "skip") {
      skipped += groupEntries.length;
      continue;
    }

    // Run each attribute through the phased migration pipeline.
    for (const entry of groupEntries) {
      const cpEntry = getOrCreateCheckpointEntry(checkpoint, entry);
      if (cpEntry.phase === "completed") {
        // A previous run already finished this attribute.
        MessageFormatter.info(` ${entry.attributeKey}: already completed (checkpoint)`, { prefix: "Execute" });
        succeeded++;
        continue;
      }
      try {
        await migrateOneAttribute(adapter, entry, cpEntry, checkpoint, checkpointPath, {
          batchSize,
          batchDelayMs,
          keepBackups: options.keepBackups ?? true,
        });
        succeeded++;
        MessageFormatter.success(` ${entry.attributeKey}: migrated to ${entry.targetType}`, { prefix: "Execute" });
      }
      catch (err) {
        // Record the failure in the checkpoint so a rerun can resume here.
        failed++;
        cpEntry.phase = "failed";
        cpEntry.error = err.message || String(err);
        saveCheckpoint(checkpoint, checkpointPath);
        MessageFormatter.error(` ${entry.attributeKey}: FAILED — ${cpEntry.error}`, undefined, { prefix: "Execute" });
      }
    }

    // Offer to update the local YAML config when anything in this group succeeded.
    const successInGroup = groupEntries.filter((e) => {
      return findCheckpointEntry(checkpoint, e)?.phase === "completed";
    }).length;
    if (successInGroup > 0) {
      const { updateYaml } = await inquirer.prompt([
        {
          type: "confirm",
          name: "updateYaml",
          message: `Update local YAML config for ${first.collectionName}? (change type: string → new types)`,
          default: false,
        },
      ]);
      if (updateYaml) {
        MessageFormatter.info("Local YAML update: use your editor to change 'type: string' to the new types in your collection YAML files.", { prefix: "Execute" });
      }
    }
  }

  // Final tally.
  console.log("");
  console.log(chalk.bold("Migration Results"));
  console.log(chalk.gray("─".repeat(40)));
  console.log(` Succeeded: ${chalk.green(succeeded)}`);
  console.log(` Failed: ${chalk.red(failed)}`);
  console.log(` Skipped: ${chalk.yellow(skipped)}`);
  console.log("");
  if (failed > 0) {
    MessageFormatter.info(`Checkpoint saved at ${checkpointPath} — rerun to resume failed attributes.`, { prefix: "Execute" });
  }
  return { succeeded, failed, skipped };
}
288
/**
 * Runs the phased pipeline that converts one string attribute to its target type:
 *   1. create a backup attribute (original string type, optional)
 *   2. copy data into the backup
 *   3. verify the backup copy
 *   4. drop dependent indexes, then the original attribute
 *   5. recreate the attribute with the target type (optional for now)
 *   6. copy data back from the backup
 *   7. verify the restored data
 *   8. recreate indexes; delete the backup unless opts.keepBackups
 *   9. restore the `required` flag and mark the checkpoint completed
 * Every phase transition is persisted to the checkpoint file, so a crashed or
 * failed run resumes exactly where it stopped.
 *
 * @throws {Error} when a created attribute never becomes available, or a
 *   verification step finds mismatched data.
 */
async function migrateOneAttribute(adapter, entry, cpEntry, checkpoint, checkpointPath, opts) {
  const { databaseId, collectionId, attributeKey, targetType, targetSize } = entry;
  const backupKey = cpEntry.backupKey;

  // True when the checkpoint has not yet reached `phase`.
  const needs = (phase) => phaseIndex(cpEntry.phase) < phaseIndex(phase);
  // Persist phase progress immediately so a crash never loses a step.
  const advance = (phase) => {
    cpEntry.phase = phase;
    checkpoint.lastUpdatedAt = new Date().toISOString();
    saveCheckpoint(checkpoint, checkpointPath);
  };

  // Step 1: backup attribute — same string type, always optional.
  if (needs("backup_created")) {
    MessageFormatter.info(` Creating backup attribute ${backupKey}...`, {
      prefix: "Migrate",
    });
    await tryAwaitWithRetry(() => adapter.createAttribute({
      databaseId,
      tableId: collectionId,
      key: backupKey,
      type: "string", // backup keeps original type
      size: entry.currentSize,
      required: false, // always optional for backup
      array: entry.isArray,
    }));
    const available = await waitForAttribute(adapter, databaseId, collectionId, backupKey);
    if (!available) {
      throw new Error(`Backup attribute ${backupKey} stuck`);
    }
    advance("backup_created");
  }

  // Step 2: copy original data into the backup.
  if (needs("data_copied_to_backup")) {
    MessageFormatter.info(` Copying data to backup ${backupKey}...`, {
      prefix: "Migrate",
    });
    await copyAttributeData(adapter, databaseId, collectionId, attributeKey, backupKey, opts.batchSize, opts.batchDelayMs);
    advance("data_copied_to_backup");
  }

  // Step 3: verify the backup before destroying the original.
  if (needs("data_verified_backup")) {
    await verifyDataCopy(adapter, databaseId, collectionId, attributeKey, backupKey);
    advance("data_verified_backup");
  }

  // Step 4: drop dependent indexes, then delete the original attribute.
  if (needs("original_deleted")) {
    if (entry.indexesAffected.length > 0) {
      MessageFormatter.info(` Removing ${entry.indexesAffected.length} affected index(es)...`, {
        prefix: "Migrate",
      });
      await saveAndDeleteIndexes(adapter, databaseId, collectionId, entry.indexesAffected, cpEntry);
      saveCheckpoint(checkpoint, checkpointPath); // storedIndexes just changed
    }
    MessageFormatter.info(` Deleting original attribute ${attributeKey}...`, {
      prefix: "Migrate",
    });
    await tryAwaitWithRetry(() => adapter.deleteAttribute({
      databaseId,
      tableId: collectionId,
      key: attributeKey,
    }));
    await waitForAttributeGone(adapter, databaseId, collectionId, attributeKey);
    advance("original_deleted");
  }

  // Step 5: recreate the attribute with the target type. Created optional —
  // data must be copied back before `required` can be restored.
  if (needs("new_attr_created")) {
    MessageFormatter.info(` Creating new attribute ${attributeKey} as ${targetType}...`, { prefix: "Migrate" });
    const createParams = {
      databaseId,
      tableId: collectionId,
      key: attributeKey,
      type: targetType,
      required: false,
      array: entry.isArray,
    };
    if (targetType === "varchar" && targetSize) {
      createParams.size = targetSize;
    }
    if (entry.hasDefault && entry.defaultValue !== undefined) {
      createParams.default = entry.defaultValue;
    }
    await tryAwaitWithRetry(() => adapter.createAttribute(createParams));
    const available = await waitForAttribute(adapter, databaseId, collectionId, attributeKey);
    if (!available) {
      throw new Error(`New attribute ${attributeKey} stuck after creation`);
    }
    advance("new_attr_created");
  }

  // Step 6: restore data from the backup.
  if (needs("data_copied_back")) {
    MessageFormatter.info(` Copying data back from backup...`, {
      prefix: "Migrate",
    });
    await copyAttributeData(adapter, databaseId, collectionId, backupKey, attributeKey, opts.batchSize, opts.batchDelayMs);
    advance("data_copied_back");
  }

  // Step 7: verify the restored data against the backup.
  if (needs("data_verified_final")) {
    await verifyDataCopy(adapter, databaseId, collectionId, backupKey, attributeKey);
    advance("data_verified_final");
  }

  // Step 8: recreate indexes and, unless keepBackups, drop the backup.
  if (needs("backup_deleted")) {
    if (cpEntry.storedIndexes.length > 0) {
      MessageFormatter.info(` Recreating ${cpEntry.storedIndexes.length} index(es)...`, { prefix: "Migrate" });
      await recreateIndexes(adapter, databaseId, collectionId, cpEntry);
    }
    if (!opts.keepBackups) {
      MessageFormatter.info(` Deleting backup attribute ${backupKey}...`, {
        prefix: "Migrate",
      });
      await tryAwaitWithRetry(() => adapter.deleteAttribute({
        databaseId,
        tableId: collectionId,
        key: backupKey,
      }));
      await waitForAttributeGone(adapter, databaseId, collectionId, backupKey);
    }
    advance("backup_deleted");
  }

  // Step 9: restore `required` now that the data is back in place.
  if (entry.isRequired) {
    try {
      await tryAwaitWithRetry(() => adapter.updateAttribute({
        databaseId,
        tableId: collectionId,
        key: attributeKey,
        required: true,
      }));
    }
    catch {
      // Non-fatal — the attribute migrated; it is just left optional.
      MessageFormatter.info(` Warning: could not set ${attributeKey} back to required`, { prefix: "Migrate" });
    }
  }
  advance("completed");
}
424
+ // ────────────────────────────────────────────────────────
425
+ // Helper: copy attribute data via cursor pagination
426
+ // ────────────────────────────────────────────────────────
427
/**
 * Copies the value of `sourceKey` into `targetKey` for every row of the
 * collection, paginating with `Query.cursorAfter`. Uses one bulk upsert per
 * page when the adapter supports it, otherwise per-row updates. Rows whose
 * source value is undefined are left untouched.
 */
async function copyAttributeData(adapter, databaseId, collectionId, sourceKey, targetKey, batchSize, batchDelayMs) {
  let cursor;
  let copied = 0;

  // Fetch the row count once so a progress bar can be sized.
  const countRes = await tryAwaitWithRetry(() => adapter.listRows({
    databaseId,
    tableId: collectionId,
    queries: [Query.limit(1)],
  }));
  const totalDocs = countRes?.total ?? undefined;
  const progress = totalDocs
    ? ProgressManager.create(`copy-${sourceKey}-${targetKey}`, totalDocs, {
        title: ` Copy ${sourceKey} → ${targetKey}`,
      })
    : undefined;

  for (;;) {
    const queries = [Query.limit(batchSize)];
    if (cursor) {
      queries.push(Query.cursorAfter(cursor));
    }
    const res = await tryAwaitWithRetry(() => adapter.listRows({ databaseId, tableId: collectionId, queries }));
    const docs = res?.documents || res?.rows || [];
    if (docs.length === 0) {
      break;
    }

    if (adapter.supportsBulkOperations() && adapter.bulkUpsertRows) {
      // Bulk path: one upsert per page, skipping rows with no source value.
      const rows = docs
        .filter((d) => d[sourceKey] !== undefined)
        .map((d) => ({
          id: d.$id,
          data: { [targetKey]: d[sourceKey] },
        }));
      if (rows.length > 0) {
        await tryAwaitWithRetry(() => adapter.bulkUpsertRows({
          databaseId,
          tableId: collectionId,
          rows,
        }));
      }
    }
    else {
      // Fallback path: update rows one at a time.
      for (const doc of docs) {
        if (doc[sourceKey] === undefined) {
          continue;
        }
        await tryAwaitWithRetry(() => adapter.updateRow({
          databaseId,
          tableId: collectionId,
          id: doc.$id,
          data: { [targetKey]: doc[sourceKey] },
        }));
      }
    }

    copied += docs.length;
    cursor = docs[docs.length - 1].$id;
    progress?.update(copied);
    if (docs.length < batchSize) {
      break; // short page ⇒ last page
    }
    if (batchDelayMs > 0) {
      await delay(batchDelayMs); // throttle between pages
    }
  }
  progress?.stop();
}
489
+ // ────────────────────────────────────────────────────────
490
+ // Helper: verify data copy (count + spot check)
491
+ // ────────────────────────────────────────────────────────
492
/**
 * Spot-checks the first 5 rows of the collection, requiring target values to
 * match source values. Rows without a source value are skipped.
 *
 * Fix: the original compared with strict `!==`, which always reports a
 * mismatch for array attributes (this file supports `entry.isArray`) because
 * copying produces a distinct array object. Compare structurally instead.
 *
 * @throws {Error} when any checked row's target value differs from its source.
 */
async function verifyDataCopy(adapter, databaseId, collectionId, sourceKey, targetKey) {
  const res = await tryAwaitWithRetry(() => adapter.listRows({
    databaseId,
    tableId: collectionId,
    queries: [Query.limit(5)],
  }));
  const docs = res?.documents || res?.rows || [];
  for (const doc of docs) {
    if (doc[sourceKey] === undefined) {
      continue;
    }
    // Structural comparison: handles arrays (and nested values) where a
    // reference comparison would always fail after a copy.
    const sourceJson = JSON.stringify(doc[sourceKey]);
    const targetJson = JSON.stringify(doc[targetKey]);
    if (sourceJson !== targetJson) {
      throw new Error(`Verification failed: doc ${doc.$id} has ${sourceKey}=${JSON.stringify(doc[sourceKey])} but ${targetKey}=${JSON.stringify(doc[targetKey])}`);
    }
  }
}
508
+ // ────────────────────────────────────────────────────────
509
+ // Helper: wait for attribute to become available
510
+ // ────────────────────────────────────────────────────────
511
/**
 * Polls the table schema until attribute `key` reports status "available".
 *
 * @returns {Promise<boolean>} true when available; false when the server
 *   reports "failed"/"stuck" or `maxWaitMs` elapses.
 */
async function waitForAttribute(adapter, databaseId, collectionId, key, maxWaitMs = 60_000) {
  const pollMs = 2000;
  const startedAt = Date.now();
  while (Date.now() - startedAt < maxWaitMs) {
    const res = await tryAwaitWithRetry(() => adapter.getTable({ databaseId, tableId: collectionId }));
    const attrs = res?.data?.attributes || res?.data?.columns || [];
    const match = attrs.find((a) => a.key === key);
    if (match?.status === "available") {
      return true;
    }
    if (match?.status === "failed" || match?.status === "stuck") {
      return false;
    }
    // Not listed yet, or still processing — keep polling.
    await delay(pollMs);
  }
  return false;
}
528
+ // ────────────────────────────────────────────────────────
529
+ // Helper: wait for attribute to be fully deleted
530
+ // ────────────────────────────────────────────────────────
531
/**
 * Polls the table schema until attribute `key` no longer appears.
 *
 * @returns {Promise<boolean>} true once the attribute is gone; false on timeout.
 */
async function waitForAttributeGone(adapter, databaseId, collectionId, key, maxWaitMs = 60_000) {
  const pollMs = 2000;
  const startedAt = Date.now();
  while (Date.now() - startedAt < maxWaitMs) {
    const res = await tryAwaitWithRetry(() => adapter.getTable({ databaseId, tableId: collectionId }));
    const attrs = res?.data?.attributes || res?.data?.columns || [];
    if (!attrs.some((a) => a.key === key)) {
      return true;
    }
    // Still present (possibly mid-deletion) — keep polling.
    await delay(pollMs);
  }
  return false;
}
549
+ // ────────────────────────────────────────────────────────
550
+ // Helper: index management
551
+ // ────────────────────────────────────────────────────────
552
/**
 * Stores the definitions of the named indexes in the checkpoint entry (for
 * later recreation), deletes them server-side, then waits for each one to
 * disappear. Index keys not found on the server are skipped silently.
 */
async function saveAndDeleteIndexes(adapter, databaseId, collectionId, indexKeys, cpEntry) {
  const res = await tryAwaitWithRetry(() => adapter.listIndexes({ databaseId, tableId: collectionId }));
  const serverIndexes = res?.data || [];

  for (const idxKey of indexKeys) {
    const idx = serverIndexes.find((i) => i.key === idxKey);
    if (!idx) {
      continue; // already absent server-side
    }
    // Record the definition at most once, so a resumed run doesn't duplicate it.
    const alreadyStored = cpEntry.storedIndexes.some((s) => s.key === idxKey);
    if (!alreadyStored) {
      cpEntry.storedIndexes.push({
        key: idx.key,
        type: idx.type || "key",
        attributes: idx.attributes || [],
        orders: idx.orders,
      });
    }
    await tryAwaitWithRetry(() => adapter.deleteIndex({ databaseId, tableId: collectionId, key: idxKey }));
  }

  // Only return once every requested index has actually disappeared.
  for (const idxKey of indexKeys) {
    await waitForIndexGone(adapter, databaseId, collectionId, idxKey);
  }
}
578
/**
 * Recreates every index stored in the checkpoint entry, waiting for each one
 * to become available before creating the next.
 */
async function recreateIndexes(adapter, databaseId, collectionId, cpEntry) {
  for (const stored of cpEntry.storedIndexes) {
    await tryAwaitWithRetry(() => adapter.createIndex({
      databaseId,
      tableId: collectionId,
      key: stored.key,
      type: stored.type,
      attributes: stored.attributes,
      orders: stored.orders,
    }));
    // Sequential on purpose: wait for availability before the next create.
    await waitForIndexAvailable(adapter, databaseId, collectionId, stored.key);
  }
}
592
/**
 * Polls until index `key` is absent from the server's index list. Returns
 * silently on timeout (no error is raised).
 */
async function waitForIndexGone(adapter, databaseId, collectionId, key, maxWaitMs = 60_000) {
  const startedAt = Date.now();
  while (Date.now() - startedAt < maxWaitMs) {
    const res = await tryAwaitWithRetry(() => adapter.listIndexes({ databaseId, tableId: collectionId }));
    const stillPresent = (res?.data || []).some((i) => i.key === key);
    if (!stillPresent) {
      return;
    }
    await delay(2000);
  }
}
602
/**
 * Polls until index `key` reports status "available".
 *
 * @throws {Error} when the server marks the index "failed".
 * Returns silently on timeout.
 */
async function waitForIndexAvailable(adapter, databaseId, collectionId, key, maxWaitMs = 60_000) {
  const startedAt = Date.now();
  while (Date.now() - startedAt < maxWaitMs) {
    const res = await tryAwaitWithRetry(() => adapter.listIndexes({ databaseId, tableId: collectionId }));
    const idx = (res?.data || []).find((i) => i.key === key);
    if (idx?.status === "available") {
      return;
    }
    if (idx?.status === "failed") {
      throw new Error(`Index ${key} creation failed`);
    }
    await delay(2000);
  }
}
616
+ // ────────────────────────────────────────────────────────
617
+ // Checkpoint management
618
+ // ────────────────────────────────────────────────────────
619
/**
 * Loads and schema-validates an existing checkpoint file, or returns a fresh
 * empty checkpoint. A missing, unreadable, or schema-invalid file is not
 * fatal — the migration simply starts from scratch.
 */
function loadOrCreateCheckpoint(checkpointPath, planFile) {
  if (fs.existsSync(checkpointPath)) {
    try {
      const raw = JSON.parse(fs.readFileSync(checkpointPath, "utf8"));
      const parsed = MigrationCheckpointSchema.parse(raw);
      MessageFormatter.info(`Resuming from checkpoint: ${checkpointPath}`, { prefix: "Checkpoint" });
      return parsed;
    }
    catch {
      // Corrupt JSON or schema mismatch — fall through to a new checkpoint.
      MessageFormatter.info("Corrupt checkpoint file, creating new one.", { prefix: "Checkpoint" });
    }
  }
  const now = new Date().toISOString();
  return {
    planFile,
    startedAt: now,
    lastUpdatedAt: now,
    entries: [],
  };
}
639
/**
 * Persists the checkpoint as pretty-printed JSON, stamping `lastUpdatedAt`
 * with the current time on every write.
 */
function saveCheckpoint(checkpoint, checkpointPath) {
  checkpoint.lastUpdatedAt = new Date().toISOString();
  const serialized = JSON.stringify(checkpoint, null, 2);
  fs.writeFileSync(checkpointPath, serialized, "utf8");
}
643
/**
 * Returns the checkpoint entry tracking this plan entry's migration,
 * creating a fresh "pending" one (with a generated backup key) when none
 * exists yet. The new entry is appended to the checkpoint in place.
 */
function getOrCreateCheckpointEntry(checkpoint, entry) {
  const found = findCheckpointEntry(checkpoint, entry);
  if (found) {
    return found;
  }
  const fresh = {
    databaseId: entry.databaseId,
    collectionId: entry.collectionId,
    attributeKey: entry.attributeKey,
    backupKey: generateBackupKey(entry.attributeKey),
    phase: "pending",
    targetType: entry.targetType,
    targetSize: entry.targetSize,
    storedIndexes: [],
  };
  checkpoint.entries.push(fresh);
  return fresh;
}
660
/**
 * Looks up the checkpoint entry for a plan entry. Identity is the
 * (databaseId, collectionId, attributeKey) triple.
 *
 * @returns {object|undefined} the matching entry, or undefined.
 */
function findCheckpointEntry(checkpoint, entry) {
  const matches = (e) =>
    e.databaseId === entry.databaseId &&
    e.collectionId === entry.collectionId &&
    e.attributeKey === entry.attributeKey;
  return checkpoint.entries.find(matches);
}
665
+ // ────────────────────────────────────────────────────────
666
+ // Phase ordering for checkpoint resume
667
+ // ────────────────────────────────────────────────────────
668
// Ordered list of migration phases; array position encodes progress and is
// used to decide which pipeline steps a resumed run may skip.
const PHASE_ORDER = [
  "pending",
  "backup_created",
  "data_copied_to_backup",
  "data_verified_backup",
  "original_deleted",
  "new_attr_created",
  "data_copied_back",
  "data_verified_final",
  "backup_deleted",
  "completed",
];

/**
 * Maps a phase name to its position in PHASE_ORDER. Unknown phases (e.g.
 * "failed") map to -1, which makes a resumed run redo every step.
 */
function phaseIndex(phase) {
  // indexOf already yields -1 for unknown phases.
  return PHASE_ORDER.indexOf(phase);
}
684
+ // ────────────────────────────────────────────────────────
685
+ // Dry run summary
686
+ // ────────────────────────────────────────────────────────
687
/**
 * Prints what executeMigrationPlan would do — migrations grouped by
 * database/collection, then skipped entries — without touching the server.
 */
function printDryRunSummary(plan) {
  console.log("");
  console.log(chalk.bold("Dry Run — What Would Happen:"));
  console.log(chalk.gray("─".repeat(50)));

  // Group migratable entries under a "<database>/<collection>" label.
  const groups = new Map();
  for (const entry of plan.entries) {
    if (entry.action !== "migrate") {
      continue;
    }
    const label = `${entry.databaseName}/${entry.collectionName}`;
    const bucket = groups.get(label);
    if (bucket) {
      bucket.push(entry);
    } else {
      groups.set(label, [entry]);
    }
  }

  for (const [label, groupEntries] of groups) {
    console.log(`\n ${chalk.cyan(label)}`);
    for (const e of groupEntries) {
      const sizeInfo = e.targetType === "varchar" ? ` (size: ${e.targetSize})` : "";
      const indexInfo = e.indexesAffected.length > 0
        ? ` [indexes: ${e.indexesAffected.join(", ")}]`
        : "";
      console.log(` ${e.attributeKey}: string(${e.currentSize}) → ${e.targetType}${sizeInfo}${indexInfo}`);
    }
  }

  const skippedEntries = plan.entries.filter((e) => e.action === "skip");
  if (skippedEntries.length > 0) {
    console.log(`\n ${chalk.yellow("Skipped:")}`);
    for (const e of skippedEntries) {
      console.log(` ${e.attributeKey}: ${e.skipReason || "manual skip"}`);
    }
  }
  console.log("");
}
719
+ // ────────────────────────────────────────────────────────
720
+ // Utility
721
+ // ────────────────────────────────────────────────────────
722
/**
 * Awaitable pause: resolves after `ms` milliseconds.
 *
 * @param {number} ms - Milliseconds to wait.
 * @returns {Promise<void>}
 */
function delay(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}