appwrite-utils-cli 1.7.6 → 1.7.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/SELECTION_DIALOGS.md +146 -0
  2. package/dist/adapters/DatabaseAdapter.d.ts +1 -0
  3. package/dist/adapters/LegacyAdapter.js +15 -3
  4. package/dist/adapters/TablesDBAdapter.js +15 -3
  5. package/dist/cli/commands/databaseCommands.js +90 -23
  6. package/dist/collections/wipeOperations.d.ts +2 -2
  7. package/dist/collections/wipeOperations.js +37 -139
  8. package/dist/main.js +175 -4
  9. package/dist/migrations/appwriteToX.d.ts +27 -2
  10. package/dist/migrations/appwriteToX.js +293 -69
  11. package/dist/migrations/yaml/YamlImportConfigLoader.d.ts +1 -1
  12. package/dist/migrations/yaml/generateImportSchemas.js +23 -8
  13. package/dist/shared/schemaGenerator.js +25 -12
  14. package/dist/shared/selectionDialogs.d.ts +214 -0
  15. package/dist/shared/selectionDialogs.js +516 -0
  16. package/dist/utils/configDiscovery.d.ts +4 -4
  17. package/dist/utils/configDiscovery.js +66 -30
  18. package/dist/utils/yamlConverter.d.ts +1 -0
  19. package/dist/utils/yamlConverter.js +26 -3
  20. package/dist/utilsController.d.ts +6 -1
  21. package/dist/utilsController.js +91 -2
  22. package/package.json +1 -1
  23. package/src/adapters/DatabaseAdapter.ts +2 -1
  24. package/src/adapters/LegacyAdapter.ts +95 -82
  25. package/src/adapters/TablesDBAdapter.ts +62 -47
  26. package/src/cli/commands/databaseCommands.ts +134 -34
  27. package/src/collections/wipeOperations.ts +62 -224
  28. package/src/main.ts +276 -34
  29. package/src/migrations/appwriteToX.ts +385 -90
  30. package/src/migrations/yaml/generateImportSchemas.ts +26 -8
  31. package/src/shared/schemaGenerator.ts +29 -12
  32. package/src/shared/selectionDialogs.ts +716 -0
  33. package/src/utils/configDiscovery.ts +83 -39
  34. package/src/utils/yamlConverter.ts +29 -3
  35. package/src/utilsController.ts +116 -4
@@ -0,0 +1,146 @@
+ # Selection Dialogs System
+
+ ## Overview
+
+ The `SelectionDialogs` class provides a comprehensive interactive selection system for the enhanced sync flow in the Appwrite Utils CLI. It enables users to select databases, tables/collections, and storage buckets with visual indicators for configured vs new items.
+
+ ## Features
+
+ - **Visual Indicators**: ✅ for configured items, ○ for new items
+ - **Multi-Selection Support**: Checkbox-style selection with "Select All" functionality
+ - **Configuration Awareness**: Detects and highlights existing configurations
+ - **Grouped Display**: Organizes buckets by database for better context
+ - **Comprehensive Confirmation**: Shows detailed summary before sync execution
+ - **Graceful Error Handling**: Proper error messages and cancellation support
+ - **Type Safety**: Full TypeScript support with proper interfaces
+
+ ## Main Functions
+
+ ### `promptForExistingConfig(configuredItems: any[])`
+ Prompts user about existing configuration with options to:
+ - Sync existing configured items
+ - Add/remove items from configuration
+
+ ### `selectDatabases(availableDatabases, configuredDatabases, options?)`
+ Interactive database selection with:
+ - Visual indicators for configured vs new databases
+ - "Select All" functionality
+ - Filtering options (new only, default selections)
+
+ ### `selectTablesForDatabase(databaseId, databaseName, availableTables, configuredTables, options?)`
+ Table/collection selection for a specific database with:
+ - Database context display
+ - Table selection with indicators
+ - Multi-selection support
+
+ ### `selectBucketsForDatabases(selectedDatabaseIds, availableBuckets, configuredBuckets, options?)`
+ Storage bucket selection with:
+ - Grouping by database
+ - Relevance filtering (only buckets for selected databases)
+ - Ungrouped/general storage support
+
+ ### `confirmSyncSelection(selectionSummary: SyncSelectionSummary)`
+ Final confirmation dialog showing:
+ - Complete selection summary
+ - Statistics (total, new, existing items)
+ - Detailed breakdown by category
+
+ ## Usage Example
+
+ ```typescript
+ import { SelectionDialogs } from './shared/selectionDialogs.js';
+ import type { Models } from 'node-appwrite';
+
+ // 1. Check for existing configuration
+ const { syncExisting, modifyConfiguration } = await SelectionDialogs.promptForExistingConfig(configuredDatabases);
+
+ if (modifyConfiguration) {
+   // 2. Select databases
+   const selectedDatabaseIds = await SelectionDialogs.selectDatabases(
+     availableDatabases,
+     configuredDatabases,
+     { showSelectAll: true }
+   );
+
+   // 3. Select tables for each database
+   const tableSelectionsMap = new Map<string, string[]>();
+   for (const databaseId of selectedDatabaseIds) {
+     const selectedTableIds = await SelectionDialogs.selectTablesForDatabase(
+       databaseId,
+       databaseName,
+       availableTables,
+       configuredTables
+     );
+     tableSelectionsMap.set(databaseId, selectedTableIds);
+   }
+
+   // 4. Select buckets
+   const selectedBucketIds = await SelectionDialogs.selectBucketsForDatabases(
+     selectedDatabaseIds,
+     availableBuckets,
+     configuredBuckets
+   );
+
+   // 5. Create selection summary and confirm
+   const selectionSummary = SelectionDialogs.createSyncSelectionSummary(
+     databaseSelections,
+     bucketSelections
+   );
+
+   const confirmed = await SelectionDialogs.confirmSyncSelection(selectionSummary);
+
+   if (confirmed) {
+     // Proceed with sync operation
+   }
+ }
+ ```
+
+ ## Configuration Options
+
+ ### Database Selection Options
+ - `showSelectAll`: Show "Select All" option (default: true)
+ - `allowNewOnly`: Only show new/unconfigured databases (default: false)
+ - `defaultSelected`: Array of database IDs to pre-select
+
+ ### Table Selection Options
+ - `showSelectAll`: Show "Select All" option (default: true)
+ - `allowNewOnly`: Only show new/unconfigured tables (default: false)
+ - `defaultSelected`: Array of table IDs to pre-select
+ - `showDatabaseContext`: Show database name in header (default: true)
+
+ ### Bucket Selection Options
+ - `showSelectAll`: Show "Select All" option (default: true)
+ - `allowNewOnly`: Only show new/unconfigured buckets (default: false)
+ - `defaultSelected`: Array of bucket IDs to pre-select
+ - `groupByDatabase`: Group buckets by database (default: true)
+
+ ## Interfaces
+
+ ### `SyncSelectionSummary`
+ Contains complete selection information:
+ - `databases`: Array of selected databases with their tables
+ - `buckets`: Array of selected buckets
+ - `totalDatabases/Tables/Buckets`: Count of selected items
+ - `newItems/existingItems`: Breakdown of new vs existing configurations
+
+ ### `DatabaseSelection`
+ Represents a selected database:
+ - `databaseId/databaseName`: Database identification
+ - `tableIds/tableNames`: Selected tables for this database
+ - `isNew`: Whether this is a new configuration
+
+ ### `BucketSelection`
+ Represents a selected bucket:
+ - `bucketId/bucketName`: Bucket identification
+ - `databaseId/databaseName`: Associated database (if applicable)
+ - `isNew`: Whether this is a new configuration
+
+ ## Integration
+
+ The selection dialogs are designed to integrate seamlessly with the existing CLI infrastructure:
+
+ - Uses `MessageFormatter` for consistent styling
+ - Integrates with existing logging system
+ - Follows established error handling patterns
+ - Compatible with existing configuration management
+ - Uses inquirer.js for interactive prompts
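Editor's note: for readers skimming the diff, the shapes described under "Configuration Options" and "Interfaces" above can be sketched roughly as follows. This is a minimal illustration reconstructed from the documentation text, not the published declarations in `dist/shared/selectionDialogs.d.ts`; the real code may split the options into separate types per dialog, and exact property types are assumptions.

```typescript
// Sketch only -- field names follow the documentation above; types are assumed.
interface SelectionOptions {
  showSelectAll?: boolean;       // default: true
  allowNewOnly?: boolean;        // default: false
  defaultSelected?: string[];    // IDs to pre-select
  showDatabaseContext?: boolean; // table selection only, default: true
  groupByDatabase?: boolean;     // bucket selection only, default: true
}

interface DatabaseSelection {
  databaseId: string;
  databaseName: string;
  tableIds: string[];
  tableNames: string[];
  isNew: boolean; // true when the database is not in the existing configuration
}

interface BucketSelection {
  bucketId: string;
  bucketName: string;
  databaseId?: string;   // associated database, if applicable
  databaseName?: string;
  isNew: boolean;
}

interface SyncSelectionSummary {
  databases: DatabaseSelection[];
  buckets: BucketSelection[];
  totalDatabases: number;
  totalTables: number;
  totalBuckets: number;
  newItems: number;      // items not yet configured
  existingItems: number; // items already configured
}
```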
@@ -72,6 +72,7 @@ export interface BulkDeleteRowsParams {
  databaseId: string;
  tableId: string;
  rowIds: string[];
+ batchSize?: number;
  }
  export interface CreateIndexParams {
  databaseId: string;
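Editor's note: combined with the adapter changes below, the updated parameter type and its calling convention look roughly like this. A sketch for orientation only; the shipped `DatabaseAdapter.d.ts` is authoritative, and the `adapter`, database, and table identifiers in the usage line are hypothetical.

```typescript
// Sketch of the updated params -- field list mirrors the hunk above.
export interface BulkDeleteRowsParams {
  databaseId: string;
  tableId: string;
  rowIds: string[];   // empty array => "wipe mode": delete by Query.limit, no IDs fetched
  batchSize?: number; // wipe-mode batch size; the adapters default to 250
}

// Wipe mode: delete the next batch of rows without listing them first.
// ("adapter", "main", and "orders" are placeholders for illustration.)
await adapter.bulkDeleteRows({
  databaseId: "main",
  tableId: "orders",
  rowIds: [],
  batchSize: 250,
});
```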
@@ -7,6 +7,7 @@
  * older Appwrite instances.
  */
  import { Query } from "node-appwrite";
+ import { chunk } from "es-toolkit";
  import { BaseAdapter, AdapterError, UnsupportedOperationError } from './DatabaseAdapter.js';
  /**
  * LegacyAdapter - Translates TablesDB calls to legacy Databases API
@@ -310,13 +311,24 @@ export class LegacyAdapter extends BaseAdapter {
  }
  async bulkDeleteRows(params) {
  try {
- // Try to use deleteDocuments with queries first (more efficient)
- const queries = params.rowIds.map(id => Query.equal('$id', id));
+ let queries;
+ // Wipe mode: use Query.limit for deleting without fetching
+ if (params.rowIds.length === 0) {
+ const batchSize = params.batchSize || 250;
+ queries = [Query.limit(batchSize)];
+ }
+ // Specific IDs mode: chunk into batches of 80-90 to stay within Appwrite limits
+ // (max 100 IDs per Query.equal, and queries must be < 4096 chars total)
+ else {
+ const ID_BATCH_SIZE = 85; // Safe batch size for Query.equal
+ const idBatches = chunk(params.rowIds, ID_BATCH_SIZE);
+ queries = idBatches.map(batch => Query.equal('$id', batch));
+ }
  const result = await this.databases.deleteDocuments(params.databaseId, params.tableId, // Maps tableId to collectionId
  queries);
  return {
  data: result,
- total: params.rowIds.length
+ total: params.rowIds.length || result.total || 0
  };
  }
  catch (error) {
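Editor's note: the comment in the hunk above cites the Appwrite constraints (at most 100 values per `Query.equal`, roughly 4096 characters of query budget). A minimal standalone sketch of just that chunking step, using made-up row IDs, is shown below; it is illustrative and not part of the diff.

```typescript
import { chunk } from "es-toolkit";
import { Query } from "node-appwrite";

// 200 made-up row IDs, split into groups of 85 -> sizes [85, 85, 30]
const rowIds = Array.from({ length: 200 }, (_, i) => `row_${i}`);
const idBatches = chunk(rowIds, 85);

// Each batch becomes one Query.equal filter on $id, mirroring the adapter code above
const queries = idBatches.map((batch) => Query.equal("$id", batch));
console.log(idBatches.map((b) => b.length)); // [85, 85, 30]
```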
@@ -6,6 +6,7 @@
  * and returns Models.Row instead of Models.Document.
  */
  import { Query } from "node-appwrite";
+ import { chunk } from "es-toolkit";
  import { BaseAdapter, AdapterError } from './DatabaseAdapter.js';
  /**
  * TablesDBAdapter implementation for native TablesDB API
@@ -270,8 +271,19 @@ export class TablesDBAdapter extends BaseAdapter {
  }
  async bulkDeleteRows(params) {
  try {
- // Convert rowIds to queries for the deleteRows API
- const queries = params.rowIds.map(id => Query.equal('$id', id));
+ let queries;
+ // Wipe mode: use Query.limit for deleting without fetching
+ if (params.rowIds.length === 0) {
+ const batchSize = params.batchSize || 250;
+ queries = [Query.limit(batchSize)];
+ }
+ // Specific IDs mode: chunk into batches of 80-90 to stay within Appwrite limits
+ // (max 100 IDs per Query.equal, and queries must be < 4096 chars total)
+ else {
+ const ID_BATCH_SIZE = 85; // Safe batch size for Query.equal
+ const idBatches = chunk(params.rowIds, ID_BATCH_SIZE);
+ queries = idBatches.map(batch => Query.equal('$id', batch));
+ }
  const result = await this.tablesDB.deleteRows({
  databaseId: params.databaseId,
  tableId: params.tableId,
@@ -279,7 +291,7 @@ export class TablesDBAdapter extends BaseAdapter {
  });
  return {
  data: result,
- total: params.rowIds.length
+ total: params.rowIds.length || result.total || 0
  };
  }
  catch (error) {
@@ -3,32 +3,95 @@ import chalk from "chalk";
  import { join } from "node:path";
  import { MessageFormatter } from "../../shared/messageFormatter.js";
  import { ConfirmationDialogs } from "../../shared/confirmationDialogs.js";
+ import { SelectionDialogs } from "../../shared/selectionDialogs.js";
+ import { logger } from "../../shared/logging.js";
  import { fetchAllDatabases } from "../../databases/methods.js";
  import { listBuckets } from "../../storage/methods.js";
  import { getFunction, downloadLatestFunctionDeployment } from "../../functions/methods.js";
  export const databaseCommands = {
  async syncDb(cli) {
  MessageFormatter.progress("Pushing local configuration to Appwrite...", { prefix: "Database" });
- const databases = await cli.selectDatabases(cli.getLocalDatabases(), chalk.blue("Select local databases to push:"), true);
- if (!databases.length) {
- MessageFormatter.warning("No databases selected. Skipping database sync.", { prefix: "Database" });
- return;
- }
  try {
- // Loop through each database and prompt for collections specific to that database
- for (const database of databases) {
- MessageFormatter.info(`\n📦 Configuring push for database: ${database.name}`, { prefix: "Database" });
- const collections = await cli.selectCollectionsAndTables(database, cli.controller.database, chalk.blue(`Select collections/tables to push to "${database.name}":`), true, // multiSelect
+ // Initialize controller
+ await cli.controller.init();
+ // Get available and configured databases
+ const availableDatabases = await fetchAllDatabases(cli.controller.database);
+ const configuredDatabases = cli.controller.config?.databases || [];
+ // Get local collections for selection
+ const localCollections = cli.getLocalCollections();
+ // Prompt about existing configuration
+ const { syncExisting, modifyConfiguration } = await SelectionDialogs.promptForExistingConfig(configuredDatabases);
+ // Select databases
+ const selectedDatabaseIds = await SelectionDialogs.selectDatabases(availableDatabases, configuredDatabases, { showSelectAll: true, allowNewOnly: !syncExisting });
+ if (selectedDatabaseIds.length === 0) {
+ MessageFormatter.warning("No databases selected. Skipping database sync.", { prefix: "Database" });
+ return;
+ }
+ // Select tables/collections for each database using the existing method
+ const tableSelectionsMap = new Map();
+ const availableTablesMap = new Map();
+ for (const databaseId of selectedDatabaseIds) {
+ const database = availableDatabases.find(db => db.$id === databaseId);
+ // Use the existing selectCollectionsAndTables method
+ const selectedCollections = await cli.selectCollectionsAndTables(database, cli.controller.database, chalk.blue(`Select collections/tables to push to "${database.name}":`), true, // multiSelect
  true // prefer local
  );
- if (collections.length === 0) {
+ // Map selected collections to table IDs
+ const selectedTableIds = selectedCollections.map((c) => c.$id || c.id);
+ // Store selections
+ tableSelectionsMap.set(databaseId, selectedTableIds);
+ availableTablesMap.set(databaseId, selectedCollections);
+ if (selectedCollections.length === 0) {
  MessageFormatter.warning(`No collections selected for database "${database.name}". Skipping.`, { prefix: "Database" });
  continue;
  }
- // Push selected collections to this specific database
- await cli.controller.syncDb([database], collections);
- MessageFormatter.success(`Pushed ${collections.length} collection(s) to database "${database.name}"`, { prefix: "Database" });
  }
+ // Ask if user wants to select buckets
+ const { selectBuckets } = await inquirer.prompt([
+ {
+ type: "confirm",
+ name: "selectBuckets",
+ message: "Do you want to select storage buckets to sync as well?",
+ default: false,
+ },
+ ]);
+ let bucketSelections = [];
+ if (selectBuckets) {
+ // Get available and configured buckets
+ try {
+ const availableBucketsResponse = await listBuckets(cli.controller.storage);
+ const availableBuckets = availableBucketsResponse.buckets || [];
+ const configuredBuckets = cli.controller.config?.buckets || [];
+ if (availableBuckets.length === 0) {
+ MessageFormatter.warning("No storage buckets available in remote instance.", { prefix: "Database" });
+ }
+ else {
+ // Select buckets using SelectionDialogs
+ const selectedBucketIds = await SelectionDialogs.selectBucketsForDatabases(selectedDatabaseIds, availableBuckets, configuredBuckets, { showSelectAll: true, groupByDatabase: true });
+ if (selectedBucketIds.length > 0) {
+ // Create BucketSelection objects
+ bucketSelections = SelectionDialogs.createBucketSelection(selectedBucketIds, availableBuckets, configuredBuckets, availableDatabases);
+ MessageFormatter.info(`Selected ${bucketSelections.length} storage bucket(s)`, { prefix: "Database" });
+ }
+ }
+ }
+ catch (error) {
+ MessageFormatter.warning("Failed to fetch storage buckets. Continuing with databases only.", { prefix: "Database" });
+ logger.warn("Storage bucket fetch failed during syncDb", { error: error instanceof Error ? error.message : String(error) });
+ }
+ }
+ // Create DatabaseSelection objects
+ const databaseSelections = SelectionDialogs.createDatabaseSelection(selectedDatabaseIds, availableDatabases, tableSelectionsMap, configuredDatabases, availableTablesMap);
+ // Show confirmation summary
+ const selectionSummary = SelectionDialogs.createSyncSelectionSummary(databaseSelections, bucketSelections);
+ const confirmed = await SelectionDialogs.confirmSyncSelection(selectionSummary);
+ if (!confirmed) {
+ MessageFormatter.info("Sync operation cancelled by user", { prefix: "Database" });
+ return;
+ }
+ // Perform selective sync using the controller
+ MessageFormatter.progress("Starting selective sync...", { prefix: "Database" });
+ await cli.controller.selectiveSync(databaseSelections, bucketSelections);
  MessageFormatter.success("\n✅ All database configurations pushed successfully!", { prefix: "Database" });
  // Then handle functions if requested
  const { syncFunctions } = await inquirer.prompt([
@@ -73,19 +136,23 @@ export const databaseCommands = {
  ]);
  if (syncDatabases) {
  const remoteDatabases = await fetchAllDatabases(cli.controller.database);
- // Use the controller's synchronizeConfigurations method which handles collections properly
- MessageFormatter.progress("Pulling collections and generating collection files...", { prefix: "Collections" });
- await cli.controller.synchronizeConfigurations(remoteDatabases);
- // Also configure buckets for any new databases
+ // First, prepare the combined database list for bucket configuration
  const localDatabases = cli.controller.config?.databases || [];
- const updatedConfig = await cli.configureBuckets({
+ const allDatabases = [
+ ...localDatabases,
+ ...remoteDatabases.filter((rd) => !localDatabases.some((ld) => ld.name === rd.name)),
+ ];
+ // Configure buckets FIRST to get user selections before writing config
+ MessageFormatter.progress("Configuring storage buckets...", { prefix: "Buckets" });
+ const configWithBuckets = await cli.configureBuckets({
  ...cli.controller.config,
- databases: [
- ...localDatabases,
- ...remoteDatabases.filter((rd) => !localDatabases.some((ld) => ld.name === rd.name)),
- ],
+ databases: allDatabases,
  });
- cli.controller.config = updatedConfig;
+ // Update controller config with bucket selections
+ cli.controller.config = configWithBuckets;
+ // Now synchronize configurations with the updated config that includes bucket selections
+ MessageFormatter.progress("Pulling collections and generating collection files...", { prefix: "Collections" });
+ await cli.controller.synchronizeConfigurations(remoteDatabases, configWithBuckets);
  }
  // Then sync functions
  const { syncFunctions } = await inquirer.prompt([
@@ -10,7 +10,7 @@ export declare const wipeAllTables: (adapter: DatabaseAdapter, databaseId: strin
  tableName: string;
  }[]>;
  /**
- * Optimized streaming deletion of all rows from a table
- * Uses bulk deletion when available, falls back to optimized individual deletion
+ * Optimized deletion of all rows from a table using direct bulk deletion
+ * Uses Query.limit() to delete rows without fetching IDs first
  */
  export declare const wipeTableRows: (adapter: DatabaseAdapter, databaseId: string, tableId: string) => Promise<void>;
@@ -2,7 +2,7 @@ import { Databases, Query, } from "node-appwrite";
  import { tryAwaitWithRetry } from "../utils/helperFunctions.js";
  import { MessageFormatter } from "../shared/messageFormatter.js";
  import { ProgressManager } from "../shared/progressManager.js";
- import { isRetryableError, isBulkNotSupportedError, isCriticalError } from "../shared/errorUtils.js";
+ import { isRetryableError, isCriticalError } from "../shared/errorUtils.js";
  import { delay } from "../utils/helperFunctions.js";
  import { chunk } from "es-toolkit";
  import pLimit from "p-limit";
@@ -167,82 +167,54 @@ export const wipeAllTables = async (adapter, databaseId) => {
  return deleted;
  };
  /**
- * Optimized streaming deletion of all rows from a table
- * Uses bulk deletion when available, falls back to optimized individual deletion
+ * Optimized deletion of all rows from a table using direct bulk deletion
+ * Uses Query.limit() to delete rows without fetching IDs first
  */
  export const wipeTableRows = async (adapter, databaseId, tableId) => {
  try {
- // Configuration for optimized deletion
- const FETCH_BATCH_SIZE = 1000; // How many to fetch per query
- const BULK_DELETE_BATCH_SIZE = 500; // How many to bulk delete at once
- const INDIVIDUAL_DELETE_BATCH_SIZE = 200; // For fallback individual deletion
- const MAX_CONCURRENT_OPERATIONS = 10; // Concurrent bulk/individual operations
+ // Check if bulk deletion is available
+ if (!adapter.bulkDeleteRows) {
+ MessageFormatter.error("Bulk deletion not available for this adapter - wipe operation not supported", new Error("bulkDeleteRows not available"), { prefix: "Wipe" });
+ throw new Error("Bulk deletion required for wipe operations");
+ }
+ const DELETE_BATCH_SIZE = 250; // How many rows to delete per batch
  let totalDeleted = 0;
- let cursor;
  let hasMoreRows = true;
  MessageFormatter.info("Starting optimized table row deletion...", { prefix: "Wipe" });
- // Create progress tracker (we'll update the total as we discover more rows)
- const progress = ProgressManager.create(`delete-${tableId}`, 1, // Start with 1, will update as we go
+ const progress = ProgressManager.create(`delete-${tableId}`, 1, // Start with 1, will update as we discover more
  { title: "Deleting table rows" });
  while (hasMoreRows) {
- // Fetch next batch of rows
- const queries = [Query.limit(FETCH_BATCH_SIZE)];
- if (cursor) {
- queries.push(Query.cursorAfter(cursor));
- }
- const response = await adapter.listRows({ databaseId, tableId, queries });
- const rows = response.rows || [];
- if (rows.length === 0) {
- hasMoreRows = false;
- break;
- }
- // Update progress total as we discover more rows
- if (rows.length === FETCH_BATCH_SIZE) {
- // There might be more rows, update progress total
- progress.setTotal(totalDeleted + rows.length + 1000); // Estimate more
- }
- MessageFormatter.progress(`Processing batch: ${rows.length} rows (${totalDeleted + rows.length} total so far)`, { prefix: "Wipe" });
- // Try to use bulk deletion first, fall back to individual deletion
- const rowIds = rows.map((row) => row.$id);
- // Check if bulk deletion is available and try it first
- if (adapter.bulkDeleteRows) {
- try {
- // Attempt bulk deletion (available in TablesDB)
- const deletedCount = await tryBulkDeletion(adapter, databaseId, tableId, rowIds, BULK_DELETE_BATCH_SIZE, MAX_CONCURRENT_OPERATIONS);
- totalDeleted += deletedCount;
- progress.update(totalDeleted);
- }
- catch (bulkError) {
- // Enhanced error handling: categorize the error and decide on fallback strategy
- const errorMessage = bulkError instanceof Error ? bulkError.message : String(bulkError);
- if (isRetryableError(errorMessage)) {
- MessageFormatter.progress(`Bulk deletion encountered retryable error, retrying with individual deletion for ${rows.length} rows`, { prefix: "Wipe" });
- }
- else if (isBulkNotSupportedError(errorMessage)) {
- MessageFormatter.progress(`Bulk deletion not supported by server, switching to individual deletion for ${rows.length} rows`, { prefix: "Wipe" });
- }
- else {
- MessageFormatter.progress(`Bulk deletion failed (${errorMessage}), falling back to individual deletion for ${rows.length} rows`, { prefix: "Wipe" });
- }
- const deletedCount = await tryIndividualDeletion(adapter, databaseId, tableId, rows, INDIVIDUAL_DELETE_BATCH_SIZE, MAX_CONCURRENT_OPERATIONS, progress, totalDeleted);
- totalDeleted += deletedCount;
+ try {
+ // Delete next batch using Query.limit() - no fetching needed!
+ const result = await tryAwaitWithRetry(async () => adapter.bulkDeleteRows({
+ databaseId,
+ tableId,
+ rowIds: [], // Empty array signals we want to use Query.limit instead
+ batchSize: DELETE_BATCH_SIZE
+ }));
+ const deletedCount = result.total || 0;
+ if (deletedCount === 0) {
+ hasMoreRows = false;
+ break;
  }
- }
- else {
- // Bulk deletion not available, use optimized individual deletion
- MessageFormatter.progress(`Using individual deletion for ${rows.length} rows (bulk deletion not available)`, { prefix: "Wipe" });
- const deletedCount = await tryIndividualDeletion(adapter, databaseId, tableId, rows, INDIVIDUAL_DELETE_BATCH_SIZE, MAX_CONCURRENT_OPERATIONS, progress, totalDeleted);
  totalDeleted += deletedCount;
+ progress.setTotal(totalDeleted + 100); // Estimate more rows exist
+ progress.update(totalDeleted);
+ MessageFormatter.progress(`Deleted ${deletedCount} rows (${totalDeleted} total so far)`, { prefix: "Wipe" });
+ // Small delay between batches to be respectful to the API
+ await delay(10);
  }
- // Set up cursor for next iteration
- if (rows.length < FETCH_BATCH_SIZE) {
- hasMoreRows = false;
- }
- else {
- cursor = rows[rows.length - 1].$id;
+ catch (error) {
+ const errorMessage = error.message || String(error);
+ if (isCriticalError(errorMessage)) {
+ MessageFormatter.error(`Critical error during bulk deletion: ${errorMessage}`, error, { prefix: "Wipe" });
+ throw error;
+ }
+ else {
+ MessageFormatter.error(`Error during deletion batch: ${errorMessage}`, error, { prefix: "Wipe" });
+ // Continue trying with next batch
+ }
  }
- // Small delay between fetch cycles to be respectful to the API
- await delay(10);
  }
  // Update final progress total
  progress.setTotal(totalDeleted);
@@ -259,77 +231,3 @@ export const wipeTableRows = async (adapter, databaseId, tableId) => {
  throw error;
  }
  };
- /**
- * Helper function to attempt bulk deletion of row IDs
- */
- async function tryBulkDeletion(adapter, databaseId, tableId, rowIds, batchSize, maxConcurrent) {
- if (!adapter.bulkDeleteRows) {
- throw new Error("Bulk deletion not available on this adapter");
- }
- const limit = pLimit(maxConcurrent);
- const batches = chunk(rowIds, batchSize);
- let successfullyDeleted = 0;
- const deletePromises = batches.map((batch) => limit(async () => {
- try {
- const result = await tryAwaitWithRetry(async () => adapter.bulkDeleteRows({ databaseId, tableId, rowIds: batch }));
- successfullyDeleted += batch.length; // Assume success if no error thrown
- }
- catch (error) {
- const errorMessage = error.message || String(error);
- // Enhanced error handling for bulk deletion
- if (isCriticalError(errorMessage)) {
- MessageFormatter.error(`Critical error in bulk deletion batch: ${errorMessage}`, error, { prefix: "Wipe" });
- throw error;
- }
- else {
- // For non-critical errors in bulk deletion, re-throw to trigger fallback
- throw new Error(`Bulk deletion batch failed: ${errorMessage}`);
- }
- }
- }));
- await Promise.all(deletePromises);
- return successfullyDeleted;
- }
- /**
- * Helper function for fallback individual deletion
- */
- async function tryIndividualDeletion(adapter, databaseId, tableId, rows, batchSize, maxConcurrent, progress, baseDeleted) {
- const limit = pLimit(maxConcurrent);
- const batches = chunk(rows, batchSize);
- let processedInBatch = 0;
- let successfullyDeleted = 0;
- const deletePromises = batches.map((batch) => limit(async () => {
- const batchDeletePromises = batch.map(async (row) => {
- try {
- await tryAwaitWithRetry(async () => adapter.deleteRow({ databaseId, tableId, id: row.$id }));
- successfullyDeleted++;
- }
- catch (error) {
- const errorMessage = error.message || String(error);
- // Enhanced error handling for row deletion
- if (errorMessage.includes("Row with the requested ID could not be found")) {
- // Row already deleted, count as success since it's gone
- successfullyDeleted++;
- }
- else if (isCriticalError(errorMessage)) {
- // Critical error, log and rethrow to stop operation
- MessageFormatter.error(`Critical error deleting row ${row.$id}: ${errorMessage}`, error, { prefix: "Wipe" });
- throw error;
- }
- else if (isRetryableError(errorMessage)) {
- // Retryable error, will be handled by tryAwaitWithRetry
- MessageFormatter.progress(`Retryable error for row ${row.$id}, will retry`, { prefix: "Wipe" });
- }
- else {
- // Other non-critical errors, log but continue
- MessageFormatter.error(`Failed to delete row ${row.$id}: ${errorMessage}`, error, { prefix: "Wipe" });
- }
- }
- processedInBatch++;
- progress.update(baseDeleted + successfullyDeleted);
- });
- await Promise.all(batchDeletePromises);
- }));
- await Promise.all(deletePromises);
- return successfullyDeleted;
- }