appwrite-utils-cli 1.7.5 → 1.7.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/DatabaseAdapter.d.ts +1 -0
- package/dist/adapters/LegacyAdapter.js +55 -21
- package/dist/adapters/TablesDBAdapter.js +21 -2
- package/dist/collections/wipeOperations.d.ts +2 -2
- package/dist/collections/wipeOperations.js +37 -132
- package/package.json +1 -1
- package/src/adapters/DatabaseAdapter.ts +2 -1
- package/src/adapters/LegacyAdapter.ts +67 -24
- package/src/adapters/TablesDBAdapter.ts +25 -3
- package/src/collections/wipeOperations.ts +53 -208
package/dist/adapters/LegacyAdapter.js
CHANGED

@@ -6,6 +6,8 @@
  * code can use modern TablesDB patterns while maintaining compatibility with
  * older Appwrite instances.
  */
+import { Query } from "node-appwrite";
+import { chunk } from "es-toolkit";
 import { BaseAdapter, AdapterError, UnsupportedOperationError } from './DatabaseAdapter.js';
 /**
  * LegacyAdapter - Translates TablesDB calls to legacy Databases API
@@ -308,30 +310,62 @@ export class LegacyAdapter extends BaseAdapter {
         throw new UnsupportedOperationError('bulkUpsertRows', 'legacy');
     }
     async bulkDeleteRows(params) {
-
-
-
-
-
-
-                    databaseId: params.databaseId,
-                    tableId: params.tableId,
-                    id: rowId
-                });
-                results.push({ id: rowId, deleted: true });
+        try {
+            let queries;
+            // Wipe mode: use Query.limit for deleting without fetching
+            if (params.rowIds.length === 0) {
+                const batchSize = params.batchSize || 250;
+                queries = [Query.limit(batchSize)];
             }
-
-
-
-
-
+            // Specific IDs mode: chunk into batches of 80-90 to stay within Appwrite limits
+            // (max 100 IDs per Query.equal, and queries must be < 4096 chars total)
+            else {
+                const ID_BATCH_SIZE = 85; // Safe batch size for Query.equal
+                const idBatches = chunk(params.rowIds, ID_BATCH_SIZE);
+                queries = idBatches.map(batch => Query.equal('$id', batch));
+            }
+            const result = await this.databases.deleteDocuments(params.databaseId, params.tableId, // Maps tableId to collectionId
+            queries);
+            return {
+                data: result,
+                total: params.rowIds.length || result.total || 0
+            };
+        }
+        catch (error) {
+            // If deleteDocuments with queries fails, fall back to individual deletes
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            // Check if the error indicates that deleteDocuments with queries is not supported
+            if (errorMessage.includes('not supported') || errorMessage.includes('invalid') || errorMessage.includes('queries')) {
+                // Fall back to individual deletions
+                const results = [];
+                const errors = [];
+                for (const rowId of params.rowIds) {
+                    try {
+                        await this.deleteRow({
+                            databaseId: params.databaseId,
+                            tableId: params.tableId,
+                            id: rowId
+                        });
+                        results.push({ id: rowId, deleted: true });
+                    }
+                    catch (individualError) {
+                        errors.push({
+                            rowId,
+                            error: individualError instanceof Error ? individualError.message : 'Unknown error'
+                        });
+                    }
+                }
+                return {
+                    data: results,
+                    total: results.length,
+                    errors: errors.length > 0 ? errors : undefined
+                };
+            }
+            else {
+                // Re-throw the original error if it's not a support issue
+                throw new AdapterError(`Failed to bulk delete rows (legacy): ${errorMessage}`, 'BULK_DELETE_ROWS_FAILED', error instanceof Error ? error : undefined);
             }
         }
-        return {
-            data: results,
-            total: results.length,
-            errors: errors.length > 0 ? errors : undefined
-        };
     }
     // Metadata and Capabilities
     getMetadata() {
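For readers following the new branch logic above, the query-building step can be read in isolation as the sketch below. It is illustrative only: buildDeleteQueries is a hypothetical helper name, not part of the package; Query.limit, Query.equal, and es-toolkit's chunk are the same imports the diff adds, and the 85-per-batch figure mirrors the comment in the diff rather than a documented API constant.

import { Query } from "node-appwrite";
import { chunk } from "es-toolkit";

const ID_BATCH_SIZE = 85; // stays under the 100-value cap per Query.equal

function buildDeleteQueries(rowIds: string[], batchSize = 250): string[] {
  if (rowIds.length === 0) {
    // Wipe mode: no IDs supplied, ask the server to delete up to batchSize rows.
    return [Query.limit(batchSize)];
  }
  // Specific-IDs mode: one Query.equal("$id", [...]) per chunk of IDs,
  // keeping each query comfortably inside Appwrite's size limits.
  return chunk(rowIds, ID_BATCH_SIZE).map((batch) => Query.equal("$id", batch));
}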
package/dist/adapters/TablesDBAdapter.js
CHANGED

@@ -5,6 +5,8 @@
  * without any translation layer. It uses object notation parameters
  * and returns Models.Row instead of Models.Document.
  */
+import { Query } from "node-appwrite";
+import { chunk } from "es-toolkit";
 import { BaseAdapter, AdapterError } from './DatabaseAdapter.js';
 /**
  * TablesDBAdapter implementation for native TablesDB API
@@ -269,10 +271,27 @@ export class TablesDBAdapter extends BaseAdapter {
     }
     async bulkDeleteRows(params) {
         try {
-
+            let queries;
+            // Wipe mode: use Query.limit for deleting without fetching
+            if (params.rowIds.length === 0) {
+                const batchSize = params.batchSize || 250;
+                queries = [Query.limit(batchSize)];
+            }
+            // Specific IDs mode: chunk into batches of 80-90 to stay within Appwrite limits
+            // (max 100 IDs per Query.equal, and queries must be < 4096 chars total)
+            else {
+                const ID_BATCH_SIZE = 85; // Safe batch size for Query.equal
+                const idBatches = chunk(params.rowIds, ID_BATCH_SIZE);
+                queries = idBatches.map(batch => Query.equal('$id', batch));
+            }
+            const result = await this.tablesDB.deleteRows({
+                databaseId: params.databaseId,
+                tableId: params.tableId,
+                queries: queries
+            });
             return {
                 data: result,
-                total: params.rowIds.length
+                total: params.rowIds.length || result.total || 0
             };
         }
         catch (error) {
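Both adapters now accept the same two calling conventions, so a caller only decides whether to pass IDs. A minimal usage sketch follows; the database, table, and row IDs are placeholders, and the structural type stands in for the package's own BulkDeleteRowsParams/ApiResponse shapes:

type BulkDelete = (p: {
  databaseId: string;
  tableId: string;
  rowIds: string[];
  batchSize?: number;
}) => Promise<{ data: unknown; total: number }>;

async function demo(bulkDeleteRows: BulkDelete) {
  // Wipe mode: empty rowIds, the adapter builds [Query.limit(batchSize)] itself;
  // total then comes from the server-reported result.total.
  await bulkDeleteRows({ databaseId: "main", tableId: "orders", rowIds: [], batchSize: 250 });

  // Specific-IDs mode: the adapter chunks the IDs into Query.equal("$id", ...) batches;
  // total is reported as the number of IDs requested.
  await bulkDeleteRows({ databaseId: "main", tableId: "orders", rowIds: ["row_1", "row_2"] });
}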
package/dist/collections/wipeOperations.d.ts
CHANGED

@@ -10,7 +10,7 @@ export declare const wipeAllTables: (adapter: DatabaseAdapter, databaseId: strin
     tableName: string;
 }[]>;
 /**
- * Optimized
- * Uses
+ * Optimized deletion of all rows from a table using direct bulk deletion
+ * Uses Query.limit() to delete rows without fetching IDs first
  */
 export declare const wipeTableRows: (adapter: DatabaseAdapter, databaseId: string, tableId: string) => Promise<void>;
package/dist/collections/wipeOperations.js
CHANGED

@@ -2,7 +2,7 @@ import { Databases, Query, } from "node-appwrite";
 import { tryAwaitWithRetry } from "../utils/helperFunctions.js";
 import { MessageFormatter } from "../shared/messageFormatter.js";
 import { ProgressManager } from "../shared/progressManager.js";
-import { isRetryableError,
+import { isRetryableError, isCriticalError } from "../shared/errorUtils.js";
 import { delay } from "../utils/helperFunctions.js";
 import { chunk } from "es-toolkit";
 import pLimit from "p-limit";
@@ -167,82 +167,54 @@ export const wipeAllTables = async (adapter, databaseId) => {
     return deleted;
 };
 /**
- * Optimized
- * Uses
+ * Optimized deletion of all rows from a table using direct bulk deletion
+ * Uses Query.limit() to delete rows without fetching IDs first
  */
 export const wipeTableRows = async (adapter, databaseId, tableId) => {
     try {
-        //
-
-
-
-
+        // Check if bulk deletion is available
+        if (!adapter.bulkDeleteRows) {
+            MessageFormatter.error("Bulk deletion not available for this adapter - wipe operation not supported", new Error("bulkDeleteRows not available"), { prefix: "Wipe" });
+            throw new Error("Bulk deletion required for wipe operations");
+        }
+        const DELETE_BATCH_SIZE = 250; // How many rows to delete per batch
         let totalDeleted = 0;
-        let cursor;
         let hasMoreRows = true;
         MessageFormatter.info("Starting optimized table row deletion...", { prefix: "Wipe" });
-
-        const progress = ProgressManager.create(`delete-${tableId}`, 1, // Start with 1, will update as we go
+        const progress = ProgressManager.create(`delete-${tableId}`, 1, // Start with 1, will update as we discover more
        { title: "Deleting table rows" });
        while (hasMoreRows) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            progress.setTotal(totalDeleted +
+            try {
+                // Delete next batch using Query.limit() - no fetching needed!
+                const result = await tryAwaitWithRetry(async () => adapter.bulkDeleteRows({
+                    databaseId,
+                    tableId,
+                    rowIds: [], // Empty array signals we want to use Query.limit instead
+                    batchSize: DELETE_BATCH_SIZE
+                }));
+                const deletedCount = result.total || 0;
+                if (deletedCount === 0) {
+                    hasMoreRows = false;
+                    break;
+                }
+                totalDeleted += deletedCount;
+                progress.setTotal(totalDeleted + 100); // Estimate more rows exist
+                progress.update(totalDeleted);
+                MessageFormatter.progress(`Deleted ${deletedCount} rows (${totalDeleted} total so far)`, { prefix: "Wipe" });
+                // Small delay between batches to be respectful to the API
+                await delay(10);
            }
-
-
-
-
-
-            try {
-                // Attempt bulk deletion (available in TablesDB)
-                await tryBulkDeletion(adapter, databaseId, tableId, rowIds, BULK_DELETE_BATCH_SIZE, MAX_CONCURRENT_OPERATIONS);
-                totalDeleted += rows.length;
-                progress.update(totalDeleted);
+            catch (error) {
+                const errorMessage = error.message || String(error);
+                if (isCriticalError(errorMessage)) {
+                    MessageFormatter.error(`Critical error during bulk deletion: ${errorMessage}`, error, { prefix: "Wipe" });
+                    throw error;
                }
-
-
-
-                if (isRetryableError(errorMessage)) {
-                    MessageFormatter.progress(`Bulk deletion encountered retryable error, retrying with individual deletion for ${rows.length} rows`, { prefix: "Wipe" });
-                }
-                else if (isBulkNotSupportedError(errorMessage)) {
-                    MessageFormatter.progress(`Bulk deletion not supported by server, switching to individual deletion for ${rows.length} rows`, { prefix: "Wipe" });
-                }
-                else {
-                    MessageFormatter.progress(`Bulk deletion failed (${errorMessage}), falling back to individual deletion for ${rows.length} rows`, { prefix: "Wipe" });
-                }
-                await tryIndividualDeletion(adapter, databaseId, tableId, rows, INDIVIDUAL_DELETE_BATCH_SIZE, MAX_CONCURRENT_OPERATIONS, progress, totalDeleted);
-                totalDeleted += rows.length;
+                else {
+                    MessageFormatter.error(`Error during deletion batch: ${errorMessage}`, error, { prefix: "Wipe" });
+                    // Continue trying with next batch
                }
            }
-            else {
-                // Bulk deletion not available, use optimized individual deletion
-                MessageFormatter.progress(`Using individual deletion for ${rows.length} rows (bulk deletion not available)`, { prefix: "Wipe" });
-                await tryIndividualDeletion(adapter, databaseId, tableId, rows, INDIVIDUAL_DELETE_BATCH_SIZE, MAX_CONCURRENT_OPERATIONS, progress, totalDeleted);
-                totalDeleted += rows.length;
-            }
-            // Set up cursor for next iteration
-            if (rows.length < FETCH_BATCH_SIZE) {
-                hasMoreRows = false;
-            }
-            else {
-                cursor = rows[rows.length - 1].$id;
-            }
-            // Small delay between fetch cycles to be respectful to the API
-            await delay(10);
        }
        // Update final progress total
        progress.setTotal(totalDeleted);
@@ -259,70 +231,3 @@ export const wipeTableRows = async (adapter, databaseId, tableId) => {
        throw error;
    }
};
-/**
- * Helper function to attempt bulk deletion of row IDs
- */
-async function tryBulkDeletion(adapter, databaseId, tableId, rowIds, batchSize, maxConcurrent) {
-    if (!adapter.bulkDeleteRows) {
-        throw new Error("Bulk deletion not available on this adapter");
-    }
-    const limit = pLimit(maxConcurrent);
-    const batches = chunk(rowIds, batchSize);
-    const deletePromises = batches.map((batch) => limit(async () => {
-        try {
-            await tryAwaitWithRetry(async () => adapter.bulkDeleteRows({ databaseId, tableId, rowIds: batch }));
-        }
-        catch (error) {
-            const errorMessage = error.message || String(error);
-            // Enhanced error handling for bulk deletion
-            if (isCriticalError(errorMessage)) {
-                MessageFormatter.error(`Critical error in bulk deletion batch: ${errorMessage}`, error, { prefix: "Wipe" });
-                throw error;
-            }
-            else {
-                // For non-critical errors in bulk deletion, re-throw to trigger fallback
-                throw new Error(`Bulk deletion batch failed: ${errorMessage}`);
-            }
-        }
-    }));
-    await Promise.all(deletePromises);
-}
-/**
- * Helper function for fallback individual deletion
- */
-async function tryIndividualDeletion(adapter, databaseId, tableId, rows, batchSize, maxConcurrent, progress, baseDeleted) {
-    const limit = pLimit(maxConcurrent);
-    const batches = chunk(rows, batchSize);
-    let processedInBatch = 0;
-    const deletePromises = batches.map((batch) => limit(async () => {
-        const batchDeletePromises = batch.map(async (row) => {
-            try {
-                await tryAwaitWithRetry(async () => adapter.deleteRow({ databaseId, tableId, id: row.$id }));
-            }
-            catch (error) {
-                const errorMessage = error.message || String(error);
-                // Enhanced error handling for row deletion
-                if (errorMessage.includes("Row with the requested ID could not be found")) {
-                    // Row already deleted, skip silently
-                }
-                else if (isCriticalError(errorMessage)) {
-                    // Critical error, log and rethrow to stop operation
-                    MessageFormatter.error(`Critical error deleting row ${row.$id}: ${errorMessage}`, error, { prefix: "Wipe" });
-                    throw error;
-                }
-                else if (isRetryableError(errorMessage)) {
-                    // Retryable error, will be handled by tryAwaitWithRetry
-                    MessageFormatter.progress(`Retryable error for row ${row.$id}, will retry`, { prefix: "Wipe" });
-                }
-                else {
-                    // Other non-critical errors, log but continue
-                    MessageFormatter.error(`Failed to delete row ${row.$id}: ${errorMessage}`, error, { prefix: "Wipe" });
-                }
-            }
-            processedInBatch++;
-            progress.update(baseDeleted + processedInBatch);
-        });
-        await Promise.all(batchDeletePromises);
-    }));
-    await Promise.all(deletePromises);
-}
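The control flow that replaces the old fetch-then-delete cursor loop reduces to "delete up to N rows, repeat until a batch deletes nothing". A condensed sketch of that pattern, under the assumption that the adapter's bulkDeleteRows resolves with a { total } count per batch (the function name and parameter shapes here are placeholders, not the package's exports):

async function wipeLoop(
  adapter: {
    bulkDeleteRows: (p: { databaseId: string; tableId: string; rowIds: string[]; batchSize?: number }) => Promise<{ total?: number }>;
  },
  databaseId: string,
  tableId: string,
): Promise<number> {
  const DELETE_BATCH_SIZE = 250;
  let totalDeleted = 0;
  // Keep asking the adapter to delete "up to 250 rows" until a batch removes nothing.
  while (true) {
    const result = await adapter.bulkDeleteRows({ databaseId, tableId, rowIds: [], batchSize: DELETE_BATCH_SIZE });
    const deletedCount = result.total ?? 0;
    if (deletedCount === 0) break; // table is empty
    totalDeleted += deletedCount;
  }
  return totalDeleted;
}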
package/package.json
CHANGED

@@ -1,7 +1,7 @@
 {
   "name": "appwrite-utils-cli",
   "description": "Appwrite Utility Functions to help with database management, data conversion, data import, migrations, and much more. Meant to be used as a CLI tool, I do not recommend installing this in frontend environments.",
-  "version": "1.7.
+  "version": "1.7.7",
   "main": "src/main.ts",
   "type": "module",
   "repository": {
package/src/adapters/DatabaseAdapter.ts
CHANGED

@@ -86,7 +86,8 @@ export interface BulkUpsertRowsParams {
 export interface BulkDeleteRowsParams {
   databaseId: string;
   tableId: string;
-  rowIds: string[];
+  rowIds: string[]; // Empty array = wipe mode (use Query.limit), otherwise specific IDs to delete
+  batchSize?: number; // Optional batch size for wipe mode (default 250)
 }
 
 // Index operation parameters
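To make the new contract concrete, a purely hypothetical in-memory stand-in (useful as a test double, not shipped by the package) that honors it: an empty rowIds array means wipe mode bounded by batchSize (default 250), otherwise only the listed IDs are removed.

interface BulkDeleteRowsParams {
  databaseId: string;
  tableId: string;
  rowIds: string[];   // empty array = wipe mode, otherwise specific IDs
  batchSize?: number; // wipe-mode batch size, default 250
}

class InMemoryTable {
  private rows = new Map<string, unknown>();

  async bulkDeleteRows(params: BulkDeleteRowsParams): Promise<{ total: number }> {
    if (params.rowIds.length === 0) {
      // Wipe mode: delete up to batchSize rows, whichever they are.
      const limit = params.batchSize ?? 250;
      const victims = [...this.rows.keys()].slice(0, limit);
      victims.forEach((id) => this.rows.delete(id));
      return { total: victims.length };
    }
    // Specific-IDs mode: delete only the listed rows.
    let deleted = 0;
    for (const id of params.rowIds) {
      if (this.rows.delete(id)) deleted++;
    }
    return { total: deleted };
  }
}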
package/src/adapters/LegacyAdapter.ts
CHANGED

@@ -7,6 +7,8 @@
  * older Appwrite instances.
  */
 
+import { Query } from "node-appwrite";
+import { chunk } from "es-toolkit";
 import {
   BaseAdapter,
   type CreateRowParams,
@@ -586,32 +588,73 @@ export class LegacyAdapter extends BaseAdapter {
     throw new UnsupportedOperationError('bulkUpsertRows', 'legacy');
   }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  async bulkDeleteRows(params: BulkDeleteRowsParams): Promise<ApiResponse> {
+    try {
+      let queries: string[];
+
+      // Wipe mode: use Query.limit for deleting without fetching
+      if (params.rowIds.length === 0) {
+        const batchSize = params.batchSize || 250;
+        queries = [Query.limit(batchSize)];
+      }
+      // Specific IDs mode: chunk into batches of 80-90 to stay within Appwrite limits
+      // (max 100 IDs per Query.equal, and queries must be < 4096 chars total)
+      else {
+        const ID_BATCH_SIZE = 85; // Safe batch size for Query.equal
+        const idBatches = chunk(params.rowIds, ID_BATCH_SIZE);
+        queries = idBatches.map(batch => Query.equal('$id', batch));
+      }
+
+      const result = await this.databases.deleteDocuments(
+        params.databaseId,
+        params.tableId, // Maps tableId to collectionId
+        queries
+      );
+
+      return {
+        data: result,
+        total: params.rowIds.length || (result as any).total || 0
+      };
+    } catch (error) {
+      // If deleteDocuments with queries fails, fall back to individual deletes
+      const errorMessage = error instanceof Error ? error.message : String(error);
+
+      // Check if the error indicates that deleteDocuments with queries is not supported
+      if (errorMessage.includes('not supported') || errorMessage.includes('invalid') || errorMessage.includes('queries')) {
+        // Fall back to individual deletions
+        const results = [];
+        const errors = [];
+
+        for (const rowId of params.rowIds) {
+          try {
+            await this.deleteRow({
+              databaseId: params.databaseId,
+              tableId: params.tableId,
+              id: rowId
+            });
+            results.push({ id: rowId, deleted: true });
+          } catch (individualError) {
+            errors.push({
+              rowId,
+              error: individualError instanceof Error ? individualError.message : 'Unknown error'
+            });
+          }
+        }
+
+        return {
+          data: results,
+          total: results.length,
+          errors: errors.length > 0 ? errors : undefined
+        };
+      } else {
+        // Re-throw the original error if it's not a support issue
+        throw new AdapterError(
+          `Failed to bulk delete rows (legacy): ${errorMessage}`,
+          'BULK_DELETE_ROWS_FAILED',
+          error instanceof Error ? error : undefined
+        );
       }
     }
-
-    return {
-      data: results,
-      total: results.length,
-      errors: errors.length > 0 ? errors : undefined
-    };
   }
 
   // Metadata and Capabilities
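The legacy path's recovery strategy can be summarized on its own: attempt the query-based bulk call, and only when the error text suggests the server does not accept deleteDocuments with queries, fall back to one delete per ID. A hedged sketch of that pattern; deleteBulk and deleteOne below are placeholders for the adapter's own calls, not real SDK functions:

async function deleteWithFallback(
  rowIds: string[],
  deleteBulk: () => Promise<unknown>,
  deleteOne: (id: string) => Promise<void>,
) {
  try {
    return { data: await deleteBulk(), total: rowIds.length };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    // Only fall back when the failure looks like "queries not supported"; anything else re-throws.
    if (!/not supported|invalid|queries/.test(message)) throw error;
    const results: { id: string; deleted: boolean }[] = [];
    const errors: { rowId: string; error: string }[] = [];
    for (const rowId of rowIds) {
      try {
        await deleteOne(rowId);
        results.push({ id: rowId, deleted: true });
      } catch (e) {
        errors.push({ rowId, error: e instanceof Error ? e.message : "Unknown error" });
      }
    }
    return { data: results, total: results.length, errors: errors.length ? errors : undefined };
  }
}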
package/src/adapters/TablesDBAdapter.ts
CHANGED

@@ -6,6 +6,8 @@
  * and returns Models.Row instead of Models.Document.
  */
 
+import { Query } from "node-appwrite";
+import { chunk } from "es-toolkit";
 import {
   BaseAdapter,
   type DatabaseAdapter,
@@ -513,12 +515,32 @@ export class TablesDBAdapter extends BaseAdapter {
     }
   }
 
-
+  async bulkDeleteRows(params: BulkDeleteRowsParams): Promise<ApiResponse> {
     try {
-
+      let queries: string[];
+
+      // Wipe mode: use Query.limit for deleting without fetching
+      if (params.rowIds.length === 0) {
+        const batchSize = params.batchSize || 250;
+        queries = [Query.limit(batchSize)];
+      }
+      // Specific IDs mode: chunk into batches of 80-90 to stay within Appwrite limits
+      // (max 100 IDs per Query.equal, and queries must be < 4096 chars total)
+      else {
+        const ID_BATCH_SIZE = 85; // Safe batch size for Query.equal
+        const idBatches = chunk(params.rowIds, ID_BATCH_SIZE);
+        queries = idBatches.map(batch => Query.equal('$id', batch));
+      }
+
+      const result = await this.tablesDB.deleteRows({
+        databaseId: params.databaseId,
+        tableId: params.tableId,
+        queries: queries
+      });
+
       return {
         data: result,
-        total: params.rowIds.length
+        total: params.rowIds.length || (result as any).total || 0
       };
     } catch (error) {
       throw new AdapterError(
package/src/collections/wipeOperations.ts
CHANGED

@@ -7,7 +7,7 @@ import type { DatabaseAdapter } from "../adapters/DatabaseAdapter.js";
 import { tryAwaitWithRetry } from "../utils/helperFunctions.js";
 import { MessageFormatter } from "../shared/messageFormatter.js";
 import { ProgressManager } from "../shared/progressManager.js";
-import { isRetryableError,
+import { isRetryableError, isCriticalError } from "../shared/errorUtils.js";
 import { delay } from "../utils/helperFunctions.js";
 import { chunk } from "es-toolkit";
 import pLimit from "p-limit";
@@ -239,8 +239,8 @@ export const wipeAllTables = async (
 };
 
 /**
- * Optimized
- * Uses
+ * Optimized deletion of all rows from a table using direct bulk deletion
+ * Uses Query.limit() to delete rows without fetching IDs first
  */
 export const wipeTableRows = async (
   adapter: DatabaseAdapter,
@@ -248,123 +248,78 @@ export const wipeTableRows = async (
   tableId: string
 ): Promise<void> => {
   try {
-    //
-
-
-
-
+    // Check if bulk deletion is available
+    if (!adapter.bulkDeleteRows) {
+      MessageFormatter.error(
+        "Bulk deletion not available for this adapter - wipe operation not supported",
+        new Error("bulkDeleteRows not available"),
+        { prefix: "Wipe" }
+      );
+      throw new Error("Bulk deletion required for wipe operations");
+    }
 
+    const DELETE_BATCH_SIZE = 250; // How many rows to delete per batch
     let totalDeleted = 0;
-    let cursor: string | undefined;
     let hasMoreRows = true;
 
     MessageFormatter.info("Starting optimized table row deletion...", { prefix: "Wipe" });
 
-    // Create progress tracker (we'll update the total as we discover more rows)
     const progress = ProgressManager.create(
       `delete-${tableId}`,
-      1, // Start with 1, will update as we
+      1, // Start with 1, will update as we discover more
       { title: "Deleting table rows" }
     );
 
     while (hasMoreRows) {
-
-
-
-
-
-
-
-
+      try {
+        // Delete next batch using Query.limit() - no fetching needed!
+        const result = await tryAwaitWithRetry(async () =>
+          adapter.bulkDeleteRows!({
+            databaseId,
+            tableId,
+            rowIds: [], // Empty array signals we want to use Query.limit instead
+            batchSize: DELETE_BATCH_SIZE
+          })
+        );
 
-
-        hasMoreRows = false;
-        break;
-      }
+        const deletedCount = (result as any).total || 0;
 
-
-
-
-
-      }
+        if (deletedCount === 0) {
+          hasMoreRows = false;
+          break;
+        }
 
-
-
-
-      );
+        totalDeleted += deletedCount;
+        progress.setTotal(totalDeleted + 100); // Estimate more rows exist
+        progress.update(totalDeleted);
 
-      // Try to use bulk deletion first, fall back to individual deletion
-      const rowIds = rows.map((row: any) => row.$id);
-
-      // Check if bulk deletion is available and try it first
-      if (adapter.bulkDeleteRows) {
-        try {
-          // Attempt bulk deletion (available in TablesDB)
-          await tryBulkDeletion(adapter, databaseId, tableId, rowIds, BULK_DELETE_BATCH_SIZE, MAX_CONCURRENT_OPERATIONS);
-          totalDeleted += rows.length;
-          progress.update(totalDeleted);
-        } catch (bulkError) {
-          // Enhanced error handling: categorize the error and decide on fallback strategy
-          const errorMessage = bulkError instanceof Error ? bulkError.message : String(bulkError);
-
-          if (isRetryableError(errorMessage)) {
-            MessageFormatter.progress(
-              `Bulk deletion encountered retryable error, retrying with individual deletion for ${rows.length} rows`,
-              { prefix: "Wipe" }
-            );
-          } else if (isBulkNotSupportedError(errorMessage)) {
-            MessageFormatter.progress(
-              `Bulk deletion not supported by server, switching to individual deletion for ${rows.length} rows`,
-              { prefix: "Wipe" }
-            );
-          } else {
-            MessageFormatter.progress(
-              `Bulk deletion failed (${errorMessage}), falling back to individual deletion for ${rows.length} rows`,
-              { prefix: "Wipe" }
-            );
-          }
-
-          await tryIndividualDeletion(
-            adapter,
-            databaseId,
-            tableId,
-            rows,
-            INDIVIDUAL_DELETE_BATCH_SIZE,
-            MAX_CONCURRENT_OPERATIONS,
-            progress,
-            totalDeleted
-          );
-          totalDeleted += rows.length;
-        }
-      } else {
-        // Bulk deletion not available, use optimized individual deletion
         MessageFormatter.progress(
-          `
+          `Deleted ${deletedCount} rows (${totalDeleted} total so far)`,
           { prefix: "Wipe" }
         );
 
-
-
-          databaseId,
-          tableId,
-          rows,
-          INDIVIDUAL_DELETE_BATCH_SIZE,
-          MAX_CONCURRENT_OPERATIONS,
-          progress,
-          totalDeleted
-        );
-        totalDeleted += rows.length;
-      }
+        // Small delay between batches to be respectful to the API
+        await delay(10);
 
-
-
-        hasMoreRows = false;
-      } else {
-        cursor = rows[rows.length - 1].$id;
-      }
+      } catch (error: any) {
+        const errorMessage = error.message || String(error);
 
-
-
+        if (isCriticalError(errorMessage)) {
+          MessageFormatter.error(
+            `Critical error during bulk deletion: ${errorMessage}`,
+            error,
+            { prefix: "Wipe" }
+          );
+          throw error;
+        } else {
+          MessageFormatter.error(
+            `Error during deletion batch: ${errorMessage}`,
+            error,
+            { prefix: "Wipe" }
+          );
+          // Continue trying with next batch
+        }
+      }
     }
 
     // Update final progress total
@@ -389,113 +344,3 @@ export const wipeTableRows = async (
     throw error;
   }
 };
-
-/**
- * Helper function to attempt bulk deletion of row IDs
- */
-async function tryBulkDeletion(
-  adapter: DatabaseAdapter,
-  databaseId: string,
-  tableId: string,
-  rowIds: string[],
-  batchSize: number,
-  maxConcurrent: number
-): Promise<void> {
-  if (!adapter.bulkDeleteRows) {
-    throw new Error("Bulk deletion not available on this adapter");
-  }
-
-  const limit = pLimit(maxConcurrent);
-  const batches = chunk(rowIds, batchSize);
-
-  const deletePromises = batches.map((batch) =>
-    limit(async () => {
-      try {
-        await tryAwaitWithRetry(async () =>
-          adapter.bulkDeleteRows!({ databaseId, tableId, rowIds: batch })
-        );
-      } catch (error: any) {
-        const errorMessage = error.message || String(error);
-
-        // Enhanced error handling for bulk deletion
-        if (isCriticalError(errorMessage)) {
-          MessageFormatter.error(
-            `Critical error in bulk deletion batch: ${errorMessage}`,
-            error,
-            { prefix: "Wipe" }
-          );
-          throw error;
-        } else {
-          // For non-critical errors in bulk deletion, re-throw to trigger fallback
-          throw new Error(`Bulk deletion batch failed: ${errorMessage}`);
-        }
-      }
-    })
-  );
-
-  await Promise.all(deletePromises);
-}
-
-/**
- * Helper function for fallback individual deletion
- */
-async function tryIndividualDeletion(
-  adapter: DatabaseAdapter,
-  databaseId: string,
-  tableId: string,
-  rows: any[],
-  batchSize: number,
-  maxConcurrent: number,
-  progress: any,
-  baseDeleted: number
-): Promise<void> {
-  const limit = pLimit(maxConcurrent);
-  const batches = chunk(rows, batchSize);
-  let processedInBatch = 0;
-
-  const deletePromises = batches.map((batch) =>
-    limit(async () => {
-      const batchDeletePromises = batch.map(async (row: any) => {
-        try {
-          await tryAwaitWithRetry(async () =>
-            adapter.deleteRow({ databaseId, tableId, id: row.$id })
-          );
-        } catch (error: any) {
-          const errorMessage = error.message || String(error);
-
-          // Enhanced error handling for row deletion
-          if (errorMessage.includes("Row with the requested ID could not be found")) {
-            // Row already deleted, skip silently
-          } else if (isCriticalError(errorMessage)) {
-            // Critical error, log and rethrow to stop operation
-            MessageFormatter.error(
-              `Critical error deleting row ${row.$id}: ${errorMessage}`,
-              error,
-              { prefix: "Wipe" }
-            );
-            throw error;
-          } else if (isRetryableError(errorMessage)) {
-            // Retryable error, will be handled by tryAwaitWithRetry
-            MessageFormatter.progress(
-              `Retryable error for row ${row.$id}, will retry`,
-              { prefix: "Wipe" }
-            );
-          } else {
-            // Other non-critical errors, log but continue
-            MessageFormatter.error(
-              `Failed to delete row ${row.$id}: ${errorMessage}`,
-              error,
-              { prefix: "Wipe" }
-            );
-          }
-        }
-        processedInBatch++;
-        progress.update(baseDeleted + processedInBatch);
-      });
-
-      await Promise.all(batchDeletePromises);
-    })
-  );
-
-  await Promise.all(deletePromises);
-}