appwrite-utils-cli 1.0.8 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +102 -0
- package/dist/collections/attributes.d.ts +8 -0
- package/dist/collections/attributes.js +195 -0
- package/dist/collections/indexes.d.ts +8 -0
- package/dist/collections/indexes.js +150 -0
- package/dist/collections/methods.js +105 -53
- package/dist/interactiveCLI.js +143 -48
- package/dist/migrations/transfer.js +111 -53
- package/package.json +1 -1
- package/src/collections/attributes.ts +339 -0
- package/src/collections/indexes.ts +264 -0
- package/src/collections/methods.ts +175 -87
- package/src/interactiveCLI.ts +146 -48
- package/src/migrations/transfer.ts +228 -121
package/README.md
CHANGED
@@ -327,6 +327,108 @@ This updated CLI ensures that developers have robust tools at their fingertips t
 
 ## Changelog
 
+### 1.1.0 - Enhanced Transfer System with Fault Tolerance
+
+**🔧 Robust Transfer Operations with Status Monitoring**
+
+#### Enhanced Attribute Creation with Fault Tolerance
+- **Exponential Backoff**: Intelligent retry strategy starting at 2 seconds, doubling each retry (2s, 4s, 8s, 16s, 30s max)
+- **Status Monitoring**: Real-time monitoring of attribute states ('available', 'processing', 'stuck', 'failed', 'deleting')
+- **Retry Logic**: Collection deletion/recreation for stuck attributes with up to 5 retry attempts
+- **Sequential Processing**: Attributes processed one-by-one to prevent overwhelming the server
+- **Enhanced Logging**: Comprehensive console feedback with color-coded status messages
+
+#### Enhanced Index Creation with Status Monitoring
+- **Similar Fault Tolerance**: Index creation now includes the same robust monitoring and retry logic
+- **Status Checking**: Real-time monitoring of index creation states with proper error handling
+- **Collection Recreation**: Automatic collection deletion/recreation for stuck index operations
+- **Sequential Processing**: Prevents rate limiting by processing indexes individually
+
+#### Document Transfer Reliability Improvements
+- **Enhanced Error Handling**: Improved document transfer with exponential backoff retry logic
+- **Smaller Batch Sizes**: Reduced batch sizes (10 documents) to prevent server overload
+- **Better Progress Reporting**: Enhanced progress tracking with success/failure counts
+- **Fault Tolerance**: Graceful handling of duplicate documents and API errors
+
+#### Remote Database Transfer Enhancements
+- **Integrated Enhanced Methods**: Updated `transferDatabaseLocalToRemote` to use new attribute and index creation
+- **Proper Wait Logic**: System now properly waits for attributes/indexes to be fully created before proceeding
+- **Status Validation**: Comprehensive status checking throughout the transfer process
+- **Continued Operation**: Transfer continues even if some attributes/indexes fail (with warnings)
+
+#### AppwriteConfig Integration for Comprehensive Transfer
+- **Smart Configuration Detection**: Automatically detects existing appwriteConfig for reuse
+- **Source/Target Options**: Users can select their appwriteConfig for either source or target endpoints
+- **Streamlined Setup**: Enhanced user experience with clear configuration prompts
+
+#### Technical Implementation
+- **Rate Limiting Respect**: Enhanced operations respect existing rate limiting while adding reliability
+- **Memory Efficiency**: Optimized processing to handle large operations without overwhelming system resources
+- **Error Resilience**: Comprehensive error handling with detailed user feedback and recovery options
+- **Status Persistence**: Operations maintain state information for better debugging and monitoring
+
+#### Usage Benefits
+- **Reliability**: Transfer operations no longer fail due to timing issues or stuck operations
+- **Visibility**: Clear progress indicators and status messages throughout all operations
+- **Recovery**: Automatic retry and recovery mechanisms prevent data loss
+- **Performance**: Optimized timing prevents API throttling while maintaining speed
+
+**Breaking Change**: None - fully backward compatible with significantly enhanced reliability.
+
+### 1.0.9 - Enhanced User Transfer with Password Preservation
+
+**🔐 Complete Password Hash Preservation During User Transfers**
+
+#### Password Hash Support
+- **Universal Hash Support**: Support for all Appwrite password hash types:
+  - **Argon2**: Modern default hashing (preserved)
+  - **Bcrypt**: Industry standard (preserved)
+  - **Scrypt**: Memory-hard function with custom parameters (preserved)
+  - **Scrypt Modified**: Firebase-style with salt/separator/signer (preserved)
+  - **MD5**: Legacy support (preserved)
+  - **SHA variants**: SHA1, SHA256, SHA512 (preserved)
+  - **PHPass**: WordPress-style hashing (preserved)
+- **Dynamic Hash Detection**: Automatically detects and uses correct hash creation method
+- **Parameter Preservation**: Maintains hash-specific parameters (salt, iterations, memory cost, etc.)
+
+#### Enhanced User Transfer Logic
+- **Smart Password Recreation**: Uses appropriate `create*User` method based on detected hash type
+- **Fallback Mechanism**: Graceful fallback to temporary passwords if hash recreation fails
+- **Hash Options Support**: Preserves algorithm-specific configuration from `hashOptions`
+- **Detailed Logging**: Clear success/failure messages with hash type information
+
+#### User Experience Improvements
+- **Accurate Information**: Updated CLI messaging to reflect actual password preservation capabilities
+- **Clear Expectations**: Distinguishes between users who keep passwords vs. those who need reset
+- **Success Feedback**: Detailed reporting of password preservation success rate
+- **Risk Assessment**: Proper warnings only for users who will lose passwords
+
+#### Technical Implementation
+- **Hash Type Detection**: `user.hash` field determines creation method
+- **Configuration Parsing**: `user.hashOptions` provides algorithm parameters
+- **Error Resilience**: Comprehensive try-catch with fallback to temporary passwords
+- **Type Safety**: Proper handling of hash option types and parameters
+
+#### Migration Benefits
+- **Seamless Login**: Users with preserved hashes can immediately log in with original passwords
+- **Reduced Support**: Dramatically fewer password reset requests after migration
+- **Complete Fidelity**: Maintains original security posture and hash strength
+- **Production Ready**: Safe for live user base migrations
+
+#### Usage Examples
+```bash
+# Users will now preserve passwords during comprehensive transfer
+npx appwrite-utils-cli@latest appwrite-migrate --it
+# Select: 🔄 Comprehensive transfer (users → databases → buckets → functions)
+
+# Example output:
+# ✅ User 123 created with preserved argon2 password
+# ✅ User 456 created with preserved bcrypt password
+# ⚠️ User 789 created with temporary password - password reset required
+```
+
+**Breaking Change**: None - fully backward compatible with enhanced capabilities.
+
 ### 1.0.8 - Comprehensive Transfer System with Enhanced Rate Limiting
 
 **🚀 Complete Cross-Instance Transfer Solution**
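The 1.0.9 entry above recreates each user on the target instance with the `create*User` call that matches the source user's `hash`, falling back to a temporary password when that fails. The sketch below illustrates that dispatch for two hash types only; it assumes the node-appwrite `Users` service and the `password`/`hash` fields the server Users API returns, and it is not the CLI's actual implementation.

```typescript
import { Users, ID, type Models } from "node-appwrite";

// Typed loosely here: the hashed password and hash name are returned by the
// server Users API when listing users with an API key (assumption).
type SourceUser = Models.User<Models.Preferences> & {
  password?: string; // stored hash, e.g. an argon2 or bcrypt string
  hash?: string;     // e.g. "argon2", "bcrypt", "scrypt", ...
};

// `target` is a Users service bound to the destination project, e.g.
// new Users(new Client().setEndpoint(...).setProject(...).setKey(...)).
async function recreateUser(target: Users, user: SourceUser) {
  try {
    switch (user.hash) {
      case "argon2":
        // Passes the stored argon2 hash through unchanged.
        return await target.createArgon2User(user.$id, user.email, user.password!, user.name);
      case "bcrypt":
        return await target.createBcryptUser(user.$id, user.email, user.password!, user.name);
      // scrypt, scrypt-modified, md5, sha and phpass follow the same pattern,
      // forwarding the algorithm parameters carried in user.hashOptions.
      default:
        throw new Error(`Unhandled hash type: ${user.hash}`);
    }
  } catch {
    // Fallback described in the changelog: create with a temporary password;
    // the user must reset it after migration.
    return await target.create(user.$id, user.email, undefined, ID.unique(), user.name);
  }
}
```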
package/dist/collections/attributes.d.ts
CHANGED
@@ -1,4 +1,12 @@
 import { type Databases, type Models } from "node-appwrite";
 import { type Attribute } from "appwrite-utils";
+/**
+ * Enhanced attribute creation with proper status monitoring and retry logic
+ */
+export declare const createOrUpdateAttributeWithStatusCheck: (db: Databases, dbId: string, collection: Models.Collection, attribute: Attribute, retryCount?: number, maxRetries?: number) => Promise<boolean>;
 export declare const createOrUpdateAttribute: (db: Databases, dbId: string, collection: Models.Collection, attribute: Attribute) => Promise<void>;
+/**
+ * Enhanced collection attribute creation with proper status monitoring
+ */
+export declare const createUpdateCollectionAttributesWithStatusCheck: (db: Databases, dbId: string, collection: Models.Collection, attributes: Attribute[]) => Promise<boolean>;
 export declare const createUpdateCollectionAttributes: (db: Databases, dbId: string, collection: Models.Collection, attributes: Attribute[]) => Promise<void>;
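Based on the declarations above, calling the new collection-level helper might look like the following sketch. The endpoint, project, key, and IDs are placeholders, the deep `dist/` import path is an assumption (it may not be a published entry point), and the attribute definition is only illustrative of appwrite-utils' attribute schema.

```typescript
import { Client, Databases } from "node-appwrite";
import { type Attribute } from "appwrite-utils";
// Assumed import path: the compiled file shown in this diff.
import { createUpdateCollectionAttributesWithStatusCheck } from "appwrite-utils-cli/dist/collections/attributes.js";

async function main() {
  const client = new Client()
    .setEndpoint("https://cloud.appwrite.io/v1") // placeholder endpoint
    .setProject("<PROJECT_ID>")
    .setKey("<API_KEY>");
  const db = new Databases(client);

  const dbId = "<DATABASE_ID>";
  const collection = await db.getCollection(dbId, "<COLLECTION_ID>");

  // Illustrative attribute definition (shape follows appwrite-utils' attribute schema).
  const attributes = [
    { key: "title", type: "string", size: 255, required: true } as Attribute,
  ];

  // Creates the attributes one by one, polling their status and retrying stuck ones.
  const ok = await createUpdateCollectionAttributesWithStatusCheck(db, dbId, collection, attributes);
  console.log(ok ? "All attributes available" : "Some attributes failed");
}

main().catch(console.error);
```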
package/dist/collections/attributes.js
CHANGED
@@ -3,6 +3,103 @@ import { attributeSchema, parseAttribute, } from "appwrite-utils";
 import { nameToIdMapping, enqueueOperation } from "../shared/operationQueue.js";
 import { delay, tryAwaitWithRetry } from "../utils/helperFunctions.js";
 import chalk from "chalk";
+/**
+ * Wait for attribute to become available, with retry logic for stuck attributes and exponential backoff
+ */
+const waitForAttributeAvailable = async (db, dbId, collectionId, attributeKey, maxWaitTime = 60000, // 1 minute
+retryCount = 0, maxRetries = 5) => {
+    const startTime = Date.now();
+    let checkInterval = 2000; // Start with 2 seconds
+    // Calculate exponential backoff: 2s, 4s, 8s, 16s, 30s (capped at 30s)
+    if (retryCount > 0) {
+        const exponentialDelay = Math.min(2000 * Math.pow(2, retryCount), 30000);
+        console.log(chalk.blue(`Waiting for attribute '${attributeKey}' to become available (retry ${retryCount}, backoff: ${exponentialDelay}ms)...`));
+        await delay(exponentialDelay);
+    }
+    else {
+        console.log(chalk.blue(`Waiting for attribute '${attributeKey}' to become available...`));
+    }
+    while (Date.now() - startTime < maxWaitTime) {
+        try {
+            const collection = await db.getCollection(dbId, collectionId);
+            const attribute = collection.attributes.find((attr) => attr.key === attributeKey);
+            if (!attribute) {
+                console.log(chalk.red(`Attribute '${attributeKey}' not found`));
+                return false;
+            }
+            console.log(chalk.gray(`Attribute '${attributeKey}' status: ${attribute.status}`));
+            switch (attribute.status) {
+                case 'available':
+                    console.log(chalk.green(`✅ Attribute '${attributeKey}' is now available`));
+                    return true;
+                case 'failed':
+                    console.log(chalk.red(`❌ Attribute '${attributeKey}' failed: ${attribute.error}`));
+                    return false;
+                case 'stuck':
+                    console.log(chalk.yellow(`⚠️ Attribute '${attributeKey}' is stuck, will retry...`));
+                    return false;
+                case 'processing':
+                    // Continue waiting
+                    break;
+                case 'deleting':
+                    console.log(chalk.yellow(`Attribute '${attributeKey}' is being deleted`));
+                    break;
+                default:
+                    console.log(chalk.yellow(`Unknown status '${attribute.status}' for attribute '${attributeKey}'`));
+                    break;
+            }
+            await delay(checkInterval);
+        }
+        catch (error) {
+            console.log(chalk.red(`Error checking attribute status: ${error}`));
+            return false;
+        }
+    }
+    // Timeout reached
+    console.log(chalk.yellow(`⏰ Timeout waiting for attribute '${attributeKey}' (${maxWaitTime}ms)`));
+    // If we have retries left and this isn't the last retry, try recreating
+    if (retryCount < maxRetries) {
+        console.log(chalk.yellow(`🔄 Retrying attribute creation (attempt ${retryCount + 1}/${maxRetries})`));
+        return false; // Signal that we need to retry
+    }
+    return false;
+};
+/**
+ * Wait for all attributes in a collection to become available
+ */
+const waitForAllAttributesAvailable = async (db, dbId, collectionId, attributeKeys, maxWaitTime = 60000) => {
+    console.log(chalk.blue(`Waiting for ${attributeKeys.length} attributes to become available...`));
+    const failedAttributes = [];
+    for (const attributeKey of attributeKeys) {
+        const success = await waitForAttributeAvailable(db, dbId, collectionId, attributeKey, maxWaitTime);
+        if (!success) {
+            failedAttributes.push(attributeKey);
+        }
+    }
+    return failedAttributes;
+};
+/**
+ * Delete collection and recreate with retry logic
+ */
+const deleteAndRecreateCollection = async (db, dbId, collection, retryCount) => {
+    try {
+        console.log(chalk.yellow(`🗑️ Deleting collection '${collection.name}' for retry ${retryCount}`));
+        // Delete the collection
+        await db.deleteCollection(dbId, collection.$id);
+        console.log(chalk.yellow(`Deleted collection '${collection.name}'`));
+        // Wait a bit before recreating
+        await delay(2000);
+        // Recreate the collection
+        console.log(chalk.blue(`🔄 Recreating collection '${collection.name}'`));
+        const newCollection = await db.createCollection(dbId, collection.$id, collection.name, collection.$permissions, collection.documentSecurity, collection.enabled);
+        console.log(chalk.green(`✅ Recreated collection '${collection.name}'`));
+        return newCollection;
+    }
+    catch (error) {
+        console.log(chalk.red(`Failed to delete/recreate collection '${collection.name}': ${error}`));
+        return null;
+    }
+};
 const attributesSame = (databaseAttribute, configAttribute) => {
     const attributesToCheck = [
         "key",
@@ -53,6 +150,46 @@ const attributesSame = (databaseAttribute, configAttribute) => {
         return false;
     });
 };
+/**
+ * Enhanced attribute creation with proper status monitoring and retry logic
+ */
+export const createOrUpdateAttributeWithStatusCheck = async (db, dbId, collection, attribute, retryCount = 0, maxRetries = 5) => {
+    console.log(chalk.blue(`Creating/updating attribute '${attribute.key}' (attempt ${retryCount + 1}/${maxRetries + 1})`));
+    try {
+        // First, try to create/update the attribute using existing logic
+        await createOrUpdateAttribute(db, dbId, collection, attribute);
+        // Now wait for the attribute to become available
+        const success = await waitForAttributeAvailable(db, dbId, collection.$id, attribute.key, 60000, // 1 minute timeout
+        retryCount, maxRetries);
+        if (success) {
+            return true;
+        }
+        // If not successful and we have retries left, delete collection and try again
+        if (retryCount < maxRetries) {
+            console.log(chalk.yellow(`Attribute '${attribute.key}' failed/stuck, retrying...`));
+            // Get fresh collection data
+            const freshCollection = await db.getCollection(dbId, collection.$id);
+            // Delete and recreate collection
+            const newCollection = await deleteAndRecreateCollection(db, dbId, freshCollection, retryCount + 1);
+            if (newCollection) {
+                // Retry with the new collection
+                return await createOrUpdateAttributeWithStatusCheck(db, dbId, newCollection, attribute, retryCount + 1, maxRetries);
+            }
+        }
+        console.log(chalk.red(`❌ Failed to create attribute '${attribute.key}' after ${maxRetries + 1} attempts`));
+        return false;
+    }
+    catch (error) {
+        console.log(chalk.red(`Error creating attribute '${attribute.key}': ${error}`));
+        if (retryCount < maxRetries) {
+            console.log(chalk.yellow(`Retrying attribute '${attribute.key}' due to error...`));
+            // Wait a bit before retry
+            await delay(2000);
+            return await createOrUpdateAttributeWithStatusCheck(db, dbId, collection, attribute, retryCount + 1, maxRetries);
+        }
+        return false;
+    }
+};
 export const createOrUpdateAttribute = async (db, dbId, collection, attribute) => {
     let action = "create";
     let foundAttribute;
@@ -273,6 +410,64 @@ export const createOrUpdateAttribute = async (db, dbId, collection, attribute) =
             break;
     }
 };
+/**
+ * Enhanced collection attribute creation with proper status monitoring
+ */
+export const createUpdateCollectionAttributesWithStatusCheck = async (db, dbId, collection, attributes) => {
+    console.log(chalk.green(`Creating/Updating attributes for collection: ${collection.name} with status monitoring`));
+    const existingAttributes = 
+    // @ts-expect-error
+    collection.attributes.map((attr) => parseAttribute(attr)) || [];
+    const attributesToRemove = existingAttributes.filter((attr) => !attributes.some((a) => a.key === attr.key));
+    const indexesToRemove = collection.indexes.filter((index) => attributesToRemove.some((attr) => index.attributes.includes(attr.key)));
+    // Handle attribute removal first
+    if (attributesToRemove.length > 0) {
+        if (indexesToRemove.length > 0) {
+            console.log(chalk.red(`Removing indexes as they rely on an attribute that is being removed: ${indexesToRemove
+                .map((index) => index.key)
+                .join(", ")}`));
+            for (const index of indexesToRemove) {
+                await tryAwaitWithRetry(async () => await db.deleteIndex(dbId, collection.$id, index.key));
+                await delay(500); // Longer delay for deletions
+            }
+        }
+        for (const attr of attributesToRemove) {
+            console.log(chalk.red(`Removing attribute: ${attr.key} as it is no longer in the collection`));
+            await tryAwaitWithRetry(async () => await db.deleteAttribute(dbId, collection.$id, attr.key));
+            await delay(500); // Longer delay for deletions
+        }
+    }
+    // Create attributes ONE BY ONE with proper status checking
+    console.log(chalk.blue(`Creating ${attributes.length} attributes sequentially with status monitoring...`));
+    let currentCollection = collection;
+    const failedAttributes = [];
+    for (const attribute of attributes) {
+        console.log(chalk.blue(`\n--- Processing attribute: ${attribute.key} ---`));
+        const success = await createOrUpdateAttributeWithStatusCheck(db, dbId, currentCollection, attribute);
+        if (success) {
+            console.log(chalk.green(`✅ Successfully created attribute: ${attribute.key}`));
+            // Get updated collection data for next iteration
+            try {
+                currentCollection = await db.getCollection(dbId, collection.$id);
+            }
+            catch (error) {
+                console.log(chalk.yellow(`Warning: Could not refresh collection data: ${error}`));
+            }
+            // Add delay between successful attributes
+            await delay(1000);
+        }
+        else {
+            console.log(chalk.red(`❌ Failed to create attribute: ${attribute.key}`));
+            failedAttributes.push(attribute.key);
+        }
+    }
+    if (failedAttributes.length > 0) {
+        console.log(chalk.red(`\n❌ Failed to create ${failedAttributes.length} attributes: ${failedAttributes.join(', ')}`));
+        return false;
+    }
+    console.log(chalk.green(`\n✅ Successfully created all ${attributes.length} attributes for collection: ${collection.name}`));
+    return true;
+};
 export const createUpdateCollectionAttributes = async (db, dbId, collection, attributes) => {
     console.log(chalk.green(`Creating/Updating attributes for collection: ${collection.name}`));
     const existingAttributes =
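For reference, the backoff formula used above, `Math.min(2000 * Math.pow(2, retryCount), 30000)`, works out to the following wait times; the first attempt (retryCount 0) skips the extra wait and simply polls every 2 seconds.

```typescript
// Delay applied before re-checking a stuck/failed attribute, per retry number.
for (let retryCount = 1; retryCount <= 5; retryCount++) {
  console.log(`retry ${retryCount}: ${Math.min(2000 * 2 ** retryCount, 30000)} ms`);
}
// retry 1: 4000 ms, retry 2: 8000 ms, retry 3: 16000 ms,
// retry 4: 30000 ms (capped), retry 5: 30000 ms (capped)
```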
package/dist/collections/indexes.d.ts
CHANGED
@@ -1,4 +1,12 @@
 import { type Index } from "appwrite-utils";
 import { Databases, type Models } from "node-appwrite";
+/**
+ * Enhanced index creation with proper status monitoring and retry logic
+ */
+export declare const createOrUpdateIndexWithStatusCheck: (dbId: string, db: Databases, collectionId: string, collection: Models.Collection, index: Index, retryCount?: number, maxRetries?: number) => Promise<boolean>;
+/**
+ * Enhanced index creation with status monitoring for all indexes
+ */
+export declare const createOrUpdateIndexesWithStatusCheck: (dbId: string, db: Databases, collectionId: string, collection: Models.Collection, indexes: Index[]) => Promise<boolean>;
 export declare const createOrUpdateIndex: (dbId: string, db: Databases, collectionId: string, index: Index) => Promise<Models.Index | null>;
 export declare const createOrUpdateIndexes: (dbId: string, db: Databases, collectionId: string, indexes: Index[]) => Promise<void>;
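A usage sketch for the index helpers follows the same pattern; note that the declared parameter order differs from the attribute helpers (`dbId, db, collectionId, collection, ...` rather than `db, dbId, collection, ...`). As before, the deep import path, the IDs, and the index definition are assumptions for illustration.

```typescript
import { Client, Databases } from "node-appwrite";
import { type Index } from "appwrite-utils";
// Assumed import path: the compiled file shown in this diff.
import { createOrUpdateIndexesWithStatusCheck } from "appwrite-utils-cli/dist/collections/indexes.js";

async function main() {
  const client = new Client()
    .setEndpoint("https://cloud.appwrite.io/v1") // placeholder endpoint
    .setProject("<PROJECT_ID>")
    .setKey("<API_KEY>");
  const db = new Databases(client);

  const dbId = "<DATABASE_ID>";
  const collectionId = "<COLLECTION_ID>";
  const collection = await db.getCollection(dbId, collectionId);

  // Illustrative index definition (shape follows appwrite-utils' index schema).
  const indexes = [{ key: "title_idx", type: "key", attributes: ["title"] } as Index];

  const ok = await createOrUpdateIndexesWithStatusCheck(dbId, db, collectionId, collection, indexes);
  console.log(ok ? "All indexes available" : "Some indexes failed");
}

main().catch(console.error);
```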
package/dist/collections/indexes.js
CHANGED
@@ -1,6 +1,156 @@
 import { indexSchema } from "appwrite-utils";
 import { Databases, IndexType, Query } from "node-appwrite";
 import { delay, tryAwaitWithRetry } from "../utils/helperFunctions.js";
+import chalk from "chalk";
+/**
+ * Wait for index to become available, with retry logic for stuck indexes and exponential backoff
+ */
+const waitForIndexAvailable = async (db, dbId, collectionId, indexKey, maxWaitTime = 60000, // 1 minute
+retryCount = 0, maxRetries = 5) => {
+    const startTime = Date.now();
+    let checkInterval = 2000; // Start with 2 seconds
+    // Calculate exponential backoff: 2s, 4s, 8s, 16s, 30s (capped at 30s)
+    if (retryCount > 0) {
+        const exponentialDelay = Math.min(2000 * Math.pow(2, retryCount), 30000);
+        console.log(chalk.blue(`Waiting for index '${indexKey}' to become available (retry ${retryCount}, backoff: ${exponentialDelay}ms)...`));
+        await delay(exponentialDelay);
+    }
+    else {
+        console.log(chalk.blue(`Waiting for index '${indexKey}' to become available...`));
+    }
+    while (Date.now() - startTime < maxWaitTime) {
+        try {
+            const indexList = await db.listIndexes(dbId, collectionId);
+            const index = indexList.indexes.find((idx) => idx.key === indexKey);
+            if (!index) {
+                console.log(chalk.red(`Index '${indexKey}' not found`));
+                return false;
+            }
+            console.log(chalk.gray(`Index '${indexKey}' status: ${index.status}`));
+            switch (index.status) {
+                case 'available':
+                    console.log(chalk.green(`✅ Index '${indexKey}' is now available`));
+                    return true;
+                case 'failed':
+                    console.log(chalk.red(`❌ Index '${indexKey}' failed: ${index.error}`));
+                    return false;
+                case 'stuck':
+                    console.log(chalk.yellow(`⚠️ Index '${indexKey}' is stuck, will retry...`));
+                    return false;
+                case 'processing':
+                    // Continue waiting
+                    break;
+                case 'deleting':
+                    console.log(chalk.yellow(`Index '${indexKey}' is being deleted`));
+                    break;
+                default:
+                    console.log(chalk.yellow(`Unknown status '${index.status}' for index '${indexKey}'`));
+                    break;
+            }
+            await delay(checkInterval);
+        }
+        catch (error) {
+            console.log(chalk.red(`Error checking index status: ${error}`));
+            return false;
+        }
+    }
+    // Timeout reached
+    console.log(chalk.yellow(`⏰ Timeout waiting for index '${indexKey}' (${maxWaitTime}ms)`));
+    // If we have retries left and this isn't the last retry, try recreating
+    if (retryCount < maxRetries) {
+        console.log(chalk.yellow(`🔄 Retrying index creation (attempt ${retryCount + 1}/${maxRetries})`));
+        return false; // Signal that we need to retry
+    }
+    return false;
+};
+/**
+ * Delete collection and recreate for index retry (reused from attributes.ts)
+ */
+const deleteAndRecreateCollectionForIndex = async (db, dbId, collection, retryCount) => {
+    try {
+        console.log(chalk.yellow(`🗑️ Deleting collection '${collection.name}' for index retry ${retryCount}`));
+        // Delete the collection
+        await db.deleteCollection(dbId, collection.$id);
+        console.log(chalk.yellow(`Deleted collection '${collection.name}'`));
+        // Wait a bit before recreating
+        await delay(2000);
+        // Recreate the collection
+        console.log(chalk.blue(`🔄 Recreating collection '${collection.name}'`));
+        const newCollection = await db.createCollection(dbId, collection.$id, collection.name, collection.$permissions, collection.documentSecurity, collection.enabled);
+        console.log(chalk.green(`✅ Recreated collection '${collection.name}'`));
+        return newCollection;
+    }
+    catch (error) {
+        console.log(chalk.red(`Failed to delete/recreate collection '${collection.name}': ${error}`));
+        return null;
+    }
+};
+/**
+ * Enhanced index creation with proper status monitoring and retry logic
+ */
+export const createOrUpdateIndexWithStatusCheck = async (dbId, db, collectionId, collection, index, retryCount = 0, maxRetries = 5) => {
+    console.log(chalk.blue(`Creating/updating index '${index.key}' (attempt ${retryCount + 1}/${maxRetries + 1})`));
+    try {
+        // First, try to create/update the index using existing logic
+        await createOrUpdateIndex(dbId, db, collectionId, index);
+        // Now wait for the index to become available
+        const success = await waitForIndexAvailable(db, dbId, collectionId, index.key, 60000, // 1 minute timeout
+        retryCount, maxRetries);
+        if (success) {
+            return true;
+        }
+        // If not successful and we have retries left, delete collection and try again
+        if (retryCount < maxRetries) {
+            console.log(chalk.yellow(`Index '${index.key}' failed/stuck, retrying...`));
+            // Get fresh collection data
+            const freshCollection = await db.getCollection(dbId, collectionId);
+            // Delete and recreate collection
+            const newCollection = await deleteAndRecreateCollectionForIndex(db, dbId, freshCollection, retryCount + 1);
+            if (newCollection) {
+                // Retry with the new collection
+                return await createOrUpdateIndexWithStatusCheck(dbId, db, newCollection.$id, newCollection, index, retryCount + 1, maxRetries);
+            }
+        }
+        console.log(chalk.red(`❌ Failed to create index '${index.key}' after ${maxRetries + 1} attempts`));
+        return false;
+    }
+    catch (error) {
+        console.log(chalk.red(`Error creating index '${index.key}': ${error}`));
+        if (retryCount < maxRetries) {
+            console.log(chalk.yellow(`Retrying index '${index.key}' due to error...`));
+            // Wait a bit before retry
+            await delay(2000);
+            return await createOrUpdateIndexWithStatusCheck(dbId, db, collectionId, collection, index, retryCount + 1, maxRetries);
+        }
+        return false;
+    }
+};
+/**
+ * Enhanced index creation with status monitoring for all indexes
+ */
+export const createOrUpdateIndexesWithStatusCheck = async (dbId, db, collectionId, collection, indexes) => {
+    console.log(chalk.blue(`Creating/updating ${indexes.length} indexes with status monitoring...`));
+    const failedIndexes = [];
+    for (const index of indexes) {
+        console.log(chalk.blue(`\n--- Processing index: ${index.key} ---`));
+        const success = await createOrUpdateIndexWithStatusCheck(dbId, db, collectionId, collection, index);
+        if (success) {
+            console.log(chalk.green(`✅ Successfully created index: ${index.key}`));
+            // Add delay between successful indexes
+            await delay(1000);
+        }
+        else {
+            console.log(chalk.red(`❌ Failed to create index: ${index.key}`));
+            failedIndexes.push(index.key);
+        }
+    }
+    if (failedIndexes.length > 0) {
+        console.log(chalk.red(`\n❌ Failed to create ${failedIndexes.length} indexes: ${failedIndexes.join(', ')}`));
+        return false;
+    }
+    console.log(chalk.green(`\n✅ Successfully created all ${indexes.length} indexes`));
+    return true;
+};
 export const createOrUpdateIndex = async (dbId, db, collectionId, index) => {
     const existingIndex = await db.listIndexes(dbId, collectionId, [
         Query.equal("key", index.key),