@joystick.js/db-canary 0.0.0-canary.2250 → 0.0.0-canary.2252
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client/database.js +1 -1
- package/dist/client/index.js +1 -1
- package/dist/server/cluster/master.js +4 -4
- package/dist/server/cluster/worker.js +1 -1
- package/dist/server/index.js +1 -1
- package/dist/server/lib/auto_index_manager.js +1 -1
- package/dist/server/lib/backup_manager.js +1 -1
- package/dist/server/lib/index_manager.js +1 -1
- package/dist/server/lib/operation_dispatcher.js +1 -1
- package/dist/server/lib/operations/admin.js +1 -1
- package/dist/server/lib/operations/bulk_write.js +1 -1
- package/dist/server/lib/operations/create_index.js +1 -1
- package/dist/server/lib/operations/delete_many.js +1 -1
- package/dist/server/lib/operations/delete_one.js +1 -1
- package/dist/server/lib/operations/find.js +1 -1
- package/dist/server/lib/operations/find_one.js +1 -1
- package/dist/server/lib/operations/insert_one.js +1 -1
- package/dist/server/lib/operations/update_one.js +1 -1
- package/dist/server/lib/send_response.js +1 -1
- package/dist/server/lib/tcp_protocol.js +1 -1
- package/package.json +2 -2
- package/src/client/database.js +92 -119
- package/src/client/index.js +279 -345
- package/src/server/cluster/master.js +265 -156
- package/src/server/cluster/worker.js +26 -18
- package/src/server/index.js +553 -330
- package/src/server/lib/auto_index_manager.js +85 -23
- package/src/server/lib/backup_manager.js +117 -70
- package/src/server/lib/index_manager.js +63 -25
- package/src/server/lib/operation_dispatcher.js +339 -168
- package/src/server/lib/operations/admin.js +343 -205
- package/src/server/lib/operations/bulk_write.js +458 -194
- package/src/server/lib/operations/create_index.js +127 -34
- package/src/server/lib/operations/delete_many.js +204 -67
- package/src/server/lib/operations/delete_one.js +164 -52
- package/src/server/lib/operations/find.js +563 -201
- package/src/server/lib/operations/find_one.js +544 -188
- package/src/server/lib/operations/insert_one.js +147 -52
- package/src/server/lib/operations/update_one.js +334 -93
- package/src/server/lib/send_response.js +37 -17
- package/src/server/lib/tcp_protocol.js +158 -53
- package/tests/server/cluster/master_read_write_operations.test.js +5 -14
- package/tests/server/integration/authentication_integration.test.js +18 -10
- package/tests/server/integration/backup_integration.test.js +35 -27
- package/tests/server/lib/api_key_manager.test.js +88 -32
- package/tests/server/lib/development_mode.test.js +2 -2
- package/tests/server/lib/operations/admin.test.js +20 -12
- package/tests/server/lib/operations/delete_one.test.js +10 -4
- package/tests/server/lib/operations/find_array_queries.test.js +261 -0

package/src/server/lib/auto_index_manager.js (+85 -23)

```diff
@@ -334,45 +334,107 @@ const is_auto_created_index = (collection_name, field_name) => {
   }
 };
 
+/**
+ * Checks if collection has reached maximum auto-index limit.
+ * @param {string} collection - Collection name
+ * @param {Object} config - Auto-index configuration
+ * @returns {boolean} True if at maximum limit
+ */
+const has_reached_auto_index_limit = (collection, config) => {
+  const existing_indexes = get_indexes('default', collection);
+  const auto_index_count = existing_indexes.filter(index =>
+    is_auto_created_index(collection, index.field)
+  ).length;
+
+  return auto_index_count >= config.max_auto_indexes_per_collection;
+};
+
+/**
+ * Checks if field is within monitoring window.
+ * @param {Object} stats - Field statistics
+ * @param {number} window_ms - Monitoring window in milliseconds
+ * @returns {boolean} True if within window
+ */
+const is_within_monitoring_window = (stats, window_ms) => {
+  const now = new Date();
+  const time_since_last_query = now - stats.last_queried;
+  return time_since_last_query <= window_ms;
+};
+
+/**
+ * Checks if field already has an index.
+ * @param {string} field - Field name
+ * @param {Array} existing_indexes - Array of existing indexes
+ * @returns {boolean} True if field has existing index
+ */
+const has_existing_index = (field, existing_indexes) => {
+  return existing_indexes.some(index => index.field === field);
+};
+
+/**
+ * Determines if field meets criteria for auto-indexing.
+ * @param {Object} stats - Field statistics
+ * @param {Object} config - Auto-index configuration
+ * @returns {boolean} True if field meets criteria
+ */
+const meets_auto_index_criteria = (stats, config) => {
+  const meets_frequency_threshold = stats.query_count >= config.frequency_threshold;
+  const meets_performance_threshold = stats.avg_time_ms >= config.performance_threshold_ms;
+  const has_slow_queries = stats.slow_query_count > 0;
+
+  return meets_frequency_threshold || (meets_performance_threshold && has_slow_queries);
+};
+
+/**
+ * Calculates priority score for index candidate.
+ * @param {Object} stats - Field statistics
+ * @param {Object} config - Auto-index configuration
+ * @returns {number} Priority score
+ */
+const calculate_candidate_priority = (stats, config) => {
+  return stats.slow_query_count * 2 + (stats.query_count / config.frequency_threshold);
+};
+
+/**
+ * Creates index candidate object.
+ * @param {string} collection - Collection name
+ * @param {string} field - Field name
+ * @param {Object} stats - Field statistics
+ * @param {Object} config - Auto-index configuration
+ * @returns {Object} Index candidate
+ */
+const create_index_candidate = (collection, field, stats, config) => {
+  return {
+    collection,
+    field,
+    stats: { ...stats },
+    priority: calculate_candidate_priority(stats, config)
+  };
+};
+
 const get_auto_index_candidates = () => {
   const config = get_auto_index_config();
   const candidates = [];
-  const now = new Date();
   const window_ms = config.monitoring_window_hours * 60 * 60 * 1000;
 
   for (const [collection, fields] of query_stats.entries()) {
-
-    const auto_index_count = existing_indexes.filter(index =>
-      is_auto_created_index(collection, index.field)
-    ).length;
-
-    if (auto_index_count >= config.max_auto_indexes_per_collection) {
+    if (has_reached_auto_index_limit(collection, config)) {
       continue;
     }
 
+    const existing_indexes = get_indexes('default', collection);
+
     for (const [field, stats] of fields.entries()) {
-
-
-      if (time_since_last_query > window_ms) {
+      if (!is_within_monitoring_window(stats, window_ms)) {
         continue;
       }
 
-
-      if (has_existing_index) {
+      if (has_existing_index(field, existing_indexes)) {
         continue;
       }
 
-
-
-      const has_slow_queries = stats.slow_query_count > 0;
-
-      if (meets_frequency_threshold || (meets_performance_threshold && has_slow_queries)) {
-        candidates.push({
-          collection,
-          field,
-          stats: { ...stats },
-          priority: stats.slow_query_count * 2 + (stats.query_count / config.frequency_threshold)
-        });
+      if (meets_auto_index_criteria(stats, config)) {
+        candidates.push(create_index_candidate(collection, field, stats, config));
       }
     }
   }
```
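
The hunk above turns the inline candidate-selection conditions in `get_auto_index_candidates` into named predicates. Here is a minimal standalone sketch of how the two qualification paths and the priority formula behave; the `config` and `stats` values are hypothetical, not taken from the package:

```js
// Standalone sketch of the auto-index selection rules shown in the diff above.
// All thresholds and stats are made-up example values.
const config = {
  frequency_threshold: 100,
  performance_threshold_ms: 50
};

const meets_auto_index_criteria = (stats, config) => {
  const meets_frequency_threshold = stats.query_count >= config.frequency_threshold;
  const meets_performance_threshold = stats.avg_time_ms >= config.performance_threshold_ms;
  const has_slow_queries = stats.slow_query_count > 0;

  return meets_frequency_threshold || (meets_performance_threshold && has_slow_queries);
};

const calculate_candidate_priority = (stats, config) => {
  return stats.slow_query_count * 2 + (stats.query_count / config.frequency_threshold);
};

// Hot field: qualifies on frequency alone.
console.log(meets_auto_index_criteria({ query_count: 150, avg_time_ms: 10, slow_query_count: 0 }, config)); // true
// Cold but slow field: qualifies via the performance-plus-slow-queries path.
console.log(meets_auto_index_criteria({ query_count: 3, avg_time_ms: 80, slow_query_count: 2 }, config)); // true
// Slow queries weigh double in the priority score: 2 * 2 + 150 / 100 = 5.5.
console.log(calculate_candidate_priority({ query_count: 150, slow_query_count: 2 }, config)); // 5.5
```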
package/src/server/lib/backup_manager.js (+117 -70)

```diff
@@ -117,6 +117,117 @@ const calculate_sha256 = (file_path) => {
   });
 };
 
+/**
+ * Generates a timestamped backup filename.
+ * @returns {string} Backup filename with timestamp
+ */
+const generate_backup_filename = () => {
+  const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
+  return `joystickdb-backup-${timestamp}.tar.gz`;
+};
+
+/**
+ * Creates temporary directory for backup operations.
+ * @returns {Promise<string>} Path to temporary directory
+ */
+const create_temp_backup_directory = async () => {
+  const temp_dir = resolve('./temp_backup');
+  await mkdir(temp_dir, { recursive: true });
+  return temp_dir;
+};
+
+/**
+ * Creates compressed tar archive of database data.
+ * @param {string} temp_backup_path - Path for the backup file
+ * @returns {Promise<void>} Promise that resolves when compression completes
+ * @throws {Error} When tar process fails
+ */
+const create_compressed_archive = async (temp_backup_path) => {
+  const tar_process = spawn('tar', [
+    '--sparse',
+    '-czf',
+    temp_backup_path,
+    '-C',
+    './data',
+    '.'
+  ], {
+    stdio: ['pipe', 'pipe', 'pipe']
+  });
+
+  let tar_output = '';
+  let tar_error = '';
+
+  tar_process.stdout.on('data', (data) => {
+    tar_output += data.toString();
+  });
+
+  tar_process.stderr.on('data', (data) => {
+    tar_error += data.toString();
+  });
+
+  const tar_exit_code = await new Promise((resolve) => {
+    tar_process.on('close', resolve);
+  });
+
+  if (tar_exit_code !== 0) {
+    throw new Error(`Tar process failed with exit code ${tar_exit_code}: ${tar_error}`);
+  }
+};
+
+/**
+ * Uploads backup file to S3 with metadata.
+ * @param {string} temp_backup_path - Path to backup file
+ * @param {string} backup_filename - Name of backup file
+ * @param {string} checksum - SHA256 checksum
+ * @param {number} size_bytes - File size in bytes
+ * @param {Object} client - S3 client
+ * @param {string} bucket - S3 bucket name
+ * @returns {Promise<void>} Promise that resolves when upload completes
+ */
+const upload_backup_to_s3 = async (temp_backup_path, backup_filename, checksum, size_bytes, client, bucket) => {
+  const upload_stream = createReadStream(temp_backup_path);
+
+  const upload_command = new PutObjectCommand({
+    Bucket: bucket,
+    Key: backup_filename,
+    Body: upload_stream,
+    Metadata: {
+      checksum,
+      created_at: new Date().toISOString(),
+      size_bytes: size_bytes.toString()
+    }
+  });
+
+  await client.send(upload_command);
+};
+
+/**
+ * Cleans up temporary backup files and directory.
+ * @param {string} temp_backup_path - Path to backup file
+ * @param {string} temp_dir - Path to temporary directory
+ * @returns {Promise<void>} Promise that resolves when cleanup completes
+ */
+const cleanup_temp_backup_files = async (temp_backup_path, temp_dir) => {
+  unlinkSync(temp_backup_path);
+  await rm(temp_dir, { recursive: true, force: true });
+};
+
+/**
+ * Handles cleanup on backup failure.
+ * @param {Object} log - Logger instance
+ * @returns {Promise<void>} Promise that resolves when cleanup completes
+ */
+const handle_backup_failure_cleanup = async (log) => {
+  try {
+    const temp_dir = resolve('./temp_backup');
+    if (existsSync(temp_dir)) {
+      await rm(temp_dir, { recursive: true, force: true });
+    }
+  } catch (cleanup_error) {
+    log.warn('Failed to clean up temporary backup files', { error: cleanup_error.message });
+  }
+};
+
 /**
  * Creates a compressed backup of the database and uploads it to S3.
  * Generates a timestamped tar.gz archive with sparse file support, calculates checksums,
```
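
The new `upload_backup_to_s3` helper isolates the upload stage. For reference, here is a minimal sketch of the same `PutObjectCommand` call shape, assuming the `@aws-sdk/client-s3` v3 client; the region, bucket name, and usage values are hypothetical:

```js
// Sketch of streaming a file to S3 with metadata, as upload_backup_to_s3 does above.
// Bucket, region, and file path are hypothetical placeholders.
import { createReadStream } from 'node:fs';
import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';

const client = new S3Client({ region: 'us-east-1' });

const upload_file = async (file_path, key, checksum, size_bytes) => {
  await client.send(new PutObjectCommand({
    Bucket: 'example-backups',          // hypothetical bucket
    Key: key,
    Body: createReadStream(file_path),  // streamed, so large archives are not buffered in memory
    Metadata: {
      checksum,
      created_at: new Date().toISOString(),
      size_bytes: size_bytes.toString() // S3 metadata values must be strings
    }
  }));
};

// await upload_file('./backup.tar.gz', 'joystickdb-backup.tar.gz', 'abc123', 1024);
```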
With those helpers in place, `create_backup` in the same file reduces to a linear sequence of calls:

```diff
@@ -135,52 +246,15 @@ const create_backup = async () => {
   const backup_start = Date.now();
 
   try {
-    // NOTE: Ensure S3 is configured.
     const { client, bucket } = get_s3_client();
-
-
-    const temp_dir = resolve('./temp_backup');
-    await mkdir(temp_dir, { recursive: true });
-
-    // NOTE: Generate backup filename.
-    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
-    const backup_filename = `joystickdb-backup-${timestamp}.tar.gz`;
+    const temp_dir = await create_temp_backup_directory();
+    const backup_filename = generate_backup_filename();
     const temp_backup_path = join(temp_dir, backup_filename);
 
     log.info('Starting backup creation', { backup_filename, temp_backup_path });
 
-
-    const tar_process = spawn('tar', [
-      '--sparse',
-      '-czf',
-      temp_backup_path,
-      '-C',
-      './data',
-      '.'
-    ], {
-      stdio: ['pipe', 'pipe', 'pipe']
-    });
+    await create_compressed_archive(temp_backup_path);
 
-    let tar_output = '';
-    let tar_error = '';
-
-    tar_process.stdout.on('data', (data) => {
-      tar_output += data.toString();
-    });
-
-    tar_process.stderr.on('data', (data) => {
-      tar_error += data.toString();
-    });
-
-    const tar_exit_code = await new Promise((resolve) => {
-      tar_process.on('close', resolve);
-    });
-
-    if (tar_exit_code !== 0) {
-      throw new Error(`Tar process failed with exit code ${tar_exit_code}: ${tar_error}`);
-    }
-
-    // NOTE: Calculate checksum.
     const checksum = await calculate_sha256(temp_backup_path);
     const backup_stats = statSync(temp_backup_path);
 
@@ -191,25 +265,8 @@ const create_backup = async () => {
       checksum
     });
 
-
-
-
-    const upload_command = new PutObjectCommand({
-      Bucket: bucket,
-      Key: backup_filename,
-      Body: upload_stream,
-      Metadata: {
-        checksum,
-        created_at: new Date().toISOString(),
-        size_bytes: backup_stats.size.toString()
-      }
-    });
-
-    await client.send(upload_command);
-
-    // NOTE: Clean up temporary file.
-    unlinkSync(temp_backup_path);
-    await rm(temp_dir, { recursive: true, force: true });
+    await upload_backup_to_s3(temp_backup_path, backup_filename, checksum, backup_stats.size, client, bucket);
+    await cleanup_temp_backup_files(temp_backup_path, temp_dir);
 
     const backup_duration = Date.now() - backup_start;
 
@@ -231,17 +288,7 @@ const create_backup = async () => {
     };
   } catch (error) {
     log.error('Backup creation failed', { error: error.message });
-
-    // NOTE: Clean up on failure.
-    try {
-      const temp_dir = resolve('./temp_backup');
-      if (existsSync(temp_dir)) {
-        await rm(temp_dir, { recursive: true, force: true });
-      }
-    } catch (cleanup_error) {
-      log.warn('Failed to clean up temporary backup files', { error: cleanup_error.message });
-    }
-
+    await handle_backup_failure_cleanup(log);
     throw error;
   }
 };
```
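
The pattern inside `create_compressed_archive` (spawn a process, buffer stderr, await the `close` event, throw on a non-zero exit code) is reusable on its own. A standalone sketch follows; the archive path and source directory are hypothetical, and `tar` must be on the PATH:

```js
// Sketch of the spawn-and-await pattern used by create_compressed_archive above.
import { spawn } from 'node:child_process';

const run_tar = async (archive_path, source_dir) => {
  const tar_process = spawn('tar', ['--sparse', '-czf', archive_path, '-C', source_dir, '.']);

  let stderr = '';
  tar_process.stderr.on('data', (data) => {
    stderr += data.toString();
  });

  // 'close' fires with the exit code once the stdio streams have ended.
  const exit_code = await new Promise((resolve) => {
    tar_process.on('close', resolve);
  });

  if (exit_code !== 0) {
    throw new Error(`tar exited with code ${exit_code}: ${stderr}`);
  }
};

await run_tar('./backup.tar.gz', './data'); // top-level await in an ES module
```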
package/src/server/lib/index_manager.js (+63 -25)

```diff
@@ -124,50 +124,64 @@ const compare_index_definitions = (existing, new_options) => {
 };
 
 /**
- *
+ * Clears existing index entries for a field.
  * @param {string} database_name - Name of the database
  * @param {string} collection_name - Name of the collection
- * @param {string} field_name - Name of the field
- * @param {Object} options - Index options (unique, sparse)
+ * @param {string} field_name - Name of the field
  * @param {Object} index_db - Index database instance
- * @param {Object} main_db - Main database instance
- * @throws {Error} When unique constraint violations are found
  */
-const rebuild_index = (database_name, collection_name, field_name, options, index_db, main_db) => {
+const clear_existing_index_entries = (database_name, collection_name, field_name, index_db) => {
   const index_prefix = `index:${database_name}:${collection_name}:${field_name}:`;
   const range = index_db.getRange({ start: index_prefix, end: index_prefix + '\xFF' });
 
   for (const { key } of range) {
     index_db.remove(key);
   }
-
+};
+
+/**
+ * Validates unique constraint for index rebuild.
+ * @param {string} database_name - Name of the database
+ * @param {string} collection_name - Name of the collection
+ * @param {string} field_name - Name of the field
+ * @param {Object} main_db - Main database instance
+ * @throws {Error} When duplicate values found for unique index
+ */
+const validate_unique_constraint = (database_name, collection_name, field_name, main_db) => {
   const collection_prefix = `${database_name}:${collection_name}:`;
   const document_range = main_db.getRange({ start: collection_prefix, end: collection_prefix + '\xFF' });
+  const value_counts = new Map();
 
-
-
-  const
+  for (const { key, value: document_data } of document_range) {
+    const document = JSON.parse(document_data);
+    const field_value = get_field_value(document, field_name);
 
-
-    const
-    const
+    if (field_value !== undefined && field_value !== null) {
+      const value_str = typeof field_value === 'object' ? JSON.stringify(field_value) : String(field_value);
+      const count = value_counts.get(value_str) || 0;
+      value_counts.set(value_str, count + 1);
 
-    if (
-
-    const count = value_counts.get(value_str) || 0;
-    value_counts.set(value_str, count + 1);
-
-    if (count >= 1) {
-      throw new Error(`Duplicate value for unique index on ${database_name}.${collection_name}.${field_name}: ${field_value}`);
-    }
+      if (count >= 1) {
+        throw new Error(`Duplicate value for unique index on ${database_name}.${collection_name}.${field_name}: ${field_value}`);
+      }
     }
   }
+};
+
+/**
+ * Rebuilds index entries for all documents in collection.
+ * @param {string} database_name - Name of the database
+ * @param {string} collection_name - Name of the collection
+ * @param {string} field_name - Name of the field
+ * @param {Object} options - Index options
+ * @param {Object} index_db - Index database instance
+ * @param {Object} main_db - Main database instance
+ */
+const rebuild_index_entries = (database_name, collection_name, field_name, options, index_db, main_db) => {
+  const collection_prefix = `${database_name}:${collection_name}:`;
+  const document_range = main_db.getRange({ start: collection_prefix, end: collection_prefix + '\xFF' });
 
-
-  const document_range_rebuild = main_db.getRange({ start: collection_prefix, end: collection_prefix + '\xFF' });
-
-  for (const { key, value: document_data } of document_range_rebuild) {
+  for (const { key, value: document_data } of document_range) {
     const document = JSON.parse(document_data);
     const field_value = get_field_value(document, field_name);
 
@@ -188,6 +202,26 @@ const rebuild_index = (database_name, collection_name, field_name, options, inde
   }
 };
 
+/**
+ * Rebuilds an index by clearing existing entries and re-indexing all documents.
+ * @param {string} database_name - Name of the database
+ * @param {string} collection_name - Name of the collection
+ * @param {string} field_name - Name of the field to index
+ * @param {Object} options - Index options (unique, sparse)
+ * @param {Object} index_db - Index database instance
+ * @param {Object} main_db - Main database instance
+ * @throws {Error} When unique constraint violations are found
+ */
+const rebuild_index = (database_name, collection_name, field_name, options, index_db, main_db) => {
+  clear_existing_index_entries(database_name, collection_name, field_name, index_db);
+
+  if (options.unique) {
+    validate_unique_constraint(database_name, collection_name, field_name, main_db);
+  }
+
+  rebuild_index_entries(database_name, collection_name, field_name, options, index_db, main_db);
+};
+
 /**
  * Creates or updates an index on a collection field with support for unique and sparse options.
  * @param {string} database_name - Name of the database
```
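
The unique-constraint check boils down to counting normalized field values in a `Map` and throwing on the first repeat. The same logic against plain in-memory documents, as a self-contained sketch; the sample documents are made up, and the real code resolves nested paths via `get_field_value` and reads from an LMDB range instead of an array:

```js
// Sketch of the duplicate-detection logic in validate_unique_constraint,
// run against in-memory documents instead of a database range.
const assert_unique = (documents, field_name) => {
  const value_counts = new Map();

  for (const document of documents) {
    const field_value = document[field_name]; // simplified: no nested-path resolution here

    if (field_value !== undefined && field_value !== null) {
      // Objects are normalized via JSON so structurally equal values collide.
      const value_str = typeof field_value === 'object' ? JSON.stringify(field_value) : String(field_value);
      const count = value_counts.get(value_str) || 0;
      value_counts.set(value_str, count + 1);

      if (count >= 1) {
        throw new Error(`Duplicate value for unique index on ${field_name}: ${field_value}`);
      }
    }
  }
};

assert_unique([{ email: 'a@test.com' }, { email: 'b@test.com' }], 'email'); // passes
try {
  assert_unique([{ email: 'a@test.com' }, { email: 'a@test.com' }], 'email');
} catch (error) {
  console.log(error.message); // Duplicate value for unique index on email: a@test.com
}
```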
The same file also adds an input guard to `update_indexes_on_insert`:

```diff
@@ -434,6 +468,10 @@ const update_indexes_on_insert = async (database_name, collection_name, document
   const log = create_context_logger();
   const index_db = get_index_database();
 
+  if (!document) {
+    throw new Error('Document is required for index update');
+  }
+
   try {
     const indexes = get_indexes(database_name, collection_name);
 
```
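
The guard makes a missing document fail fast with a descriptive error instead of surfacing later inside the indexing loop. A hypothetical function with the same guard, to show the caller-visible behavior:

```js
// Hypothetical async indexing function using the same fail-fast guard as the diff above.
const update_indexes = async (database_name, collection_name, document) => {
  if (!document) {
    throw new Error('Document is required for index update');
  }
  // ...indexing work would happen here...
};

// The rejection surfaces immediately with a clear message:
await update_indexes('default', 'users', undefined).catch((error) => {
  console.log(error.message); // "Document is required for index update"
});
```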