@memberjunction/metadata-sync 2.67.0 → 2.68.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +57 -0
  2. package/dist/config.d.ts +4 -0
  3. package/dist/config.js.map +1 -1
  4. package/dist/index.d.ts +2 -0
  5. package/dist/index.js +5 -1
  6. package/dist/index.js.map +1 -1
  7. package/dist/lib/EntityPropertyExtractor.d.ts +60 -0
  8. package/dist/lib/EntityPropertyExtractor.js +166 -0
  9. package/dist/lib/EntityPropertyExtractor.js.map +1 -0
  10. package/dist/lib/FieldExternalizer.d.ts +62 -0
  11. package/dist/lib/FieldExternalizer.js +177 -0
  12. package/dist/lib/FieldExternalizer.js.map +1 -0
  13. package/dist/lib/RecordProcessor.d.ts +82 -0
  14. package/dist/lib/RecordProcessor.js +309 -0
  15. package/dist/lib/RecordProcessor.js.map +1 -0
  16. package/dist/lib/RelatedEntityHandler.d.ts +75 -0
  17. package/dist/lib/RelatedEntityHandler.js +273 -0
  18. package/dist/lib/RelatedEntityHandler.js.map +1 -0
  19. package/dist/lib/file-write-batch.d.ts +61 -0
  20. package/dist/lib/file-write-batch.js +180 -0
  21. package/dist/lib/file-write-batch.js.map +1 -0
  22. package/dist/lib/json-write-helper.d.ts +39 -0
  23. package/dist/lib/json-write-helper.js +105 -0
  24. package/dist/lib/json-write-helper.js.map +1 -0
  25. package/dist/services/FileResetService.js +2 -1
  26. package/dist/services/FileResetService.js.map +1 -1
  27. package/dist/services/PullService.d.ts +22 -2
  28. package/dist/services/PullService.js +268 -173
  29. package/dist/services/PullService.js.map +1 -1
  30. package/dist/services/PushService.js +3 -2
  31. package/dist/services/PushService.js.map +1 -1
  32. package/dist/services/WatchService.js +3 -2
  33. package/dist/services/WatchService.js.map +1 -1
  34. package/package.json +7 -7
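
The bulk of the change is in package/dist/services/PullService.js (diff below): per-record fs.writeJson calls are replaced by a FileWriteBatch that queues writes and flushes them in one pass, a RecordProcessor takes over record serialization, and backups created before updates are now tracked so they can be cleaned up on success or used to roll files back on failure. As a rough sketch of that queue/flush/rollback shape (hypothetical simplified TypeScript, not the package's actual signatures):

    import fs from 'fs-extra';

    // Minimal stand-in for the real FileWriteBatch in dist/lib/file-write-batch.js;
    // only the calls visible in this diff (clear/queueWrite/flush) are mirrored here.
    class FileWriteBatchLike {
        private pending = new Map<string, unknown>();
        clear(): void { this.pending.clear(); }
        queueWrite(filePath: string, data: unknown): void { this.pending.set(filePath, data); }
        async flush(): Promise<number> {
            for (const [filePath, data] of this.pending) {
                await fs.writeJson(filePath, data, { spaces: 2 }); // all disk writes happen here
            }
            const count = this.pending.size;
            this.pending.clear();
            return count;
        }
    }

    // The new pull() follows this pattern: queue everything during processing,
    // flush once at the end, and restore from tracked backups if anything throws.
    // processAll and rollback are placeholders for the service's own logic.
    async function pullLike(batch: FileWriteBatchLike, processAll: () => Promise<void>, rollback: () => Promise<void>): Promise<void> {
        batch.clear();
        try {
            await processAll(); // processors call batch.queueWrite(...) instead of writing directly
            await batch.flush();
        }
        catch (error) {
            await rollback(); // copy the tracked .backups files over the modified originals
            throw error;
        }
    }

The real class also merges per-record array updates by primary key (queueArrayUpdate/queueSingleUpdate in the diff below), and the service deletes its .backups copies once a pull succeeds.
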
@@ -9,14 +9,26 @@ const path_1 = __importDefault(require("path"));
  const core_1 = require("@memberjunction/core");
  const config_1 = require("../config");
  const config_manager_1 = require("../lib/config-manager");
+ const file_write_batch_1 = require("../lib/file-write-batch");
+ const RecordProcessor_1 = require("../lib/RecordProcessor");
  class PullService {
      syncEngine;
      contextUser;
+     createdBackupFiles = [];
+     createdBackupDirs = new Set();
+     fileWriteBatch;
+     recordProcessor;
      constructor(syncEngine, contextUser) {
          this.syncEngine = syncEngine;
          this.contextUser = contextUser;
+         this.fileWriteBatch = new file_write_batch_1.FileWriteBatch();
+         this.recordProcessor = new RecordProcessor_1.RecordProcessor(syncEngine, contextUser);
      }
      async pull(options, callbacks) {
+         // Clear any previous batch operations
+         this.fileWriteBatch.clear();
+         this.createdBackupFiles = [];
+         this.createdBackupDirs.clear();
          let targetDir;
          let entityConfig;
          // Check if we should use a specific target directory
@@ -55,7 +67,7 @@ class PullService {
              }
          }
          // Show configuration notice only if relevant and in verbose mode
-         if (options.verbose && entityConfig.pull?.appendRecordsToExistingFile && entityConfig.pull?.newFileName) {
+         if (options.verbose && entityConfig?.pull?.appendRecordsToExistingFile && entityConfig?.pull?.newFileName) {
              const targetFile = path_1.default.join(targetDir, entityConfig.pull.newFileName.endsWith('.json')
                  ? entityConfig.pull.newFileName
                  : `${entityConfig.pull.newFileName}.json`);
@@ -70,7 +82,7 @@ class PullService {
          if (options.filter) {
              filter = options.filter;
          }
-         else if (entityConfig.pull?.filter) {
+         else if (entityConfig?.pull?.filter) {
              filter = entityConfig.pull.filter;
          }
          const result = await rv.RunView({
@@ -92,49 +104,40 @@ class PullService {
                  targetDir
              };
          }
-         // Check if we need to wait for async property loading
-         if (entityConfig.pull?.externalizeFields && result.Results.length > 0) {
-             await this.handleAsyncPropertyLoading(options.entity, entityConfig, options.verbose, callbacks);
-         }
-         // Process records
-         const pullResult = await this.processRecords(result.Results, options, targetDir, entityConfig, callbacks);
-         return {
-             ...pullResult,
-             targetDir
-         };
-     }
-     async handleAsyncPropertyLoading(entityName, entityConfig, verbose, callbacks) {
-         const metadata = new core_1.Metadata();
-         const entityInfo = metadata.EntityByName(entityName);
-         if (!entityInfo)
-             return;
-         const externalizeConfig = entityConfig.pull.externalizeFields;
-         let fieldsToExternalize = [];
-         if (Array.isArray(externalizeConfig)) {
-             if (externalizeConfig.length > 0 && typeof externalizeConfig[0] === 'string') {
-                 fieldsToExternalize = externalizeConfig;
+         // Process records with error handling and rollback
+         let pullResult;
+         try {
+             pullResult = await this.processRecords(result.Results, options, targetDir, entityConfig, callbacks);
+             // Write all batched file changes at once
+             if (!options.dryRun) {
+                 const filesWritten = await this.fileWriteBatch.flush();
+                 if (options.verbose && filesWritten > 0) {
+                     callbacks?.onSuccess?.(`Wrote ${filesWritten} files with consistent property ordering`);
+                 }
              }
-             else {
-                 fieldsToExternalize = externalizeConfig
-                     .map(item => item.field);
+             // Operation succeeded - clean up backup files
+             if (!options.dryRun) {
+                 await this.cleanupBackupFiles();
              }
          }
-         else {
-             fieldsToExternalize = Object.keys(externalizeConfig);
-         }
-         // Get all field names from entity metadata
-         const metadataFieldNames = entityInfo.Fields.map(f => f.Name);
-         // Check if any externalized fields are NOT in metadata (likely computed properties)
-         const computedFields = fieldsToExternalize.filter(f => !metadataFieldNames.includes(f));
-         if (computedFields.length > 0) {
-             if (verbose) {
-                 callbacks?.onProgress?.(`Waiting 5 seconds for async property loading in ${entityName} (${computedFields.join(', ')})...`);
-             }
-             await new Promise(resolve => setTimeout(resolve, 5000));
-             if (verbose) {
-                 callbacks?.onSuccess?.('Async property loading wait complete');
+         catch (error) {
+             callbacks?.onError?.(`Pull operation failed: ${error.message || error}`);
+             // Attempt to rollback file changes if not in dry run mode
+             if (!options.dryRun) {
+                 try {
+                     await this.rollbackFileChanges(callbacks);
+                     callbacks?.onWarn?.('File changes have been rolled back due to operation failure');
+                 }
+                 catch (rollbackError) {
+                     callbacks?.onError?.(`Rollback failed: ${rollbackError.message || rollbackError}`);
+                 }
              }
+             throw error;
          }
+         return {
+             ...pullResult,
+             targetDir
+         };
      }
      async processRecords(records, options, targetDir, entityConfig, callbacks) {
          const entityInfo = this.syncEngine.getEntityInfo(options.entity);
@@ -149,7 +152,9 @@ class PullService {
          // If multi-file flag is set, collect all records
          if (options.multiFile) {
              const allRecords = [];
-             for (const record of records) {
+             const errors = [];
+             // Process records in parallel for multi-file mode
+             const recordPromises = records.map(async (record, index) => {
                  try {
                      // Build primary key
                      const primaryKey = {};
@@ -157,23 +162,37 @@ class PullService {
                          primaryKey[pk.Name] = record[pk.Name];
                      }
                      // Process record for multi-file
-                     const recordData = await this.processRecordData(record, primaryKey, targetDir, entityConfig, options.verbose, true);
-                     allRecords.push(recordData);
-                     processed++;
-                     if (options.verbose) {
-                         callbacks?.onProgress?.(`Processing records (${processed}/${records.length})`);
-                     }
+                     const recordData = await this.recordProcessor.processRecord(record, primaryKey, targetDir, entityConfig, options.verbose, true);
+                     return { success: true, recordData, index };
                  }
                  catch (error) {
-                     callbacks?.onWarn?.(`Failed to process record: ${error.message || error}`);
+                     const errorMessage = `Failed to process record ${index + 1}: ${error.message || error}`;
+                     errors.push(errorMessage);
+                     callbacks?.onWarn?.(errorMessage);
+                     return { success: false, recordData: null, index };
+                 }
+             });
+             const recordResults = await Promise.all(recordPromises);
+             // Collect successful records
+             for (const result of recordResults) {
+                 if (result.success && result.recordData) {
+                     allRecords.push(result.recordData);
+                     processed++;
                  }
              }
-             // Write all records to single file
+             if (options.verbose) {
+                 callbacks?.onProgress?.(`Processed ${processed}/${records.length} records in parallel`);
+             }
+             // Queue all records to single file for batched write
              if (allRecords.length > 0) {
                  const fileName = options.multiFile.endsWith('.json') ? options.multiFile : `${options.multiFile}.json`;
                  const filePath = path_1.default.join(targetDir, fileName);
-                 await fs_extra_1.default.writeJson(filePath, allRecords, { spaces: 2 });
-                 callbacks?.onSuccess?.(`Pulled ${processed} records to ${path_1.default.basename(filePath)}`);
+                 this.fileWriteBatch.queueWrite(filePath, allRecords);
+                 callbacks?.onSuccess?.(`Queued ${processed} records for ${path_1.default.basename(filePath)}`);
+             }
+             // If there were errors during parallel processing, throw them
+             if (errors.length > 0) {
+                 throw new Error(`Multi-file processing completed with ${errors.length} errors:\n${errors.join('\n')}`);
              }
          }
          else {
@@ -195,6 +214,107 @@ class PullService {
          }
          return { processed, created, updated, skipped };
      }
+     /**
+      * Clean up backup files created during the pull operation
+      * Should be called after successful pull operations to remove persistent backup files
+      */
+     async cleanupBackupFiles() {
+         if (this.createdBackupFiles.length === 0 && this.createdBackupDirs.size === 0) {
+             return;
+         }
+         const errors = [];
+         // Remove backup files
+         for (const backupPath of this.createdBackupFiles) {
+             try {
+                 if (await fs_extra_1.default.pathExists(backupPath)) {
+                     await fs_extra_1.default.remove(backupPath);
+                 }
+             }
+             catch (error) {
+                 errors.push(`Failed to remove backup file ${backupPath}: ${error instanceof Error ? error.message : String(error)}`);
+             }
+         }
+         // Remove empty backup directories
+         for (const backupDir of this.createdBackupDirs) {
+             try {
+                 await this.removeEmptyBackupDirectory(backupDir);
+             }
+             catch (error) {
+                 errors.push(`Failed to remove backup directory ${backupDir}: ${error instanceof Error ? error.message : String(error)}`);
+             }
+         }
+         // Clear the tracking arrays
+         this.createdBackupFiles = [];
+         this.createdBackupDirs.clear();
+         if (errors.length > 0) {
+             throw new Error(`Backup cleanup completed with errors:\n${errors.join('\n')}`);
+         }
+     }
+     /**
+      * Remove a backup directory if it's empty
+      */
+     async removeEmptyBackupDirectory(backupDir) {
+         try {
+             // Check if directory exists
+             if (!(await fs_extra_1.default.pathExists(backupDir))) {
+                 return;
+             }
+             // Only remove if it's actually a .backups directory for safety
+             if (!backupDir.endsWith('.backups')) {
+                 return;
+             }
+             // Check if directory is empty
+             const files = await fs_extra_1.default.readdir(backupDir);
+             if (files.length === 0) {
+                 await fs_extra_1.default.remove(backupDir);
+             }
+         }
+         catch (error) {
+             // Log error but don't throw - cleanup should be non-critical
+             // The error will be caught by the caller and included in the error list
+             throw error;
+         }
+     }
+     /**
+      * Get the list of backup files created during the current pull operation
+      */
+     getCreatedBackupFiles() {
+         return [...this.createdBackupFiles];
+     }
+     /**
+      * Rollback file changes by restoring from backup files
+      * Called when pull operation fails after files have been modified
+      */
+     async rollbackFileChanges(callbacks) {
+         if (this.createdBackupFiles.length === 0) {
+             callbacks?.onLog?.('No backup files found - no rollback needed');
+             return;
+         }
+         callbacks?.onProgress?.(`Rolling back ${this.createdBackupFiles.length} file changes...`);
+         const errors = [];
+         let restoredCount = 0;
+         for (const backupPath of this.createdBackupFiles) {
+             try {
+                 // Extract original file path from backup path
+                 const backupDir = path_1.default.dirname(backupPath);
+                 const backupFileName = path_1.default.basename(backupPath);
+                 // Remove timestamp and .backup extension to get original filename
+                 const originalFileName = backupFileName.replace(/\.\d{4}-\d{2}-\d{2}T\d{2}-\d{2}-\d{2}-\d{3}Z\.backup$/, '.json');
+                 const originalFilePath = path_1.default.join(path_1.default.dirname(backupDir), originalFileName);
+                 if (await fs_extra_1.default.pathExists(backupPath)) {
+                     await fs_extra_1.default.copy(backupPath, originalFilePath);
+                     restoredCount++;
+                 }
+             }
+             catch (error) {
+                 errors.push(`Failed to restore ${backupPath}: ${error instanceof Error ? error.message : String(error)}`);
+             }
+         }
+         if (errors.length > 0) {
+             throw new Error(`Rollback completed with ${errors.length} errors (${restoredCount} files restored):\n${errors.join('\n')}`);
+         }
+         callbacks?.onSuccess?.(`Successfully rolled back ${restoredCount} file changes`);
+     }
      async processIndividualRecords(records, options, targetDir, entityConfig, entityInfo, callbacks) {
          let processed = 0;
          let updated = 0;
@@ -251,160 +371,131 @@ class PullService {
          }
          // Track which files have been backed up to avoid duplicates
          const backedUpFiles = new Set();
-         // Process existing records updates
-         for (const { record, primaryKey, filePath } of existingRecordsToUpdate) {
-             try {
-                 callbacks?.onProgress?.(`Updating existing records (${updated + 1}/${existingRecordsToUpdate.length})`);
-                 // Create backup if configured (only once per file)
-                 if (entityConfig.pull?.backupBeforeUpdate && !backedUpFiles.has(filePath)) {
-                     await this.createBackup(filePath, entityConfig.pull?.backupDirectory);
-                     backedUpFiles.add(filePath);
-                 }
-                 // Load existing file data
-                 const existingData = await fs_extra_1.default.readJson(filePath);
-                 // Find the specific existing record that matches this primary key
-                 let existingRecordData;
-                 if (Array.isArray(existingData)) {
-                     // Find the matching record in the array
-                     const matchingRecord = existingData.find(r => this.createPrimaryKeyLookup(r.primaryKey || {}) === this.createPrimaryKeyLookup(primaryKey));
-                     existingRecordData = matchingRecord || existingData[0]; // Fallback to first if not found
-                 }
-                 else {
-                     existingRecordData = existingData;
-                 }
-                 // Process the new record data (isNewRecord = false for updates)
-                 const newRecordData = await this.processRecordData(record, primaryKey, targetDir, entityConfig, options.verbose, false, existingRecordData);
-                 // Apply merge strategy
-                 const mergedData = await this.mergeRecords(existingRecordData, newRecordData, entityConfig.pull?.mergeStrategy || 'merge', entityConfig.pull?.preserveFields || []);
-                 // Write updated data
-                 if (Array.isArray(existingData)) {
-                     // Update the record in the array
-                     const index = existingData.findIndex(r => this.createPrimaryKeyLookup(r.primaryKey || {}) === this.createPrimaryKeyLookup(primaryKey));
-                     if (index >= 0) {
-                         existingData[index] = mergedData;
-                         await fs_extra_1.default.writeJson(filePath, existingData, { spaces: 2 });
+         const errors = [];
+         // Process existing records updates in parallel
+         if (existingRecordsToUpdate.length > 0) {
+             callbacks?.onProgress?.(`Updating existing records (parallel processing)`);
+             const updatePromises = existingRecordsToUpdate.map(async ({ record, primaryKey, filePath }, index) => {
+                 try {
+                     // Create backup if configured (only once per file)
+                     if (entityConfig.pull?.backupBeforeUpdate && !backedUpFiles.has(filePath)) {
+                         await this.createBackup(filePath, entityConfig.pull?.backupDirectory);
+                         backedUpFiles.add(filePath);
                      }
+                     // Load existing file data
+                     const existingData = await fs_extra_1.default.readJson(filePath);
+                     // Find the specific existing record that matches this primary key
+                     let existingRecordData;
+                     if (Array.isArray(existingData)) {
+                         // Find the matching record in the array
+                         const matchingRecord = existingData.find(r => this.createPrimaryKeyLookup(r.primaryKey || {}) === this.createPrimaryKeyLookup(primaryKey));
+                         existingRecordData = matchingRecord || existingData[0]; // Fallback to first if not found
+                     }
+                     else {
+                         existingRecordData = existingData;
+                     }
+                     // Process the new record data (isNewRecord = false for updates)
+                     const newRecordData = await this.recordProcessor.processRecord(record, primaryKey, targetDir, entityConfig, options.verbose, false, existingRecordData);
+                     // Apply merge strategy
+                     const mergedData = await this.mergeRecords(existingRecordData, newRecordData, entityConfig.pull?.mergeStrategy || 'merge', entityConfig.pull?.preserveFields || []);
+                     // Queue updated data for batched write
+                     if (Array.isArray(existingData)) {
+                         // Queue array update - batch will handle merging
+                         const primaryKeyLookup = this.createPrimaryKeyLookup(primaryKey);
+                         this.fileWriteBatch.queueArrayUpdate(filePath, mergedData, primaryKeyLookup);
+                     }
+                     else {
+                         // Queue single record update
+                         this.fileWriteBatch.queueSingleUpdate(filePath, mergedData);
+                     }
+                     if (options.verbose) {
+                         callbacks?.onLog?.(`Updated: ${filePath}`);
+                     }
+                     return { success: true, index };
                  }
-                 else {
-                     await fs_extra_1.default.writeJson(filePath, mergedData, { spaces: 2 });
-                 }
-                 updated++;
-                 processed++;
-                 if (options.verbose) {
-                     callbacks?.onLog?.(`Updated: ${filePath}`);
+                 catch (error) {
+                     const errorMessage = `Failed to update record ${index + 1}: ${error.message || error}`;
+                     errors.push(errorMessage);
+                     callbacks?.onWarn?.(errorMessage);
+                     return { success: false, index };
                  }
-             }
-             catch (error) {
-                 callbacks?.onWarn?.(`Failed to update record: ${error.message || error}`);
+             });
+             const updateResults = await Promise.all(updatePromises);
+             updated = updateResults.filter(r => r.success).length;
+             processed += updated;
+             if (options.verbose) {
+                 callbacks?.onSuccess?.(`Completed ${updated}/${existingRecordsToUpdate.length} record updates`);
              }
          }
-         // Process new records
+         // Process new records in parallel
          if (newRecords.length > 0) {
-             callbacks?.onProgress?.(`Creating new records (0/${newRecords.length})`);
+             callbacks?.onProgress?.(`Creating new records (parallel processing)`);
              if (entityConfig.pull?.appendRecordsToExistingFile && entityConfig.pull?.newFileName) {
-                 // Append all new records to a single file
+                 // Append all new records to a single file using parallel processing
                  const fileName = entityConfig.pull.newFileName.endsWith('.json')
                      ? entityConfig.pull.newFileName
                      : `${entityConfig.pull.newFileName}.json`;
                  const filePath = path_1.default.join(targetDir, fileName);
-                 // Load existing file if it exists
-                 let existingData = [];
-                 if (await fs_extra_1.default.pathExists(filePath)) {
-                     const fileData = await fs_extra_1.default.readJson(filePath);
-                     existingData = Array.isArray(fileData) ? fileData : [fileData];
-                 }
-                 // Process and append all new records
-                 for (const { record, primaryKey } of newRecords) {
+                 // Process all new records in parallel
+                 const newRecordPromises = newRecords.map(async ({ record, primaryKey }, index) => {
                      try {
                          // For new records, pass isNewRecord = true (default)
-                         const recordData = await this.processRecordData(record, primaryKey, targetDir, entityConfig, options.verbose, true);
-                         existingData.push(recordData);
-                         created++;
-                         processed++;
-                         if (options.verbose) {
-                             callbacks?.onProgress?.(`Creating new records (${created}/${newRecords.length})`);
-                         }
+                         const recordData = await this.recordProcessor.processRecord(record, primaryKey, targetDir, entityConfig, options.verbose, true);
+                         // Use queueArrayUpdate to append the new record without overwriting existing updates
+                         // For new records, we can use a special lookup key since they don't exist yet
+                         const newRecordLookup = this.createPrimaryKeyLookup(primaryKey);
+                         this.fileWriteBatch.queueArrayUpdate(filePath, recordData, newRecordLookup);
+                         return { success: true, index };
                      }
                      catch (error) {
-                         callbacks?.onWarn?.(`Failed to process new record: ${error.message || error}`);
+                         const errorMessage = `Failed to process new record ${index + 1}: ${error.message || error}`;
+                         errors.push(errorMessage);
+                         callbacks?.onWarn?.(errorMessage);
+                         return { success: false, index };
                      }
-                 }
-                 // Write the combined data
-                 await fs_extra_1.default.writeJson(filePath, existingData, { spaces: 2 });
+                 });
+                 const newRecordResults = await Promise.all(newRecordPromises);
+                 created = newRecordResults.filter(r => r.success).length;
+                 processed += created;
                  if (options.verbose) {
-                     callbacks?.onLog?.(`Appended ${created} new records to: ${filePath}`);
+                     callbacks?.onLog?.(`Queued ${created} new records for: ${filePath}`);
                  }
              }
             else {
-                 // Create individual files for each new record
-                 for (const { record, primaryKey } of newRecords) {
+                 // Create individual files for each new record in parallel
+                 const individualRecordPromises = newRecords.map(async ({ record, primaryKey }, index) => {
                      try {
                          await this.processRecord(record, primaryKey, targetDir, entityConfig, options.verbose);
-                         created++;
-                         processed++;
-                         if (options.verbose) {
-                             callbacks?.onProgress?.(`Creating new records (${created}/${newRecords.length})`);
-                         }
+                         return { success: true, index };
                      }
                      catch (error) {
-                         callbacks?.onWarn?.(`Failed to process new record: ${error.message || error}`);
+                         const errorMessage = `Failed to process new record ${index + 1}: ${error.message || error}`;
+                         errors.push(errorMessage);
+                         callbacks?.onWarn?.(errorMessage);
+                         return { success: false, index };
                      }
+                 });
+                 const individualResults = await Promise.all(individualRecordPromises);
+                 created = individualResults.filter(r => r.success).length;
+                 processed += created;
+                 if (options.verbose) {
+                     callbacks?.onSuccess?.(`Created ${created}/${newRecords.length} individual record files`);
                  }
              }
          }
+         // If there were errors during parallel processing, throw them
+         if (errors.length > 0) {
+             throw new Error(`Parallel processing completed with ${errors.length} errors:\n${errors.join('\n')}`);
+         }
          return { processed, updated, created, skipped };
      }
      async processRecord(record, primaryKey, targetDir, entityConfig, verbose) {
-         const recordData = await this.processRecordData(record, primaryKey, targetDir, entityConfig, verbose, true);
+         const recordData = await this.recordProcessor.processRecord(record, primaryKey, targetDir, entityConfig, verbose, true);
          // Determine file path
          const fileName = this.buildFileName(primaryKey, entityConfig);
          const filePath = path_1.default.join(targetDir, fileName);
-         // Write JSON file
-         await fs_extra_1.default.writeJson(filePath, recordData, { spaces: 2 });
-     }
-     async processRecordData(record, primaryKey, targetDir, entityConfig, verbose, isNewRecord = true, existingRecordData, currentDepth = 0, ancestryPath = new Set()) {
-         // This is a simplified version - the full implementation would need to be extracted
-         // from the pull command. For now, we'll delegate to a method that would be
-         // implemented in the full service
-         // Build record data
-         const fields = {};
-         const relatedEntities = {};
-         // Get the underlying data from the entity object
-         let dataToProcess = record;
-         if (typeof record.GetAll === 'function') {
-             dataToProcess = record.GetAll();
-         }
-         // Process fields (simplified - full implementation needed)
-         for (const [fieldName, fieldValue] of Object.entries(dataToProcess)) {
-             // Skip primary key fields
-             if (primaryKey[fieldName] !== undefined) {
-                 continue;
-             }
-             // Skip internal fields
-             if (fieldName.startsWith('__mj_')) {
-                 continue;
-             }
-             // Skip excluded fields
-             if (entityConfig.pull?.excludeFields?.includes(fieldName)) {
-                 continue;
-             }
-             fields[fieldName] = fieldValue;
-         }
-         // Calculate checksum
-         const checksum = this.syncEngine.calculateChecksum(fields);
-         // Build the final record data
-         const recordData = {
-             fields,
-             primaryKey,
-             sync: {
-                 lastModified: new Date().toISOString(),
-                 checksum: checksum
-             }
-         };
-         if (Object.keys(relatedEntities).length > 0) {
-             recordData.relatedEntities = relatedEntities;
-         }
-         return recordData;
+         // Queue JSON file for batched write with controlled property order
+         this.fileWriteBatch.queueWrite(filePath, recordData);
      }
      async findEntityDirectories(entityName) {
          const dirs = [];
@@ -428,7 +519,7 @@ class PullService {
          await searchDirs(config_manager_1.configManager.getOriginalCwd());
          return dirs;
      }
-     buildFileName(primaryKey, entityConfig) {
+     buildFileName(primaryKey, _entityConfig) {
          // Use primary key values to build filename
          const keys = Object.values(primaryKey);
          if (keys.length === 1 && typeof keys[0] === 'string') {
@@ -474,7 +565,7 @@ class PullService {
          }
          return files;
      }
-     async loadExistingRecords(files, entityInfo) {
+     async loadExistingRecords(files, _entityInfo) {
          const recordsMap = new Map();
          for (const filePath of files) {
              try {
@@ -548,12 +639,16 @@ class PullService {
          const backupDir = path_1.default.join(dir, backupDirName || '.backups');
          // Ensure backup directory exists
          await fs_extra_1.default.ensureDir(backupDir);
+         // Track the backup directory for cleanup
+         this.createdBackupDirs.add(backupDir);
          const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
          // Remove .json extension, add timestamp, then add .backup extension
          const backupFileName = fileName.replace(/\.json$/, `.${timestamp}.backup`);
          const backupPath = path_1.default.join(backupDir, backupFileName);
          try {
              await fs_extra_1.default.copy(filePath, backupPath);
+             // Track the created backup file for cleanup
+             this.createdBackupFiles.push(backupPath);
          }
          catch (error) {
              // Log error but don't throw