@memberjunction/metadata-sync 2.121.0 → 2.122.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +112 -0
- package/dist/index.d.ts +3 -1
- package/dist/index.js +4 -1
- package/dist/index.js.map +1 -1
- package/dist/lib/json-preprocessor.js +4 -2
- package/dist/lib/json-preprocessor.js.map +1 -1
- package/dist/lib/sync-engine.d.ts +40 -4
- package/dist/lib/sync-engine.js +70 -16
- package/dist/lib/sync-engine.js.map +1 -1
- package/dist/services/FormattingService.d.ts +1 -0
- package/dist/services/FormattingService.js +3 -0
- package/dist/services/FormattingService.js.map +1 -1
- package/dist/services/PushService.d.ts +17 -0
- package/dist/services/PushService.js +162 -18
- package/dist/services/PushService.js.map +1 -1
- package/package.json +7 -7
package/dist/services/PushService.d.ts

@@ -23,6 +23,7 @@ export interface PushResult {
 unchanged: number;
 deleted: number;
 skipped: number;
+deferred: number;
 errors: number;
 warnings: string[];
 sqlLogPath?: string;
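For callers, the new counter rides along with the existing totals in PushResult. A small hypothetical helper, reading only the fields visible in this hunk, shows how a consumer might report it:

```typescript
// Hypothetical helper; the structural type below only names the PushResult fields
// that appear in this diff, so it stays decoupled from the package's exact exports.
type PushTotals = { deferred: number; errors: number; warnings: string[] };

function reportPushTotals(result: PushTotals): void {
  if (result.deferred > 0) {
    // Saved in Phase 1 with one or more lookups unset, then re-processed in Phase 2.5.
    console.log(`${result.deferred} record(s) needed deferred lookup resolution`);
  }
  if (result.errors > 0) {
    console.error(`Push completed with ${result.errors} error(s)`, result.warnings);
  }
}
```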
@@ -33,6 +34,7 @@ export interface EntityPushResult {
 unchanged: number;
 deleted: number;
 skipped: number;
+deferred: number;
 errors: number;
 }
 export declare class PushService {
@@ -41,6 +43,7 @@ export declare class PushService {
 private warnings;
 private syncConfig;
 private deferredFileWrites;
+private deferredRecords;
 constructor(syncEngine: SyncEngine, contextUser: UserInfo);
 push(options: PushOptions, callbacks?: PushCallbacks): Promise<PushResult>;
 private processEntityDirectory;
@@ -65,6 +68,20 @@ export declare class PushService {
 * Recursively check if any records in an array (including nested relatedEntities) have deletions
 */
 private hasAnyDeletions;
+/**
+* Process deferred records that had lookup failures during initial processing.
+* Called in Phase 2.5 after all records are created/updated but before commit.
+* This handles circular dependencies where records reference each other.
+*
+* Re-runs processFlattenedRecord with allowDefer=false, which processes the
+* entire record exactly as in the initial pass. Now that all records exist,
+* the lookups should succeed.
+*
+* @param options - Push options
+* @param callbacks - Callbacks for progress/error reporting
+* @returns Object with created, updated, and errors counts
+*/
+private processDeferredRecords;
 /**
 * Write all deferred files with updated deletion timestamps
 * Called in Phase 3 after all deletions complete successfully
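The doc comment above describes a two-pass strategy: save what you can, queue any record whose lookup fails, and re-run the queued records once everything exists. A standalone sketch of that pattern, with invented names (`Rec`, `pushAll`), not the package's actual code:

```typescript
// Toy model of "defer on lookup failure, resolve in a second pass".
type Rec = { name: string; id: string; refersTo?: string; refId?: string };

function pushAll(records: Rec[]): void {
  const saved = new Map<string, Rec>(); // keyed by name, stands in for the database
  const deferred: Rec[] = [];

  // Pass 1: save every record; defer the ones whose lookup target doesn't exist yet.
  for (const rec of records) {
    if (rec.refersTo) {
      const target = saved.get(rec.refersTo);
      if (target) rec.refId = target.id;
      else deferred.push(rec); // saved without refId, revisited below
    }
    saved.set(rec.name, rec); // "save" the record either way
  }

  // Pass 2 (the "Phase 2.5" of this diff): everything exists now, so lookups must succeed.
  for (const rec of deferred) {
    const target = saved.get(rec.refersTo!);
    if (!target) throw new Error(`Deferred lookup still failed: ${rec.refersTo}`);
    rec.refId = target.id;
  }
}

// Two records that reference each other resolve only because of the second pass.
pushAll([
  { name: 'A', id: '1', refersTo: 'B' },
  { name: 'B', id: '2', refersTo: 'A' },
]);
```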
package/dist/services/PushService.js

@@ -8,6 +8,7 @@ const fs_extra_1 = __importDefault(require("fs-extra"));
 const path_1 = __importDefault(require("path"));
 const fast_glob_1 = __importDefault(require("fast-glob"));
 const core_1 = require("@memberjunction/core");
+const sync_engine_1 = require("../lib/sync-engine");
 const config_1 = require("../config");
 const file_backup_manager_1 = require("../lib/file-backup-manager");
 const config_manager_1 = require("../lib/config-manager");
@@ -27,6 +28,7 @@ class PushService {
 warnings = [];
 syncConfig;
 deferredFileWrites = new Map();
+deferredRecords = [];
 constructor(syncEngine, contextUser) {
 this.syncEngine = syncEngine;
 this.contextUser = contextUser;
@@ -37,7 +39,9 @@ class PushService {
 if (options.include && options.exclude) {
 throw new Error('Cannot specify both --include and --exclude options. Please use one or the other.');
 }
-
+// Reset deferred tracking for this push operation
+this.deferredFileWrites.clear();
+this.deferredRecords = [];
 const fileBackupManager = new file_backup_manager_1.FileBackupManager();
 // Load sync config for SQL logging settings and autoCreateMissingRecords flag
 // If dir option is specified, load from that directory, otherwise use original CWD
@@ -126,6 +130,7 @@ class PushService {
 let totalUnchanged = 0;
 let totalDeleted = 0;
 let totalSkipped = 0;
+let totalDeferred = 0;
 let totalErrors = 0;
 // PHASE 0: Audit all deletions across all entities (if any exist)
 let deletionAudit = null;
@@ -164,6 +169,7 @@
 unchanged: 0,
 deleted: 0,
 skipped: 0,
+deferred: 0,
 errors: 0,
 warnings: this.warnings
 };
@@ -216,6 +222,9 @@
 if (result.deleted > 0) {
 callbacks?.onLog?.(` ✓ Deleted: ${result.deleted}`);
 }
+if (result.deferred > 0) {
+callbacks?.onLog?.(` ⏳ Deferred: ${result.deferred}`);
+}
 if (result.unchanged > 0) {
 callbacks?.onLog?.(` - Unchanged: ${result.unchanged}`);
 }
@@ -231,6 +240,7 @@
 totalUnchanged += result.unchanged;
 totalDeleted += result.deleted;
 totalSkipped += result.skipped;
+totalDeferred += result.deferred;
 totalErrors += result.errors;
 }
 // PHASE 2: Process deletions in reverse dependency order (if any exist)
@@ -239,6 +249,13 @@
 totalDeleted += deletionResult.deleted;
 totalErrors += deletionResult.errors;
 }
+// PHASE 2.5: Process deferred records (for circular dependencies)
+if (this.deferredRecords.length > 0 && totalErrors === 0) {
+const deferredResult = await this.processDeferredRecords(options, callbacks);
+totalCreated += deferredResult.created;
+totalUpdated += deferredResult.updated;
+totalErrors += deferredResult.errors;
+}
 // Commit transaction if successful
 if (!options.dryRun && totalErrors === 0) {
 await transactionManager.commitTransaction();
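Taken together, the ordering after this change is: Phase 0 audits deletions, Phase 1 creates/updates records (possibly deferring some lookups), Phase 2 applies deletions, Phase 2.5 resolves the deferred records, and the transaction is committed only when nothing failed. A condensed, self-contained model of that control flow; the names and signatures here are illustrative, not the package's API:

```typescript
type PhaseTotals = { created: number; updated: number; errors: number };

async function runPhases(
  phase1: () => Promise<PhaseTotals>,          // create/update, may defer lookups
  phase2Deletions: () => Promise<number>,      // returns error count
  phase25Deferred: () => Promise<PhaseTotals>, // resolves deferred lookups
  commit: () => Promise<void>,
  hasDeferred: () => boolean,
  dryRun: boolean,
): Promise<PhaseTotals> {
  const totals = await phase1();
  totals.errors += await phase2Deletions();
  if (hasDeferred() && totals.errors === 0) {
    const d = await phase25Deferred();         // Phase 2.5 runs before commit
    totals.created += d.created;
    totals.updated += d.updated;
    totals.errors += d.errors;
  }
  if (!dryRun && totals.errors === 0) {
    await commit();                            // commit only when every phase succeeded
  }
  return totals;
}
```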
@@ -279,6 +296,7 @@
 unchanged: totalUnchanged,
 deleted: totalDeleted,
 skipped: totalSkipped,
+deferred: totalDeferred,
 errors: totalErrors,
 warnings: this.warnings,
 sqlLogPath
@@ -313,6 +331,7 @@
 let unchanged = 0;
 let deleted = 0;
 let skipped = 0;
+let deferred = 0;
 let errors = 0;
 // Find all JSON files in the directory
 const pattern = entityConfig.filePattern || '*.json';
@@ -335,6 +354,9 @@
 }
 // Read the raw file data first
 const rawFileData = await fs_extra_1.default.readJson(filePath);
+// Keep unprocessed data to write back (preserves @file: references)
+const unprocessedRecords = Array.isArray(rawFileData) ? rawFileData : [rawFileData];
+const isArray = Array.isArray(rawFileData);
 // Only preprocess if there are @include directives
 let fileData = rawFileData;
 const jsonString = JSON.stringify(rawFileData);
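The `unprocessedRecords` copy exists because the write-back step must not persist resolved content over the on-disk references. A hypothetical illustration (file and field names invented):

```typescript
// On disk, a field can point at external content via an "@file:" reference.
const onDisk = { fields: { Name: 'Example', Prompt: '@file:prompt.md' } };

// During the push, the in-memory copy may have that reference resolved to the
// file's actual contents.
const inMemory = { fields: { Name: 'Example', Prompt: '# Prompt\n...inlined text...' } };

// Writing `inMemory` back would replace "@file:prompt.md" with the inlined text,
// so the service writes `unprocessedRecords` (the raw file data) instead.
```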
@@ -346,7 +368,6 @@
 fileData = await jsonPreprocessor.processFile(filePath);
 }
 const records = Array.isArray(fileData) ? fileData : [fileData];
-const isArray = Array.isArray(fileData);
 // Analyze dependencies and get sorted records
 const analyzer = new record_dependency_analyzer_1.RecordDependencyAnalyzer();
 const analysisResult = await analyzer.analyzeFileRecords(records, entityConfig.entity);
@@ -421,6 +442,10 @@
 skipped++;
 else if (result.status === 'error')
 errors++;
+else if (result.status === 'deferred') {
+created++; // Deferred records were saved (count as created)
+deferred++; // Also track separately for reporting
+}
 }
 }
 }
@@ -450,6 +475,10 @@
 skipped++;
 else if (result.status === 'error')
 errors++;
+else if (result.status === 'deferred') {
+created++; // Deferred records were saved (count as created)
+deferred++; // Also track separately for reporting
+}
 }
 }
 }
@@ -463,23 +492,24 @@
 // Check if this file has any deletion records (including nested relatedEntities)
 const hasDeletions = this.hasAnyDeletions(records);
 // Write back to file (handles both single records and arrays)
+// Use unprocessedRecords to preserve @file: references
 // Defer writing if file contains deletions - they'll be written after Phase 2
 if (!options.dryRun) {
 if (hasDeletions) {
 // Store for later writing after deletions complete
 this.deferredFileWrites.set(filePath, {
 filePath,
-records,
+records: unprocessedRecords,
 isArray
 });
 }
 else {
 // Write immediately for files without deletions
 if (isArray) {
-await json_write_helper_1.JsonWriteHelper.writeOrderedRecordData(filePath,
+await json_write_helper_1.JsonWriteHelper.writeOrderedRecordData(filePath, unprocessedRecords);
 }
 else {
-await json_write_helper_1.JsonWriteHelper.writeOrderedRecordData(filePath,
+await json_write_helper_1.JsonWriteHelper.writeOrderedRecordData(filePath, unprocessedRecords[0]);
 }
 }
 }
@@ -489,9 +519,9 @@
 throw fileError;
 }
 }
-return { created, updated, unchanged, deleted, skipped, errors };
+return { created, updated, unchanged, deleted, skipped, deferred, errors };
 }
-async processFlattenedRecord(flattenedRecord, entityDir, options, batchContext, callbacks, entityConfig) {
+async processFlattenedRecord(flattenedRecord, entityDir, options, batchContext, callbacks, entityConfig, allowDefer = true) {
 const metadata = new core_1.Metadata();
 const { record, entityName, parentContext, id: recordId } = flattenedRecord;
 // Skip deletion records - they're handled in Phase 2
@@ -585,6 +615,8 @@
 }
 // Process field values with parent context and batch context
 // Process each field with better error reporting
+// Track if we hit any deferrable lookup errors
+let hasDeferrableLookupError = false;
 for (const [fieldName, fieldValue] of Object.entries(record.fields)) {
 try {
 const processedValue = await this.syncEngine.processFieldValue(fieldValue, entityDir, parentEntity, null, // rootRecord
@@ -593,7 +625,25 @@
 entity.Set(fieldName, processedValue);
 }
 catch (fieldError) {
-//
+// Check if this is a deferrable lookup error first
+if (fieldError instanceof sync_engine_1.DeferrableLookupError) {
+// If allowDefer is false, we're in deferred processing mode - can't defer again
+if (!allowDefer) {
+const err = fieldError;
+throw new Error(`Deferred lookup still failed: ${err.message}`);
+}
+// Mark that we need to defer this entire record
+hasDeferrableLookupError = true;
+// Log that we're deferring this lookup
+if (options.verbose) {
+callbacks?.onLog?.(` ⏳ Deferring lookup for ${entityName}.${fieldName} -> ${fieldError.entityName}`);
+}
+// Don't set this field - continue to try other fields
+// We'll re-process the entire record later
+continue;
+}
+// For other errors, use enhanced error reporting
+const err = fieldError;
 const primaryKeyInfo = record.primaryKey ? JSON.stringify(record.primaryKey) : 'NEW';
 // Helper to log to both console and callbacks
 const logError = (msg) => {
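The branch above depends on a `DeferrableLookupError` exported by `../lib/sync-engine` (see the new `sync_engine_1` import earlier in this diff). Its definition is not part of this excerpt; the sketch below only captures the shape this catch block relies on, namely an `Error` subclass (so `instanceof` works) exposing the looked-up entity's name:

```typescript
// Sketch of the assumed shape, not the actual sync-engine export.
class DeferrableLookupError extends Error {
  constructor(
    message: string,
    public readonly entityName: string, // referenced by the "Deferring lookup for ..." log line
  ) {
    super(message);
    this.name = 'DeferrableLookupError';
  }
}
```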
@@ -601,44 +651,47 @@
 callbacks?.onLog?.(msg);
 };
 // Check if this is a lookup failure
-if (
+if (err.message?.includes('Lookup failed:')) {
 logError(`\n❌ LOOKUP FAILURE in ${entityName} (${primaryKeyInfo})`);
 logError(` Field: ${fieldName}`);
 logError(` Value: ${fieldValue}`);
-logError(` Error: ${
+logError(` Error: ${err.message}`);
 logError(` Tip: Check if the referenced record exists in the target entity\n`);
 }
-else if (
+else if (err.message?.includes('Entity not found:')) {
 logError(`\n❌ ENTITY NOT FOUND in ${entityName} (${primaryKeyInfo})`);
 logError(` Field: ${fieldName}`);
 logError(` Value: ${fieldValue}`);
-logError(` Error: ${
+logError(` Error: ${err.message}`);
 logError(` Tip: Check if the entity name is spelled correctly\n`);
 }
-else if (
+else if (err.message?.includes('Field') && err.message?.includes('not found')) {
 logError(`\n❌ FIELD NOT FOUND in ${entityName} (${primaryKeyInfo})`);
 logError(` Field: ${fieldName}`);
 logError(` Value: ${fieldValue}`);
-logError(` Error: ${
+logError(` Error: ${err.message}`);
 logError(` Tip: Check if the field name exists in the target entity\n`);
 }
-else if (
+else if (err.message?.includes('File not found:')) {
 logError(`\n❌ FILE NOT FOUND in ${entityName} (${primaryKeyInfo})`);
 logError(` Field: ${fieldName}`);
 logError(` Value: ${fieldValue}`);
-logError(` Error: ${
+logError(` Error: ${err.message}`);
 logError(` Tip: Check if the file path is correct relative to ${entityDir}\n`);
 }
 else {
 logError(`\n❌ FIELD PROCESSING ERROR in ${entityName} (${primaryKeyInfo})`);
 logError(` Field: ${fieldName}`);
 logError(` Value: ${fieldValue}`);
-logError(` Error: ${
+logError(` Error: ${err.message}\n`);
 }
 // Re-throw with enhanced context
-throw new Error(`Failed to process field '${fieldName}' in ${entityName}: ${
+throw new Error(`Failed to process field '${fieldName}' in ${entityName}: ${err.message}`);
 }
 }
+// Note: If we had deferred fields, we still continue to save the record
+// The deferred fields are not set, but other fields are. We'll queue for
+// re-processing after save succeeds.
 // Check if the record is actually dirty before considering it changed
 let isDirty = entity.Dirty;
 // Force dirty state if alwaysPush is enabled
@@ -826,6 +879,21 @@
 // Add to batch context AFTER save so it has an ID for child @parent:ID references
 // Use the recordId (lookupKey) as the key so child records can find this parent
 batchContext.set(lookupKey, entity);
+// If we had deferred lookup errors, queue the entire record for re-processing
+// The record has been saved (without the deferred fields), so it exists in the DB.
+// In Phase 2.5, we'll re-run processFlattenedRecord with allowDefer=false to fill in the gaps.
+if (hasDeferrableLookupError && allowDefer && entityConfig) {
+this.deferredRecords.push({
+flattenedRecord,
+entityDir,
+entityConfig
+});
+if (options.verbose) {
+callbacks?.onLog?.(` 📋 Queued ${entityName} for deferred processing (record saved, some fields pending)`);
+}
+// Return 'deferred' status - it's saved but incomplete
+// We don't return early here because we still want to update primaryKey and sync metadata
+}
 // Update primaryKey for new records
 if (isNew) {
 const entityInfo = this.syncEngine.getEntityInfo(entityName);
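Each queued entry carries just enough context to replay `processFlattenedRecord` later. A sketch of the entry shape implied by the `push` call above; the element types are placeholders, the real code passes its own values:

```typescript
interface DeferredRecordEntry<TFlattenedRecord = unknown, TEntityConfig = unknown> {
  flattenedRecord: TFlattenedRecord; // already saved, minus the fields whose lookups failed
  entityDir: string;                 // directory containing the record's source file
  entityConfig: TEntityConfig;       // per-entity sync configuration, reused in Phase 2.5
}
```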
@@ -852,6 +920,15 @@
 }
 // Restore original field values to preserve @ references
 record.fields = originalFields;
+// Return appropriate status
+// If we had deferred lookups, return 'deferred' to indicate partial save
+// The record is saved but will be re-processed in Phase 2.5
+if (hasDeferrableLookupError && allowDefer) {
+return {
+status: 'deferred',
+isDuplicate: false
+};
+}
 return {
 status: isNew ? 'created' : (isDirty ? 'updated' : 'unchanged'),
 isDuplicate: false
@@ -1212,6 +1289,73 @@
 }
 return false;
 }
+/**
+* Process deferred records that had lookup failures during initial processing.
+* Called in Phase 2.5 after all records are created/updated but before commit.
+* This handles circular dependencies where records reference each other.
+*
+* Re-runs processFlattenedRecord with allowDefer=false, which processes the
+* entire record exactly as in the initial pass. Now that all records exist,
+* the lookups should succeed.
+*
+* @param options - Push options
+* @param callbacks - Callbacks for progress/error reporting
+* @returns Object with created, updated, and errors counts
+*/
+async processDeferredRecords(options, callbacks) {
+if (this.deferredRecords.length === 0) {
+return { created: 0, updated: 0, errors: 0 };
+}
+callbacks?.onLog?.(`\n⏳ Processing ${this.deferredRecords.length} deferred record${this.deferredRecords.length > 1 ? 's' : ''}...`);
+let created = 0;
+let updated = 0;
+let errors = 0;
+// Create a fresh batch context for deferred processing
+// Records are in DB now, so this is mainly for tracking within this phase
+const batchContext = new Map();
+for (const deferred of this.deferredRecords) {
+const { flattenedRecord, entityDir, entityConfig } = deferred;
+const entityName = flattenedRecord.entityName;
+const recordId = flattenedRecord.record.primaryKey
+? Object.entries(flattenedRecord.record.primaryKey).map(([k, v]) => `${k}=${v}`).join(', ')
+: (flattenedRecord.record.fields.Name || 'NEW');
+try {
+// Re-run processFlattenedRecord with allowDefer=false
+// This ensures we use the exact same processing logic
+const result = await this.processFlattenedRecord(flattenedRecord, entityDir, options, batchContext, callbacks, entityConfig, false // allowDefer=false - must succeed or fail, no re-deferring
+);
+if (result.status === 'created') {
+created++;
+callbacks?.onLog?.(` ✓ ${entityName} (${recordId}) - created`);
+}
+else if (result.status === 'updated') {
+updated++;
+callbacks?.onLog?.(` ✓ ${entityName} (${recordId}) - updated`);
+}
+else if (result.status === 'unchanged') {
+callbacks?.onLog?.(` - ${entityName} (${recordId}) - unchanged`);
+}
+}
+catch (error) {
+const err = error;
+callbacks?.onError?.(` ✗ Failed to process deferred record: ${entityName} (${recordId})`);
+callbacks?.onError?.(` Error: ${err.message}`);
+callbacks?.onError?.(` Tip: Ensure all referenced records exist or remove the ?allowDefer flag`);
+errors++;
+}
+}
+// Summary
+callbacks?.onLog?.('');
+const total = created + updated;
+if (total > 0) {
+callbacks?.onLog?.(` ✓ Resolved ${total} deferred record${total > 1 ? 's' : ''} (${created} created, ${updated} updated)`);
+}
+if (errors > 0) {
+callbacks?.onLog?.(` ✗ Failed to resolve ${errors} deferred record${errors > 1 ? 's' : ''}`);
+}
+callbacks?.onLog?.('');
+return { created, updated, errors };
+}
 /**
 * Write all deferred files with updated deletion timestamps
 * Called in Phase 3 after all deletions complete successfully