@arela/uploader 0.3.0 → 1.0.0

This diff shows the changes between two publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
@@ -172,11 +172,8 @@ export class DatabaseService {
  record.year = detection.detectedPedimentoYear;
  }

- const rfcField = detection.fields.find(
- (f) => f.name === 'rfc' && f.found,
- );
- if (rfcField) {
- record.rfc = rfcField.value;
+ if (detection.rfc) {
+ record.rfc = detection.rfc;
  }
  } else {
  record.status = 'not-detected';
@@ -360,7 +357,6 @@ export class DatabaseService {
  * @returns {Promise<Object>} Statistics about the operation
  */
  async insertStatsOnlyToUploaderTable(files, options) {
- const supabase = await this.#getSupabaseClient();
  const batchSize = 1000;
  const quietMode = options?.quietMode || false;
  const allRecords = [];
@@ -394,28 +390,26 @@ export class DatabaseService {

  const record = {
  name: file.originalName || path.basename(file.path),
- document_type: null,
+ documentType: null,
  size: stats.size,
- num_pedimento: null,
+ numPedimento: null,
  filename: file.originalName || path.basename(file.path),
- original_path: originalPath,
- arela_path: null,
+ originalPath: originalPath,
+ arelaPath: null,
  status: 'fs-stats',
  rfc: null,
  message: null,
- file_extension: fileExtension,
- created_at: new Date().toISOString(),
- updated_at: new Date().toISOString(),
- modified_at: stats.mtime.toISOString(),
- is_like_simplificado:
+ fileExtension: fileExtension,
+ modifiedAt: stats.mtime.toISOString(),
+ isLikeSimplificado:
  fileExtension === 'pdf' &&
  (file.originalName || path.basename(file.path))
  .toLowerCase()
  .includes('simp'),
  year: null,
  // Queue/Processing columns (for arela-api)
- processing_status: 'PENDING',
- upload_attempts: 0,
+ processingStatus: 'PENDING',
+ uploadAttempts: 0,
  };

  allRecords.push(record);
@@ -440,33 +434,28 @@ export class DatabaseService {
  let totalInserted = 0;
  let totalUpdated = 0;

+ // Use API service for batch upsert
+ // If apiTarget is specified, use that specific API, otherwise use default
+ let uploadService;
+ if (options.apiTarget) {
+ uploadService = await uploadServiceFactory.getApiServiceForTarget(
+ options.apiTarget,
+ );
+ } else {
+ uploadService = await uploadServiceFactory.getUploadService(
+ options.forceSupabase,
+ );
+ }
+
+ // Use API service for batch upsert
  for (let i = 0; i < allRecords.length; i += batchSize) {
  const batch = allRecords.slice(i, i + batchSize);

  try {
- // Check which records already exist
- const originalPaths = batch.map((r) => r.original_path);
- const { data: existingRecords, error: checkError } = await supabase
- .from('uploader')
- .select('original_path')
- .in('original_path', originalPaths);
-
- if (checkError) {
- logger.error(
- `Error checking existing records: ${checkError.message}`,
- );
- continue;
- }
+ const result = await uploadService.batchUpsertStats(batch);

- const existingPaths = new Set(
- existingRecords?.map((r) => r.original_path) || [],
- );
- const newRecords = batch.filter(
- (r) => !existingPaths.has(r.original_path),
- );
- const updateRecords = batch.filter((r) =>
- existingPaths.has(r.original_path),
- );
+ totalInserted += result.inserted || 0;
+ totalUpdated += result.updated || 0;

  // Only log every 10th batch to reduce noise (skip in quiet mode)
  if (
@@ -475,53 +464,12 @@ export class DatabaseService {
  Math.floor(i / batchSize) + 1 === 1)
  ) {
  logger.info(
- `Batch ${Math.floor(i / batchSize) + 1}: ${newRecords.length} new, ${updateRecords.length} updates`,
+ `Batch ${Math.floor(i / batchSize) + 1}: ${result.inserted || 0} new, ${result.updated || 0} updates`,
  );
  }
-
- // Insert new records
- if (newRecords.length > 0) {
- const { error: insertError } = await supabase
- .from('uploader')
- .insert(newRecords);
-
- if (insertError) {
- logger.error(`Error inserting new records: ${insertError.message}`);
- } else {
- totalInserted += newRecords.length;
- // Only log the batch insertion, not the summary (which comes at the end)
- }
- }
-
- // Update existing records
- if (updateRecords.length > 0) {
- let batchUpdated = 0;
- for (const record of updateRecords) {
- const { error: updateError } = await supabase
- .from('uploader')
- .update({
- name: record.filename,
- size: record.size,
- modified_at: record.modified_at,
- filename: record.filename,
- file_extension: record.file_extension,
- is_like_simplificado: record.is_like_simplificado,
- })
- .eq('original_path', record.original_path);
-
- if (!updateError) {
- batchUpdated++;
- }
- }
- totalUpdated += batchUpdated;
- // Reduce logging noise - only log when there are updates (skip in quiet mode)
- if (!quietMode && batchUpdated > 0) {
- logger.info(`Updated ${batchUpdated} existing records`);
- }
- }
  } catch (error) {
  logger.error(
- `Unexpected error in batch ${Math.floor(i / batchSize) + 1}: ${error.message}`,
+ `Error in batch ${Math.floor(i / batchSize) + 1}: ${error.message}`,
  );
  }
  }
@@ -545,8 +493,6 @@ export class DatabaseService {
  * @returns {Promise<Object>} Processing result
  */
  async detectPedimentosInDatabase(options = {}) {
- const supabase = await this.#getSupabaseClient();
-
  logger.info(
  'Phase 2: Starting PDF detection for pedimento-simplificado documents...',
  );
@@ -561,6 +507,25 @@ export class DatabaseService {
  let offset = 0;
  let chunkNumber = 1;

+ // Get API service - use specific target if provided
+ let apiService;
+ if (options.apiTarget) {
+ apiService = await uploadServiceFactory.getApiServiceForTarget(
+ options.apiTarget,
+ );
+ logger.info(`Using API target: ${options.apiTarget}`);
+ } else {
+ apiService = await uploadServiceFactory.getUploadService();
+ }
+
+ if (apiService.getServiceName() !== 'Arela API') {
+ throw new Error(
+ 'API service is required for PDF detection. Please configure ARELA_API_URL and ARELA_API_TOKEN.',
+ );
+ }
+
+ logger.info('Using API service for PDF detection...');
+
  logger.info(
  `Processing PDF files in chunks of ${queryBatchSize} records...`,
  );
@@ -571,18 +536,12 @@ export class DatabaseService {
  );

  try {
- // Split the query to make it more efficient with retry logic
+ // Fetch records using API
  const { data: pdfRecords, error: queryError } =
- await this.#queryWithRetry(async () => {
- return await supabase
- .from('uploader')
- .select('id, original_path, filename, file_extension, status')
- .eq('status', 'fs-stats')
- .eq('file_extension', 'pdf')
- .eq('is_like_simplificado', true)
- .range(offset, offset + queryBatchSize - 1)
- .order('created_at');
- }, `fetch PDF records chunk ${chunkNumber}`);
+ await apiService.fetchPdfRecordsForDetection({
+ offset,
+ limit: queryBatchSize,
+ });

  if (queryError) {
  throw new Error(
@@ -604,9 +563,10 @@ export class DatabaseService {
  let chunkErrors = 0;

  // Process files in smaller batches
+ const batchUpdates = [];
+
  for (let i = 0; i < pdfRecords.length; i += processingBatchSize) {
  const batch = pdfRecords.slice(i, i + processingBatchSize);
- const updatePromises = [];

  for (const record of batch) {
  try {
@@ -614,15 +574,11 @@ export class DatabaseService {
  logger.warn(
  `File not found: ${record.filename} at ${record.original_path}`,
  );
- updatePromises.push(
- supabase
- .from('uploader')
- .update({
- status: 'file-not-found',
- message: 'File no longer exists at original path',
- })
- .eq('id', record.id),
- );
+ batchUpdates.push({
+ id: record.id,
+ status: 'file-not-found',
+ message: 'File no longer exists at original path',
+ });
  chunkErrors++;
  totalErrors++;
  continue;
@@ -635,28 +591,24 @@ export class DatabaseService {
  totalProcessed++;

  const updateData = {
+ id: record.id,
  status: detection.detectedType ? 'detected' : 'not-detected',
- document_type: detection.detectedType,
- num_pedimento: detection.detectedPedimento,
- arela_path: detection.arelaPath,
+ documentType: detection.detectedType,
+ numPedimento: detection.detectedPedimento,
+ arelaPath: detection.arelaPath,
  message: detection.error || null,
  year: detection.detectedPedimentoYear || null,
  };

- if (detection.fields) {
- const rfcField = detection.fields.find(
- (f) => f.name === 'rfc' && f.found,
- );
- if (rfcField) {
- updateData.rfc = rfcField.value;
- }
+ if (detection.rfc) {
+ updateData.rfc = detection.rfc;
  }

  if (detection.detectedType) {
  chunkDetected++;
  totalDetected++;
  logger.success(
- `Detected: ${record.filename} -> ${detection.detectedType} | Pedimento: ${detection.detectedPedimento || 'N/A'} | RFC: ${detection.fields?.rfc || 'N/A'}`,
+ `Detected: ${record.filename} -> ${detection.detectedType} | Pedimento: ${detection.detectedPedimento || 'N/A'} | RFC: ${updateData.rfc || 'N/A'}`,
  );
  } else {
  logger.info(
@@ -664,12 +616,7 @@ export class DatabaseService {
  );
  }

- updatePromises.push(
- supabase
- .from('uploader')
- .update(updateData)
- .eq('id', record.id),
- );
+ batchUpdates.push(updateData);
  } catch (error) {
  logger.error(
  `Error detecting ${record.filename}: ${error.message}`,
@@ -677,25 +624,30 @@ export class DatabaseService {
  chunkErrors++;
  totalErrors++;

- updatePromises.push(
- supabase
- .from('uploader')
- .update({
- status: 'detection-error',
- message: error.message,
- })
- .eq('id', record.id),
- );
+ batchUpdates.push({
+ id: record.id,
+ status: 'detection-error',
+ message: error.message,
+ });
  }
  }
+ }

- try {
- await Promise.all(updatePromises);
- } catch (error) {
- logger.error(
- `Error updating batch in chunk ${chunkNumber}: ${error.message}`,
- );
+ // Batch update using API
+ try {
+ if (batchUpdates.length > 0) {
+ const updateResult =
+ await apiService.batchUpdateDetectionResults(batchUpdates);
+ if (!updateResult.success) {
+ logger.error(
+ `Some updates failed in chunk ${chunkNumber}: ${updateResult.errors?.length || 0} errors`,
+ );
+ }
  }
+ } catch (error) {
+ logger.error(
+ `Error updating batch in chunk ${chunkNumber}: ${error.message}`,
+ );
  }

  logger.success(
@@ -741,278 +693,156 @@ export class DatabaseService {

  /**
  * Propagate arela_path from pedimento_simplificado records to related files
+ * This operation is performed entirely on the backend for efficiency
  * @param {Object} options - Options for propagation
  * @returns {Promise<Object>} Processing result
  */
  async propagateArelaPath(options = {}) {
- const supabase = await this.#getSupabaseClient();
-
  logger.info('Phase 3: Starting arela_path and year propagation process...');
- console.log('🔍 Processing pedimento_simplificado records page by page...');
+ console.log('🔍 Triggering backend propagation process...');

- // Log year filtering configuration
- if (appConfig.upload.years && appConfig.upload.years.length > 0) {
- logger.info(
- `🗓️ Year filter enabled: ${appConfig.upload.years.join(', ')}`,
- );
- console.log(
- `🗓️ Year filter enabled: ${appConfig.upload.years.join(', ')}`,
+ // Get API service - use specific target if provided
+ let apiService;
+ if (options.apiTarget) {
+ apiService = await uploadServiceFactory.getApiServiceForTarget(
+ options.apiTarget,
  );
+ logger.info(`Using API target: ${options.apiTarget}`);
  } else {
- logger.info('🗓️ No year filter configured - processing all years');
- console.log('🗓️ No year filter configured - processing all years');
+ apiService = await uploadServiceFactory.getUploadService();
  }

- let totalProcessed = 0;
- let totalUpdated = 0;
- let totalErrors = 0;
- let offset = 0;
- const pageSize = 50;
- let hasMoreData = true;
- let pageNumber = 1;
- const BATCH_SIZE = 50; // Process files in batches
-
- // Process pedimento records page by page for memory efficiency
- while (hasMoreData) {
- logger.info(
- `Fetching and processing pedimento records page ${pageNumber} (records ${offset + 1} to ${offset + pageSize})...`,
- );
-
- let query = supabase
- .from('uploader')
- .select('id, original_path, arela_path, filename, year')
- .eq('document_type', 'pedimento_simplificado')
- .not('arela_path', 'is', null);
-
- // Add year filter if UPLOAD_YEARS is configured
- if (appConfig.upload.years && appConfig.upload.years.length > 0) {
- query = query.in('year', appConfig.upload.years);
- }
-
- const { data: pedimentoPage, error: pedimentoError } = await query
- .range(offset, offset + pageSize - 1)
- .order('created_at');
-
- if (pedimentoError) {
- const errorMsg = `Error fetching pedimento records page ${pageNumber}: ${pedimentoError.message}`;
- logger.error(errorMsg);
- throw new Error(errorMsg);
- }
-
- if (!pedimentoPage || pedimentoPage.length === 0) {
- hasMoreData = false;
- logger.info('No more pedimento records found');
- break;
- }
-
- logger.info(
- `Processing page ${pageNumber}: ${pedimentoPage.length} pedimento records`,
+ if (apiService.getServiceName() !== 'Arela API') {
+ throw new Error(
+ 'API service is required for arela_path propagation. Please configure ARELA_API_URL and ARELA_API_TOKEN.',
  );
+ }

- // Process each pedimento record in the current page
- for (const pedimento of pedimentoPage) {
- try {
- totalProcessed++;
-
- // Extract base path from original_path (remove filename)
- const basePath = path.dirname(pedimento.original_path);
-
- logger.info(
- `Processing pedimento: ${pedimento.filename} | Base path: ${basePath} | Year: ${pedimento.year || 'N/A'}`,
- );
-
- // Extract folder part from existing arela_path
- const existingPath = pedimento.arela_path;
- const folderArelaPath = existingPath.includes('/')
- ? existingPath.substring(0, existingPath.lastIndexOf('/')) + '/'
- : existingPath.endsWith('/')
- ? existingPath
- : existingPath + '/';
-
- // Process related files page by page for memory efficiency
- let relatedFilesFrom = 0;
- const relatedFilesPageSize = 50;
- let hasMoreRelatedFiles = true;
- let relatedFilesPageNumber = 1;
- let totalRelatedFilesProcessed = 0;
-
- logger.info(
- `Searching and processing related files in base path: ${basePath}`,
- );
-
- while (hasMoreRelatedFiles) {
- const { data: relatedFilesPage, error: relatedError } =
- await this.#queryWithRetry(async () => {
- return await supabase
- .from('uploader')
- .select('id, filename, original_path')
- .like('original_path', `${basePath}%`)
- .is('arela_path', null)
- .neq('id', pedimento.id) // Exclude the pedimento itself
- .range(
- relatedFilesFrom,
- relatedFilesFrom + relatedFilesPageSize - 1,
- );
- }, `query related files for ${pedimento.filename} (page ${relatedFilesPageNumber})`);
-
- if (relatedError) {
- logger.error(
- `Error finding related files for ${pedimento.filename}: ${relatedError.message}`,
- );
- totalErrors++;
- break;
- }
- // console.log(`query by basePath: ${basePath} count: [${relatedFilesPage.length}]`);
-
- if (!relatedFilesPage || relatedFilesPage.length === 0) {
- hasMoreRelatedFiles = false;
- if (totalRelatedFilesProcessed === 0) {
- logger.info(`No related files found for ${pedimento.filename}`);
- }
- break;
- }
-
- logger.info(
- `Processing related files page ${relatedFilesPageNumber}: ${relatedFilesPage.length} files for ${pedimento.filename}`,
- );
-
- // Track if any updates occurred in this page
- let updatesOccurred = false;
- // Process this page of related files in batches
- const pageFileIds = relatedFilesPage.map((f) => f.id);
+ logger.info('Using API service for arela_path propagation...');

- for (let i = 0; i < pageFileIds.length; i += BATCH_SIZE) {
- const batchIds = pageFileIds.slice(i, i + BATCH_SIZE);
- const batchNumber =
- Math.floor(relatedFilesFrom / BATCH_SIZE) +
- Math.floor(i / BATCH_SIZE) +
- 1;
+ // Log year filtering configuration
+ const years = appConfig.upload.years || [];
+ if (years.length > 0) {
+ logger.info(`🗓️ Year filter enabled: ${years.join(', ')}`);
+ console.log(`🗓️ Year filter enabled: ${years.join(', ')}`);
+ } else {
+ logger.info('🗓️ No year filter configured - processing all years');
+ console.log('🗓️ No year filter configured - processing all years');
+ }

- logger.info(
- `Updating batch ${batchNumber}: ${batchIds.length} files with arela_path and year...`,
- );
+ console.log('⏳ Processing on backend... This may take a moment.');

- try {
- const { error: updateError } = await supabase
- .from('uploader')
- .update({
- arela_path: folderArelaPath,
- year: pedimento.year,
- })
- .in('id', batchIds);
-
- if (updateError) {
- logger.error(
- `Error in batch ${batchNumber}: ${updateError.message}`,
- );
- totalErrors++;
- } else {
- totalUpdated += batchIds.length;
- totalRelatedFilesProcessed += batchIds.length;
- updatesOccurred = true; // Mark that updates occurred
- logger.info(
- `Successfully updated batch ${batchNumber}: ${batchIds.length} files with arela_path and year`,
- );
- }
- } catch (batchError) {
- logger.error(
- `Exception in batch ${batchNumber}: ${batchError.message}`,
- );
- totalErrors++;
- }
- }
+ // Trigger backend propagation - all logic runs server-side
+ const result = await apiService.propagateArelaPath({ years });

- // Check if we need to fetch the next page of related files
- if (relatedFilesPage.length < relatedFilesPageSize) {
- logger.info('No more related files for', basePath);
- hasMoreRelatedFiles = false;
- logger.info(
- `Completed processing related files for ${pedimento.filename}. Total processed: ${totalRelatedFilesProcessed}`,
- );
- } else {
- // If updates occurred, reset pagination to start from beginning
- // since records that matched the query may no longer match after update
- if (updatesOccurred) {
- relatedFilesFrom = 0;
- logger.info(
- `Page ${relatedFilesPageNumber} complete with updates: ${relatedFilesPage.length} files processed, restarting pagination from beginning due to query condition changes...`,
- );
- } else {
- relatedFilesFrom += relatedFilesPageSize - 1;
- logger.info(
- `Page ${relatedFilesPageNumber} complete: ${relatedFilesPage.length} files processed, continuing to next page...`,
- );
- }
- relatedFilesPageNumber++;
- }
- }
- } catch (error) {
- logger.error(
- `Error processing pedimento ${pedimento.filename}: ${error.message}`,
- );
- totalErrors++;
- }
- }
-
- // Check if we need to fetch the next page
- if (pedimentoPage.length < pageSize) {
- hasMoreData = false;
- logger.info(
- `Completed processing. Last page ${pageNumber} had ${pedimentoPage.length} records`,
- );
- } else {
- offset += pageSize;
- pageNumber++;
- logger.info(
- `Page ${pageNumber - 1} complete: ${pedimentoPage.length} records processed, moving to next page...`,
- );
- }
+ if (!result.success) {
+ const errorMsg = `Backend propagation failed: ${result.error || 'Unknown error'}`;
+ logger.error(errorMsg);
+ throw new Error(errorMsg);
  }

- // Final summary
- if (totalProcessed === 0) {
+ // Display results
+ const { processedCount = 0, updatedCount = 0, errorCount = 0 } = result;
+
+ if (processedCount === 0) {
  logger.info('No pedimento_simplificado records with arela_path found');
  console.log(
  'ℹ️ No pedimento_simplificado records with arela_path found',
  );
  } else {
- // pageNumber represents the current page (starts at 1)
- const totalPages = pageNumber;
- console.log(
- `📋 Processed ${totalProcessed} pedimento records across ${totalPages} page${totalPages !== 1 ? 's' : ''}`,
- );
- logger.info(
- `Processed ${totalProcessed} pedimento records across ${totalPages} page${totalPages !== 1 ? 's' : ''}`,
- );
+ console.log(`📋 Processed ${processedCount} pedimento records`);
+ logger.info(`Processed ${processedCount} pedimento records`);
  }

- const result = {
- processedCount: totalProcessed,
- updatedCount: totalUpdated,
- errorCount: totalErrors,
- };
-
  logger.success(
- `Phase 3 Summary: ${totalProcessed} pedimentos processed, ${totalUpdated} files updated with arela_path and year, ${totalErrors} errors`,
+ `Phase 3 Summary: ${processedCount} pedimentos processed, ${updatedCount} files updated with arela_path and year, ${errorCount} errors`,
  );

- return result;
+ return {
+ processedCount,
+ updatedCount,
+ errorCount,
+ };
  }

  /**
  * Upload files to Arela API based on specific RFC values
+ * Supports cross-tenant mode where source and target APIs can be different
  * @param {Object} options - Upload options
+ * @param {string} options.sourceApi - Source API target for reading data (cross-tenant mode)
+ * @param {string} options.targetApi - Target API target for uploading files (cross-tenant mode)
+ * @param {string} options.apiTarget - Single API target for both reading and uploading (single API mode)
  * @returns {Promise<Object>} Processing result
  */
  async uploadFilesByRfc(options = {}) {
- const supabase = await this.#getSupabaseClient();
- const uploadService = await uploadServiceFactory.getUploadService();
-
  // Get configuration
  const appConfig = await import('../config/config.js').then(
  (m) => m.appConfig,
  );

+ // Determine if we're in cross-tenant mode
+ const isCrossTenant =
+ options.sourceApi &&
+ options.targetApi &&
+ options.sourceApi !== options.targetApi;
+
+ // Determine if we're in single API mode with specific target
+ const isSingleApiMode = !isCrossTenant && options.apiTarget;
+
+ let sourceService, targetService;
+
+ if (isCrossTenant) {
+ console.log('🔀 Cross-tenant upload mode enabled');
+ console.log(` 📖 Source API: ${options.sourceApi}`);
+ console.log(` 📝 Target API: ${options.targetApi}`);
+
+ // Get separate services for source and target
+ sourceService = await uploadServiceFactory.getApiServiceForTarget(
+ options.sourceApi,
+ );
+ targetService = await uploadServiceFactory.getApiServiceForTarget(
+ options.targetApi,
+ );
+
+ // Verify both services are available
+ if (!(await sourceService.isAvailable())) {
+ throw new Error(`Source API '${options.sourceApi}' is not available`);
+ }
+ if (!(await targetService.isAvailable())) {
+ throw new Error(`Target API '${options.targetApi}' is not available`);
+ }
+
+ console.log(`✅ Connected to source: ${sourceService.baseUrl}`);
+ console.log(`✅ Connected to target: ${targetService.baseUrl}`);
+ } else if (isSingleApiMode) {
+ // Single API mode with specific target - use the same service for both
+ console.log(`🎯 Single API mode: ${options.apiTarget}`);
+
+ const apiService = await uploadServiceFactory.getApiServiceForTarget(
+ options.apiTarget,
+ );
+
+ if (!(await apiService.isAvailable())) {
+ throw new Error(`API '${options.apiTarget}' is not available`);
+ }
+
+ console.log(`✅ Connected to: ${apiService.baseUrl}`);
+ sourceService = apiService;
+ targetService = apiService;
+ } else {
+ // Default mode - use the default service for both
+ const apiService = await uploadServiceFactory.getUploadService();
+
+ if (apiService.getServiceName() !== 'Arela API') {
+ throw new Error(
+ 'API service is required for RFC-based upload. Please configure ARELA_API_URL and ARELA_API_TOKEN.',
+ );
+ }
+
+ sourceService = apiService;
+ targetService = apiService;
+ }
+
  if (!appConfig.upload.rfcs || appConfig.upload.rfcs.length === 0) {
  const errorMsg =
  'No RFCs specified. Please set UPLOAD_RFCS environment variable with pipe-separated RFC values.';
@@ -1021,16 +851,18 @@ export class DatabaseService {
  }

  logger.info('Phase 4: Starting RFC-based upload process...');
+ logger.info(
+ `Using ${isCrossTenant ? 'cross-tenant' : 'standard'} API service for RFC-based upload...`,
+ );
  console.log('🎯 RFC-based Upload Mode');
  console.log(`📋 Target RFCs: ${appConfig.upload.rfcs.join(', ')}`);
  console.log('🔍 Searching for files to upload...');

  // First, count total files for the RFCs to show filtering effect
- const { count: totalRfcFiles, error: countError } = await supabase
- .from('uploader')
- .select('*', { count: 'exact', head: true })
- .in('rfc', appConfig.upload.rfcs)
- .not('arela_path', 'is', null);
+ const { count: totalRfcFiles, error: countError } =
+ await sourceService.fetchRfcFileCount({
+ rfcs: appConfig.upload.rfcs,
+ });

  if (countError) {
  logger.warn(`Could not count total RFC files: ${countError.message}`);
@@ -1056,22 +888,13 @@ export class DatabaseService {
  `Fetching pedimento records page ${pageNumber} (records ${offset + 1} to ${offset + pageSize})...`,
  );

- let pedimentoQuery = supabase
- .from('uploader')
- .select('arela_path')
- .eq('document_type', 'pedimento_simplificado')
- .in('rfc', appConfig.upload.rfcs)
- .not('arela_path', 'is', null);
-
- // Add year filter if UPLOAD_YEARS is configured
- if (appConfig.upload.years && appConfig.upload.years.length > 0) {
- pedimentoQuery = pedimentoQuery.in('year', appConfig.upload.years);
- }
-
  const { data: pedimentoPage, error: pedimentoError } =
- await pedimentoQuery
- .range(offset, offset + pageSize - 1)
- .order('created_at');
+ await sourceService.fetchPedimentosByRfc({
+ rfcs: appConfig.upload.rfcs,
+ years: appConfig.upload.years || [],
+ offset,
+ limit: pageSize,
+ });

  if (pedimentoError) {
  const errorMsg = `Error fetching pedimento RFC records page ${pageNumber}: ${pedimentoError.message}`;
@@ -1162,13 +985,12 @@ export class DatabaseService {
  let filePageNum = 1;

  while (hasMoreFiles) {
- const { data: batch, error: queryError } = await supabase
- .from('uploader')
- .select('id, original_path, arela_path, filename, rfc, document_type')
- .in('arela_path', arelaPathChunk)
- .neq('status', 'file-uploaded')
- .order('created_at')
- .range(fileOffset, fileOffset + filePageSize - 1);
+ const { data: batch, error: queryError } =
+ await sourceService.fetchFilesForUpload({
+ arelaPaths: arelaPathChunk,
+ offset: fileOffset,
+ limit: filePageSize,
+ });

  if (queryError) {
  const errorMsg = `Error fetching files for chunk ${chunkNumber} page ${filePageNum}: ${queryError.message}`;
@@ -1227,10 +1049,11 @@ export class DatabaseService {
  );

  // Process batch using concurrent processing similar to UploadCommand
+ // In cross-tenant mode: targetService for uploading, sourceService for reading
  const batchResults = await this.#processRfcBatch(
  uploadBatch,
- uploadService,
- supabase,
+ targetService, // Used for uploading files
+ sourceService, // Used for reading metadata
  options,
  maxConcurrency,
  );
@@ -1289,7 +1112,14 @@ export class DatabaseService {
  * @returns {Promise<Array>} Array of files ready for upload
  */
  async getFilesReadyForUpload(options = {}) {
- const supabase = await this.#getSupabaseClient();
+ // Get API service
+ const apiService = await uploadServiceFactory.getUploadService();
+
+ if (apiService.getServiceName() !== 'Arela API') {
+ throw new Error(
+ 'API service is required for querying files. Please configure ARELA_API_URL and ARELA_API_TOKEN.',
+ );
+ }

  logger.info('Querying files ready for upload...');
  console.log('🔍 Querying files ready for upload...');
@@ -1313,23 +1143,13 @@ export class DatabaseService {
  '🎯 Finding pedimento_simplificado documents for specified RFCs with arela_path...',
  );

- let pedimentoReadyQuery = supabase
- .from('uploader')
- .select('arela_path')
- .eq('document_type', 'pedimento_simplificado')
- .in('rfc', uploadRfcs)
- .not('arela_path', 'is', null);
-
- // Add year filter if UPLOAD_YEARS is configured
- if (appConfig.upload.years && appConfig.upload.years.length > 0) {
- pedimentoReadyQuery = pedimentoReadyQuery.in(
- 'year',
- appConfig.upload.years,
- );
- }
-
  const { data: pedimentoRecords, error: pedimentoError } =
- await pedimentoReadyQuery;
+ await apiService.fetchPedimentosByRfc({
+ rfcs: uploadRfcs,
+ years: appConfig.upload.years || [],
+ offset: 0,
+ limit: 10000, // Fetch all pedimentos in one go for this query
+ });

  if (pedimentoError) {
  throw new Error(
@@ -1362,22 +1182,19 @@ export class DatabaseService {
  for (let i = 0; i < uniqueArelaPaths.length; i += chunkSize) {
  const pathChunk = uniqueArelaPaths.slice(i, i + chunkSize);

- // Query with pagination to get all results (Supabase default limit is 1000)
+ // Query with pagination to get all results
  let chunkFiles = [];
  let from = 0;
  const pageSize = 1000;
  let hasMoreData = true;

  while (hasMoreData) {
- const { data: pageData, error: chunkError } = await supabase
- .from('uploader')
- .select(
- 'id, original_path, arela_path, filename, rfc, document_type, status',
- )
- .in('arela_path', pathChunk)
- .neq('status', 'file-uploaded')
- .not('original_path', 'is', null)
- .range(from, from + pageSize - 1);
+ const { data: pageData, error: chunkError } =
+ await apiService.fetchFilesForUpload({
+ arelaPaths: pathChunk,
+ offset: from,
+ limit: pageSize,
+ });

  if (chunkError) {
  throw new Error(
@@ -1441,7 +1258,7 @@ export class DatabaseService {
  * Process a batch of files using concurrent processing for RFC uploads
  * @param {Array} files - Files to process in this batch
  * @param {Object} uploadService - Upload service instance
- * @param {Object} supabase - Supabase client
+ * @param {Object} apiService - API service instance for database updates
  * @param {Object} options - Upload options
  * @param {number} maxConcurrency - Maximum concurrent operations
  * @returns {Promise<Object>} Batch processing results
@@ -1449,7 +1266,7 @@ export class DatabaseService {
  async #processRfcBatch(
  files,
  uploadService,
- supabase,
+ apiService,
  options,
  maxConcurrency,
  ) {
@@ -1473,7 +1290,7 @@ export class DatabaseService {
  return await this.#processRfcSingleFile(
  file,
  uploadService,
- supabase,
+ apiService,
  options,
  fs,
  );
@@ -1519,7 +1336,7 @@ export class DatabaseService {
  return await this.#processRfcApiBatch(
  chunk,
  uploadService,
- supabase,
+ apiService,
  options,
  fs,
  );
@@ -1547,20 +1364,20 @@ export class DatabaseService {
  /**
  * Process a single file for RFC upload (Supabase mode)
  */
- async #processRfcSingleFile(file, uploadService, supabase, options, fs) {
+ async #processRfcSingleFile(file, uploadService, apiService, options, fs) {
  try {
  // Check if file exists
  if (!fs.existsSync(file.original_path)) {
  logger.warn(
  `File not found: ${file.filename} at ${file.original_path}`,
  );
- await supabase
- .from('uploader')
- .update({
+ await apiService.updateFileStatus([
+ {
+ id: file.id,
  status: 'file-not-found',
  message: 'File no longer exists at original path',
- })
- .eq('id', file.id);
+ },
+ ]);
  return { success: false, error: 'File not found' };
  }

@@ -1587,25 +1404,25 @@ export class DatabaseService {

  // Check upload result before updating database status
  if (uploadResult.success) {
- await supabase
- .from('uploader')
- .update({
+ await apiService.updateFileStatus([
+ {
+ id: file.id,
  status: 'file-uploaded',
  message: 'Successfully uploaded to Supabase',
  processing_status: 'UPLOADED',
- })
- .eq('id', file.id);
+ },
+ ]);

  logger.info(`✅ Uploaded: ${file.filename}`);
  return { success: true, filename: file.filename };
  } else {
- await supabase
- .from('uploader')
- .update({
+ await apiService.updateFileStatus([
+ {
+ id: file.id,
  status: 'upload-error',
  message: `Upload failed: ${uploadResult.error}`,
- })
- .eq('id', file.id);
+ },
+ ]);

  logger.error(
  `❌ Upload failed: ${file.filename} - ${uploadResult.error}`,
@@ -1621,13 +1438,13 @@ export class DatabaseService {
  `❌ Error processing file ${file.filename}: ${error.message}`,
  );

- await supabase
- .from('uploader')
- .update({
+ await apiService.updateFileStatus([
+ {
+ id: file.id,
  status: 'upload-error',
  message: `Processing error: ${error.message}`,
- })
- .eq('id', file.id);
+ },
+ ]);

  return { success: false, error: error.message, filename: file.filename };
  }
@@ -1636,7 +1453,7 @@ export class DatabaseService {
  /**
  * Process multiple files in a single API batch call (API service mode)
  */
- async #processRfcApiBatch(files, uploadService, supabase, options, fs) {
+ async #processRfcApiBatch(files, uploadService, apiService, options, fs) {
  let processed = 0;
  let uploaded = 0;
  let errors = 0;
@@ -1668,15 +1485,15 @@ export class DatabaseService {
  }

  // Update invalid files in database
- for (const file of invalidFiles) {
- await supabase
- .from('uploader')
- .update({
+ if (invalidFiles.length > 0) {
+ await apiService.updateFileStatus(
+ invalidFiles.map((file) => ({
+ id: file.id,
  status: 'file-not-found',
  message: 'File no longer exists at original path',
- })
- .eq('id', file.id);
- errors++;
+ })),
+ );
+ errors += invalidFiles.length;
  }

  // Process valid files in batch if any exist
@@ -1693,9 +1510,15 @@ export class DatabaseService {
  }

  // Make single API call with multiple files
+ // Include RFC for multi-database routing (required for cross-tenant uploads)
  const uploadResult = await uploadService.upload(
  validFiles.map((f) => f.fileData),
- { folderStructure: fullFolderStructure },
+ {
+ folderStructure: fullFolderStructure,
+ rfc: sampleFile.rfc, // For cross-tenant: routes to correct client DB
+ autoDetect: true, // Enable detection on target API
+ autoOrganize: true, // Enable organization on target API
+ },
  );

  if (uploadResult.success && uploadResult.data) {
@@ -1726,6 +1549,9 @@ export class DatabaseService {
  `🔍 Expected filenames: ${Array.from(fileNameToRecord.keys()).join(', ')}`,
  );

+ // Prepare status updates
+ const statusUpdates = [];
+
  // Handle successfully uploaded files
  if (apiResult.uploaded && apiResult.uploaded.length > 0) {
  const successfulFileIds = [];
@@ -1750,6 +1576,13 @@ export class DatabaseService {
  successfulFileIds.push(dbRecord.id);
  matchedFilenames.push(possibleFilename);
  logger.debug(`✅ Matched file: ${possibleFilename}`);
+
+ statusUpdates.push({
+ id: dbRecord.id,
+ status: 'file-uploaded',
+ message: 'Successfully uploaded to Arela API (batch)',
+ processing_status: 'UPLOADED',
+ });
  } else {
  logger.warn(
  `⚠️ Could not match uploaded file with any known filename: ${JSON.stringify(uploadedFile)}`,
@@ -1765,30 +1598,21 @@ export class DatabaseService {
  logger.warn(
  `🔄 Fallback: No individual file matches found, but API indicates ${apiResult.uploaded.length} uploads. Marking all ${validFiles.length} batch files as uploaded.`,
  );
- validFiles.forEach((f) => successfulFileIds.push(f.dbRecord.id));
- }
-
- if (successfulFileIds.length > 0) {
- await supabase
- .from('uploader')
- .update({
+ validFiles.forEach((f) => {
+ statusUpdates.push({
+ id: f.dbRecord.id,
  status: 'file-uploaded',
  message: 'Successfully uploaded to Arela API (batch)',
  processing_status: 'UPLOADED',
- })
- .in('id', successfulFileIds);
-
- uploaded += successfulFileIds.length;
- logger.info(
- `✅ Batch upload successful: ${successfulFileIds.length} files uploaded`,
- );
+ });
+ });
  }
+
+ uploaded += successfulFileIds.length || validFiles.length;
  }

  // Handle failed files
  if (apiResult.errors && apiResult.errors.length > 0) {
- const failedFileIds = [];
-
  apiResult.errors.forEach((errorInfo) => {
  // Try multiple possible property names for filename in errors
  const possibleFilename =
@@ -1801,32 +1625,21 @@ export class DatabaseService {


  const dbRecord = fileNameToRecord.get(possibleFilename);
  if (dbRecord) {
- failedFileIds.push(dbRecord.id);
+ statusUpdates.push({
+ id: dbRecord.id,
+ status: 'upload-error',
+ message: `Upload failed: ${errorInfo.error || 'Unknown error'}`,
+ });
+ errors++;
  } else {
  logger.warn(
  `⚠️ Could not match error file: ${JSON.stringify(errorInfo)}`,
  );
  }
  });
-
- if (failedFileIds.length > 0) {
- await supabase
- .from('uploader')
- .update({
- status: 'upload-error',
- message: `Upload failed: ${apiResult.errors[0].error}`,
- })
- .in('id', failedFileIds);
-
- errors += failedFileIds.length;
- logger.error(
- `❌ Batch upload errors: ${failedFileIds.length} files failed`,
- );
- }
  }

  // Handle any remaining files that weren't in uploaded or errors arrays
- // Use robust filename extraction for both uploaded and error files
  const extractFilename = (fileObj) => {
  return (
@@ -1846,26 +1659,24 @@ export class DatabaseService {
  const unprocessedFiles = validFiles.filter(
  (f) => !processedFileNames.has(f.fileData.name),
  );
+
  if (unprocessedFiles.length > 0) {
- // Only mark as unprocessed if we haven't already handled all files through fallback logic
- // If we used fallback (all files marked as uploaded), don't mark any as unprocessed
  const alreadyHandledCount = uploaded + errors;
  const shouldMarkUnprocessed =
  alreadyHandledCount < validFiles.length;

  if (shouldMarkUnprocessed) {
- const unprocessedIds = unprocessedFiles.map((f) => f.dbRecord.id);
- await supabase
- .from('uploader')
- .update({
+ unprocessedFiles.forEach((f) => {
+ statusUpdates.push({
+ id: f.dbRecord.id,
  status: 'upload-error',
  message: 'File not found in API response',
- })
- .in('id', unprocessedIds);
+ });
+ });
+ errors += unprocessedFiles.length;

- errors += unprocessedIds.length;
  logger.warn(
- `⚠️ Unprocessed files: ${unprocessedIds.length} files not found in API response`,
+ `⚠️ Unprocessed files: ${unprocessedFiles.length} files not found in API response`,
  );
  logger.debug(
  `🔍 API response uploaded array: ${JSON.stringify(apiResult.uploaded)}`,
@@ -1879,16 +1690,30 @@ export class DatabaseService {
  );
  }
  }
+
+ // Batch update all status changes
+ if (statusUpdates.length > 0) {
+ const updateResult =
+ await apiService.updateFileStatus(statusUpdates);
+ if (!updateResult.success) {
+ logger.error(
+ `Some status updates failed: ${updateResult.errors?.length || 0} errors`,
+ );
+ } else {
+ logger.info(
+ `✅ Batch upload successful: ${uploaded} files uploaded`,
+ );
+ }
+ }
  } else {
  // Complete batch failure - mark all files as failed
- const fileIds = validFiles.map((f) => f.dbRecord.id);
- await supabase
- .from('uploader')
- .update({
- status: 'upload-error',
- message: uploadResult.error || 'Batch upload failed',
- })
- .in('id', fileIds);
+ const failureUpdates = validFiles.map((f) => ({
+ id: f.dbRecord.id,
+ status: 'upload-error',
+ message: uploadResult.error || 'Batch upload failed',
+ }));
+
+ await apiService.updateFileStatus(failureUpdates);

  errors += validFiles.length;
  logger.error(
@@ -1900,14 +1725,13 @@ export class DatabaseService {
  logger.error(`❌ Error processing batch: ${error.message}`);

  // Mark all files as failed
- const fileIds = files.map((f) => f.id);
- await supabase
- .from('uploader')
- .update({
- status: 'upload-error',
- message: `Batch processing error: ${error.message}`,
- })
- .in('id', fileIds);
+ const failureUpdates = files.map((f) => ({
+ id: f.id,
+ status: 'upload-error',
+ message: `Batch processing error: ${error.message}`,
+ }));
+
+ await apiService.updateFileStatus(failureUpdates);

  errors += files.length;
  }