@arela/uploader 0.2.11 → 0.2.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
+ {}
@@ -0,0 +1,157 @@
+ # Supabase Upload Issue Fix
+
+ ## Problem Description
+
+ When running the CLI with `upload --force-supabase`, the logs reported that files were being uploaded successfully, but no files appeared in the Supabase bucket.
+
+ ## Root Cause
+
+ The issue was in the `#processFile` method of `/src/commands/UploadCommand.js`. The code was **silently ignoring upload failures** from Supabase:
+
+ ```javascript
+ // ❌ BEFORE - Upload result was ignored
+ if (uploadService.getServiceName() === 'Arela API') {
+   result = await uploadService.upload([fileObject], { ...options, uploadPath });
+ } else {
+   // Supabase direct upload
+   await uploadService.upload([fileObject], { uploadPath });
+   // Result ignored! No error checking!
+ }
+
+ logger.info(`SUCCESS: ${path.basename(filePath)} -> ${uploadPath}`);
+ ```
+
+ Even if the Supabase upload failed and returned `{ success: false, error: "..." }`, the code would:
+ 1. Ignore the error
+ 2. Log "SUCCESS"
+ 3. Continue processing
+
+ Because `SupabaseUploadService` reports failures by returning `{ success: false, error }` instead of throwing, the `await` completed normally and the failure was lost. This created the illusion that files were being uploaded when they weren't.
+
+ ## Changes Made
+
+ ### 1. Fixed Error Handling in `UploadCommand.js`
+
+ **File:** `/src/commands/UploadCommand.js`
+
+ Now properly captures and validates the Supabase upload result:
+
+ ```javascript
+ // ✅ AFTER - Upload result is captured and validated
+ if (uploadService.getServiceName() === 'Arela API') {
+   result = await uploadService.upload([fileObject], { ...options, uploadPath });
+ } else {
+   // Supabase direct upload
+   const uploadResult = await uploadService.upload([fileObject], { uploadPath });
+
+   // Check if upload was successful
+   if (!uploadResult.success) {
+     throw new Error(`Supabase upload failed: ${uploadResult.error}`);
+   }
+
+   result = { successCount: 1 };
+ }
+ ```
+
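+ This check relies on the Supabase service returning a `{ success, error }` result object instead of throwing (see the `SupabaseUploadService.js` snippets in section 2 below). A minimal sketch of a wrapper around that contract; the `uploadOrThrow` helper name is illustrative only and not part of the package:
+
+ ```javascript
+ // Illustrative sketch only: converts a { success: false, error } upload result
+ // into a thrown Error, mirroring the check added to #processFile above.
+ async function uploadOrThrow(uploadService, fileObject, uploadPath) {
+   const uploadResult = await uploadService.upload([fileObject], { uploadPath });
+   if (!uploadResult.success) {
+     throw new Error(`Supabase upload failed: ${uploadResult.error}`);
+   }
+   return { successCount: 1 };
+ }
+ ```
+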
+ ### 2. Enhanced Logging in `SupabaseUploadService.js`
+
+ **File:** `/src/services/upload/SupabaseUploadService.js`
+
+ Added detailed logging to help diagnose upload issues:
+
+ #### Connection Initialization
+ ```javascript
+ console.log(`🔌 Connecting to Supabase: ${appConfig.supabase.url}`);
+ console.log(`📦 Using bucket: ${this.bucket}`);
+ console.log(`🧪 Testing bucket access...`);
+ console.log(`✅ Successfully connected to Supabase bucket: ${this.bucket}`);
+ ```
+
+ #### Upload Operations
+ ```javascript
+ if (error) {
+   console.error(`❌ Supabase upload failed for ${normalizedPath}:`, error.message);
+   return { success: false, error: error.message };
+ }
+
+ console.log(`✅ Supabase upload successful: ${normalizedPath}`);
+ ```
+
+ #### Better Error Messages
+ ```javascript
+ if (!appConfig.supabase.url || !appConfig.supabase.key || !this.bucket) {
+   const missing = [];
+   if (!appConfig.supabase.url) missing.push('SUPABASE_URL');
+   if (!appConfig.supabase.key) missing.push('SUPABASE_KEY');
+   if (!this.bucket) missing.push('SUPABASE_BUCKET');
+   throw new Error(`Missing Supabase configuration: ${missing.join(', ')}`);
+ }
+ ```
+
+ ## Expected Behavior After Fix
+
+ ### On Successful Upload
+ ```
+ 🔌 Connecting to Supabase: https://your-project.supabase.co
+ 📦 Using bucket: your-bucket-name
+ 🧪 Testing bucket access...
+ ✅ Successfully connected to Supabase bucket: your-bucket-name
+ ✅ Supabase upload successful: 2023/2000601/file.xml
+ ```
+
+ ### On Failed Upload
+ ```
+ ❌ Supabase upload failed for 2023/2000601/file.xml: [error details]
+ Error: Supabase upload failed: [error details]
+ ```
+
+ ### On Configuration Error
+ ```
+ Error: Missing Supabase configuration: SUPABASE_URL, SUPABASE_KEY
+ ```
+
+ ## Common Issues to Check
+
+ If uploads still fail after this fix, check the following (a quick verification script follows this list):
+
+ 1. **Environment Variables**
+    ```bash
+    echo $SUPABASE_URL
+    echo $SUPABASE_KEY    # Should be anon or service_role key
+    echo $SUPABASE_BUCKET
+    ```
+
+ 2. **Bucket Permissions**
+    - Check if the bucket exists in Supabase Storage
+    - Verify the API key has write permissions to the bucket
+    - Check bucket policies (public vs private)
+
+ 3. **Network/Firewall**
+    - Ensure the server can reach Supabase (check firewall rules)
+    - Test connection: `curl https://your-project.supabase.co`
+
+ 4. **File Size Limits**
+    - Check Supabase storage limits for your plan
+    - Verify file sizes are within allowed limits
+
+ 5. **Path Format**
+    - Ensure paths don't contain invalid characters
+    - Paths are normalized (forward slashes only)
+
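+ Items 1 and 2 can be checked with a small standalone script using `@supabase/supabase-js`, the same client the uploader uses. This is a diagnostic sketch only (the `check-supabase.mjs` name is a suggestion, not part of the package); it confirms the bucket is reachable and listable, while write permissions still need to be verified in the Supabase dashboard:
+
+ ```javascript
+ // check-supabase.mjs (hypothetical helper script): verify configuration and bucket access
+ import { createClient } from '@supabase/supabase-js';
+
+ // Report any missing environment variables, mirroring the CLI's error message
+ const { SUPABASE_URL, SUPABASE_KEY, SUPABASE_BUCKET } = process.env;
+ const missing = Object.entries({ SUPABASE_URL, SUPABASE_KEY, SUPABASE_BUCKET })
+   .filter(([, value]) => !value)
+   .map(([name]) => name);
+ if (missing.length > 0) {
+   console.error(`Missing Supabase configuration: ${missing.join(', ')}`);
+   process.exit(1);
+ }
+
+ // List a single object to confirm the bucket exists and the key can read it
+ const supabase = createClient(SUPABASE_URL, SUPABASE_KEY);
+ const { error } = await supabase.storage.from(SUPABASE_BUCKET).list('', { limit: 1 });
+ if (error) {
+   console.error(`Bucket access check failed: ${error.message}`);
+   process.exit(1);
+ }
+ console.log(`Bucket '${SUPABASE_BUCKET}' is reachable and listable.`);
+ ```
+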
+ ## Testing the Fix
+
+ Run the upload command with verbose logging:
+
+ ```bash
+ arela upload --force-supabase -v
+ ```
+
+ You should now see:
+ - Clear connection status messages
+ - Per-file upload success/failure messages
+ - Proper error messages if uploads fail
+ - Accurate success/error counts in the summary
+
+ ## Migration Notes
+
+ No database migrations required. This is a code-only fix that improves error handling and logging.
package/commands.md CHANGED
@@ -3,4 +3,12 @@ node src/index.js --detect-pdfs
  node src/index.js --propagate-arela-path
  node src/index.js --upload-by-rfc --folder-structure palco
 
- UPLOAD_RFCS="RFC1|RFC2" node src/index.js --upload-by-rfc --folder-structure target-folder
+ UPLOAD_RFCS="RFC1|RFC2" node src/index.js --upload-by-rfc --folder-structure target-folder
+
+
+
+
+ node src/index.js stats --stats-only
+ node src/index.js detect --detect-pdfs
+ node src/index.js detect --propagate-arela-path
+ node src/index.js upload --upload-by-rfc --folder-structure palco
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@arela/uploader",
-   "version": "0.2.11",
+   "version": "0.2.13",
    "description": "CLI to upload files/directories to Arela",
    "bin": {
      "arela": "./src/index.js"
@@ -439,7 +439,14 @@ export class UploadCommand {
        });
      } else {
        // Supabase direct upload
-       await uploadService.upload([fileObject], { uploadPath });
+       const uploadResult = await uploadService.upload([fileObject], { uploadPath });
+
+       // Check if upload was successful
+       if (!uploadResult.success) {
+         throw new Error(`Supabase upload failed: ${uploadResult.error}`);
+       }
+
+       result = { successCount: 1 };
      }
 
      logger.info(`SUCCESS: ${path.basename(filePath)} -> ${uploadPath}`);
@@ -28,10 +28,10 @@ class Config {
      const __dirname = path.dirname(__filename);
      const packageJsonPath = path.resolve(__dirname, '../../package.json');
      const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf-8'));
-     return packageJson.version || '0.2.11';
+     return packageJson.version || '0.2.12';
    } catch (error) {
      console.warn('⚠️ Could not read package.json version, using fallback');
-     return '0.2.11';
+     return '0.2.12';
    }
  }
 
@@ -51,6 +51,7 @@ export class DatabaseService {
          error.message?.includes('timeout') ||
          error.message?.includes('canceling statement') ||
          error.message?.includes('connection') ||
+         error.message?.includes('fetch failed') ||
          error.code === 'PGRST301'; // PostgREST timeout
 
        if (!isRetriableError || attempt === maxRetries) {
@@ -650,16 +651,18 @@ export class DatabaseService {
 
    while (hasMoreRelatedFiles) {
      const { data: relatedFilesPage, error: relatedError } =
-       await supabase
-         .from('uploader')
-         .select('id, filename, original_path')
-         .like('original_path', `${basePath}%`)
-         .is('arela_path', null)
-         .neq('id', pedimento.id) // Exclude the pedimento itself
-         .range(
-           relatedFilesFrom,
-           relatedFilesFrom + relatedFilesPageSize - 1,
-         );
+       await this.#queryWithRetry(async () => {
+         return await supabase
+           .from('uploader')
+           .select('id, filename, original_path')
+           .like('original_path', `${basePath}%`)
+           .is('arela_path', null)
+           .neq('id', pedimento.id) // Exclude the pedimento itself
+           .range(
+             relatedFilesFrom,
+             relatedFilesFrom + relatedFilesPageSize - 1,
+           );
+       }, `query related files for ${pedimento.filename} (page ${relatedFilesPageNumber})`);
 
      if (relatedError) {
        logger.error(
@@ -730,7 +733,7 @@ export class DatabaseService {
 
      // Check if we need to fetch the next page of related files
      if (relatedFilesPage.length < relatedFilesPageSize) {
-       console.log('no more related files for ', basePath);
+       logger.info('No more related files for', basePath);
        hasMoreRelatedFiles = false;
        logger.info(
          `Completed processing related files for ${pedimento.filename}. Total processed: ${totalRelatedFilesProcessed}`,
@@ -930,193 +933,87 @@ export class DatabaseService {
      `Found ${allPedimentoRecords.length} pedimento records with ${uniqueArelaPaths.length} unique arela_paths across ${pageNumber - 1} pages`,
    );
 
-   // Step 2: Get all files with these arela_paths that haven't been uploaded yet
-   let rfcRecords = [];
-   const chunkSize = 50;
-
-   for (let i = 0; i < uniqueArelaPaths.length; i += chunkSize) {
-     const pathChunk = uniqueArelaPaths.slice(i, i + chunkSize);
-
-     const { data: chunkFiles, error: chunkError } = await supabase
-       .from('uploader')
-       .select('arela_path')
-       .in('arela_path', pathChunk)
-       .neq('status', 'file-uploaded')
-       .not('arela_path', 'is', null);
-
-     if (chunkError) {
-       const errorMsg = `Error fetching files for arela_paths chunk: ${chunkError.message}`;
-       logger.error(errorMsg);
-       throw new Error(errorMsg);
-     }
-
-     if (chunkFiles && chunkFiles.length > 0) {
-       rfcRecords = rfcRecords.concat(chunkFiles);
-     }
-   }
-
-   if (!rfcRecords || rfcRecords.length === 0) {
-     if (totalRfcFiles && totalRfcFiles > 0) {
-       console.log(
-         `ℹ️ All ${totalRfcFiles} files for the specified RFCs are already uploaded (status: file-uploaded)`,
-       );
-       console.log(' No new files to upload.');
-       logger.info(
-         `All ${totalRfcFiles} files for specified RFCs already uploaded`,
-       );
-     } else {
-       console.log(
-         'ℹ️ No files found for the specified RFCs with arela_path',
-       );
-       console.log(
-         ` Make sure files for RFCs [${appConfig.upload.rfcs.join(', ')}] have been processed and have arela_path values`,
-       );
-       logger.info('No files found for specified RFCs with arela_path');
-     }
-     return { processedCount: 0, uploadedCount: 0, errorCount: 0 };
-   }
-
-   // Show filtering effect
-   const uploadableArelaPaths = [
-     ...new Set(rfcRecords.map((r) => r.arela_path)),
-   ];
-   const skipped = (totalRfcFiles || 0) - rfcRecords.length;
-   if (skipped > 0) {
-     console.log(
-       `📊 Found ${rfcRecords.length} files ready for upload (${skipped} already uploaded, skipped)`,
-     );
-   } else {
-     console.log(`📊 Found ${rfcRecords.length} files ready for upload`);
-   }
-   logger.info(
-     `Found ${rfcRecords.length} files ready for upload, ${skipped} skipped`,
-   );
-
-   // Step 3: Process files with streaming pagination to avoid memory overload
+   // Step 2: Process files with optimized single query per chunk
    let totalProcessed = 0;
    let totalUploaded = 0;
    let totalErrors = 0;
    let globalFileCount = 0;
    const arelaPathChunkSize = 50;
-   const queryBatchSize = 500; // Reduced batch size for better memory management
    const batchSize = parseInt(options.batchSize) || 10;
 
    // Import performance configuration
    const { performance: perfConfig } = appConfig;
    const maxConcurrency = perfConfig?.maxApiConnections || 3;
 
-   console.log(
-     '📥 Processing files with streaming pagination (processing arela_paths in chunks to avoid URI limits and memory overload)...',
-   );
+   console.log('📥 Processing files in chunks to avoid URI limits...');
 
    // Process arela_paths in chunks and upload files as we fetch them
-   for (let i = 0; i < uploadableArelaPaths.length; i += arelaPathChunkSize) {
-     const arelaPathChunk = uploadableArelaPaths.slice(
-       i,
-       i + arelaPathChunkSize,
-     );
+   for (let i = 0; i < uniqueArelaPaths.length; i += arelaPathChunkSize) {
+     const arelaPathChunk = uniqueArelaPaths.slice(i, i + arelaPathChunkSize);
      const chunkNumber = Math.floor(i / arelaPathChunkSize) + 1;
      const totalChunks = Math.ceil(
-       uploadableArelaPaths.length / arelaPathChunkSize,
+       uniqueArelaPaths.length / arelaPathChunkSize,
      );
 
      console.log(
        ` Processing arela_path chunk ${chunkNumber}/${totalChunks} (${arelaPathChunk.length} paths)`,
      );
 
-     // For each chunk of arela_paths, use pagination to get related files and process them immediately
-     let hasMore = true;
-     let offset = 0;
-     let chunkFileCount = 0;
-
-     while (hasMore) {
-       const { data: batch, error: queryError } = await supabase
-         .from('uploader')
-         .select('id, original_path, arela_path, filename, rfc, document_type')
-         .in('arela_path', arelaPathChunk)
-         .not('original_path', 'is', null)
-         .neq('status', 'file-uploaded')
-         .range(offset, offset + queryBatchSize - 1)
-         .order('created_at');
-
-       if (queryError) {
-         const errorMsg = `Error fetching related files for chunk ${chunkNumber}: ${queryError.message}`;
-         logger.error(errorMsg);
-         throw new Error(errorMsg);
-       }
-
-       if (!batch || batch.length === 0) {
-         hasMore = false;
-         break;
-       }
+     // Fetch all files for this chunk in a single query
+     const { data: batch, error: queryError } = await supabase
+       .from('uploader')
+       .select('id, original_path, arela_path, filename, rfc, document_type')
+       .in('arela_path', arelaPathChunk)
+       .neq('status', 'file-uploaded')
+       .order('created_at');
 
-       chunkFileCount += batch.length;
-       globalFileCount += batch.length;
+     if (queryError) {
+       const errorMsg = `Error fetching files for chunk ${chunkNumber}: ${queryError.message}`;
+       logger.error(errorMsg);
+       throw new Error(errorMsg);
+     }
 
+     if (!batch || batch.length === 0) {
       console.log(
-         ` 📦 Processing batch within chunk ${chunkNumber}: ${batch.length} files (total processed so far: ${globalFileCount})`,
+         ` ℹ️ Chunk ${chunkNumber}/${totalChunks}: No files to upload`,
       );
+       continue;
+     }
 
-       // Track if any uploads occurred in this batch
-       let uploadsOccurred = false;
-
-       // Process this batch of files immediately using concurrent processing
-       // Split batch into upload batches
-       for (let j = 0; j < batch.length; j += batchSize) {
-         const uploadBatch = batch.slice(j, j + batchSize);
-         const batchNum = Math.floor(globalFileCount / batchSize) + 1;
-
-         console.log(
-           `📦 Processing upload batch ${batchNum} (${uploadBatch.length} files)`,
-         );
+     const chunkFileCount = batch.length;
+     globalFileCount += chunkFileCount;
 
-         // Process batch using concurrent processing similar to UploadCommand
-         const batchResults = await this.#processRfcBatch(
-           uploadBatch,
-           uploadService,
-           supabase,
-           options,
-           maxConcurrency,
-         );
+     console.log(
+       ` 📦 Chunk ${chunkNumber}/${totalChunks}: Processing ${chunkFileCount} files`,
+     );
 
-         totalProcessed += batchResults.processed;
-         totalUploaded += batchResults.uploaded;
-         totalErrors += batchResults.errors;
+     // Process this batch of files immediately using concurrent processing
+     // Split batch into upload batches
+     for (let j = 0; j < batch.length; j += batchSize) {
+       const uploadBatch = batch.slice(j, j + batchSize);
+       const batchNum = Math.floor(j / batchSize) + 1;
+       const totalBatches = Math.ceil(batch.length / batchSize);
 
-         // Track if uploads occurred (status changes from non-uploaded to uploaded)
-         if (batchResults.uploaded > 0) {
-           uploadsOccurred = true;
-         }
+       console.log(
+         ` 📦 Processing upload batch ${batchNum}/${totalBatches} within chunk ${chunkNumber} (${uploadBatch.length} files)`,
+       );
 
-         console.log(
-           `📊 Upload batch complete - Total progress: ${totalUploaded} uploaded, ${totalErrors} errors`,
-         );
-       }
+       // Process batch using concurrent processing similar to UploadCommand
+       const batchResults = await this.#processRfcBatch(
+         uploadBatch,
+         uploadService,
+         supabase,
+         options,
+         maxConcurrency,
+       );
 
-       // Check if we need more data from this chunk
-       if (batch.length < queryBatchSize) {
-         hasMore = false;
-       } else {
-         // If uploads occurred, reset pagination to start from beginning
-         // since records that matched the query may no longer match after upload
-         if (uploadsOccurred) {
-           offset = 0;
-           console.log(
-             ` 📄 Batch complete with uploads: ${batch.length} files processed, restarting pagination from beginning due to query condition changes...`,
-           );
-         } else {
-           offset += queryBatchSize;
-           console.log(
-             ` 📄 Batch complete: ${batch.length} files processed, continuing to next page (offset: ${offset})...`,
-           );
-         }
-       }
+       totalProcessed += batchResults.processed;
+       totalUploaded += batchResults.uploaded;
+       totalErrors += batchResults.errors;
 
-       if (hasMore) {
-         console.log(
-           ` 📄 Fetching more files for chunk ${chunkNumber}... (offset: ${offset})`,
-         );
-       }
+       console.log(
+         ` 📊 Batch complete - Progress: ${totalUploaded} uploaded, ${totalErrors} errors`,
+       );
      }
    }
 
    console.log(
@@ -63,16 +63,20 @@ export class ApiUploadService extends BaseUploadService {
    for (const file of files) {
      try {
        // Check file size for streaming vs buffer approach
-       const stats = await fs.promises.stat(file.path);
+       let size = file.size;
+       if (size === undefined || size === null) {
+         const stats = await fs.promises.stat(file.path);
+         size = stats.size;
+       }
        const fileSizeThreshold = 10 * 1024 * 1024; // 10MB threshold
 
-       if (stats.size > fileSizeThreshold) {
+       if (size > fileSizeThreshold) {
          // Use streaming for large files
          const fileStream = fs.createReadStream(file.path);
          formData.append('files', fileStream, {
            filename: file.name,
            contentType: file.contentType,
-           knownLength: stats.size,
+           knownLength: size,
          });
        } else {
          // Use buffer for smaller files
@@ -26,9 +26,16 @@ export class SupabaseUploadService extends BaseUploadService {
    }
 
    if (!appConfig.supabase.url || !appConfig.supabase.key || !this.bucket) {
-     throw new Error('Missing Supabase configuration');
+     const missing = [];
+     if (!appConfig.supabase.url) missing.push('SUPABASE_URL');
+     if (!appConfig.supabase.key) missing.push('SUPABASE_KEY');
+     if (!this.bucket) missing.push('SUPABASE_BUCKET');
+     throw new Error(`Missing Supabase configuration: ${missing.join(', ')}`);
    }
 
+   console.log(`🔌 Connecting to Supabase: ${appConfig.supabase.url}`);
+   console.log(`📦 Using bucket: ${this.bucket}`);
+
    this.client = createClient(appConfig.supabase.url, appConfig.supabase.key, {
      db: {
        schema: 'public',
@@ -48,11 +55,15 @@ export class SupabaseUploadService extends BaseUploadService {
      },
    });
 
-   // Test connection
-   const { error } = await this.client.storage.from(this.bucket).list('');
+   // Test connection and bucket access
+   console.log(`🧪 Testing bucket access...`);
+   const { error } = await this.client.storage.from(this.bucket).list('', { limit: 1 });
    if (error) {
-     throw new Error(`Error connecting to Supabase: ${error.message}`);
+     console.error(`❌ Bucket access test failed:`, error.message);
+     throw new Error(`Error connecting to Supabase bucket '${this.bucket}': ${error.message}`);
    }
+
+   console.log(`✅ Successfully connected to Supabase bucket: ${this.bucket}`);
  }
 
  /**
@@ -80,18 +91,22 @@ export class SupabaseUploadService extends BaseUploadService {
    const content = fs.readFileSync(file.path);
    const contentType = mime.lookup(file.path) || 'application/octet-stream';
 
+   const normalizedPath = uploadPath.replace(/\\/g, '/');
+
    const { data, error } = await this.client.storage
      .from(this.bucket)
-     .upload(uploadPath.replace(/\\/g, '/'), content, {
+     .upload(normalizedPath, content, {
        upsert: true,
        contentType,
      });
 
    if (error) {
+     console.error(`❌ Supabase upload failed for ${normalizedPath}:`, error.message);
      return { success: false, error: error.message };
    }
 
-   return { success: true, data };
+   console.log(`✅ Supabase upload successful: ${normalizedPath}`);
+   return { success: true, data, uploadPath: normalizedPath };
  }
 
  /**