@xenterprises/fastify-ximagepipeline 1.1.1 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,33 +1,41 @@
1
1
  // src/workers/processor.js
2
2
  import { downloadFromS3, deleteFromS3, uploadToS3, getPublicUrl, batchDeleteFromS3 } from "../services/s3.js";
3
- import { stripExif, generateVariants, generateBlurhash, getImageMetadata, compressToJpeg } from "../utils/image.js";
4
- import { getVariantPresets } from "../xImagePipeline.js";
3
+ import { stripExif, generateVariants, generateBlurhash, getImageMetadata, getAspectRatio, compressToJpeg } from "../utils/image.js";
5
4
 
6
5
  /**
7
6
  * Setup media processor worker
8
7
  * Polls job queue and processes media files
9
8
  */
10
9
  export function setupWorker(fastify, context, config) {
11
- // Worker state
12
10
  const workerId = `worker-${Date.now()}-${Math.random().toString(36).substring(7)}`;
13
11
  let isRunning = true;
14
12
 
15
- console.info(` 📊 Media Worker initialized (${workerId})`);
13
+ fastify.log.info(`[xImagePipeline] Media Worker initialized (${workerId})`);
16
14
 
17
15
  // Start polling
18
16
  const pollInterval = setInterval(() => {
19
17
  if (isRunning) {
20
18
  processNextJob(fastify, context, config, workerId).catch((err) => {
21
- console.error("Worker error:", err.message);
19
+ fastify.log.error({ err }, "[xImagePipeline] Worker error");
22
20
  });
23
21
  }
24
22
  }, config.pollInterval);
25
23
 
24
+ // Periodically recover stale locks
25
+ const staleLockInterval = setInterval(() => {
26
+ if (isRunning) {
27
+ recoverStaleLocks(context.db, config.lockTimeout).catch((err) => {
28
+ fastify.log.error({ err }, "[xImagePipeline] Stale lock recovery error");
29
+ });
30
+ }
31
+ }, config.lockTimeout);
32
+
26
33
  // Cleanup on fastify close
27
34
  fastify.addHook("onClose", async () => {
28
35
  isRunning = false;
29
36
  clearInterval(pollInterval);
30
- console.info(" 📊 Media Worker stopped");
37
+ clearInterval(staleLockInterval);
38
+ fastify.log.info("[xImagePipeline] Media Worker stopped");
31
39
  });
32
40
 
33
41
  return { workerId, stop: () => { isRunning = false; } };
@@ -57,7 +65,6 @@ async function processNextJob(fastify, context, config, workerId) {
57
65
  }
58
66
 
59
67
  // Lock the job
60
- const lockExpiry = new Date(Date.now() + config.lockTimeout);
61
68
  const locked = await context.db.mediaQueue.updateMany({
62
69
  where: {
63
70
  id: job.id,
@@ -74,7 +81,7 @@ async function processNextJob(fastify, context, config, workerId) {
74
81
  return; // Job was locked by another worker
75
82
  }
76
83
 
77
- console.info(`Processing job ${job.id}...`);
84
+ fastify.log.info({ jobId: job.id }, "[xImagePipeline] Processing job");
78
85
 
79
86
  try {
80
87
  // Download file from staging
@@ -96,7 +103,6 @@ async function processNextJob(fastify, context, config, workerId) {
96
103
  const moderationResult = await moderateImage(cleanBuffer, context.moderation);
97
104
 
98
105
  if (!moderationResult.passed) {
99
- // Content rejected
100
106
  await context.db.mediaQueue.update({
101
107
  where: { id: job.id },
102
108
  data: {
@@ -108,28 +114,28 @@ async function processNextJob(fastify, context, config, workerId) {
108
114
  },
109
115
  });
110
116
 
111
- // Clean up staging
112
117
  await deleteFromS3(context.s3Client, context.r2Config.bucket, job.stagingKey);
113
- console.info(`Job ${job.id} rejected by moderation`);
118
+ fastify.log.info({ jobId: job.id }, "[xImagePipeline] Job rejected by moderation");
114
119
  return;
115
120
  }
116
121
  } catch (err) {
117
- console.error(`Moderation failed: ${err.message}`);
122
+ fastify.log.error({ err, jobId: job.id }, "[xImagePipeline] Moderation failed");
118
123
  throw err;
119
124
  }
120
125
  }
121
126
 
122
- // Step 4: Generate variants
123
- const variantPresets = getVariantPresets();
124
- const preset = variantPresets[job.sourceType] || [];
127
+ // Step 4: Generate variants using sourceType config
128
+ const sourceTypeConfig = context.sourceTypes[job.sourceType];
129
+ const variantNames = sourceTypeConfig?.variants || [];
125
130
  const variantSpecs = {};
126
- for (const variantName of preset) {
131
+ for (const variantName of variantNames) {
127
132
  if (context.variants[variantName]) {
128
133
  variantSpecs[variantName] = context.variants[variantName];
129
134
  }
130
135
  }
131
136
 
132
- const variants = await generateVariants(cleanBuffer, variantSpecs, job.sourceType);
137
+ const quality = sourceTypeConfig?.quality || 85;
138
+ const variants = await generateVariants(cleanBuffer, variantSpecs, job.sourceType, quality);
133
139
 
134
140
  // Step 5: Generate blurhash
135
141
  const blurhash = await generateBlurhash(cleanBuffer);
@@ -150,10 +156,11 @@ async function processNextJob(fastify, context, config, workerId) {
150
156
  urls[variantName] = getPublicUrl(context.r2Config, variantKey);
151
157
  }
152
158
 
153
- // Upload compressed original (JPEG for space efficiency)
159
+ // Upload compressed original if sourceType config says to
154
160
  let originalUrl = null;
155
- if (context.storeOriginal) {
156
- const compressedOriginal = await compressToJpeg(cleanBuffer, context.originalQuality);
161
+ const storeOriginal = sourceTypeConfig?.storeOriginal ?? false;
162
+ if (storeOriginal) {
163
+ const compressedOriginal = await compressToJpeg(cleanBuffer, quality);
157
164
  const originalKey = `${originalPath}/original.jpg`;
158
165
  await uploadToS3(context.s3Client, context.r2Config.bucket, originalKey, compressedOriginal, {
159
166
  contentType: "image/jpeg",
@@ -171,7 +178,7 @@ async function processNextJob(fastify, context, config, workerId) {
171
178
  width: metadata.width,
172
179
  height: metadata.height,
173
180
  format: metadata.format,
174
- aspectRatio: `${metadata.width}:${metadata.height}`,
181
+ aspectRatio: getAspectRatio(metadata.width, metadata.height),
175
182
  blurhash,
176
183
  originalFilename: job.originalFilename,
177
184
  mimeType: job.mimeType,
@@ -193,9 +200,9 @@ async function processNextJob(fastify, context, config, workerId) {
193
200
  // Step 9: Clean up staging
194
201
  await deleteFromS3(context.s3Client, context.r2Config.bucket, job.stagingKey);
195
202
 
196
- console.info(`Job ${job.id} completed successfully`);
203
+ fastify.log.info({ jobId: job.id, mediaId: media.id }, "[xImagePipeline] Job completed");
197
204
  } catch (error) {
198
- console.error(`Job ${job.id} processing error: ${error.message}`);
205
+ fastify.log.error({ err: error, jobId: job.id }, "[xImagePipeline] Job processing error");
199
206
 
200
207
  // Update job with error
201
208
  const nextAttempt = job.attempts + 1;
@@ -213,26 +220,28 @@ async function processNextJob(fastify, context, config, workerId) {
213
220
  });
214
221
 
215
222
  if (!shouldRetry) {
216
- // Clean up staging after max retries
217
223
  try {
218
224
  await deleteFromS3(context.s3Client, context.r2Config.bucket, job.stagingKey);
219
225
  } catch (cleanupErr) {
220
- console.error(`Failed to cleanup staging: ${cleanupErr.message}`);
226
+ fastify.log.error({ err: cleanupErr }, "[xImagePipeline] Failed to cleanup staging");
221
227
  }
222
228
  }
223
229
  }
224
230
  } catch (error) {
225
- console.error("Worker process error:", error.message);
231
+ fastify.log.error({ err: error }, "[xImagePipeline] Worker process error");
226
232
  }
227
233
  }
228
234
 
229
235
  /**
230
236
  * Moderate image content
231
- * Currently a stub - implement with actual API (Rekognition, Vision, etc.)
237
+ * Stub implementation — override via options.moderation.handler to provide real moderation.
238
+ * Expected signature: async (buffer, config) => { passed: boolean, flags: string[], confidence: object }
232
239
  */
233
240
  async function moderateImage(buffer, moderationConfig) {
234
- // TODO: Implement actual moderation API call
235
- // For now, always approve
241
+ if (typeof moderationConfig.handler === "function") {
242
+ return moderationConfig.handler(buffer, moderationConfig);
243
+ }
244
+ // Default: always approve
236
245
  return {
237
246
  passed: true,
238
247
  flags: [],
@@ -241,7 +250,7 @@ async function moderateImage(buffer, moderationConfig) {
241
250
  }
242
251
 
243
252
  /**
244
- * Recover stale locks (jobs locked > lockTimeout)
253
+ * Recover stale locks (jobs locked longer than lockTimeout)
245
254
  */
246
255
  export async function recoverStaleLocks(db, lockTimeout) {
247
256
  const staleThreshold = new Date(Date.now() - lockTimeout);
@@ -1,6 +1,6 @@
1
1
  // src/xImagePipeline.js
2
2
  import fp from "fastify-plugin";
3
- import { initializeS3Client } from "./services/s3.js";
3
+ import { initializeS3Client, deleteFromS3, batchDeleteFromS3 } from "./services/s3.js";
4
4
  import { setupUploadRoute } from "./routes/upload.js";
5
5
  import { setupStatusRoute } from "./routes/status.js";
6
6
  import { setupWorker } from "./workers/processor.js";
@@ -9,26 +9,34 @@ import { setupWorker } from "./workers/processor.js";
9
9
  * xImagePipeline Plugin for Fastify
10
10
  * Handles image uploads with EXIF stripping, moderation, variant generation, and R2 storage
11
11
  *
12
- * @param {Object} fastify - Fastify instance
12
+ * @param {import('fastify').FastifyInstance} fastify - Fastify instance
13
13
  * @param {Object} options - Plugin options
14
14
  * @param {Object} options.r2 - R2 configuration (endpoint, accessKeyId, secretAccessKey, bucket)
15
- * @param {Object} options.db - Database instance (Prisma client or similar)
16
- * @param {Object} options.moderation - Moderation config (provider, apiKey, etc.)
17
- * @param {Object} options.variants - Variant size definitions (optional, uses defaults)
18
- * @param {Object} options.sourceTypes - Source type configurations (optional, uses defaults)
19
- * Each source type defines: variants[], formats[], quality, storeOriginal
20
- * @param {Object} options.worker - Worker configuration (enabled, pollInterval, maxAttempts)
15
+ * @param {Object} options.db - Database instance (Prisma client)
16
+ * @param {Object} [options.moderation] - Moderation config ({ handler: async (buffer, config) => result })
17
+ * @param {Object} [options.variants] - Variant size definitions (default: xs/sm/md/lg/xl/2xl)
18
+ * @param {Object} [options.sourceTypes] - Source type configurations (default: avatar/member_photo/gallery/hero/content)
19
+ * @param {Object} [options.worker] - Worker config ({ enabled, pollInterval, maxAttempts, lockTimeout })
20
+ * @param {string} [options.stagingPath='staging'] - R2 prefix for staging uploads
21
+ * @param {string} [options.mediaPath='media'] - R2 prefix for processed media
22
+ * @param {string} [options.originalsPath='originals'] - R2 prefix for originals
23
+ * @param {number} [options.maxFileSize=52428800] - Max upload size in bytes (default 50MB)
24
+ * @param {string[]} [options.allowedMimeTypes] - Allowed MIME types (default: jpeg/png/webp/gif)
21
25
  */
22
26
  async function xImagePipeline(fastify, options) {
23
27
  // Validate required configuration
24
28
  if (!options.r2) {
25
- throw new Error("R2 configuration is required");
29
+ throw new Error("[xImagePipeline] R2 configuration is required");
26
30
  }
27
31
  if (!options.db) {
28
- throw new Error("Database instance (Prisma client) is required");
32
+ throw new Error("[xImagePipeline] Database instance (Prisma client) is required");
29
33
  }
30
34
 
31
- console.info("\n 🎬 Starting xImagePipeline...\n");
35
+ if (typeof options.r2 !== "object" || !options.r2.endpoint || !options.r2.accessKeyId || !options.r2.secretAccessKey || !options.r2.bucket) {
36
+ throw new Error("[xImagePipeline] R2 configuration must include: endpoint, accessKeyId, secretAccessKey, bucket");
37
+ }
38
+
39
+ fastify.log.info("[xImagePipeline] Starting...");
32
40
 
33
41
  // Initialize R2 S3 client
34
42
  const s3Client = initializeS3Client(options.r2);
@@ -41,7 +49,7 @@ async function xImagePipeline(fastify, options) {
41
49
  variants: options.variants || getDefaultVariants(),
42
50
  sourceTypes: options.sourceTypes || getDefaultSourceTypes(),
43
51
  r2Config: options.r2,
44
- maxFileSize: options.maxFileSize || 50 * 1024 * 1024, // 50MB default
52
+ maxFileSize: options.maxFileSize || 50 * 1024 * 1024,
45
53
  allowedMimeTypes: options.allowedMimeTypes || [
46
54
  "image/jpeg",
47
55
  "image/png",
@@ -55,15 +63,77 @@ async function xImagePipeline(fastify, options) {
55
63
 
56
64
  // Decorate fastify instance with image pipeline utilities
57
65
  fastify.decorate("xImagePipeline", {
58
- upload: async (file, metadata) => {
59
- // Placeholder - implemented in upload route
60
- },
66
+ /**
67
+ * Get the status of a processing job
68
+ * @param {string} jobId
69
+ * @returns {Promise<Object|null>} Job with media relation, or null
70
+ */
61
71
  getStatus: async (jobId) => {
62
- // Placeholder - implemented in status route
72
+ return context.db.mediaQueue.findUnique({
73
+ where: { id: jobId },
74
+ include: { media: true },
75
+ });
63
76
  },
77
+
78
+ /**
79
+ * Delete a media record and all associated R2 objects
80
+ * @param {string} mediaId
81
+ * @returns {Promise<{ deleted: boolean, r2Deleted: number }>}
82
+ */
64
83
  deleteMedia: async (mediaId) => {
65
- // Placeholder - implemented as utility
84
+ const media = await context.db.media.findUnique({ where: { id: mediaId } });
85
+ if (!media) {
86
+ throw new Error(`[xImagePipeline] Media not found: ${mediaId}`);
87
+ }
88
+
89
+ // Delete variant files from R2
90
+ const mediaPrefix = `${context.mediaPath}/${media.sourceType}/${media.sourceId}/${mediaId}`;
91
+ const r2Result = await batchDeleteFromS3(s3Client, context.r2Config.bucket, mediaPrefix);
92
+
93
+ // Delete original if stored
94
+ if (media.originalUrl) {
95
+ const originalPrefix = `${context.originalsPath}/${media.sourceType}/${media.sourceId}/${mediaId}`;
96
+ const origResult = await batchDeleteFromS3(s3Client, context.r2Config.bucket, originalPrefix);
97
+ r2Result.deleted += origResult.deleted;
98
+ }
99
+
100
+ // Delete database records
101
+ await context.db.mediaQueue.deleteMany({ where: { mediaId } });
102
+ await context.db.media.delete({ where: { id: mediaId } });
103
+
104
+ return { deleted: true, r2Deleted: r2Result.deleted };
105
+ },
106
+
107
+ /**
108
+ * List media by sourceType and sourceId
109
+ * @param {string} sourceType
110
+ * @param {string} sourceId
111
+ * @returns {Promise<Object[]>}
112
+ */
113
+ listMedia: async (sourceType, sourceId) => {
114
+ return context.db.media.findMany({
115
+ where: { sourceType, sourceId },
116
+ orderBy: { createdAt: "desc" },
117
+ });
66
118
  },
119
+
120
+ /**
121
+ * Get variant presets for all source types
122
+ * @returns {Object}
123
+ */
124
+ getVariantPresets,
125
+
126
+ /**
127
+ * Get the source type configurations
128
+ * @returns {Object}
129
+ */
130
+ getSourceTypes: () => context.sourceTypes,
131
+
132
+ /**
133
+ * Get variant definitions
134
+ * @returns {Object}
135
+ */
136
+ getVariants: () => context.variants,
67
137
  });
68
138
 
69
139
  // Register routes
@@ -80,16 +150,16 @@ async function xImagePipeline(fastify, options) {
80
150
 
81
151
  try {
82
152
  setupWorker(fastify, context, workerConfig);
83
- console.info(" Image Pipeline Worker Started");
153
+ fastify.log.info("[xImagePipeline] Worker started");
84
154
  } catch (err) {
85
- console.error(" Failed to start image pipeline worker:", err.message);
155
+ fastify.log.error({ err }, "[xImagePipeline] Failed to start worker");
86
156
  if (options.worker?.failOnError !== false) {
87
157
  throw err;
88
158
  }
89
159
  }
90
160
  }
91
161
 
92
- console.info("\n 🎬 xImagePipeline Ready!\n");
162
+ fastify.log.info("[xImagePipeline] Ready");
93
163
  }
94
164
 
95
165
  /**
@@ -108,7 +178,6 @@ function getDefaultVariants() {
108
178
 
109
179
  /**
110
180
  * Get default source type configurations
111
- * Each source type can have different processing settings
112
181
  */
113
182
  function getDefaultSourceTypes() {
114
183
  return {
@@ -146,7 +215,8 @@ function getDefaultSourceTypes() {
146
215
  }
147
216
 
148
217
  /**
149
- * Get variant presets for different source types
218
+ * Get variant presets for all source types
219
+ * @returns {Object<string, string[]>}
150
220
  */
151
221
  export function getVariantPresets() {
152
222
  return {
@@ -161,4 +231,5 @@ export function getVariantPresets() {
161
231
  export default fp(xImagePipeline, {
162
232
  name: "xImagePipeline",
163
233
  fastify: "5.x",
234
+ dependencies: [],
164
235
  });
package/SCHEMA.prisma DELETED
@@ -1,113 +0,0 @@
1
- // This schema should be added to your main Prisma schema.prisma file
2
- // It defines the models needed for the xMedia pipeline
3
-
4
- enum MediaStatus {
5
- PENDING
6
- PROCESSING
7
- COMPLETE
8
- REJECTED
9
- FAILED
10
- }
11
-
12
- enum ModerationResult {
13
- APPROVED
14
- REJECTED
15
- FLAGGED
16
- }
17
-
18
- model MediaQueue {
19
- id String @id @default(cuid())
20
-
21
- // Job status
22
- status MediaStatus @default(PENDING)
23
-
24
- // Source information
25
- sourceType String // avatar, gallery, hero, member_photo, content, etc.
26
- sourceId String // userId, bandId, etc.
27
-
28
- // File information
29
- stagingKey String // Key in R2 staging bucket
30
- originalFilename String
31
- mimeType String
32
- fileSize Int
33
-
34
- // Processing results
35
- mediaId String? // FK to Media after processing
36
- media Media? @relation(fields: [mediaId], references: [id], onDelete: SetNull)
37
-
38
- // Error tracking
39
- attempts Int @default(0)
40
- maxAttempts Int @default(3)
41
- errorMsg String?
42
-
43
- // Moderation results
44
- moderationResult ModerationResult?
45
- moderationDetails Json? // Full moderation API response
46
-
47
- // Locking for worker process
48
- lockedAt DateTime?
49
- lockedBy String? // Worker ID that locked this job
50
-
51
- // Timestamps
52
- createdAt DateTime @default(now())
53
- updatedAt DateTime @updatedAt
54
-
55
- @@index([status, createdAt])
56
- @@index([sourceType, sourceId])
57
- @@index([lockedAt])
58
- }
59
-
60
- model Media {
61
- id String @id @default(cuid())
62
-
63
- // Variant URLs (object mapping variant name to URL)
64
- // e.g., { "xs": "https://...", "sm": "https://...", "md": "https://..." }
65
- urls Json @default("{}")
66
- originalUrl String // URL to full-resolution original
67
-
68
- // Image properties
69
- width Int
70
- height Int
71
- format String // jpeg, png, webp, gif
72
- aspectRatio String // e.g., "16:9", "4:3", "1:1"
73
-
74
- // Loading placeholder
75
- blurhash String // For instant UI placeholder
76
-
77
- // For smart cropping
78
- focalPoint Json @default("{\"x\": 0.5, \"y\": 0.5}") // { x: 0-1, y: 0-1 }
79
-
80
- // Source information (denormalized for queries)
81
- sourceType String
82
- sourceId String
83
-
84
- // File information
85
- originalFilename String
86
- mimeType String
87
- fileSize Int
88
-
89
- // Metadata
90
- exifStripped Boolean @default(true)
91
- createdAt DateTime @default(now())
92
- updatedAt DateTime @updatedAt
93
-
94
- // Relations (optional - add as needed)
95
- // User.avatar => Media
96
- // Band.avatar => Media
97
- // Gallery items, etc.
98
-
99
- queue MediaQueue[]
100
-
101
- @@index([sourceType, sourceId])
102
- @@index([createdAt])
103
- }
104
-
105
- // Example: Add to User model
106
- // avatar Media? @relation(fields: [avatarId], references: [id], onDelete: SetNull)
107
- // avatarId String?
108
-
109
- // Example: Add to Band model
110
- // avatar Media? @relation(fields: [avatarId], references: [id], onDelete: SetNull)
111
- // avatarId String?
112
- // hero Media? @relation(fields: [heroId], references: [id], onDelete: SetNull)
113
- // heroId String?