@uploadista/core 0.0.19 → 0.0.20-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/dist/{checksum-p3NmuAky.cjs → checksum-DVPe3Db4.cjs} +1 -1
  2. package/dist/errors/index.cjs +1 -1
  3. package/dist/errors/index.d.cts +0 -1
  4. package/dist/flow/index.cjs +1 -1
  5. package/dist/flow/index.d.cts +2 -6
  6. package/dist/flow/index.d.mts +2 -2
  7. package/dist/flow/index.mjs +1 -1
  8. package/dist/flow-CAlAQtBK.cjs +1 -0
  9. package/dist/flow-DWNJ-NOU.mjs +2 -0
  10. package/dist/flow-DWNJ-NOU.mjs.map +1 -0
  11. package/dist/index-9gyMMEIB.d.cts.map +1 -1
  12. package/dist/{index-TokXRAZ5.d.mts → index-B3_9v6Z8.d.mts} +494 -36
  13. package/dist/index-B3_9v6Z8.d.mts.map +1 -0
  14. package/dist/index-B9V5SSxl.d.mts.map +1 -1
  15. package/dist/{index-BOic6-Cg.d.cts → index-br6o9tCI.d.cts} +494 -36
  16. package/dist/index-br6o9tCI.d.cts.map +1 -0
  17. package/dist/index.cjs +1 -1
  18. package/dist/index.d.cts +2 -3
  19. package/dist/index.d.mts +2 -2
  20. package/dist/index.mjs +1 -1
  21. package/dist/{stream-limiter-Cem7Zvaw.cjs → stream-limiter-BvkaZXcz.cjs} +1 -1
  22. package/dist/streams/index.cjs +1 -1
  23. package/dist/streams/index.d.cts +0 -1
  24. package/dist/testing/index.cjs +2 -2
  25. package/dist/testing/index.d.cts +1 -5
  26. package/dist/testing/index.d.cts.map +1 -1
  27. package/dist/testing/index.d.mts +1 -1
  28. package/dist/testing/index.d.mts.map +1 -1
  29. package/dist/testing/index.mjs +3 -3
  30. package/dist/testing/index.mjs.map +1 -1
  31. package/dist/types/index.cjs +1 -1
  32. package/dist/types/index.d.cts +2 -6
  33. package/dist/types/index.d.mts +2 -2
  34. package/dist/types/index.mjs +1 -1
  35. package/dist/types-Cws60JHC.cjs +1 -0
  36. package/dist/types-DKGQJIEr.mjs +2 -0
  37. package/dist/types-DKGQJIEr.mjs.map +1 -0
  38. package/dist/upload/index.cjs +1 -1
  39. package/dist/upload/index.d.cts +1 -5
  40. package/dist/upload/index.d.mts +1 -1
  41. package/dist/upload/index.mjs +1 -1
  42. package/dist/{upload-5l3utoc7.cjs → upload-BHDuuJ80.cjs} +1 -1
  43. package/dist/{upload-B2RDFkTe.mjs → upload-tLC7uR9U.mjs} +2 -2
  44. package/dist/upload-tLC7uR9U.mjs.map +1 -0
  45. package/dist/{uploadista-error-BfpQ4mOO.cjs → uploadista-error-BgQU45we.cjs} +1 -1
  46. package/dist/utils/index.cjs +1 -1
  47. package/dist/utils/index.d.cts +0 -1
  48. package/dist/{utils-QJOPnlmt.cjs → utils-UUJt8ILJ.cjs} +1 -1
  49. package/package.json +3 -3
  50. package/src/flow/index.ts +10 -0
  51. package/src/flow/nodes/transform-node.ts +321 -29
  52. package/src/flow/plugins/image-plugin.ts +101 -1
  53. package/src/flow/plugins/video-plugin.ts +124 -1
  54. package/src/testing/mock-upload-server.ts +81 -2
  55. package/src/types/data-store.ts +157 -0
  56. package/src/types/input-file.ts +47 -21
  57. package/src/upload/upload-server.ts +234 -1
  58. package/dist/flow-DKCp_0Y1.mjs +0 -2
  59. package/dist/flow-DKCp_0Y1.mjs.map +0 -1
  60. package/dist/flow-NHkTGTxu.cjs +0 -1
  61. package/dist/index-BOic6-Cg.d.cts.map +0 -1
  62. package/dist/index-TokXRAZ5.d.mts.map +0 -1
  63. package/dist/types-CHbyV8e6.mjs +0 -2
  64. package/dist/types-CHbyV8e6.mjs.map +0 -1
  65. package/dist/types-D3_rWxD0.cjs +0 -1
  66. package/dist/upload-B2RDFkTe.mjs.map +0 -1
package/src/testing/mock-upload-server.ts

@@ -1,6 +1,11 @@
-import { Effect, Layer } from "effect";
+import { Effect, Layer, Stream } from "effect";
+import type { UploadistaError } from "../errors";
 import type { InputFile, UploadFile, WebSocketConnection } from "../types";
-import type { DataStoreCapabilities } from "../types/data-store";
+import {
+  DEFAULT_STREAMING_CONFIG,
+  type DataStoreCapabilities,
+  type StreamingConfig,
+} from "../types/data-store";
 import { UploadServer } from "../upload";
 
 /**
@@ -29,6 +34,78 @@ export const TestUploadServer = Layer.succeed(
         const text = `Content of file ${fileId}`;
         return new TextEncoder().encode(text);
       }),
+      readStream: (
+        fileId: string,
+        _clientId: string | null,
+        config?: StreamingConfig,
+      ) =>
+        Effect.sync(() => {
+          const effectiveConfig = { ...DEFAULT_STREAMING_CONFIG, ...config };
+          // Generate mock file data based on fileId
+          const text = `Content of file ${fileId}`;
+          const fullData = new TextEncoder().encode(text);
+
+          // Split data into chunks based on chunkSize
+          const chunkSize = effectiveConfig.chunkSize;
+          const chunks: Uint8Array[] = [];
+          for (let i = 0; i < fullData.length; i += chunkSize) {
+            chunks.push(fullData.slice(i, i + chunkSize));
+          }
+
+          // Return as a stream of chunks
+          return Stream.fromIterable(chunks);
+        }),
+      uploadStream: (
+        file: Omit<InputFile, "size"> & { size?: number; sizeHint?: number },
+        _clientId: string | null,
+        stream: Stream.Stream<Uint8Array, UploadistaError>,
+      ) =>
+        Effect.gen(function* () {
+          // Collect stream to calculate total size
+          const chunks: Uint8Array[] = [];
+          yield* Stream.runForEach(stream, (chunk) =>
+            Effect.sync(() => {
+              chunks.push(chunk);
+            }),
+          );
+
+          const totalSize = chunks.reduce((acc, chunk) => acc + chunk.length, 0);
+
+          // Parse existing metadata
+          const existingMetadata =
+            typeof file.metadata === "string"
+              ? JSON.parse(file.metadata)
+              : file.metadata || {};
+
+          // Extract extension from fileName
+          const extension = file.fileName
+            ? file.fileName.split(".").pop()
+            : existingMetadata.extension;
+
+          // Create new UploadFile with final size
+          const uploadId = `stream-uploaded-${Date.now()}-${Math.random().toString(36).substring(7)}`;
+          return {
+            id: uploadId,
+            offset: totalSize,
+            size: totalSize,
+            storage: {
+              id: file.storageId,
+              type: "memory",
+            },
+            metadata: {
+              ...existingMetadata,
+              mimeType: file.type,
+              type: file.type,
+              "content-type": file.type,
+              fileName: file.fileName,
+              originalName: file.fileName,
+              name: file.fileName,
+              extension,
+            },
+            url: `http://test-storage/${uploadId}`,
+            creationDate: new Date().toISOString(),
+          } satisfies UploadFile;
+        }),
       upload: (file, _clientId, stream) =>
         Effect.gen(function* () {
           // Read stream to completion
@@ -126,6 +203,8 @@ export const TestUploadServer = Layer.succeed(
       supportsDeferredLength: true,
       supportsResumableUploads: true,
       supportsTransactionalUploads: false,
+      supportsStreamingRead: true,
+      supportsStreamingWrite: true,
       maxConcurrentUploads: 10,
       minChunkSize: 5 * 1024 * 1024, // 5MB
      maxChunkSize: 100 * 1024 * 1024, // 100MB
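
The mock now advertises and implements both streaming capabilities, so capability-gated code paths can be exercised in tests. A minimal sketch of a test driving the new `readStream` — the `@uploadista/core/upload` and `@uploadista/core/testing` subpath imports are assumptions based on the dist layout above:

```typescript
import { Effect, Stream } from "effect";
import { UploadServer } from "@uploadista/core/upload"; // import paths assumed
import { TestUploadServer } from "@uploadista/core/testing";

// Read a mock file back as a stream of small chunks and reassemble it.
const program = Effect.gen(function* () {
  const server = yield* UploadServer;
  const stream = yield* server.readStream("file-123", null, {
    chunkSize: 8, // tiny chunks so the chunking loop is actually exercised
  });
  const chunks = yield* Stream.runCollect(stream);
  // Flatten Chunk<Uint8Array> into one buffer and decode it.
  const bytes = Uint8Array.from([...chunks].flatMap((c) => [...c]));
  return new TextDecoder().decode(bytes); // "Content of file file-123"
});

Effect.runPromise(program.pipe(Effect.provide(TestUploadServer)));
```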
package/src/types/data-store.ts

@@ -24,6 +24,98 @@ export type DataStoreWriteOptions = {
  */
 export type UploadStrategy = "single" | "parallel";
 
+/**
+ * Configuration options for streaming file reads.
+ *
+ * Used to control streaming behavior in transform nodes and data stores.
+ *
+ * @property fileSizeThreshold - Files below this size use buffered mode (default: 1MB)
+ * @property chunkSize - Chunk size for streaming reads in bytes (default: 64KB)
+ *
+ * @example
+ * ```typescript
+ * const config: StreamingConfig = {
+ *   fileSizeThreshold: 1_048_576, // 1MB - use buffered for smaller files
+ *   chunkSize: 65_536, // 64KB chunks
+ * };
+ * ```
+ */
+export type StreamingConfig = {
+  /** Files below this size use buffered mode (default: 1MB = 1_048_576 bytes) */
+  fileSizeThreshold?: number;
+  /** Chunk size for streaming reads in bytes (default: 64KB = 65_536 bytes) */
+  chunkSize?: number;
+};
+
+/**
+ * Default streaming configuration values.
+ */
+export const DEFAULT_STREAMING_CONFIG: Required<StreamingConfig> = {
+  fileSizeThreshold: 1_048_576, // 1MB
+  chunkSize: 65_536, // 64KB
+};
+
+/**
+ * Default multipart part size for S3/R2 streaming writes.
+ * S3 requires minimum 5MB parts (except for the last part).
+ */
+export const DEFAULT_MULTIPART_PART_SIZE = 5 * 1024 * 1024; // 5MB
+
+/**
+ * Options for streaming write operations.
+ *
+ * Used when writing file content from a stream with unknown final size.
+ * The store will finalize the upload when the stream completes.
+ *
+ * @property stream - Effect Stream of byte chunks to write
+ * @property contentType - Optional MIME type for the file
+ * @property metadata - Optional metadata to store with the file
+ * @property sizeHint - Optional estimated size for optimization (e.g., multipart part sizing)
+ *
+ * @example
+ * ```typescript
+ * const options: StreamWriteOptions = {
+ *   stream: transformedStream,
+ *   contentType: "image/webp",
+ *   metadata: { originalName: "photo.jpg" },
+ *   sizeHint: 5_000_000, // ~5MB expected
+ * };
+ * ```
+ */
+export type StreamWriteOptions = {
+  stream: Stream.Stream<Uint8Array, UploadistaError>;
+  contentType?: string;
+  metadata?: Record<string, string>;
+  /** Optional size hint for optimization (not required) */
+  sizeHint?: number;
+};
+
+/**
+ * Result of a streaming write operation.
+ *
+ * Contains the final size after the stream completes, along with
+ * storage location information.
+ *
+ * @property id - Unique identifier of the written file
+ * @property size - Final size in bytes after stream completed
+ * @property path - Storage path or key where file was written
+ * @property bucket - Optional bucket/container name (for cloud storage)
+ *
+ * @example
+ * ```typescript
+ * const result = yield* dataStore.writeStream(fileId, options);
+ * console.log(`Wrote ${result.size} bytes to ${result.path}`);
+ * ```
+ */
+export type StreamWriteResult = {
+  id: string;
+  size: number;
+  path: string;
+  bucket?: string;
+  /** Public URL for accessing the uploaded file (if available) */
+  url?: string;
+};
+
 /**
  * Capabilities and constraints of a DataStore implementation.
  *
@@ -36,6 +128,7 @@ export type UploadStrategy = "single" | "parallel";
  * @property supportsDeferredLength - Can start upload without knowing final size
  * @property supportsResumableUploads - Can resume interrupted uploads from last offset
  * @property supportsTransactionalUploads - Guarantees atomic upload success/failure
+ * @property supportsStreamingRead - Can read file content as a stream instead of buffering
  * @property maxConcurrentUploads - Maximum parallel upload parts (if parallel supported)
  * @property minChunkSize - Minimum size in bytes for each chunk (except last)
  * @property maxChunkSize - Maximum size in bytes for each chunk
@@ -57,6 +150,12 @@ export type UploadStrategy = "single" | "parallel";
  *   // Use single upload
  *   uploadAsSingleChunk(file);
  * }
+ *
+ * // Check for streaming support
+ * if (capabilities.supportsStreamingRead) {
+ *   // Use streaming for memory-efficient processing
+ *   const stream = yield* dataStore.readStream(fileId);
+ * }
  * ```
  */
 export type DataStoreCapabilities = {
@@ -65,6 +164,10 @@ export type DataStoreCapabilities = {
   supportsDeferredLength: boolean;
   supportsResumableUploads: boolean;
   supportsTransactionalUploads: boolean;
+  /** Whether the store supports streaming reads via readStream() */
+  supportsStreamingRead?: boolean;
+  /** Whether the store supports streaming writes via writeStream() with unknown final size */
+  supportsStreamingWrite?: boolean;
   maxConcurrentUploads?: number;
   minChunkSize?: number;
   maxChunkSize?: number;
@@ -149,15 +252,69 @@ export type DataStore<TData = unknown> = {
   readonly path?: string;
   readonly create: (file: TData) => Effect.Effect<TData, UploadistaError>;
   readonly remove: (file_id: string) => Effect.Effect<void, UploadistaError>;
+  /**
+   * Reads the complete file contents as bytes (buffered mode).
+   * For large files, consider using readStream() if available.
+   */
   readonly read: (
     file_id: string,
   ) => Effect.Effect<Uint8Array, UploadistaError>;
+  /**
+   * Reads file content as a stream of chunks for memory-efficient processing.
+   * Optional - check getCapabilities().supportsStreamingRead before using.
+   *
+   * @param file_id - The unique identifier of the file to read
+   * @param config - Optional streaming configuration (chunk size)
+   * @returns An Effect that resolves to a Stream of byte chunks
+   *
+   * @example
+   * ```typescript
+   * const capabilities = dataStore.getCapabilities();
+   * if (capabilities.supportsStreamingRead && dataStore.readStream) {
+   *   const stream = yield* dataStore.readStream(fileId, { chunkSize: 65536 });
+   *   // Process stream chunk by chunk
+   * }
+   * ```
+   */
+  readonly readStream?: (
+    file_id: string,
+    config?: StreamingConfig,
+  ) => Effect.Effect<Stream.Stream<Uint8Array, UploadistaError>, UploadistaError>;
   readonly write: (
     options: DataStoreWriteOptions,
     dependencies: {
       onProgress?: (chunkSize: number) => void;
     },
   ) => Effect.Effect<number, UploadistaError>;
+  /**
+   * Writes file content from a stream with unknown final size.
+   * Optional - check getCapabilities().supportsStreamingWrite before using.
+   *
+   * This method is optimized for end-to-end streaming where the output
+   * size isn't known until the stream completes. It uses store-specific
+   * mechanisms like multipart uploads (S3/R2), resumable uploads (GCS),
+   * or block staging (Azure) to efficiently handle streaming data.
+   *
+   * @param fileId - Unique identifier for the file being written
+   * @param options - Stream and optional metadata
+   * @returns StreamWriteResult containing final size after completion
+   *
+   * @example
+   * ```typescript
+   * const capabilities = dataStore.getCapabilities();
+   * if (capabilities.supportsStreamingWrite && dataStore.writeStream) {
+   *   const result = yield* dataStore.writeStream(fileId, {
+   *     stream: transformedStream,
+   *     contentType: "image/webp",
+   *   });
+   *   console.log(`Wrote ${result.size} bytes`);
+   * }
+   * ```
+   */
+  readonly writeStream?: (
+    fileId: string,
+    options: StreamWriteOptions,
+  ) => Effect.Effect<StreamWriteResult, UploadistaError>;
   readonly deleteExpired?: () => Effect.Effect<number, UploadistaError>;
   readonly getCapabilities: () => DataStoreCapabilities;
   readonly validateUploadStrategy: (
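
Both new `DataStore` methods are optional, so callers are expected to gate on capabilities. A sketch of the intended call pattern, mirroring the fallback the upload server applies below — the helper name and the `@uploadista/core/types` import path are illustrative, not part of the published API:

```typescript
import { Effect, Stream } from "effect";
import type { DataStore } from "@uploadista/core/types"; // path assumed

// Prefer readStream() when the store advertises it; otherwise wrap the
// buffered read() in a single-chunk stream so callers see one interface.
const readAsStream = (store: DataStore, fileId: string) =>
  Effect.gen(function* () {
    if (store.getCapabilities().supportsStreamingRead && store.readStream) {
      // Native streaming: bounded memory, chunked delivery.
      return yield* store.readStream(fileId, { chunkSize: 65_536 });
    }
    // Fallback: the whole file is buffered, then emitted as one chunk.
    const bytes = yield* store.read(fileId);
    return Stream.succeed(bytes);
  });
```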
package/src/types/input-file.ts

@@ -8,24 +8,41 @@ import { z } from "zod";
  *
  * @see {@link InputFile} for the TypeScript type
  */
-export const inputFileSchema = z.object({
-  uploadLengthDeferred: z.boolean().optional(),
-  storageId: z.string(),
-  size: z.number(),
-  type: z.string(),
-  fileName: z.string().optional(),
-  lastModified: z.number().optional(),
-  metadata: z.string().optional(),
-  checksum: z.string().optional(),
-  checksumAlgorithm: z.string().optional(),
-  flow: z
-    .object({
-      flowId: z.string(),
-      nodeId: z.string(),
-      jobId: z.string(),
-    })
-    .optional(),
-});
+export const inputFileSchema = z
+  .object({
+    uploadLengthDeferred: z.boolean().optional(),
+    storageId: z.string(),
+    /** File size in bytes. Optional when uploadLengthDeferred is true. */
+    size: z.number().optional(),
+    /** Optional size hint for optimization when size is unknown */
+    sizeHint: z.number().optional(),
+    type: z.string(),
+    fileName: z.string().optional(),
+    lastModified: z.number().optional(),
+    metadata: z.string().optional(),
+    checksum: z.string().optional(),
+    checksumAlgorithm: z.string().optional(),
+    flow: z
+      .object({
+        flowId: z.string(),
+        nodeId: z.string(),
+        jobId: z.string(),
+      })
+      .optional(),
+  })
+  .refine(
+    (data) => {
+      // Size is required unless uploadLengthDeferred is true
+      if (data.uploadLengthDeferred === true) {
+        return true; // Size can be omitted
+      }
+      return data.size !== undefined && data.size >= 0;
+    },
+    {
+      message: "size is required when uploadLengthDeferred is not true",
+      path: ["size"],
+    },
+  );
 
 /**
  * Represents the input data for creating a new file upload.
@@ -34,7 +51,8 @@ export const inputFileSchema = z.object({
  * It's used by clients to provide upload metadata before sending file data.
  *
  * @property storageId - Target storage backend identifier (e.g., "s3-production", "azure-blob")
- * @property size - File size in bytes
+ * @property size - File size in bytes. Optional when uploadLengthDeferred is true.
+ * @property sizeHint - Optional size hint for optimization when exact size is unknown
  * @property type - MIME type of the file (e.g., "image/jpeg", "application/pdf")
  * @property uploadLengthDeferred - If true, file size is not known upfront (streaming upload)
  * @property fileName - Original filename from the client
@@ -94,14 +112,22 @@ export const inputFileSchema = z.object({
  *   }
  * };
  *
- * // Streaming upload (size unknown)
+ * // Streaming upload (size unknown) - size can be omitted
 * const streamingInput: InputFile = {
 *   storageId: "s3-production",
- *   size: 0, // Will be updated as data arrives
 *   type: "video/mp4",
 *   uploadLengthDeferred: true,
 *   fileName: "live-stream.mp4"
 * };
+ *
+ * // Streaming upload with size hint for optimization
+ * const streamingWithHint: InputFile = {
+ *   storageId: "s3-production",
+ *   type: "image/webp",
+ *   uploadLengthDeferred: true,
+ *   sizeHint: 5_000_000, // ~5MB expected
+ *   fileName: "optimized-image.webp"
+ * };
 * ```
 */
 export type InputFile = z.infer<typeof inputFileSchema>;
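
Since the size requirement now lives in a `refine` step rather than in the field type, the parse-time behavior is worth spelling out. Plain zod against the schema shown above; only the import path is assumed:

```typescript
import { inputFileSchema } from "@uploadista/core/types"; // path assumed

// Size present: valid, exactly as in 0.0.19.
inputFileSchema.safeParse({
  storageId: "s3-production",
  size: 1024,
  type: "image/png",
}); // => { success: true, ... }

// Size omitted without deferred length: rejected by the refine step,
// with the error attached at path ["size"].
inputFileSchema.safeParse({
  storageId: "s3-production",
  type: "image/png",
}); // => { success: false, ... } "size is required when uploadLengthDeferred is not true"

// Size omitted with uploadLengthDeferred: accepted (streaming upload).
inputFileSchema.safeParse({
  storageId: "s3-production",
  type: "video/mp4",
  uploadLengthDeferred: true,
}); // => { success: true, ... }
```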
package/src/upload/upload-server.ts

@@ -1,4 +1,4 @@
-import { Context, Effect, Layer } from "effect";
+import { Context, Effect, Layer, Stream } from "effect";
 import type { UploadistaError } from "../errors";
 import type {
   DataStore,
@@ -7,12 +7,14 @@ import type {
   InputFile,
   KvStore,
   Middleware,
+  StreamingConfig,
   UploadEvent,
   UploadFile,
   WebSocketConnection,
 } from "../types";
 import {
   UploadEventEmitter,
+  UploadEventType,
   UploadFileDataStores,
   UploadFileKVStore,
 } from "../types";
@@ -151,10 +153,70 @@ export type UploadServerShape = {
    url: string,
  ) => Effect.Effect<UploadFile, UploadistaError>;
  getUpload: (uploadId: string) => Effect.Effect<UploadFile, UploadistaError>;
+  /**
+   * Reads the complete uploaded file data as bytes (buffered mode).
+   * For large files, consider using readStream() for memory efficiency.
+   */
  read: (
    uploadId: string,
    clientId: string | null,
  ) => Effect.Effect<Uint8Array, UploadistaError>;
+  /**
+   * Reads file content as a stream of chunks for memory-efficient processing.
+   * Falls back to buffered read if the underlying DataStore doesn't support streaming.
+   *
+   * @param uploadId - The unique identifier of the upload to read
+   * @param clientId - Client identifier for multi-tenant routing
+   * @param config - Optional streaming configuration (chunk size)
+   * @returns An Effect that resolves to a Stream of byte chunks
+   *
+   * @example
+   * ```typescript
+   * const server = yield* UploadServer;
+   * const stream = yield* server.readStream(uploadId, clientId, { chunkSize: 65536 });
+   * // Process stream chunk by chunk with bounded memory
+   * yield* Stream.runForEach(stream, (chunk) => processChunk(chunk));
+   * ```
+   */
+  readStream: (
+    uploadId: string,
+    clientId: string | null,
+    config?: StreamingConfig,
+  ) => Effect.Effect<Stream.Stream<Uint8Array, UploadistaError>, UploadistaError>;
+  /**
+   * Uploads file content from a stream with unknown final size.
+   * Creates upload with deferred length, streams content to storage,
+   * and updates the upload record with final size when complete.
+   *
+   * Falls back to buffered upload if the underlying DataStore
+   * doesn't support streaming writes.
+   *
+   * @param file - Input file configuration (size is optional)
+   * @param clientId - Client identifier for multi-tenant routing
+   * @param stream - Effect Stream of byte chunks to upload
+   * @returns The completed UploadFile with final size
+   *
+   * @example
+   * ```typescript
+   * const server = yield* UploadServer;
+   * const result = yield* server.uploadStream(
+   *   {
+   *     storageId: "s3-production",
+   *     type: "image/webp",
+   *     uploadLengthDeferred: true,
+   *     fileName: "optimized.webp",
+   *   },
+   *   clientId,
+   *   transformedStream,
+   * );
+   * console.log(`Uploaded ${result.size} bytes`);
+   * ```
+   */
+  uploadStream: (
+    file: Omit<InputFile, "size"> & { size?: number; sizeHint?: number },
+    clientId: string | null,
+    stream: Stream.Stream<Uint8Array, UploadistaError>,
+  ) => Effect.Effect<UploadFile, UploadistaError>;
  delete: (
    uploadId: string,
    clientId: string | null,
@@ -323,6 +385,177 @@ export function createUploadServer() {
          );
          return yield* dataStore.read(uploadId);
        }),
+      readStream: (
+        uploadId: string,
+        clientId: string | null,
+        config?: StreamingConfig,
+      ) =>
+        Effect.gen(function* () {
+          const upload = yield* kvStore.get(uploadId);
+          const dataStore = yield* dataStoreService.getDataStore(
+            upload.storage.id,
+            clientId,
+          );
+
+          // Check if the DataStore supports streaming reads
+          const capabilities = dataStore.getCapabilities();
+          if (capabilities.supportsStreamingRead && dataStore.readStream) {
+            // Use native streaming
+            yield* Effect.logDebug(
+              `Using streaming read for file ${uploadId}`,
+            );
+            return yield* dataStore.readStream(uploadId, config);
+          }
+
+          // Fallback: read entire file and convert to stream
+          yield* Effect.logDebug(
+            `Falling back to buffered read for file ${uploadId} (streaming not supported)`,
+          );
+          const bytes = yield* dataStore.read(uploadId);
+
+          // Convert buffered bytes to a single-chunk stream
+          return Stream.succeed(bytes);
+        }),
+      uploadStream: (
+        file: Omit<InputFile, "size"> & { size?: number; sizeHint?: number },
+        clientId: string | null,
+        stream: Stream.Stream<Uint8Array, UploadistaError>,
+      ) =>
+        Effect.gen(function* () {
+          // Get the data store for this storage
+          const dataStore = yield* dataStoreService.getDataStore(
+            file.storageId,
+            clientId,
+          );
+
+          // Check if the DataStore supports streaming writes
+          const capabilities = dataStore.getCapabilities();
+
+          // Generate upload ID
+          const uploadId = yield* generateId.generateId();
+
+          if (capabilities.supportsStreamingWrite && dataStore.writeStream) {
+            // Use native streaming write - DO NOT call createUpload as it would
+            // create an S3 multipart upload that we won't use (writeStream creates its own)
+            yield* Effect.logDebug(
+              `Using streaming write for file ${uploadId}`,
+            );
+
+            // Parse metadata
+            const metadata =
+              typeof file.metadata === "string"
+                ? JSON.parse(file.metadata)
+                : file.metadata || {};
+
+            // Convert metadata to Record<string, string> if present
+            const stringMetadata = Object.fromEntries(
+              Object.entries(metadata).map(([k, v]) => [k, String(v)]),
+            );
+
+            // Create initial upload record in KV store (without creating S3 multipart upload)
+            const initialUpload: UploadFile = {
+              id: uploadId,
+              offset: 0,
+              size: file.size ?? 0,
+              storage: {
+                id: file.storageId,
+                type: dataStore.getCapabilities().supportsStreamingWrite
+                  ? "streaming"
+                  : "default",
+              },
+              metadata,
+              creationDate: new Date().toISOString(),
+            };
+            yield* kvStore.set(uploadId, initialUpload);
+
+            // Emit started event
+            yield* eventEmitter.emit(uploadId, {
+              type: UploadEventType.UPLOAD_STARTED,
+              data: initialUpload,
+            });
+
+            const result = yield* dataStore.writeStream(uploadId, {
+              stream,
+              contentType: file.type,
+              sizeHint: file.sizeHint,
+              metadata: stringMetadata,
+            });
+
+            // Update the upload record with the final size and URL
+            const completedUpload: UploadFile = {
+              ...initialUpload,
+              size: result.size,
+              offset: result.size,
+              storage: {
+                ...initialUpload.storage,
+                path: result.path,
+              },
+              ...(result.url && { url: result.url }),
+            };
+
+            yield* kvStore.set(uploadId, completedUpload);
+
+            // Emit completion event
+            yield* eventEmitter.emit(uploadId, {
+              type: UploadEventType.UPLOAD_COMPLETE,
+              data: completedUpload,
+            });
+
+            return completedUpload;
+          }
+
+          // Fallback: buffer the stream and use regular upload (which calls createUpload + uploadChunk)
+          yield* Effect.logWarning(
+            `Falling back to buffered upload for file ${uploadId} (streaming write not supported)`,
+          );
+
+          // Collect stream into a buffer
+          const chunks: Uint8Array[] = [];
+          yield* Stream.runForEach(stream, (chunk) =>
+            Effect.sync(() => {
+              chunks.push(chunk);
+            }),
+          );
+
+          // Calculate total size
+          const totalSize = chunks.reduce((acc, chunk) => acc + chunk.length, 0);
+
+          // Create a combined buffer
+          const buffer = new Uint8Array(totalSize);
+          let offset = 0;
+          for (const chunk of chunks) {
+            buffer.set(chunk, offset);
+            offset += chunk.length;
+          }
+
+          // Create a readable stream from the buffer
+          const readableStream = new ReadableStream({
+            start(controller) {
+              controller.enqueue(buffer);
+              controller.close();
+            },
+          });
+
+          // For fallback, use the regular flow with createUpload + uploadChunk
+          const inputFile: InputFile = {
+            ...file,
+            size: totalSize,
+          };
+
+          const uploadFile = yield* createUpload(inputFile, clientId, {
+            dataStoreService,
+            kvStore,
+            eventEmitter,
+            generateId: { generateId: () => Effect.succeed(uploadId) },
+          });
+
+          // Use regular uploadChunk
+          return yield* uploadChunk(uploadId, clientId, readableStream, {
+            dataStoreService,
+            kvStore,
+            eventEmitter,
+          });
+        }),
      delete: (uploadId: string, clientId: string | null) =>
        Effect.gen(function* () {
          const upload = yield* kvStore.get(uploadId);
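
Together, `readStream` and `uploadStream` enable end-to-end streaming transforms without buffering whole files. A hedged sketch of such a pipeline built only on the documented shapes above; `transformChunk` and the import path are hypothetical placeholders, not part of the package:

```typescript
import { Effect, Stream } from "effect";
import { UploadServer } from "@uploadista/core/upload"; // path assumed

// Hypothetical per-chunk transform (e.g. re-encoding); identity here.
const transformChunk = (chunk: Uint8Array): Uint8Array => chunk;

// Read an existing upload as a bounded-memory stream, transform each
// chunk, and write the result back without knowing the output size
// up front (uploadLengthDeferred: true, so size may be omitted).
const pipeline = (uploadId: string, clientId: string | null) =>
  Effect.gen(function* () {
    const server = yield* UploadServer;
    const input = yield* server.readStream(uploadId, clientId, {
      chunkSize: 65_536,
    });
    const transformed = Stream.map(input, transformChunk);
    return yield* server.uploadStream(
      {
        storageId: "s3-production",
        type: "image/webp",
        uploadLengthDeferred: true,
        fileName: "optimized.webp",
      },
      clientId,
      transformed,
    );
  });
```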