@uploadista/data-store-filesystem 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +5 -0
- package/.turbo/turbo-check.log +5 -0
- package/.turbo/turbo-lint.log +0 -0
- package/LICENSE +21 -0
- package/README.md +583 -0
- package/dist/file-store.d.ts +15 -0
- package/dist/file-store.d.ts.map +1 -0
- package/dist/file-store.js +257 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +1 -0
- package/package.json +31 -0
- package/src/file-store.ts +374 -0
- package/src/index.ts +1 -0
- package/tsconfig.json +13 -0
- package/tsconfig.tsbuildinfo +1 -0
File without changes
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 uploadista

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,583 @@
# @uploadista/data-store-filesystem

Local filesystem data store for Uploadista - Store files on disk.

Provides file system-based storage perfect for development, testing, and self-hosted deployments. Supports sequential uploads with resumable transfers and automatic cleanup of old files.

## Features

- **Local Storage** - Files stored on server disk or shared volume
- **Resumable Uploads** - Resume failed transfers at specific offsets
- **Sequential Mode** - Safe, ordered chunk uploads
- **Auto-Cleanup** - Automatic expiration of incomplete uploads
- **Simple Setup** - No external services required
- **Full Observability** - Metrics and logging included
- **TypeScript** - Full type safety with comprehensive JSDoc

## Installation

```bash
npm install @uploadista/data-store-filesystem @uploadista/core
# or
pnpm add @uploadista/data-store-filesystem @uploadista/core
```

## Requirements

- Node.js 18+
- Writable disk space
- Linux/macOS/Windows
- TypeScript 5.0+ (optional but recommended)

## Quick Start

### 1. Create Filesystem Data Store

```typescript
import { createFileStore } from "@uploadista/data-store-filesystem";
import { createUploadServerLayer } from "@uploadista/server";
import { memoryKvStore } from "@uploadista/kv-store-memory";
import { webSocketEventEmitter } from "@uploadista/event-emitter-websocket";
import { Effect } from "effect";

// Create file store
const fileStore = createFileStore({
  directory: "./uploads",
  deliveryUrl: "http://localhost:3000/uploads",
});

// Use in upload server
const uploadLayer = createUploadServerLayer({
  dataStore: fileStore,
  kvStore: memoryKvStore,
  eventEmitter: webSocketEventEmitter,
});
```

### 2. Configure Upload Directory

```typescript
import path from "path";
import { createFileStore } from "@uploadista/data-store-filesystem";

// Development
const devStore = createFileStore({
  directory: "./uploads",
  deliveryUrl: "http://localhost:3000/files",
});

// Production
const prodStore = createFileStore({
  directory: "/var/uploads",
  deliveryUrl: "https://cdn.example.com/files",
});

// Docker/Kubernetes (mounted volume)
const containerStore = createFileStore({
  directory: "/mnt/uploads",
  deliveryUrl: "https://uploads.example.com",
});
```

### 3. Serve Files

```typescript
import express from "express";
import path from "path";

const app = express();

// Serve uploaded files statically
app.use("/uploads", express.static("./uploads"));

// Or with caching headers
app.use("/uploads", express.static("./uploads", {
  maxAge: "1d",
  etag: false,
}));
```

## Configuration

### `FileStoreOptions`

```typescript
type FileStoreOptions = {
  // Required
  directory: string; // Base directory for file storage (created if missing)
  deliveryUrl: string; // URL prefix for serving files

  // Optional (from parent config)
  kvStore?: KvStore; // Metadata store (defaults to in-memory)
};
```

### Upload Behavior

The filesystem store operates in **sequential mode** only:

```typescript
// Upload state tracking
// File 1: upload-123 (1MB file)
// ├── write chunk 0-512KB
// ├── write chunk 512KB-1MB
// └── complete
//
// Cannot write chunks out of order - must resume from last successful position
```
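The sequential constraint comes from how the store writes to disk: each chunk is written into the upload's file at the current byte offset, so chunks can only be appended in order. A minimal standalone sketch of that pattern in plain Node.js (illustrative only, not the package API; `appendChunkAt` is a hypothetical helper):

```typescript
import fs from "node:fs";

// Open the existing upload file and keep writing at the byte offset reached so
// far. Writing chunks at arbitrary later offsets is not supported by the store,
// which is why it reports ordered chunks and a single concurrent upload.
function appendChunkAt(
  filePath: string,
  offset: number,
  chunk: Uint8Array,
): Promise<void> {
  return new Promise((resolve, reject) => {
    const ws = fs.createWriteStream(filePath, { flags: "r+", start: offset });
    ws.on("error", reject);
    ws.end(chunk, () => resolve());
  });
}
```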

## Storage Layout

Files are stored with metadata:

```
./uploads/
├── upload-123/
│   ├── file.bin          # Actual file content
│   ├── .metadata.json    # Upload metadata
│   └── .locks/           # Concurrency locks
├── upload-456/
│   ├── file.bin
│   └── .metadata.json
└── .expiration.log       # Expiration tracking
```

Metadata example:
```json
{
  "id": "upload-123",
  "filename": "document.pdf",
  "size": 1048576,
  "uploadedAt": "2024-01-15T10:30:00Z",
  "expiresAt": "2024-01-22T10:30:00Z",
  "mimeType": "application/pdf"
}
```
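For reference, the metadata example above corresponds to a shape like the following illustrative type (derived from the sample JSON, not a type exported by the package):

```typescript
// Illustrative shape of the metadata example above; field names and formats
// are taken from the sample JSON, not from the package's type definitions.
type UploadMetadataExample = {
  id: string;         // e.g. "upload-123"
  filename: string;   // original file name
  size: number;       // bytes
  uploadedAt: string; // ISO-8601 timestamp
  expiresAt: string;  // ISO-8601 timestamp
  mimeType: string;
};
```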

## Complete Server Example

```typescript
import express, { Express } from "express";
import path from "path";
import { createExpressUploadistaAdapter } from "@uploadista/adapters-express";
import { createFileStore } from "@uploadista/data-store-filesystem";
import { memoryKvStore } from "@uploadista/kv-store-memory";
import { webSocketEventEmitter } from "@uploadista/event-emitter-websocket";
import { memoryEventBroadcaster } from "@uploadista/event-broadcaster-memory";
import http from "http";
import WebSocket from "ws";

const app: Express = express();

// Serve uploaded files
app.use("/files", express.static("./uploads"));

// Create file store
const fileStore = createFileStore({
  directory: process.env.UPLOAD_DIR || "./uploads",
  deliveryUrl: process.env.DELIVERY_URL || "http://localhost:3000/files",
});

// Create adapter
const adapter = await createExpressUploadistaAdapter({
  baseUrl: "uploadista",
  dataStore: fileStore,
  kvStore: memoryKvStore,
  eventEmitter: webSocketEventEmitter,
  eventBroadcaster: memoryEventBroadcaster,
  flows: createFlowsEffect,
});

// Mount HTTP handler
app.use(`/${adapter.baseUrl}`, (req, res) => {
  adapter.handler(req, res);
});

// WebSocket server
const server = http.createServer(app);
const wss = new WebSocket.Server({ server });

wss.on("connection", (ws, req) => {
  adapter.websocketConnectionHandler(ws, req);
});

server.listen(3000, () => {
  console.log("Server running on http://localhost:3000");
  console.log(`Files stored in: ${process.env.UPLOAD_DIR || './uploads'}`);
});
```

## Environment Configuration

### .env File

```env
# Storage
UPLOAD_DIR=./uploads
DELIVERY_URL=http://localhost:3000/files

# Or with subdirectory for temporary uploads
UPLOAD_DIR=/tmp/uploads

# Production with volume mount
UPLOAD_DIR=/mnt/persistent-storage/uploads
DELIVERY_URL=https://files.example.com
```

### Creating Upload Directory

```typescript
import fs from "fs";
import path from "path";

// Automatically created by the store
// But you can pre-create it:
const uploadDir = process.env.UPLOAD_DIR || "./uploads";
if (!fs.existsSync(uploadDir)) {
  fs.mkdirSync(uploadDir, { recursive: true, mode: 0o755 });
}

const fileStore = createFileStore({
  directory: uploadDir,
  deliveryUrl: process.env.DELIVERY_URL,
});
```

## Use Cases

### Development

Perfect for local development without external dependencies:

```typescript
const devStore = createFileStore({
  directory: "./uploads",
  deliveryUrl: "http://localhost:3000/uploads",
});

// Server runs on your machine - files stored locally
```

### Testing

Use temporary directories for test isolation:

```typescript
import { tmpdir } from "os";
import path from "path";

const testDir = path.join(tmpdir(), `uploadista-test-${Date.now()}`);
const testStore = createFileStore({
  directory: testDir,
  deliveryUrl: "http://localhost:3000/uploads",
});

// Clean up after tests
afterEach(() => {
  fs.rmSync(testDir, { recursive: true });
});
```

### Docker/Kubernetes

Mount volumes for persistent storage:

```typescript
const k8sStore = createFileStore({
  directory: "/mnt/uploads", // Kubernetes PersistentVolume
  deliveryUrl: "https://files.example.com",
});

// Deployment YAML:
// volumeMounts:
//   - name: uploads-volume
//     mountPath: /mnt/uploads
// volumes:
//   - name: uploads-volume
//     persistentVolumeClaim:
//       claimName: uploads-pvc
```

### NFS/Shared Storage

For distributed deployments:

```typescript
const nfsStore = createFileStore({
  directory: "/mnt/nfs/uploads", // NFS mount point
  deliveryUrl: "https://files.example.com",
});

// Multiple servers share the same storage
// Files visible to all instances
```

## Storage Considerations

### Disk Space

Calculate required storage:

```
Daily uploads = 100 files × 5MB = 500MB/day
Monthly = 500MB × 30 = 15GB/month
Yearly = 15GB × 12 = 180GB/year

Reserve 20% buffer for OS and system = 216GB required
```
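The same back-of-the-envelope estimate can be scripted; a small sketch using the example figures above (the 20% buffer is the same rule of thumb, not a package requirement):

```typescript
// Rough capacity estimate matching the example figures above.
const filesPerDay = 100;
const avgFileSizeMB = 5;
const bufferFactor = 1.2; // 20% headroom for OS and system

const dailyMB = filesPerDay * avgFileSizeMB; // 500 MB/day
const monthlyGB = (dailyMB * 30) / 1000;     // 15 GB/month
const yearlyGB = monthlyGB * 12;             // 180 GB/year
const requiredGB = yearlyGB * bufferFactor;  // 216 GB required

console.log(`Plan for ~${requiredGB} GB of upload storage per year`);
```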

### Backup Strategy

```bash
# Daily backup to cloud storage
# Example: rsync to AWS S3
0 2 * * * aws s3 sync /mnt/uploads s3://my-backup-bucket/uploads/$(date +\%Y-\%m-\%d)

# Or with tar
0 2 * * * tar czf /backups/uploads-$(date +\%Y-\%m-\%d).tar.gz /mnt/uploads
```

### Cleanup

```typescript
// Old uploads automatically cleaned up after expiration
// Default: 7 days (configurable in KV store)

// Manual cleanup
import fs from "fs";
const uploadDir = "./uploads";
const files = fs.readdirSync(uploadDir);

files.forEach(file => {
  const stats = fs.statSync(`${uploadDir}/${file}`);
  const age = Date.now() - stats.mtimeMs;

  // Delete files older than 30 days
  if (age > 30 * 24 * 60 * 60 * 1000) {
    fs.rmSync(`${uploadDir}/${file}`, { recursive: true });
  }
});
```

## Performance Characteristics

**Sequential uploads only:**
```
✗ Slower uploads (must resume from last position)
✗ Cannot parallel write chunks (safety constraint)
✓ Stable and reliable for small-medium files
✓ Good for development and testing
```

**Use for:**
- Development environments
- Testing scenarios
- Small to medium files (<1GB)
- Self-hosted deployments with local disk
- Shared storage (NFS, SMB)

**Don't use for:**
- High-concurrency production (use S3/Azure/GCS instead)
- Very large files (>10GB)
- Cloud-native applications (no S3/GCS/Azure integration)

## Docker Deployment

### Dockerfile

```dockerfile
FROM node:20-alpine
WORKDIR /app

COPY package*.json ./
RUN npm ci --only=production

COPY dist ./dist

ENV NODE_ENV=production
ENV UPLOAD_DIR=/data/uploads
ENV DELIVERY_URL=https://files.example.com

VOLUME ["/data/uploads"]

EXPOSE 3000
CMD ["node", "dist/server.js"]
```

### docker-compose.yml

```yaml
version: "3.8"
services:
  app:
    build: .
    ports:
      - "3000:3000"
    volumes:
      - uploads:/data/uploads
      - ./uploads:/app/public/uploads:ro # serve from web
    environment:
      UPLOAD_DIR: /data/uploads
      DELIVERY_URL: http://localhost:3000/files

  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
    volumes:
      - uploads:/usr/share/nginx/html/files:ro
      - ./nginx.conf:/etc/nginx/nginx.conf

volumes:
  uploads:
```

### Kubernetes

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: uploads-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: uploadista-server
spec:
  replicas: 1 # Note: Single replica due to sequential writes
  selector:
    matchLabels:
      app: uploadista
  template:
    metadata:
      labels:
        app: uploadista
    spec:
      containers:
        - name: app
          image: uploadista:latest
          ports:
            - containerPort: 3000
          env:
            - name: UPLOAD_DIR
              value: /mnt/uploads
            - name: DELIVERY_URL
              value: https://files.example.com
          volumeMounts:
            - name: uploads
              mountPath: /mnt/uploads
      volumes:
        - name: uploads
          persistentVolumeClaim:
            claimName: uploads-pvc
```

## Monitoring

### Disk Usage

```bash
# Check disk usage
du -sh ./uploads

# Monitor in real-time
watch 'du -sh ./uploads && ls -la ./uploads | wc -l'
```

### Metrics

```typescript
// Track storage metrics
import fs from "fs";

const uploadDir = "./uploads";
const files = fs.readdirSync(uploadDir);
const totalSize = files.reduce((sum, file) => {
  const stats = fs.statSync(`${uploadDir}/${file}`);
  return sum + stats.size;
}, 0);

console.log(`Total files: ${files.length}`);
console.log(`Total size: ${(totalSize / 1024 / 1024).toFixed(2)}MB`);
```

## Limitations

Due to sequential-only uploads:

1. **Single Replica Only** - Multiple servers can't share same directory
2. **No Parallel Chunks** - Chunks must upload in order
3. **Slower Resume** - Must re-upload from last position
4. **Disk Dependent** - Speed limited by disk I/O
5. **No Cloud Scaling** - Can't span multiple servers

For high-scale production, use S3/Azure/GCS instead.

## Related Packages

- **[@uploadista/data-store-s3](../s3/)** - AWS S3 (recommended for production)
- **[@uploadista/data-store-azure](../azure/)** - Azure Blob Storage
- **[@uploadista/data-store-gcs](../gcs/)** - Google Cloud Storage
- **[@uploadista/server](../../servers/server/)** - Core server utilities
- **[@uploadista/kv-store-memory](../../kv-stores/memory/)** - In-memory KV store
- **[@uploadista/core](../../core/)** - Core engine

## TypeScript Support

Full TypeScript support:

```typescript
import type { FileStoreOptions } from "@uploadista/data-store-filesystem";
import { createFileStore, fileStore } from "@uploadista/data-store-filesystem";
```
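A short typed configuration sketch using those exports (values are placeholders; `FileStoreOptions` only requires `directory` and `deliveryUrl` per the published type definitions):

```typescript
import type { FileStoreOptions } from "@uploadista/data-store-filesystem";
import { createFileStore } from "@uploadista/data-store-filesystem";

// Placeholder values for illustration.
const options: FileStoreOptions = {
  directory: "./uploads",
  deliveryUrl: "http://localhost:3000/files",
};

const fileStore = createFileStore(options);
```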

## Troubleshooting

### Permission Denied

```bash
# Check directory permissions
ls -ld ./uploads

# Fix permissions
chmod 755 ./uploads
chmod 644 ./uploads/*
```

### Disk Full

```bash
# Check available space
df -h

# Find largest files
du -sh ./uploads/* | sort -rh | head -10

# Clean old uploads
find ./uploads -mtime +30 -delete # Files older than 30 days
```

### Upload Fails

```bash
# Check if directory exists
test -d ./uploads && echo "exists" || echo "missing"

# Create if missing
mkdir -p ./uploads

# Verify write permissions
touch ./uploads/test && rm ./uploads/test
```

## License

MIT
package/dist/file-store.d.ts
ADDED
@@ -0,0 +1,15 @@
import type { DataStore, UploadFile } from "@uploadista/core/types";
import { UploadFileKVStore } from "@uploadista/core/types";
import { Effect } from "effect";
export type FileStoreOptions = {
    directory: string;
    deliveryUrl: string;
};
/**
 * A data store that stores files in the filesystem.
 * @param options - The options for the file store.
 * @returns A data store that stores files in the filesystem.
 */
export declare const createFileStore: ({ directory, deliveryUrl }: FileStoreOptions) => Effect.Effect<DataStore<UploadFile>, never, UploadFileKVStore>;
export declare const fileStore: (options: FileStoreOptions) => Effect.Effect<DataStore<UploadFile>, never, UploadFileKVStore>;
//# sourceMappingURL=file-store.d.ts.map
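The declaration above shows that `createFileStore` does not return a store directly; it returns an `Effect` that still requires an `UploadFileKVStore` service. A hedged sketch of satisfying that requirement with the `effect` API (this assumes `UploadFileKVStore` is a standard Effect service tag, as its use in the implementation suggests; `myKvStore` is a hypothetical implementation, in practice supplied by one of the @uploadista KV store packages):

```typescript
import { Context, Effect } from "effect";
import { UploadFileKVStore } from "@uploadista/core/types";
import { createFileStore } from "@uploadista/data-store-filesystem";

// Hypothetical KV store implementation satisfying the UploadFileKVStore service.
declare const myKvStore: Context.Tag.Service<typeof UploadFileKVStore>;

const program = createFileStore({
  directory: "./uploads",
  deliveryUrl: "http://localhost:3000/files",
}).pipe(Effect.provideService(UploadFileKVStore, myKvStore));

// Running the effect yields the DataStore<UploadFile> implementation.
Effect.runPromise(program).then((dataStore) => {
  console.log(dataStore.getCapabilities());
});
```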
package/dist/file-store.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"file-store.d.ts","sourceRoot":"","sources":["../src/file-store.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EACV,SAAS,EAGT,UAAU,EAEX,MAAM,wBAAwB,CAAC;AAChC,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAc3D,OAAO,EAAE,MAAM,EAAqB,MAAM,QAAQ,CAAC;AAEnD,MAAM,MAAM,gBAAgB,GAAG;IAC7B,SAAS,EAAE,MAAM,CAAC;IAClB,WAAW,EAAE,MAAM,CAAC;CACrB,CAAC;AAqGF;;;;GAIG;AACH,eAAO,MAAM,eAAe,GAAI,4BAA4B,gBAAgB,mEA0OxE,CAAC;AAEL,eAAO,MAAM,SAAS,GAAI,SAAS,gBAAgB,mEACzB,CAAC"}
package/dist/file-store.js
ADDED
@@ -0,0 +1,257 @@
import fs from "node:fs";
import fsProm from "node:fs/promises";
import path from "node:path";
import { UploadistaError } from "@uploadista/core/errors";
import { UploadFileKVStore } from "@uploadista/core/types";
import { filesystemActiveUploadsGauge as activeUploadsGauge, filesystemFileSizeHistogram as fileSizeHistogram, logFilesystemUploadCompletion, filesystemPartSizeHistogram as partSizeHistogram, trackFilesystemError, filesystemUploadDurationHistogram as uploadDurationHistogram, filesystemUploadPartsTotal as uploadPartsTotal, filesystemUploadRequestsTotal as uploadRequestsTotal, filesystemUploadSuccessTotal as uploadSuccessTotal, withFilesystemTimingMetrics as withTimingMetrics, withFilesystemUploadMetrics as withUploadMetrics, } from "@uploadista/observability";
import { Effect, Ref, Sink, Stream } from "effect";
const MASK = "0777";
const IGNORED_MKDIR_ERROR = "EEXIST";
// const FILE_DOESNT_EXIST = "ENOENT";
const checkOrCreateDirectory = (directory) => Effect.tryPromise({
    try: () => fsProm.mkdir(directory, { mode: MASK, recursive: true }),
    catch: (error) => {
        if (error instanceof Error &&
            "code" in error &&
            error.code === IGNORED_MKDIR_ERROR) {
            // Directory already exists, not an error
            return new UploadistaError({
                code: "UNKNOWN_ERROR",
                status: 200,
                body: "Directory already exists",
                details: "Directory already exists",
            });
        }
        return new UploadistaError({
            code: "UNKNOWN_ERROR",
            status: 500,
            body: "Failed to create directory",
            details: `Directory creation failed: ${String(error)}`,
        });
    },
}).pipe(Effect.orElse(() => Effect.void));
const createWriteStream = (file_path, offset) => Effect.sync(() => fs.createWriteStream(file_path, {
    flags: "r+",
    start: offset,
}));
const writeChunk = ({ writeStream, bytesReceived, onProgress, }) => (chunk) => Effect.gen(function* () {
    yield* Effect.async((resume) => {
        writeStream.write(chunk, (err) => {
            if (err) {
                resume(Effect.fail(new UploadistaError({
                    code: "FILE_WRITE_ERROR",
                    status: 500,
                    body: "Failed to write chunk",
                    details: `Chunk write failed: ${String(err)}`,
                })));
            }
            else {
                resume(Effect.succeed(void 0));
            }
        });
    });
    yield* Ref.update(bytesReceived, (size) => size + chunk.length);
    onProgress?.(chunk.length);
});
const endWriteStream = (writeStream) => Effect.async((resume) => {
    writeStream.end((err) => {
        if (err) {
            resume(Effect.fail(new UploadistaError({
                code: "FILE_WRITE_ERROR",
                status: 500,
                body: "Failed to close write stream",
                details: `Stream close failed: ${String(err)}`,
            })));
        }
        else {
            resume(Effect.succeed(void 0));
        }
    });
});
const destroyWriteStream = (writeStream) => Effect.sync(() => {
    if (!writeStream.destroyed) {
        writeStream.destroy();
    }
});
/**
 * A data store that stores files in the filesystem.
 * @param options - The options for the file store.
 * @returns A data store that stores files in the filesystem.
 */
export const createFileStore = ({ directory, deliveryUrl }) => Effect.gen(function* () {
    yield* checkOrCreateDirectory(directory);
    const kvStore = yield* UploadFileKVStore;
    const getCapabilities = () => {
        return {
            supportsParallelUploads: false, // Filesystem operations are sequential
            supportsConcatenation: false, // No native concatenation support
            supportsDeferredLength: false,
            supportsResumableUploads: true, // Can write at specific offsets
            supportsTransactionalUploads: false,
            maxConcurrentUploads: 1, // Sequential writes only
            minChunkSize: undefined,
            maxChunkSize: undefined,
            maxParts: undefined,
            optimalChunkSize: 1024 * 1024, // 1MB default
            requiresOrderedChunks: true, // Sequential offset-based writes
            requiresMimeTypeValidation: true,
            maxValidationSize: undefined, // no size limit
        };
    };
    const validateUploadStrategy = (strategy) => {
        const capabilities = getCapabilities();
        switch (strategy) {
            case "parallel":
                return Effect.succeed(capabilities.supportsParallelUploads);
            case "single":
                return Effect.succeed(true);
            default:
                return Effect.succeed(false);
        }
    };
    return {
        bucket: directory,
        create: (file) => {
            const fileName = file.metadata?.fileName?.toString();
            const fileExtension = fileName?.split(".").pop();
            const dirs = file.id.split("/").slice(0, -1);
            const filePath = path.join(directory, fileExtension ? `${file.id}.${fileExtension}` : file.id);
            return Effect.gen(function* () {
                yield* uploadRequestsTotal(Effect.succeed(1));
                yield* activeUploadsGauge(Effect.succeed(1));
                yield* fileSizeHistogram(Effect.succeed(file.size || 0));
                yield* Effect.tryPromise({
                    try: () => fsProm.mkdir(path.join(directory, ...dirs), {
                        recursive: true,
                    }),
                    catch: (error) => {
                        Effect.runSync(trackFilesystemError("create", error, {
                            upload_id: file.id,
                            path: filePath,
                        }));
                        return new UploadistaError({
                            code: "UNKNOWN_ERROR",
                            status: 500,
                            body: "Failed to create file directory",
                            details: `Directory creation failed: ${String(error)}`,
                        });
                    },
                });
                yield* Effect.tryPromise({
                    try: () => fsProm.writeFile(filePath, ""),
                    catch: (error) => {
                        Effect.runSync(trackFilesystemError("create", error, {
                            upload_id: file.id,
                            path: filePath,
                        }));
                        return new UploadistaError({
                            code: "UNKNOWN_ERROR",
                            status: 500,
                            body: "Failed to create file",
                            details: `File creation failed: ${String(error)}`,
                        });
                    },
                });
                const fileId = fileExtension
                    ? `${file.id}.${fileExtension}`
                    : file.id;
                file.storage = {
                    id: fileId,
                    type: file.storage.type,
                    path: filePath,
                    bucket: directory,
                };
                file.url = `${deliveryUrl}/${fileId}`;
                // Store file metadata in KV store
                yield* kvStore.set(file.id, file);
                return file;
            });
        },
        remove: (file_id) => {
            return Effect.gen(function* () {
                const uploadFile = yield* kvStore.get(file_id);
                const file_path = uploadFile.storage.path || path.join(directory, file_id);
                yield* Effect.tryPromise({
                    try: () => fsProm.unlink(file_path),
                    catch: (error) => {
                        Effect.runSync(trackFilesystemError("remove", error, {
                            upload_id: file_id,
                            path: file_path,
                        }));
                        return UploadistaError.fromCode("FILE_NOT_FOUND");
                    },
                });
                yield* kvStore.delete(file_id);
                yield* activeUploadsGauge(Effect.succeed(-1));
            });
        },
        write: ({ file_id, stream, offset }, { onProgress }) => {
            return withUploadMetrics(file_id, withTimingMetrics(uploadDurationHistogram, Effect.gen(function* () {
                const startTime = Date.now();
                // Get the upload file from KV store to retrieve the actual file path
                const uploadFile = yield* kvStore.get(file_id);
                const file_path = uploadFile.storage.path || path.join(directory, file_id);
                const bytesReceived = yield* Ref.make(0);
                try {
                    const result = yield* Effect.acquireUseRelease(createWriteStream(file_path, offset), (writeStream) => Effect.gen(function* () {
                        const sink = Sink.forEach(writeChunk({ writeStream, bytesReceived, onProgress }));
                        yield* uploadPartsTotal(Effect.succeed(1));
                        yield* Stream.run(stream, sink);
                        yield* endWriteStream(writeStream);
                        const totalBytes = yield* Ref.get(bytesReceived);
                        yield* partSizeHistogram(Effect.succeed(totalBytes));
                        return offset + totalBytes;
                    }), destroyWriteStream);
                    // Check if upload is complete
                    if (uploadFile.size && result === uploadFile.size) {
                        yield* logFilesystemUploadCompletion(file_id, {
                            fileSize: uploadFile.size,
                            totalDurationMs: Date.now() - startTime,
                            partsCount: 1,
                            averagePartSize: uploadFile.size,
                            throughputBps: uploadFile.size / (Date.now() - startTime),
                            retryCount: 0,
                        });
                        yield* uploadSuccessTotal(Effect.succeed(1));
                        yield* activeUploadsGauge(Effect.succeed(-1));
                    }
                    return result;
                }
                catch (error) {
                    Effect.runSync(trackFilesystemError("write", error, {
                        upload_id: file_id,
                        path: file_path,
                        offset,
                    }));
                    throw error;
                }
            })));
        },
        getUpload: (id) => Effect.gen(function* () {
            const uploadFile = yield* kvStore.get(id);
            // For filesystem, get the actual file size from disk
            const file_path = uploadFile.storage.path || path.join(directory, id);
            const stats = yield* Effect.tryPromise({
                try: () => fsProm.stat(file_path),
                catch: () => UploadistaError.fromCode("FILE_NOT_FOUND"),
            });
            return {
                ...uploadFile,
                offset: stats.size,
                size: uploadFile.size,
            };
        }),
        read: (id) => Effect.gen(function* () {
            const uploadFile = yield* kvStore.get(id);
            const file_path = uploadFile.storage.path || path.join(directory, id);
            const buffer = yield* Effect.tryPromise({
                try: () => fsProm.readFile(file_path),
                catch: () => UploadistaError.fromCode("FILE_READ_ERROR"),
            });
            return new Uint8Array(buffer);
        }),
        getCapabilities,
        validateUploadStrategy,
    };
});
export const fileStore = (options) => createFileStore(options);
package/dist/index.d.ts
ADDED

package/dist/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,cAAc,CAAC"}
package/dist/index.js
ADDED
@@ -0,0 +1 @@
export * from "./file-store";
package/package.json
ADDED
@@ -0,0 +1,31 @@
{
  "name": "@uploadista/data-store-filesystem",
  "type": "module",
  "version": "0.0.3",
  "description": "File system data store for Uploadista",
  "license": "MIT",
  "author": "Uploadista",
  "exports": {
    ".": {
      "types": "./dist/index.d.ts",
      "import": "./dist/index.js",
      "default": "./dist/index.js"
    }
  },
  "dependencies": {
    "effect": "3.18.4",
    "@uploadista/observability": "0.0.3",
    "@uploadista/core": "0.0.3"
  },
  "devDependencies": {
    "@types/node": "24.8.1",
    "@uploadista/typescript-config": "0.0.3"
  },
  "scripts": {
    "dev": "tsc -b",
    "build": "tsc -b",
    "format": "biome format --write ./src",
    "lint": "biome lint --write ./src",
    "check": "biome check --write ./src"
  }
}
package/src/file-store.ts
ADDED
@@ -0,0 +1,374 @@
import fs from "node:fs";
import fsProm from "node:fs/promises";
import path from "node:path";
import { UploadistaError } from "@uploadista/core/errors";
import type {
  DataStore,
  DataStoreCapabilities,
  DataStoreWriteOptions,
  UploadFile,
  UploadStrategy,
} from "@uploadista/core/types";
import { UploadFileKVStore } from "@uploadista/core/types";
import {
  filesystemActiveUploadsGauge as activeUploadsGauge,
  filesystemFileSizeHistogram as fileSizeHistogram,
  logFilesystemUploadCompletion,
  filesystemPartSizeHistogram as partSizeHistogram,
  trackFilesystemError,
  filesystemUploadDurationHistogram as uploadDurationHistogram,
  filesystemUploadPartsTotal as uploadPartsTotal,
  filesystemUploadRequestsTotal as uploadRequestsTotal,
  filesystemUploadSuccessTotal as uploadSuccessTotal,
  withFilesystemTimingMetrics as withTimingMetrics,
  withFilesystemUploadMetrics as withUploadMetrics,
} from "@uploadista/observability";
import { Effect, Ref, Sink, Stream } from "effect";

export type FileStoreOptions = {
  directory: string;
  deliveryUrl: string;
};

const MASK = "0777";
const IGNORED_MKDIR_ERROR = "EEXIST";
// const FILE_DOESNT_EXIST = "ENOENT";

const checkOrCreateDirectory = (directory: string) =>
  Effect.tryPromise({
    try: () => fsProm.mkdir(directory, { mode: MASK, recursive: true }),
    catch: (error) => {
      if (
        error instanceof Error &&
        "code" in error &&
        error.code === IGNORED_MKDIR_ERROR
      ) {
        // Directory already exists, not an error
        return new UploadistaError({
          code: "UNKNOWN_ERROR",
          status: 200,
          body: "Directory already exists",
          details: "Directory already exists",
        });
      }
      return new UploadistaError({
        code: "UNKNOWN_ERROR",
        status: 500,
        body: "Failed to create directory",
        details: `Directory creation failed: ${String(error)}`,
      });
    },
  }).pipe(Effect.orElse(() => Effect.void));

const createWriteStream = (file_path: string, offset: number) =>
  Effect.sync(() =>
    fs.createWriteStream(file_path, {
      flags: "r+",
      start: offset,
    }),
  );

const writeChunk =
  ({
    writeStream,
    bytesReceived,
    onProgress,
  }: {
    writeStream: fs.WriteStream;
    bytesReceived: Ref.Ref<number>;
    onProgress?: (chunkSize: number) => void;
  }) =>
  (chunk: Uint8Array) =>
    Effect.gen(function* () {
      yield* Effect.async<void, UploadistaError>((resume) => {
        writeStream.write(chunk, (err) => {
          if (err) {
            resume(
              Effect.fail(
                new UploadistaError({
                  code: "FILE_WRITE_ERROR",
                  status: 500,
                  body: "Failed to write chunk",
                  details: `Chunk write failed: ${String(err)}`,
                }),
              ),
            );
          } else {
            resume(Effect.succeed(void 0));
          }
        });
      });

      yield* Ref.update(bytesReceived, (size) => size + chunk.length);
      onProgress?.(chunk.length);
    });

const endWriteStream = (writeStream: fs.WriteStream) =>
  Effect.async<void, UploadistaError>((resume) => {
    writeStream.end((err: Error | null | undefined) => {
      if (err) {
        resume(
          Effect.fail(
            new UploadistaError({
              code: "FILE_WRITE_ERROR",
              status: 500,
              body: "Failed to close write stream",
              details: `Stream close failed: ${String(err)}`,
            }),
          ),
        );
      } else {
        resume(Effect.succeed(void 0));
      }
    });
  });

const destroyWriteStream = (writeStream: fs.WriteStream) =>
  Effect.sync(() => {
    if (!writeStream.destroyed) {
      writeStream.destroy();
    }
  });
/**
 * A data store that stores files in the filesystem.
 * @param options - The options for the file store.
 * @returns A data store that stores files in the filesystem.
 */
export const createFileStore = ({ directory, deliveryUrl }: FileStoreOptions) =>
  Effect.gen(function* () {
    yield* checkOrCreateDirectory(directory);
    const kvStore = yield* UploadFileKVStore;

    const getCapabilities = (): DataStoreCapabilities => {
      return {
        supportsParallelUploads: false, // Filesystem operations are sequential
        supportsConcatenation: false, // No native concatenation support
        supportsDeferredLength: false,
        supportsResumableUploads: true, // Can write at specific offsets
        supportsTransactionalUploads: false,
        maxConcurrentUploads: 1, // Sequential writes only
        minChunkSize: undefined,
        maxChunkSize: undefined,
        maxParts: undefined,
        optimalChunkSize: 1024 * 1024, // 1MB default
        requiresOrderedChunks: true, // Sequential offset-based writes
        requiresMimeTypeValidation: true,
        maxValidationSize: undefined, // no size limit
      };
    };

    const validateUploadStrategy = (
      strategy: UploadStrategy,
    ): Effect.Effect<boolean, never> => {
      const capabilities = getCapabilities();

      switch (strategy) {
        case "parallel":
          return Effect.succeed(capabilities.supportsParallelUploads);
        case "single":
          return Effect.succeed(true);
        default:
          return Effect.succeed(false);
      }
    };

    return {
      bucket: directory,
      create: (
        file: UploadFile,
      ): Effect.Effect<UploadFile, UploadistaError> => {
        const fileName = file.metadata?.fileName?.toString();
        const fileExtension = fileName?.split(".").pop();

        const dirs = file.id.split("/").slice(0, -1);
        const filePath = path.join(
          directory,
          fileExtension ? `${file.id}.${fileExtension}` : file.id,
        );

        return Effect.gen(function* () {
          yield* uploadRequestsTotal(Effect.succeed(1));
          yield* activeUploadsGauge(Effect.succeed(1));
          yield* fileSizeHistogram(Effect.succeed(file.size || 0));

          yield* Effect.tryPromise({
            try: () =>
              fsProm.mkdir(path.join(directory, ...dirs), {
                recursive: true,
              }),
            catch: (error) => {
              Effect.runSync(
                trackFilesystemError("create", error, {
                  upload_id: file.id,
                  path: filePath,
                }),
              );
              return new UploadistaError({
                code: "UNKNOWN_ERROR",
                status: 500,
                body: "Failed to create file directory",
                details: `Directory creation failed: ${String(error)}`,
              });
            },
          });

          yield* Effect.tryPromise({
            try: () => fsProm.writeFile(filePath, ""),
            catch: (error) => {
              Effect.runSync(
                trackFilesystemError("create", error, {
                  upload_id: file.id,
                  path: filePath,
                }),
              );
              return new UploadistaError({
                code: "UNKNOWN_ERROR",
                status: 500,
                body: "Failed to create file",
                details: `File creation failed: ${String(error)}`,
              });
            },
          });

          const fileId = fileExtension
            ? `${file.id}.${fileExtension}`
            : file.id;
          file.storage = {
            id: fileId,
            type: file.storage.type,
            path: filePath,
            bucket: directory,
          };
          file.url = `${deliveryUrl}/${fileId}`;

          // Store file metadata in KV store
          yield* kvStore.set(file.id, file);

          return file;
        });
      },
      remove: (file_id: string): Effect.Effect<void, UploadistaError> => {
        return Effect.gen(function* () {
          const uploadFile = yield* kvStore.get(file_id);
          const file_path =
            uploadFile.storage.path || path.join(directory, file_id);

          yield* Effect.tryPromise({
            try: () => fsProm.unlink(file_path),
            catch: (error) => {
              Effect.runSync(
                trackFilesystemError("remove", error, {
                  upload_id: file_id,
                  path: file_path,
                }),
              );
              return UploadistaError.fromCode("FILE_NOT_FOUND");
            },
          });

          yield* kvStore.delete(file_id);
          yield* activeUploadsGauge(Effect.succeed(-1));
        });
      },
      write: (
        { file_id, stream, offset }: DataStoreWriteOptions,
        { onProgress }: { onProgress?: (chunkSize: number) => void },
      ): Effect.Effect<number, UploadistaError> => {
        return withUploadMetrics(
          file_id,
          withTimingMetrics(
            uploadDurationHistogram,
            Effect.gen(function* () {
              const startTime = Date.now();
              // Get the upload file from KV store to retrieve the actual file path
              const uploadFile = yield* kvStore.get(file_id);
              const file_path =
                uploadFile.storage.path || path.join(directory, file_id);

              const bytesReceived = yield* Ref.make(0);

              try {
                const result = yield* Effect.acquireUseRelease(
                  createWriteStream(file_path, offset),
                  (writeStream) =>
                    Effect.gen(function* () {
                      const sink = Sink.forEach(
                        writeChunk({ writeStream, bytesReceived, onProgress }),
                      );

                      yield* uploadPartsTotal(Effect.succeed(1));
                      yield* Stream.run(stream, sink);
                      yield* endWriteStream(writeStream);

                      const totalBytes = yield* Ref.get(bytesReceived);
                      yield* partSizeHistogram(Effect.succeed(totalBytes));
                      return offset + totalBytes;
                    }),
                  destroyWriteStream,
                );

                // Check if upload is complete
                if (uploadFile.size && result === uploadFile.size) {
                  yield* logFilesystemUploadCompletion(file_id, {
                    fileSize: uploadFile.size,
                    totalDurationMs: Date.now() - startTime,
                    partsCount: 1,
                    averagePartSize: uploadFile.size,
                    throughputBps: uploadFile.size / (Date.now() - startTime),
                    retryCount: 0,
                  });
                  yield* uploadSuccessTotal(Effect.succeed(1));
                  yield* activeUploadsGauge(Effect.succeed(-1));
                }

                return result;
              } catch (error) {
                Effect.runSync(
                  trackFilesystemError("write", error, {
                    upload_id: file_id,
                    path: file_path,
                    offset,
                  }),
                );
                throw error;
              }
            }),
          ),
        );
      },
      getUpload: (id: string) =>
        Effect.gen(function* () {
          const uploadFile = yield* kvStore.get(id);

          // For filesystem, get the actual file size from disk
          const file_path = uploadFile.storage.path || path.join(directory, id);
          const stats = yield* Effect.tryPromise({
            try: () => fsProm.stat(file_path),
            catch: () => UploadistaError.fromCode("FILE_NOT_FOUND"),
          });

          return {
            ...uploadFile,
            offset: stats.size,
            size: uploadFile.size,
          };
        }),
      read: (id: string) =>
        Effect.gen(function* () {
          const uploadFile = yield* kvStore.get(id);
          const file_path = uploadFile.storage.path || path.join(directory, id);

          const buffer = yield* Effect.tryPromise({
            try: () => fsProm.readFile(file_path),
            catch: () => UploadistaError.fromCode("FILE_READ_ERROR"),
          });

          return new Uint8Array(buffer);
        }),
      getCapabilities,
      validateUploadStrategy,
    } as DataStore<UploadFile>;
  });

export const fileStore = (options: FileStoreOptions) =>
  createFileStore(options);
package/src/index.ts
ADDED
@@ -0,0 +1 @@
export * from "./file-store";
package/tsconfig.json
ADDED

package/tsconfig.tsbuildinfo
ADDED
@@ -0,0 +1 @@
{"root":["./src/file-store.ts","./src/index.ts"],"version":"5.9.3"}