s3db.js 9.2.2 → 10.0.0
This diff shows the changes between the two package versions as they appear in their respective public registries; it is provided for informational purposes only.
- package/README.md +71 -13
- package/dist/s3db.cjs.js +466 -8
- package/dist/s3db.cjs.js.map +1 -1
- package/dist/s3db.es.js +466 -9
- package/dist/s3db.es.js.map +1 -1
- package/mcp/server.js +12 -8
- package/package.json +4 -4
- package/src/client.class.js +2 -2
- package/src/concerns/high-performance-inserter.js +285 -0
- package/src/concerns/partition-queue.js +171 -0
- package/src/errors.js +10 -2
- package/src/partition-drivers/base-partition-driver.js +96 -0
- package/src/partition-drivers/index.js +60 -0
- package/src/partition-drivers/memory-partition-driver.js +274 -0
- package/src/partition-drivers/sqs-partition-driver.js +332 -0
- package/src/partition-drivers/sync-partition-driver.js +38 -0
- package/src/plugins/backup.plugin.js +1 -1
- package/src/plugins/backup.plugin.js.backup +1 -1
- package/src/plugins/eventual-consistency.plugin.js +609 -0
- package/src/plugins/index.js +1 -0
- package/PLUGINS.md +0 -5036
package/mcp/server.js
CHANGED

@@ -628,7 +628,7 @@ class S3dbMCPServer {

   setupTransport() {
     const transport = process.argv.includes('--transport=sse') || process.env.MCP_TRANSPORT === 'sse'
-      ? new SSEServerTransport('/sse', process.env.MCP_SERVER_HOST || '0.0.0.0', parseInt(process.env.MCP_SERVER_PORT || '
+      ? new SSEServerTransport('/sse', process.env.MCP_SERVER_HOST || '0.0.0.0', parseInt(process.env.MCP_SERVER_PORT || '17500'))
       : new StdioServerTransport();

     this.server.connect(transport);

@@ -636,7 +636,7 @@ class S3dbMCPServer {

     // SSE specific setup
     if (transport instanceof SSEServerTransport) {
       const host = process.env.MCP_SERVER_HOST || '0.0.0.0';
-      const port = process.env.MCP_SERVER_PORT || '
+      const port = process.env.MCP_SERVER_PORT || '17500';

       console.log(`S3DB MCP Server running on http://${host}:${port}/sse`);

@@ -723,12 +723,16 @@ class S3dbMCPServer {

     // Add CachePlugin (enabled by default, configurable)
     const cacheEnabled = enableCache !== false && process.env.S3DB_CACHE_ENABLED !== 'false';
+
+    // Declare cache variables in outer scope to avoid reference errors
+    let cacheMaxSizeEnv, cacheTtlEnv, cacheDriverEnv, cacheDirectoryEnv, cachePrefixEnv;
+
     if (cacheEnabled) {
-
-
-
-
-
+      cacheMaxSizeEnv = process.env.S3DB_CACHE_MAX_SIZE ? parseInt(process.env.S3DB_CACHE_MAX_SIZE) : cacheMaxSize;
+      cacheTtlEnv = process.env.S3DB_CACHE_TTL ? parseInt(process.env.S3DB_CACHE_TTL) : cacheTtl;
+      cacheDriverEnv = process.env.S3DB_CACHE_DRIVER || cacheDriver;
+      cacheDirectoryEnv = process.env.S3DB_CACHE_DIRECTORY || cacheDirectory;
+      cachePrefixEnv = process.env.S3DB_CACHE_PREFIX || cachePrefix;

       let cacheConfig = {
         includePartitions: true

@@ -1358,7 +1362,7 @@ function parseArgs() {

   const args = {
     transport: 'stdio',
     host: '0.0.0.0',
-    port:
+    port: 17500
   };

   process.argv.forEach((arg, index) => {
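The practical upshot of these server.js changes: the SSE transport and parseArgs() now agree on a concrete default port, 17500, and the cache variables are hoisted out of the if block so later code can reference them safely. A minimal sketch of how the resolved SSE settings behave, using only names visible in the diff above (not package code):

  // Sketch: how server.js 10.0.0 resolves its SSE endpoint.
  const useSSE = process.argv.includes('--transport=sse') || process.env.MCP_TRANSPORT === 'sse';
  const host = process.env.MCP_SERVER_HOST || '0.0.0.0';
  const port = parseInt(process.env.MCP_SERVER_PORT || '17500'); // new default
  if (useSSE) console.log(`S3DB MCP Server running on http://${host}:${port}/sse`);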
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "s3db.js",
-  "version": "9.2.2",
+  "version": "10.0.0",
   "description": "Use AWS S3, the world's most reliable document storage, as a database with this ORM.",
   "main": "dist/s3db.cjs.js",
   "module": "dist/s3db.es.js",

@@ -58,8 +58,8 @@
     "UNLICENSE"
   ],
   "dependencies": {
-    "@aws-sdk/client-s3": "^3.
-    "@modelcontextprotocol/sdk": "^1.17.
+    "@aws-sdk/client-s3": "^3.873.0",
+    "@modelcontextprotocol/sdk": "^1.17.4",
     "@smithy/node-http-handler": "^4.1.1",
     "@supercharge/promise-pool": "^3.2.0",
     "dotenv": "^17.2.1",

@@ -112,7 +112,7 @@
     "node-loader": "^2.1.0",
     "ora": "^8.2.0",
     "pkg": "^5.8.1",
-    "rollup": "^4.
+    "rollup": "^4.48.0",
     "rollup-plugin-copy": "^3.5.0",
     "rollup-plugin-esbuild": "^6.2.1",
     "rollup-plugin-polyfill-node": "^0.13.0",
package/src/client.class.js
CHANGED

@@ -41,8 +41,8 @@ export class Client extends EventEmitter {
     this.httpClientOptions = {
       keepAlive: true, // Enabled for better performance
       keepAliveMsecs: 1000, // 1 second keep-alive
-      maxSockets:
-      maxFreeSockets:
+      maxSockets: httpClientOptions.maxSockets || 500, // High concurrency support
+      maxFreeSockets: httpClientOptions.maxFreeSockets || 100, // Better connection reuse
       timeout: 60000, // 60 second timeout
       ...httpClientOptions,
     };
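Because the new defaults are applied with || before ...httpClientOptions is spread, callers can still override both limits. A hedged usage sketch — the exact Client constructor options shape is assumed from the diff, not taken from the full source:

  // Hypothetical: raising the pool limits past the new 500/100 defaults.
  const client = new Client({
    httpClientOptions: {
      maxSockets: 1000,
      maxFreeSockets: 200,
    },
    // ...other connection options omitted
  });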
package/src/concerns/high-performance-inserter.js
ADDED

@@ -0,0 +1,285 @@
+import { PromisePool } from '@supercharge/promise-pool';
+import { tryFn } from './try-fn.js';
+
+/**
+ * High-performance bulk inserter for S3DB
+ * Optimized for continuous high-volume inserts with partitions
+ */
+export class HighPerformanceInserter {
+  constructor(resource, options = {}) {
+    this.resource = resource;
+
+    // Performance tuning
+    this.batchSize = options.batchSize || 100;
+    this.concurrency = options.concurrency || 50; // Parallel S3 operations
+    this.flushInterval = options.flushInterval || 1000; // ms
+    this.disablePartitions = options.disablePartitions || false;
+    this.useStreamMode = options.useStreamMode || false;
+
+    // Buffers
+    this.insertBuffer = [];
+    this.partitionBuffer = new Map(); // Deferred partition operations
+    this.stats = {
+      inserted: 0,
+      failed: 0,
+      partitionsPending: 0,
+      avgInsertTime: 0
+    };
+
+    // Auto-flush timer
+    this.flushTimer = null;
+    this.isProcessing = false;
+
+    // Partition processing queue
+    this.partitionQueue = [];
+    this.partitionProcessor = null;
+  }
+
+  /**
+   * Add item to insert buffer (non-blocking)
+   */
+  async add(data) {
+    this.insertBuffer.push({
+      data,
+      timestamp: Date.now(),
+      promise: null
+    });
+
+    // Auto-flush when buffer is full
+    if (this.insertBuffer.length >= this.batchSize) {
+      setImmediate(() => this.flush());
+    } else if (!this.flushTimer) {
+      // Set flush timer if not already set
+      this.flushTimer = setTimeout(() => this.flush(), this.flushInterval);
+    }
+
+    return { queued: true, position: this.insertBuffer.length };
+  }
+
+  /**
+   * Bulk add items
+   */
+  async bulkAdd(items) {
+    for (const item of items) {
+      await this.add(item);
+    }
+    return { queued: items.length };
+  }
+
+  /**
+   * Process buffered inserts in parallel
+   */
+  async flush() {
+    if (this.isProcessing || this.insertBuffer.length === 0) return;
+
+    this.isProcessing = true;
+    clearTimeout(this.flushTimer);
+    this.flushTimer = null;
+
+    // Take current buffer and reset
+    const batch = this.insertBuffer.splice(0, this.batchSize);
+    const startTime = Date.now();
+
+    try {
+      // Process inserts in parallel with connection pooling
+      const { results, errors } = await PromisePool
+        .for(batch)
+        .withConcurrency(this.concurrency)
+        .process(async (item) => {
+          return await this.performInsert(item);
+        });
+
+      // Update stats
+      const duration = Date.now() - startTime;
+      this.stats.inserted += results.filter(r => r.success).length;
+      this.stats.failed += errors.length;
+      this.stats.avgInsertTime = duration / batch.length;
+
+      // Process partition queue separately (non-blocking)
+      if (!this.disablePartitions && this.partitionQueue.length > 0) {
+        this.processPartitionsAsync();
+      }
+
+    } finally {
+      this.isProcessing = false;
+
+      // Continue processing if more items
+      if (this.insertBuffer.length > 0) {
+        setImmediate(() => this.flush());
+      }
+    }
+  }
+
+  /**
+   * Perform single insert with optimizations
+   */
+  async performInsert(item) {
+    const { data } = item;
+
+    try {
+      // Temporarily disable partitions for the insert
+      const originalAsyncPartitions = this.resource.config.asyncPartitions;
+      const originalPartitions = this.resource.config.partitions;
+
+      if (this.disablePartitions) {
+        // Completely bypass partitions during insert
+        this.resource.config.partitions = {};
+      }
+
+      // Perform insert
+      const [ok, err, result] = await tryFn(() => this.resource.insert(data));
+
+      if (!ok) {
+        return { success: false, error: err };
+      }
+
+      // Queue partition creation for later (if not disabled)
+      if (!this.disablePartitions && originalPartitions && Object.keys(originalPartitions).length > 0) {
+        this.partitionQueue.push({
+          operation: 'create',
+          data: result,
+          partitions: originalPartitions
+        });
+        this.stats.partitionsPending++;
+      }
+
+      // Restore original config
+      this.resource.config.partitions = originalPartitions;
+      this.resource.config.asyncPartitions = originalAsyncPartitions;
+
+      return { success: true, data: result };
+
+    } catch (error) {
+      return { success: false, error };
+    }
+  }
+
+  /**
+   * Process partitions asynchronously in background
+   */
+  async processPartitionsAsync() {
+    if (this.partitionProcessor) return; // Already processing
+
+    this.partitionProcessor = setImmediate(async () => {
+      const batch = this.partitionQueue.splice(0, 100); // Process 100 at a time
+
+      if (batch.length === 0) {
+        this.partitionProcessor = null;
+        return;
+      }
+
+      // Create partitions in parallel with lower priority
+      await PromisePool
+        .for(batch)
+        .withConcurrency(10) // Lower concurrency for partitions
+        .process(async (item) => {
+          try {
+            await this.resource.createPartitionReferences(item.data);
+            this.stats.partitionsPending--;
+          } catch (err) {
+            // Silently handle partition errors
+            this.resource.emit('partitionIndexError', {
+              operation: 'bulk-insert',
+              error: err
+            });
+          }
+        });
+
+      // Continue processing if more partitions
+      if (this.partitionQueue.length > 0) {
+        this.processPartitionsAsync();
+      } else {
+        this.partitionProcessor = null;
+      }
+    });
+  }
+
+  /**
+   * Force flush all pending operations
+   */
+  async forceFlush() {
+    while (this.insertBuffer.length > 0 || this.isProcessing) {
+      await this.flush();
+      await new Promise(resolve => setTimeout(resolve, 10));
+    }
+  }
+
+  /**
+   * Get current statistics
+   */
+  getStats() {
+    return {
+      ...this.stats,
+      bufferSize: this.insertBuffer.length,
+      isProcessing: this.isProcessing,
+      throughput: this.stats.avgInsertTime > 0
+        ? Math.round(1000 / this.stats.avgInsertTime)
+        : 0 // inserts per second
+    };
+  }
+
+  /**
+   * Destroy and cleanup
+   */
+  destroy() {
+    clearTimeout(this.flushTimer);
+    this.insertBuffer = [];
+    this.partitionQueue = [];
+  }
+}
+
+/**
+ * Stream-based inserter for maximum performance
+ */
+export class StreamInserter {
+  constructor(resource, options = {}) {
+    this.resource = resource;
+    this.concurrency = options.concurrency || 100;
+    this.skipPartitions = options.skipPartitions !== false;
+    this.skipHooks = options.skipHooks || false;
+    this.skipValidation = options.skipValidation || false;
+  }
+
+  /**
+   * Direct S3 write bypassing most S3DB overhead
+   */
+  async fastInsert(data) {
+    const id = data.id || this.resource.generateId();
+    const key = this.resource.getResourceKey(id);
+
+    // Minimal processing
+    const metadata = this.skipValidation
+      ? { id, ...data }
+      : await this.resource.schema.mapper({ id, ...data });
+
+    // Direct S3 put
+    const command = {
+      Bucket: this.resource.client.config.bucket,
+      Key: key,
+      Metadata: metadata,
+      Body: '' // Empty body for speed
+    };
+
+    await this.resource.client.client.send(new PutObjectCommand(command));
+
+    return { id, inserted: true };
+  }
+
+  /**
+   * Bulk insert with maximum parallelism
+   */
+  async bulkInsert(items) {
+    const { results, errors } = await PromisePool
+      .for(items)
+      .withConcurrency(this.concurrency)
+      .process(async (item) => {
+        return await this.fastInsert(item);
+      });
+
+    return {
+      success: results.length,
+      failed: errors.length,
+      errors: errors.slice(0, 10) // First 10 errors
+    };
+  }
+}
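In short, HighPerformanceInserter buffers add() calls, flushes them through a PromisePool at the configured concurrency, and defers partition-reference writes to a background queue. A hedged usage sketch — the import path and option values are illustrative, and `resource` stands for an existing s3db resource:

  import { HighPerformanceInserter } from 's3db.js/src/concerns/high-performance-inserter.js'; // path assumed

  const inserter = new HighPerformanceInserter(resource, {
    batchSize: 200,  // flush once 200 items are buffered
    concurrency: 50, // parallel inserts per flush
  });

  for (const record of records) await inserter.add(record); // non-blocking enqueue
  await inserter.forceFlush();       // drain the buffer before shutdown
  console.log(inserter.getStats());  // { inserted, failed, bufferSize, throughput, ... }
  inserter.destroy();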
package/src/concerns/partition-queue.js
ADDED

@@ -0,0 +1,171 @@
+import { EventEmitter } from 'events';
+
+/**
+ * Robust partition operation queue with retry and persistence
+ */
+export class PartitionQueue extends EventEmitter {
+  constructor(options = {}) {
+    super();
+    this.maxRetries = options.maxRetries || 3;
+    this.retryDelay = options.retryDelay || 1000;
+    this.persistence = options.persistence || null; // Could be filesystem/redis/etc
+    this.queue = [];
+    this.processing = false;
+    this.failures = [];
+  }
+
+  /**
+   * Add partition operation to queue
+   */
+  async enqueue(operation) {
+    const item = {
+      id: `${Date.now()}-${Math.random()}`,
+      operation,
+      retries: 0,
+      createdAt: new Date(),
+      status: 'pending'
+    };
+
+    this.queue.push(item);
+
+    // Persist if configured
+    if (this.persistence) {
+      await this.persistence.save(item);
+    }
+
+    // Start processing if not already
+    if (!this.processing) {
+      setImmediate(() => this.process());
+    }
+
+    return item.id;
+  }
+
+  /**
+   * Process queue items
+   */
+  async process() {
+    if (this.processing || this.queue.length === 0) return;
+
+    this.processing = true;
+
+    while (this.queue.length > 0) {
+      const item = this.queue.shift();
+
+      try {
+        await this.executeOperation(item);
+        item.status = 'completed';
+        this.emit('success', item);
+
+        // Remove from persistence
+        if (this.persistence) {
+          await this.persistence.remove(item.id);
+        }
+      } catch (error) {
+        item.retries++;
+        item.lastError = error;
+
+        if (item.retries < this.maxRetries) {
+          // Retry with exponential backoff
+          const delay = this.retryDelay * Math.pow(2, item.retries - 1);
+          item.status = 'retrying';
+
+          setTimeout(() => {
+            this.queue.push(item);
+            if (!this.processing) this.process();
+          }, delay);
+
+          this.emit('retry', { item, error, delay });
+        } else {
+          // Max retries reached
+          item.status = 'failed';
+          this.failures.push(item);
+          this.emit('failure', { item, error });
+
+          // Move to DLQ in persistence
+          if (this.persistence) {
+            await this.persistence.moveToDLQ(item);
+          }
+        }
+      }
+    }
+
+    this.processing = false;
+  }
+
+  /**
+   * Execute the actual partition operation
+   */
+  async executeOperation(item) {
+    const { type, resource, data } = item.operation;
+
+    switch (type) {
+      case 'create':
+        return await resource.createPartitionReferences(data);
+      case 'update':
+        return await resource.handlePartitionReferenceUpdates(data.original, data.updated);
+      case 'delete':
+        return await resource.deletePartitionReferences(data);
+      default:
+        throw new Error(`Unknown operation type: ${type}`);
+    }
+  }
+
+  /**
+   * Recover from persistence on startup
+   */
+  async recover() {
+    if (!this.persistence) return;
+
+    const items = await this.persistence.getPending();
+    this.queue.push(...items);
+
+    if (this.queue.length > 0) {
+      this.emit('recovered', { count: this.queue.length });
+      setImmediate(() => this.process());
+    }
+  }
+
+  /**
+   * Get queue statistics
+   */
+  getStats() {
+    return {
+      pending: this.queue.length,
+      failures: this.failures.length,
+      processing: this.processing,
+      failureRate: this.failures.length / (this.queue.length + this.failures.length) || 0
+    };
+  }
+}
+
+/**
+ * Simple in-memory persistence (can be replaced with Redis, filesystem, etc)
+ */
+export class InMemoryPersistence {
+  constructor() {
+    this.items = new Map();
+    this.dlq = new Map();
+  }
+
+  async save(item) {
+    this.items.set(item.id, item);
+  }
+
+  async remove(id) {
+    this.items.delete(id);
+  }
+
+  async moveToDLQ(item) {
+    this.items.delete(item.id);
+    this.dlq.set(item.id, item);
+  }
+
+  async getPending() {
+    return Array.from(this.items.values());
+  }
+
+  async getDLQ() {
+    return Array.from(this.dlq.values());
+  }
+}
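PartitionQueue complements the inserter: operations are retried with exponential backoff, surfaced through 'retry'/'failure' events, and optionally persisted so recover() can replay them after a restart. A hedged wiring sketch — the import path is assumed, and `resource` and `record` are placeholders:

  import { PartitionQueue, InMemoryPersistence } from 's3db.js/src/concerns/partition-queue.js'; // path assumed

  const queue = new PartitionQueue({
    maxRetries: 3,
    retryDelay: 500,
    persistence: new InMemoryPersistence(), // swap for Redis/filesystem per the comment above
  });
  queue.on('retry', ({ item, delay }) => console.warn(`retrying ${item.id} in ${delay}ms`));
  queue.on('failure', ({ item }) => console.error(`dead-lettered ${item.id}`));

  await queue.enqueue({ type: 'create', resource, data: record });
  await queue.recover(); // on startup, replay anything persistence still holds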
package/src/errors.js
CHANGED

@@ -225,8 +225,16 @@ export function mapAwsError(err, context = {}) {
     return new MissingMetadata({ ...context, original: err, metadata, commandName, commandInput, suggestion });
   }
   // Other mappings can be added here
-
-
+  // Include the original error's details to make debugging easier
+  const errorDetails = [
+    `Unknown error: ${err.message || err.toString()}`,
+    err.code && `Code: ${err.code}`,
+    err.statusCode && `Status: ${err.statusCode}`,
+    err.stack && `Stack: ${err.stack.split('\n')[0]}`,
+  ].filter(Boolean).join(' | ');
+
+  suggestion = `Check the error details and AWS documentation. Original error: ${err.message || err.toString()}`;
+  return new UnknownError(errorDetails, { ...context, original: err, metadata, commandName, commandInput, suggestion });
 }

 export class ConnectionStringError extends S3dbError {
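Previously, unmapped errors fell through mapAwsError with no detail; now the fallback always returns an UnknownError carrying the message, code, status, and first stack line. A rough illustration — the values are invented, and it assumes UnknownError uses its first argument as the message:

  const err = Object.assign(new Error('socket hang up'), { code: 'ECONNRESET', statusCode: 500 });
  // mapAwsError(err) now yields an UnknownError whose message reads approximately:
  // "Unknown error: socket hang up | Code: ECONNRESET | Status: 500 | Stack: Error: socket hang up"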
package/src/partition-drivers/base-partition-driver.js
ADDED

@@ -0,0 +1,96 @@
+import { EventEmitter } from 'events';
+
+/**
+ * Base class for all partition drivers
+ * Defines the interface that all drivers must implement
+ */
+export class BasePartitionDriver extends EventEmitter {
+  constructor(options = {}) {
+    super();
+    this.options = options;
+    this.stats = {
+      queued: 0,
+      processed: 0,
+      failed: 0,
+      processing: 0
+    };
+  }
+
+  /**
+   * Initialize the driver
+   */
+  async initialize() {
+    // Override in subclasses if needed
+  }
+
+  /**
+   * Queue partition operations for processing
+   * @param {Object} operation - The partition operation to queue
+   * @param {string} operation.type - 'create', 'update', or 'delete'
+   * @param {Object} operation.resource - The resource instance
+   * @param {Object} operation.data - The data for the operation
+   */
+  async queue(operation) {
+    throw new Error('queue() must be implemented by subclass');
+  }
+
+  /**
+   * Process a single partition operation
+   */
+  async processOperation(operation) {
+    const { type, resource, data } = operation;
+
+    try {
+      this.stats.processing++;
+
+      switch (type) {
+        case 'create':
+          await resource.createPartitionReferences(data.object);
+          break;
+
+        case 'update':
+          await resource.handlePartitionReferenceUpdates(data.original, data.updated);
+          break;
+
+        case 'delete':
+          await resource.deletePartitionReferences(data.object);
+          break;
+
+        default:
+          throw new Error(`Unknown partition operation type: ${type}`);
+      }
+
+      this.stats.processed++;
+      this.emit('processed', operation);
+
+    } catch (error) {
+      this.stats.failed++;
+      this.emit('error', { operation, error });
+      throw error;
+    } finally {
+      this.stats.processing--;
+    }
+  }
+
+  /**
+   * Flush any pending operations
+   */
+  async flush() {
+    // Override in subclasses if needed
+  }
+
+  /**
+   * Get driver statistics
+   */
+  getStats() {
+    return { ...this.stats };
+  }
+
+  /**
+   * Shutdown the driver
+   */
+  async shutdown() {
+    await this.flush();
+    this.removeAllListeners();
+  }
+}
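The base class handles dispatch and bookkeeping; the concrete drivers in the file list (memory, SQS, sync) only decide when queued operations get processed. A hedged sketch of the minimal subclass contract — the class name is invented and the import path assumed:

  import { BasePartitionDriver } from 's3db.js/src/partition-drivers/base-partition-driver.js'; // path assumed

  class InlinePartitionDriver extends BasePartitionDriver {
    async queue(operation) {
      this.stats.queued++;
      // A real driver would buffer/batch; this one processes immediately.
      await this.processOperation(operation); // inherited: dispatches on operation.type
    }
  }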