s3db.js 9.3.0 → 10.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "s3db.js",
3
- "version": "9.3.0",
3
+ "version": "10.0.1",
4
4
  "description": "Use AWS S3, the world's most reliable document storage, as a database with this ORM.",
5
5
  "main": "dist/s3db.cjs.js",
6
6
  "module": "dist/s3db.es.js",
@@ -41,8 +41,8 @@ export class Client extends EventEmitter {
41
41
  this.httpClientOptions = {
42
42
  keepAlive: true, // Enabled for better performance
43
43
  keepAliveMsecs: 1000, // 1 second keep-alive
44
- maxSockets: 50, // Balanced for most applications
45
- maxFreeSockets: 10, // Good connection reuse
44
+ maxSockets: httpClientOptions.maxSockets || 500, // High concurrency support
45
+ maxFreeSockets: httpClientOptions.maxFreeSockets || 100, // Better connection reuse
46
46
  timeout: 60000, // 60 second timeout
47
47
  ...httpClientOptions,
48
48
  };
@@ -116,33 +116,34 @@ export class Client extends EventEmitter {
116
116
  return response;
117
117
  }
118
118
 
119
- async putObject({ key, metadata, contentType, body, contentEncoding, contentLength }) {
119
+ async putObject({ key, metadata, contentType, body, contentEncoding, contentLength, ifMatch }) {
120
120
  const keyPrefix = typeof this.config.keyPrefix === 'string' ? this.config.keyPrefix : '';
121
121
  const fullKey = keyPrefix ? path.join(keyPrefix, key) : key;
122
-
122
+
123
123
  // Ensure all metadata values are strings and use smart encoding
124
124
  const stringMetadata = {};
125
125
  if (metadata) {
126
126
  for (const [k, v] of Object.entries(metadata)) {
127
127
  // Ensure key is a valid string
128
128
  const validKey = String(k).replace(/[^a-zA-Z0-9\-_]/g, '_');
129
-
129
+
130
130
  // Smart encode the value
131
131
  const { encoded } = metadataEncode(v);
132
132
  stringMetadata[validKey] = encoded;
133
133
  }
134
134
  }
135
-
135
+
136
136
  const options = {
137
137
  Bucket: this.config.bucket,
138
138
  Key: keyPrefix ? path.join(keyPrefix, key) : key,
139
139
  Metadata: stringMetadata,
140
140
  Body: body || Buffer.alloc(0),
141
141
  };
142
-
142
+
143
143
  if (contentType !== undefined) options.ContentType = contentType
144
144
  if (contentEncoding !== undefined) options.ContentEncoding = contentEncoding
145
145
  if (contentLength !== undefined) options.ContentLength = contentLength
146
+ if (ifMatch !== undefined) options.IfMatch = ifMatch
146
147
 
147
148
  let response, error;
148
149
  try {
@@ -0,0 +1,285 @@
1
+ import { PromisePool } from '@supercharge/promise-pool';
2
+ import { tryFn } from './try-fn.js';
3
+
4
+ /**
5
+ * High-performance bulk inserter for S3DB
6
+ * Optimized for continuous high-volume inserts with partitions
7
+ */
8
+ export class HighPerformanceInserter {
9
+ constructor(resource, options = {}) {
10
+ this.resource = resource;
11
+
12
+ // Performance tuning
13
+ this.batchSize = options.batchSize || 100;
14
+ this.concurrency = options.concurrency || 50; // Parallel S3 operations
15
+ this.flushInterval = options.flushInterval || 1000; // ms
16
+ this.disablePartitions = options.disablePartitions || false;
17
+ this.useStreamMode = options.useStreamMode || false;
18
+
19
+ // Buffers
20
+ this.insertBuffer = [];
21
+ this.partitionBuffer = new Map(); // Deferred partition operations
22
+ this.stats = {
23
+ inserted: 0,
24
+ failed: 0,
25
+ partitionsPending: 0,
26
+ avgInsertTime: 0
27
+ };
28
+
29
+ // Auto-flush timer
30
+ this.flushTimer = null;
31
+ this.isProcessing = false;
32
+
33
+ // Partition processing queue
34
+ this.partitionQueue = [];
35
+ this.partitionProcessor = null;
36
+ }
37
+
38
+ /**
39
+ * Add item to insert buffer (non-blocking)
40
+ */
41
+ async add(data) {
42
+ this.insertBuffer.push({
43
+ data,
44
+ timestamp: Date.now(),
45
+ promise: null
46
+ });
47
+
48
+ // Auto-flush when buffer is full
49
+ if (this.insertBuffer.length >= this.batchSize) {
50
+ setImmediate(() => this.flush());
51
+ } else if (!this.flushTimer) {
52
+ // Set flush timer if not already set
53
+ this.flushTimer = setTimeout(() => this.flush(), this.flushInterval);
54
+ }
55
+
56
+ return { queued: true, position: this.insertBuffer.length };
57
+ }
58
+
59
+ /**
60
+ * Bulk add items
61
+ */
62
+ async bulkAdd(items) {
63
+ for (const item of items) {
64
+ await this.add(item);
65
+ }
66
+ return { queued: items.length };
67
+ }
68
+
69
+ /**
70
+ * Process buffered inserts in parallel
71
+ */
72
+ async flush() {
73
+ if (this.isProcessing || this.insertBuffer.length === 0) return;
74
+
75
+ this.isProcessing = true;
76
+ clearTimeout(this.flushTimer);
77
+ this.flushTimer = null;
78
+
79
+ // Take current buffer and reset
80
+ const batch = this.insertBuffer.splice(0, this.batchSize);
81
+ const startTime = Date.now();
82
+
83
+ try {
84
+ // Process inserts in parallel with connection pooling
85
+ const { results, errors } = await PromisePool
86
+ .for(batch)
87
+ .withConcurrency(this.concurrency)
88
+ .process(async (item) => {
89
+ return await this.performInsert(item);
90
+ });
91
+
92
+ // Update stats
93
+ const duration = Date.now() - startTime;
94
+ this.stats.inserted += results.filter(r => r.success).length;
95
+ this.stats.failed += errors.length;
96
+ this.stats.avgInsertTime = duration / batch.length;
97
+
98
+ // Process partition queue separately (non-blocking)
99
+ if (!this.disablePartitions && this.partitionQueue.length > 0) {
100
+ this.processPartitionsAsync();
101
+ }
102
+
103
+ } finally {
104
+ this.isProcessing = false;
105
+
106
+ // Continue processing if more items
107
+ if (this.insertBuffer.length > 0) {
108
+ setImmediate(() => this.flush());
109
+ }
110
+ }
111
+ }
112
+
113
+ /**
114
+ * Perform single insert with optimizations
115
+ */
116
+ async performInsert(item) {
117
+ const { data } = item;
118
+
119
+ try {
120
+ // Temporarily disable partitions for the insert
121
+ const originalAsyncPartitions = this.resource.config.asyncPartitions;
122
+ const originalPartitions = this.resource.config.partitions;
123
+
124
+ if (this.disablePartitions) {
125
+ // Completely bypass partitions during insert
126
+ this.resource.config.partitions = {};
127
+ }
128
+
129
+ // Perform insert
130
+ const [ok, err, result] = await tryFn(() => this.resource.insert(data));
131
+
132
+ if (!ok) {
133
+ return { success: false, error: err };
134
+ }
135
+
136
+ // Queue partition creation for later (if not disabled)
137
+ if (!this.disablePartitions && originalPartitions && Object.keys(originalPartitions).length > 0) {
138
+ this.partitionQueue.push({
139
+ operation: 'create',
140
+ data: result,
141
+ partitions: originalPartitions
142
+ });
143
+ this.stats.partitionsPending++;
144
+ }
145
+
146
+ // Restore original config
147
+ this.resource.config.partitions = originalPartitions;
148
+ this.resource.config.asyncPartitions = originalAsyncPartitions;
149
+
150
+ return { success: true, data: result };
151
+
152
+ } catch (error) {
153
+ return { success: false, error };
154
+ }
155
+ }
156
+
157
+ /**
158
+ * Process partitions asynchronously in background
159
+ */
160
+ async processPartitionsAsync() {
161
+ if (this.partitionProcessor) return; // Already processing
162
+
163
+ this.partitionProcessor = setImmediate(async () => {
164
+ const batch = this.partitionQueue.splice(0, 100); // Process 100 at a time
165
+
166
+ if (batch.length === 0) {
167
+ this.partitionProcessor = null;
168
+ return;
169
+ }
170
+
171
+ // Create partitions in parallel with lower priority
172
+ await PromisePool
173
+ .for(batch)
174
+ .withConcurrency(10) // Lower concurrency for partitions
175
+ .process(async (item) => {
176
+ try {
177
+ await this.resource.createPartitionReferences(item.data);
178
+ this.stats.partitionsPending--;
179
+ } catch (err) {
180
+ // Silently handle partition errors
181
+ this.resource.emit('partitionIndexError', {
182
+ operation: 'bulk-insert',
183
+ error: err
184
+ });
185
+ }
186
+ });
187
+
188
+ // Continue processing if more partitions
189
+ if (this.partitionQueue.length > 0) {
190
+ this.processPartitionsAsync();
191
+ } else {
192
+ this.partitionProcessor = null;
193
+ }
194
+ });
195
+ }
196
+
197
+ /**
198
+ * Force flush all pending operations
199
+ */
200
+ async forceFlush() {
201
+ while (this.insertBuffer.length > 0 || this.isProcessing) {
202
+ await this.flush();
203
+ await new Promise(resolve => setTimeout(resolve, 10));
204
+ }
205
+ }
206
+
207
+ /**
208
+ * Get current statistics
209
+ */
210
+ getStats() {
211
+ return {
212
+ ...this.stats,
213
+ bufferSize: this.insertBuffer.length,
214
+ isProcessing: this.isProcessing,
215
+ throughput: this.stats.avgInsertTime > 0
216
+ ? Math.round(1000 / this.stats.avgInsertTime)
217
+ : 0 // inserts per second
218
+ };
219
+ }
220
+
221
+ /**
222
+ * Destroy and cleanup
223
+ */
224
+ destroy() {
225
+ clearTimeout(this.flushTimer);
226
+ this.insertBuffer = [];
227
+ this.partitionQueue = [];
228
+ }
229
+ }
230
+
231
/**
 * Stream-based inserter for maximum performance.
 *
 * Bypasses most of the S3DB insert pipeline (hooks, partitions and —
 * optionally — schema validation) and writes metadata-only objects
 * through the resource's client.
 */
export class StreamInserter {
  /**
   * @param {Object} resource - S3DB resource the inserts target.
   * @param {Object} [options]
   * @param {number} [options.concurrency=100] - Parallel writes in bulkInsert.
   * @param {boolean} [options.skipPartitions=true] - Default true (opt-out flag).
   * @param {boolean} [options.skipHooks=false]
   * @param {boolean} [options.skipValidation=false] - When true, skip schema mapping.
   */
  constructor(resource, options = {}) {
    this.resource = resource;
    this.concurrency = options.concurrency || 100;
    this.skipPartitions = options.skipPartitions !== false;
    this.skipHooks = options.skipHooks || false;
    this.skipValidation = options.skipValidation || false;
  }

  /**
   * Direct S3 write bypassing most S3DB overhead.
   * @param {Object} data - Record to persist; `id` is generated when absent.
   * @returns {Promise<{id: string, inserted: boolean}>}
   */
  async fastInsert(data) {
    const id = data.id || this.resource.generateId();
    const key = this.resource.getResourceKey(id);

    // Minimal processing: skip schema mapping entirely when validation is off.
    const metadata = this.skipValidation
      ? { id, ...data }
      : await this.resource.schema.mapper({ id, ...data });

    // BUG FIX: the original built a raw PutObjectCommand that was never
    // imported, so fastInsert threw ReferenceError on every call. Delegate
    // to the client's putObject instead, which issues the same S3 put.
    // NOTE(review): client.putObject prepends config.keyPrefix — confirm
    // getResourceKey returns a prefix-relative key so this doesn't double-prefix.
    await this.resource.client.putObject({
      key,
      metadata,
      body: '' // Empty body for speed; the record lives in object metadata
    });

    return { id, inserted: true };
  }

  /**
   * Bulk insert with maximum parallelism.
   * @returns {Promise<{success: number, failed: number, errors: Array}>}
   *   `errors` is truncated to the first 10 failures.
   */
  async bulkInsert(items) {
    const { results, errors } = await PromisePool
      .for(items)
      .withConcurrency(this.concurrency)
      .process(async (item) => {
        return await this.fastInsert(item);
      });

    return {
      success: results.length,
      failed: errors.length,
      errors: errors.slice(0, 10) // First 10 errors
    };
  }
}
@@ -0,0 +1,171 @@
1
+ import { EventEmitter } from 'events';
2
+
3
/**
 * Robust partition operation queue with retry and persistence.
 *
 * Operations are processed FIFO; failures are retried with exponential
 * backoff up to `maxRetries`, after which they land in the failures list
 * (and the persistence layer's DLQ, when one is configured).
 *
 * Events: 'success', 'retry', 'failure', 'recovered'.
 */
export class PartitionQueue extends EventEmitter {
  /**
   * @param {Object} [options]
   * @param {number} [options.maxRetries=3]
   * @param {number} [options.retryDelay=1000] - Base backoff delay in ms.
   * @param {Object} [options.persistence=null] - Could be filesystem/redis/etc.
   */
  constructor(options = {}) {
    super();
    this.maxRetries = options.maxRetries || 3;
    this.retryDelay = options.retryDelay || 1000;
    this.persistence = options.persistence || null;
    this.queue = [];
    this.processing = false;
    this.failures = [];
  }

  /**
   * Add a partition operation to the queue.
   * @param {{type: string, resource: Object, data: Object}} operation
   * @returns {Promise<string>} id of the queued item
   */
  async enqueue(operation) {
    const entry = {
      id: `${Date.now()}-${Math.random()}`,
      operation,
      retries: 0,
      createdAt: new Date(),
      status: 'pending'
    };

    this.queue.push(entry);

    // Persist if configured, so the item can survive a restart.
    if (this.persistence) {
      await this.persistence.save(entry);
    }

    // Kick off the processing loop if it is idle.
    if (!this.processing) {
      setImmediate(() => this.process());
    }

    return entry.id;
  }

  /**
   * Drain the queue, retrying failed items with exponential backoff.
   */
  async process() {
    if (this.processing || this.queue.length === 0) return;

    this.processing = true;

    while (this.queue.length > 0) {
      const entry = this.queue.shift();

      try {
        await this.executeOperation(entry);
        entry.status = 'completed';
        this.emit('success', entry);

        // Completed items no longer need durability.
        if (this.persistence) {
          await this.persistence.remove(entry.id);
        }
      } catch (error) {
        entry.retries += 1;
        entry.lastError = error;

        if (entry.retries < this.maxRetries) {
          // Exponential backoff: delay doubles on every retry.
          const backoff = this.retryDelay * 2 ** (entry.retries - 1);
          entry.status = 'retrying';

          setTimeout(() => {
            this.queue.push(entry);
            if (!this.processing) this.process();
          }, backoff);

          this.emit('retry', { item: entry, error, delay: backoff });
        } else {
          // Retries exhausted: park the item as a permanent failure.
          entry.status = 'failed';
          this.failures.push(entry);
          this.emit('failure', { item: entry, error });

          if (this.persistence) {
            await this.persistence.moveToDLQ(entry);
          }
        }
      }
    }

    this.processing = false;
  }

  /**
   * Dispatch a queued item to the matching resource method.
   * @throws {Error} for unrecognized operation types.
   */
  async executeOperation(entry) {
    const { type, resource, data } = entry.operation;

    if (type === 'create') return resource.createPartitionReferences(data);
    if (type === 'update') return resource.handlePartitionReferenceUpdates(data.original, data.updated);
    if (type === 'delete') return resource.deletePartitionReferences(data);
    throw new Error(`Unknown operation type: ${type}`);
  }

  /**
   * Reload pending items from persistence (e.g. after a restart) and
   * resume processing.
   */
  async recover() {
    if (!this.persistence) return;

    const pending = await this.persistence.getPending();
    this.queue.push(...pending);

    if (this.queue.length > 0) {
      this.emit('recovered', { count: this.queue.length });
      setImmediate(() => this.process());
    }
  }

  /**
   * Snapshot of queue health. failureRate is failures over
   * (pending + failures); 0 when both are empty.
   */
  getStats() {
    const tracked = this.queue.length + this.failures.length;
    return {
      pending: this.queue.length,
      failures: this.failures.length,
      processing: this.processing,
      failureRate: this.failures.length / tracked || 0
    };
  }
}
141
+
142
/**
 * Simple in-memory persistence (can be replaced with Redis, filesystem, etc).
 * Keeps pending items and a dead-letter queue in two Maps keyed by item id.
 * All methods are async to match the persistence interface PartitionQueue expects.
 */
export class InMemoryPersistence {
  constructor() {
    this.items = new Map();
    this.dlq = new Map();
  }

  /** Store (or overwrite) a pending item under its id. */
  async save(item) {
    this.items.set(item.id, item);
  }

  /** Drop a pending item by id; no-op when absent. */
  async remove(id) {
    this.items.delete(id);
  }

  /** Move an item from the pending store into the dead-letter queue. */
  async moveToDLQ(item) {
    this.items.delete(item.id);
    this.dlq.set(item.id, item);
  }

  /** All pending items, in insertion order. */
  async getPending() {
    return [...this.items.values()];
  }

  /** All dead-lettered items, in insertion order. */
  async getDLQ() {
    return [...this.dlq.values()];
  }
}
package/src/errors.js CHANGED
@@ -225,8 +225,16 @@ export function mapAwsError(err, context = {}) {
225
225
  return new MissingMetadata({ ...context, original: err, metadata, commandName, commandInput, suggestion });
226
226
  }
227
227
  // Outros mapeamentos podem ser adicionados aqui
228
- suggestion = 'Check the error details and AWS documentation.';
229
- return new UnknownError('Unknown error', { ...context, original: err, metadata, commandName, commandInput, suggestion });
228
+ // Incluir detalhes do erro original para facilitar debug
229
+ const errorDetails = [
230
+ `Unknown error: ${err.message || err.toString()}`,
231
+ err.code && `Code: ${err.code}`,
232
+ err.statusCode && `Status: ${err.statusCode}`,
233
+ err.stack && `Stack: ${err.stack.split('\n')[0]}`,
234
+ ].filter(Boolean).join(' | ');
235
+
236
+ suggestion = `Check the error details and AWS documentation. Original error: ${err.message || err.toString()}`;
237
+ return new UnknownError(errorDetails, { ...context, original: err, metadata, commandName, commandInput, suggestion });
230
238
  }
231
239
 
232
240
  export class ConnectionStringError extends S3dbError {
@@ -0,0 +1,96 @@
1
+ import { EventEmitter } from 'events';
2
+
3
/**
 * Base class for all partition drivers.
 * Defines the interface (queue/flush/initialize/shutdown) every driver
 * must implement, plus shared stats and operation dispatch.
 */
export class BasePartitionDriver extends EventEmitter {
  constructor(options = {}) {
    super();
    this.options = options;
    this.stats = {
      queued: 0,
      processed: 0,
      failed: 0,
      processing: 0
    };
  }

  /** Initialize the driver. Subclasses override when setup work is needed. */
  async initialize() {}

  /**
   * Queue a partition operation for processing. Must be overridden.
   * @param {Object} operation - The partition operation to queue.
   * @param {string} operation.type - 'create', 'update', or 'delete'.
   * @param {Object} operation.resource - The resource instance.
   * @param {Object} operation.data - The data for the operation.
   * @throws {Error} always, in this base class.
   */
  async queue(operation) {
    throw new Error('queue() must be implemented by subclass');
  }

  /**
   * Execute a single partition operation against its resource.
   * NOTE(review): on failure this emits an 'error' event before rethrowing;
   * EventEmitter turns an unlistened 'error' emit into its own throw —
   * confirm callers always attach an 'error' listener.
   */
  async processOperation(operation) {
    const { type, resource, data } = operation;

    this.stats.processing += 1;
    try {
      if (type === 'create') {
        await resource.createPartitionReferences(data.object);
      } else if (type === 'update') {
        await resource.handlePartitionReferenceUpdates(data.original, data.updated);
      } else if (type === 'delete') {
        await resource.deletePartitionReferences(data.object);
      } else {
        throw new Error(`Unknown partition operation type: ${type}`);
      }

      this.stats.processed += 1;
      this.emit('processed', operation);
    } catch (error) {
      this.stats.failed += 1;
      this.emit('error', { operation, error });
      throw error;
    } finally {
      this.stats.processing -= 1;
    }
  }

  /** Flush any pending operations. Subclasses override when buffering. */
  async flush() {}

  /** Shallow copy of the driver's counters. */
  getStats() {
    return { ...this.stats };
  }

  /** Flush outstanding work, then detach all event listeners. */
  async shutdown() {
    await this.flush();
    this.removeAllListeners();
  }
}
@@ -0,0 +1,60 @@
1
+ import { SyncPartitionDriver } from './sync-partition-driver.js';
2
+ import { MemoryPartitionDriver } from './memory-partition-driver.js';
3
+ import { SQSPartitionDriver } from './sqs-partition-driver.js';
4
+
5
/**
 * Partition driver factory.
 * Maps driver names to classes and builds configured instances; custom
 * drivers can be registered at runtime.
 */
export class PartitionDriverFactory {
  static drivers = {
    sync: SyncPartitionDriver,
    memory: MemoryPartitionDriver,
    sqs: SQSPartitionDriver
  };

  /**
   * Create a partition driver instance.
   * @param {string|Object} config - Driver name or configuration object.
   * @returns {BasePartitionDriver} Driver instance. Note: the instance is
   *   NOT initialized here — callers run `initialize()` themselves.
   * @throws {Error} when the named driver is not registered.
   */
  static create(config) {
    // Accept a bare driver name as shorthand for { driver: name }.
    const normalized = typeof config === 'string' ? { driver: config } : config;

    // Default to the in-memory driver when none is named.
    const driverName = normalized.driver || 'memory';

    const DriverClass = this.drivers[driverName];
    if (!DriverClass) {
      throw new Error(`Unknown partition driver: ${driverName}. Available: ${Object.keys(this.drivers).join(', ')}`);
    }

    return new DriverClass(normalized);
  }

  /**
   * Register a custom driver under a name (overwrites an existing entry).
   */
  static register(name, DriverClass) {
    this.drivers[name] = DriverClass;
  }

  /**
   * Names of all registered drivers.
   */
  static getAvailableDrivers() {
    return Object.keys(this.drivers);
  }
}
55
+
56
+ // Export individual drivers
57
+ export { BasePartitionDriver } from './base-partition-driver.js';
58
+ export { SyncPartitionDriver } from './sync-partition-driver.js';
59
+ export { MemoryPartitionDriver } from './memory-partition-driver.js';
60
+ export { SQSPartitionDriver } from './sqs-partition-driver.js';