s3db.js 9.3.0 → 10.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +71 -13
- package/dist/s3db.cjs.js +466 -8
- package/dist/s3db.cjs.js.map +1 -1
- package/dist/s3db.es.js +466 -9
- package/dist/s3db.es.js.map +1 -1
- package/package.json +1 -1
- package/src/client.class.js +2 -2
- package/src/concerns/high-performance-inserter.js +285 -0
- package/src/concerns/partition-queue.js +171 -0
- package/src/errors.js +10 -2
- package/src/partition-drivers/base-partition-driver.js +96 -0
- package/src/partition-drivers/index.js +60 -0
- package/src/partition-drivers/memory-partition-driver.js +274 -0
- package/src/partition-drivers/sqs-partition-driver.js +332 -0
- package/src/partition-drivers/sync-partition-driver.js +38 -0
- package/src/plugins/backup.plugin.js +1 -1
- package/src/plugins/backup.plugin.js.backup +1 -1
- package/src/plugins/eventual-consistency.plugin.js +609 -0
- package/src/plugins/index.js +1 -0
- package/PLUGINS.md +0 -5036
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
import { BasePartitionDriver } from './base-partition-driver.js';
|
|
2
|
+
import { PromisePool } from '@supercharge/promise-pool';
|
|
3
|
+
|
|
4
|
+
/**
 * In-memory partition driver with background processing.
 *
 * Queues partition operations in memory and processes them asynchronously
 * in batches. Fast and efficient for single-instance applications; state is
 * lost on process exit, so call `shutdown()` (which flushes) before exiting.
 */
export class MemoryPartitionDriver extends BasePartitionDriver {
  /**
   * @param {Object} [options]
   * @param {number} [options.batchSize=100] - Items processed per batch.
   * @param {number} [options.concurrency=10] - Parallel operations per batch.
   * @param {number} [options.flushInterval=1000] - Periodic flush in ms (0 disables the timer).
   * @param {number} [options.maxQueueSize=10000] - Hard cap on queued items.
   * @param {number} [options.maxRetries=3] - Attempts before an item is dropped as failed.
   * @param {boolean} [options.rejectOnFull] - Throw instead of waiting when the queue is full.
   */
  constructor(options = {}) {
    super(options);
    this.name = 'memory';

    // Configuration
    this.batchSize = options.batchSize || 100;
    this.concurrency = options.concurrency || 10;
    this.flushInterval = options.flushInterval || 1000;
    this.maxQueueSize = options.maxQueueSize || 10000;
    this.maxRetries = options.maxRetries || 3;

    // State.
    // BUGFIX: the pending-items array must NOT be named `queue` — an instance
    // field `this.queue = []` shadows the prototype method `queue(operation)`,
    // so `driver.queue(op)` would throw "queue is not a function".
    this.pending = [];
    this.isProcessing = false;
    this.flushTimer = null;
    this.retryQueue = [];
  }

  /** Start the background processor (periodic flush timer). */
  async initialize() {
    this.startProcessor();
  }

  /**
   * Add an operation to the in-memory queue.
   *
   * When the queue is full: throws if `options.rejectOnFull` is set,
   * otherwise waits (up to 30s) for space to free up.
   *
   * @param {Object} operation - Partition operation (shape defined by BasePartitionDriver.processOperation).
   * @returns {Promise<{success: boolean, driver: string, queuePosition: number, queueId: string}>}
   */
  async queue(operation) {
    // Enforce the queue size limit before accepting the item.
    if (this.pending.length >= this.maxQueueSize) {
      const error = new Error(`Memory queue full (${this.maxQueueSize} items)`);
      this.emit('queueFull', { operation, queueSize: this.pending.length });

      if (this.options.rejectOnFull) {
        throw error;
      }

      // Back-pressure: block until space frees up (or timeout).
      await this.waitForSpace();
    }

    // Wrap the operation with bookkeeping metadata for retries/tracing.
    const queueItem = {
      ...operation,
      id: `${Date.now()}-${Math.random()}`,
      queuedAt: new Date(),
      attempts: 0
    };

    this.pending.push(queueItem);
    this.stats.queued++;

    // Auto-flush as soon as a full batch is available.
    if (this.pending.length >= this.batchSize) {
      this.triggerFlush();
    }

    return {
      success: true,
      driver: 'memory',
      queuePosition: this.pending.length,
      queueId: queueItem.id
    };
  }

  /**
   * Start the background processor: a periodic timer that kicks off
   * processing whenever items are pending and no batch is in flight.
   */
  startProcessor() {
    if (this.flushInterval > 0) {
      this.flushTimer = setInterval(() => {
        if (this.pending.length > 0 && !this.isProcessing) {
          this.processQueue();
        }
      }, this.flushInterval);
    }
  }

  /** Trigger an immediate (next-tick) flush unless a batch is already running. */
  triggerFlush() {
    if (!this.isProcessing) {
      setImmediate(() => this.processQueue());
    }
  }

  /**
   * Process queued operations in batches with bounded concurrency.
   * Re-entrancy is guarded by `isProcessing`; chains itself until drained.
   */
  async processQueue() {
    if (this.isProcessing || this.pending.length === 0) return;

    this.isProcessing = true;

    try {
      // Take one batch off the front of the queue.
      const batch = this.pending.splice(0, this.batchSize);

      // Process in parallel with concurrency control. Errors are converted
      // to result objects by handleError, so `errors` only holds unexpected
      // throws from the error handler itself.
      const { results, errors } = await PromisePool
        .for(batch)
        .withConcurrency(this.concurrency)
        .process(async (item) => {
          try {
            await this.processOperation(item);
            return { success: true, item };
          } catch (error) {
            return this.handleError(item, error);
          }
        });

      const successful = results.filter(r => r.success);
      this.emit('batchProcessed', {
        processed: successful.length,
        failed: errors.length,
        retried: results.filter(r => r.retried).length
      });

    } finally {
      this.isProcessing = false;

      // Keep draining if more items arrived while we were busy.
      if (this.pending.length > 0) {
        setImmediate(() => this.processQueue());
      }

      // Fold any matured retries back into the main queue.
      if (this.retryQueue.length > 0) {
        this.processRetryQueue();
      }
    }
  }

  /**
   * Handle a processing error with exponential-backoff retry.
   *
   * @param {Object} item - The failed queue item (mutated: attempts/lastError).
   * @param {Error} error - The failure cause.
   * @returns {{success: boolean, retried: boolean, item: Object}}
   */
  handleError(item, error) {
    item.attempts++;
    item.lastError = error;

    if (item.attempts < this.maxRetries) {
      // Exponential backoff capped at 30s: 1s, 2s, 4s, ...
      const delay = Math.min(1000 * Math.pow(2, item.attempts - 1), 30000);

      setTimeout(() => {
        this.retryQueue.push(item);
        if (!this.isProcessing) {
          this.processRetryQueue();
        }
      }, delay);

      this.emit('retry', { item, error, attempt: item.attempts, delay });
      return { success: false, retried: true, item };
    } else {
      // Max retries exceeded — drop the item and report it.
      this.emit('failed', { item, error, attempts: item.attempts });
      return { success: false, retried: false, item };
    }
  }

  /** Move matured retry items back to the front of the main queue and flush. */
  async processRetryQueue() {
    if (this.retryQueue.length === 0) return;

    const retryItems = this.retryQueue.splice(0, this.batchSize);
    this.pending.unshift(...retryItems);

    this.triggerFlush();
  }

  /**
   * Block until the queue has free space, polling every 100ms.
   * @throws {Error} After 30s without space.
   */
  async waitForSpace() {
    const checkInterval = 100;
    const maxWait = 30000;
    const startTime = Date.now();

    while (this.pending.length >= this.maxQueueSize) {
      if (Date.now() - startTime > maxWait) {
        throw new Error('Timeout waiting for queue space');
      }

      await new Promise(resolve => setTimeout(resolve, checkInterval));
    }
  }

  /** Force-flush: loop until both queues are empty and no batch is in flight. */
  async flush() {
    while (this.pending.length > 0 || this.retryQueue.length > 0 || this.isProcessing) {
      await this.processQueue();
      await new Promise(resolve => setTimeout(resolve, 10));
    }
  }

  /**
   * Detailed statistics: base driver stats plus queue depths, processing
   * state, and a rough memory estimate.
   */
  getStats() {
    return {
      ...super.getStats(),
      queueLength: this.pending.length,
      retryQueueLength: this.retryQueue.length,
      isProcessing: this.isProcessing,
      memoryUsage: this.estimateMemoryUsage()
    };
  }

  /**
   * Estimate memory usage of the queues.
   * Rough heuristic: ~1KB per queued item (not measured).
   * @returns {{bytes: number, mb: string}}
   */
  estimateMemoryUsage() {
    const bytes = (this.pending.length + this.retryQueue.length) * 1024;
    return {
      bytes,
      mb: (bytes / 1024 / 1024).toFixed(2)
    };
  }

  /** Stop the timer, flush everything, clear state, then shut down the base. */
  async shutdown() {
    if (this.flushTimer) {
      clearInterval(this.flushTimer);
      this.flushTimer = null;
    }

    // Drain remaining items before discarding state.
    await this.flush();

    this.pending = [];
    this.retryQueue = [];

    await super.shutdown();
  }

  /** Static description of this driver plus live stats. */
  getInfo() {
    return {
      name: this.name,
      mode: 'asynchronous',
      description: 'In-memory queue with background processing',
      config: {
        batchSize: this.batchSize,
        concurrency: this.concurrency,
        flushInterval: this.flushInterval,
        maxQueueSize: this.maxQueueSize,
        maxRetries: this.maxRetries
      },
      stats: this.getStats()
    };
  }
}
|
|
@@ -0,0 +1,332 @@
|
|
|
1
|
+
import { BasePartitionDriver } from './base-partition-driver.js';
|
|
2
|
+
import { SQSClient, SendMessageCommand, ReceiveMessageCommand, DeleteMessageCommand, GetQueueAttributesCommand } from '@aws-sdk/client-sqs';
|
|
3
|
+
|
|
4
|
+
/**
 * SQS-based partition driver for distributed processing.
 *
 * Sends partition operations to an SQS queue for processing by workers.
 * Ideal for high-volume, distributed systems. Can also run as a worker
 * (`isWorker: true`) that long-polls the queue and processes messages.
 */
export class SQSPartitionDriver extends BasePartitionDriver {
  /**
   * @param {Object} options
   * @param {string} options.queueUrl - SQS queue URL (required).
   * @param {string} [options.region='us-east-1'] - AWS region.
   * @param {Object} [options.credentials] - AWS credentials (defaults to SDK chain).
   * @param {string} [options.dlqUrl] - Dead Letter Queue URL (informational).
   * @param {string} [options.messageGroupId='s3db-partitions'] - FIFO group id.
   * @param {number} [options.visibilityTimeout=300] - Seconds a received message stays hidden.
   * @param {number} [options.batchSize=10] - Buffered messages per flush (SQS max batch).
   * @param {boolean} [options.isWorker=false] - Start polling workers on initialize().
   * @param {number} [options.workerConcurrency=5] - Number of concurrent pollers.
   * @param {number} [options.pollInterval=1000] - Backoff (ms) after a poll error.
   * @throws {Error} When queueUrl is missing.
   */
  constructor(options = {}) {
    super(options);
    this.name = 'sqs';

    // SQS Configuration
    this.queueUrl = options.queueUrl;
    if (!this.queueUrl) {
      throw new Error('SQS queue URL is required for SQSPartitionDriver');
    }

    this.region = options.region || 'us-east-1';
    this.credentials = options.credentials;
    this.dlqUrl = options.dlqUrl; // Dead Letter Queue
    this.messageGroupId = options.messageGroupId || 's3db-partitions';
    this.visibilityTimeout = options.visibilityTimeout || 300; // 5 minutes
    this.batchSize = options.batchSize || 10; // SQS max batch size

    // Worker configuration
    this.isWorker = options.isWorker || false;
    this.workerConcurrency = options.workerConcurrency || 5;
    this.pollInterval = options.pollInterval || 1000;

    // Initialize SQS client
    this.sqsClient = new SQSClient({
      region: this.region,
      credentials: this.credentials
    });

    this.workerRunning = false;
    this.messageBuffer = [];
    this.flushTimeout = null; // pending setTimeout handle for a scheduled buffer flush
  }

  /** Start the polling workers when configured as a worker. */
  async initialize() {
    if (this.isWorker) {
      await this.startWorker();
    }
  }

  /**
   * Queue a partition operation for SQS delivery.
   * Messages are buffered and sent in batches of `batchSize`, or after a
   * 100ms debounce, whichever comes first.
   *
   * @param {Object} operation - { type, resource, data }.
   * @returns {Promise<{success: boolean, driver: string, messageId: string, queueUrl: string}>}
   */
  async queue(operation) {
    try {
      const message = {
        id: `${Date.now()}-${Math.random()}`,
        timestamp: new Date().toISOString(),
        operation: {
          type: operation.type,
          resourceName: operation.resource.name,
          data: this.serializeData(operation.data)
        }
      };

      // Buffer messages for batch sending.
      this.messageBuffer.push(message);
      this.stats.queued++;

      if (this.messageBuffer.length >= this.batchSize) {
        await this.flushMessages();
      } else if (!this.flushTimeout) {
        // Debounce: schedule a flush if one is not already pending.
        this.flushTimeout = setTimeout(() => this.flushMessages(), 100);
      }

      return {
        success: true,
        driver: 'sqs',
        messageId: message.id,
        queueUrl: this.queueUrl
      };

    } catch (error) {
      this.emit('error', { operation, error });
      throw error;
    }
  }

  /**
   * Flush up to one batch of buffered messages to SQS.
   * On failure, messages are returned to the buffer and the error rethrown.
   */
  async flushMessages() {
    if (this.messageBuffer.length === 0) return;

    clearTimeout(this.flushTimeout);
    this.flushTimeout = null;

    const messages = this.messageBuffer.splice(0, this.batchSize);

    try {
      // FIFO queues require group + deduplication ids.
      const isFifo = this.queueUrl.includes('.fifo');

      for (const message of messages) {
        const params = {
          QueueUrl: this.queueUrl,
          MessageBody: JSON.stringify(message),
          MessageAttributes: {
            Type: {
              DataType: 'String',
              StringValue: message.operation.type
            },
            Resource: {
              DataType: 'String',
              StringValue: message.operation.resourceName
            }
          }
        };

        if (isFifo) {
          params.MessageGroupId = this.messageGroupId;
          params.MessageDeduplicationId = message.id;
        }

        await this.sqsClient.send(new SendMessageCommand(params));
      }

      this.emit('messagesSent', { count: messages.length });

      // BUGFIX: only one batch is spliced per flush; if more than a batch was
      // buffered, keep draining instead of leaving the remainder stranded
      // until the next queue() call.
      if (this.messageBuffer.length > 0 && !this.flushTimeout) {
        this.flushTimeout = setTimeout(() => this.flushMessages(), 0);
      }

    } catch (error) {
      // Return messages to the buffer for a later retry.
      this.messageBuffer.unshift(...messages);
      this.emit('sendError', { error, messages: messages.length });
      throw error;
    }
  }

  /** Start `workerConcurrency` concurrent long-polling loops (idempotent). */
  async startWorker() {
    if (this.workerRunning) return;

    this.workerRunning = true;
    this.emit('workerStarted', { concurrency: this.workerConcurrency });

    for (let i = 0; i < this.workerConcurrency; i++) {
      // Intentionally not awaited: each poller runs until stopWorker().
      this.pollMessages(i);
    }
  }

  /**
   * Long-poll SQS for messages until the worker is stopped.
   * Poll errors are emitted and followed by a `pollInterval` backoff.
   * @param {number} workerId - Identifier of this polling loop (for events).
   */
  async pollMessages(workerId) {
    while (this.workerRunning) {
      try {
        const params = {
          QueueUrl: this.queueUrl,
          MaxNumberOfMessages: 10,
          WaitTimeSeconds: 20, // Long polling
          VisibilityTimeout: this.visibilityTimeout,
          MessageAttributeNames: ['All']
        };

        const response = await this.sqsClient.send(new ReceiveMessageCommand(params));

        if (response.Messages && response.Messages.length > 0) {
          for (const message of response.Messages) {
            await this.processMessage(message, workerId);
          }
        }

      } catch (error) {
        this.emit('pollError', { workerId, error });
        // Back off before retrying the poll.
        await new Promise(resolve => setTimeout(resolve, this.pollInterval));
      }
    }
  }

  /**
   * Process a single SQS message and delete it on success.
   * On failure the message is NOT deleted: it becomes visible again after
   * VisibilityTimeout and eventually moves to the DLQ if one is configured.
   * @param {Object} message - Raw SQS message (Body, MessageId, ReceiptHandle).
   * @param {number} workerId - Identifier of the processing worker (for events).
   */
  async processMessage(message, workerId) {
    try {
      const data = JSON.parse(message.Body);
      const operation = {
        type: data.operation.type,
        data: this.deserializeData(data.operation.data)
      };

      // Note: the actual resource instance is needed to process the
      // operation — this is typically handled by a separate worker service.
      this.emit('processingMessage', { workerId, messageId: message.MessageId });

      // In a real implementation, you'd look up the resource and process:
      // await this.processOperation(operation);

      // Delete message from queue after successful processing
      await this.sqsClient.send(new DeleteMessageCommand({
        QueueUrl: this.queueUrl,
        ReceiptHandle: message.ReceiptHandle
      }));

      this.stats.processed++;
      this.emit('messageProcessed', { workerId, messageId: message.MessageId });

    } catch (error) {
      this.stats.failed++;
      this.emit('processError', { workerId, error, messageId: message.MessageId });
    }
  }

  /**
   * Serialize data for SQS transport: strips functions, base64-encodes
   * Buffers, and removes anything JSON cannot represent.
   */
  serializeData(data) {
    return JSON.parse(JSON.stringify(data, (key, value) => {
      if (typeof value === 'function') return undefined;
      if (value instanceof Buffer) return value.toString('base64');
      return value;
    }));
  }

  /**
   * Deserialize data received from SQS.
   * Currently a pass-through (Buffers stay base64 — TODO confirm callers
   * expect that).
   */
  deserializeData(data) {
    return data;
  }

  /** Signal all polling loops to exit after their current iteration. */
  async stopWorker() {
    this.workerRunning = false;
    this.emit('workerStopped');
  }

  /** Force-flush all pending buffered messages. */
  async flush() {
    await this.flushMessages();
  }

  /**
   * Fetch approximate queue depth metrics from SQS.
   * @returns {Promise<{messagesAvailable: number, messagesInFlight: number, messagesDelayed: number}|null>}
   *   null when the attributes call fails (e.g. missing permissions).
   */
  async getQueueMetrics() {
    try {
      const { Attributes } = await this.sqsClient.send(new GetQueueAttributesCommand({
        QueueUrl: this.queueUrl,
        AttributeNames: [
          'ApproximateNumberOfMessages',
          'ApproximateNumberOfMessagesNotVisible',
          'ApproximateNumberOfMessagesDelayed'
        ]
      }));

      return {
        messagesAvailable: parseInt(Attributes.ApproximateNumberOfMessages || 0, 10),
        messagesInFlight: parseInt(Attributes.ApproximateNumberOfMessagesNotVisible || 0, 10),
        messagesDelayed: parseInt(Attributes.ApproximateNumberOfMessagesDelayed || 0, 10)
      };
    } catch (error) {
      return null;
    }
  }

  /**
   * Detailed statistics: base stats, local buffer state, and (async) live
   * SQS queue metrics.
   */
  async getStats() {
    const baseStats = super.getStats();
    const queueMetrics = await this.getQueueMetrics();

    return {
      ...baseStats,
      bufferLength: this.messageBuffer.length,
      isWorker: this.isWorker,
      workerRunning: this.workerRunning,
      queue: queueMetrics
    };
  }

  /** Stop workers, flush buffered messages, clear state, shut down the base. */
  async shutdown() {
    await this.stopWorker();

    await this.flush();

    this.messageBuffer = [];

    await super.shutdown();
  }

  /**
   * Static description of this driver plus synchronous local stats.
   * BUGFIX: this driver's getStats() is async (it polls SQS); embedding its
   * return value here would leak a pending Promise into the info object, so
   * we expose the synchronous local stats instead. Use `await getStats()`
   * for live queue metrics.
   */
  getInfo() {
    return {
      name: this.name,
      mode: 'distributed',
      description: 'SQS-based queue for distributed partition processing',
      config: {
        queueUrl: this.queueUrl,
        region: this.region,
        dlqUrl: this.dlqUrl,
        isWorker: this.isWorker,
        workerConcurrency: this.workerConcurrency,
        visibilityTimeout: this.visibilityTimeout
      },
      stats: {
        ...super.getStats(),
        bufferLength: this.messageBuffer.length,
        isWorker: this.isWorker,
        workerRunning: this.workerRunning
      }
    };
  }
}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { BasePartitionDriver } from './base-partition-driver.js';
|
|
2
|
+
|
|
3
|
+
/**
 * Synchronous partition driver.
 *
 * Creates partitions immediately during insert/update/delete; the caller's
 * operation blocks until the partition work completes, and fails if it fails.
 * Use this when data consistency is critical.
 */
export class SyncPartitionDriver extends BasePartitionDriver {
  constructor(options = {}) {
    super(options);
    this.name = 'sync';
  }

  /**
   * Process a partition operation synchronously.
   *
   * Any failure propagates to the caller so the main operation fails with
   * it (the original wrapped this in a try/catch that only re-threw — a
   * no-op removed here).
   *
   * @param {Object} operation - Partition operation (shape defined by BasePartitionDriver.processOperation).
   * @returns {Promise<{success: boolean, driver: string}>}
   */
  async queue(operation) {
    this.stats.queued++;

    // Process immediately and wait for completion; errors propagate.
    await this.processOperation(operation);
    return { success: true, driver: 'sync' };
  }

  /** Static description of this driver plus live stats. */
  getInfo() {
    return {
      name: this.name,
      mode: 'synchronous',
      description: 'Processes partitions immediately, blocking the main operation',
      stats: this.getStats()
    };
  }
}
|
|
@@ -101,7 +101,7 @@ export class BackupPlugin extends Plugin {
|
|
|
101
101
|
include: options.include || null,
|
|
102
102
|
exclude: options.exclude || [],
|
|
103
103
|
backupMetadataResource: options.backupMetadataResource || 'backup_metadata',
|
|
104
|
-
tempDir: options.tempDir || '
|
|
104
|
+
tempDir: options.tempDir || '/tmp/s3db/backups',
|
|
105
105
|
verbose: options.verbose || false,
|
|
106
106
|
|
|
107
107
|
// Hooks
|
|
@@ -98,7 +98,7 @@ export class BackupPlugin extends Plugin {
|
|
|
98
98
|
include: options.include || null,
|
|
99
99
|
exclude: options.exclude || [],
|
|
100
100
|
backupMetadataResource: options.backupMetadataResource || 'backup_metadata',
|
|
101
|
-
tempDir: options.tempDir || '
|
|
101
|
+
tempDir: options.tempDir || '/tmp/s3db/backups',
|
|
102
102
|
verbose: options.verbose || false,
|
|
103
103
|
onBackupStart: options.onBackupStart || null,
|
|
104
104
|
onBackupComplete: options.onBackupComplete || null,
|