s3db.js 6.2.0 → 7.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/PLUGINS.md +2724 -0
- package/README.md +372 -469
- package/UNLICENSE +24 -0
- package/dist/s3db.cjs.js +30057 -18387
- package/dist/s3db.cjs.min.js +1 -1
- package/dist/s3db.d.ts +373 -72
- package/dist/s3db.es.js +30043 -18384
- package/dist/s3db.es.min.js +1 -1
- package/dist/s3db.iife.js +29730 -18061
- package/dist/s3db.iife.min.js +1 -1
- package/package.json +44 -69
- package/src/behaviors/body-only.js +110 -0
- package/src/behaviors/body-overflow.js +153 -0
- package/src/behaviors/enforce-limits.js +195 -0
- package/src/behaviors/index.js +39 -0
- package/src/behaviors/truncate-data.js +204 -0
- package/src/behaviors/user-managed.js +147 -0
- package/src/client.class.js +515 -0
- package/src/concerns/base62.js +61 -0
- package/src/concerns/calculator.js +204 -0
- package/src/concerns/crypto.js +142 -0
- package/src/concerns/id.js +8 -0
- package/src/concerns/index.js +5 -0
- package/src/concerns/try-fn.js +151 -0
- package/src/connection-string.class.js +75 -0
- package/src/database.class.js +599 -0
- package/src/errors.js +261 -0
- package/src/index.js +17 -0
- package/src/plugins/audit.plugin.js +442 -0
- package/src/plugins/cache/cache.class.js +53 -0
- package/src/plugins/cache/index.js +6 -0
- package/src/plugins/cache/memory-cache.class.js +164 -0
- package/src/plugins/cache/s3-cache.class.js +189 -0
- package/src/plugins/cache.plugin.js +275 -0
- package/src/plugins/consumers/index.js +24 -0
- package/src/plugins/consumers/rabbitmq-consumer.js +56 -0
- package/src/plugins/consumers/sqs-consumer.js +102 -0
- package/src/plugins/costs.plugin.js +81 -0
- package/src/plugins/fulltext.plugin.js +473 -0
- package/src/plugins/index.js +12 -0
- package/src/plugins/metrics.plugin.js +603 -0
- package/src/plugins/plugin.class.js +210 -0
- package/src/plugins/plugin.obj.js +13 -0
- package/src/plugins/queue-consumer.plugin.js +134 -0
- package/src/plugins/replicator.plugin.js +769 -0
- package/src/plugins/replicators/base-replicator.class.js +85 -0
- package/src/plugins/replicators/bigquery-replicator.class.js +328 -0
- package/src/plugins/replicators/index.js +44 -0
- package/src/plugins/replicators/postgres-replicator.class.js +427 -0
- package/src/plugins/replicators/s3db-replicator.class.js +352 -0
- package/src/plugins/replicators/sqs-replicator.class.js +427 -0
- package/src/resource.class.js +2626 -0
- package/src/s3db.d.ts +1263 -0
- package/src/schema.class.js +706 -0
- package/src/stream/index.js +16 -0
- package/src/stream/resource-ids-page-reader.class.js +10 -0
- package/src/stream/resource-ids-reader.class.js +63 -0
- package/src/stream/resource-reader.class.js +81 -0
- package/src/stream/resource-writer.class.js +92 -0
- package/src/validator.class.js +97 -0
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
import { calculateTotalSize, calculateAttributeSizes, calculateUTF8Bytes } from '../concerns/calculator.js';
|
|
2
|
+
import { calculateEffectiveLimit } from '../concerns/calculator.js';
|
|
3
|
+
import { S3_METADATA_LIMIT_BYTES } from './enforce-limits.js';
|
|
4
|
+
|
|
5
|
+
// Metadata key written alongside the data when any attribute had to be truncated.
const TRUNCATED_FLAG = '$truncated';
// Flag values are stored as strings because S3 metadata values are strings.
const TRUNCATED_FLAG_VALUE = 'true';
// UTF-8 byte cost of the flag key + value pair; reserved up front so that
// adding the flag never pushes the record back over the metadata limit.
const TRUNCATED_FLAG_BYTES = calculateUTF8Bytes(TRUNCATED_FLAG) + calculateUTF8Bytes(TRUNCATED_FLAG_VALUE);
|
|
8
|
+
|
|
9
|
+
/**
|
|
10
|
+
* Data Truncate Behavior Configuration Documentation
|
|
11
|
+
*
|
|
12
|
+
* The `truncate-data` behavior optimizes metadata usage by sorting attributes by size
|
|
13
|
+
* in ascending order and truncating the last attribute that fits within the available
|
|
14
|
+
* space. This ensures all data stays in metadata for fast access while respecting
|
|
15
|
+
* S3 metadata size limits.
|
|
16
|
+
*
|
|
17
|
+
* ## Purpose & Use Cases
|
|
18
|
+
* - When you need fast access to all data (no body reads required)
|
|
19
|
+
* - For objects that slightly exceed metadata limits
|
|
20
|
+
* - When data loss through truncation is acceptable
|
|
21
|
+
* - For frequently accessed data where performance is critical
|
|
22
|
+
*
|
|
23
|
+
* ## How It Works
|
|
24
|
+
* 1. Calculates the size of each attribute
|
|
25
|
+
* 2. Sorts attributes by size in ascending order (smallest first)
|
|
26
|
+
* 3. Fills metadata with small attributes until limit is approached
|
|
27
|
+
* 4. Truncates the last attribute that fits to maximize data retention
|
|
28
|
+
* 5. Adds a `$truncated` flag to indicate truncation occurred
|
|
29
|
+
*
|
|
30
|
+
* ## Performance Characteristics
|
|
31
|
+
* - Fastest possible access (all data in metadata)
|
|
32
|
+
* - No body reads required
|
|
33
|
+
* - Potential data loss through truncation
|
|
34
|
+
* - Optimal for frequently accessed data
|
|
35
|
+
*
|
|
36
|
+
* @example
|
|
37
|
+
* // Create a resource with truncate-data behavior
|
|
38
|
+
* const resource = await db.createResource({
|
|
39
|
+
* name: 'fast_access_data',
|
|
40
|
+
* attributes: { ... },
|
|
41
|
+
* behavior: 'truncate-data'
|
|
42
|
+
* });
|
|
43
|
+
*
|
|
44
|
+
* // Small fields stay intact, large fields get truncated
|
|
45
|
+
* const doc = await resource.insert({
|
|
46
|
+
* id: 'doc123', // Small -> intact
|
|
47
|
+
* title: 'Short Title', // Small -> intact
|
|
48
|
+
* content: 'Very long...', // Large -> truncated
|
|
49
|
+
* metadata: { ... } // Large -> truncated
|
|
50
|
+
* });
|
|
51
|
+
*
|
|
52
|
+
* ## Comparison to Other Behaviors
|
|
53
|
+
* | Behavior | Metadata Usage | Body Usage | Size Limits | Performance |
|
|
54
|
+
* |------------------|----------------|------------|-------------|-------------|
|
|
55
|
+
* | truncate-data | All (truncated)| None | 2KB metadata | Fast reads |
|
|
56
|
+
* | body-overflow | Optimized | Overflow | 2KB metadata | Balanced |
|
|
57
|
+
* | body-only | Minimal (_v) | All data | 5TB | Slower reads |
|
|
58
|
+
* | enforce-limits | All (limited) | None | 2KB metadata | Fast reads |
|
|
59
|
+
* | user-managed | All (unlimited)| None | S3 limit | Fast reads |
|
|
60
|
+
*
|
|
61
|
+
* @typedef {Object} DataTruncateBehaviorConfig
|
|
62
|
+
* @property {boolean} [enabled=true] - Whether the behavior is active
|
|
63
|
+
* @property {string} [truncateIndicator='...'] - String to append when truncating
|
|
64
|
+
* @property {string[]} [priorityFields] - Fields that should not be truncated
|
|
65
|
+
* @property {boolean} [preserveStructure=true] - Whether to preserve JSON structure
|
|
66
|
+
*/
|
|
67
|
+
/**
 * Insert hook for the truncate-data behavior.
 *
 * Packs as many attributes as possible (smallest first) into S3 metadata,
 * truncating the first attribute that does not fit, and marks the record
 * with the `$truncated` flag when anything was cut. The full, untruncated
 * mapped data is still serialized into the object body.
 *
 * @param {Object} args
 * @param {Object} args.resource - Resource instance (provides version/config for limit calc)
 * @param {Object} args.data - Validated input data (id is read for overhead calc)
 * @param {Object} args.mappedData - Attribute map destined for S3 metadata
 * @param {Object} [args.originalData] - Raw caller-supplied data (unused here; kept for hook-signature parity)
 * @returns {Promise<{mappedData: Object, body: string}>}
 */
export async function handleInsert({ resource, data, mappedData, originalData }) {
  const effectiveLimit = calculateEffectiveLimit({
    s3Limit: S3_METADATA_LIMIT_BYTES,
    systemConfig: {
      version: resource.version,
      timestamps: resource.config.timestamps,
      id: data.id
    }
  });

  const attributeSizes = calculateAttributeSizes(mappedData);
  // Smallest attributes first maximizes how many survive intact.
  const sortedFields = Object.entries(attributeSizes)
    .sort(([, a], [, b]) => a - b);

  const resultFields = {};
  let currentSize = 0;
  let truncated = false;

  // Always include version field first
  if (mappedData._v) {
    resultFields._v = mappedData._v;
    currentSize += attributeSizes._v;
  }

  // Add fields to metadata until we reach the limit
  for (const [fieldName, size] of sortedFields) {
    if (fieldName === '_v') continue;

    const fieldValue = mappedData[fieldName];
    // Until the first truncation happens, reserve room for the flag pair.
    const spaceNeeded = size + (truncated ? 0 : TRUNCATED_FLAG_BYTES);

    if (currentSize + spaceNeeded <= effectiveLimit) {
      // Field fits completely
      resultFields[fieldName] = fieldValue;
      currentSize += size;
    } else {
      // Field needs to be truncated
      const availableSpace = effectiveLimit - currentSize - (truncated ? 0 : TRUNCATED_FLAG_BYTES);
      if (availableSpace > 0) {
        // We can fit part of this field
        const truncatedValue = truncateValue(fieldValue, availableSpace);
        resultFields[fieldName] = truncatedValue;
        truncated = true;
        currentSize += calculateUTF8Bytes(truncatedValue);
      } else {
        // Field doesn't fit at all, but keep it as empty string
        resultFields[fieldName] = '';
        truncated = true;
      }
      // Stop processing - we've reached the limit
      break;
    }
  }

  // Verify we're within limits and adjust if necessary
  let finalSize = calculateTotalSize(resultFields) + (truncated ? TRUNCATED_FLAG_BYTES : 0);

  // If still over the limit, empty fields one by one from the end.
  // BUGFIX: the previous loop recomputed Object.keys() each pass and always
  // picked the same last field, so when emptying it was not enough the loop
  // never terminated. Walking an index backwards guarantees termination.
  const emptyableFields = Object.keys(resultFields)
    .filter((f) => f !== '_v' && f !== TRUNCATED_FLAG);
  for (let i = emptyableFields.length - 1; i >= 0 && finalSize > effectiveLimit; i--) {
    resultFields[emptyableFields[i]] = '';
    truncated = true;
    finalSize = calculateTotalSize(resultFields) + TRUNCATED_FLAG_BYTES;
  }

  if (truncated) {
    resultFields[TRUNCATED_FLAG] = TRUNCATED_FLAG_VALUE;
  }

  // Body keeps the complete mapped data so nothing is irrecoverably lost.
  return { mappedData: resultFields, body: JSON.stringify(mappedData) };
}
|
|
147
|
+
|
|
148
|
+
/**
 * Update hook for the truncate-data behavior.
 * Updates are handled exactly like inserts: the metadata map is rebuilt
 * and re-truncated from the new mapped data.
 */
export async function handleUpdate({ resource, id, data, mappedData, originalData }) {
  const insertArgs = { resource, data, mappedData, originalData };
  return handleInsert(insertArgs);
}
|
|
151
|
+
|
|
152
|
+
/**
 * Upsert hook for the truncate-data behavior.
 * Delegates to handleInsert, like handleUpdate does.
 * FIX: now forwards `originalData` as well, for consistency with
 * handleUpdate (previously it was silently dropped). Callers that never
 * passed it are unaffected (it is simply undefined).
 */
export async function handleUpsert({ resource, id, data, mappedData, originalData }) {
  return handleInsert({ resource, data, mappedData, originalData });
}
|
|
155
|
+
|
|
156
|
+
/**
 * Read hook for the truncate-data behavior.
 * Every stored attribute already lives in metadata, so the record is
 * returned exactly as received — no body processing is required.
 */
export async function handleGet({ resource, metadata, body }) {
  return { metadata, body };
}
|
|
160
|
+
|
|
161
|
+
/**
 * Truncate a value to fit within the specified byte limit.
 * Strings are truncated directly; objects/arrays are truncated through
 * their JSON serialization; any other primitive is stringified first.
 * @param {any} value - The value to truncate
 * @param {number} maxBytes - Maximum bytes allowed
 * @returns {any} - Truncated value
 */
function truncateValue(value, maxBytes) {
  if (typeof value === 'string') {
    return truncateString(value, maxBytes);
  }
  // Non-null objects are represented as (possibly truncated) JSON text;
  // numbers, booleans, null, etc. fall back to their string form.
  const asString = value !== null && typeof value === 'object'
    ? JSON.stringify(value)
    : String(value);
  return truncateString(asString, maxBytes);
}
|
|
180
|
+
|
|
181
|
+
/**
 * Truncate a string so its UTF-8 encoding fits within a byte limit.
 *
 * FIXES over the previous version:
 * - Uses binary search over prefix lengths instead of decrementing one
 *   character at a time (the old approach re-encoded the whole prefix on
 *   every step: O(n^2) worst case for large over-limit strings).
 *   Binary search is valid because the encoded byte length of a prefix
 *   is non-decreasing in its character length.
 * - Never cuts a surrogate pair in half: the old code could return a
 *   lone high surrogate, which TextEncoder encodes as U+FFFD garbage.
 *
 * No ellipsis is appended (matches prior behavior).
 *
 * @param {string} str - String to truncate
 * @param {number} maxBytes - Maximum bytes allowed
 * @returns {string} - Truncated string
 */
function truncateString(str, maxBytes) {
  const encoder = new TextEncoder();
  if (encoder.encode(str).length <= maxBytes) {
    return str;
  }
  if (maxBytes <= 0) {
    return '';
  }
  // Find the longest prefix (in UTF-16 code units) that fits.
  let lo = 0;
  let hi = str.length - 1; // the full string is known not to fit
  while (lo < hi) {
    const mid = lo + Math.ceil((hi - lo) / 2);
    if (encoder.encode(str.substring(0, mid)).length <= maxBytes) {
      lo = mid;
    } else {
      hi = mid - 1;
    }
  }
  let end = lo;
  // Drop a trailing lone high surrogate left by cutting inside a pair.
  const lastUnit = str.charCodeAt(end - 1);
  if (end > 0 && lastUnit >= 0xd800 && lastUnit <= 0xdbff) {
    end -= 1;
  }
  return str.substring(0, end);
}
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
import { calculateTotalSize } from '../concerns/calculator.js';
|
|
2
|
+
import { calculateEffectiveLimit } from '../concerns/calculator.js';
|
|
3
|
+
import { S3_METADATA_LIMIT_BYTES } from './enforce-limits.js';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* User Managed Behavior Configuration Documentation
|
|
7
|
+
*
|
|
8
|
+
* The `user-managed` behavior is the default for s3db resources. It provides no automatic enforcement
|
|
9
|
+
* of S3 metadata or body size limits, and does not modify or truncate data. Instead, it emits warnings
|
|
10
|
+
* via the `exceedsLimit` event when S3 metadata limits are exceeded, but allows all operations to proceed.
|
|
11
|
+
*
|
|
12
|
+
* ## Purpose & Use Cases
|
|
13
|
+
* - For development, testing, or advanced users who want full control over resource metadata and body size.
|
|
14
|
+
* - Useful when you want to handle S3 metadata limits yourself, or implement custom logic for warnings.
|
|
15
|
+
* - Not recommended for production unless you have custom enforcement or validation in place.
|
|
16
|
+
*
|
|
17
|
+
* ## How It Works
|
|
18
|
+
* - Emits an `exceedsLimit` event (with details) when a resource's metadata size exceeds the S3 2KB limit.
|
|
19
|
+
* - Does NOT block, truncate, or modify data—operations always proceed.
|
|
20
|
+
* - No automatic enforcement of any limits; user is responsible for handling warnings and data integrity.
|
|
21
|
+
*
|
|
22
|
+
* ## Event Emission
|
|
23
|
+
* - Event: `exceedsLimit`
|
|
24
|
+
* - Payload:
|
|
25
|
+
* - `operation`: 'insert' | 'update' | 'upsert'
|
|
26
|
+
* - `id` (for update/upsert): resource id
|
|
27
|
+
* - `totalSize`: total metadata size in bytes
|
|
28
|
+
* - `limit`: S3 metadata limit (2048 bytes)
|
|
29
|
+
* - `excess`: number of bytes over the limit
|
|
30
|
+
* - `data`: the offending data object
|
|
31
|
+
*
|
|
32
|
+
* @example
|
|
33
|
+
* // Listen for warnings on a resource
|
|
34
|
+
* resource.on('exceedsLimit', (info) => {
|
|
35
|
+
* console.warn(`Resource exceeded S3 metadata limit:`, info);
|
|
36
|
+
* });
|
|
37
|
+
*
|
|
38
|
+
* @example
|
|
39
|
+
* // Create a resource with user-managed behavior (default)
|
|
40
|
+
* const resource = await db.createResource({
|
|
41
|
+
* name: 'my_resource',
|
|
42
|
+
* attributes: { ... },
|
|
43
|
+
* behavior: 'user-managed' // or omit for default
|
|
44
|
+
* });
|
|
45
|
+
*
|
|
46
|
+
* ## Comparison to Other Behaviors
|
|
47
|
+
* | Behavior | Enforcement | Data Loss | Event Emission | Use Case |
|
|
48
|
+
* |------------------|-------------|-----------|----------------|-------------------------|
|
|
49
|
+
* | user-managed | None | Possible | Warns | Dev/Test/Advanced users |
|
|
50
|
+
* | enforce-limits | Strict | No | Throws | Production |
|
|
51
|
+
* | truncate-data | Truncates | Yes | Warns | Content Mgmt |
|
|
52
|
+
* | body-overflow | Truncates/Splits | Yes | Warns | Large objects |
|
|
53
|
+
*
|
|
54
|
+
* ## Best Practices & Warnings
|
|
55
|
+
* - Exceeding S3 metadata limits will cause silent data loss or errors at the storage layer.
|
|
56
|
+
* - Use this behavior only if you have custom logic to handle warnings and enforce limits.
|
|
57
|
+
* - For production, prefer `enforce-limits` or `truncate-data` to avoid data loss.
|
|
58
|
+
*
|
|
59
|
+
* ## Migration Tips
|
|
60
|
+
* - To migrate to a stricter behavior, change the resource's behavior to `enforce-limits` or `truncate-data`.
|
|
61
|
+
* - Review emitted warnings to identify resources at risk of exceeding S3 limits.
|
|
62
|
+
*
|
|
63
|
+
* @typedef {Object} UserManagedBehaviorConfig
|
|
64
|
+
* @property {boolean} [enabled=true] - Whether the behavior is active
|
|
65
|
+
*/
|
|
66
|
+
/**
 * Insert hook for the user-managed behavior.
 * Never blocks, truncates, or mutates data — it only emits an
 * `exceedsLimit` warning event when the mapped metadata would exceed the
 * effective S3 metadata limit, then lets the operation proceed.
 *
 * @param {Object} args
 * @param {Object} args.resource - Resource instance (event emitter; provides version/config)
 * @param {Object} args.data - Validated input data
 * @param {Object} args.mappedData - Attribute map destined for S3 metadata
 * @param {Object} [args.originalData] - Raw caller-supplied data (preferred in the event payload)
 * @returns {Promise<{mappedData: Object, body: string}>}
 */
export async function handleInsert({ resource, data, mappedData, originalData }) {
  const totalSize = calculateTotalSize(mappedData);

  // Calculate effective limit considering system overhead
  const effectiveLimit = calculateEffectiveLimit({
    s3Limit: S3_METADATA_LIMIT_BYTES,
    systemConfig: {
      version: resource.version,
      timestamps: resource.config.timestamps,
      id: data.id
    }
  });

  if (totalSize > effectiveLimit) {
    // FIX: report the same limit used in the check above. The previous
    // hard-coded 2047 was inconsistent with effectiveLimit and could
    // produce a negative `excess` in the event payload.
    resource.emit('exceedsLimit', {
      operation: 'insert',
      totalSize,
      limit: effectiveLimit,
      excess: totalSize - effectiveLimit,
      data: originalData || data
    });
  }
  return { mappedData, body: JSON.stringify(data) };
}
|
|
90
|
+
|
|
91
|
+
/**
 * Update hook for the user-managed behavior.
 * Warn-only: emits `exceedsLimit` (including the record id) when the
 * mapped metadata exceeds the effective S3 limit, and always proceeds.
 *
 * @param {Object} args
 * @param {Object} args.resource - Resource instance (event emitter; provides version/config)
 * @param {string} args.id - Id of the record being updated
 * @param {Object} args.data - Validated input data
 * @param {Object} args.mappedData - Attribute map destined for S3 metadata
 * @param {Object} [args.originalData] - Raw caller-supplied data (preferred in the event payload)
 * @returns {Promise<{mappedData: Object, body: string}>}
 */
export async function handleUpdate({ resource, id, data, mappedData, originalData }) {
  const totalSize = calculateTotalSize(mappedData);

  // Calculate effective limit considering system overhead
  const effectiveLimit = calculateEffectiveLimit({
    s3Limit: S3_METADATA_LIMIT_BYTES,
    systemConfig: {
      version: resource.version,
      timestamps: resource.config.timestamps,
      id
    }
  });

  if (totalSize > effectiveLimit) {
    // FIX: report the same limit used in the check above instead of a
    // hard-coded 2047, which could yield a negative `excess`.
    resource.emit('exceedsLimit', {
      operation: 'update',
      id,
      totalSize,
      limit: effectiveLimit,
      excess: totalSize - effectiveLimit,
      data: originalData || data
    });
  }
  return { mappedData, body: JSON.stringify(data) };
}
|
|
116
|
+
|
|
117
|
+
/**
 * Upsert hook for the user-managed behavior.
 * Warn-only: emits `exceedsLimit` (including the record id) when the
 * mapped metadata exceeds the effective S3 limit, and always proceeds.
 *
 * @param {Object} args
 * @param {Object} args.resource - Resource instance (event emitter; provides version/config)
 * @param {string} args.id - Id of the record being upserted
 * @param {Object} args.data - Validated input data
 * @param {Object} args.mappedData - Attribute map destined for S3 metadata
 * @param {Object} [args.originalData] - Raw caller-supplied data (preferred in the event payload)
 * @returns {Promise<{mappedData: Object, body: string}>}
 */
export async function handleUpsert({ resource, id, data, mappedData, originalData }) {
  const totalSize = calculateTotalSize(mappedData);

  // Calculate effective limit considering system overhead
  const effectiveLimit = calculateEffectiveLimit({
    s3Limit: S3_METADATA_LIMIT_BYTES,
    systemConfig: {
      version: resource.version,
      timestamps: resource.config.timestamps,
      id
    }
  });

  if (totalSize > effectiveLimit) {
    // FIX: report the same limit used in the check above instead of a
    // hard-coded 2047, which could yield a negative `excess`.
    resource.emit('exceedsLimit', {
      operation: 'upsert',
      id,
      totalSize,
      limit: effectiveLimit,
      excess: totalSize - effectiveLimit,
      data: originalData || data
    });
  }
  return { mappedData, body: JSON.stringify(data) };
}
|
|
142
|
+
|
|
143
|
+
/**
 * Read hook for the user-managed behavior.
 * The behavior imposes nothing on reads: metadata and body are returned
 * exactly as received, and the caller is responsible for interpreting them.
 */
export async function handleGet({ resource, metadata, body }) {
  return { metadata, body };
}
|