dyno-table 1.5.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +368 -186
- package/dist/batch-builder-DNsz6zvh.d.cts +398 -0
- package/dist/batch-builder-Dz1yPGrJ.d.ts +398 -0
- package/dist/builders/condition-check-builder.cjs +0 -13
- package/dist/builders/condition-check-builder.cjs.map +1 -1
- package/dist/builders/condition-check-builder.d.cts +0 -13
- package/dist/builders/condition-check-builder.d.ts +0 -13
- package/dist/builders/condition-check-builder.js +0 -13
- package/dist/builders/condition-check-builder.js.map +1 -1
- package/dist/builders/delete-builder.cjs +38 -0
- package/dist/builders/delete-builder.cjs.map +1 -1
- package/dist/builders/delete-builder.d.cts +36 -0
- package/dist/builders/delete-builder.d.ts +36 -0
- package/dist/builders/delete-builder.js +38 -0
- package/dist/builders/delete-builder.js.map +1 -1
- package/dist/builders/paginator.cjs +0 -24
- package/dist/builders/paginator.cjs.map +1 -1
- package/dist/builders/paginator.d.cts +0 -24
- package/dist/builders/paginator.d.ts +0 -24
- package/dist/builders/paginator.js +0 -24
- package/dist/builders/paginator.js.map +1 -1
- package/dist/builders/put-builder.cjs +39 -8
- package/dist/builders/put-builder.cjs.map +1 -1
- package/dist/builders/put-builder.d.cts +37 -8
- package/dist/builders/put-builder.d.ts +37 -8
- package/dist/builders/put-builder.js +39 -8
- package/dist/builders/put-builder.js.map +1 -1
- package/dist/builders/query-builder.cjs +0 -53
- package/dist/builders/query-builder.cjs.map +1 -1
- package/dist/builders/query-builder.d.cts +1 -1
- package/dist/builders/query-builder.d.ts +1 -1
- package/dist/builders/query-builder.js +0 -53
- package/dist/builders/query-builder.js.map +1 -1
- package/dist/builders/transaction-builder.cjs +0 -47
- package/dist/builders/transaction-builder.cjs.map +1 -1
- package/dist/builders/transaction-builder.d.cts +0 -47
- package/dist/builders/transaction-builder.d.ts +0 -47
- package/dist/builders/transaction-builder.js +0 -47
- package/dist/builders/transaction-builder.js.map +1 -1
- package/dist/builders/update-builder.cjs +2 -2
- package/dist/builders/update-builder.cjs.map +1 -1
- package/dist/builders/update-builder.d.cts +2 -2
- package/dist/builders/update-builder.d.ts +2 -2
- package/dist/builders/update-builder.js +2 -2
- package/dist/builders/update-builder.js.map +1 -1
- package/dist/conditions.cjs.map +1 -1
- package/dist/conditions.js.map +1 -1
- package/dist/entity.cjs +162 -86
- package/dist/entity.cjs.map +1 -1
- package/dist/entity.d.cts +39 -15
- package/dist/entity.d.ts +39 -15
- package/dist/entity.js +162 -86
- package/dist/entity.js.map +1 -1
- package/dist/index.cjs +623 -231
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +3 -2
- package/dist/index.d.ts +3 -2
- package/dist/index.js +622 -232
- package/dist/index.js.map +1 -1
- package/dist/{query-builder-CbHvimBk.d.cts → query-builder-BDuHHrb-.d.cts} +0 -34
- package/dist/{query-builder-BhrR31oO.d.ts → query-builder-C6XjVEFH.d.ts} +0 -34
- package/dist/{table-Des8C2od.d.ts → table-BWa4tx63.d.ts} +39 -151
- package/dist/{table-CY9byPEg.d.cts → table-DAKlzQsK.d.cts} +39 -151
- package/dist/table.cjs +459 -145
- package/dist/table.cjs.map +1 -1
- package/dist/table.d.cts +3 -2
- package/dist/table.d.ts +3 -2
- package/dist/table.js +459 -145
- package/dist/table.js.map +1 -1
- package/dist/utils.cjs.map +1 -1
- package/dist/utils.js.map +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -45,10 +45,14 @@ await dinoTable
 </td>
 </tr>
 <tr>
-<td>
+<td width="50%">
 <h3>⚡ Velociraptor-fast API</h3>
 <p>Intuitive chainable builder pattern for complex operations that feels natural and reduces boilerplate</p>
 </td>
+<td width="50%">
+<h3>🎯 Semantic data access patterns</h3>
+<p>Encourages meaningful, descriptive method names like <code>getUserByEmail()</code> instead of cryptic <code>gsi1</code> references</p>
+</td>
 </tr>
 <tr>
 <td width="50%">
@@ -65,6 +69,10 @@ await dinoTable
 ## 📑 Table of Contents

 - [📦 Installation](#-installation)
+- [🎯 DynamoDB Best Practices](#-dynamodb-best-practices)
+  - [Semantic Data Access Patterns](#semantic-data-access-patterns)
+  - [The Problem with Generic Index Names](#the-problem-with-generic-index-names)
+  - [The Solution: Meaningful Method Names](#the-solution-meaningful-method-names)
 - [🚀 Quick Start](#-quick-start)
   - [1. Configure Your Jurassic Table](#1-configure-your-jurassic-table)
   - [2. Perform Type-Safe Dinosaur Operations](#2-perform-type-safe-dinosaur-operations)
@@ -93,8 +101,8 @@ await dinoTable
   - [Nested Object Support](#nested-object-support)
   - [Type-Safe Conditions](#type-safe-conditions)
 - [🔄 Batch Operations](#-batch-operations)
-  - [Batch
-  - [Batch
+  - [Entity-Based Batch Operations](#-entity-based-batch-operations)
+  - [Table-Direct Batch Operations](#-table-direct-batch-operations)
 - [🔒 Transaction Operations](#-transaction-operations)
   - [Transaction Builder](#transaction-builder)
   - [Transaction Options](#transaction-options)
@@ -137,6 +145,133 @@ pnpm add dyno-table @aws-sdk/client-dynamodb @aws-sdk/lib-dynamodb
 ```
 </details>

+## 🎯 DynamoDB Best Practices
+
+<div align="center">
+
+### **Design Your Data Access Patterns First, Name Them Meaningfully**
+
+</div>
+
+dyno-table follows DynamoDB best practices by encouraging developers to **define their data access patterns upfront** and assign them **meaningful, descriptive names**. This approach ensures that when writing business logic, developers call semantically clear methods instead of cryptic index references.
+
+### Semantic Data Access Patterns
+
+The core principle is simple: **your code should read like business logic, not database implementation details**.
+
+<table>
+<tr>
+<th>❌ Cryptic Implementation</th>
+<th>✅ Semantic Business Logic</th>
+</tr>
+<tr>
+<td>
+
+```ts
+// Hard to understand what this does - using raw AWS Document Client
+import { DynamoDBDocument } from "@aws-sdk/lib-dynamodb";
+import { QueryCommand } from "@aws-sdk/lib-dynamodb";
+
+const docClient = DynamoDBDocument.from(new DynamoDBClient({}));
+
+const users = await docClient.send(new QueryCommand({
+  TableName: "MyTable",
+  IndexName: "gsi1",
+  KeyConditionExpression: "#pk = :pk",
+  ExpressionAttributeNames: { "#pk": "pk" },
+  ExpressionAttributeValues: { ":pk": "STATUS#active" }
+}));
+
+const orders = await docClient.send(new QueryCommand({
+  TableName: "MyTable",
+  IndexName: "gsi2",
+  KeyConditionExpression: "#pk = :pk",
+  ExpressionAttributeNames: { "#pk": "pk" },
+  ExpressionAttributeValues: { ":pk": "CUSTOMER#123" }
+}));
+
+const products = await docClient.send(new QueryCommand({
+  TableName: "MyTable",
+  IndexName: "gsi3",
+  KeyConditionExpression: "#pk = :pk",
+  ExpressionAttributeNames: { "#pk": "pk" },
+  ExpressionAttributeValues: { ":pk": "CATEGORY#electronics" }
+}));
+```
+
+</td>
+<td>
+
+```ts
+// Clear business intent
+const activeUsers = await userRepo.query
+  .getActiveUsers()
+  .execute();
+
+const customerOrders = await orderRepo.query
+  .getOrdersByCustomer({ customerId: "123" })
+  .execute();
+
+const electronics = await productRepo.query
+  .getProductsByCategory({ category: "electronics" })
+  .execute();
+```
+
+</td>
+</tr>
+</table>
+
+### The Problem with Generic Index Names
+
+When you use generic names like `gsi1`, `gsi2`, `gsi3`, you create several problems:
+
+- **🧠 Cognitive Load**: Developers must remember what each index does
+- **📚 Poor Documentation**: Code doesn't self-document its purpose
+- **🐛 Error-Prone**: Easy to use the wrong index for a query
+- **👥 Team Friction**: New team members struggle to understand data access patterns
+- **🔄 Maintenance Issues**: Refactoring becomes risky and unclear
+
+### The Solution: Meaningful Method Names
+
+dyno-table encourages you to define your access patterns with descriptive names that reflect their business purpose:
+
+```ts
+// Define your access patterns with meaningful names
+const UserEntity = defineEntity({
+  name: "User",
+  schema: userSchema,
+  primaryKey,
+  queries: {
+    // ✅ Clear business purpose
+    getActiveUsers: createQuery
+      .input(z.object({}))
+      .query(({ entity }) => entity.query({ pk: "STATUS#active" }).useIndex("gsi1")),
+
+    getUsersByEmail: createQuery
+      .input(z.object({ email: z.string() }))
+      .query(({ input, entity }) => entity.query({ pk: `EMAIL#${input.email}` }).useIndex("gsi1")),
+
+    getUsersByDepartment: createQuery
+      .input(z.object({ department: z.string() }))
+      .query(({ input, entity }) => entity.query({ pk: `DEPT#${input.department}` }).useIndex("gsi2")),
+  },
+});
+
+// Usage in business logic is now self-documenting
+const activeUsers = await userRepo.query.getActiveUsers().execute();
+const engineeringTeam = await userRepo.query.getUsersByDepartment({ department: "engineering" }).execute();
+const user = await userRepo.query.getUsersByEmail({ email: "john@company.com" }).execute();
+```
+
+**This pattern promotes:**
+- ✅ **Better code readability and maintainability**
+- ✅ **Self-documenting API design**
+- ✅ **Easier onboarding for new team members**
+- ✅ **Reduced cognitive load when understanding data access patterns**
+- ✅ **Clear separation between business logic and database implementation**
+
+> **🏗️ Important Note**: Keep your actual DynamoDB table GSI names generic (`gsi1`, `gsi2`, etc.) for flexibility across different entities. The meaningful, descriptive names should live at the entity/repository level, not at the table level. This allows multiple entities to share the same GSIs while maintaining semantic clarity in your business logic.
+
 ## 🚀 Quick Start

 <div align="center">
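The section added above spreads the entity definition across several partial snippets. As a consolidated illustration, here is a minimal sketch of one semantically named access pattern. It only uses calls that appear in this diff (`defineEntity`, `createQuery.input(...).query(...)`, `entity.query(...).useIndex(...)`); the import path for `defineEntity`, the `primaryKey` template, how `createQuery` is obtained from the `createQueries` helper, and how `userRepo` is created from the entity are assumptions or are deliberately elided because they are not shown here.

```ts
// Sketch only: consolidates the pattern from the added README section.
// Assumed/elided (not shown in this diff): the defineEntity import path,
// how `createQuery` is derived from the `createQueries` helper, the
// `primaryKey` template, and the creation of `userRepo` from the entity.
import { z } from "zod";
import { defineEntity } from "dyno-table/entity"; // path assumed to match createQueries/createIndex

const userSchema = z.object({
  id: z.string(),
  email: z.string(),
  department: z.string(),
  status: z.enum(["active", "inactive"]),
});

const UserEntity = defineEntity({
  name: "User",
  schema: userSchema,
  primaryKey, // key template, defined as in the README's primary-key example
  queries: {
    // Semantic name at the entity level, generic GSI name at the table level
    getUsersByDepartment: createQuery
      .input(z.object({ department: z.string() }))
      .query(({ input, entity }) =>
        entity.query({ pk: `DEPT#${input.department}` }).useIndex("gsi2")
      ),
  },
});

// Business logic then reads as intent rather than index plumbing
// (`userRepo` is assumed to be the repository created from UserEntity).
const engineers = await userRepo.query
  .getUsersByDepartment({ department: "engineering" })
  .execute();
```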
@@ -177,6 +312,8 @@ const dinoTable = new Table({

 ### 2. Perform Type-Safe Operations directly on the table instance

+> **💡 Pro Tip**: While you can use the table directly, we recommend using the [Entity Pattern](#-entity-pattern-with-standard-schema-validators) with meaningful, descriptive method names like `getUserByEmail()` instead of generic index references. This follows DynamoDB best practices and makes your code self-documenting.
+
 <table>
 <tr>
 <td>
@@ -271,28 +408,17 @@ await dinoTable.transaction((tx) => {

 <table>
 <tr>
-<th
-<th
+<th>❌ Without dyno-table</th>
+<th>✅ With dyno-table (Entity Pattern)</th>
 </tr>
 <tr>
 <td>

-```ts
-// Type-safe, clean, and intuitive
-await dinoTable
-  .query<Dinosaur>({
-    pk: "SPECIES#trex"
-  })
-  .filter(op =>
-    op.contains("features", "feathers")
-  )
-  .execute();
-```
-
 ```ts
 // Verbose, error-prone, no type safety
 await docClient.send(new QueryCommand({
   TableName: "JurassicPark",
+  IndexName: "gsi1", // What does gsi1 do?
   KeyConditionExpression: "#pk = :pk",
   FilterExpression: "contains(#features, :feathers)",
   ExpressionAttributeNames: {
@@ -306,10 +432,37 @@ await docClient.send(new QueryCommand({
 }));
 ```

+</td>
+<td>
+
+```ts
+// Self-documenting, type-safe, semantic
+const featheredTRexes = await dinosaurRepo.query
+  .getFeatheredDinosaursBySpecies({
+    species: "trex"
+  })
+  .execute();
+
+// Or using table directly (still better than raw SDK)
+await dinoTable
+  .query<Dinosaur>({
+    pk: "SPECIES#trex"
+  })
+  .filter(op =>
+    op.contains("features", "feathers")
+  )
+  .execute();
+```
+
 </td>
 </tr>
 </table>

+**Key improvements:**
+- 🛡️ **Type Safety**: Compile-time error checking prevents runtime failures
+- 📖 **Self-Documenting**: Code clearly expresses business intent
+- 🧠 **Reduced Complexity**: No manual expression building or attribute mapping
+
 ## 🏗️ Entity Pattern with Standard Schema validators

 <div align="center">
@@ -336,7 +489,6 @@ await docClient.send(new QueryCommand({
 - 🔑 **Automatic key generation**
 - 📦 **Repository pattern**
 - 🔍 **Custom query builders**
-- 🔄 **Lifecycle hooks**

 </td>
 </tr>
@@ -460,7 +612,7 @@ await dinosaurRepo.delete({

 #### 3. Custom Queries

-Define custom queries with
+Define custom queries with **meaningful, descriptive names** that reflect their business purpose. This follows DynamoDB best practices by making your data access patterns self-documenting:

 ```ts
 import { createQueries } from "dyno-table/entity";
@@ -472,7 +624,8 @@ const DinosaurEntity = defineEntity({
   schema: dinosaurSchema,
   primaryKey,
   queries: {
-
+    // ✅ Semantic method names that describe business intent
+    getDinosaursByDiet: createQuery
       .input(
         z.object({
           diet: z.enum(["carnivore", "herbivore", "omnivore"]),
@@ -485,7 +638,7 @@ const DinosaurEntity = defineEntity({
        });
      }),

-
+    findDinosaursBySpecies: createQuery
       .input(
         z.object({
           species: z.string(),
@@ -496,40 +649,89 @@ const DinosaurEntity = defineEntity({
           .scan()
           .filter((op) => op.eq("species", input.species));
       }),
+
+    getActiveCarnivores: createQuery
+      .input(z.object({}))
+      .query(({ entity }) => {
+        return entity
+          .query({
+            pk: dinosaurPK({diet: "carnivore"})
+          })
+          .filter((op) => op.eq("status", "active"));
+      }),
+
+    getDangerousDinosaursInEnclosure: createQuery
+      .input(
+        z.object({
+          enclosureId: z.string(),
+          minDangerLevel: z.number().min(1).max(10),
+        })
+      )
+      .query(({ input, entity }) => {
+        return entity
+          .scan()
+          .filter((op) => op.and(
+            op.contains("enclosureId", input.enclosureId),
+            op.gte("dangerLevel", input.minDangerLevel)
+          ));
+      }),
   },
 });

-//
-const carnivores = await dinosaurRepo.query.
-const trexes = await dinosaurRepo.query.
+// Usage in business logic is now self-documenting
+const carnivores = await dinosaurRepo.query.getDinosaursByDiet({ diet: "carnivore" }).execute();
+const trexes = await dinosaurRepo.query.findDinosaursBySpecies({ species: "Tyrannosaurus Rex" }).execute();
+const activeCarnivores = await dinosaurRepo.query.getActiveCarnivores().execute();
+const dangerousDinos = await dinosaurRepo.query.getDangerousDinosaursInEnclosure({
+  enclosureId: "PADDOCK-A",
+  minDangerLevel: 8
+}).execute();
 ```

-
+**Benefits of semantic naming:**
+- 🎯 **Clear Intent**: Method names immediately convey what data you're accessing
+- 📖 **Self-Documenting**: No need to look up what `gsi1` or `gsi2` does
+- 🧠 **Reduced Cognitive Load**: Developers can focus on business logic, not database details
+- 👥 **Team Collaboration**: New team members understand the codebase faster
+- 🔍 **Better IDE Support**: Autocomplete shows meaningful method names
+
+#### 4. Defining GSI Access Patterns

-Define GSI
+Define GSI access patterns with **meaningful names** that reflect their business purpose. This is crucial for maintaining readable, self-documenting code:

 ```ts
 import { createIndex } from "dyno-table/entity";

-// Define
-const
-const
+// Define GSI templates with descriptive names that reflect their purpose
+const speciesPK = partitionKey`SPECIES#${"species"}`
+const speciesSK = sortKey`DINOSAUR#${"id"}`
+
+const enclosurePK = partitionKey`ENCLOSURE#${"enclosureId"}`
+const enclosureSK = sortKey`DANGER#${"dangerLevel"}#ID#${"id"}`

-//
+// Create indexes with meaningful names
 const speciesIndex = createIndex()
   .input(dinosaurSchema)
-  .partitionKey(({ species }) =>
-  .sortKey(({ id }) =>
+  .partitionKey(({ species }) => speciesPK({ species }))
+  .sortKey(({ id }) => speciesSK({ id }));
+
+const enclosureIndex = createIndex()
+  .input(dinosaurSchema)
+  .partitionKey(({ enclosureId }) => enclosurePK({ enclosureId }))
+  .sortKey(({ dangerLevel, id }) => enclosureSK({ dangerLevel, id }));

 const DinosaurEntity = defineEntity({
   name: "Dinosaur",
   schema: dinosaurSchema,
   primaryKey,
   indexes: {
-
+    // ✅ Map to generic GSI names for table flexibility
+    gsi1: speciesIndex,
+    gsi2: enclosureIndex,
   },
   queries: {
-
+    // ✅ Semantic method names that describe business intent
+    getDinosaursBySpecies: createQuery
       .input(
         z.object({
           species: z.string(),
@@ -538,16 +740,59 @@ const DinosaurEntity = defineEntity({
       .query(({ input, entity }) => {
         return entity
           .query({
-
-            pk: gsi1PK({species: input.species}),
+            pk: speciesPK({species: input.species}),
           })
-          //
-
+          .useIndex("gsi1"); // Generic GSI name for table flexibility
+      }),
+
+    getDinosaursByEnclosure: createQuery
+      .input(
+        z.object({
+          enclosureId: z.string(),
+        })
+      )
+      .query(({ input, entity }) => {
+        return entity
+          .query({
+            pk: enclosurePK({enclosureId: input.enclosureId}),
+          })
+          .useIndex("gsi2");
+      }),
+
+    getMostDangerousInEnclosure: createQuery
+      .input(
+        z.object({
+          enclosureId: z.string(),
+          minDangerLevel: z.number().min(1).max(10),
+        })
+      )
+      .query(({ input, entity }) => {
+        return entity
+          .query({
+            pk: enclosurePK({enclosureId: input.enclosureId}),
+            sk: (op) => op.gte(`DANGER#${input.minDangerLevel}`)
+          })
+          .useIndex("gsi2")
+          .sortDescending(); // Get most dangerous first
       }),
   },
 });
+
+// Usage is now self-documenting
+const trexes = await dinosaurRepo.query.getDinosaursBySpecies({ species: "Tyrannosaurus Rex" }).execute();
+const paddockADinos = await dinosaurRepo.query.getDinosaursByEnclosure({ enclosureId: "PADDOCK-A" }).execute();
+const dangerousDinos = await dinosaurRepo.query.getMostDangerousInEnclosure({
+  enclosureId: "PADDOCK-A",
+  minDangerLevel: 8
+}).execute();
 ```

+**Key principles for access pattern naming:**
+- 🎯 **Generic GSI Names**: Keep table-level GSI names generic (`gsi1`, `gsi2`) for flexibility across entities
+- 🔍 **Business-Focused**: Method names should reflect what the query achieves, not how it works
+- 📚 **Self-Documenting**: Anyone reading the code should understand the purpose immediately
+- 🏗️ **Entity-Level Semantics**: The meaningful names live at the entity/repository level, not the table level
+
 ### Complete Entity Example

 Here's a complete example of using Zod schemas directly:
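The principles above say multiple entities can share the same physical GSIs while keeping their own semantic names. A minimal sketch of that idea follows, reusing the `createIndex` and key-template style from the hunk above. `fossilSchema`, the primary-key values, and the imports for `partitionKey`/`sortKey`/`defineEntity` are assumptions (only the `createIndex` import appears in this diff), and the query bodies are elided.

```ts
// Sketch: two entities mapping their own semantic index onto the same
// table-level "gsi1" slot. `fossilSchema`, `dinosaurPrimaryKey` and
// `fossilPrimaryKey` are placeholders; import paths other than the
// createIndex import shown in the diff are assumptions.
import { createIndex } from "dyno-table/entity";

const speciesPK = partitionKey`SPECIES#${"species"}`;
const speciesSK = sortKey`DINOSAUR#${"id"}`;
const sitePK = partitionKey`SITE#${"siteId"}`;
const siteSK = sortKey`FOSSIL#${"id"}`;

const dinosaurSpeciesIndex = createIndex()
  .input(dinosaurSchema)
  .partitionKey(({ species }) => speciesPK({ species }))
  .sortKey(({ id }) => speciesSK({ id }));

const fossilSiteIndex = createIndex()
  .input(fossilSchema) // placeholder schema
  .partitionKey(({ siteId }) => sitePK({ siteId }))
  .sortKey(({ id }) => siteSK({ id }));

// Each entity keeps its own meaningful query names, but both occupy gsi1.
const DinosaurEntity = defineEntity({
  name: "Dinosaur",
  schema: dinosaurSchema,
  primaryKey: dinosaurPrimaryKey, // placeholder
  indexes: { gsi1: dinosaurSpeciesIndex },
  queries: { /* getDinosaursBySpecies, ... as in the hunk above */ },
});

const FossilEntity = defineEntity({
  name: "Fossil",
  schema: fossilSchema,
  primaryKey: fossilPrimaryKey, // placeholder
  indexes: { gsi1: fossilSiteIndex },
  queries: { /* e.g. getFossilsBySite, also using .useIndex("gsi1") */ },
});
```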
@@ -621,7 +866,8 @@ const DinosaurEntity = defineEntity({
     gsi2: enclosureIndex,
   },
   queries: {
-
+    // ✅ Semantic method names that describe business intent
+    getDinosaursBySpecies: createQuery
       .input(
         z.object({
           species: z.string(),
@@ -635,7 +881,7 @@ const DinosaurEntity = defineEntity({
           .useIndex("gsi1");
       }),

-
+    getDinosaursByEnclosure: createQuery
       .input(
         z.object({
           enclosureId: z.string(),
@@ -649,7 +895,7 @@ const DinosaurEntity = defineEntity({
           .useIndex("gsi2");
       }),

-
+    getDangerousDinosaursInEnclosure: createQuery
       .input(
         z.object({
           enclosureId: z.string(),
@@ -688,27 +934,19 @@ async function main() {
     })
     .execute();

-  // Query dinosaurs by species
-  const trexes = await dinosaurRepo.query.
-    species: "Tyrannosaurus Rex"
+  // Query dinosaurs by species using semantic method names
+  const trexes = await dinosaurRepo.query.getDinosaursBySpecies({
+    species: "Tyrannosaurus Rex"
   }).execute();

   // Query dangerous dinosaurs in an enclosure
-  const dangerousDinos = await dinosaurRepo.query.
+  const dangerousDinos = await dinosaurRepo.query.getDangerousDinosaursInEnclosure({
     enclosureId: "enc-001",
     minDangerLevel: 8,
   }).execute();
 }
 ```

-**Key benefits:**
-- 🚫 Prevents accidental cross-type data access
-- 🔍 Automatically filters queries/scans to a repository type
-- 🛡️ Ensures consistent key structure across entities
-- 📦 Encapsulates domain-specific query logic
-- 🧪 Validates data with Zod schemas
-- 🔄 Provides type inference from schemas
-
 ## 🧩 Advanced Features

 ### Transactional Operations
@@ -812,108 +1050,6 @@ await dinoTable.transaction(
 );
 ```

-**Benefits of this transaction approach:**
-- 🔄 Uses the same familiar API as non-transactional operations
-- 🧠 Maintains consistent mental model for developers
-- 🔒 All operations within the callback are executed as a single transaction
-- 🛡️ Prevents race conditions and data inconsistencies
-- 📊 Supports up to 100 actions per transaction
-
-### Batch Processing
-
-**Efficient dinosaur park management with bulk operations**
-```ts
-// SCENARIO 1: Morning health check for multiple dinosaurs across enclosures
-// Retrieve health status for multiple dinosaurs in a single operation
-const healthCheckKeys = [
-  { pk: "ENCLOSURE#A", sk: "DINO#001" }, // T-Rex in Paddock A
-  { pk: "ENCLOSURE#B", sk: "DINO#002" }, // Velociraptor in Paddock B
-  { pk: "ENCLOSURE#C", sk: "DINO#003" } // Stegosaurus in Paddock C
-];
-
-// Perform batch get operation to retrieve all dinosaurs at once
-// This is much more efficient than individual gets
-const { items: dinosaurs, unprocessedKeys } = await dinoTable.batchGet<Dinosaur>(healthCheckKeys);
-console.log(`Health check completed for ${dinosaurs.length} dinosaurs`);
-
-// Process health check results and identify any dinosaurs needing attention
-dinosaurs.forEach(dino => {
-  if (dino.health < 80) {
-    console.log(`Health alert for ${dino.name} in Enclosure ${dino.enclosureId}`);
-    // In a real application, you might trigger alerts or schedule veterinary visits
-  }
-});
-
-// SCENARIO 2: Adding new herbivores to the park after quarantine
-// Prepare data for multiple new herbivores joining the collection
-const newHerbivores = [
-  {
-    pk: "ENCLOSURE#D", sk: "DINO#004",
-    name: "Triceratops Alpha", // Three-horned herbivore
-    species: "Triceratops",
-    diet: "Herbivore",
-    status: "HEALTHY",
-    health: 95, // Excellent health after quarantine
-    lastFed: new Date().toISOString() // Just fed before joining main enclosure
-  },
-  {
-    pk: "ENCLOSURE#D", sk: "DINO#005",
-    name: "Brachy", // Long-necked herbivore
-    species: "Brachiosaurus",
-    diet: "Herbivore",
-    status: "HEALTHY",
-    health: 90,
-    lastFed: new Date().toISOString()
-  }
-];
-
-// Add all new herbivores to the enclosure in a single batch operation
-// More efficient than individual writes and ensures consistent state
-await dinoTable.batchWrite(
-  newHerbivores.map(dino => ({
-    type: "put", // Create or replace operation
-    item: dino // Full dinosaur record
-  }))
-);
-
-// SCENARIO 3: Releasing a dinosaur from quarantine to general population
-// Multiple related operations performed as a batch
-await dinoTable.batchWrite([
-  // Step 1: Remove dinosaur from quarantine enclosure
-  {
-    type: "delete",
-    key: { pk: "ENCLOSURE#QUARANTINE", sk: "DINO#006" }
-  },
-
-  // Step 2: Add recovered dinosaur to main raptor enclosure
-  {
-    type: "put",
-    item: {
-      pk: "ENCLOSURE#E", sk: "DINO#006",
-      name: "Raptor Beta", // Juvenile Velociraptor
-      species: "Velociraptor",
-      diet: "Carnivore",
-      status: "HEALTHY", // Now healthy after treatment
-      health: 100,
-      lastFed: new Date().toISOString()
-    }
-  },
-
-  // Step 3: Clear quarantine status record
-  {
-    type: "delete",
-    key: { pk: "ENCLOSURE#QUARANTINE", sk: "STATUS#DINO#006" }
-  }
-]);
-
-// SCENARIO 4: Daily park-wide health monitoring
-// Handle large-scale operations across all dinosaurs
-// The library automatically handles chunking for large batches:
-// - 25 items per batch write
-// - 100 items per batch get
-const dailyHealthUpdates = generateDinosaurHealthUpdates(); // Hundreds of updates
-await dinoTable.batchWrite(dailyHealthUpdates); // Automatically chunked into multiple requests
-```

 ### Pagination Made Simple

@@ -995,11 +1131,12 @@ Dyno-table provides comprehensive query methods that match DynamoDB's capabiliti
 | **Greater Than** | `.filter(op => op.gt("price", 50))` | `price > :v1` |
 | **Greater Than or Equal** | `.filter(op => op.gte("rating", 4))` | `rating >= :v1` |
 | **Between** | `.filter(op => op.between("age", 18, 65))` | `age BETWEEN :v1 AND :v2` |
-| **
-| **
-| **
-| **Attribute
-| **
+| **In Array** | `.filter(op => op.inArray("status", ["ACTIVE", "PENDING"]))` | `status IN (:v1, :v2)` |
+| **Begins With** | `.filter(op => op.beginsWith("email", "@example.com"))` | `begins_with(email, :v1)` |
+| **Contains** | `.filter(op => op.contains("tags", "important"))` | `contains(tags, :v1)` |
+| **Attribute Exists** | `.filter(op => op.attributeExists("email"))` | `attribute_exists(email)` |
+| **Attribute Not Exists** | `.filter(op => op.attributeNotExists("deletedAt"))` | `attribute_not_exists(deletedAt)` |
+| **Nested Attributes** | `.filter(op => op.eq("address.city", "London"))` | `address.city = :v1` |

 ### Logical Operators

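For reference, the operators in the table above all compose inside a single `.filter()` callback, in the same chain style used throughout the README. The attribute names below are illustrative only; the chain itself mirrors the `dinoTable.query<Dinosaur>()` usage shown elsewhere in this diff.

```ts
// Sketch combining several operators from the table above in one filter.
// Attribute names are examples; the chain mirrors the README's existing
// dinoTable.query<Dinosaur>() usage.
const flaggedTrexes = await dinoTable
  .query<Dinosaur>({ pk: "SPECIES#trex" })
  .filter((op) =>
    op.and(
      op.inArray("status", ["ACTIVE", "FEEDING"]),   // status IN (:v1, :v2)
      op.beginsWith("name", "T"),                    // begins_with(name, :v1)
      op.attributeExists("lastFed"),                 // attribute_exists(lastFed)
      op.eq("habitat.enclosure.climate", "Optimal")  // nested attribute path
    )
  )
  .execute();
```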
@@ -1101,12 +1238,12 @@ const oldDino = await table.put<Dinosaur>({

 The library supports a comprehensive set of type-safe condition operators:

-| Category | Operators
-
-| **Comparison** | `eq`, `ne`, `lt`, `lte`, `gt`, `gte`
-| **String/Set** | `between`, `beginsWith`, `contains`
-| **Existence** | `attributeExists`, `attributeNotExists`
-| **Logical** | `and`, `or`, `not`
+| Category | Operators | Example |
+|----------------|----------------------------------------------|-------------------------------------------------------------------------|
+| **Comparison** | `eq`, `ne`, `lt`, `lte`, `gt`, `gte` | `.condition(op => op.gt("age", 18))` |
+| **String/Set** | `between`, `beginsWith`, `contains`, `inArray` | `.condition(op => op.inArray("status", ["active", "pending"]))` |
+| **Existence** | `attributeExists`, `attributeNotExists` | `.condition(op => op.attributeExists("email"))` |
+| **Logical** | `and`, `or`, `not` | `.condition(op => op.and(op.eq("status", "active"), op.gt("age", 18)))` |

 All operators are type-safe and will provide proper TypeScript inference for nested attributes.

@@ -1191,6 +1328,8 @@ await table.query<DinosaurMonitoring>({
     op.lt("health", "90"), // ❌ TypeScript Error: health expects number
     op.gt("temperature", 38), // ✓ Valid
     op.contains("behavior", "aggressive"), // ✓ Valid
+    op.inArray("alertLevel", ["LOW", "MEDIUM", "HIGH"]), // ✓ Valid: matches union type
+    op.inArray("alertLevel", ["UNKNOWN", "INVALID"]), // ❌ TypeScript Error: invalid alert levels
     op.eq("alertLevel", "UNKNOWN") // ❌ TypeScript Error: invalid alert level
   ))
   .execute();
@@ -1198,24 +1337,58 @@ await table.query<DinosaurMonitoring>({

 ## 🔄 Batch Operations

-
+Efficiently handle multiple items in a single request with automatic chunking and type safety.
+
+### 🏗️ Entity-Based Batch Operations
+
+**Type-safe batch operations with automatic entity type inference**

-### Batch Get
 ```ts
-
-
-
-
+// Create a typed batch builder
+const batch = table.batchBuilder<{
+  Dinosaur: DinosaurEntity;
+  Fossil: FossilEntity;
+}>();
+
+// Add operations - entity type is automatically inferred
+dinosaurRepo.create(newDinosaur).withBatch(batch);
+dinosaurRepo.get({ id: 'dino-123', diet: 'carnivore', species: 'Tyrannosaurus Rex' }).withBatch(batch);
+fossilRepo.create(newFossil).withBatch(batch);
+
+// Execute and get typed results
+const result = await batch.execute();
+const dinosaurs: DinosaurEntity[] = result.reads.itemsByType.Dinosaur;
+const fossils: FossilEntity[] = result.reads.itemsByType.Fossil;
 ```

-### Batch
+### 📋 Table-Direct Batch Operations
+
+**Direct table access for maximum control**
+
 ```ts
-
-
-{
-
+// Batch get - retrieve multiple items
+const keys = [
+  { pk: "DIET#carnivore", sk: "SPECIES#Tyrannosaurus Rex#ID#dino-123" },
+  { pk: "FOSSIL#456", sk: "DISCOVERY#2024" }
+];
+
+const { items, unprocessedKeys } = await table.batchGet<DynamoItem>(keys);
+
+// Batch write - mix of operations
+const operations = [
+  { type: "put" as const, item: { pk: "DIET#herbivore", sk: "SPECIES#Triceratops#ID#dino-789", name: "Spike", dangerLevel: 3 } },
+  { type: "delete" as const, key: { pk: "FOSSIL#OLD", sk: "DISCOVERY#1990" } }
+];
+
+const { unprocessedItems } = await table.batchWrite(operations);
+
+// Handle unprocessed items (retry if needed)
+if (unprocessedItems.length > 0) {
+  await table.batchWrite(unprocessedItems);
+}
 ```

+
 ## 🔒 Transaction Operations

 Perform multiple operations atomically with transaction support:
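The table-direct example added above retries unprocessed writes once. A bounded retry loop is a common caller-side extension; only `table.batchWrite(...)` returning `{ unprocessedItems }` comes from the README content in this diff, while the loop, retry cap, and backoff below are assumptions about how a caller might handle leftovers, not dyno-table API.

```ts
// Sketch: bounded retry for leftover batch writes. `operations` and `table`
// are the ones from the snippet above; the retry cap and linear backoff are
// caller-side choices, not library behavior.
let pending = operations;
for (let attempt = 1; attempt <= 3 && pending.length > 0; attempt++) {
  const { unprocessedItems } = await table.batchWrite(pending);
  pending = unprocessedItems;
  if (pending.length > 0) {
    // simple linear backoff before retrying the remaining items
    await new Promise((resolve) => setTimeout(resolve, 100 * attempt));
  }
}
```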
@@ -1285,6 +1458,7 @@ All condition operators are type-safe and will validate against your item type.
 - `gt(attr, value)` - Greater than (>)
 - `gte(attr, value)` - Greater than or equal to (≥)
 - `between(attr, lower, upper)` - Between two values (inclusive)
+- `inArray(attr, values)` - Checks if value is in a list of values (IN operator, max 100 values)
 - `beginsWith(attr, value)` - Checks if string begins with value
 - `contains(attr, value)` - Checks if string/set contains value

@@ -1300,6 +1474,18 @@ await dinoTable
     op.between("stats.weight", 1000, 5000) // Medium-sized dinosaurs
   ))
   .execute();
+
+// Example: Filter dinosaurs by multiple status values using inArray
+await dinoTable
+  .query<Dinosaur>({
+    pk: "SPECIES#trex"
+  })
+  .filter((op) => op.and(
+    op.inArray("status", ["ACTIVE", "FEEDING", "RESTING"]), // Multiple valid statuses
+    op.inArray("diet", ["carnivore", "omnivore"]), // Meat-eating dinosaurs
+    op.gt("dangerLevel", 5) // High danger level
+  ))
+  .execute();
 ```

 #### Attribute Operators

@@ -1349,6 +1535,11 @@ await dinoTable
     op.lt("care.feeding.lastFed", new Date(Date.now() - 8 * 60 * 60 * 1000).toISOString()),
     op.contains("behavior", "stressed")
   ),
+  // Alert: Critical status dinosaurs requiring immediate attention
+  op.and(
+    op.inArray("status", ["SICK", "INJURED", "QUARANTINE"]), // Critical statuses
+    op.inArray("priority", ["HIGH", "URGENT"]) // High priority levels
+  ),
   // Alert: Enclosure climate issues
   op.and(
     op.not(op.eq("habitat.enclosure.climate", "Optimal")),
@@ -1397,15 +1588,6 @@ const quarantinedDinos = await dinoTable
   .execute();
 ```

-Available key conditions for dinosaur queries:
-- `eq(value)` - Exact match (e.g., specific enclosure)
-- `lt(value)` - Earlier than date/time
-- `lte(value)` - Up to and including date/time
-- `gt(value)` - Later than date/time
-- `gte(value)` - From date/time onwards
-- `between(lower, upper)` - Range (e.g., weight range, date range)
-- `beginsWith(value)` - Prefix match (e.g., all health checks today)
-
 ## 🔮 Future Roadmap

 - [ ] Enhanced query plan visualization
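The removed list in the last hunk documented the sort-key condition operators (`eq`, `lt`, `lte`, `gt`, `gte`, `between`, `beginsWith`). For context, here is a minimal sketch of a sort-key condition in the callback form used by the GSI examples earlier in this diff (`sk: (op) => op.gte(...)`). Whether the table-level `query` accepts the same `sk` callback as the entity-level one is an assumption, and the key values are examples only.

```ts
// Sketch: sort-key prefix condition, mirroring the `sk: (op) => op.gte(...)`
// form shown in the entity query examples above. Assumes dinoTable.query
// accepts the same `sk` callback; "ENCLOSURE#A" / "HEALTH#..." are example keys.
const todaysHealthChecks = await dinoTable
  .query<Dinosaur>({
    pk: "ENCLOSURE#A",
    sk: (op) => op.beginsWith("HEALTH#2024-06-01"), // prefix match on the sort key
  })
  .execute();
```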
|