dyno-table 2.1.0 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. package/README.md +628 -348
  2. package/dist/builders/delete-builder.d.cts +1 -1
  3. package/dist/builders/delete-builder.d.ts +1 -1
  4. package/dist/builders/put-builder.cjs.map +1 -1
  5. package/dist/builders/put-builder.d.cts +1 -1
  6. package/dist/builders/put-builder.d.ts +1 -1
  7. package/dist/builders/put-builder.js.map +1 -1
  8. package/dist/builders/query-builder.cjs +44 -21
  9. package/dist/builders/query-builder.cjs.map +1 -1
  10. package/dist/builders/query-builder.d.cts +1 -1
  11. package/dist/builders/query-builder.d.ts +1 -1
  12. package/dist/builders/query-builder.js +44 -21
  13. package/dist/builders/query-builder.js.map +1 -1
  14. package/dist/builders/update-builder.cjs.map +1 -1
  15. package/dist/builders/update-builder.d.cts +6 -6
  16. package/dist/builders/update-builder.d.ts +6 -6
  17. package/dist/builders/update-builder.js.map +1 -1
  18. package/dist/entity.cjs +183 -41
  19. package/dist/entity.cjs.map +1 -1
  20. package/dist/entity.d.cts +91 -10
  21. package/dist/entity.d.ts +91 -10
  22. package/dist/entity.js +183 -41
  23. package/dist/entity.js.map +1 -1
  24. package/dist/index.cjs +2667 -2489
  25. package/dist/index.cjs.map +1 -1
  26. package/dist/index.d.cts +5 -5
  27. package/dist/index.d.ts +5 -5
  28. package/dist/index.js +2667 -2489
  29. package/dist/index.js.map +1 -1
  30. package/dist/{query-builder-BNWRCrJW.d.ts → query-builder-CUWdavZw.d.ts} +2 -0
  31. package/dist/{query-builder-DZ9JKgBN.d.cts → query-builder-DoZzZz_c.d.cts} +2 -0
  32. package/dist/{table-BhEeYauU.d.ts → table-CZBMkW2Z.d.ts} +9 -8
  33. package/dist/{table-BpNOboD9.d.cts → table-f-3wsT7K.d.cts} +9 -8
  34. package/dist/table.cjs +2510 -2474
  35. package/dist/table.cjs.map +1 -1
  36. package/dist/table.d.cts +9 -9
  37. package/dist/table.d.ts +9 -9
  38. package/dist/table.js +2510 -2474
  39. package/dist/table.js.map +1 -1
  40. package/package.json +2 -2
  41. package/dist/{batch-builder-CcxFDKhe.d.cts → batch-builder-BPoHyN_Q.d.cts} +1 -1
  42. package/dist/{batch-builder-BytHNL_u.d.ts → batch-builder-Cdo49C2r.d.ts} +1 -1
package/README.md CHANGED
@@ -4,7 +4,7 @@
4
4
 
5
5
  ### **Tame Your DynamoDB Data with Type-Safe Precision**
6
6
 
7
- [![npm version](https://img.shields.io/npm/v/dyno-table.svg?style=for-the-badge)](https://www.npmjs.com/package/dyno-table)
7
+ [![npm version](https://img.shields.io/npm/v/dyno-table.svg?style=for-the-badge)](https://www.npmjs.com/package/dyno-table)
8
8
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg?style=for-the-badge)](https://opensource.org/licenses/MIT)
9
9
  [![TypeScript](https://img.shields.io/badge/TypeScript-4.0%2B-blue?style=for-the-badge&logo=typescript)](https://www.typescriptlang.org/)
10
10
  [![AWS DynamoDB](https://img.shields.io/badge/AWS-DynamoDB-orange?style=for-the-badge&logo=amazon-aws)](https://aws.amazon.com/dynamodb/)
@@ -22,12 +22,12 @@
22
22
  // Type-safe dinosaur tracking operations made simple
23
23
  await dinoTable
24
24
  .update<Dinosaur>({
25
- pk: 'SPECIES#trex',
26
- sk: 'PROFILE#001'
25
+ pk: "SPECIES#trex",
26
+ sk: "PROFILE#001",
27
27
  })
28
- .set('diet', 'Carnivore') // Update dietary classification
29
- .add('sightings', 1) // Increment sighting counter
30
- .condition(op => op.eq('status', 'ACTIVE')) // Only if dinosaur is active
28
+ .set("diet", "Carnivore") // Update dietary classification
29
+ .add("sightings", 1) // Increment sighting counter
30
+ .condition((op) => op.eq("status", "ACTIVE")) // Only if dinosaur is active
31
31
  .execute();
32
32
  ```
33
33
 
@@ -97,6 +97,7 @@ await dinoTable
97
97
  - [Update Operations](#update-operations)
98
98
  - [Condition Operators](#condition-operators)
99
99
  - [Multiple Operations](#multiple-operations)
100
+ - [Force Rebuilding Read-Only Indexes](#force-rebuilding-read-only-indexes)
100
101
  - [🔄 Type Safety Features](#-type-safety-features)
101
102
  - [Nested Object Support](#nested-object-support)
102
103
  - [Type-Safe Conditions](#type-safe-conditions)
@@ -143,6 +144,7 @@ yarn add dyno-table @aws-sdk/client-dynamodb @aws-sdk/lib-dynamodb
143
144
  # Using PNPM
144
145
  pnpm add dyno-table @aws-sdk/client-dynamodb @aws-sdk/lib-dynamodb
145
146
  ```
147
+
146
148
  </details>
147
149
 
148
150
  ## 🎯 DynamoDB Best Practices
@@ -174,29 +176,35 @@ import { QueryCommand } from "@aws-sdk/lib-dynamodb";
174
176
 
175
177
  const docClient = DynamoDBDocument.from(new DynamoDBClient({}));
176
178
 
177
- const users = await docClient.send(new QueryCommand({
178
- TableName: "MyTable",
179
- IndexName: "gsi1",
180
- KeyConditionExpression: "#pk = :pk",
181
- ExpressionAttributeNames: { "#pk": "pk" },
182
- ExpressionAttributeValues: { ":pk": "STATUS#active" }
183
- }));
184
-
185
- const orders = await docClient.send(new QueryCommand({
186
- TableName: "MyTable",
187
- IndexName: "gsi2",
188
- KeyConditionExpression: "#pk = :pk",
189
- ExpressionAttributeNames: { "#pk": "pk" },
190
- ExpressionAttributeValues: { ":pk": "CUSTOMER#123" }
191
- }));
192
-
193
- const products = await docClient.send(new QueryCommand({
194
- TableName: "MyTable",
195
- IndexName: "gsi3",
196
- KeyConditionExpression: "#pk = :pk",
197
- ExpressionAttributeNames: { "#pk": "pk" },
198
- ExpressionAttributeValues: { ":pk": "CATEGORY#electronics" }
199
- }));
179
+ const users = await docClient.send(
180
+ new QueryCommand({
181
+ TableName: "MyTable",
182
+ IndexName: "gsi1",
183
+ KeyConditionExpression: "#pk = :pk",
184
+ ExpressionAttributeNames: { "#pk": "pk" },
185
+ ExpressionAttributeValues: { ":pk": "STATUS#active" },
186
+ }),
187
+ );
188
+
189
+ const orders = await docClient.send(
190
+ new QueryCommand({
191
+ TableName: "MyTable",
192
+ IndexName: "gsi2",
193
+ KeyConditionExpression: "#pk = :pk",
194
+ ExpressionAttributeNames: { "#pk": "pk" },
195
+ ExpressionAttributeValues: { ":pk": "CUSTOMER#123" },
196
+ }),
197
+ );
198
+
199
+ const products = await docClient.send(
200
+ new QueryCommand({
201
+ TableName: "MyTable",
202
+ IndexName: "gsi3",
203
+ KeyConditionExpression: "#pk = :pk",
204
+ ExpressionAttributeNames: { "#pk": "pk" },
205
+ ExpressionAttributeValues: { ":pk": "CATEGORY#electronics" },
206
+ }),
207
+ );
200
208
  ```
201
209
 
202
210
  </td>
@@ -204,9 +212,7 @@ const products = await docClient.send(new QueryCommand({
204
212
 
205
213
  ```ts
206
214
  // Clear business intent
207
- const activeUsers = await userRepo.query
208
- .getActiveUsers()
209
- .execute();
215
+ const activeUsers = await userRepo.query.getActiveUsers().execute();
210
216
 
211
217
  const customerOrders = await orderRepo.query
212
218
  .getOrdersByCustomer({ customerId: "123" })
@@ -225,11 +231,11 @@ const electronics = await productRepo.query
225
231
 
226
232
  When you use generic names like `gsi1`, `gsi2`, `gsi3`, you create several problems:
227
233
 
228
- - **🧠 Cognitive Load**: Developers must remember what each index does
229
- - **📚 Poor Documentation**: Code doesn't self-document its purpose
230
- - **🐛 Error-Prone**: Easy to use the wrong index for a query
231
- - **👥 Team Friction**: New team members struggle to understand data access patterns
232
- - **🔄 Maintenance Issues**: Refactoring becomes risky and unclear
234
+ - **Cognitive Load**: Developers must remember what each index does
235
+ - **Poor Documentation**: Code doesn't self-document its purpose
236
+ - **Error-Prone**: Easy to use the wrong index for a query
237
+ - **Team Friction**: New team members struggle to understand data access patterns
238
+ - **Maintenance Issues**: Refactoring becomes risky and unclear
233
239
 
234
240
  ### The Solution: Meaningful Method Names
235
241
 
@@ -245,25 +251,36 @@ const UserEntity = defineEntity({
245
251
  // ✅ Clear business purpose
246
252
  getActiveUsers: createQuery
247
253
  .input(z.object({}))
248
- .query(({ entity }) => entity.query({ pk: "STATUS#active" }).useIndex("gsi1")),
254
+ .query(({ entity }) =>
255
+ entity.query({ pk: "STATUS#active" }).useIndex("gsi1"),
256
+ ),
249
257
 
250
258
  getUsersByEmail: createQuery
251
259
  .input(z.object({ email: z.string() }))
252
- .query(({ input, entity }) => entity.query({ pk: `EMAIL#${input.email}` }).useIndex("gsi1")),
260
+ .query(({ input, entity }) =>
261
+ entity.query({ pk: `EMAIL#${input.email}` }).useIndex("gsi1"),
262
+ ),
253
263
 
254
264
  getUsersByDepartment: createQuery
255
265
  .input(z.object({ department: z.string() }))
256
- .query(({ input, entity }) => entity.query({ pk: `DEPT#${input.department}` }).useIndex("gsi2")),
266
+ .query(({ input, entity }) =>
267
+ entity.query({ pk: `DEPT#${input.department}` }).useIndex("gsi2"),
268
+ ),
257
269
  },
258
270
  });
259
271
 
260
272
  // Usage in business logic is now self-documenting
261
273
  const activeUsers = await userRepo.query.getActiveUsers().execute();
262
- const engineeringTeam = await userRepo.query.getUsersByDepartment({ department: "engineering" }).execute();
263
- const user = await userRepo.query.getUsersByEmail({ email: "john@company.com" }).execute();
274
+ const engineeringTeam = await userRepo.query
275
+ .getUsersByDepartment({ department: "engineering" })
276
+ .execute();
277
+ const user = await userRepo.query
278
+ .getUsersByEmail({ email: "john@company.com" })
279
+ .execute();
264
280
  ```
265
281
 
266
282
  **This pattern promotes:**
283
+
267
284
  - ✅ **Better code readability and maintainability**
268
285
  - ✅ **Self-documenting API design**
269
286
  - ✅ **Easier onboarding for new team members**
@@ -330,7 +347,7 @@ const rex = await dinoTable
330
347
  name: "Tyrannosaurus Rex",
331
348
  diet: "carnivore",
332
349
  length: 12.3,
333
- discoveryYear: 1902
350
+ discoveryYear: 1902,
334
351
  })
335
352
  .execute();
336
353
  ```
@@ -343,14 +360,11 @@ const rex = await dinoTable
343
360
  ```ts
344
361
  // Find large carnivorous dinosaurs
345
362
  const largeDinos = await dinoTable
346
- .query<Dinosaur>({
363
+ .query<Dinosaur>({
347
364
  pk: "SPECIES#trex",
348
- sk: (op) => op.beginsWith("PROFILE#")
365
+ sk: (op) => op.beginsWith("PROFILE#"),
349
366
  })
350
- .filter((op) => op.and(
351
- op.gte("length", 10),
352
- op.eq("diet", "carnivore")
353
- ))
367
+ .filter((op) => op.and(op.gte("length", 10), op.eq("diet", "carnivore")))
354
368
  .limit(10)
355
369
  .execute();
356
370
  ```
@@ -365,16 +379,14 @@ const largeDinos = await dinoTable
365
379
  ```ts
366
380
  // Update a dinosaur's classification
367
381
  await dinoTable
368
- .update<Dinosaur>({
382
+ .update<Dinosaur>({
369
383
  pk: "SPECIES#trex",
370
- sk: "PROFILE#trex"
384
+ sk: "PROFILE#trex",
371
385
  })
372
386
  .set("diet", "omnivore")
373
387
  .add("discoveryYear", 1)
374
388
  .remove("outdatedField")
375
- .condition((op) =>
376
- op.attributeExists("discoverySite")
377
- )
389
+ .condition((op) => op.attributeExists("discoverySite"))
378
390
  .execute();
379
391
  ```
380
392
 
@@ -387,13 +399,10 @@ await dinoTable
387
399
  // Perform multiple operations atomically
388
400
  await dinoTable.transaction((tx) => {
389
401
  // Move dinosaur to new enclosure
390
- dinoTable
391
- .delete({ pk: "ENCLOSURE#A", sk: "DINO#1" })
392
- .withTransaction(tx);
402
+ dinoTable.delete({ pk: "ENCLOSURE#A", sk: "DINO#1" }).withTransaction(tx);
393
403
 
394
404
  dinoTable
395
- .create({ pk: "ENCLOSURE#B", sk: "DINO#1",
396
- status: "ACTIVE" })
405
+ .create({ pk: "ENCLOSURE#B", sk: "DINO#1", status: "ACTIVE" })
397
406
  .withTransaction(tx);
398
407
  });
399
408
  ```
@@ -416,20 +425,22 @@ await dinoTable.transaction((tx) => {
416
425
 
417
426
  ```ts
418
427
  // Verbose, error-prone, no type safety
419
- await docClient.send(new QueryCommand({
420
- TableName: "JurassicPark",
421
- IndexName: "gsi1", // What does gsi1 do?
422
- KeyConditionExpression: "#pk = :pk",
423
- FilterExpression: "contains(#features, :feathers)",
424
- ExpressionAttributeNames: {
425
- "#pk": "pk",
426
- "#features": "features"
427
- },
428
- ExpressionAttributeValues: {
429
- ":pk": "SPECIES#trex",
430
- ":feathers": "feathers"
431
- }
432
- }));
428
+ await docClient.send(
429
+ new QueryCommand({
430
+ TableName: "JurassicPark",
431
+ IndexName: "gsi1", // What does gsi1 do?
432
+ KeyConditionExpression: "#pk = :pk",
433
+ FilterExpression: "contains(#features, :feathers)",
434
+ ExpressionAttributeNames: {
435
+ "#pk": "pk",
436
+ "#features": "features",
437
+ },
438
+ ExpressionAttributeValues: {
439
+ ":pk": "SPECIES#trex",
440
+ ":feathers": "feathers",
441
+ },
442
+ }),
443
+ );
433
444
  ```
434
445
 
435
446
  </td>
@@ -439,18 +450,16 @@ await docClient.send(new QueryCommand({
439
450
  // Self-documenting, type-safe, semantic
440
451
  const featheredTRexes = await dinosaurRepo.query
441
452
  .getFeatheredDinosaursBySpecies({
442
- species: "trex"
453
+ species: "trex",
443
454
  })
444
455
  .execute();
445
456
 
446
457
  // Or using table directly (still better than raw SDK)
447
458
  await dinoTable
448
459
  .query<Dinosaur>({
449
- pk: "SPECIES#trex"
460
+ pk: "SPECIES#trex",
450
461
  })
451
- .filter(op =>
452
- op.contains("features", "feathers")
453
- )
462
+ .filter((op) => op.contains("features", "feathers"))
454
463
  .execute();
455
464
  ```
456
465
 
@@ -459,6 +468,7 @@ await dinoTable
459
468
  </table>
460
469
 
461
470
  **Key improvements:**
471
+
462
472
  - 🛡️ **Type Safety**: Compile-time error checking prevents runtime failures
463
473
  - 📖 **Self-Documenting**: Code clearly expresses business intent
464
474
  - 🧠 **Reduced Complexity**: No manual expression building or attribute mapping
@@ -565,10 +575,10 @@ const DinosaurEntity = defineEntity({
565
575
  name: "Dinosaur",
566
576
  schema: dinosaurSchema,
567
577
  primaryKey: createIndex()
568
- .input(z.object({ id: z.string(), diet: z.string(), species: z.string() }))
569
- .partitionKey(({ diet }) => dinosaurPK({ diet }))
570
- // could also be .withoutSortKey() if your table doesn't use sort keys
571
- .sortKey(({ id, species }) => dinosaurSK({ species, id }))
578
+ .input(z.object({ id: z.string(), diet: z.string(), species: z.string() }))
579
+ .partitionKey(({ diet }) => dinosaurPK({ diet }))
580
+ // could also be .withoutSortKey() if your table doesn't use sort keys
581
+ .sortKey(({ id, species }) => dinosaurSK({ species, id })),
572
582
  });
573
583
  ```
574
584
 
@@ -578,36 +588,44 @@ Entities provide type-safe CRUD operations:
578
588
 
579
589
  ```ts
580
590
  // Create a new dinosaur
581
- await dinosaurRepo.create({
582
- id: "dino-001",
583
- species: "Tyrannosaurus Rex",
584
- name: "Rexy",
585
- diet: "carnivore",
586
- dangerLevel: 10,
587
- height: 5.2,
588
- weight: 7000,
589
- status: "active",
590
- }).execute();
591
+ await dinosaurRepo
592
+ .create({
593
+ id: "dino-001",
594
+ species: "Tyrannosaurus Rex",
595
+ name: "Rexy",
596
+ diet: "carnivore",
597
+ dangerLevel: 10,
598
+ height: 5.2,
599
+ weight: 7000,
600
+ status: "active",
601
+ })
602
+ .execute();
591
603
 
592
604
  // Get a dinosaur
593
- const dino = await dinosaurRepo.get({
594
- id: "dino-001",
595
- diet: "carnivore",
596
- species: "Tyrannosaurus Rex",
597
- }).execute();
605
+ const dino = await dinosaurRepo
606
+ .get({
607
+ id: "dino-001",
608
+ diet: "carnivore",
609
+ species: "Tyrannosaurus Rex",
610
+ })
611
+ .execute();
598
612
 
599
613
  // Update a dinosaur
600
- await dinosaurRepo.update(
601
- { id: "dino-001", diet: "carnivore", species: "Tyrannosaurus Rex" },
602
- { weight: 7200, status: "sick" }
603
- ).execute();
614
+ await dinosaurRepo
615
+ .update(
616
+ { id: "dino-001", diet: "carnivore", species: "Tyrannosaurus Rex" },
617
+ { weight: 7200, status: "sick" },
618
+ )
619
+ .execute();
604
620
 
605
621
  // Delete a dinosaur
606
- await dinosaurRepo.delete({
607
- id: "dino-001",
608
- diet: "carnivore",
609
- species: "Tyrannosaurus Rex",
610
- }).execute();
622
+ await dinosaurRepo
623
+ .delete({
624
+ id: "dino-001",
625
+ diet: "carnivore",
626
+ species: "Tyrannosaurus Rex",
627
+ })
628
+ .execute();
611
629
  ```
612
630
 
613
631
  #### 3. Custom Queries
@@ -629,66 +647,145 @@ const DinosaurEntity = defineEntity({
629
647
  .input(
630
648
  z.object({
631
649
  diet: z.enum(["carnivore", "herbivore", "omnivore"]),
632
- })
650
+ }),
633
651
  )
634
652
  .query(({ input, entity }) => {
635
- return entity
636
- .query({
637
- pk: dinosaurPK({diet: input.diet})
638
- });
653
+ return entity.query({
654
+ pk: dinosaurPK({ diet: input.diet }),
655
+ });
639
656
  }),
640
657
 
641
658
  findDinosaursBySpecies: createQuery
642
659
  .input(
643
660
  z.object({
644
661
  species: z.string(),
662
+ }),
663
+ )
664
+ .query(({ input, entity }) => {
665
+ return entity.scan().filter((op) => op.eq("species", input.species));
666
+ }),
667
+
668
+ getActiveCarnivores: createQuery.input(z.object({})).query(({ entity }) => {
669
+ return entity
670
+ .query({
671
+ pk: dinosaurPK({ diet: "carnivore" }),
645
672
  })
673
+ .filter((op) => op.eq("status", "active"));
674
+ }),
675
+
676
+ getDangerousDinosaursInEnclosure: createQuery
677
+ .input(
678
+ z.object({
679
+ enclosureId: z.string(),
680
+ minDangerLevel: z.number().min(1).max(10),
681
+ }),
646
682
  )
647
683
  .query(({ input, entity }) => {
648
684
  return entity
649
685
  .scan()
650
- .filter((op) => op.eq("species", input.species));
686
+ .filter((op) =>
687
+ op.and(
688
+ op.contains("enclosureId", input.enclosureId),
689
+ op.gte("dangerLevel", input.minDangerLevel),
690
+ ),
691
+ );
651
692
  }),
693
+ },
694
+ });
695
+
696
+ // Usage in business logic is now self-documenting
697
+ const carnivores = await dinosaurRepo.query
698
+ .getDinosaursByDiet({ diet: "carnivore" })
699
+ .execute();
700
+ const trexes = await dinosaurRepo.query
701
+ .findDinosaursBySpecies({ species: "Tyrannosaurus Rex" })
702
+ .execute();
703
+ const activeCarnivores = await dinosaurRepo.query
704
+ .getActiveCarnivores()
705
+ .execute();
706
+ const dangerousDinos = await dinosaurRepo.query
707
+ .getDangerousDinosaursInEnclosure({
708
+ enclosureId: "PADDOCK-A",
709
+ minDangerLevel: 8,
710
+ })
711
+ .execute();
712
+ ```
713
+
714
+ **Filter Chaining in Entity Queries**
715
+
716
+ When defining custom queries, you can chain multiple filters together. These filters are automatically combined using AND logic. Additionally, filters applied in the query definition and filters applied at execution time are both respected:
652
717
 
653
- getActiveCarnivores: createQuery
718
+ ```ts
719
+ const DinosaurEntity = defineEntity({
720
+ name: "Dinosaur",
721
+ schema: dinosaurSchema,
722
+ primaryKey,
723
+ queries: {
724
+ // Multiple filters are combined with AND logic
725
+ getHealthyActiveDinosaurs: createQuery
654
726
  .input(z.object({}))
655
727
  .query(({ entity }) => {
656
728
  return entity
657
- .query({
658
- pk: dinosaurPK({diet: "carnivore"})
659
- })
660
- .filter((op) => op.eq("status", "active"));
729
+ .scan()
730
+ .filter((op) => op.eq("status", "active"))
731
+ .filter((op) => op.gt("health", 80))
732
+ .filter((op) => op.attributeExists("lastFed"));
661
733
  }),
662
734
 
663
- getDangerousDinosaursInEnclosure: createQuery
735
+ // Complex filter chaining with conditional logic
736
+ getDinosaursForVetCheck: createQuery
664
737
  .input(
665
738
  z.object({
666
- enclosureId: z.string(),
667
- minDangerLevel: z.number().min(1).max(10),
668
- })
739
+ minHealth: z.number().optional(),
740
+ requiredTag: z.string().optional(),
741
+ }),
742
+ )
743
+ .query(({ input, entity }) => {
744
+ const builder = entity.scan();
745
+
746
+ // Always filter for dinosaurs that need vet attention
747
+ builder.filter((op) => op.lt("health", 90));
748
+
749
+ // Conditionally apply additional filters
750
+ if (input.minHealth) {
751
+ builder.filter((op) => op.gt("health", input.minHealth));
752
+ }
753
+
754
+ if (input.requiredTag) {
755
+ builder.filter((op) => op.contains("tags", input.requiredTag));
756
+ }
757
+
758
+ return builder;
759
+ }),
760
+
761
+ // Pre-applied filters combined with execution-time filters
762
+ getActiveDinosaursByDiet: createQuery
763
+ .input(
764
+ z.object({
765
+ diet: z.enum(["carnivore", "herbivore", "omnivore"]),
766
+ }),
669
767
  )
670
768
  .query(({ input, entity }) => {
769
+ // Apply a filter in the query definition
671
770
  return entity
672
771
  .scan()
673
- .filter((op) => op.and(
674
- op.contains("enclosureId", input.enclosureId),
675
- op.gte("dangerLevel", input.minDangerLevel)
676
- ));
772
+ .filter((op) => op.eq("diet", input.diet))
773
+ .filter((op) => op.eq("status", "active"));
677
774
  }),
678
775
  },
679
776
  });
680
777
 
681
- // Usage in business logic is now self-documenting
682
- const carnivores = await dinosaurRepo.query.getDinosaursByDiet({ diet: "carnivore" }).execute();
683
- const trexes = await dinosaurRepo.query.findDinosaursBySpecies({ species: "Tyrannosaurus Rex" }).execute();
684
- const activeCarnivores = await dinosaurRepo.query.getActiveCarnivores().execute();
685
- const dangerousDinos = await dinosaurRepo.query.getDangerousDinosaursInEnclosure({
686
- enclosureId: "PADDOCK-A",
687
- minDangerLevel: 8
688
- }).execute();
778
+ // Usage with additional execution-time filters
779
+ // Both the pre-applied filters (diet = "carnivore", status = "active")
780
+ // and the execution-time filter (health > 50) will be applied
781
+ const healthyActiveCarnivores = await dinosaurRepo.query
782
+ .getActiveDinosaursByDiet({ diet: "carnivore" })
783
+ .filter((op) => op.gt("health", 50))
784
+ .execute();
689
785
  ```
690
786
 
691
787
  **Benefits of semantic naming:**
788
+
692
789
  - 🎯 **Clear Intent**: Method names immediately convey what data you're accessing
693
790
  - 📖 **Self-Documenting**: No need to look up what `gsi1` or `gsi2` does
694
791
  - 🧠 **Reduced Cognitive Load**: Developers can focus on business logic, not database details
@@ -703,11 +800,11 @@ Define GSI access patterns with **meaningful names** that reflect their business
703
800
  import { createIndex } from "dyno-table/entity";
704
801
 
705
802
  // Define GSI templates with descriptive names that reflect their purpose
706
- const speciesPK = partitionKey`SPECIES#${"species"}`
707
- const speciesSK = sortKey`DINOSAUR#${"id"}`
803
+ const speciesPK = partitionKey`SPECIES#${"species"}`;
804
+ const speciesSK = sortKey`DINOSAUR#${"id"}`;
708
805
 
709
- const enclosurePK = partitionKey`ENCLOSURE#${"enclosureId"}`
710
- const enclosureSK = sortKey`DANGER#${"dangerLevel"}#ID#${"id"}`
806
+ const enclosurePK = partitionKey`ENCLOSURE#${"enclosureId"}`;
807
+ const enclosureSK = sortKey`DANGER#${"dangerLevel"}#ID#${"id"}`;
711
808
 
712
809
  // Create indexes with meaningful names
713
810
  const speciesIndex = createIndex()
@@ -735,12 +832,12 @@ const DinosaurEntity = defineEntity({
735
832
  .input(
736
833
  z.object({
737
834
  species: z.string(),
738
- })
835
+ }),
739
836
  )
740
837
  .query(({ input, entity }) => {
741
838
  return entity
742
839
  .query({
743
- pk: speciesPK({species: input.species}),
840
+ pk: speciesPK({ species: input.species }),
744
841
  })
745
842
  .useIndex("gsi1"); // Generic GSI name for table flexibility
746
843
  }),
@@ -749,12 +846,12 @@ const DinosaurEntity = defineEntity({
749
846
  .input(
750
847
  z.object({
751
848
  enclosureId: z.string(),
752
- })
849
+ }),
753
850
  )
754
851
  .query(({ input, entity }) => {
755
852
  return entity
756
853
  .query({
757
- pk: enclosurePK({enclosureId: input.enclosureId}),
854
+ pk: enclosurePK({ enclosureId: input.enclosureId }),
758
855
  })
759
856
  .useIndex("gsi2");
760
857
  }),
@@ -764,13 +861,13 @@ const DinosaurEntity = defineEntity({
764
861
  z.object({
765
862
  enclosureId: z.string(),
766
863
  minDangerLevel: z.number().min(1).max(10),
767
- })
864
+ }),
768
865
  )
769
866
  .query(({ input, entity }) => {
770
867
  return entity
771
868
  .query({
772
- pk: enclosurePK({enclosureId: input.enclosureId}),
773
- sk: (op) => op.gte(`DANGER#${input.minDangerLevel}`)
869
+ pk: enclosurePK({ enclosureId: input.enclosureId }),
870
+ sk: (op) => op.gte(`DANGER#${input.minDangerLevel}`),
774
871
  })
775
872
  .useIndex("gsi2")
776
873
  .sortDescending(); // Get most dangerous first
@@ -779,15 +876,22 @@ const DinosaurEntity = defineEntity({
779
876
  });
780
877
 
781
878
  // Usage is now self-documenting
782
- const trexes = await dinosaurRepo.query.getDinosaursBySpecies({ species: "Tyrannosaurus Rex" }).execute();
783
- const paddockADinos = await dinosaurRepo.query.getDinosaursByEnclosure({ enclosureId: "PADDOCK-A" }).execute();
784
- const dangerousDinos = await dinosaurRepo.query.getMostDangerousInEnclosure({
785
- enclosureId: "PADDOCK-A",
786
- minDangerLevel: 8
787
- }).execute();
879
+ const trexes = await dinosaurRepo.query
880
+ .getDinosaursBySpecies({ species: "Tyrannosaurus Rex" })
881
+ .execute();
882
+ const paddockADinos = await dinosaurRepo.query
883
+ .getDinosaursByEnclosure({ enclosureId: "PADDOCK-A" })
884
+ .execute();
885
+ const dangerousDinos = await dinosaurRepo.query
886
+ .getMostDangerousInEnclosure({
887
+ enclosureId: "PADDOCK-A",
888
+ minDangerLevel: 8,
889
+ })
890
+ .execute();
788
891
  ```
789
892
 
790
893
  **Key principles for access pattern naming:**
894
+
791
895
  - 🎯 **Generic GSI Names**: Keep table-level GSI names generic (`gsi1`, `gsi2`) for flexibility across entities
792
896
  - 🔍 **Business-Focused**: Method names should reflect what the query achieves, not how it works
793
897
  - 📚 **Self-Documenting**: Anyone reading the code should understand the purpose immediately
@@ -828,11 +932,11 @@ type Dinosaur = z.infer<typeof dinosaurSchema>;
828
932
  const dinosaurPK = partitionKey`DINOSAUR#${"id"}`;
829
933
  const dinosaurSK = sortKey`STATUS#${"status"}`;
830
934
 
831
- const gsi1PK = partitionKey`SPECIES#${"species"}`
832
- const gsi1SK = sortKey`DINOSAUR#${"id"}`
935
+ const gsi1PK = partitionKey`SPECIES#${"species"}`;
936
+ const gsi1SK = sortKey`DINOSAUR#${"id"}`;
833
937
 
834
- const gsi2PK = partitionKey`ENCLOSURE#${"enclosureId"}`
835
- const gsi2SK = sortKey`DINOSAUR#${"id"}`
938
+ const gsi2PK = partitionKey`ENCLOSURE#${"enclosureId"}`;
939
+ const gsi2SK = sortKey`DINOSAUR#${"id"}`;
836
940
 
837
941
  // Create a primary index
838
942
  const primaryKey = createIndex()
@@ -869,11 +973,12 @@ const DinosaurEntity = defineEntity({
869
973
  schema: dinosaurSchema,
870
974
  primaryKey,
871
975
  indexes: {
872
- // These keys need to be named after the name of the GSI that is defined in your table instance
976
+ // These keys need to be named after the name of the GSI that is defined in your table instance
873
977
  gsi1: speciesIndex,
874
978
  gsi2: enclosureIndex,
875
979
  // Example of a read-only index for audit trail data
876
980
  gsi3: auditIndex, // This index will never be updated during entity update operations
981
+ // unless explicitly forced with .forceIndexRebuild('gsi3')
877
982
  },
878
983
  queries: {
879
984
  // ✅ Semantic method names that describe business intent
@@ -881,7 +986,7 @@ const DinosaurEntity = defineEntity({
881
986
  .input(
882
987
  z.object({
883
988
  species: z.string(),
884
- })
989
+ }),
885
990
  )
886
991
  .query(({ input, entity }) => {
887
992
  return entity
@@ -895,7 +1000,7 @@ const DinosaurEntity = defineEntity({
895
1000
  .input(
896
1001
  z.object({
897
1002
  enclosureId: z.string(),
898
- })
1003
+ }),
899
1004
  )
900
1005
  .query(({ input, entity }) => {
901
1006
  return entity
@@ -910,7 +1015,7 @@ const DinosaurEntity = defineEntity({
910
1015
  z.object({
911
1016
  enclosureId: z.string(),
912
1017
  minDangerLevel: z.number().int().min(1).max(10),
913
- })
1018
+ }),
914
1019
  )
915
1020
  .query(({ input, entity }) => {
916
1021
  return entity
@@ -945,15 +1050,19 @@ async function main() {
945
1050
  .execute();
946
1051
 
947
1052
  // Query dinosaurs by species using semantic method names
948
- const trexes = await dinosaurRepo.query.getDinosaursBySpecies({
949
- species: "Tyrannosaurus Rex"
950
- }).execute();
1053
+ const trexes = await dinosaurRepo.query
1054
+ .getDinosaursBySpecies({
1055
+ species: "Tyrannosaurus Rex",
1056
+ })
1057
+ .execute();
951
1058
 
952
1059
  // Query dangerous dinosaurs in an enclosure
953
- const dangerousDinos = await dinosaurRepo.query.getDangerousDinosaursInEnclosure({
954
- enclosureId: "enc-001",
955
- minDangerLevel: 8,
956
- }).execute();
1060
+ const dangerousDinos = await dinosaurRepo.query
1061
+ .getDangerousDinosaursInEnclosure({
1062
+ enclosureId: "enc-001",
1063
+ minDangerLevel: 8,
1064
+ })
1065
+ .execute();
957
1066
  }
958
1067
  ```
959
1068
 
@@ -962,6 +1071,7 @@ async function main() {
962
1071
  ### Transactional Operations
963
1072
 
964
1073
  **Safe dinosaur transfer between enclosures**
1074
+
965
1075
  ```ts
966
1076
  // Start a transaction session for transferring a T-Rex to a new enclosure
967
1077
  // Critical for safety: All operations must succeed or none will be applied
@@ -972,53 +1082,57 @@ await dinoTable.transaction(async (tx) => {
972
1082
  // STEP 1: Check if destination enclosure is ready and compatible with the dinosaur
973
1083
  // We must verify the enclosure is prepared and suitable for a carnivore
974
1084
  await dinoTable
975
- .conditionCheck({
976
- pk: "ENCLOSURE#B", // Target enclosure B
977
- sk: "STATUS" // Check the enclosure status record
1085
+ .conditionCheck({
1086
+ pk: "ENCLOSURE#B", // Target enclosure B
1087
+ sk: "STATUS", // Check the enclosure status record
978
1088
  })
979
- .condition(op => op.and(
980
- op.eq("status", "READY"), // Enclosure must be in READY state
981
- op.eq("diet", "Carnivore") // Must support carnivorous dinosaurs
982
- ))
1089
+ .condition((op) =>
1090
+ op.and(
1091
+ op.eq("status", "READY"), // Enclosure must be in READY state
1092
+ op.eq("diet", "Carnivore"), // Must support carnivorous dinosaurs
1093
+ ),
1094
+ )
983
1095
  .withTransaction(tx);
984
1096
 
985
1097
  // STEP 2: Remove dinosaur from current enclosure
986
1098
  // Only proceed if the dinosaur is healthy enough for transfer
987
1099
  await dinoTable
988
- .delete<Dinosaur>({
989
- pk: "ENCLOSURE#A", // Source enclosure A
990
- sk: "DINO#001" // T-Rex with ID 001
1100
+ .delete<Dinosaur>({
1101
+ pk: "ENCLOSURE#A", // Source enclosure A
1102
+ sk: "DINO#001", // T-Rex with ID 001
991
1103
  })
992
- .condition(op => op.and(
993
- op.eq("status", "HEALTHY"), // Dinosaur must be in HEALTHY state
994
- op.gte("health", 80) // Health must be at least 80%
995
- ))
1104
+ .condition((op) =>
1105
+ op.and(
1106
+ op.eq("status", "HEALTHY"), // Dinosaur must be in HEALTHY state
1107
+ op.gte("health", 80), // Health must be at least 80%
1108
+ ),
1109
+ )
996
1110
  .withTransaction(tx);
997
1111
 
998
1112
  // STEP 3: Add dinosaur to new enclosure
999
1113
  // Create a fresh record in the destination enclosure
1000
1114
  await dinoTable
1001
1115
  .create<Dinosaur>({
1002
- pk: "ENCLOSURE#B", // Destination enclosure B
1003
- sk: "DINO#001", // Same dinosaur ID for tracking
1004
- name: "Rex", // Dinosaur name
1005
- species: "Tyrannosaurus", // Species classification
1006
- diet: "Carnivore", // Dietary requirements
1007
- status: "HEALTHY", // Current health status
1008
- health: 100, // Reset health to 100% after transfer
1009
- enclosureId: "B", // Update enclosure reference
1010
- lastFed: new Date().toISOString() // Reset feeding clock
1116
+ pk: "ENCLOSURE#B", // Destination enclosure B
1117
+ sk: "DINO#001", // Same dinosaur ID for tracking
1118
+ name: "Rex", // Dinosaur name
1119
+ species: "Tyrannosaurus", // Species classification
1120
+ diet: "Carnivore", // Dietary requirements
1121
+ status: "HEALTHY", // Current health status
1122
+ health: 100, // Reset health to 100% after transfer
1123
+ enclosureId: "B", // Update enclosure reference
1124
+ lastFed: new Date().toISOString(), // Reset feeding clock
1011
1125
  })
1012
1126
  .withTransaction(tx);
1013
1127
 
1014
1128
  // STEP 4: Update enclosure occupancy tracking
1015
1129
  // Keep accurate count of dinosaurs in each enclosure
1016
1130
  await dinoTable
1017
- .update<Dinosaur>({
1018
- pk: "ENCLOSURE#B", // Target enclosure B
1019
- sk: "OCCUPANCY" // Occupancy tracking record
1131
+ .update<Dinosaur>({
1132
+ pk: "ENCLOSURE#B", // Target enclosure B
1133
+ sk: "OCCUPANCY", // Occupancy tracking record
1020
1134
  })
1021
- .add("currentOccupants", 1) // Increment occupant count
1135
+ .add("currentOccupants", 1) // Increment occupant count
1022
1136
  .set("lastUpdated", new Date().toISOString()) // Update timestamp
1023
1137
  .withTransaction(tx);
1024
1138
  });
@@ -1031,13 +1145,13 @@ await dinoTable.transaction(
1031
1145
  // Record that the dinosaur has been fed and update its health metrics
1032
1146
  await dinoTable
1033
1147
  .update<Dinosaur>({
1034
- pk: "ENCLOSURE#D", // Herbivore enclosure D
1035
- sk: "DINO#003" // Stegosaurus with ID 003
1148
+ pk: "ENCLOSURE#D", // Herbivore enclosure D
1149
+ sk: "DINO#003", // Stegosaurus with ID 003
1036
1150
  })
1037
1151
  .set({
1038
- status: "HEALTHY", // Update health status
1152
+ status: "HEALTHY", // Update health status
1039
1153
  lastFed: new Date().toISOString(), // Record feeding time
1040
- health: 100 // Reset health to 100%
1154
+ health: 100, // Reset health to 100%
1041
1155
  })
1042
1156
  .deleteElementsFromSet("tags", ["needs_feeding"]) // Remove feeding alert tag
1043
1157
  .withTransaction(tx);
@@ -1046,38 +1160,43 @@ await dinoTable.transaction(
1046
1160
  // Schedule next feeding time for tomorrow
1047
1161
  await dinoTable
1048
1162
  .update<Dinosaur>({
1049
- pk: "ENCLOSURE#D", // Same herbivore enclosure
1050
- sk: "SCHEDULE" // Feeding schedule record
1163
+ pk: "ENCLOSURE#D", // Same herbivore enclosure
1164
+ sk: "SCHEDULE", // Feeding schedule record
1051
1165
  })
1052
- .set("nextFeedingTime", new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString()) // 24 hours from now
1166
+ .set(
1167
+ "nextFeedingTime",
1168
+ new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString(),
1169
+ ) // 24 hours from now
1053
1170
  .withTransaction(tx);
1054
1171
  },
1055
1172
  {
1056
1173
  // Transaction options for tracking and idempotency
1057
1174
  clientRequestToken: "feeding-session-001", // Prevents duplicate feeding operations
1058
- returnConsumedCapacity: "TOTAL" // Track capacity usage for park operations
1059
- }
1175
+ returnConsumedCapacity: "TOTAL", // Track capacity usage for park operations
1176
+ },
1060
1177
  );
1061
1178
  ```
1062
1179
 
1063
-
1064
1180
  ### Pagination Made Simple
1065
1181
 
1066
1182
  **Efficient dinosaur record browsing for park management**
1183
+
1067
1184
  ```ts
1068
1185
  // SCENARIO 1: Herbivore health monitoring with pagination
1069
1186
  // Create a paginator for viewing healthy herbivores in manageable chunks
1070
1187
  // Perfect for veterinary staff doing routine health checks
1071
1188
  const healthyHerbivores = dinoTable
1072
1189
  .query<Dinosaur>({
1073
- pk: "DIET#herbivore", // Target all herbivorous dinosaurs
1074
- sk: op => op.beginsWith("STATUS#HEALTHY") // Only those with HEALTHY status
1190
+ pk: "DIET#herbivore", // Target all herbivorous dinosaurs
1191
+ sk: (op) => op.beginsWith("STATUS#HEALTHY"), // Only those with HEALTHY status
1075
1192
  })
1076
- .filter((op) => op.and(
1077
- op.gte("health", 90), // Only those with excellent health (90%+)
1078
- op.attributeExists("lastFed") // Must have feeding records
1079
- ))
1080
- .paginate(5); // Process in small batches of 5 dinosaurs
1193
+ .filter((op) =>
1194
+ op.and(
1195
+ op.gte("health", 90), // Only those with excellent health (90%+)
1196
+ op.attributeExists("lastFed"), // Must have feeding records
1197
+ ),
1198
+ )
1199
+ .paginate(5); // Process in small batches of 5 dinosaurs
1081
1200
 
1082
1201
  // Iterate through all pages of results - useful for processing large datasets
1083
1202
  // without loading everything into memory at once
@@ -1085,11 +1204,15 @@ console.log("🦕 Beginning herbivore health inspection rounds...");
1085
1204
  while (healthyHerbivores.hasNextPage()) {
1086
1205
  // Get the next page of dinosaurs
1087
1206
  const page = await healthyHerbivores.getNextPage();
1088
- console.log(`Checking herbivores page ${page.page}, found ${page.items.length} dinosaurs`);
1207
+ console.log(
1208
+ `Checking herbivores page ${page.page}, found ${page.items.length} dinosaurs`,
1209
+ );
1089
1210
 
1090
1211
  // Process each dinosaur in the current page
1091
- page.items.forEach(dino => {
1092
- console.log(`${dino.name}: Health ${dino.health}%, Last fed: ${dino.lastFed}`);
1212
+ page.items.forEach((dino) => {
1213
+ console.log(
1214
+ `${dino.name}: Health ${dino.health}%, Last fed: ${dino.lastFed}`,
1215
+ );
1093
1216
  // In a real app, you might update health records or schedule next checkup
1094
1217
  });
1095
1218
  }
@@ -1099,12 +1222,12 @@ while (healthyHerbivores.hasNextPage()) {
1099
1222
  // This approach loads all matching items into memory
1100
1223
  const carnivoreSchedule = await dinoTable
1101
1224
  .query<Dinosaur>({
1102
- pk: "DIET#carnivore", // Target all carnivorous dinosaurs
1103
- sk: op => op.beginsWith("ENCLOSURE#") // Organized by enclosure
1225
+ pk: "DIET#carnivore", // Target all carnivorous dinosaurs
1226
+ sk: (op) => op.beginsWith("ENCLOSURE#"), // Organized by enclosure
1104
1227
  })
1105
- .filter(op => op.attributeExists("lastFed")) // Only those with feeding records
1106
- .paginate(10) // Process in pages of 10
1107
- .getAllPages(); // But collect all results at once
1228
+ .filter((op) => op.attributeExists("lastFed")) // Only those with feeding records
1229
+ .paginate(10) // Process in pages of 10
1230
+ .getAllPages(); // But collect all results at once
1108
1231
 
1109
1232
  console.log(`Scheduling feeding for ${carnivoreSchedule.length} carnivores`);
1110
1233
  // Now we can sort and organize feeding times based on species, size, etc.
@@ -1112,17 +1235,17 @@ console.log(`Scheduling feeding for ${carnivoreSchedule.length} carnivores`);
1112
1235
  // SCENARIO 3: Visitor information kiosk with limited display
1113
1236
  // Create a paginated view for the public-facing dinosaur information kiosk
1114
1237
  const visitorKiosk = dinoTable
1115
- .query<Dinosaur>({
1116
- pk: "VISITOR_VIEW", // Special partition for visitor-facing data
1117
- sk: op => op.beginsWith("SPECIES#") // Organized by species
1238
+ .query<Dinosaur>({
1239
+ pk: "VISITOR_VIEW", // Special partition for visitor-facing data
1240
+ sk: (op) => op.beginsWith("SPECIES#"), // Organized by species
1118
1241
  })
1119
- .filter(op => op.eq("status", "ON_DISPLAY")) // Only show dinosaurs currently on display
1120
- .limit(12) // Show maximum 12 dinosaurs total
1121
- .paginate(4); // Display 4 at a time for easy viewing
1242
+ .filter((op) => op.eq("status", "ON_DISPLAY")) // Only show dinosaurs currently on display
1243
+ .limit(12) // Show maximum 12 dinosaurs total
1244
+ .paginate(4); // Display 4 at a time for easy viewing
1122
1245
 
1123
1246
  // Get first page for initial kiosk display
1124
1247
  const firstPage = await visitorKiosk.getNextPage();
1125
- console.log(`🦖 Now showing: ${firstPage.items.map(d => d.name).join(", ")}`);
1248
+ console.log(`🦖 Now showing: ${firstPage.items.map((d) => d.name).join(", ")}`);
1126
1249
  // Visitors can press "Next" to see more dinosaurs in the collection
1127
1250
  ```
1128
1251
 
@@ -1132,15 +1255,15 @@ Dyno-table provides comprehensive query methods that match DynamoDB's capabiliti
1132
1255
 
1133
1256
  ### Comparison Operators
1134
1257
 
1135
- | Operation | Method Example | Generated Expression |
1136
- |---------------------------|---------------------------------------------------------|-----------------------------------|
1137
- | **Equals** | `.filter(op => op.eq("status", "ACTIVE"))` | `status = :v1` |
1138
- | **Not Equals** | `.filter(op => op.ne("status", "DELETED"))` | `status <> :v1` |
1139
- | **Less Than** | `.filter(op => op.lt("age", 18))` | `age < :v1` |
1140
- | **Less Than or Equal** | `.filter(op => op.lte("score", 100))` | `score <= :v1` |
1141
- | **Greater Than** | `.filter(op => op.gt("price", 50))` | `price > :v1` |
1142
- | **Greater Than or Equal** | `.filter(op => op.gte("rating", 4))` | `rating >= :v1` |
1143
- | **Between** | `.filter(op => op.between("age", 18, 65))` | `age BETWEEN :v1 AND :v2` |
1258
+ | Operation | Method Example | Generated Expression |
1259
+ | ------------------------- | ------------------------------------------------------------ | --------------------------------- |
1260
+ | **Equals** | `.filter(op => op.eq("status", "ACTIVE"))` | `status = :v1` |
1261
+ | **Not Equals** | `.filter(op => op.ne("status", "DELETED"))` | `status <> :v1` |
1262
+ | **Less Than** | `.filter(op => op.lt("age", 18))` | `age < :v1` |
1263
+ | **Less Than or Equal** | `.filter(op => op.lte("score", 100))` | `score <= :v1` |
1264
+ | **Greater Than** | `.filter(op => op.gt("price", 50))` | `price > :v1` |
1265
+ | **Greater Than or Equal** | `.filter(op => op.gte("rating", 4))` | `rating >= :v1` |
1266
+ | **Between** | `.filter(op => op.between("age", 18, 65))` | `age BETWEEN :v1 AND :v2` |
1144
1267
  | **In Array** | `.filter(op => op.inArray("status", ["ACTIVE", "PENDING"]))` | `status IN (:v1, :v2)` |
1145
1268
  | **Begins With** | `.filter(op => op.beginsWith("email", "@example.com"))` | `begins_with(email, :v1)` |
1146
1269
  | **Contains** | `.filter(op => op.contains("tags", "important"))` | `contains(tags, :v1)` |
@@ -1148,10 +1271,59 @@ Dyno-table provides comprehensive query methods that match DynamoDB's capabiliti
1148
1271
  | **Attribute Not Exists** | `.filter(op => op.attributeNotExists("deletedAt"))` | `attribute_not_exists(deletedAt)` |
1149
1272
  | **Nested Attributes** | `.filter(op => op.eq("address.city", "London"))` | `address.city = :v1` |
1150
1273
 
1274
+ ### Filter Chaining
1275
+
1276
+ Filters can be chained together using multiple `.filter()` calls. When multiple filters are applied, they are automatically combined using AND logic:
1277
+
1278
+ ```ts
1279
+ // Chaining multiple filters - these are combined with AND
1280
+ const result = await table
1281
+ .query({ pk: "USER#123" })
1282
+ .filter((op) => op.eq("status", "ACTIVE"))
1283
+ .filter((op) => op.gt("age", 18))
1284
+ .filter((op) => op.contains("tags", "premium"))
1285
+ .execute();
1286
+
1287
+ // This is equivalent to:
1288
+ const result = await table
1289
+ .query({ pk: "USER#123" })
1290
+ .filter((op) =>
1291
+ op.and(
1292
+ op.eq("status", "ACTIVE"),
1293
+ op.gt("age", 18),
1294
+ op.contains("tags", "premium"),
1295
+ ),
1296
+ )
1297
+ .execute();
1298
+ ```
1299
+
1300
+ Both approaches produce the same DynamoDB expression: `status = :v1 AND age > :v2 AND contains(tags, :v3)`
1301
+
1302
+ Filter chaining provides a more readable way to build complex conditions, especially when filters are applied conditionally:
1303
+
1304
+ ```ts
1305
+ const builder = table.query({ pk: "USER#123" });
1306
+
1307
+ // Conditionally apply filters
1308
+ if (statusFilter) {
1309
+ builder.filter((op) => op.eq("status", statusFilter));
1310
+ }
1311
+
1312
+ if (minAge) {
1313
+ builder.filter((op) => op.gt("age", minAge));
1314
+ }
1315
+
1316
+ if (requiredTag) {
1317
+ builder.filter((op) => op.contains("tags", requiredTag));
1318
+ }
1319
+
1320
+ const result = await builder.execute();
1321
+ ```
1322
+
1151
1323
  ### Logical Operators
1152
1324
 
1153
1325
  | Operation | Method Example | Generated Expression |
1154
- |-----------|-----------------------------------------------------------------------------------|--------------------------------|
1326
+ | --------- | --------------------------------------------------------------------------------- | ------------------------------ |
1155
1327
  | **AND** | `.filter(op => op.and(op.eq("status", "ACTIVE"), op.gt("age", 18)))` | `status = :v1 AND age > :v2` |
1156
1328
  | **OR** | `.filter(op => op.or(op.eq("status", "PENDING"), op.eq("status", "PROCESSING")))` | `status = :v1 OR status = :v2` |
1157
1329
  | **NOT** | `.filter(op => op.not(op.eq("status", "DELETED")))` | `NOT status = :v1` |
@@ -1159,12 +1331,13 @@ Dyno-table provides comprehensive query methods that match DynamoDB's capabiliti
1159
1331
  ### Query Operations
1160
1332
 
1161
1333
  | Operation | Method Example | Generated Expression |
1162
- |--------------------------|--------------------------------------------------------------------------------------|---------------------------------------|
1334
+ | ------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------- |
1163
1335
  | **Partition Key Equals** | `.query({ pk: "USER#123" })` | `pk = :pk` |
1164
1336
  | **Sort Key Begins With** | `.query({ pk: "USER#123", sk: op => op.beginsWith("ORDER#2023") })` | `pk = :pk AND begins_with(sk, :v1)` |
1165
1337
  | **Sort Key Between** | `.query({ pk: "USER#123", sk: op => op.between("ORDER#2023-01", "ORDER#2023-12") })` | `pk = :pk AND sk BETWEEN :v1 AND :v2` |
1166
1338
 
1167
1339
  Additional query options:
1340
+
1168
1341
  ```ts
1169
1342
  // Sort order
1170
1343
  const ascending = await table
@@ -1184,16 +1357,13 @@ const partial = await table
1184
1357
  .execute();
1185
1358
 
1186
1359
  // Limit results
1187
- const limited = await table
1188
- .query({ pk: "USER#123" })
1189
- .limit(10)
1190
- .execute();
1360
+ const limited = await table.query({ pk: "USER#123" }).limit(10).execute();
1191
1361
  ```
1192
1362
 
1193
1363
  ### Put Operations
1194
1364
 
1195
1365
  | Operation | Method Example | Description |
1196
- |---------------------|---------------------------------------------------------------------|------------------------------------------------------------------------|
1366
+ | ------------------- | ------------------------------------------------------------------- | ---------------------------------------------------------------------- |
1197
1367
  | **Create New Item** | `.create<Dinosaur>({ pk: "SPECIES#trex", sk: "PROFILE#001", ... })` | Creates a new item with a condition to ensure it doesn't already exist |
1198
1368
  | **Put Item** | `.put<Dinosaur>({ pk: "SPECIES#trex", sk: "PROFILE#001", ... })` | Creates or replaces an item |
1199
1369
  | **With Condition** | `.put(item).condition(op => op.attributeNotExists("pk"))` | Adds a condition that must be satisfied |
@@ -1203,42 +1373,50 @@ const limited = await table
1203
1373
  Control what data is returned from put operations:
1204
1374
 
1205
1375
  | Option | Description | Example |
1206
- |----------------|--------------------------------------------------------------------------------------------------------------------|---------------------------------------------------|
1376
+ | -------------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------- |
1207
1377
  | **NONE** | Default. No return value. | `.put(item).returnValues("NONE").execute()` |
1208
1378
  | **ALL_OLD** | Returns the item's previous state if it existed. (Does not consume any RCU and returns strongly consistent values) | `.put(item).returnValues("ALL_OLD").execute()` |
1209
1379
  | **CONSISTENT** | Performs a consistent GET operation after the put to retrieve the item's new state. (Does consume RCU) | `.put(item).returnValues("CONSISTENT").execute()` |
1210
1380
 
1211
1381
  ```ts
1212
1382
  // Create with no return value (default)
1213
- await table.put<Dinosaur>({
1214
- pk: "SPECIES#trex",
1215
- sk: "PROFILE#001",
1216
- name: "Tyrannosaurus Rex",
1217
- diet: "carnivore"
1218
- }).execute();
1383
+ await table
1384
+ .put<Dinosaur>({
1385
+ pk: "SPECIES#trex",
1386
+ sk: "PROFILE#001",
1387
+ name: "Tyrannosaurus Rex",
1388
+ diet: "carnivore",
1389
+ })
1390
+ .execute();
1219
1391
 
1220
1392
  // Create and return the newly created item
1221
- const newDino = await table.put<Dinosaur>({
1222
- pk: "SPECIES#trex",
1223
- sk: "PROFILE#002",
1224
- name: "Tyrannosaurus Rex",
1225
- diet: "carnivore"
1226
- }).returnValues("CONSISTENT").execute();
1393
+ const newDino = await table
1394
+ .put<Dinosaur>({
1395
+ pk: "SPECIES#trex",
1396
+ sk: "PROFILE#002",
1397
+ name: "Tyrannosaurus Rex",
1398
+ diet: "carnivore",
1399
+ })
1400
+ .returnValues("CONSISTENT")
1401
+ .execute();
1227
1402
 
1228
1403
  // Update with condition and get previous values
1229
- const oldDino = await table.put<Dinosaur>({
1230
- pk: "SPECIES#trex",
1231
- sk: "PROFILE#001",
1232
- name: "Tyrannosaurus Rex",
1233
- diet: "omnivore", // Updated diet
1234
- discoveryYear: 1905
1235
- }).returnValues("ALL_OLD").execute();
1404
+ const oldDino = await table
1405
+ .put<Dinosaur>({
1406
+ pk: "SPECIES#trex",
1407
+ sk: "PROFILE#001",
1408
+ name: "Tyrannosaurus Rex",
1409
+ diet: "omnivore", // Updated diet
1410
+ discoveryYear: 1905,
1411
+ })
1412
+ .returnValues("ALL_OLD")
1413
+ .execute();
1236
1414
  ```
1237
1415
 
1238
1416
  ### Update Operations
1239
1417
 
1240
1418
  | Operation | Method Example | Generated Expression |
1241
- |----------------------|-------------------------------------------------------|----------------------|
1419
+ | -------------------- | ----------------------------------------------------- | -------------------- |
1242
1420
  | **Set Attributes** | `.update(key).set("name", "New Name")` | `SET #name = :v1` |
1243
1421
  | **Add to Number** | `.update(key).add("score", 10)` | `ADD #score :v1` |
1244
1422
  | **Remove Attribute** | `.update(key).remove("temporary")` | `REMOVE #temporary` |
@@ -1248,32 +1426,94 @@ const oldDino = await table.put<Dinosaur>({
1248
1426
 
1249
1427
  The library supports a comprehensive set of type-safe condition operators:
1250
1428
 
1251
- | Category | Operators | Example |
1252
- |----------------|----------------------------------------------|-------------------------------------------------------------------------|
1253
- | **Comparison** | `eq`, `ne`, `lt`, `lte`, `gt`, `gte` | `.condition(op => op.gt("age", 18))` |
1429
+ | Category | Operators | Example |
1430
+ | -------------- | ---------------------------------------------- | ----------------------------------------------------------------------- |
1431
+ | **Comparison** | `eq`, `ne`, `lt`, `lte`, `gt`, `gte` | `.condition(op => op.gt("age", 18))` |
1254
1432
  | **String/Set** | `between`, `beginsWith`, `contains`, `inArray` | `.condition(op => op.inArray("status", ["active", "pending"]))` |
1255
- | **Existence** | `attributeExists`, `attributeNotExists` | `.condition(op => op.attributeExists("email"))` |
1256
- | **Logical** | `and`, `or`, `not` | `.condition(op => op.and(op.eq("status", "active"), op.gt("age", 18)))` |
1433
+ | **Existence** | `attributeExists`, `attributeNotExists` | `.condition(op => op.attributeExists("email"))` |
1434
+ | **Logical** | `and`, `or`, `not` | `.condition(op => op.and(op.eq("status", "active"), op.gt("age", 18)))` |
1257
1435
 
1258
1436
  All operators are type-safe and will provide proper TypeScript inference for nested attributes.
1259
1437
 
1260
1438
  #### Multiple Operations
1439
+
1261
1440
  Operations can be combined in a single update:
1441
+
1262
1442
  ```ts
1263
1443
  const result = await table
1264
1444
  .update({ pk: "USER#123", sk: "PROFILE" })
1265
1445
  .set("name", "Updated Name")
1266
1446
  .add("loginCount", 1)
1267
1447
  .remove("temporaryFlag")
1268
- .condition(op => op.attributeExists("email"))
1448
+ .condition((op) => op.attributeExists("email"))
1449
+ .execute();
1450
+ ```
1451
+
1452
+ #### Force Rebuilding Read-Only Indexes
1453
+
1454
+ When working with entities, some indexes may be marked as read-only to prevent any updates. However, you can force these indexes to be rebuilt during updates using the `forceIndexRebuild()` method:
1455
+
1456
+ ```ts
1457
+ // Force rebuild a single read-only index
1458
+ await dinoRepo
1459
+ .update(
1460
+ { id: "TREX-001" },
1461
+ {
1462
+ name: "Updated T-Rex",
1463
+ excavationSiteId: "new-site-001",
1464
+ },
1465
+ )
1466
+ .forceIndexRebuild("excavation-site-index")
1467
+ .execute();
1468
+
1469
+ // Force rebuild multiple read-only indexes
1470
+ await dinoRepo
1471
+ .update(
1472
+ { id: "TREX-001" },
1473
+ {
1474
+ name: "Updated T-Rex",
1475
+ excavationSiteId: "new-site-001",
1476
+ species: "Tyrannosaurus Rex",
1477
+ diet: "carnivore",
1478
+ },
1479
+ )
1480
+ .forceIndexRebuild(["excavation-site-index", "species-diet-index"])
1481
+ .execute();
1482
+
1483
+ // Chain with other update operations
1484
+ await dinoRepo
1485
+ .update(
1486
+ { id: "TREX-001" },
1487
+ {
1488
+ excavationSiteId: "new-site-002",
1489
+ },
1490
+ )
1491
+ .forceIndexRebuild("excavation-site-index")
1492
+ .set("lastUpdated", new Date().toISOString())
1493
+ .condition((op) => op.eq("status", "INACTIVE"))
1494
+ .returnValues("ALL_NEW")
1269
1495
  .execute();
1270
1496
  ```
1271
1497
 
1498
+ **When to use `forceIndexRebuild()`:**
1499
+
1500
+ - 🔄 You need to update a read-only index with new data
1501
+ - 🛠️ You're performing maintenance operations that require index consistency
1502
+ - 📊 You have all required attributes available for the index and want to force an update
1503
+ - ⚡ You want to override the read-only protection for specific update operations
1504
+
1505
+ **Important Notes:**
1506
+
1507
+ - This method only works with entity repositories, not direct table operations, as it requires knowledge of the entity's index definitions
1508
+ - The index name must be a valid index defined in your entity configuration, otherwise an error will be thrown
1509
+ - You must provide all required attributes for the index template variables, otherwise the update will fail with an error
1510
+
1272
1511
  ## 🔄 Type Safety Features
1273
1512
 
1274
1513
  The library provides comprehensive type safety for all operations:
1275
1514
 
1276
1515
  ### Nested Object Support
1516
+
1277
1517
  ```ts
1278
1518
  interface Dinosaur {
1279
1519
  pk: string;
@@ -1311,7 +1551,8 @@ interface Dinosaur {
1311
1551
  }
1312
1552
 
1313
1553
  // TypeScript ensures type safety for all nested dinosaur attributes
1314
- await table.update<Dinosaur>({ pk: "ENCLOSURE#F", sk: "DINO#007" })
1554
+ await table
1555
+ .update<Dinosaur>({ pk: "ENCLOSURE#F", sk: "DINO#007" })
1315
1556
  .set("stats.health", 95) // ✓ Valid
1316
1557
  .set("habitat.enclosure.climate", "Tropical") // ✓ Valid
1317
1558
  .set("care.feeding.lastFed", new Date().toISOString()) // ✓ Valid
@@ -1320,6 +1561,7 @@ await table.update<Dinosaur>({ pk: "ENCLOSURE#F", sk: "DINO#007" })
1320
1561
  ```
1321
1562
 
1322
1563
  ### Type-Safe Conditions
1564
+
1323
1565
  ```ts
1324
1566
  interface DinosaurMonitoring {
1325
1567
  species: string;
@@ -1330,19 +1572,22 @@ interface DinosaurMonitoring {
1330
1572
  alertLevel: "LOW" | "MEDIUM" | "HIGH";
1331
1573
  }
1332
1574
 
1333
- await table.query<DinosaurMonitoring>({
1334
- pk: "MONITORING",
1335
- sk: op => op.beginsWith("ENCLOSURE#")
1336
- })
1337
- .filter(op => op.and(
1338
- op.lt("health", "90"), // ❌ TypeScript Error: health expects number
1339
- op.gt("temperature", 38), // ✓ Valid
1340
- op.contains("behavior", "aggressive"), // Valid
1341
- op.inArray("alertLevel", ["LOW", "MEDIUM", "HIGH"]), // ✓ Valid: matches union type
1342
- op.inArray("alertLevel", ["UNKNOWN", "INVALID"]), // TypeScript Error: invalid alert levels
1343
- op.eq("alertLevel", "UNKNOWN") // TypeScript Error: invalid alert level
1344
- ))
1345
- .execute();
1575
+ await table
1576
+ .query<DinosaurMonitoring>({
1577
+ pk: "MONITORING",
1578
+ sk: (op) => op.beginsWith("ENCLOSURE#"),
1579
+ })
1580
+ .filter((op) =>
1581
+ op.and(
1582
+ op.lt("health", "90"), // TypeScript Error: health expects number
1583
+ op.gt("temperature", 38), // ✓ Valid
1584
+ op.contains("behavior", "aggressive"), // Valid
1585
+ op.inArray("alertLevel", ["LOW", "MEDIUM", "HIGH"]), // Valid: matches union type
1586
+ op.inArray("alertLevel", ["UNKNOWN", "INVALID"]), // ❌ TypeScript Error: invalid alert levels
1587
+ op.eq("alertLevel", "UNKNOWN"), // ❌ TypeScript Error: invalid alert level
1588
+ ),
1589
+ )
1590
+ .execute();
1346
1591
  ```
1347
1592
 
1348
1593
  ## 🔄 Batch Operations
@@ -1362,7 +1607,9 @@ const batch = table.batchBuilder<{
1362
1607
 
1363
1608
  // Add operations - entity type is automatically inferred
1364
1609
  dinosaurRepo.create(newDinosaur).withBatch(batch);
1365
- dinosaurRepo.get({ id: 'dino-123', diet: 'carnivore', species: 'Tyrannosaurus Rex' }).withBatch(batch);
1610
+ dinosaurRepo
1611
+ .get({ id: "dino-123", diet: "carnivore", species: "Tyrannosaurus Rex" })
1612
+ .withBatch(batch);
1366
1613
  fossilRepo.create(newFossil).withBatch(batch);
1367
1614
 
1368
1615
  // Execute and get typed results
@@ -1379,15 +1626,23 @@ const fossils: FossilEntity[] = result.reads.itemsByType.Fossil;
1379
1626
  // Batch get - retrieve multiple items
1380
1627
  const keys = [
1381
1628
  { pk: "DIET#carnivore", sk: "SPECIES#Tyrannosaurus Rex#ID#dino-123" },
1382
- { pk: "FOSSIL#456", sk: "DISCOVERY#2024" }
1629
+ { pk: "FOSSIL#456", sk: "DISCOVERY#2024" },
1383
1630
  ];
1384
1631
 
1385
1632
  const { items, unprocessedKeys } = await table.batchGet<DynamoItem>(keys);
1386
1633
 
1387
1634
  // Batch write - mix of operations
1388
1635
  const operations = [
1389
- { type: "put" as const, item: { pk: "DIET#herbivore", sk: "SPECIES#Triceratops#ID#dino-789", name: "Spike", dangerLevel: 3 } },
1390
- { type: "delete" as const, key: { pk: "FOSSIL#OLD", sk: "DISCOVERY#1990" } }
1636
+ {
1637
+ type: "put" as const,
1638
+ item: {
1639
+ pk: "DIET#herbivore",
1640
+ sk: "SPECIES#Triceratops#ID#dino-789",
1641
+ name: "Spike",
1642
+ dangerLevel: 3,
1643
+ },
1644
+ },
1645
+ { type: "delete" as const, key: { pk: "FOSSIL#OLD", sk: "DISCOVERY#1990" } },
1391
1646
  ];
1392
1647
 
1393
1648
  const { unprocessedItems } = await table.batchWrite(operations);
@@ -1398,16 +1653,20 @@ if (unprocessedItems.length > 0) {
1398
1653
  }
1399
1654
  ```
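+
+ If DynamoDB throttles part of a batch, the returned `unprocessedItems` can be retried. A minimal retry sketch, assuming `unprocessedItems` has the same shape as the `operations` array passed to `table.batchWrite` (the retry count and backoff are illustrative):
+
+ ```ts
+ let pending = operations;
+ for (let attempt = 0; attempt < 3 && pending.length > 0; attempt++) {
+   const { unprocessedItems } = await table.batchWrite(pending);
+   pending = unprocessedItems;
+   if (pending.length > 0) {
+     // Simple linear backoff before retrying the leftover operations
+     await new Promise((resolve) => setTimeout(resolve, 100 * (attempt + 1)));
+   }
+ }
+ ```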
1400
1655
 
1401
-
1402
1656
  ## 🔒 Transaction Operations
1403
1657
 
1404
1658
  Perform multiple operations atomically with transaction support:
1405
1659
 
1406
1660
  ### Transaction Builder
1661
+
1407
1662
  ```ts
1408
1663
  const result = await table.transaction(async (tx) => {
1409
1664
  // Building the expression manually
1410
- tx.put("TableName", { pk: "123", sk: "123"}, and(op.attributeNotExists("pk"), op.attributeExists("sk")));
1665
+ tx.put(
1666
+ "TableName",
1667
+ { pk: "123", sk: "123" },
1668
+ and(op.attributeNotExists("pk"), op.attributeExists("sk")),
1669
+ );
1411
1670
 
1412
1671
  // Using table to build the operation
1413
1672
  table
@@ -1435,6 +1694,7 @@ const result = await table.transaction(async (tx) => {
1435
1694
  ```
1436
1695
 
1437
1696
  ### Transaction Options
1697
+
1438
1698
  ```ts
1439
1699
  const result = await table.transaction(
1440
1700
  async (tx) => {
@@ -1443,12 +1703,11 @@ const result = await table.transaction(
1443
1703
  {
1444
1704
  // Optional transaction settings
1445
1705
  idempotencyToken: "unique-token",
1446
- returnValuesOnConditionCheckFailure: true
1447
- }
1706
+ returnValuesOnConditionCheckFailure: true,
1707
+ },
1448
1708
  );
1449
1709
  ```
1450
1710
 
1451
-
1452
1711
  ## 🚨 Error Handling
1453
1712
 
1454
1713
  **TODO:**
@@ -1461,6 +1720,7 @@ to provide a more clear set of error classes and additional information to allow
1461
1720
  All condition operators are type-safe and will validate against your item type. For detailed information about DynamoDB conditions and expressions, see the [AWS DynamoDB Developer Guide](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html).
1462
1721
 
1463
1722
  #### Comparison Operators
1723
+
1464
1724
  - `eq(attr, value)` - Equals (=)
1465
1725
  - `ne(attr, value)` - Not equals (≠)
1466
1726
  - `lt(attr, value)` - Less than (<)
@@ -1476,29 +1736,37 @@ All condition operators are type-safe and will validate against your item type.
1476
1736
  // Example: Health and feeding monitoring
1477
1737
  await dinoTable
1478
1738
  .query<Dinosaur>({
1479
- pk: "ENCLOSURE#G"
1739
+ pk: "ENCLOSURE#G",
1480
1740
  })
1481
- .filter((op) => op.and(
1482
- op.lt("stats.health", 85), // Health below 85%
1483
- op.lt("care.feeding.lastFed", new Date(Date.now() - 12 * 60 * 60 * 1000).toISOString()), // Not fed in 12 hours
1484
- op.between("stats.weight", 1000, 5000) // Medium-sized dinosaurs
1485
- ))
1741
+ .filter((op) =>
1742
+ op.and(
1743
+ op.lt("stats.health", 85), // Health below 85%
1744
+ op.lt(
1745
+ "care.feeding.lastFed",
1746
+ new Date(Date.now() - 12 * 60 * 60 * 1000).toISOString(),
1747
+ ), // Not fed in 12 hours
1748
+ op.between("stats.weight", 1000, 5000), // Medium-sized dinosaurs
1749
+ ),
1750
+ )
1486
1751
  .execute();
1487
1752
 
1488
1753
  // Example: Filter dinosaurs by multiple status values using inArray
1489
1754
  await dinoTable
1490
1755
  .query<Dinosaur>({
1491
- pk: "SPECIES#trex"
1756
+ pk: "SPECIES#trex",
1492
1757
  })
1493
- .filter((op) => op.and(
1494
- op.inArray("status", ["ACTIVE", "FEEDING", "RESTING"]), // Multiple valid statuses
1495
- op.inArray("diet", ["carnivore", "omnivore"]), // Meat-eating dinosaurs
1496
- op.gt("dangerLevel", 5) // High danger level
1497
- ))
1758
+ .filter((op) =>
1759
+ op.and(
1760
+ op.inArray("status", ["ACTIVE", "FEEDING", "RESTING"]), // Multiple valid statuses
1761
+ op.inArray("diet", ["carnivore", "omnivore"]), // Meat-eating dinosaurs
1762
+ op.gt("dangerLevel", 5), // High danger level
1763
+ ),
1764
+ )
1498
1765
  .execute();
1499
1766
  ```
1500
1767
 
1501
1768
  #### Attribute Operators
1769
+
1502
1770
  - `attributeExists(attr)` - Checks if attribute exists
1503
1771
  - `attributeNotExists(attr)` - Checks if attribute does not exist
1504
1772
 
@@ -1506,23 +1774,26 @@ await dinoTable
1506
1774
  // Example: Validate required attributes for dinosaur transfer
1507
1775
  await dinoTable
1508
1776
  .update<Dinosaur>({
1509
- pk: "ENCLOSURE#H",
1510
- sk: "DINO#008"
1777
+ pk: "ENCLOSURE#H",
1778
+ sk: "DINO#008",
1511
1779
  })
1512
1780
  .set("habitat.enclosure.id", "ENCLOSURE#J")
1513
- .condition((op) => op.and(
1514
- // Ensure all required health data is present
1515
- op.attributeExists("stats.health"),
1516
- op.attributeExists("care.medical.lastCheckup"),
1517
- // Ensure not already in transfer
1518
- op.attributeNotExists("transfer.inProgress"),
1519
- // Verify required monitoring tags
1520
- op.attributeExists("care.medical.vaccinations")
1521
- ))
1781
+ .condition((op) =>
1782
+ op.and(
1783
+ // Ensure all required health data is present
1784
+ op.attributeExists("stats.health"),
1785
+ op.attributeExists("care.medical.lastCheckup"),
1786
+ // Ensure not already in transfer
1787
+ op.attributeNotExists("transfer.inProgress"),
1788
+ // Verify required monitoring tags
1789
+ op.attributeExists("care.medical.vaccinations"),
1790
+ ),
1791
+ )
1522
1792
  .execute();
1523
1793
  ```
1524
1794
 
1525
1795
  #### Logical Operators
1796
+
1526
1797
  - `and(...conditions)` - Combines conditions with AND
1527
1798
  - `or(...conditions)` - Combines conditions with OR
1528
1799
  - `not(condition)` - Negates a condition
@@ -1531,34 +1802,39 @@ await dinoTable
1531
1802
  // Example: Complex safety monitoring conditions
1532
1803
  await dinoTable
1533
1804
  .query<Dinosaur>({
1534
- pk: "MONITORING#ALERTS"
1805
+ pk: "MONITORING#ALERTS",
1535
1806
  })
1536
- .filter((op) => op.or(
1537
- // Alert: Aggressive carnivores with low health
1538
- op.and(
1539
- op.eq("care.feeding.diet", "Carnivore"),
1540
- op.lt("stats.health", 70),
1541
- op.contains("behavior", "aggressive")
1542
- ),
1543
- // Alert: Any dinosaur not fed recently and showing stress
1544
- op.and(
1545
- op.lt("care.feeding.lastFed", new Date(Date.now() - 8 * 60 * 60 * 1000).toISOString()),
1546
- op.contains("behavior", "stressed")
1547
- ),
1548
- // Alert: Critical status dinosaurs requiring immediate attention
1549
- op.and(
1550
- op.inArray("status", ["SICK", "INJURED", "QUARANTINE"]), // Critical statuses
1551
- op.inArray("priority", ["HIGH", "URGENT"]) // High priority levels
1807
+ .filter((op) =>
1808
+ op.or(
1809
+ // Alert: Aggressive carnivores with low health
1810
+ op.and(
1811
+ op.eq("care.feeding.diet", "Carnivore"),
1812
+ op.lt("stats.health", 70),
1813
+ op.contains("behavior", "aggressive"),
1814
+ ),
1815
+ // Alert: Any dinosaur not fed recently and showing stress
1816
+ op.and(
1817
+ op.lt(
1818
+ "care.feeding.lastFed",
1819
+ new Date(Date.now() - 8 * 60 * 60 * 1000).toISOString(),
1820
+ ),
1821
+ op.contains("behavior", "stressed"),
1822
+ ),
1823
+ // Alert: Critical status dinosaurs requiring immediate attention
1824
+ op.and(
1825
+ op.inArray("status", ["SICK", "INJURED", "QUARANTINE"]), // Critical statuses
1826
+ op.inArray("priority", ["HIGH", "URGENT"]), // High priority levels
1827
+ ),
1828
+ // Alert: Enclosure climate issues
1829
+ op.and(
1830
+ op.not(op.eq("habitat.enclosure.climate", "Optimal")),
1831
+ op.or(
1832
+ op.gt("habitat.requirements.temperature", 40),
1833
+ op.lt("habitat.requirements.humidity", 50),
1834
+ ),
1835
+ ),
1552
1836
  ),
1553
- // Alert: Enclosure climate issues
1554
- op.and(
1555
- op.not(op.eq("habitat.enclosure.climate", "Optimal")),
1556
- op.or(
1557
- op.gt("habitat.requirements.temperature", 40),
1558
- op.lt("habitat.requirements.humidity", 50)
1559
- )
1560
- )
1561
- ))
1837
+ )
1562
1838
  .execute();
1563
1839
  ```
1564
1840
 
@@ -1571,7 +1847,8 @@ Special operators for sort key conditions in queries. See [AWS DynamoDB Key Cond
1571
1847
  const recentHealthChecks = await dinoTable
1572
1848
  .query<Dinosaur>({
1573
1849
  pk: "ENCLOSURE#K",
1574
- sk: (op) => op.beginsWith(`HEALTH#${new Date().toISOString().slice(0, 10)}`) // Today's checks
1850
+ sk: (op) =>
1851
+ op.beginsWith(`HEALTH#${new Date().toISOString().slice(0, 10)}`), // Today's checks
1575
1852
  })
1576
1853
  .execute();
1577
1854
 
@@ -1579,10 +1856,11 @@ const recentHealthChecks = await dinoTable
1579
1856
  const largeHerbivores = await dinoTable
1580
1857
  .query<Dinosaur>({
1581
1858
  pk: "DIET#herbivore",
1582
- sk: (op) => op.between(
1583
- `WEIGHT#${5000}`, // 5 tons minimum
1584
- `WEIGHT#${15000}` // 15 tons maximum
1585
- )
1859
+ sk: (op) =>
1860
+ op.between(
1861
+ `WEIGHT#${5000}`, // 5 tons minimum
1862
+ `WEIGHT#${15000}`, // 15 tons maximum
1863
+ ),
1586
1864
  })
1587
1865
  .execute();
1588
1866
 
@@ -1590,10 +1868,11 @@ const largeHerbivores = await dinoTable
1590
1868
  const quarantinedDinos = await dinoTable
1591
1869
  .query<Dinosaur>({
1592
1870
  pk: "STATUS#quarantine",
1593
- sk: (op) => op.between(
1594
- `DATE#${new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString().slice(0, 10)}`, // Last 7 days
1595
- `DATE#${new Date().toISOString().slice(0, 10)}` // Today
1596
- )
1871
+ sk: (op) =>
1872
+ op.between(
1873
+ `DATE#${new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString().slice(0, 10)}`, // Last 7 days
1874
+ `DATE#${new Date().toISOString().slice(0, 10)}`, // Today
1875
+ ),
1597
1876
  })
1598
1877
  .execute();
1599
1878
  ```
@@ -1663,6 +1942,7 @@ First you'll need to install the dependencies:
1663
1942
  ```bash
1664
1943
  pnpm install
1665
1944
  ```
1945
+
1666
1946
  Then set up the test table in local DynamoDB by running the following command:
1667
1947
 
1668
1948
  ```bash