dyno-table 2.1.1 → 2.2.0

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -4,7 +4,7 @@
4
4
 
5
5
  ### **Tame Your DynamoDB Data with Type-Safe Precision**
6
6
 
7
- [![npm version](https://img.shields.io/npm/v/dyno-table.svg?style=for-the-badge)](https://www.npmjs.com/package/dyno-table)
7
+ [![npm version](https://img.shields.io/npm/v/dyno-table.svg?style=for-the-badge)](https://www.npmjs.com/package/dyno-table)
8
8
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg?style=for-the-badge)](https://opensource.org/licenses/MIT)
9
9
  [![TypeScript](https://img.shields.io/badge/TypeScript-4.0%2B-blue?style=for-the-badge&logo=typescript)](https://www.typescriptlang.org/)
10
10
  [![AWS DynamoDB](https://img.shields.io/badge/AWS-DynamoDB-orange?style=for-the-badge&logo=amazon-aws)](https://aws.amazon.com/dynamodb/)
@@ -22,12 +22,12 @@
22
22
  // Type-safe dinosaur tracking operations made simple
23
23
  await dinoTable
24
24
  .update<Dinosaur>({
25
- pk: 'SPECIES#trex',
26
- sk: 'PROFILE#001'
25
+ pk: "SPECIES#trex",
26
+ sk: "PROFILE#001",
27
27
  })
28
- .set('diet', 'Carnivore') // Update dietary classification
29
- .add('sightings', 1) // Increment sighting counter
30
- .condition(op => op.eq('status', 'ACTIVE')) // Only if dinosaur is active
28
+ .set("diet", "Carnivore") // Update dietary classification
29
+ .add("sightings", 1) // Increment sighting counter
30
+ .condition((op) => op.eq("status", "ACTIVE")) // Only if dinosaur is active
31
31
  .execute();
32
32
  ```
33
33
 
@@ -97,6 +97,7 @@ await dinoTable
97
97
  - [Update Operations](#update-operations)
98
98
  - [Condition Operators](#condition-operators)
99
99
  - [Multiple Operations](#multiple-operations)
100
+ - [Force Rebuilding Read-Only Indexes](#force-rebuilding-read-only-indexes)
100
101
  - [🔄 Type Safety Features](#-type-safety-features)
101
102
  - [Nested Object Support](#nested-object-support)
102
103
  - [Type-Safe Conditions](#type-safe-conditions)
@@ -143,6 +144,7 @@ yarn add dyno-table @aws-sdk/client-dynamodb @aws-sdk/lib-dynamodb
143
144
  # Using PNPM
144
145
  pnpm add dyno-table @aws-sdk/client-dynamodb @aws-sdk/lib-dynamodb
145
146
  ```
147
+
146
148
  </details>
147
149
 
148
150
  ## 🎯 DynamoDB Best Practices
@@ -174,29 +176,35 @@ import { QueryCommand } from "@aws-sdk/lib-dynamodb";
174
176
 
175
177
  const docClient = DynamoDBDocument.from(new DynamoDBClient({}));
176
178
 
177
- const users = await docClient.send(new QueryCommand({
178
- TableName: "MyTable",
179
- IndexName: "gsi1",
180
- KeyConditionExpression: "#pk = :pk",
181
- ExpressionAttributeNames: { "#pk": "pk" },
182
- ExpressionAttributeValues: { ":pk": "STATUS#active" }
183
- }));
184
-
185
- const orders = await docClient.send(new QueryCommand({
186
- TableName: "MyTable",
187
- IndexName: "gsi2",
188
- KeyConditionExpression: "#pk = :pk",
189
- ExpressionAttributeNames: { "#pk": "pk" },
190
- ExpressionAttributeValues: { ":pk": "CUSTOMER#123" }
191
- }));
192
-
193
- const products = await docClient.send(new QueryCommand({
194
- TableName: "MyTable",
195
- IndexName: "gsi3",
196
- KeyConditionExpression: "#pk = :pk",
197
- ExpressionAttributeNames: { "#pk": "pk" },
198
- ExpressionAttributeValues: { ":pk": "CATEGORY#electronics" }
199
- }));
179
+ const users = await docClient.send(
180
+ new QueryCommand({
181
+ TableName: "MyTable",
182
+ IndexName: "gsi1",
183
+ KeyConditionExpression: "#pk = :pk",
184
+ ExpressionAttributeNames: { "#pk": "pk" },
185
+ ExpressionAttributeValues: { ":pk": "STATUS#active" },
186
+ }),
187
+ );
188
+
189
+ const orders = await docClient.send(
190
+ new QueryCommand({
191
+ TableName: "MyTable",
192
+ IndexName: "gsi2",
193
+ KeyConditionExpression: "#pk = :pk",
194
+ ExpressionAttributeNames: { "#pk": "pk" },
195
+ ExpressionAttributeValues: { ":pk": "CUSTOMER#123" },
196
+ }),
197
+ );
198
+
199
+ const products = await docClient.send(
200
+ new QueryCommand({
201
+ TableName: "MyTable",
202
+ IndexName: "gsi3",
203
+ KeyConditionExpression: "#pk = :pk",
204
+ ExpressionAttributeNames: { "#pk": "pk" },
205
+ ExpressionAttributeValues: { ":pk": "CATEGORY#electronics" },
206
+ }),
207
+ );
200
208
  ```
201
209
 
202
210
  </td>
@@ -204,9 +212,7 @@ const products = await docClient.send(new QueryCommand({
204
212
 
205
213
  ```ts
206
214
  // Clear business intent
207
- const activeUsers = await userRepo.query
208
- .getActiveUsers()
209
- .execute();
215
+ const activeUsers = await userRepo.query.getActiveUsers().execute();
210
216
 
211
217
  const customerOrders = await orderRepo.query
212
218
  .getOrdersByCustomer({ customerId: "123" })
@@ -225,11 +231,11 @@ const electronics = await productRepo.query
225
231
 
226
232
  When you use generic names like `gsi1`, `gsi2`, `gsi3`, you create several problems:
227
233
 
228
- - **🧠 Cognitive Load**: Developers must remember what each index does
229
- - **📚 Poor Documentation**: Code doesn't self-document its purpose
230
- - **🐛 Error-Prone**: Easy to use the wrong index for a query
231
- - **👥 Team Friction**: New team members struggle to understand data access patterns
232
- - **🔄 Maintenance Issues**: Refactoring becomes risky and unclear
234
+ - **Cognitive Load**: Developers must remember what each index does
235
+ - **Poor Documentation**: Code doesn't self-document its purpose
236
+ - **Error-Prone**: Easy to use the wrong index for a query
237
+ - **Team Friction**: New team members struggle to understand data access patterns
238
+ - **Maintenance Issues**: Refactoring becomes risky and unclear
233
239
 
234
240
  ### The Solution: Meaningful Method Names
235
241
 
@@ -245,25 +251,36 @@ const UserEntity = defineEntity({
245
251
  // ✅ Clear business purpose
246
252
  getActiveUsers: createQuery
247
253
  .input(z.object({}))
248
- .query(({ entity }) => entity.query({ pk: "STATUS#active" }).useIndex("gsi1")),
254
+ .query(({ entity }) =>
255
+ entity.query({ pk: "STATUS#active" }).useIndex("gsi1"),
256
+ ),
249
257
 
250
258
  getUsersByEmail: createQuery
251
259
  .input(z.object({ email: z.string() }))
252
- .query(({ input, entity }) => entity.query({ pk: `EMAIL#${input.email}` }).useIndex("gsi1")),
260
+ .query(({ input, entity }) =>
261
+ entity.query({ pk: `EMAIL#${input.email}` }).useIndex("gsi1"),
262
+ ),
253
263
 
254
264
  getUsersByDepartment: createQuery
255
265
  .input(z.object({ department: z.string() }))
256
- .query(({ input, entity }) => entity.query({ pk: `DEPT#${input.department}` }).useIndex("gsi2")),
266
+ .query(({ input, entity }) =>
267
+ entity.query({ pk: `DEPT#${input.department}` }).useIndex("gsi2"),
268
+ ),
257
269
  },
258
270
  });
259
271
 
260
272
  // Usage in business logic is now self-documenting
261
273
  const activeUsers = await userRepo.query.getActiveUsers().execute();
262
- const engineeringTeam = await userRepo.query.getUsersByDepartment({ department: "engineering" }).execute();
263
- const user = await userRepo.query.getUsersByEmail({ email: "john@company.com" }).execute();
274
+ const engineeringTeam = await userRepo.query
275
+ .getUsersByDepartment({ department: "engineering" })
276
+ .execute();
277
+ const user = await userRepo.query
278
+ .getUsersByEmail({ email: "john@company.com" })
279
+ .execute();
264
280
  ```
265
281
 
266
282
  **This pattern promotes:**
283
+
267
284
  - ✅ **Better code readability and maintainability**
268
285
  - ✅ **Self-documenting API design**
269
286
  - ✅ **Easier onboarding for new team members**
@@ -330,7 +347,7 @@ const rex = await dinoTable
330
347
  name: "Tyrannosaurus Rex",
331
348
  diet: "carnivore",
332
349
  length: 12.3,
333
- discoveryYear: 1902
350
+ discoveryYear: 1902,
334
351
  })
335
352
  .execute();
336
353
  ```
@@ -343,14 +360,11 @@ const rex = await dinoTable
343
360
  ```ts
344
361
  // Find large carnivorous dinosaurs
345
362
  const largeDinos = await dinoTable
346
- .query<Dinosaur>({
363
+ .query<Dinosaur>({
347
364
  pk: "SPECIES#trex",
348
- sk: (op) => op.beginsWith("PROFILE#")
365
+ sk: (op) => op.beginsWith("PROFILE#"),
349
366
  })
350
- .filter((op) => op.and(
351
- op.gte("length", 10),
352
- op.eq("diet", "carnivore")
353
- ))
367
+ .filter((op) => op.and(op.gte("length", 10), op.eq("diet", "carnivore")))
354
368
  .limit(10)
355
369
  .execute();
356
370
  ```
@@ -365,16 +379,14 @@ const largeDinos = await dinoTable
365
379
  ```ts
366
380
  // Update a dinosaur's classification
367
381
  await dinoTable
368
- .update<Dinosaur>({
382
+ .update<Dinosaur>({
369
383
  pk: "SPECIES#trex",
370
- sk: "PROFILE#trex"
384
+ sk: "PROFILE#trex",
371
385
  })
372
386
  .set("diet", "omnivore")
373
387
  .add("discoveryYear", 1)
374
388
  .remove("outdatedField")
375
- .condition((op) =>
376
- op.attributeExists("discoverySite")
377
- )
389
+ .condition((op) => op.attributeExists("discoverySite"))
378
390
  .execute();
379
391
  ```
380
392
 
@@ -387,13 +399,10 @@ await dinoTable
387
399
  // Perform multiple operations atomically
388
400
  await dinoTable.transaction((tx) => {
389
401
  // Move dinosaur to new enclosure
390
- dinoTable
391
- .delete({ pk: "ENCLOSURE#A", sk: "DINO#1" })
392
- .withTransaction(tx);
402
+ dinoTable.delete({ pk: "ENCLOSURE#A", sk: "DINO#1" }).withTransaction(tx);
393
403
 
394
404
  dinoTable
395
- .create({ pk: "ENCLOSURE#B", sk: "DINO#1",
396
- status: "ACTIVE" })
405
+ .create({ pk: "ENCLOSURE#B", sk: "DINO#1", status: "ACTIVE" })
397
406
  .withTransaction(tx);
398
407
  });
399
408
  ```
@@ -416,20 +425,22 @@ await dinoTable.transaction((tx) => {
416
425
 
417
426
  ```ts
418
427
  // Verbose, error-prone, no type safety
419
- await docClient.send(new QueryCommand({
420
- TableName: "JurassicPark",
421
- IndexName: "gsi1", // What does gsi1 do?
422
- KeyConditionExpression: "#pk = :pk",
423
- FilterExpression: "contains(#features, :feathers)",
424
- ExpressionAttributeNames: {
425
- "#pk": "pk",
426
- "#features": "features"
427
- },
428
- ExpressionAttributeValues: {
429
- ":pk": "SPECIES#trex",
430
- ":feathers": "feathers"
431
- }
432
- }));
428
+ await docClient.send(
429
+ new QueryCommand({
430
+ TableName: "JurassicPark",
431
+ IndexName: "gsi1", // What does gsi1 do?
432
+ KeyConditionExpression: "#pk = :pk",
433
+ FilterExpression: "contains(#features, :feathers)",
434
+ ExpressionAttributeNames: {
435
+ "#pk": "pk",
436
+ "#features": "features",
437
+ },
438
+ ExpressionAttributeValues: {
439
+ ":pk": "SPECIES#trex",
440
+ ":feathers": "feathers",
441
+ },
442
+ }),
443
+ );
433
444
  ```
434
445
 
435
446
  </td>
@@ -439,18 +450,16 @@ await docClient.send(new QueryCommand({
439
450
  // Self-documenting, type-safe, semantic
440
451
  const featheredTRexes = await dinosaurRepo.query
441
452
  .getFeatheredDinosaursBySpecies({
442
- species: "trex"
453
+ species: "trex",
443
454
  })
444
455
  .execute();
445
456
 
446
457
  // Or using table directly (still better than raw SDK)
447
458
  await dinoTable
448
459
  .query<Dinosaur>({
449
- pk: "SPECIES#trex"
460
+ pk: "SPECIES#trex",
450
461
  })
451
- .filter(op =>
452
- op.contains("features", "feathers")
453
- )
462
+ .filter((op) => op.contains("features", "feathers"))
454
463
  .execute();
455
464
  ```
456
465
 
@@ -459,6 +468,7 @@ await dinoTable
459
468
  </table>
460
469
 
461
470
  **Key improvements:**
471
+
462
472
  - 🛡️ **Type Safety**: Compile-time error checking prevents runtime failures
463
473
  - 📖 **Self-Documenting**: Code clearly expresses business intent
464
474
  - 🧠 **Reduced Complexity**: No manual expression building or attribute mapping
@@ -565,10 +575,10 @@ const DinosaurEntity = defineEntity({
565
575
  name: "Dinosaur",
566
576
  schema: dinosaurSchema,
567
577
  primaryKey: createIndex()
568
- .input(z.object({ id: z.string(), diet: z.string(), species: z.string() }))
569
- .partitionKey(({ diet }) => dinosaurPK({ diet }))
570
- // could also be .withoutSortKey() if your table doesn't use sort keys
571
- .sortKey(({ id, species }) => dinosaurSK({ species, id }))
578
+ .input(z.object({ id: z.string(), diet: z.string(), species: z.string() }))
579
+ .partitionKey(({ diet }) => dinosaurPK({ diet }))
580
+ // could also be .withoutSortKey() if your table doesn't use sort keys
581
+ .sortKey(({ id, species }) => dinosaurSK({ species, id })),
572
582
  });
573
583
  ```
574
584
 
@@ -578,36 +588,44 @@ Entities provide type-safe CRUD operations:
578
588
 
579
589
  ```ts
580
590
  // Create a new dinosaur
581
- await dinosaurRepo.create({
582
- id: "dino-001",
583
- species: "Tyrannosaurus Rex",
584
- name: "Rexy",
585
- diet: "carnivore",
586
- dangerLevel: 10,
587
- height: 5.2,
588
- weight: 7000,
589
- status: "active",
590
- }).execute();
591
+ await dinosaurRepo
592
+ .create({
593
+ id: "dino-001",
594
+ species: "Tyrannosaurus Rex",
595
+ name: "Rexy",
596
+ diet: "carnivore",
597
+ dangerLevel: 10,
598
+ height: 5.2,
599
+ weight: 7000,
600
+ status: "active",
601
+ })
602
+ .execute();
591
603
 
592
604
  // Get a dinosaur
593
- const dino = await dinosaurRepo.get({
594
- id: "dino-001",
595
- diet: "carnivore",
596
- species: "Tyrannosaurus Rex",
597
- }).execute();
605
+ const dino = await dinosaurRepo
606
+ .get({
607
+ id: "dino-001",
608
+ diet: "carnivore",
609
+ species: "Tyrannosaurus Rex",
610
+ })
611
+ .execute();
598
612
 
599
613
  // Update a dinosaur
600
- await dinosaurRepo.update(
601
- { id: "dino-001", diet: "carnivore", species: "Tyrannosaurus Rex" },
602
- { weight: 7200, status: "sick" }
603
- ).execute();
614
+ await dinosaurRepo
615
+ .update(
616
+ { id: "dino-001", diet: "carnivore", species: "Tyrannosaurus Rex" },
617
+ { weight: 7200, status: "sick" },
618
+ )
619
+ .execute();
604
620
 
605
621
  // Delete a dinosaur
606
- await dinosaurRepo.delete({
607
- id: "dino-001",
608
- diet: "carnivore",
609
- species: "Tyrannosaurus Rex",
610
- }).execute();
622
+ await dinosaurRepo
623
+ .delete({
624
+ id: "dino-001",
625
+ diet: "carnivore",
626
+ species: "Tyrannosaurus Rex",
627
+ })
628
+ .execute();
611
629
  ```
612
630
 
613
631
  #### 3. Custom Queries
@@ -629,63 +647,68 @@ const DinosaurEntity = defineEntity({
629
647
  .input(
630
648
  z.object({
631
649
  diet: z.enum(["carnivore", "herbivore", "omnivore"]),
632
- })
650
+ }),
633
651
  )
634
652
  .query(({ input, entity }) => {
635
- return entity
636
- .query({
637
- pk: dinosaurPK({diet: input.diet})
638
- });
653
+ return entity.query({
654
+ pk: dinosaurPK({ diet: input.diet }),
655
+ });
639
656
  }),
640
657
 
641
658
  findDinosaursBySpecies: createQuery
642
659
  .input(
643
660
  z.object({
644
661
  species: z.string(),
645
- })
662
+ }),
646
663
  )
647
664
  .query(({ input, entity }) => {
648
- return entity
649
- .scan()
650
- .filter((op) => op.eq("species", input.species));
665
+ return entity.scan().filter((op) => op.eq("species", input.species));
651
666
  }),
652
667
 
653
- getActiveCarnivores: createQuery
654
- .input(z.object({}))
655
- .query(({ entity }) => {
656
- return entity
657
- .query({
658
- pk: dinosaurPK({diet: "carnivore"})
659
- })
660
- .filter((op) => op.eq("status", "active"));
661
- }),
668
+ getActiveCarnivores: createQuery.input(z.object({})).query(({ entity }) => {
669
+ return entity
670
+ .query({
671
+ pk: dinosaurPK({ diet: "carnivore" }),
672
+ })
673
+ .filter((op) => op.eq("status", "active"));
674
+ }),
662
675
 
663
676
  getDangerousDinosaursInEnclosure: createQuery
664
677
  .input(
665
678
  z.object({
666
679
  enclosureId: z.string(),
667
680
  minDangerLevel: z.number().min(1).max(10),
668
- })
681
+ }),
669
682
  )
670
683
  .query(({ input, entity }) => {
671
684
  return entity
672
685
  .scan()
673
- .filter((op) => op.and(
674
- op.contains("enclosureId", input.enclosureId),
675
- op.gte("dangerLevel", input.minDangerLevel)
676
- ));
686
+ .filter((op) =>
687
+ op.and(
688
+ op.contains("enclosureId", input.enclosureId),
689
+ op.gte("dangerLevel", input.minDangerLevel),
690
+ ),
691
+ );
677
692
  }),
678
693
  },
679
694
  });
680
695
 
681
696
  // Usage in business logic is now self-documenting
682
- const carnivores = await dinosaurRepo.query.getDinosaursByDiet({ diet: "carnivore" }).execute();
683
- const trexes = await dinosaurRepo.query.findDinosaursBySpecies({ species: "Tyrannosaurus Rex" }).execute();
684
- const activeCarnivores = await dinosaurRepo.query.getActiveCarnivores().execute();
685
- const dangerousDinos = await dinosaurRepo.query.getDangerousDinosaursInEnclosure({
686
- enclosureId: "PADDOCK-A",
687
- minDangerLevel: 8
688
- }).execute();
697
+ const carnivores = await dinosaurRepo.query
698
+ .getDinosaursByDiet({ diet: "carnivore" })
699
+ .execute();
700
+ const trexes = await dinosaurRepo.query
701
+ .findDinosaursBySpecies({ species: "Tyrannosaurus Rex" })
702
+ .execute();
703
+ const activeCarnivores = await dinosaurRepo.query
704
+ .getActiveCarnivores()
705
+ .execute();
706
+ const dangerousDinos = await dinosaurRepo.query
707
+ .getDangerousDinosaursInEnclosure({
708
+ enclosureId: "PADDOCK-A",
709
+ minDangerLevel: 8,
710
+ })
711
+ .execute();
689
712
  ```
690
713
 
691
714
  **Filter Chaining in Entity Queries**
@@ -708,36 +731,40 @@ const DinosaurEntity = defineEntity({
708
731
  .filter((op) => op.gt("health", 80))
709
732
  .filter((op) => op.attributeExists("lastFed"));
710
733
  }),
711
-
734
+
712
735
  // Complex filter chaining with conditional logic
713
736
  getDinosaursForVetCheck: createQuery
714
- .input(z.object({
715
- minHealth: z.number().optional(),
716
- requiredTag: z.string().optional(),
717
- }))
737
+ .input(
738
+ z.object({
739
+ minHealth: z.number().optional(),
740
+ requiredTag: z.string().optional(),
741
+ }),
742
+ )
718
743
  .query(({ input, entity }) => {
719
744
  const builder = entity.scan();
720
-
745
+
721
746
  // Always filter for dinosaurs that need vet attention
722
747
  builder.filter((op) => op.lt("health", 90));
723
-
748
+
724
749
  // Conditionally apply additional filters
725
750
  if (input.minHealth) {
726
751
  builder.filter((op) => op.gt("health", input.minHealth));
727
752
  }
728
-
753
+
729
754
  if (input.requiredTag) {
730
755
  builder.filter((op) => op.contains("tags", input.requiredTag));
731
756
  }
732
-
757
+
733
758
  return builder;
734
759
  }),
735
-
760
+
736
761
  // Pre-applied filters combined with execution-time filters
737
762
  getActiveDinosaursByDiet: createQuery
738
- .input(z.object({
739
- diet: z.enum(["carnivore", "herbivore", "omnivore"]),
740
- }))
763
+ .input(
764
+ z.object({
765
+ diet: z.enum(["carnivore", "herbivore", "omnivore"]),
766
+ }),
767
+ )
741
768
  .query(({ input, entity }) => {
742
769
  // Apply a filter in the query definition
743
770
  return entity
@@ -749,7 +776,7 @@ const DinosaurEntity = defineEntity({
749
776
  });
750
777
 
751
778
  // Usage with additional execution-time filters
752
- // Both the pre-applied filters (diet = "carnivore", status = "active")
779
+ // Both the pre-applied filters (diet = "carnivore", status = "active")
753
780
  // and the execution-time filter (health > 50) will be applied
754
781
  const healthyActiveCarnivores = await dinosaurRepo.query
755
782
  .getActiveDinosaursByDiet({ diet: "carnivore" })
@@ -758,6 +785,7 @@ const healthyActiveCarnivores = await dinosaurRepo.query
758
785
  ```
759
786
 
760
787
  **Benefits of semantic naming:**
788
+
761
789
  - 🎯 **Clear Intent**: Method names immediately convey what data you're accessing
762
790
  - 📖 **Self-Documenting**: No need to look up what `gsi1` or `gsi2` does
763
791
  - 🧠 **Reduced Cognitive Load**: Developers can focus on business logic, not database details
@@ -772,11 +800,11 @@ Define GSI access patterns with **meaningful names** that reflect their business
772
800
  import { createIndex } from "dyno-table/entity";
773
801
 
774
802
  // Define GSI templates with descriptive names that reflect their purpose
775
- const speciesPK = partitionKey`SPECIES#${"species"}`
776
- const speciesSK = sortKey`DINOSAUR#${"id"}`
803
+ const speciesPK = partitionKey`SPECIES#${"species"}`;
804
+ const speciesSK = sortKey`DINOSAUR#${"id"}`;
777
805
 
778
- const enclosurePK = partitionKey`ENCLOSURE#${"enclosureId"}`
779
- const enclosureSK = sortKey`DANGER#${"dangerLevel"}#ID#${"id"}`
806
+ const enclosurePK = partitionKey`ENCLOSURE#${"enclosureId"}`;
807
+ const enclosureSK = sortKey`DANGER#${"dangerLevel"}#ID#${"id"}`;
780
808
 
781
809
  // Create indexes with meaningful names
782
810
  const speciesIndex = createIndex()
@@ -804,12 +832,12 @@ const DinosaurEntity = defineEntity({
804
832
  .input(
805
833
  z.object({
806
834
  species: z.string(),
807
- })
835
+ }),
808
836
  )
809
837
  .query(({ input, entity }) => {
810
838
  return entity
811
839
  .query({
812
- pk: speciesPK({species: input.species}),
840
+ pk: speciesPK({ species: input.species }),
813
841
  })
814
842
  .useIndex("gsi1"); // Generic GSI name for table flexibility
815
843
  }),
@@ -818,12 +846,12 @@ const DinosaurEntity = defineEntity({
818
846
  .input(
819
847
  z.object({
820
848
  enclosureId: z.string(),
821
- })
849
+ }),
822
850
  )
823
851
  .query(({ input, entity }) => {
824
852
  return entity
825
853
  .query({
826
- pk: enclosurePK({enclosureId: input.enclosureId}),
854
+ pk: enclosurePK({ enclosureId: input.enclosureId }),
827
855
  })
828
856
  .useIndex("gsi2");
829
857
  }),
@@ -833,13 +861,13 @@ const DinosaurEntity = defineEntity({
833
861
  z.object({
834
862
  enclosureId: z.string(),
835
863
  minDangerLevel: z.number().min(1).max(10),
836
- })
864
+ }),
837
865
  )
838
866
  .query(({ input, entity }) => {
839
867
  return entity
840
868
  .query({
841
- pk: enclosurePK({enclosureId: input.enclosureId}),
842
- sk: (op) => op.gte(`DANGER#${input.minDangerLevel}`)
869
+ pk: enclosurePK({ enclosureId: input.enclosureId }),
870
+ sk: (op) => op.gte(`DANGER#${input.minDangerLevel}`),
843
871
  })
844
872
  .useIndex("gsi2")
845
873
  .sortDescending(); // Get most dangerous first
@@ -848,15 +876,22 @@ const DinosaurEntity = defineEntity({
848
876
  });
849
877
 
850
878
  // Usage is now self-documenting
851
- const trexes = await dinosaurRepo.query.getDinosaursBySpecies({ species: "Tyrannosaurus Rex" }).execute();
852
- const paddockADinos = await dinosaurRepo.query.getDinosaursByEnclosure({ enclosureId: "PADDOCK-A" }).execute();
853
- const dangerousDinos = await dinosaurRepo.query.getMostDangerousInEnclosure({
854
- enclosureId: "PADDOCK-A",
855
- minDangerLevel: 8
856
- }).execute();
879
+ const trexes = await dinosaurRepo.query
880
+ .getDinosaursBySpecies({ species: "Tyrannosaurus Rex" })
881
+ .execute();
882
+ const paddockADinos = await dinosaurRepo.query
883
+ .getDinosaursByEnclosure({ enclosureId: "PADDOCK-A" })
884
+ .execute();
885
+ const dangerousDinos = await dinosaurRepo.query
886
+ .getMostDangerousInEnclosure({
887
+ enclosureId: "PADDOCK-A",
888
+ minDangerLevel: 8,
889
+ })
890
+ .execute();
857
891
  ```
858
892
 
859
893
  **Key principles for access pattern naming:**
894
+
860
895
  - 🎯 **Generic GSI Names**: Keep table-level GSI names generic (`gsi1`, `gsi2`) for flexibility across entities
861
896
  - 🔍 **Business-Focused**: Method names should reflect what the query achieves, not how it works
862
897
  - 📚 **Self-Documenting**: Anyone reading the code should understand the purpose immediately
@@ -897,11 +932,11 @@ type Dinosaur = z.infer<typeof dinosaurSchema>;
897
932
  const dinosaurPK = partitionKey`DINOSAUR#${"id"}`;
898
933
  const dinosaurSK = sortKey`STATUS#${"status"}`;
899
934
 
900
- const gsi1PK = partitionKey`SPECIES#${"species"}`
901
- const gsi1SK = sortKey`DINOSAUR#${"id"}`
935
+ const gsi1PK = partitionKey`SPECIES#${"species"}`;
936
+ const gsi1SK = sortKey`DINOSAUR#${"id"}`;
902
937
 
903
- const gsi2PK = partitionKey`ENCLOSURE#${"enclosureId"}`
904
- const gsi2SK = sortKey`DINOSAUR#${"id"}`
938
+ const gsi2PK = partitionKey`ENCLOSURE#${"enclosureId"}`;
939
+ const gsi2SK = sortKey`DINOSAUR#${"id"}`;
905
940
 
906
941
  // Create a primary index
907
942
  const primaryKey = createIndex()
@@ -938,11 +973,12 @@ const DinosaurEntity = defineEntity({
938
973
  schema: dinosaurSchema,
939
974
  primaryKey,
940
975
  indexes: {
941
- // These keys need to be named after the name of the GSI that is defined in your table instance
976
+ // These keys need to be named after the name of the GSI that is defined in your table instance
942
977
  gsi1: speciesIndex,
943
978
  gsi2: enclosureIndex,
944
979
  // Example of a read-only index for audit trail data
945
980
  gsi3: auditIndex, // This index will never be updated during entity update operations
981
+ // unless explicitly forced with .forceIndexRebuild('gsi3')
946
982
  },
947
983
  queries: {
948
984
  // ✅ Semantic method names that describe business intent
@@ -950,7 +986,7 @@ const DinosaurEntity = defineEntity({
950
986
  .input(
951
987
  z.object({
952
988
  species: z.string(),
953
- })
989
+ }),
954
990
  )
955
991
  .query(({ input, entity }) => {
956
992
  return entity
@@ -964,7 +1000,7 @@ const DinosaurEntity = defineEntity({
964
1000
  .input(
965
1001
  z.object({
966
1002
  enclosureId: z.string(),
967
- })
1003
+ }),
968
1004
  )
969
1005
  .query(({ input, entity }) => {
970
1006
  return entity
@@ -979,7 +1015,7 @@ const DinosaurEntity = defineEntity({
979
1015
  z.object({
980
1016
  enclosureId: z.string(),
981
1017
  minDangerLevel: z.number().int().min(1).max(10),
982
- })
1018
+ }),
983
1019
  )
984
1020
  .query(({ input, entity }) => {
985
1021
  return entity
@@ -1014,15 +1050,19 @@ async function main() {
1014
1050
  .execute();
1015
1051
 
1016
1052
  // Query dinosaurs by species using semantic method names
1017
- const trexes = await dinosaurRepo.query.getDinosaursBySpecies({
1018
- species: "Tyrannosaurus Rex"
1019
- }).execute();
1053
+ const trexes = await dinosaurRepo.query
1054
+ .getDinosaursBySpecies({
1055
+ species: "Tyrannosaurus Rex",
1056
+ })
1057
+ .execute();
1020
1058
 
1021
1059
  // Query dangerous dinosaurs in an enclosure
1022
- const dangerousDinos = await dinosaurRepo.query.getDangerousDinosaursInEnclosure({
1023
- enclosureId: "enc-001",
1024
- minDangerLevel: 8,
1025
- }).execute();
1060
+ const dangerousDinos = await dinosaurRepo.query
1061
+ .getDangerousDinosaursInEnclosure({
1062
+ enclosureId: "enc-001",
1063
+ minDangerLevel: 8,
1064
+ })
1065
+ .execute();
1026
1066
  }
1027
1067
  ```
1028
1068
 
@@ -1031,6 +1071,7 @@ async function main() {
1031
1071
  ### Transactional Operations
1032
1072
 
1033
1073
  **Safe dinosaur transfer between enclosures**
1074
+
1034
1075
  ```ts
1035
1076
  // Start a transaction session for transferring a T-Rex to a new enclosure
1036
1077
  // Critical for safety: All operations must succeed or none will be applied
@@ -1041,53 +1082,57 @@ await dinoTable.transaction(async (tx) => {
1041
1082
  // STEP 1: Check if destination enclosure is ready and compatible with the dinosaur
1042
1083
  // We must verify the enclosure is prepared and suitable for a carnivore
1043
1084
  await dinoTable
1044
- .conditionCheck({
1045
- pk: "ENCLOSURE#B", // Target enclosure B
1046
- sk: "STATUS" // Check the enclosure status record
1085
+ .conditionCheck({
1086
+ pk: "ENCLOSURE#B", // Target enclosure B
1087
+ sk: "STATUS", // Check the enclosure status record
1047
1088
  })
1048
- .condition(op => op.and(
1049
- op.eq("status", "READY"), // Enclosure must be in READY state
1050
- op.eq("diet", "Carnivore") // Must support carnivorous dinosaurs
1051
- ))
1089
+ .condition((op) =>
1090
+ op.and(
1091
+ op.eq("status", "READY"), // Enclosure must be in READY state
1092
+ op.eq("diet", "Carnivore"), // Must support carnivorous dinosaurs
1093
+ ),
1094
+ )
1052
1095
  .withTransaction(tx);
1053
1096
 
1054
1097
  // STEP 2: Remove dinosaur from current enclosure
1055
1098
  // Only proceed if the dinosaur is healthy enough for transfer
1056
1099
  await dinoTable
1057
- .delete<Dinosaur>({
1058
- pk: "ENCLOSURE#A", // Source enclosure A
1059
- sk: "DINO#001" // T-Rex with ID 001
1100
+ .delete<Dinosaur>({
1101
+ pk: "ENCLOSURE#A", // Source enclosure A
1102
+ sk: "DINO#001", // T-Rex with ID 001
1060
1103
  })
1061
- .condition(op => op.and(
1062
- op.eq("status", "HEALTHY"), // Dinosaur must be in HEALTHY state
1063
- op.gte("health", 80) // Health must be at least 80%
1064
- ))
1104
+ .condition((op) =>
1105
+ op.and(
1106
+ op.eq("status", "HEALTHY"), // Dinosaur must be in HEALTHY state
1107
+ op.gte("health", 80), // Health must be at least 80%
1108
+ ),
1109
+ )
1065
1110
  .withTransaction(tx);
1066
1111
 
1067
1112
  // STEP 3: Add dinosaur to new enclosure
1068
1113
  // Create a fresh record in the destination enclosure
1069
1114
  await dinoTable
1070
1115
  .create<Dinosaur>({
1071
- pk: "ENCLOSURE#B", // Destination enclosure B
1072
- sk: "DINO#001", // Same dinosaur ID for tracking
1073
- name: "Rex", // Dinosaur name
1074
- species: "Tyrannosaurus", // Species classification
1075
- diet: "Carnivore", // Dietary requirements
1076
- status: "HEALTHY", // Current health status
1077
- health: 100, // Reset health to 100% after transfer
1078
- enclosureId: "B", // Update enclosure reference
1079
- lastFed: new Date().toISOString() // Reset feeding clock
1116
+ pk: "ENCLOSURE#B", // Destination enclosure B
1117
+ sk: "DINO#001", // Same dinosaur ID for tracking
1118
+ name: "Rex", // Dinosaur name
1119
+ species: "Tyrannosaurus", // Species classification
1120
+ diet: "Carnivore", // Dietary requirements
1121
+ status: "HEALTHY", // Current health status
1122
+ health: 100, // Reset health to 100% after transfer
1123
+ enclosureId: "B", // Update enclosure reference
1124
+ lastFed: new Date().toISOString(), // Reset feeding clock
1080
1125
  })
1081
1126
  .withTransaction(tx);
1082
1127
 
1083
1128
  // STEP 4: Update enclosure occupancy tracking
1084
1129
  // Keep accurate count of dinosaurs in each enclosure
1085
1130
  await dinoTable
1086
- .update<Dinosaur>({
1087
- pk: "ENCLOSURE#B", // Target enclosure B
1088
- sk: "OCCUPANCY" // Occupancy tracking record
1131
+ .update<Dinosaur>({
1132
+ pk: "ENCLOSURE#B", // Target enclosure B
1133
+ sk: "OCCUPANCY", // Occupancy tracking record
1089
1134
  })
1090
- .add("currentOccupants", 1) // Increment occupant count
1135
+ .add("currentOccupants", 1) // Increment occupant count
1091
1136
  .set("lastUpdated", new Date().toISOString()) // Update timestamp
1092
1137
  .withTransaction(tx);
1093
1138
  });
@@ -1100,13 +1145,13 @@ await dinoTable.transaction(
1100
1145
  // Record that the dinosaur has been fed and update its health metrics
1101
1146
  await dinoTable
1102
1147
  .update<Dinosaur>({
1103
- pk: "ENCLOSURE#D", // Herbivore enclosure D
1104
- sk: "DINO#003" // Stegosaurus with ID 003
1148
+ pk: "ENCLOSURE#D", // Herbivore enclosure D
1149
+ sk: "DINO#003", // Stegosaurus with ID 003
1105
1150
  })
1106
1151
  .set({
1107
- status: "HEALTHY", // Update health status
1152
+ status: "HEALTHY", // Update health status
1108
1153
  lastFed: new Date().toISOString(), // Record feeding time
1109
- health: 100 // Reset health to 100%
1154
+ health: 100, // Reset health to 100%
1110
1155
  })
1111
1156
  .deleteElementsFromSet("tags", ["needs_feeding"]) // Remove feeding alert tag
1112
1157
  .withTransaction(tx);
@@ -1115,38 +1160,43 @@ await dinoTable.transaction(
1115
1160
  // Schedule next feeding time for tomorrow
1116
1161
  await dinoTable
1117
1162
  .update<Dinosaur>({
1118
- pk: "ENCLOSURE#D", // Same herbivore enclosure
1119
- sk: "SCHEDULE" // Feeding schedule record
1163
+ pk: "ENCLOSURE#D", // Same herbivore enclosure
1164
+ sk: "SCHEDULE", // Feeding schedule record
1120
1165
  })
1121
- .set("nextFeedingTime", new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString()) // 24 hours from now
1166
+ .set(
1167
+ "nextFeedingTime",
1168
+ new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString(),
1169
+ ) // 24 hours from now
1122
1170
  .withTransaction(tx);
1123
1171
  },
1124
1172
  {
1125
1173
  // Transaction options for tracking and idempotency
1126
1174
  clientRequestToken: "feeding-session-001", // Prevents duplicate feeding operations
1127
- returnConsumedCapacity: "TOTAL" // Track capacity usage for park operations
1128
- }
1175
+ returnConsumedCapacity: "TOTAL", // Track capacity usage for park operations
1176
+ },
1129
1177
  );
1130
1178
  ```
1131
1179
 
1132
-
1133
1180
  ### Pagination Made Simple
1134
1181
 
1135
1182
  **Efficient dinosaur record browsing for park management**
1183
+
1136
1184
  ```ts
1137
1185
  // SCENARIO 1: Herbivore health monitoring with pagination
1138
1186
  // Create a paginator for viewing healthy herbivores in manageable chunks
1139
1187
  // Perfect for veterinary staff doing routine health checks
1140
1188
  const healthyHerbivores = dinoTable
1141
1189
  .query<Dinosaur>({
1142
- pk: "DIET#herbivore", // Target all herbivorous dinosaurs
1143
- sk: op => op.beginsWith("STATUS#HEALTHY") // Only those with HEALTHY status
1190
+ pk: "DIET#herbivore", // Target all herbivorous dinosaurs
1191
+ sk: (op) => op.beginsWith("STATUS#HEALTHY"), // Only those with HEALTHY status
1144
1192
  })
1145
- .filter((op) => op.and(
1146
- op.gte("health", 90), // Only those with excellent health (90%+)
1147
- op.attributeExists("lastFed") // Must have feeding records
1148
- ))
1149
- .paginate(5); // Process in small batches of 5 dinosaurs
1193
+ .filter((op) =>
1194
+ op.and(
1195
+ op.gte("health", 90), // Only those with excellent health (90%+)
1196
+ op.attributeExists("lastFed"), // Must have feeding records
1197
+ ),
1198
+ )
1199
+ .paginate(5); // Process in small batches of 5 dinosaurs
1150
1200
 
1151
1201
  // Iterate through all pages of results - useful for processing large datasets
1152
1202
  // without loading everything into memory at once
@@ -1154,11 +1204,15 @@ console.log("🦕 Beginning herbivore health inspection rounds...");
1154
1204
  while (healthyHerbivores.hasNextPage()) {
1155
1205
  // Get the next page of dinosaurs
1156
1206
  const page = await healthyHerbivores.getNextPage();
1157
- console.log(`Checking herbivores page ${page.page}, found ${page.items.length} dinosaurs`);
1207
+ console.log(
1208
+ `Checking herbivores page ${page.page}, found ${page.items.length} dinosaurs`,
1209
+ );
1158
1210
 
1159
1211
  // Process each dinosaur in the current page
1160
- page.items.forEach(dino => {
1161
- console.log(`${dino.name}: Health ${dino.health}%, Last fed: ${dino.lastFed}`);
1212
+ page.items.forEach((dino) => {
1213
+ console.log(
1214
+ `${dino.name}: Health ${dino.health}%, Last fed: ${dino.lastFed}`,
1215
+ );
1162
1216
  // In a real app, you might update health records or schedule next checkup
1163
1217
  });
1164
1218
  }
@@ -1168,12 +1222,12 @@ while (healthyHerbivores.hasNextPage()) {
1168
1222
  // This approach loads all matching items into memory
1169
1223
  const carnivoreSchedule = await dinoTable
1170
1224
  .query<Dinosaur>({
1171
- pk: "DIET#carnivore", // Target all carnivorous dinosaurs
1172
- sk: op => op.beginsWith("ENCLOSURE#") // Organized by enclosure
1225
+ pk: "DIET#carnivore", // Target all carnivorous dinosaurs
1226
+ sk: (op) => op.beginsWith("ENCLOSURE#"), // Organized by enclosure
1173
1227
  })
1174
- .filter(op => op.attributeExists("lastFed")) // Only those with feeding records
1175
- .paginate(10) // Process in pages of 10
1176
- .getAllPages(); // But collect all results at once
1228
+ .filter((op) => op.attributeExists("lastFed")) // Only those with feeding records
1229
+ .paginate(10) // Process in pages of 10
1230
+ .getAllPages(); // But collect all results at once
1177
1231
 
1178
1232
  console.log(`Scheduling feeding for ${carnivoreSchedule.length} carnivores`);
1179
1233
  // Now we can sort and organize feeding times based on species, size, etc.
@@ -1181,17 +1235,17 @@ console.log(`Scheduling feeding for ${carnivoreSchedule.length} carnivores`);
1181
1235
  // SCENARIO 3: Visitor information kiosk with limited display
1182
1236
  // Create a paginated view for the public-facing dinosaur information kiosk
1183
1237
  const visitorKiosk = dinoTable
1184
- .query<Dinosaur>({
1185
- pk: "VISITOR_VIEW", // Special partition for visitor-facing data
1186
- sk: op => op.beginsWith("SPECIES#") // Organized by species
1238
+ .query<Dinosaur>({
1239
+ pk: "VISITOR_VIEW", // Special partition for visitor-facing data
1240
+ sk: (op) => op.beginsWith("SPECIES#"), // Organized by species
1187
1241
  })
1188
- .filter(op => op.eq("status", "ON_DISPLAY")) // Only show dinosaurs currently on display
1189
- .limit(12) // Show maximum 12 dinosaurs total
1190
- .paginate(4); // Display 4 at a time for easy viewing
1242
+ .filter((op) => op.eq("status", "ON_DISPLAY")) // Only show dinosaurs currently on display
1243
+ .limit(12) // Show maximum 12 dinosaurs total
1244
+ .paginate(4); // Display 4 at a time for easy viewing
1191
1245
 
1192
1246
  // Get first page for initial kiosk display
1193
1247
  const firstPage = await visitorKiosk.getNextPage();
1194
- console.log(`🦖 Now showing: ${firstPage.items.map(d => d.name).join(", ")}`);
1248
+ console.log(`🦖 Now showing: ${firstPage.items.map((d) => d.name).join(", ")}`);
1195
1249
  // Visitors can press "Next" to see more dinosaurs in the collection
1196
1250
  ```
1197
1251
 
@@ -1201,15 +1255,15 @@ Dyno-table provides comprehensive query methods that match DynamoDB's capabiliti
1201
1255
 
1202
1256
  ### Comparison Operators
1203
1257
 
1204
- | Operation | Method Example | Generated Expression |
1205
- |---------------------------|---------------------------------------------------------|-----------------------------------|
1206
- | **Equals** | `.filter(op => op.eq("status", "ACTIVE"))` | `status = :v1` |
1207
- | **Not Equals** | `.filter(op => op.ne("status", "DELETED"))` | `status <> :v1` |
1208
- | **Less Than** | `.filter(op => op.lt("age", 18))` | `age < :v1` |
1209
- | **Less Than or Equal** | `.filter(op => op.lte("score", 100))` | `score <= :v1` |
1210
- | **Greater Than** | `.filter(op => op.gt("price", 50))` | `price > :v1` |
1211
- | **Greater Than or Equal** | `.filter(op => op.gte("rating", 4))` | `rating >= :v1` |
1212
- | **Between** | `.filter(op => op.between("age", 18, 65))` | `age BETWEEN :v1 AND :v2` |
1258
+ | Operation | Method Example | Generated Expression |
1259
+ | ------------------------- | ------------------------------------------------------------ | --------------------------------- |
1260
+ | **Equals** | `.filter(op => op.eq("status", "ACTIVE"))` | `status = :v1` |
1261
+ | **Not Equals** | `.filter(op => op.ne("status", "DELETED"))` | `status <> :v1` |
1262
+ | **Less Than** | `.filter(op => op.lt("age", 18))` | `age < :v1` |
1263
+ | **Less Than or Equal** | `.filter(op => op.lte("score", 100))` | `score <= :v1` |
1264
+ | **Greater Than** | `.filter(op => op.gt("price", 50))` | `price > :v1` |
1265
+ | **Greater Than or Equal** | `.filter(op => op.gte("rating", 4))` | `rating >= :v1` |
1266
+ | **Between** | `.filter(op => op.between("age", 18, 65))` | `age BETWEEN :v1 AND :v2` |
1213
1267
  | **In Array** | `.filter(op => op.inArray("status", ["ACTIVE", "PENDING"]))` | `status IN (:v1, :v2)` |
1214
1268
  | **Begins With** | `.filter(op => op.beginsWith("email", "@example.com"))` | `begins_with(email, :v1)` |
1215
1269
  | **Contains** | `.filter(op => op.contains("tags", "important"))` | `contains(tags, :v1)` |
@@ -1225,19 +1279,21 @@ Filters can be chained together using multiple `.filter()` calls. When multiple
1225
1279
  // Chaining multiple filters - these are combined with AND
1226
1280
  const result = await table
1227
1281
  .query({ pk: "USER#123" })
1228
- .filter(op => op.eq("status", "ACTIVE"))
1229
- .filter(op => op.gt("age", 18))
1230
- .filter(op => op.contains("tags", "premium"))
1282
+ .filter((op) => op.eq("status", "ACTIVE"))
1283
+ .filter((op) => op.gt("age", 18))
1284
+ .filter((op) => op.contains("tags", "premium"))
1231
1285
  .execute();
1232
1286
 
1233
1287
  // This is equivalent to:
1234
1288
  const result = await table
1235
1289
  .query({ pk: "USER#123" })
1236
- .filter(op => op.and(
1237
- op.eq("status", "ACTIVE"),
1238
- op.gt("age", 18),
1239
- op.contains("tags", "premium")
1240
- ))
1290
+ .filter((op) =>
1291
+ op.and(
1292
+ op.eq("status", "ACTIVE"),
1293
+ op.gt("age", 18),
1294
+ op.contains("tags", "premium"),
1295
+ ),
1296
+ )
1241
1297
  .execute();
1242
1298
  ```
1243
1299
 
@@ -1250,15 +1306,15 @@ const builder = table.query({ pk: "USER#123" });
1250
1306
 
1251
1307
  // Conditionally apply filters
1252
1308
  if (statusFilter) {
1253
- builder.filter(op => op.eq("status", statusFilter));
1309
+ builder.filter((op) => op.eq("status", statusFilter));
1254
1310
  }
1255
1311
 
1256
1312
  if (minAge) {
1257
- builder.filter(op => op.gt("age", minAge));
1313
+ builder.filter((op) => op.gt("age", minAge));
1258
1314
  }
1259
1315
 
1260
1316
  if (requiredTag) {
1261
- builder.filter(op => op.contains("tags", requiredTag));
1317
+ builder.filter((op) => op.contains("tags", requiredTag));
1262
1318
  }
1263
1319
 
1264
1320
  const result = await builder.execute();
@@ -1267,7 +1323,7 @@ const result = await builder.execute();
1267
1323
  ### Logical Operators
1268
1324
 
1269
1325
  | Operation | Method Example | Generated Expression |
1270
- |-----------|-----------------------------------------------------------------------------------|--------------------------------|
1326
+ | --------- | --------------------------------------------------------------------------------- | ------------------------------ |
1271
1327
  | **AND** | `.filter(op => op.and(op.eq("status", "ACTIVE"), op.gt("age", 18)))` | `status = :v1 AND age > :v2` |
1272
1328
  | **OR** | `.filter(op => op.or(op.eq("status", "PENDING"), op.eq("status", "PROCESSING")))` | `status = :v1 OR status = :v2` |
1273
1329
  | **NOT** | `.filter(op => op.not(op.eq("status", "DELETED")))` | `NOT status = :v1` |
@@ -1275,12 +1331,13 @@ const result = await builder.execute();
1275
1331
  ### Query Operations
1276
1332
 
1277
1333
  | Operation | Method Example | Generated Expression |
1278
- |--------------------------|--------------------------------------------------------------------------------------|---------------------------------------|
1334
+ | ------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------- |
1279
1335
  | **Partition Key Equals** | `.query({ pk: "USER#123" })` | `pk = :pk` |
1280
1336
  | **Sort Key Begins With** | `.query({ pk: "USER#123", sk: op => op.beginsWith("ORDER#2023") })` | `pk = :pk AND begins_with(sk, :v1)` |
1281
1337
  | **Sort Key Between** | `.query({ pk: "USER#123", sk: op => op.between("ORDER#2023-01", "ORDER#2023-12") })` | `pk = :pk AND sk BETWEEN :v1 AND :v2` |
1282
1338
 
1283
1339
  Additional query options:
1340
+
1284
1341
  ```ts
1285
1342
  // Sort order
1286
1343
  const ascending = await table
@@ -1300,16 +1357,13 @@ const partial = await table
1300
1357
  .execute();
1301
1358
 
1302
1359
  // Limit results
1303
- const limited = await table
1304
- .query({ pk: "USER#123" })
1305
- .limit(10)
1306
- .execute();
1360
+ const limited = await table.query({ pk: "USER#123" }).limit(10).execute();
1307
1361
  ```
1308
1362
 
1309
1363
  ### Put Operations
1310
1364
 
1311
1365
  | Operation | Method Example | Description |
1312
- |---------------------|---------------------------------------------------------------------|------------------------------------------------------------------------|
1366
+ | ------------------- | ------------------------------------------------------------------- | ---------------------------------------------------------------------- |
1313
1367
  | **Create New Item** | `.create<Dinosaur>({ pk: "SPECIES#trex", sk: "PROFILE#001", ... })` | Creates a new item with a condition to ensure it doesn't already exist |
1314
1368
  | **Put Item** | `.put<Dinosaur>({ pk: "SPECIES#trex", sk: "PROFILE#001", ... })` | Creates or replaces an item |
1315
1369
  | **With Condition** | `.put(item).condition(op => op.attributeNotExists("pk"))` | Adds a condition that must be satisfied |
@@ -1319,42 +1373,50 @@ const limited = await table
1319
1373
  Control what data is returned from put operations:
1320
1374
 
1321
1375
  | Option | Description | Example |
1322
- |----------------|--------------------------------------------------------------------------------------------------------------------|---------------------------------------------------|
1376
+ | -------------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------- |
1323
1377
  | **NONE** | Default. No return value. | `.put(item).returnValues("NONE").execute()` |
1324
1378
  | **ALL_OLD** | Returns the item's previous state if it existed. (Does not consume any RCU and returns strongly consistent values) | `.put(item).returnValues("ALL_OLD").execute()` |
1325
1379
  | **CONSISTENT** | Performs a consistent GET operation after the put to retrieve the item's new state. (Does consume RCU) | `.put(item).returnValues("CONSISTENT").execute()` |
1326
1380
 
1327
1381
  ```ts
1328
1382
  // Create with no return value (default)
1329
- await table.put<Dinosaur>({
1330
- pk: "SPECIES#trex",
1331
- sk: "PROFILE#001",
1332
- name: "Tyrannosaurus Rex",
1333
- diet: "carnivore"
1334
- }).execute();
1383
+ await table
1384
+ .put<Dinosaur>({
1385
+ pk: "SPECIES#trex",
1386
+ sk: "PROFILE#001",
1387
+ name: "Tyrannosaurus Rex",
1388
+ diet: "carnivore",
1389
+ })
1390
+ .execute();
1335
1391
 
1336
1392
  // Create and return the newly created item
1337
- const newDino = await table.put<Dinosaur>({
1338
- pk: "SPECIES#trex",
1339
- sk: "PROFILE#002",
1340
- name: "Tyrannosaurus Rex",
1341
- diet: "carnivore"
1342
- }).returnValues("CONSISTENT").execute();
1393
+ const newDino = await table
1394
+ .put<Dinosaur>({
1395
+ pk: "SPECIES#trex",
1396
+ sk: "PROFILE#002",
1397
+ name: "Tyrannosaurus Rex",
1398
+ diet: "carnivore",
1399
+ })
1400
+ .returnValues("CONSISTENT")
1401
+ .execute();
1343
1402
 
1344
1403
  // Update with condition and get previous values
1345
- const oldDino = await table.put<Dinosaur>({
1346
- pk: "SPECIES#trex",
1347
- sk: "PROFILE#001",
1348
- name: "Tyrannosaurus Rex",
1349
- diet: "omnivore", // Updated diet
1350
- discoveryYear: 1905
1351
- }).returnValues("ALL_OLD").execute();
1404
+ const oldDino = await table
1405
+ .put<Dinosaur>({
1406
+ pk: "SPECIES#trex",
1407
+ sk: "PROFILE#001",
1408
+ name: "Tyrannosaurus Rex",
1409
+ diet: "omnivore", // Updated diet
1410
+ discoveryYear: 1905,
1411
+ })
1412
+ .returnValues("ALL_OLD")
1413
+ .execute();
1352
1414
  ```
1353
1415
 
1354
1416
  ### Update Operations
1355
1417
 
1356
1418
  | Operation | Method Example | Generated Expression |
1357
- |----------------------|-------------------------------------------------------|----------------------|
1419
+ | -------------------- | ----------------------------------------------------- | -------------------- |
1358
1420
  | **Set Attributes** | `.update(key).set("name", "New Name")` | `SET #name = :v1` |
1359
1421
  | **Add to Number** | `.update(key).add("score", 10)` | `ADD #score :v1` |
1360
1422
  | **Remove Attribute** | `.update(key).remove("temporary")` | `REMOVE #temporary` |
@@ -1364,32 +1426,94 @@ const oldDino = await table.put<Dinosaur>({
1364
1426
 
1365
1427
  The library supports a comprehensive set of type-safe condition operators:
1366
1428
 
1367
- | Category | Operators | Example |
1368
- |----------------|----------------------------------------------|-------------------------------------------------------------------------|
1369
- | **Comparison** | `eq`, `ne`, `lt`, `lte`, `gt`, `gte` | `.condition(op => op.gt("age", 18))` |
1429
+ | Category | Operators | Example |
1430
+ | -------------- | ---------------------------------------------- | ----------------------------------------------------------------------- |
1431
+ | **Comparison** | `eq`, `ne`, `lt`, `lte`, `gt`, `gte` | `.condition(op => op.gt("age", 18))` |
1370
1432
  | **String/Set** | `between`, `beginsWith`, `contains`, `inArray` | `.condition(op => op.inArray("status", ["active", "pending"]))` |
1371
- | **Existence** | `attributeExists`, `attributeNotExists` | `.condition(op => op.attributeExists("email"))` |
1372
- | **Logical** | `and`, `or`, `not` | `.condition(op => op.and(op.eq("status", "active"), op.gt("age", 18)))` |
1433
+ | **Existence** | `attributeExists`, `attributeNotExists` | `.condition(op => op.attributeExists("email"))` |
1434
+ | **Logical** | `and`, `or`, `not` | `.condition(op => op.and(op.eq("status", "active"), op.gt("age", 18)))` |
1373
1435
 
1374
1436
  All operators are type-safe and will provide proper TypeScript inference for nested attributes.
1375
1437
 
1376
1438
  #### Multiple Operations
1439
+
1377
1440
  Operations can be combined in a single update:
1441
+
1378
1442
  ```ts
1379
1443
  const result = await table
1380
1444
  .update({ pk: "USER#123", sk: "PROFILE" })
1381
1445
  .set("name", "Updated Name")
1382
1446
  .add("loginCount", 1)
1383
1447
  .remove("temporaryFlag")
1384
- .condition(op => op.attributeExists("email"))
1448
+ .condition((op) => op.attributeExists("email"))
1449
+ .execute();
1450
+ ```
1451
+
1452
+ #### Force Rebuilding Read-Only Indexes
1453
+
1454
+ When working with entities, some indexes may be marked as read-only to prevent any updates. However, you can force these indexes to be rebuilt during updates using the `forceIndexRebuild()` method:
1455
+
1456
+ ```ts
1457
+ // Force rebuild a single read-only index
1458
+ await dinoRepo
1459
+ .update(
1460
+ { id: "TREX-001" },
1461
+ {
1462
+ name: "Updated T-Rex",
1463
+ excavationSiteId: "new-site-001",
1464
+ },
1465
+ )
1466
+ .forceIndexRebuild("excavation-site-index")
1467
+ .execute();
1468
+
1469
+ // Force rebuild multiple read-only indexes
1470
+ await dinoRepo
1471
+ .update(
1472
+ { id: "TREX-001" },
1473
+ {
1474
+ name: "Updated T-Rex",
1475
+ excavationSiteId: "new-site-001",
1476
+ species: "Tyrannosaurus Rex",
1477
+ diet: "carnivore",
1478
+ },
1479
+ )
1480
+ .forceIndexRebuild(["excavation-site-index", "species-diet-index"])
1481
+ .execute();
1482
+
1483
+ // Chain with other update operations
1484
+ await dinoRepo
1485
+ .update(
1486
+ { id: "TREX-001" },
1487
+ {
1488
+ excavationSiteId: "new-site-002",
1489
+ },
1490
+ )
1491
+ .forceIndexRebuild("excavation-site-index")
1492
+ .set("lastUpdated", new Date().toISOString())
1493
+ .condition((op) => op.eq("status", "INACTIVE"))
1494
+ .returnValues("ALL_NEW")
1385
1495
  .execute();
1386
1496
  ```
1387
1497
 
1498
+ **When to use `forceIndexRebuild()`:**
1499
+
1500
+ - 🔄 You need to update a read-only index with new data
1501
+ - 🛠️ You're performing maintenance operations that require index consistency
1502
+ - 📊 You have all required attributes available for the index and want to force an update
1503
+ - ⚡ You want to override the read-only protection for specific update operations
1504
+
1505
+ **Important Notes:**
1506
+
1507
+ - This method only works with entity repositories, not direct table operations, as it requires knowledge of the entity's index definitions
1508
+ - The index name must be a valid index defined in your entity configuration, otherwise an error will be thrown
1509
+ - You must provide all required attributes for the index template variables, otherwise the update will fail with an error
1510
+
1388
1511
  ## 🔄 Type Safety Features
1389
1512
 
1390
1513
  The library provides comprehensive type safety for all operations:
1391
1514
 
1392
1515
  ### Nested Object Support
1516
+
1393
1517
  ```ts
1394
1518
  interface Dinosaur {
1395
1519
  pk: string;
@@ -1427,7 +1551,8 @@ interface Dinosaur {
1427
1551
  }
1428
1552
 
1429
1553
  // TypeScript ensures type safety for all nested dinosaur attributes
1430
- await table.update<Dinosaur>({ pk: "ENCLOSURE#F", sk: "DINO#007" })
1554
+ await table
1555
+ .update<Dinosaur>({ pk: "ENCLOSURE#F", sk: "DINO#007" })
1431
1556
  .set("stats.health", 95) // ✓ Valid
1432
1557
  .set("habitat.enclosure.climate", "Tropical") // ✓ Valid
1433
1558
  .set("care.feeding.lastFed", new Date().toISOString()) // ✓ Valid
@@ -1436,6 +1561,7 @@ await table.update<Dinosaur>({ pk: "ENCLOSURE#F", sk: "DINO#007" })
1436
1561
  ```
1437
1562
 
1438
1563
  ### Type-Safe Conditions
1564
+
1439
1565
  ```ts
1440
1566
  interface DinosaurMonitoring {
1441
1567
  species: string;
@@ -1446,19 +1572,22 @@ interface DinosaurMonitoring {
1446
1572
  alertLevel: "LOW" | "MEDIUM" | "HIGH";
1447
1573
  }
1448
1574
 
1449
- await table.query<DinosaurMonitoring>({
1450
- pk: "MONITORING",
1451
- sk: op => op.beginsWith("ENCLOSURE#")
1452
- })
1453
- .filter(op => op.and(
1454
- op.lt("health", "90"), // ❌ TypeScript Error: health expects number
1455
- op.gt("temperature", 38), // ✓ Valid
1456
- op.contains("behavior", "aggressive"), // Valid
1457
- op.inArray("alertLevel", ["LOW", "MEDIUM", "HIGH"]), // ✓ Valid: matches union type
1458
- op.inArray("alertLevel", ["UNKNOWN", "INVALID"]), // TypeScript Error: invalid alert levels
1459
- op.eq("alertLevel", "UNKNOWN") // TypeScript Error: invalid alert level
1460
- ))
1461
- .execute();
1575
+ await table
1576
+ .query<DinosaurMonitoring>({
1577
+ pk: "MONITORING",
1578
+ sk: (op) => op.beginsWith("ENCLOSURE#"),
1579
+ })
1580
+ .filter((op) =>
1581
+ op.and(
1582
+ op.lt("health", "90"), // ❌ TypeScript Error: health expects number
1583
+ op.gt("temperature", 38), // ✓ Valid
1584
+ op.contains("behavior", "aggressive"), // ✓ Valid
1585
+ op.inArray("alertLevel", ["LOW", "MEDIUM", "HIGH"]), // ✓ Valid: matches union type
1586
+ op.inArray("alertLevel", ["UNKNOWN", "INVALID"]), // ❌ TypeScript Error: invalid alert levels
1587
+ op.eq("alertLevel", "UNKNOWN"), // ❌ TypeScript Error: invalid alert level
1588
+ ),
1589
+ )
1590
+ .execute();
1462
1591
  ```
1463
1592
 
1464
1593
  ## 🔄 Batch Operations
@@ -1478,7 +1607,9 @@ const batch = table.batchBuilder<{
1478
1607
 
1479
1608
  // Add operations - entity type is automatically inferred
1480
1609
  dinosaurRepo.create(newDinosaur).withBatch(batch);
1481
- dinosaurRepo.get({ id: 'dino-123', diet: 'carnivore', species: 'Tyrannosaurus Rex' }).withBatch(batch);
1610
+ dinosaurRepo
1611
+ .get({ id: "dino-123", diet: "carnivore", species: "Tyrannosaurus Rex" })
1612
+ .withBatch(batch);
1482
1613
  fossilRepo.create(newFossil).withBatch(batch);
1483
1614
 
1484
1615
  // Execute and get typed results
@@ -1495,15 +1626,23 @@ const fossils: FossilEntity[] = result.reads.itemsByType.Fossil;
1495
1626
  // Batch get - retrieve multiple items
1496
1627
  const keys = [
1497
1628
  { pk: "DIET#carnivore", sk: "SPECIES#Tyrannosaurus Rex#ID#dino-123" },
1498
- { pk: "FOSSIL#456", sk: "DISCOVERY#2024" }
1629
+ { pk: "FOSSIL#456", sk: "DISCOVERY#2024" },
1499
1630
  ];
1500
1631
 
1501
1632
  const { items, unprocessedKeys } = await table.batchGet<DynamoItem>(keys);
1502
1633
 
1503
1634
  // Batch write - mix of operations
1504
1635
  const operations = [
1505
- { type: "put" as const, item: { pk: "DIET#herbivore", sk: "SPECIES#Triceratops#ID#dino-789", name: "Spike", dangerLevel: 3 } },
1506
- { type: "delete" as const, key: { pk: "FOSSIL#OLD", sk: "DISCOVERY#1990" } }
1636
+ {
1637
+ type: "put" as const,
1638
+ item: {
1639
+ pk: "DIET#herbivore",
1640
+ sk: "SPECIES#Triceratops#ID#dino-789",
1641
+ name: "Spike",
1642
+ dangerLevel: 3,
1643
+ },
1644
+ },
1645
+ { type: "delete" as const, key: { pk: "FOSSIL#OLD", sk: "DISCOVERY#1990" } },
1507
1646
  ];
1508
1647
 
1509
1648
  const { unprocessedItems } = await table.batchWrite(operations);
@@ -1514,16 +1653,20 @@ if (unprocessedItems.length > 0) {
1514
1653
  }
1515
1654
  ```
1516
1655
 
1517
-
1518
1656
  ## 🔒 Transaction Operations
1519
1657
 
1520
1658
  Perform multiple operations atomically with transaction support:
1521
1659
 
1522
1660
  ### Transaction Builder
1661
+
1523
1662
  ```ts
1524
1663
  const result = await table.transaction(async (tx) => {
1525
1664
  // Building the expression manually
1526
- tx.put("TableName", { pk: "123", sk: "123"}, and(op.attributeNotExists("pk"), op.attributeExists("sk")));
1665
+ tx.put(
1666
+ "TableName",
1667
+ { pk: "123", sk: "123" },
1668
+ and(op.attributeNotExists("pk"), op.attributeExists("sk")),
1669
+ );
1527
1670
 
1528
1671
  // Using table to build the operation
1529
1672
  table
@@ -1551,6 +1694,7 @@ const result = await table.transaction(async (tx) => {
1551
1694
  ```
1552
1695
 
1553
1696
  ### Transaction Options
1697
+
1554
1698
  ```ts
1555
1699
  const result = await table.transaction(
1556
1700
  async (tx) => {
@@ -1559,12 +1703,11 @@ const result = await table.transaction(
1559
1703
  {
1560
1704
  // Optional transaction settings
1561
1705
  idempotencyToken: "unique-token",
1562
- returnValuesOnConditionCheckFailure: true
1563
- }
1706
+ returnValuesOnConditionCheckFailure: true,
1707
+ },
1564
1708
  );
1565
1709
  ```
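
If `idempotencyToken` maps to DynamoDB's client request token for transactions (as the name suggests), reusing the same value across retries lets the service treat them as a single transaction rather than applying it twice. A minimal sketch — `randomUUID` and the retry loop are illustrative, not part of dyno-table:

```ts
import { randomUUID } from "node:crypto";

// Generate one token per logical transaction and reuse it on every retry.
const idempotencyToken = randomUUID();

for (let attempt = 0; attempt < 3; attempt++) {
  try {
    await table.transaction(
      async (tx) => {
        // ...same operations as above
      },
      { idempotencyToken }, // identical token on each attempt
    );
    break;
  } catch (error) {
    if (attempt === 2) throw error;
  }
}
```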
1566
1710
 
1567
-
1568
1711
  ## 🚨 Error Handling
1569
1712
 
1570
1713
  **TODO:**
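
A dedicated set of error classes is still on the roadmap; in the meantime, one option is to catch the underlying AWS SDK exceptions — a minimal sketch that assumes dyno-table lets SDK errors such as `ConditionalCheckFailedException` propagate unchanged (which may not hold once the error layer is reworked):

```ts
import { ConditionalCheckFailedException } from "@aws-sdk/client-dynamodb";

try {
  await dinoTable
    .update<Dinosaur>({ pk: "ENCLOSURE#H", sk: "DINO#008" })
    .set("habitat.enclosure.id", "ENCLOSURE#J")
    .condition((op) => op.attributeNotExists("transfer.inProgress"))
    .execute();
} catch (error) {
  if (error instanceof ConditionalCheckFailedException) {
    // Condition failed: a transfer is already in progress for this dinosaur.
  } else {
    throw error;
  }
}
```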
@@ -1577,6 +1720,7 @@ to provide a more clear set of error classes and additional information to allow
1577
1720
  All condition operators are type-safe and will validate against your item type. For detailed information about DynamoDB conditions and expressions, see the [AWS DynamoDB Developer Guide](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html).
1578
1721
 
1579
1722
  #### Comparison Operators
1723
+
1580
1724
  - `eq(attr, value)` - Equals (=)
1581
1725
  - `ne(attr, value)` - Not equals (≠)
1582
1726
  - `lt(attr, value)` - Less than (<)
@@ -1592,29 +1736,37 @@ All condition operators are type-safe and will validate against your item type.
1592
1736
  // Example: Health and feeding monitoring
1593
1737
  await dinoTable
1594
1738
  .query<Dinosaur>({
1595
- pk: "ENCLOSURE#G"
1739
+ pk: "ENCLOSURE#G",
1596
1740
  })
1597
- .filter((op) => op.and(
1598
- op.lt("stats.health", 85), // Health below 85%
1599
- op.lt("care.feeding.lastFed", new Date(Date.now() - 12 * 60 * 60 * 1000).toISOString()), // Not fed in 12 hours
1600
- op.between("stats.weight", 1000, 5000) // Medium-sized dinosaurs
1601
- ))
1741
+ .filter((op) =>
1742
+ op.and(
1743
+ op.lt("stats.health", 85), // Health below 85%
1744
+ op.lt(
1745
+ "care.feeding.lastFed",
1746
+ new Date(Date.now() - 12 * 60 * 60 * 1000).toISOString(),
1747
+ ), // Not fed in 12 hours
1748
+ op.between("stats.weight", 1000, 5000), // Medium-sized dinosaurs
1749
+ ),
1750
+ )
1602
1751
  .execute();
1603
1752
 
1604
1753
  // Example: Filter dinosaurs by multiple status values using inArray
1605
1754
  await dinoTable
1606
1755
  .query<Dinosaur>({
1607
- pk: "SPECIES#trex"
1756
+ pk: "SPECIES#trex",
1608
1757
  })
1609
- .filter((op) => op.and(
1610
- op.inArray("status", ["ACTIVE", "FEEDING", "RESTING"]), // Multiple valid statuses
1611
- op.inArray("diet", ["carnivore", "omnivore"]), // Meat-eating dinosaurs
1612
- op.gt("dangerLevel", 5) // High danger level
1613
- ))
1758
+ .filter((op) =>
1759
+ op.and(
1760
+ op.inArray("status", ["ACTIVE", "FEEDING", "RESTING"]), // Multiple valid statuses
1761
+ op.inArray("diet", ["carnivore", "omnivore"]), // Meat-eating dinosaurs
1762
+ op.gt("dangerLevel", 5), // High danger level
1763
+ ),
1764
+ )
1614
1765
  .execute();
1615
1766
  ```
1616
1767
 
1617
1768
  #### Attribute Operators
1769
+
1618
1770
  - `attributeExists(attr)` - Checks if attribute exists
1619
1771
  - `attributeNotExists(attr)` - Checks if attribute does not exist
1620
1772
 
@@ -1622,23 +1774,26 @@ await dinoTable
1622
1774
  // Example: Validate required attributes for dinosaur transfer
1623
1775
  await dinoTable
1624
1776
  .update<Dinosaur>({
1625
- pk: "ENCLOSURE#H",
1626
- sk: "DINO#008"
1777
+ pk: "ENCLOSURE#H",
1778
+ sk: "DINO#008",
1627
1779
  })
1628
1780
  .set("habitat.enclosure.id", "ENCLOSURE#J")
1629
- .condition((op) => op.and(
1630
- // Ensure all required health data is present
1631
- op.attributeExists("stats.health"),
1632
- op.attributeExists("care.medical.lastCheckup"),
1633
- // Ensure not already in transfer
1634
- op.attributeNotExists("transfer.inProgress"),
1635
- // Verify required monitoring tags
1636
- op.attributeExists("care.medical.vaccinations")
1637
- ))
1781
+ .condition((op) =>
1782
+ op.and(
1783
+ // Ensure all required health data is present
1784
+ op.attributeExists("stats.health"),
1785
+ op.attributeExists("care.medical.lastCheckup"),
1786
+ // Ensure not already in transfer
1787
+ op.attributeNotExists("transfer.inProgress"),
1788
+ // Verify required monitoring tags
1789
+ op.attributeExists("care.medical.vaccinations"),
1790
+ ),
1791
+ )
1638
1792
  .execute();
1639
1793
  ```
1640
1794
 
1641
1795
  #### Logical Operators
1796
+
1642
1797
  - `and(...conditions)` - Combines conditions with AND
1643
1798
  - `or(...conditions)` - Combines conditions with OR
1644
1799
  - `not(condition)` - Negates a condition
@@ -1647,34 +1802,39 @@ await dinoTable
1647
1802
  // Example: Complex safety monitoring conditions
1648
1803
  await dinoTable
1649
1804
  .query<Dinosaur>({
1650
- pk: "MONITORING#ALERTS"
1805
+ pk: "MONITORING#ALERTS",
1651
1806
  })
1652
- .filter((op) => op.or(
1653
- // Alert: Aggressive carnivores with low health
1654
- op.and(
1655
- op.eq("care.feeding.diet", "Carnivore"),
1656
- op.lt("stats.health", 70),
1657
- op.contains("behavior", "aggressive")
1807
+ .filter((op) =>
1808
+ op.or(
1809
+ // Alert: Aggressive carnivores with low health
1810
+ op.and(
1811
+ op.eq("care.feeding.diet", "Carnivore"),
1812
+ op.lt("stats.health", 70),
1813
+ op.contains("behavior", "aggressive"),
1814
+ ),
1815
+ // Alert: Any dinosaur not fed recently and showing stress
1816
+ op.and(
1817
+ op.lt(
1818
+ "care.feeding.lastFed",
1819
+ new Date(Date.now() - 8 * 60 * 60 * 1000).toISOString(),
1820
+ ),
1821
+ op.contains("behavior", "stressed"),
1822
+ ),
1823
+ // Alert: Critical status dinosaurs requiring immediate attention
1824
+ op.and(
1825
+ op.inArray("status", ["SICK", "INJURED", "QUARANTINE"]), // Critical statuses
1826
+ op.inArray("priority", ["HIGH", "URGENT"]), // High priority levels
1827
+ ),
1828
+ // Alert: Enclosure climate issues
1829
+ op.and(
1830
+ op.not(op.eq("habitat.enclosure.climate", "Optimal")),
1831
+ op.or(
1832
+ op.gt("habitat.requirements.temperature", 40),
1833
+ op.lt("habitat.requirements.humidity", 50),
1834
+ ),
1835
+ ),
1658
1836
  ),
1659
- // Alert: Any dinosaur not fed recently and showing stress
1660
- op.and(
1661
- op.lt("care.feeding.lastFed", new Date(Date.now() - 8 * 60 * 60 * 1000).toISOString()),
1662
- op.contains("behavior", "stressed")
1663
- ),
1664
- // Alert: Critical status dinosaurs requiring immediate attention
1665
- op.and(
1666
- op.inArray("status", ["SICK", "INJURED", "QUARANTINE"]), // Critical statuses
1667
- op.inArray("priority", ["HIGH", "URGENT"]) // High priority levels
1668
- ),
1669
- // Alert: Enclosure climate issues
1670
- op.and(
1671
- op.not(op.eq("habitat.enclosure.climate", "Optimal")),
1672
- op.or(
1673
- op.gt("habitat.requirements.temperature", 40),
1674
- op.lt("habitat.requirements.humidity", 50)
1675
- )
1676
- )
1677
- ))
1837
+ )
1678
1838
  .execute();
1679
1839
  ```
1680
1840
 
@@ -1687,7 +1847,8 @@ Special operators for sort key conditions in queries. See [AWS DynamoDB Key Cond
1687
1847
  const recentHealthChecks = await dinoTable
1688
1848
  .query<Dinosaur>({
1689
1849
  pk: "ENCLOSURE#K",
1690
- sk: (op) => op.beginsWith(`HEALTH#${new Date().toISOString().slice(0, 10)}`) // Today's checks
1850
+ sk: (op) =>
1851
+ op.beginsWith(`HEALTH#${new Date().toISOString().slice(0, 10)}`), // Today's checks
1691
1852
  })
1692
1853
  .execute();
1693
1854
 
@@ -1695,10 +1856,11 @@ const recentHealthChecks = await dinoTable
1695
1856
  const largeHerbivores = await dinoTable
1696
1857
  .query<Dinosaur>({
1697
1858
  pk: "DIET#herbivore",
1698
- sk: (op) => op.between(
1699
- `WEIGHT#${5000}`, // 5 tons minimum
1700
- `WEIGHT#${15000}` // 15 tons maximum
1701
- )
1859
+ sk: (op) =>
1860
+ op.between(
1861
+ `WEIGHT#${5000}`, // 5 tons minimum
1862
+ `WEIGHT#${15000}`, // 15 tons maximum
1863
+ ),
1702
1864
  })
1703
1865
  .execute();
1704
1866
 
@@ -1706,10 +1868,11 @@ const largeHerbivores = await dinoTable
1706
1868
  const quarantinedDinos = await dinoTable
1707
1869
  .query<Dinosaur>({
1708
1870
  pk: "STATUS#quarantine",
1709
- sk: (op) => op.between(
1710
- `DATE#${new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString().slice(0, 10)}`, // Last 7 days
1711
- `DATE#${new Date().toISOString().slice(0, 10)}` // Today
1712
- )
1871
+ sk: (op) =>
1872
+ op.between(
1873
+ `DATE#${new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString().slice(0, 10)}`, // Last 7 days
1874
+ `DATE#${new Date().toISOString().slice(0, 10)}`, // Today
1875
+ ),
1713
1876
  })
1714
1877
  .execute();
1715
1878
  ```
@@ -1779,6 +1942,7 @@ First you'll need to install the dependencies:
1779
1942
  ```bash
1780
1943
  pnpm install
1781
1944
  ```
1945
+
1782
1946
  Then set up the test table in local DynamoDB by running the following command:
1783
1947
 
1784
1948
  ```bash