@bitblit/ratchet-aws 6.0.146-alpha → 6.0.148-alpha

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/package.json +5 -4
  2. package/src/batch/aws-batch-background-processor.spec.ts +22 -0
  3. package/src/batch/aws-batch-background-processor.ts +71 -0
  4. package/src/batch/aws-batch-ratchet.spec.ts +42 -0
  5. package/src/batch/aws-batch-ratchet.ts +70 -0
  6. package/src/build/ratchet-aws-info.ts +19 -0
  7. package/src/cache/memory-storage-provider.ts +39 -0
  8. package/src/cache/simple-cache-object-wrapper.ts +11 -0
  9. package/src/cache/simple-cache-read-options.ts +9 -0
  10. package/src/cache/simple-cache-storage-provider.ts +15 -0
  11. package/src/cache/simple-cache.spec.ts +42 -0
  12. package/src/cache/simple-cache.ts +81 -0
  13. package/src/cloudwatch/cloud-watch-log-group-ratchet.spec.ts +26 -0
  14. package/src/cloudwatch/cloud-watch-log-group-ratchet.ts +105 -0
  15. package/src/cloudwatch/cloud-watch-logs-ratchet.spec.ts +123 -0
  16. package/src/cloudwatch/cloud-watch-logs-ratchet.ts +232 -0
  17. package/src/cloudwatch/cloud-watch-metrics-ratchet.spec.ts +30 -0
  18. package/src/cloudwatch/cloud-watch-metrics-ratchet.ts +98 -0
  19. package/src/dao/example-prototype-dao-item.ts +8 -0
  20. package/src/dao/memory-prototype-dao-provider.ts +16 -0
  21. package/src/dao/prototype-dao-config.ts +8 -0
  22. package/src/dao/prototype-dao-db.ts +4 -0
  23. package/src/dao/prototype-dao-provider.ts +6 -0
  24. package/src/dao/prototype-dao.spec.ts +33 -0
  25. package/src/dao/prototype-dao.ts +110 -0
  26. package/src/dao/s3-simple-dao.ts +96 -0
  27. package/src/dao/simple-dao-item.ts +13 -0
  28. package/src/dynamodb/dynamo-ratchet-like.ts +61 -0
  29. package/src/dynamodb/dynamo-ratchet.spec.ts +206 -0
  30. package/src/dynamodb/dynamo-ratchet.ts +850 -0
  31. package/src/dynamodb/dynamo-table-ratchet.spec.ts +23 -0
  32. package/src/dynamodb/dynamo-table-ratchet.ts +189 -0
  33. package/src/dynamodb/hash-spreader.spec.ts +22 -0
  34. package/src/dynamodb/hash-spreader.ts +89 -0
  35. package/src/dynamodb/impl/dynamo-db-storage-provider.spec.ts +60 -0
  36. package/src/dynamodb/impl/dynamo-db-storage-provider.ts +140 -0
  37. package/src/dynamodb/impl/dynamo-db-sync-lock.spec.ts +41 -0
  38. package/src/dynamodb/impl/dynamo-db-sync-lock.ts +78 -0
  39. package/src/dynamodb/impl/dynamo-expiring-code-provider.ts +31 -0
  40. package/src/dynamodb/impl/dynamo-runtime-parameter-provider.spec.ts +65 -0
  41. package/src/dynamodb/impl/dynamo-runtime-parameter-provider.ts +44 -0
  42. package/src/ec2/ec2-ratchet.spec.ts +45 -0
  43. package/src/ec2/ec2-ratchet.ts +169 -0
  44. package/src/ecr/ecr-unused-image-cleaner-options.ts +9 -0
  45. package/src/ecr/ecr-unused-image-cleaner-output.ts +8 -0
  46. package/src/ecr/ecr-unused-image-cleaner-repository-output.ts +10 -0
  47. package/src/ecr/ecr-unused-image-cleaner.spec.ts +40 -0
  48. package/src/ecr/ecr-unused-image-cleaner.ts +183 -0
  49. package/src/ecr/retained-image-descriptor.ts +7 -0
  50. package/src/ecr/retained-image-reason.ts +4 -0
  51. package/src/ecr/used-image-finder.ts +6 -0
  52. package/src/ecr/used-image-finders/aws-batch-used-image-finder.ts +40 -0
  53. package/src/ecr/used-image-finders/lambda-used-image-finder.ts +51 -0
  54. package/src/environment/cascade-environment-service-provider.ts +28 -0
  55. package/src/environment/env-var-environment-service-provider.ts +36 -0
  56. package/src/environment/environment-service-config.ts +7 -0
  57. package/src/environment/environment-service-provider.ts +7 -0
  58. package/src/environment/environment-service.spec.ts +41 -0
  59. package/src/environment/environment-service.ts +89 -0
  60. package/src/environment/fixed-environment-service-provider.ts +26 -0
  61. package/src/environment/ssm-environment-service-provider.spec.ts +18 -0
  62. package/src/environment/ssm-environment-service-provider.ts +71 -0
  63. package/src/expiring-code/expiring-code-params.ts +7 -0
  64. package/src/expiring-code/expiring-code-provider.ts +6 -0
  65. package/src/expiring-code/expiring-code-ratchet.spec.ts +10 -0
  66. package/src/expiring-code/expiring-code-ratchet.ts +44 -0
  67. package/src/expiring-code/expiring-code.ts +6 -0
  68. package/src/iam/aws-credentials-ratchet.ts +25 -0
  69. package/src/lambda/lambda-event-detector.ts +55 -0
  70. package/src/lambda/lambda-event-type-guards.ts +38 -0
  71. package/src/model/cloud-watch-metrics-minute-level-dynamo-count-request.ts +18 -0
  72. package/src/model/dynamo-count-result.ts +8 -0
  73. package/src/route53/route-53-ratchet.ts +77 -0
  74. package/src/runtime-parameter/cached-stored-runtime-parameter.ts +5 -0
  75. package/src/runtime-parameter/global-variable-override-runtime-parameter-provider.spec.ts +41 -0
  76. package/src/runtime-parameter/global-variable-override-runtime-parameter-provider.ts +82 -0
  77. package/src/runtime-parameter/memory-runtime-parameter-provider.ts +42 -0
  78. package/src/runtime-parameter/runtime-parameter-provider.ts +12 -0
  79. package/src/runtime-parameter/runtime-parameter-ratchet.spec.ts +53 -0
  80. package/src/runtime-parameter/runtime-parameter-ratchet.ts +84 -0
  81. package/src/runtime-parameter/stored-runtime-parameter.ts +6 -0
  82. package/src/s3/expanded-file-children.ts +5 -0
  83. package/src/s3/impl/s3-environment-service-provider.ts +41 -0
  84. package/src/s3/impl/s3-expiring-code-provider.spec.ts +63 -0
  85. package/src/s3/impl/s3-expiring-code-provider.ts +71 -0
  86. package/src/s3/impl/s3-prototype-dao-provider.spec.ts +45 -0
  87. package/src/s3/impl/s3-prototype-dao-provider.ts +37 -0
  88. package/src/s3/impl/s3-remote-file-tracking-provider-options.ts +6 -0
  89. package/src/s3/impl/s3-remote-file-tracking-provider.spec.ts +67 -0
  90. package/src/s3/impl/s3-remote-file-tracking-provider.ts +157 -0
  91. package/src/s3/impl/s3-storage-provider.spec.ts +32 -0
  92. package/src/s3/impl/s3-storage-provider.ts +60 -0
  93. package/src/s3/s3-cache-ratchet-like.ts +64 -0
  94. package/src/s3/s3-cache-ratchet.spec.ts +150 -0
  95. package/src/s3/s3-cache-ratchet.ts +476 -0
  96. package/src/s3/s3-location-sync-ratchet.ts +207 -0
  97. package/src/s3/s3-ratchet.spec.ts +26 -0
  98. package/src/s3/s3-ratchet.ts +26 -0
  99. package/src/ses/ses-mail-sending-provider.ts +85 -0
  100. package/src/sns/sns-ratchet.spec.ts +24 -0
  101. package/src/sns/sns-ratchet.ts +52 -0
  102. package/src/sync-lock/memory-sync-lock.ts +48 -0
  103. package/src/sync-lock/sync-lock-provider.ts +5 -0
@@ -0,0 +1,850 @@
1
+ /*
2
+ Helper functions for DynamoDB
3
+ */
4
+
5
+ import {
6
+ BatchGetCommand,
7
+ BatchGetCommandInput,
8
+ BatchGetCommandOutput,
9
+ BatchWriteCommand,
10
+ BatchWriteCommandInput,
11
+ BatchWriteCommandOutput,
12
+ DeleteCommand,
13
+ DeleteCommandInput,
14
+ DeleteCommandOutput,
15
+ DynamoDBDocumentClient,
16
+ GetCommand,
17
+ GetCommandInput,
18
+ GetCommandOutput,
19
+ PutCommand,
20
+ PutCommandInput,
21
+ PutCommandOutput,
22
+ QueryCommand,
23
+ QueryCommandInput,
24
+ QueryCommandOutput,
25
+ ScanCommand,
26
+ ScanCommandInput,
27
+ ScanCommandOutput,
28
+ UpdateCommand,
29
+ UpdateCommandInput,
30
+ UpdateCommandOutput,
31
+ } from '@aws-sdk/lib-dynamodb';
32
+
33
+ import { DynamoCountResult } from '../model/dynamo-count-result.js';
34
+ import { DynamoRatchetLike } from './dynamo-ratchet-like.js';
35
+ import { NativeAttributeValue } from '@aws-sdk/util-dynamodb';
36
+ import { ConditionalCheckFailedException, ProvisionedThroughputExceededException } from '@aws-sdk/client-dynamodb';
37
+ import { Logger } from '@bitblit/ratchet-common/logger/logger';
38
+ import { PromiseRatchet } from '@bitblit/ratchet-common/lang/promise-ratchet';
39
+ import { ErrorRatchet } from '@bitblit/ratchet-common/lang/error-ratchet';
40
+ import { DurationRatchet } from '@bitblit/ratchet-common/lang/duration-ratchet';
41
+ import { RequireRatchet } from '@bitblit/ratchet-common/lang/require-ratchet';
42
+ import { NumberRatchet } from '@bitblit/ratchet-common/lang/number-ratchet';
43
+
44
+ export class DynamoRatchet implements DynamoRatchetLike {
45
  /**
   * @param awsDDB the DynamoDB document client used for every operation on this ratchet
   */
  constructor(private awsDDB: DynamoDBDocumentClient) {
    if (!awsDDB) {
      // NOTE(review): throws a raw string rather than an Error instance - callers
      // cannot rely on err.message / err.stack here; consider `throw new Error(...)`
      throw 'awsDDB may not be null';
    }
  }
50
+
51
  /** The wrapped DynamoDB document client (property accessor form). */
  public get dynamoDBDocumentClient(): DynamoDBDocumentClient {
    return this.awsDDB;
  }
54
+
55
  /** The wrapped DynamoDB document client (method form, same value as the getter). */
  public getDDB(): DynamoDBDocumentClient {
    return this.awsDDB;
  }
58
+
59
+ public async tableIsEmpty(tableName: string): Promise<boolean> {
60
+ const scan: ScanCommandInput = {
61
+ TableName: tableName,
62
+ Limit: 1,
63
+ };
64
+
65
+ const ScanCommandOutput: ScanCommandOutput = await this.throughputSafeScanOrQuery<ScanCommandInput, ScanCommandOutput>(
66
+ (o) => this.scanPromise(o),
67
+ scan,
68
+ );
69
+ return ScanCommandOutput.Items.length === 0;
70
+ }
71
+
72
+ // A little pass-thru to simplify passing around this function
73
+ public async scanPromise(input: ScanCommandInput): Promise<ScanCommandOutput> {
74
+ return this.awsDDB.send(new ScanCommand(input));
75
+ }
76
+
77
+ // A little pass-thru to simplify passing around this function
78
+ public async queryPromise(input: QueryCommandInput): Promise<QueryCommandOutput> {
79
+ return this.awsDDB.send(new QueryCommand(input));
80
+ }
81
+
82
+ // This basically wraps up scans and queries with a function that will auto-retry them if a
83
+ // Throughput exception is encountered (up to a limit) but lets other errors get thrown.
84
+ // Drop-in replacement to make sure that things do not fail just because of throughput issues
85
+ public async throughputSafeScanOrQuery<T, R>(proc: (T) => Promise<R>, input: T, maxTries?: number, inCurrentTry?: number): Promise<R> {
86
+ let rval: R = null;
87
+ if (input) {
88
+ let currentTry: number = inCurrentTry ?? 0;
89
+ do {
90
+ currentTry++;
91
+ try {
92
+ rval = await proc(input);
93
+ } catch (err) {
94
+ if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
95
+ const wait: number = Math.pow(2, currentTry) * 1000;
96
+ Logger.debug('Exceeded scan throughput for %j : Try %d of %d (Waiting %d ms)', input, currentTry, maxTries, wait);
97
+ await PromiseRatchet.wait(wait);
98
+ currentTry++;
99
+ } else {
100
+ throw err; // We only catch throughput issues
101
+ }
102
+ }
103
+ } while (!rval && (!maxTries || currentTry < maxTries));
104
+ if (!rval) {
105
+ // We got here because we ran out of tries
106
+ ErrorRatchet.throwFormattedErr(
107
+ 'throughputSafeScan failed - tried %d times, kept running into throughput exceeded : %j',
108
+ maxTries,
109
+ input,
110
+ );
111
+ }
112
+ }
113
+ return rval;
114
+ }
115
+
116
  /**
   * Executes a query as a COUNT query, following LastEvaluatedKey paging to
   * the end, and returns the aggregated counts.
   *
   * NOTE: the inbound query object is mutated - Select is forced to 'COUNT',
   * Limit is cleared (but retained as a soft stop on the total count), and
   * ExclusiveStartKey is updated while paging.
   *
   * @param qry query to count (mutated in place, see above)
   * @param delayMS pause between page fetches, in milliseconds
   * @returns aggregate counts/pages, or null on any error (errors are logged, not rethrown)
   */
  public async fullyExecuteQueryCount(qry: QueryCommandInput, delayMS = 0): Promise<DynamoCountResult> {
    try {
      qry.Select = 'COUNT'; // Force it to be a count query
      Logger.debug('Executing count query : %j', qry);

      const rval: DynamoCountResult = {
        count: 0,
        scannedCount: 0,
        pages: 0,
      };

      const start: number = new Date().getTime();
      let qryResults: QueryCommandOutput = null;

      // Limit would cap each page rather than the total, so strip it here and
      // instead use it below as a soft threshold to abort paging early
      const myLimit: number = qry.Limit;
      qry.Limit = null;

      do {
        qryResults = await this.throughputSafeScanOrQuery<QueryCommandInput, QueryCommandOutput>((o) => this.queryPromise(o), qry);
        rval.count += qryResults['Count'];
        rval.scannedCount += qryResults['ScannedCount'];
        rval.pages++;
        qry['ExclusiveStartKey'] = qryResults.LastEvaluatedKey;
        await PromiseRatchet.wait(delayMS);
        Logger.silly('Rval is now %j', rval);
        if (myLimit && rval.count >= myLimit && qry['ExclusiveStartKey']) {
          Logger.info('Aborting query since hit limit of %d', myLimit);
          qry['ExclusiveStartKey'] = null; // Clearing the start key ends the do/while below
        }
      } while (qry['ExclusiveStartKey']);

      const end: number = new Date().getTime();

      Logger.debug('Finished, returned %j in %s for %j', rval, DurationRatchet.formatMsDuration(end - start, true), qry);
      return rval;
    } catch (err) {
      // Best-effort semantics: callers get null instead of an exception
      Logger.error('Failed with %s, q: %j', err, qry, err);
      return null;
    }
  }
156
+
157
+ public async fullyExecuteQuery<T>(qry: QueryCommandInput, delayMS = 0, softLimit: number = null): Promise<T[]> {
158
+ const rval: T[] = [];
159
+ await this.fullyExecuteProcessOverQuery<T>(
160
+ qry,
161
+ async (v) => {
162
+ rval.push(v);
163
+ },
164
+ delayMS,
165
+ softLimit,
166
+ );
167
+ return rval;
168
+ }
169
+
170
  /**
   * Runs a query to completion, invoking the supplied async processor once
   * per row instead of accumulating results in memory.
   *
   * Paging stops when: no LastEvaluatedKey remains, the soft limit is
   * reached, or the caller set Limit on the query (in which case only the
   * first page is processed).  NOTE: the inbound query object is mutated
   * (ExclusiveStartKey is updated while paging).
   *
   * @param qry query to execute (mutated in place)
   * @param proc async callback applied to each row
   * @param delayMS pause between page fetches, in milliseconds
   * @param softLimit stop paging once at least this many rows were processed (null = unbounded)
   * @returns the number of rows processed (errors are logged, not rethrown)
   */
  public async fullyExecuteProcessOverQuery<T>(
    qry: QueryCommandInput,
    proc: (val: T) => Promise<void>,
    delayMS = 0,
    softLimit: number = null,
  ): Promise<number> {
    let cnt: number = 0;
    try {
      Logger.debug('Executing query : %j', qry);
      const start: number = new Date().getTime();
      Logger.debug('Pulling %j', qry);

      // First page
      let qryResults: QueryCommandOutput = await this.throughputSafeScanOrQuery<QueryCommandInput, QueryCommandOutput>(
        (o) => this.queryPromise(o),
        qry,
      );
      for (const qri of qryResults.Items) {
        await proc(qri as T);
        cnt++;
      }

      let pages = 0;
      let blankPages = 0;

      while (qryResults.LastEvaluatedKey && (softLimit === null || cnt < softLimit) && !qry.Limit) {
        // If Limit was set on the initial query, stop after 1
        Logger.silly('Found more rows - requery with key %j', qryResults.LastEvaluatedKey);
        qry['ExclusiveStartKey'] = qryResults.LastEvaluatedKey;
        qryResults = await this.throughputSafeScanOrQuery<QueryCommandInput, QueryCommandOutput>((o) => this.queryPromise(o), qry);
        for (const qri of qryResults.Items) {
          await proc(qri as T);
          cnt++;
        }
        Logger.silly('Have processed %d items', cnt);
        pages++;
        // A filtered query can return pages with zero matching rows; track them for diagnostics
        blankPages += qryResults.Count === 0 ? 1 : 0;
        await PromiseRatchet.wait(delayMS);
      }

      const end: number = new Date().getTime();

      Logger.debug(
        'Finished, processed %d rows in %s for %j (%d blank pages, %d total pages)',
        cnt,
        DurationRatchet.formatMsDuration(end - start, true),
        qry,
        blankPages,
        pages,
      );
    } catch (err) {
      // Best-effort semantics: the partial count is still returned
      Logger.error('Failed with %s, q: %j', err, qry, err);
    }
    return cnt;
  }
226
+
227
  /**
   * Executes a scan as a COUNT scan, following LastEvaluatedKey paging to
   * the end, and returns the aggregated counts.
   *
   * NOTE: the inbound scan object is mutated - Select is forced to 'COUNT',
   * Limit is cleared (but retained as a soft stop on the total count), and
   * ExclusiveStartKey is updated while paging.
   *
   * @param scan scan to count (mutated in place, see above)
   * @param delayMS pause between page fetches, in milliseconds
   * @returns aggregate counts/pages, or null on any error (errors are logged, not rethrown)
   */
  public async fullyExecuteScanCount(scan: ScanCommandInput, delayMS = 0): Promise<DynamoCountResult> {
    try {
      scan.Select = 'COUNT'; // Force it to be a count query
      const rval: DynamoCountResult = {
        count: 0,
        scannedCount: 0,
        pages: 0,
      };

      Logger.debug('Executing scan count : %j', scan);
      const start: number = new Date().getTime();

      let qryResults: ScanCommandOutput = null;

      // Limit would cap each page rather than the total, so strip it here and
      // instead use it below as a soft threshold to abort paging early
      const myLimit: number = scan.Limit;
      scan.Limit = null;

      do {
        qryResults = await this.throughputSafeScanOrQuery<ScanCommandInput, ScanCommandOutput>((o) => this.scanPromise(o), scan);
        rval.count += qryResults['Count'];
        rval.scannedCount += qryResults['ScannedCount'];
        rval.pages++;
        scan['ExclusiveStartKey'] = qryResults?.LastEvaluatedKey;
        await PromiseRatchet.wait(delayMS);
        Logger.silly('Rval is now %j', rval);
        if (myLimit && rval.count >= myLimit && scan['ExclusiveStartKey']) {
          Logger.info('Aborting scan since hit limit of %d', myLimit);
          scan['ExclusiveStartKey'] = null; // Clearing the start key ends the do/while below
        }
      } while (scan['ExclusiveStartKey']);

      const end: number = new Date().getTime();

      Logger.debug('Finished, returned %j in %s for %j', rval, DurationRatchet.formatMsDuration(end - start, true), scan);
      return rval;
    } catch (err) {
      // Best-effort semantics: callers get null instead of an exception
      Logger.error('Failed with %s, q: %j', err, scan, err);
      return null;
    }
  }
267
+
268
+ public async fullyExecuteScan<T>(scan: ScanCommandInput, delayMS = 0, softLimit: number = null): Promise<T[]> {
269
+ const rval: T[] = [];
270
+ await this.fullyExecuteProcessOverScan<T>(
271
+ scan,
272
+ async (v) => {
273
+ rval.push(v);
274
+ },
275
+ delayMS,
276
+ softLimit,
277
+ );
278
+ return rval;
279
+ }
280
+
281
  /**
   * Runs a scan to completion, invoking the supplied async processor once
   * per row instead of accumulating results in memory.
   *
   * Paging stops when: no LastEvaluatedKey remains, the soft limit is
   * reached, or the caller set Limit on the scan (in which case only the
   * first page is processed).  NOTE: the inbound scan object is mutated
   * (ExclusiveStartKey is updated while paging).
   *
   * @param scan scan to execute (mutated in place)
   * @param proc async callback applied to each row
   * @param delayMS pause between page fetches, in milliseconds
   * @param softLimit stop paging once at least this many rows were processed (null = unbounded)
   * @returns the number of rows processed (errors are logged, not rethrown)
   */
  public async fullyExecuteProcessOverScan<T>(
    scan: ScanCommandInput,
    proc: (val: T) => Promise<void>,
    delayMS = 0,
    softLimit: number = null,
  ): Promise<number> {
    let cnt: number = 0;
    try {
      Logger.debug('Executing scan : %j', scan);
      const start: number = new Date().getTime();

      Logger.debug('Pulling %j', scan);

      // First page
      let qryResults: ScanCommandOutput = await this.throughputSafeScanOrQuery<ScanCommandInput, ScanCommandOutput>(
        (o) => this.scanPromise(o),
        scan,
      );
      for (const qri of qryResults.Items) {
        await proc(qri as T);
        cnt++;
      }

      while (qryResults.LastEvaluatedKey && (softLimit === null || cnt < softLimit) && !scan.Limit) {
        Logger.silly('Found more rows - requery with key %j', qryResults.LastEvaluatedKey);
        scan['ExclusiveStartKey'] = qryResults.LastEvaluatedKey;
        qryResults = await this.throughputSafeScanOrQuery<ScanCommandInput, ScanCommandOutput>((o) => this.scanPromise(o), scan);
        for (const qri of qryResults.Items) {
          await proc(qri as T);
          cnt++;
        }
        Logger.silly('Rval is now %d items', cnt);
        await PromiseRatchet.wait(delayMS);
      }

      const end: number = new Date().getTime();

      Logger.debug('Finished, processed %d results in %s for %j', cnt, DurationRatchet.formatMsDuration(end - start, true), scan);
    } catch (err) {
      // Best-effort semantics: the partial count is still returned
      Logger.error('Failed with %s, q: %j', err, scan, err);
    }
    return cnt;
  }
325
+
326
+ public async writeAllInBatches<T>(tableName: string, elements: T[], batchSize: number): Promise<number> {
327
+ if (!batchSize || batchSize < 2) {
328
+ throw new Error('Batch size needs to be at least 2, was ' + batchSize);
329
+ }
330
+
331
+ let rval = 0;
332
+ if (!!elements && elements.length > 0) {
333
+ let batchItems: Record<string, any>[] = [];
334
+ elements.forEach((el) => {
335
+ batchItems.push({
336
+ PutRequest: {
337
+ Item: el,
338
+ ReturnConsumedCapacity: 'TOTAL',
339
+ TableName: tableName,
340
+ },
341
+ });
342
+ });
343
+ Logger.debug('Processing %d batch items to %s', batchItems.length, tableName);
344
+
345
+ while (batchItems.length > 0) {
346
+ const curBatch: Record<string, any>[] = batchItems.slice(0, Math.min(batchItems.length, batchSize));
347
+ batchItems = batchItems.slice(curBatch.length);
348
+ const params: BatchWriteCommandInput = {
349
+ RequestItems: {},
350
+ ReturnConsumedCapacity: 'TOTAL',
351
+ ReturnItemCollectionMetrics: 'SIZE',
352
+ };
353
+ params.RequestItems[tableName] = curBatch;
354
+
355
+ let tryCount = 1;
356
+ let done = false;
357
+ let batchResults: BatchWriteCommandOutput = null;
358
+ while (!done && tryCount < 7) {
359
+ try {
360
+ batchResults = await this.awsDDB.send(new BatchWriteCommand(params));
361
+ } catch (err) {
362
+ if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
363
+ Logger.info('Caught ProvisionedThroughputExceededException - retrying delete');
364
+ batchResults = { UnprocessedItems: params.RequestItems } as BatchWriteCommandOutput; // Just retry everything
365
+ } else {
366
+ throw err; // We only retry on throughput
367
+ }
368
+ }
369
+ if (
370
+ !!batchResults &&
371
+ !!batchResults.UnprocessedItems &&
372
+ !!batchResults.UnprocessedItems[tableName] &&
373
+ batchResults.UnprocessedItems[tableName].length > 0
374
+ ) {
375
+ const backoff: number = Math.pow(2, tryCount); // Backoff 2,4,8,16,32 seconds to allow capacity recovery
376
+ Logger.warn(
377
+ 'Found %d unprocessed items. Backing off %d seconds and trying again',
378
+ batchResults.UnprocessedItems[tableName].length,
379
+ backoff,
380
+ );
381
+ await PromiseRatchet.wait(backoff * 1000);
382
+ tryCount++;
383
+ params.RequestItems[tableName] = batchResults.UnprocessedItems[tableName];
384
+ } else {
385
+ done = true;
386
+ }
387
+ }
388
+ if (
389
+ !!batchResults &&
390
+ !!batchResults.UnprocessedItems &&
391
+ !!batchResults.UnprocessedItems[tableName] &&
392
+ batchResults.UnprocessedItems[tableName].length > 0
393
+ ) {
394
+ Logger.error('After 6 tries there were still %d unprocessed items', batchResults.UnprocessedItems[tableName].length);
395
+ rval += curBatch.length - batchResults.UnprocessedItems[tableName].length;
396
+ Logger.warn('FIX Unprocessed : %j', batchResults.UnprocessedItems);
397
+ } else {
398
+ rval += curBatch.length;
399
+ }
400
+ }
401
+ }
402
+ return rval;
403
+ }
404
+
405
+ public async fetchFullObjectsMatchingKeysOnlyIndexQuery<T>(
406
+ qry: QueryCommandInput,
407
+ keyNames: string[],
408
+ batchSize: number = 25,
409
+ ): Promise<T[]> {
410
+ RequireRatchet.notNullOrUndefined(qry);
411
+ RequireRatchet.notNullOrUndefined(qry.TableName);
412
+ RequireRatchet.notNullOrUndefined(keyNames);
413
+ RequireRatchet.true(keyNames.length > 0);
414
+
415
+ const keyDataSrc: Record<string, any>[] = await this.fullyExecuteQuery<Record<string, any>>(qry);
416
+ const keysOnly: Record<string, any>[] = DynamoRatchet.stripAllToKeysOnly(keyDataSrc, keyNames);
417
+ const rval: T[] = await this.fetchAllInBatches<T>(qry.TableName, keysOnly, batchSize);
418
+ return rval;
419
+ }
420
+
421
+ public async fetchAllInBatches<T>(tableName: string, inKeys: Record<string, any>[], batchSize: number): Promise<T[]> {
422
+ if (!batchSize || batchSize < 2 || batchSize > 100) {
423
+ throw new Error('Batch size needs to be at least 2 and no more than 100, was ' + batchSize);
424
+ }
425
+
426
+ let rval: T[] = [];
427
+ const batches: BatchGetCommandInput[] = [];
428
+ let remain: Record<string, any>[][] = Object.assign([], inKeys);
429
+ while (remain.length > 0) {
430
+ const curBatch: Record<string, any>[] = remain.slice(0, Math.min(remain.length, batchSize));
431
+ remain = remain.slice(curBatch.length);
432
+ const tableEntry: Record<string, any> = {};
433
+ tableEntry[tableName] = {
434
+ Keys: curBatch,
435
+ };
436
+ const nextBatch: BatchGetCommandInput = {
437
+ RequestItems: tableEntry,
438
+ ReturnConsumedCapacity: 'TOTAL',
439
+ };
440
+ batches.push(nextBatch);
441
+ }
442
+ Logger.debug('Created %d batches', batches.length);
443
+
444
+ for (let i = 0; i < batches.length; i++) {
445
+ // No need to log batch count if there's only one.
446
+ if (batches.length > 1) {
447
+ Logger.info('Processing batch %d of %d', i + 1, batches.length);
448
+ }
449
+ const input: BatchGetCommandInput = batches[i];
450
+ let tryCount: number = 1;
451
+ do {
452
+ Logger.silly('Pulling %j', input);
453
+ const res: BatchGetCommandOutput = await this.awsDDB.send(new BatchGetCommand(input));
454
+
455
+ // Copy in all the data
456
+ rval = rval.concat(res.Responses[tableName] as T[]);
457
+
458
+ // Retry anything we missed
459
+ if (!!res.UnprocessedKeys && !!res.UnprocessedKeys[tableName] && res.UnprocessedKeys[tableName].Keys.length > 0 && tryCount < 15) {
460
+ Logger.silly('Found %d unprocessed, waiting', res.UnprocessedKeys[tableName].Keys);
461
+ await PromiseRatchet.wait(Math.pow(2, tryCount) * 1000);
462
+ tryCount++;
463
+ }
464
+ input.RequestItems = res.UnprocessedKeys;
465
+ } while (!input.RequestItems && input.RequestItems[tableName].Keys.length > 0);
466
+ }
467
+ return rval;
468
+ }
469
+
470
  /**
   * Deletes all items matching the supplied keys using BatchWrite, splitting
   * them into batches of at most batchSize.  Each batch is retried up to 6
   * times with exponential backoff (2,4,8,16,32 seconds) when DynamoDB
   * reports unprocessed items or throughput exceeded; items still
   * unprocessed after that are logged and excluded from the returned count.
   *
   * @param tableName table to delete from
   * @param keys key records identifying the items to delete
   * @param batchSize keys per BatchWrite request (must be at least 2)
   * @returns the number of items successfully submitted for deletion
   * @throws Error if batchSize is less than 2; rethrows any non-throughput AWS error
   */
  public async deleteAllInBatches(tableName: string, keys: Record<string, any>[], batchSize: number): Promise<number> {
    if (!batchSize || batchSize < 2) {
      throw new Error('Batch size needs to be at least 2, was ' + batchSize);
    }

    let rval = 0;
    if (!!keys && keys.length > 0) {
      let batchItems: Record<string, any>[] = [];
      keys.forEach((el) => {
        batchItems.push({
          DeleteRequest: {
            Key: el,
            ReturnConsumedCapacity: 'TOTAL',
            TableName: tableName,
          },
        });
      });
      Logger.debug('Processing %d DeleteBatch items to %s', batchItems.length, tableName);

      while (batchItems.length > 0) {
        // Carve off the next batch and shrink the remaining work list
        const curBatch: Record<string, any>[] = batchItems.slice(0, Math.min(batchItems.length, batchSize));
        batchItems = batchItems.slice(curBatch.length);
        const params: BatchWriteCommandInput = {
          RequestItems: {},
          ReturnConsumedCapacity: 'TOTAL',
          ReturnItemCollectionMetrics: 'SIZE',
        };
        params.RequestItems[tableName] = curBatch;

        let tryCount = 1;
        let done = false;
        let batchResults: BatchWriteCommandOutput = null;
        while (!done && tryCount < 7) {
          try {
            batchResults = await this.awsDDB.send(new BatchWriteCommand(params));
          } catch (err) {
            if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
              Logger.info('Caught ProvisionedThroughputExceededException - retrying delete');
              batchResults = { UnprocessedItems: params.RequestItems } as BatchWriteCommandOutput; // Just retry everything
            } else {
              throw err; // We only retry on throughput
            }
          }
          if (
            !!batchResults &&
            !!batchResults.UnprocessedItems &&
            !!batchResults.UnprocessedItems[tableName] &&
            batchResults.UnprocessedItems[tableName].length > 0
          ) {
            const backoff: number = Math.pow(2, tryCount); // Backoff 2,4,8,16,32 seconds to allow capacity recovery
            Logger.warn(
              'Found %d unprocessed items. Backing off %d seconds and trying again',
              batchResults.UnprocessedItems[tableName].length,
              backoff,
            );
            await PromiseRatchet.wait(backoff * 1000);
            tryCount++;
            // Only the unprocessed remainder is resubmitted
            params.RequestItems[tableName] = batchResults.UnprocessedItems[tableName];
          } else {
            done = true;
          }
        }
        if (
          !!batchResults &&
          !!batchResults.UnprocessedItems &&
          !!batchResults.UnprocessedItems[tableName] &&
          batchResults.UnprocessedItems[tableName].length > 0
        ) {
          Logger.error('After 6 tries there were still %d unprocessed items', batchResults.UnprocessedItems[tableName].length);
          rval += curBatch.length - batchResults.UnprocessedItems[tableName].length;
          Logger.warn('FIX Unprocessed : %j', batchResults.UnprocessedItems);
        } else {
          rval += curBatch.length;
        }

        Logger.debug('%d Remain, DeleteBatch Results : %j', batchItems.length, batchResults);
      }
    }
    return rval;
  }
550
+
551
+ public async simplePut(tableName: string, value: Record<string, any>, autoRetryCount: number = 3): Promise<PutCommandOutput> {
552
+ let rval: PutCommandOutput = null;
553
+ let currentTry: number = 0;
554
+
555
+ const params: PutCommandInput = {
556
+ Item: value,
557
+ ReturnConsumedCapacity: 'TOTAL',
558
+ TableName: tableName,
559
+ };
560
+
561
+ while (!rval && currentTry < autoRetryCount) {
562
+ try {
563
+ rval = await this.awsDDB.send(new PutCommand(params));
564
+ } catch (err) {
565
+ if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
566
+ const wait: number = Math.pow(2, currentTry) * 1000;
567
+ Logger.debug('Exceeded write throughput for %j : Try %d of %d (Waiting %d ms)', params, currentTry, autoRetryCount, wait);
568
+ await PromiseRatchet.wait(wait);
569
+ currentTry++;
570
+ } else {
571
+ throw err; // We only catch throughput issues
572
+ }
573
+ }
574
+ }
575
+ if (!rval) {
576
+ Logger.warn('Unable to write %j to DDB after %d tries, giving up', params, autoRetryCount);
577
+ }
578
+ return rval;
579
+ }
580
+
581
+ public async simplePutOnlyIfFieldIsNullOrUndefined(tableName: string, value: Record<string, any>, fieldName: string): Promise<boolean> {
582
+ let rval: boolean = false;
583
+ const params: PutCommandInput = {
584
+ Item: value,
585
+ ReturnConsumedCapacity: 'TOTAL',
586
+ ConditionExpression: 'attribute_not_exists(#fieldName) OR #fieldName = :null ',
587
+ ExpressionAttributeNames: {
588
+ '#fieldName': fieldName,
589
+ },
590
+ ExpressionAttributeValues: {
591
+ ':null': null,
592
+ },
593
+ TableName: tableName,
594
+ };
595
+ try {
596
+ const wrote: PutCommandOutput = await this.awsDDB.send(new PutCommand(params));
597
+ Logger.silly('Wrote : %j', wrote);
598
+ rval = true;
599
+ } catch (err) {
600
+ if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
601
+ // Infinite retry - probably not smart
602
+ Logger.debug('Exceeded write throughput for %j : (Waiting 2000 ms)', params);
603
+ await PromiseRatchet.wait(2000);
604
+ rval = await this.simplePutOnlyIfFieldIsNullOrUndefined(tableName, value, fieldName);
605
+ } else if (err && err instanceof ConditionalCheckFailedException) {
606
+ Logger.debug('Failed to write %j due to null field failure');
607
+ rval = false;
608
+ } else {
609
+ throw err; // We only catch throughput issues
610
+ }
611
+ }
612
+ return rval;
613
+ }
614
+
615
  /**
   * Works like simplePut, but if a key collision is detected (conditional
   * check failure on the key attributes) it passes the item through
   * adjustFunction and tries again.  The adjustment function MUST change one
   * of the keys - otherwise this just runs forever (or until it hits
   * "maxAdjusts").  Throughput-exceeded errors are retried with exponential
   * backoff up to autoRetryCount times; other errors are rethrown.
   *
   * @param tableName table to write to
   * @param value item to store
   * @param keyNames the 1 or 2 key attribute names used for collision detection
   * @param adjustFunction produces a new candidate item after a collision (must alter a key)
   * @param maxAdjusts cap on collision adjustments (null = unbounded)
   * @param autoRetryCount cap on throughput retries (default 3)
   * @returns the item as finally written (possibly adjusted), or null on give-up
   */
  public async simplePutWithCollisionAvoidance<T>(
    tableName: string,
    value: T,
    keyNames: string[],
    adjustFunction: (val: T) => T,
    maxAdjusts: number = null,
    autoRetryCount: number = 3,
  ): Promise<T> {
    RequireRatchet.true(keyNames && keyNames.length > 0 && keyNames.length < 3, 'You must pass 1 or 2 key names');
    let pio: PutCommandOutput = null;
    let currentTry: number = 0;

    // Condition fails (-> collision branch below) when an item with the same key value(s) already exists
    const attrNames: Record<string, string> = {
      '#key0': keyNames[0],
    };
    const attrValues: Record<string, NativeAttributeValue> = {
      ':key0': value[keyNames[0]],
    };

    let condExp: string = '#key0 <> :key0';
    if (keyNames.length > 1) {
      condExp += ' AND #key1 <> :key1';
      attrNames['#key1'] = keyNames[1];
      attrValues[':key1'] = value[keyNames[1]];
    }

    const params: PutCommandInput = {
      Item: value,
      ReturnConsumedCapacity: 'TOTAL',
      ConditionExpression: condExp,
      ExpressionAttributeNames: attrNames,
      ExpressionAttributeValues: attrValues,
      TableName: tableName,
    };

    let adjustCount: number = 0;
    while (!pio && currentTry < autoRetryCount && (!maxAdjusts || adjustCount < maxAdjusts)) {
      try {
        pio = await this.awsDDB.send(new PutCommand(params));
      } catch (err) {
        if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
          // Throughput problem: exponential backoff, bounded by autoRetryCount
          currentTry++;
          const wait: number = Math.pow(2, currentTry) * 1000;
          Logger.debug('Exceeded write throughput for %j : Try %d of %d (Waiting %d ms)', params, currentTry, autoRetryCount, wait);
          await PromiseRatchet.wait(wait);
        } else if (err && err instanceof ConditionalCheckFailedException) {
          // Collision: adjust the candidate item and refresh the condition values to match
          let newValue: T = Object.assign({}, params.Item) as T;
          Logger.info('Failed to write %j due to collision - adjusting and retrying', newValue);
          newValue = adjustFunction(newValue);
          params.Item = newValue;
          params.ExpressionAttributeValues[':key0'] = newValue[keyNames[0]];
          if (keyNames.length > 1) {
            params.ExpressionAttributeValues[':key1'] = newValue[keyNames[1]];
          }
          adjustCount++;
        } else {
          throw err; // We only catch throughput issues
        }
      }
    }
    if (pio && adjustCount > 0) {
      Logger.info('After adjustment, wrote %j as %j', value, params.Item);
    }

    if (!pio) {
      Logger.warn('Unable to write %j to DDB after %d provision tries and %d adjusts, giving up', params, currentTry, adjustCount);
    }

    return pio ? (params.Item as T) : null;
  }
687
+
688
+ public async simpleGet<T>(tableName: string, keys: Record<string, any>, autoRetryCount: number = 3): Promise<T> {
689
+ let holder: GetCommandOutput = null;
690
+ let currentTry: number = 0;
691
+
692
+ const params: GetCommandInput = {
693
+ TableName: tableName,
694
+ Key: keys,
695
+ };
696
+
697
+ while (!holder && currentTry < autoRetryCount) {
698
+ try {
699
+ holder = await this.awsDDB.send(new GetCommand(params));
700
+ } catch (err) {
701
+ if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
702
+ const wait: number = Math.pow(2, currentTry) * 1000;
703
+ Logger.debug('Exceeded read throughput for %j : Try %d of %d (Waiting %d ms)', params, currentTry, autoRetryCount, wait);
704
+ await PromiseRatchet.wait(wait);
705
+ currentTry++;
706
+ } else {
707
+ throw err; // We only catch throughput issues
708
+ }
709
+ }
710
+ }
711
+ if (!holder) {
712
+ Logger.warn('Unable to read %j from DDB after %d tries, giving up', params, autoRetryCount);
713
+ }
714
+ const rval: T = !!holder && !!holder.Item ? Object.assign({} as T, holder.Item) : null;
715
+ return rval;
716
+ }
717
+
718
+ public static objectIsErrorWithProvisionedThroughputExceededExceptionCode(err: Record<string, any>): boolean {
719
+ return !!err && err instanceof ProvisionedThroughputExceededException;
720
+ }
721
+
722
+ public async simpleGetWithCounterDecrement<T>(
723
+ tableName: string,
724
+ keys: Record<string, any>,
725
+ counterAttributeName: string,
726
+ deleteOnZero: boolean,
727
+ autoRetryCount: number = 3,
728
+ ): Promise<T> {
729
+ let holder: UpdateCommandOutput = null;
730
+ let currentTry: number = 0;
731
+
732
+ const params: UpdateCommandInput = {
733
+ TableName: tableName,
734
+ Key: keys,
735
+ UpdateExpression: 'set #counter = #counter-:decVal',
736
+ ExpressionAttributeNames: {
737
+ '#counter': counterAttributeName,
738
+ },
739
+ ExpressionAttributeValues: {
740
+ ':decVal': 1,
741
+ ':minVal': 0,
742
+ },
743
+ ConditionExpression: '#counter > :minVal',
744
+ ReturnValues: 'ALL_NEW',
745
+ };
746
+
747
+ let updateFailed: boolean = false;
748
+ while (!holder && currentTry < autoRetryCount && !updateFailed) {
749
+ try {
750
+ holder = await this.awsDDB.send(new UpdateCommand(params));
751
+ } catch (err) {
752
+ if (DynamoRatchet.objectIsErrorWithProvisionedThroughputExceededExceptionCode(err)) {
753
+ const wait: number = Math.pow(2, currentTry) * 1000;
754
+ Logger.debug('Exceeded update throughput for %j : Try %d of %d (Waiting %d ms)', params, currentTry, autoRetryCount, wait);
755
+ await PromiseRatchet.wait(wait);
756
+ currentTry++;
757
+ } else if (!!err && err instanceof ConditionalCheckFailedException) {
758
+ Logger.info('Cannot fetch requested row (%j) - the update check failed', keys);
759
+ updateFailed = true;
760
+ } else {
761
+ throw err; // We only catch throughput issues
762
+ }
763
+ }
764
+ }
765
+ if (!holder && !updateFailed) {
766
+ Logger.warn('Unable to update %j from DDB after %d tries, giving up', params, autoRetryCount);
767
+ }
768
+
769
+ const rval: T = !!holder && !!holder.Attributes ? Object.assign({} as T, holder.Attributes) : null;
770
+
771
+ if (deleteOnZero && rval && rval[counterAttributeName] === 0) {
772
+ Logger.info('Delete on 0 specified, removing');
773
+ await this.simpleDelete(tableName, keys);
774
+ }
775
+
776
+ return rval;
777
+ }
778
+
779
+ public async simpleDelete(tableName: string, keys: Record<string, any>): Promise<DeleteCommandOutput> {
780
+ const params: DeleteCommandInput = {
781
+ TableName: tableName,
782
+ Key: keys,
783
+ };
784
+
785
+ const holder: DeleteCommandOutput = await this.awsDDB.send(new DeleteCommand(params));
786
+ return holder;
787
+ }
788
+
789
+ public async atomicCounter(tableName: string, keys: Record<string, any>, counterFieldName: string, increment = 1): Promise<number> {
790
+ const update: UpdateCommandInput = {
791
+ TableName: tableName,
792
+ Key: keys,
793
+ UpdateExpression: 'SET #counterFieldName = #counterFieldName + :inc',
794
+ ExpressionAttributeNames: {
795
+ '#counterFieldName': counterFieldName,
796
+ },
797
+ ExpressionAttributeValues: {
798
+ ':inc': increment,
799
+ },
800
+ ReturnValues: 'UPDATED_NEW',
801
+ };
802
+
803
+ const ui: UpdateCommandOutput = await this.awsDDB.send(new UpdateCommand(update));
804
+ const rval: number = NumberRatchet.safeNumber(ui.Attributes[counterFieldName]);
805
+ return rval;
806
+ }
807
+
808
+ // Recursively Removes any empty strings in place
809
+ // Here for backwards compatibility - really should just configure your document client the
810
+ // way you want it instead
811
+
812
+ public static cleanObject(ob: Record<string, any>): void {
813
+ if (ob) {
814
+ const rem: string[] = [];
815
+ Object.keys(ob).forEach((k) => {
816
+ const v: any = ob[k];
817
+ if (v === '') {
818
+ rem.push(k);
819
+ } else if (v instanceof Object) {
820
+ DynamoRatchet.cleanObject(v);
821
+ }
822
+ });
823
+ Logger.silly('Removing keys : %j', rem);
824
+ rem.forEach((k) => {
825
+ //eslint-disable-next-line @typescript-eslint/no-dynamic-delete
826
+ delete ob[k];
827
+ });
828
+ }
829
+ }
830
+
831
+ // Given an object, deletes anything that isnt part of the key
832
+ public static stripToKeysOnly(input: Record<string, any>, keysNames: string[]): Record<string, any> {
833
+ let rval: Record<string, any> = null;
834
+ if (!!input && !!keysNames && keysNames.length > 0) {
835
+ rval = {};
836
+ keysNames.forEach((k) => {
837
+ if (!input[k]) {
838
+ ErrorRatchet.throwFormattedErr('Failed key extraction on %j - missing %s', input, k);
839
+ }
840
+ rval[k] = input[k];
841
+ });
842
+ }
843
+ return rval;
844
+ }
845
+
846
+ public static stripAllToKeysOnly(input: Record<string, any>[], keys: string[]): Record<string, any>[] {
847
+ const rval: Record<string, any>[] = input.map((i) => DynamoRatchet.stripToKeysOnly(i, keys));
848
+ return rval;
849
+ }
850
+ }