wirejs-deploy-amplify-basic 0.0.60 → 0.0.61-table-resource
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/amplify-backend-assets/backend.ts +45 -1
- package/amplify-hosting-assets/compute/default/package.json +1 -1
- package/build.js +20 -16
- package/dist/index.d.ts +2 -13
- package/dist/index.js +7 -64
- package/dist/resources/distributed-table.d.ts +40 -0
- package/dist/resources/distributed-table.js +202 -0
- package/dist/services/file.d.ts +13 -0
- package/dist/services/file.js +63 -0
- package/package.json +3 -2

package/amplify-backend-assets/backend.ts
CHANGED
@@ -4,9 +4,16 @@ import {
 import { RemovalPolicy } from "aws-cdk-lib";
 import { FunctionUrlAuthType } from 'aws-cdk-lib/aws-lambda';
 import { Bucket, BlockPublicAccess } from 'aws-cdk-lib/aws-s3';
+import { Table, AttributeType, BillingMode } from 'aws-cdk-lib/aws-dynamodb';
 import { api } from './functions/api/resource';
 import { auth } from './auth/resource';
 
+const APP_ID = process.env.AWS_APP_ID ?? process.env.PWD?.replace(/[^a-zA-Z0-9-_]/g, '_');
+const BRANCH_ID = process.env.AWS_BRANCH ?? process.env.USER ?? 'anonymous';
+const TABLE_NAME_PREFIX = `${APP_ID}-${BRANCH_ID}-`;
+
+// @ts-ignore
+import generated from './generated-resources';
 
 /**
  * Amplify resources
@@ -16,7 +23,6 @@ const backend = defineBackend({
   api,
 });
 
-
 /**
  * Amplify resource augmentations
  */
@@ -46,6 +52,41 @@ const bucket = new Bucket(backend.stack, 'data', {
 });
 bucket.grantReadWrite(backend.api.resources.lambda);
 
+/**
+ * DDB Tables
+ */
+function isDistributedTable(resource: any): resource is {
+  type: 'DistributedTable';
+  options: {
+    absoluteId: string;
+    partitionKey: string;
+    sortKey: string[];
+  }
+} {
+  return resource.type === 'DistributedTable';
+}
+
+// TODO: Need to get AttributeType from customer definition.
+for (const resource of generated) {
+  if (isDistributedTable(resource)) {
+    const sanitizedId = resource.options.absoluteId.replace(/[^a-zA-Z0-9-_]/g, '_');
+    const table = new Table(backend.stack, sanitizedId, {
+      partitionKey: {
+        name: resource.options.partitionKey,
+        type: AttributeType.STRING,
+      },
+      sortKey: resource.options.sortKey ? {
+        name: resource.options.sortKey[0],
+        type: AttributeType.STRING,
+      } : undefined,
+      removalPolicy: RemovalPolicy.RETAIN,
+      tableName: `${TABLE_NAME_PREFIX}${sanitizedId}`,
+      billingMode: BillingMode.PAY_PER_REQUEST,
+    });
+    table.grantReadWriteData(backend.api.resources.lambda);
+  }
+}
+
 
 /**
  * Lambda environment vars
@@ -56,6 +97,9 @@ backend.api.addEnvironment(
 backend.api.addEnvironment(
   'COGNITO_CLIENT_ID', backend.auth.resources.userPoolClient.userPoolClientId
 );
+backend.api.addEnvironment(
+  'TABLE_NAME_PREFIX', TABLE_NAME_PREFIX
+);
 
 
 /**
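
For context: the new loop above consumes a `generated-resources.ts` manifest that the updated build.js (next diff) writes out from `globalThis.wirejsResources`. A rough sketch of what such a generated file might contain — not taken from the package; the resource ids and key names are hypothetical:

export default [
    {
        type: 'DistributedTable',
        options: {
            absoluteId: 'api/notes',   // hypothetical resource id
            partitionKey: 'owner',     // hypothetical partition key field
            sortKey: ['noteId'],       // hypothetical sort key fields
        },
    },
    // entries of other types (e.g. FileService) are skipped by the type guard
    { type: 'FileService', options: { absoluteId: 'api/files' } },
];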

package/build.js
CHANGED
@@ -95,29 +95,29 @@ async function buildApiBundle() {
   // on the original `wirejs-resources` into the intermediate bundle. doing this
   // allows us to completely override (alias) `wirejs-resources` in the final build
   // without creating a circular alias.
-  console.log("creating intermediate wirejs-resources overrides");
-  await esbuild.build({
-    entryPoints: [path.join(SELF_DIR, 'wirejs-resources-overrides', 'index.js')],
-    bundle: true,
-    outfile: RESOURCE_OVERRIDES_BUILD,
-    platform: 'node',
-    format: 'esm',
-    external: ['@aws-sdk/client-s3']
-  });
+  // console.log("creating intermediate wirejs-resources overrides");
+  // await esbuild.build({
+  //   entryPoints: [path.join(SELF_DIR, 'wirejs-resources-overrides', 'index.js')],
+  //   bundle: true,
+  //   outfile: RESOURCE_OVERRIDES_BUILD,
+  //   platform: 'node',
+  //   format: 'esm',
+  //   external: ['@aws-sdk/client-s3']
+  // });
 
   // exploratory build. builds using our overrides, which will emit a manifest of
   // resources required by the API when imported.
   console.log("creating api bundle using platform overrides");
   await esbuild.build({
-    entryPoints: [path.join('.', 'api', 'index.
-    bundle:
+    entryPoints: [path.join('.', 'api', 'index.ts')],
+    bundle: false,
     outfile: outputPath,
     platform: 'node',
     format: 'esm',
-    alias: {
-      'wirejs-resources': RESOURCE_OVERRIDES_BUILD
-    },
-    external: ['@aws-sdk/client-s3']
+    // alias: {
+    //   'wirejs-resources': RESOURCE_OVERRIDES_BUILD
+    // },
+    // external: ['@aws-sdk/client-s3']
   });
 
   // exploratory import. not strictly necessary until we're actually using the manifest
@@ -125,6 +125,10 @@ async function buildApiBundle() {
   // confirmational that we're building things properly.
   await import(outputPath);
   console.log('discovered resources', globalThis.wirejsResources);
+  await fs.promises.writeFile(
+    path.join(BACKEND_DIR, 'generated-resources.ts'),
+    `export default ${JSON.stringify(globalThis.wirejsResources, null, 2)}`
+  );
 
   return outputPath;
 }
@@ -159,7 +163,7 @@ if (action === 'prebuild') {
   console.log("starting prebuild");
   await createSkeleton();
   await installDeps();
-
+  await buildApiBundle();
 
   console.log("prebuild done");
 } else if (action === 'inject-backend') {

package/dist/index.d.ts
CHANGED
@@ -1,15 +1,4 @@
-import { Resource } from 'wirejs-resources';
 export * from 'wirejs-resources';
+export { FileService } from './services/file.js';
 export { AuthenticationService } from './services/authentication.js';
-export declare class FileService extends Resource {
-    constructor(scope: Resource | string, id: string);
-    read(filename: string, encoding?: BufferEncoding): Promise<string>;
-    write(filename: string, data: string, { onlyIfNotExists }?: {
-        onlyIfNotExists?: boolean | undefined;
-    }): Promise<void>;
-    delete(filename: string): Promise<void>;
-    list({ prefix }?: {
-        prefix?: string | undefined;
-    }): AsyncGenerator<string, void, unknown>;
-    isAlreadyExistsError(error: any): boolean;
-}
+export { DistributedTable } from './resources/distributed-table.js';

package/dist/index.js
CHANGED
@@ -1,71 +1,14 @@
-import {
-import { S3Client, ListObjectsCommand, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
-import { overrides, Resource, } from 'wirejs-resources';
+import { overrides } from 'wirejs-resources';
 // let's try exporting all the things and overwriting the specific things we
 // want to re-implement.
 export * from 'wirejs-resources';
-import {
+import { FileService } from './services/file.js';
+export { FileService } from './services/file.js';
 import { AuthenticationService } from './services/authentication.js';
 export { AuthenticationService } from './services/authentication.js';
-
-
-export class FileService extends Resource {
-    constructor(scope, id) {
-        super(scope, id);
-        addResource('FileService', { absoluteId: this.absoluteId });
-    }
-    async read(filename, encoding = 'utf8') {
-        const Key = `${this.absoluteId}/${filename}`;
-        const command = new GetObjectCommand({ Bucket, Key });
-        const result = await s3.send(command);
-        return result.Body.transformToString(encoding);
-    }
-    async write(filename, data, { onlyIfNotExists = false } = {}) {
-        const Key = `${this.absoluteId}/${filename}`;
-        const Body = data;
-        const commandDetails = {
-            Bucket, Key, Body
-        };
-        if (onlyIfNotExists) {
-            commandDetails['IfNoneMatch'] = '*';
-        }
-        const command = new PutObjectCommand(commandDetails);
-        await s3.send(command);
-    }
-    async delete(filename) {
-        const Key = `${this.absoluteId}/${filename}`;
-        const command = new DeleteObjectCommand({
-            Bucket,
-            Key
-        });
-        await s3.send(command);
-    }
-    async *list({ prefix = '' } = {}) {
-        const Prefix = `${this.absoluteId}/${prefix}`;
-        let Marker = undefined;
-        while (true) {
-            const command = new ListObjectsCommand({
-                Bucket,
-                Prefix,
-                MaxKeys: 1000,
-                Marker
-            });
-            const result = await s3.send(command);
-            Marker = result.Marker;
-            for (const o of result.Contents || []) {
-                if (o.Key) {
-                    yield o.Key.slice(`${this.absoluteId}/`.length);
-                }
-            }
-            if (!Marker)
-                break;
-        }
-    }
-    isAlreadyExistsError(error) {
-        // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
-        return error?.$metadata?.httpStatusCode === 412;
-    }
-}
+import { DistributedTable } from './resources/distributed-table.js';
+export { DistributedTable } from './resources/distributed-table.js';
 // expose resources to other resources that might depend on it.
-overrides.FileService = FileService;
 overrides.AuthenticationService = AuthenticationService;
+overrides.DistributedTable = DistributedTable;
+overrides.FileService = FileService;

package/dist/resources/distributed-table.d.ts
ADDED
@@ -0,0 +1,40 @@
+import { DynamoDBClient } from '@aws-sdk/client-dynamodb';
+import { Filter, Parser, RecordKey, Resource } from 'wirejs-resources';
+export declare function PassThruParser<T>(record: Record<string, any>): T;
+/**
+ * A table of records that favors very high *overall* scalability at the expense of
+ * scalability *between* partitions. Providers will distribute your data across many
+ * servers based on the partition key as the table and/or traffic increases.
+ *
+ * ### Do NOT change partition keys. (In Production.)
+ *
+ * Changing it will cause some providers to drop and recreate your table.
+ *
+ * High cardinality, non-sequential partition keys allow for the best overall scaling.
+ */
+export declare class DistributedTable<const P extends Parser<any>, const T extends ReturnType<P>, const PK extends keyof T & string, const SK extends (keyof T & string)[] | undefined> extends Resource {
+    #private;
+    parse: P;
+    partitionKey: PK;
+    sort: SK | undefined;
+    ddbClient: DynamoDBClient;
+    table: string;
+    constructor(scope: Resource | string, id: string, options: {
+        parse: P;
+        key: {
+            partition: PK;
+            sort?: SK;
+        };
+    });
+    save(item: T): Promise<void>;
+    saveMany(items: T[]): Promise<void>;
+    delete(item: RecordKey<T, PK, SK>): Promise<void>;
+    deleteMany(items: (RecordKey<T, PK, SK>)[]): Promise<void>;
+    get(key: RecordKey<T, PK, SK>): Promise<T | undefined>;
+    scan(options?: {
+        filter?: Filter<T>;
+    }): AsyncGenerator<T>;
+    query(partition: Pick<T, PK>, options?: {
+        filter?: Filter<T>;
+    }): AsyncGenerator<T>;
+}
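
A minimal usage sketch based only on the declarations above — it is not taken from the package, and the `Note` type, scope/id strings, and key fields are invented for illustration; the parser simply casts the raw record:

import { DistributedTable } from 'wirejs-deploy-amplify-basic';

type Note = { owner: string; noteId: string; text: string };

// Hypothetical table: the 'app'/'notes' ids and the key fields are illustrative only.
const notes = new DistributedTable('app', 'notes', {
    parse: (record: Record<string, any>) => record as Note,
    key: { partition: 'owner', sort: ['noteId'] },
});

// (top-level await assumes an ES module context, as in this package)
await notes.save({ owner: 'alice', noteId: 'n1', text: 'hello' });
const note = await notes.get({ owner: 'alice', noteId: 'n1' });
for await (const n of notes.query({ owner: 'alice' })) {
    console.log(n.text);
}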

package/dist/resources/distributed-table.js
ADDED
@@ -0,0 +1,202 @@
+import { env } from 'process';
+import { DynamoDBClient, PutItemCommand, DeleteItemCommand, GetItemCommand, ScanCommand, QueryCommand, } from '@aws-sdk/client-dynamodb';
+import { Resource, } from 'wirejs-resources';
+import { addResource } from '../resource-collector.js';
+function isFieldComparison(filter) {
+    return !['and', 'or', 'not'].some(key => key in filter);
+}
+function buildFilterExpression(filter) {
+    if (filter.and) {
+        return `(${filter.and.map(buildFilterExpression).join(' AND ')})`;
+    }
+    if (filter.or) {
+        return `(${filter.or.map(buildFilterExpression).join(' OR ')})`;
+    }
+    if (filter.not) {
+        return `(NOT ${buildFilterExpression(filter.not)})`;
+    }
+    if (!isFieldComparison(filter)) {
+        throw new Error(`Unsupported filter: ${JSON.stringify(filter)}`);
+    }
+    const [field] = Object.keys(filter);
+    const condition = filter[field];
+    if ('eq' in condition)
+        return `${field} = :${field}`;
+    if ('ne' in condition)
+        return `${field} <> :${field}`;
+    if ('gt' in condition)
+        return `${field} > :${field}`;
+    if ('ge' in condition)
+        return `${field} >= :${field}`;
+    if ('lt' in condition)
+        return `${field} < :${field}`;
+    if ('le' in condition)
+        return `${field} <= :${field}`;
+    if ('between' in condition)
+        return `${field} BETWEEN :${field}Low AND :${field}High`;
+    if ('beginsWith' in condition)
+        return `begins_with(${field}, :${field})`;
+    throw new Error(`Unsupported filter condition: ${JSON.stringify(condition)}`);
+}
+function buildExpressionAttributeValues(filter) {
+    const values = {};
+    if (filter.and || filter.or || filter.not) {
+        const subFilters = filter.and ?? filter.or ?? [filter.not];
+        for (const subFilter of subFilters) {
+            if (!subFilter)
+                continue;
+            Object.assign(values, buildExpressionAttributeValues(subFilter));
+        }
+    }
+    else if (isFieldComparison(filter)) {
+        const field = Object.keys(filter)[0];
+        const condition = filter[field];
+        if ('eq' in condition)
+            values[`:${field}`] = { S: condition.eq };
+        if ('ne' in condition)
+            values[`:${field}`] = { S: condition.ne };
+        if ('gt' in condition)
+            values[`:${field}`] = { S: condition.gt };
+        if ('ge' in condition)
+            values[`:${field}`] = { S: condition.ge };
+        if ('lt' in condition)
+            values[`:${field}`] = { S: condition.lt };
+        if ('le' in condition)
+            values[`:${field}`] = { S: condition.le };
+        if ('between' in condition) {
+            values[`:${field}Low`] = { S: condition.between[0] };
+            values[`:${field}High`] = { S: condition.between[1] };
+        }
+        if ('beginsWith' in condition)
+            values[`:${field}`] = { S: condition.beginsWith };
+    }
+    else {
+        throw new Error(`Unsupported filter: ${JSON.stringify(filter)}`);
+    }
+    return values;
+}
+export function PassThruParser(record) {
+    return record;
+}
+/**
+ * A table of records that favors very high *overall* scalability at the expense of
+ * scalability *between* partitions. Providers will distribute your data across many
+ * servers based on the partition key as the table and/or traffic increases.
+ *
+ * ### Do NOT change partition keys. (In Production.)
+ *
+ * Changing it will cause some providers to drop and recreate your table.
+ *
+ * High cardinality, non-sequential partition keys allow for the best overall scaling.
+ */
+export class DistributedTable extends Resource {
+    parse;
+    partitionKey;
+    sort;
+    ddbClient;
+    table;
+    constructor(scope, id, options) {
+        super(scope, id);
+        this.parse = options.parse;
+        this.partitionKey = options.key.partition;
+        this.sort = options.key.sort;
+        this.ddbClient = new DynamoDBClient();
+        this.table = env['TABLE_NAME_PREFIX'] + this.absoluteId.replace(/[^a-zA-Z0-9-_]/g, '_');
+        addResource('DistributedTable', {
+            absoluteId: this.absoluteId,
+            partitionKey: this.partitionKey,
+            sortKey: this.sort,
+        });
+    }
+    #getDDBKey(key) {
+        const ddbKey = {
+            [this.partitionKey]: { S: key[this.partitionKey] }
+        };
+        if (this.sort) {
+            for (const sk of this.sort) {
+                ddbKey[sk] = { S: key[sk] };
+            }
+        }
+        return ddbKey;
+    }
+    async save(item) {
+        const key = this.#getDDBKey(item);
+        const itemToSave = {
+            ...key,
+            ...item,
+        };
+        await this.ddbClient.send(new PutItemCommand({
+            TableName: this.table,
+            Item: itemToSave,
+        }));
+    }
+    async saveMany(items) {
+        const promises = items.map(item => this.save(item));
+        await Promise.all(promises);
+    }
+    async delete(item) {
+        const key = this.#getDDBKey(item);
+        await this.ddbClient.send(new DeleteItemCommand({
+            TableName: this.table,
+            Key: key,
+        }));
+    }
+    async deleteMany(items) {
+        const promises = items.map(item => this.delete(item));
+        await Promise.all(promises);
+    }
+    async get(key) {
+        const ddbKey = this.#getDDBKey(key);
+        const result = await this.ddbClient.send(new GetItemCommand({
+            TableName: this.table,
+            Key: ddbKey,
+        }));
+        if (!result.Item)
+            return undefined;
+        return this.parse(result.Item);
+    }
+    async *scan(options = {}) {
+        let lastEvaluatedKey = undefined;
+        do {
+            const filterExpression = options.filter ? buildFilterExpression(options.filter) : undefined;
+            const expressionAttributeValues = options.filter ? buildExpressionAttributeValues(options.filter) : undefined;
+            const result = await this.ddbClient.send(new ScanCommand({
+                TableName: this.table,
+                FilterExpression: filterExpression,
+                ExpressionAttributeValues: expressionAttributeValues,
+                ExclusiveStartKey: lastEvaluatedKey,
+            }));
+            for (const item of result.Items || []) {
+                if (!item)
+                    continue;
+                const record = this.parse(item);
+                yield record;
+            }
+            lastEvaluatedKey = result.LastEvaluatedKey;
+        } while (lastEvaluatedKey);
+    }
+    async *query(partition, options = {}) {
+        let lastEvaluatedKey = undefined;
+        do {
+            const ddbKey = this.#getDDBKey(partition);
+            const filterExpression = options.filter ? buildFilterExpression(options.filter) : undefined;
+            const expressionAttributeValues = options.filter ? buildExpressionAttributeValues(options.filter) : undefined;
+            const result = await this.ddbClient.send(new QueryCommand({
+                TableName: this.table,
+                KeyConditionExpression: `${this.partitionKey} = :partitionKey`,
+                ExpressionAttributeValues: {
+                    ':partitionKey': ddbKey[this.partitionKey],
+                    ...expressionAttributeValues
+                },
+                FilterExpression: filterExpression,
+                ExclusiveStartKey: lastEvaluatedKey,
+            }));
+            for (const item of result.Items || []) {
+                if (!item)
+                    continue;
+                yield this.parse(item);
+            }
+            lastEvaluatedKey = result.LastEvaluatedKey;
+        } while (lastEvaluatedKey);
+    }
+}
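
For reference, a sketch of how a filter would be translated by the helper functions above — the field names and values are hypothetical:

// Hypothetical filter shape, matching what the builders accept.
const filter = {
    and: [
        { status: { eq: 'active' } },
        { createdAt: { beginsWith: '2024-' } },
    ],
};
// buildFilterExpression(filter)
//   => "(status = :status AND begins_with(createdAt, :createdAt))"
// buildExpressionAttributeValues(filter)
//   => { ':status': { S: 'active' }, ':createdAt': { S: '2024-' } }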

package/dist/services/file.d.ts
ADDED
@@ -0,0 +1,13 @@
+import { Resource } from 'wirejs-resources';
+export declare class FileService extends Resource {
+    constructor(scope: Resource | string, id: string);
+    read(filename: string, encoding?: BufferEncoding): Promise<string>;
+    write(filename: string, data: string, { onlyIfNotExists }?: {
+        onlyIfNotExists?: boolean | undefined;
+    }): Promise<void>;
+    delete(filename: string): Promise<void>;
+    list({ prefix }?: {
+        prefix?: string | undefined;
+    }): AsyncGenerator<string, void, unknown>;
+    isAlreadyExistsError(error: any): boolean;
+}

package/dist/services/file.js
ADDED
@@ -0,0 +1,63 @@
+import { env } from 'process';
+import { S3Client, ListObjectsCommand, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
+import { Resource, } from 'wirejs-resources';
+import { addResource } from '../resource-collector.js';
+const Bucket = env['BUCKET'];
+const s3 = new S3Client();
+export class FileService extends Resource {
+    constructor(scope, id) {
+        super(scope, id);
+        addResource('FileService', { absoluteId: this.absoluteId });
+    }
+    async read(filename, encoding = 'utf8') {
+        const Key = `${this.absoluteId}/${filename}`;
+        const command = new GetObjectCommand({ Bucket, Key });
+        const result = await s3.send(command);
+        return result.Body.transformToString(encoding);
+    }
+    async write(filename, data, { onlyIfNotExists = false } = {}) {
+        const Key = `${this.absoluteId}/${filename}`;
+        const Body = data;
+        const commandDetails = {
+            Bucket, Key, Body
+        };
+        if (onlyIfNotExists) {
+            commandDetails['IfNoneMatch'] = '*';
+        }
+        const command = new PutObjectCommand(commandDetails);
+        await s3.send(command);
+    }
+    async delete(filename) {
+        const Key = `${this.absoluteId}/${filename}`;
+        const command = new DeleteObjectCommand({
+            Bucket,
+            Key
+        });
+        await s3.send(command);
+    }
+    async *list({ prefix = '' } = {}) {
+        const Prefix = `${this.absoluteId}/${prefix}`;
+        let Marker = undefined;
+        while (true) {
+            const command = new ListObjectsCommand({
+                Bucket,
+                Prefix,
+                MaxKeys: 1000,
+                Marker
+            });
+            const result = await s3.send(command);
+            Marker = result.Marker;
+            for (const o of result.Contents || []) {
+                if (o.Key) {
+                    yield o.Key.slice(`${this.absoluteId}/`.length);
+                }
+            }
+            if (!Marker)
+                break;
+        }
+    }
+    isAlreadyExistsError(error) {
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+        return error?.$metadata?.httpStatusCode === 412;
+    }
+}

package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "wirejs-deploy-amplify-basic",
-  "version": "0.0.60",
+  "version": "0.0.61-table-resource",
   "type": "module",
   "main": "./dist/index.js",
   "types": "./dist/index.d.ts",
@@ -23,6 +23,7 @@
   },
   "dependencies": {
     "@aws-sdk/client-cognito-identity-provider": "^3.741.0",
+    "@aws-sdk/client-dynamodb": "^3.774.0",
     "@aws-sdk/client-s3": "^3.738.0",
     "copy": "^0.3.2",
     "esbuild": "^0.24.2",
@@ -30,7 +31,7 @@
     "recursive-copy": "^2.0.14",
     "rimraf": "^6.0.1",
     "wirejs-dom": "^1.0.38",
-    "wirejs-resources": "^0.1.
+    "wirejs-resources": "^0.1.29-table-resource"
   },
   "devDependencies": {
     "@aws-amplify/backend": "^1.14.0",