@dbos-inc/aws-s3-workflows 3.0.29-preview

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.ts ADDED
@@ -0,0 +1,7 @@
1
// Public API of @dbos-inc/aws-s3-workflows: re-export the file-record
// types, the callback contract, and the three workflow registration
// helpers from the implementation module.
export {
  FileRecord,
  S3WorkflowCallbacks,
  registerS3UploadWorkflow,
  registerS3PresignedUploadWorkflow,
  registerS3DeleteWorkflow,
} from './src/s3_utils';
package/jest.config.js ADDED
@@ -0,0 +1,8 @@
1
+ /** @type {import('ts-jest').JestConfigWithTsJest} */
2
+ module.exports = {
3
+ preset: 'ts-jest',
4
+ testEnvironment: 'node',
5
+ testRegex: '((\\.|/)(test|spec))\\.ts?$',
6
+ moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
7
+ modulePaths: ['./'],
8
+ };
package/knexfile.ts ADDED
@@ -0,0 +1,21 @@
1
+ import { Knex } from 'knex';
2
+ import { parseConfigFile } from '@dbos-inc/dbos-sdk';
3
+ import { DBOSConfig } from '@dbos-inc/dbos-sdk';
4
+
5
+ const [dbosConfig]: [DBOSConfig, unknown] = parseConfigFile();
6
+
7
+ const config: Knex.Config = {
8
+ client: 'pg',
9
+ connection: {
10
+ host: dbosConfig.poolConfig!.host,
11
+ user: dbosConfig.poolConfig!.user,
12
+ password: dbosConfig.poolConfig!.password,
13
+ database: dbosConfig.poolConfig!.database,
14
+ ssl: dbosConfig.poolConfig!.ssl,
15
+ },
16
+ migrations: {
17
+ directory: './migrations',
18
+ },
19
+ };
20
+
21
+ export default config;
@@ -0,0 +1,16 @@
1
+ import { Knex } from 'knex';
2
+
3
+ export async function up(knex: Knex): Promise<void> {
4
+ await knex.schema.createTable('user_files', (table) => {
5
+ table.uuid('file_id').primary();
6
+ table.uuid('user_id'); //.index().references("cusers.user_id");
7
+ table.string('file_status', 16);
8
+ table.string('file_type', 16);
9
+ table.bigint('file_time');
10
+ table.string('file_name', 128);
11
+ });
12
+ }
13
+
14
+ export async function down(knex: Knex): Promise<void> {
15
+ await knex.schema.dropTable('user_files');
16
+ }
package/package.json ADDED
@@ -0,0 +1,37 @@
1
+ {
2
+ "name": "@dbos-inc/aws-s3-workflows",
3
+ "version": "3.0.29-preview",
4
+ "description": "Component library - DBOS S3 steps and workflows",
5
+ "license": "MIT",
6
+ "repository": {
7
+ "type": "git",
8
+ "url": "https://github.com/dbos-inc/dbos-transact-ts",
9
+ "directory": "packages/aws-s3-workflows"
10
+ },
11
+ "homepage": "https://docs.dbos.dev/",
12
+ "main": "dist/index.js",
13
+ "types": "dist/index.d.ts",
14
+ "scripts": {
15
+ "build": "tsc --project tsconfig.json",
16
+ "test": "echo 'no tests'",
17
+ "tests3": "npm run build && npx dbos rollback && npx dbos migrate && jest --detectOpenHandles"
18
+ },
19
+ "devDependencies": {
20
+ "@types/jest": "^29.5.12",
21
+ "@types/supertest": "^6.0.2",
22
+ "axios": "^1.7.4",
23
+ "jest": "^29.7.0",
24
+ "knex": "^3.1.0",
25
+ "supertest": "^7.0.0",
26
+ "ts-node": "^10.9.2",
27
+ "typescript": "^5.3.3"
28
+ },
29
+ "peerDependencies": {
30
+ "@dbos-inc/dbos-sdk": "*"
31
+ },
32
+ "dependencies": {
33
+ "@aws-sdk/s3-presigned-post": "^3.842.0",
34
+ "@aws-sdk/s3-request-presigner": "^3.842.0",
35
+ "uuid": "^9.0.1"
36
+ }
37
+ }
@@ -0,0 +1,350 @@
1
+ import {
2
+ FileRecord,
3
+ S3WorkflowCallbacks,
4
+ registerS3UploadWorkflow,
5
+ registerS3PresignedUploadWorkflow,
6
+ registerS3DeleteWorkflow,
7
+ } from './s3_utils';
8
+ import { DBOS } from '@dbos-inc/dbos-sdk';
9
+
10
+ import { S3Client, DeleteObjectCommand, GetObjectCommand, PutObjectCommand } from '@aws-sdk/client-s3';
11
+ import { createPresignedPost, PresignedPost } from '@aws-sdk/s3-presigned-post';
12
+ import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
13
+
14
+ import FormData from 'form-data';
15
+ import axios, { AxiosResponse } from 'axios';
16
+ import { Readable } from 'stream';
17
+ import * as fs from 'fs';
18
+ import { randomUUID } from 'node:crypto';
19
+
20
// Lifecycle states a user file moves through during upload workflows.
enum FileStatus {
  PENDING = 'Pending', // record created; S3 contents not yet confirmed
  RECEIVED = 'Received', // NOTE(review): declared but unused in this file
  ACTIVE = 'Active', // upload complete; file is visible to queries
}
25
+
26
// Caller-supplied description of a file; the optional fields are filled
// in by TestUserFileTable.chooseFileRecord when a record is created.
interface FileDetails {
  user_id: string;
  file_type: string;
  file_name: string;
  file_id?: string; // assigned via randomUUID() at record creation
  file_status?: string; // one of the FileStatus values
  file_time?: number; // epoch milliseconds, set at record creation
}
34
+
35
// Fully-populated file record: FileDetails with every field required,
// plus the S3 object `key` contributed by FileRecord.
interface UserFile extends FileDetails, FileRecord {
  user_id: string;
  file_type: string;
  file_name: string;
  file_id: string;
  file_status: string;
  file_time: number;
}
43
+
44
+ class TestUserFileTable {
45
+ //////////
46
+ //// Database table
47
+ //////////
48
+
49
+ // Pick a file ID
50
+ @DBOS.step()
51
+ static chooseFileRecord(details: FileDetails): Promise<UserFile> {
52
+ const rec: UserFile = {
53
+ user_id: details.user_id,
54
+ file_status: FileStatus.PENDING,
55
+ file_type: details.file_type,
56
+ file_id: randomUUID(),
57
+ file_name: details.file_name,
58
+ file_time: new Date().getTime(),
59
+ key: '',
60
+ };
61
+ rec.key = TestUserFileTable.createS3Key(rec);
62
+ return Promise.resolve(rec);
63
+ }
64
+
65
+ static createS3Key(rec: UserFile) {
66
+ const key = `${rec.file_type}/${rec.user_id}/${rec.file_id}/${rec.file_time}`;
67
+ return key;
68
+ }
69
+
70
+ static toFileDetails(rec: UserFile) {
71
+ return {
72
+ user_id: rec.user_id,
73
+ file_type: rec.file_type,
74
+ file_name: rec.file_name,
75
+ file_id: rec.file_id,
76
+ file_status: rec.file_status,
77
+ file_time: rec.file_time,
78
+ };
79
+ }
80
+
81
+ // File table DML operations
82
+ // Whole record is known
83
+ @DBOS.transaction()
84
+ static async insertFileRecord(rec: UserFile) {
85
+ await DBOS.knexClient<FileDetails>('user_files').insert(TestUserFileTable.toFileDetails(rec));
86
+ }
87
+ @DBOS.transaction()
88
+ static async updateFileRecord(rec: UserFile) {
89
+ await DBOS.knexClient<FileDetails>('user_files')
90
+ .update(TestUserFileTable.toFileDetails(rec))
91
+ .where({ file_id: rec.file_id });
92
+ }
93
+ // Delete when part of record is known
94
+ @DBOS.transaction()
95
+ static async deleteFileRecordById(file_id: string) {
96
+ await DBOS.knexClient<FileDetails>('user_files').delete().where({ file_id });
97
+ }
98
+
99
+ // Queries
100
+ @DBOS.transaction({ readOnly: true })
101
+ static async lookUpByFields(fields: FileDetails) {
102
+ const rv = await DBOS.knexClient<FileDetails>('user_files')
103
+ .select()
104
+ .where({ ...fields, file_status: FileStatus.ACTIVE })
105
+ .orderBy('file_time', 'desc')
106
+ .first();
107
+ return rv ? [rv] : [];
108
+ }
109
+ @DBOS.transaction({ readOnly: true })
110
+ static async lookUpByName(user_id: string, file_type: string, file_name: string) {
111
+ const rv = await DBOS.knexClient<FileDetails>('user_files')
112
+ .select()
113
+ .where({ user_id, file_type, file_name, file_status: FileStatus.ACTIVE })
114
+ .orderBy('file_time', 'desc')
115
+ .first();
116
+ return rv ? [rv] : [];
117
+ }
118
+ @DBOS.transaction({ readOnly: true })
119
+ static async lookUpByType(user_id: string, file_type: string) {
120
+ const rv = await DBOS.knexClient<FileDetails>('user_files')
121
+ .select()
122
+ .where({ user_id, file_type, file_status: FileStatus.ACTIVE });
123
+ return rv;
124
+ }
125
+ @DBOS.transaction({ readOnly: true })
126
+ static async lookUpByUser(user_id: string) {
127
+ const rv = await DBOS.knexClient<FileDetails>('user_files')
128
+ .select()
129
+ .where({ user_id, file_status: FileStatus.ACTIVE });
130
+ return rv;
131
+ }
132
+ }
133
+
134
// S3 connection settings taken from the environment; if any are unset,
// the test suite below runs as a no-op.
const s3bucket = process.env['S3_BUCKET'];
const s3region = process.env['AWS_REGION'];
const s3accessKey = process.env['AWS_ACCESS_KEY_ID'];
const s3accessSecret = process.env['AWS_SECRET_ACCESS_KEY'];

// Created in beforeAll once the environment is validated.
let s3client: S3Client | undefined = undefined;

// Per-operation options threaded through the workflow callbacks.
interface Opts {
  contentType?: string;
}
144
+
145
+ const s3callback: S3WorkflowCallbacks<UserFile, Opts> = {
146
+ // Database operations (these should be transactions)
147
+ newActiveFile: async (rec: UserFile) => {
148
+ rec.file_status = FileStatus.ACTIVE;
149
+ return await TestUserFileTable.insertFileRecord(rec);
150
+ },
151
+ newPendingFile: async (rec: UserFile) => {
152
+ rec.file_status = FileStatus.PENDING;
153
+ return await TestUserFileTable.insertFileRecord(rec);
154
+ },
155
+ fileActivated: async (rec: UserFile) => {
156
+ rec.file_status = FileStatus.ACTIVE;
157
+ return await TestUserFileTable.updateFileRecord(rec);
158
+ },
159
+ fileDeleted: async (rec: UserFile) => {
160
+ return await TestUserFileTable.deleteFileRecordById(rec.file_id);
161
+ },
162
+
163
+ // S3 interaction options, these will be run as steps
164
+ putS3Contents: async (rec: UserFile, content: string, options?: Opts) => {
165
+ return await s3client?.send(
166
+ new PutObjectCommand({
167
+ Bucket: s3bucket,
168
+ Key: rec.key,
169
+ ContentType: options?.contentType ?? 'text/plain',
170
+ Body: content,
171
+ }),
172
+ );
173
+ },
174
+ createPresignedPost: async (rec: UserFile, timeout?: number, opts?: Opts) => {
175
+ const postPresigned = await createPresignedPost(s3client!, {
176
+ Conditions: [
177
+ ['content-length-range', 1, 10000000], // 10MB
178
+ ],
179
+ Bucket: s3bucket!,
180
+ Key: rec.key,
181
+ Expires: timeout || 60,
182
+ Fields: {
183
+ 'Content-Type': opts?.contentType || '*',
184
+ },
185
+ });
186
+ return { url: postPresigned.url, fields: postPresigned.fields };
187
+ },
188
+ validateS3Upload: undefined,
189
+ deleteS3Object: async (rec: UserFile) => {
190
+ return await s3client?.send(
191
+ new DeleteObjectCommand({
192
+ Bucket: s3bucket,
193
+ Key: rec.key,
194
+ }),
195
+ );
196
+ },
197
+ };
198
+
199
// Register the three workflows under an explicit className/name pair.
export const uploadWF = registerS3UploadWorkflow({ className: 'UserFile', name: 'uploadWF' }, s3callback);
export const uploadPWF = registerS3PresignedUploadWorkflow({ className: 'UserFile', name: 'uploadPWF' }, s3callback);
export const deleteWF = registerS3DeleteWorkflow({ className: 'UserFile', name: 'deleteWF' }, s3callback);
202
+
203
// End-to-end tests for the registered S3 workflows. They need real AWS
// credentials and a bucket (checked in beforeAll); otherwise each test
// logs and returns without asserting anything.
// NOTE(review): the suite name 'ses-tests' looks copy-pasted from the SES
// package — these tests exercise S3.
describe('ses-tests', () => {
  // Flipped to false in beforeAll when the environment is not configured.
  let s3IsAvailable = true;

  beforeAll(async () => {
    // Check if S3 is available and update app config, skip the test if it's not
    if (!s3region || !s3bucket || !s3accessKey || !s3accessSecret) {
      s3IsAvailable = false;
      console.log(
        'S3 Test is not configured. To run, set AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and S3_BUCKET',
      );
    } else {
      s3client = new S3Client({
        region: s3region,
        credentials: {
          accessKeyId: s3accessKey,
          secretAccessKey: s3accessSecret,
        },
      });

      // Start the DBOS runtime only when the tests will actually run.
      await DBOS.launch();
    }
  });

  afterAll(async () => {
    if (s3IsAvailable) {
      await DBOS.shutdown();
    }
  }, 10000);

  // Round-trip: upload from within DBOS, read back via the key, delete.
  test('s3-simple-wfs', async () => {
    if (!s3IsAvailable) {
      console.log('S3 unavailable, skipping S3 tests');
      return;
    }

    const userid = randomUUID();

    // The simple workflows that will be performed are to:
    // Put file contents into DBOS (w/ table index)
    const myFile: FileDetails = { user_id: userid, file_type: 'text', file_name: 'mytextfile.txt' };
    const myFileRec = await TestUserFileTable.chooseFileRecord(myFile);
    await uploadWF(myFileRec, 'This is my file');

    // Get the file contents out of DBOS (using the table index)
    const mytxt = await getS3KeyContents(myFileRec.key);
    expect(mytxt).toBe('This is my file');

    // Delete the file contents out of DBOS (using the table index)
    const dfhandle = await deleteWF(myFileRec);
    expect(dfhandle).toBeDefined();
  }, 10000);

  // Presigned-post round-trip: the workflow parks while this test plays
  // the external client, uploading via the presigned URL and signaling
  // completion with a DBOS message.
  test('s3-complex-wfs', async () => {
    if (!s3IsAvailable) {
      console.log('S3 unavailable, skipping S3 tests');
      return;
    }

    // The complex workflows that will be performed are to:
    // Put the file contents into DBOS with a presigned post
    const userid = randomUUID();

    // The simple workflows that will be performed are to:
    // Put file contents into DBOS (w/ table index)
    const myFile: FileDetails = { user_id: userid, file_type: 'text', file_name: 'mytextfile.txt' };
    const myFileRec = await TestUserFileTable.chooseFileRecord(myFile);
    const wfhandle = await DBOS.startWorkflow(uploadPWF)(myFileRec, 60, { contentType: 'text/plain' });
    // Get the presigned post
    const ppost = await DBOS.getEvent<PresignedPost>(wfhandle.workflowID, 'uploadkey');
    // Upload to the URL
    try {
      const res = await uploadToS3(ppost!, './src/s3_utils.test.ts');
      expect(res.status.toString()[0]).toBe('2');
    } catch (e) {
      // You do NOT want to accidentally serialize an AxiosError - they don't!
      console.log('Caught something awful!', e);
      expect(e).toBeUndefined();
    }
    // Notify WF
    await DBOS.send<boolean>(wfhandle.workflowID, true, 'uploadfinish');

    // Wait for WF complete
    const _myFileRecord = await wfhandle.getResult();

    // Get the file out of DBOS (using a signed URL)
    const myurl = await getS3KeyUrl(myFileRec.key, 60);
    expect(myurl).not.toBeNull();
    // Get the file contents out of S3
    await downloadFromS3(myurl, './deleteme.xxx');
    expect(fs.existsSync('./deleteme.xxx')).toBeTruthy();
    fs.rmSync('./deleteme.xxx');

    // Delete the file contents out of DBOS (using the table index)
    const dfhandle = await deleteWF(myFileRec);
    expect(dfhandle).toBeDefined();
  }, 10000);

  // Fetch the contents of an S3 key as a string (test helper, bypasses DBOS).
  async function getS3KeyContents(key: string) {
    return (
      await s3client!.send(
        new GetObjectCommand({
          Bucket: s3bucket!,
          Key: key,
        }),
      )
    ).Body?.transformToString();
  }

  // Produce a presigned GET URL for a key, valid for expirationSecs seconds.
  async function getS3KeyUrl(key: string, expirationSecs: number) {
    const getObjectCommand = new GetObjectCommand({
      Bucket: s3bucket!,
      Key: key,
    });

    const presignedUrl = await getSignedUrl(s3client!, getObjectCommand, { expiresIn: expirationSecs });
    return presignedUrl;
  }

  // POST a local file to a presigned-post target, emulating an external client.
  async function uploadToS3(presignedPostData: PresignedPost, filePath: string) {
    const formData = new FormData();

    // Append all the fields from the presigned post data
    Object.keys(presignedPostData.fields).forEach((key) => {
      formData.append(key, presignedPostData.fields[key]);
    });

    // Append the file you want to upload
    const fileStream = fs.createReadStream(filePath);
    formData.append('file', fileStream);

    return await axios.post(presignedPostData.url, formData);
  }

  // Stream a presigned GET URL down to a local file.
  async function downloadFromS3(presignedGetUrl: string, outputPath: string) {
    const response: AxiosResponse<Readable> = await axios.get(presignedGetUrl, {
      responseType: 'stream', // Important to handle large files
    });

    // Use a write stream to save the file to the desired path
    const writer = fs.createWriteStream(outputPath);
    response.data.pipe(writer);

    return new Promise<void>((resolve, reject) => {
      writer.on('finish', resolve);
      writer.on('error', reject);
    });
  }
});
@@ -0,0 +1,157 @@
1
+ import { type PresignedPost } from '@aws-sdk/s3-presigned-post';
2
+
3
+ import { DBOS, WorkflowConfig } from '@dbos-inc/dbos-sdk';
4
+
5
/** Minimal record for a stored file: just its S3 object key. */
export interface FileRecord {
  key: string; // full S3 key under which the file's contents are stored
}
8
+
9
/**
 * Callbacks the host application supplies to the registration functions
 * below: database recordkeeping operations (per the notes, these should
 * be transactions) and S3 operations (wrapped in steps by the workflows).
 */
export interface S3WorkflowCallbacks<R extends FileRecord, Options = unknown> {
  // Database operations (these should be transactions)
  /** Called back when a new, active file is created; should write the file to the database as active */
  newActiveFile: (rec: R) => Promise<unknown>;
  /** Called back when a file might get uploaded; this function may write the file to the database as 'pending' */
  newPendingFile: (rec: R) => Promise<unknown>;
  /** Called back when a pending file becomes active; should write the file to the database as active */
  fileActivated: (rec: R) => Promise<unknown>;
  /** Called back when a file is in the process of getting deleted; should remove the file from the database */
  fileDeleted: (rec: R) => Promise<unknown>;

  // S3 interaction options
  /** Should execute the S3 operation to write contents to rec.key; this will be run as a step */
  putS3Contents: (rec: R, content: string, options?: Options) => Promise<unknown>;
  /** Should execute the S3 operation to create a presigned post for external upload; this will be run as a step */
  createPresignedPost: (rec: R, timeout?: number, options?: Options) => Promise<PresignedPost>;
  /** Optional validation to check if a client S3 upload is valid, before activating in the database. Will run as a step */
  validateS3Upload?: (rec: R) => Promise<void>;
  /** Should execute the S3 operation to delete rec.key; this will be run as a step */
  deleteS3Object: (rec: R) => Promise<unknown>;
}
30
+
31
+ /**
32
+ * Create a workflow function for deleting S3 objects and removing the DB entry
33
+ * @param options - Registration options for the workflow
34
+ * @param callbacks - S3 operation implementation and database recordkeeping transactions
35
+ */
36
+ export function registerS3DeleteWorkflow<R extends FileRecord, Options = unknown>(
37
+ options: {
38
+ name?: string;
39
+ ctorOrProto?: object;
40
+ className?: string;
41
+ config?: WorkflowConfig;
42
+ },
43
+ callbacks: S3WorkflowCallbacks<R, Options>,
44
+ ) {
45
+ return DBOS.registerWorkflow(async (fileDetails: R) => {
46
+ await callbacks.fileDeleted(fileDetails);
47
+ return await DBOS.runStep(
48
+ async () => {
49
+ return callbacks.deleteS3Object(fileDetails);
50
+ },
51
+ { name: 'deleteS3Object' },
52
+ );
53
+ }, options);
54
+ }
55
+
56
+ /**
57
+ * Create a workflow function for uploading S3 contents from DBOS
58
+ * @param options - Registration options for the workflow
59
+ * @param callbacks - S3 operation implementation and database recordkeeping transactions
60
+ */
61
+ export function registerS3UploadWorkflow<R extends FileRecord, Options = unknown>(
62
+ options: {
63
+ name?: string;
64
+ ctorOrProto?: object;
65
+ className?: string;
66
+ config?: WorkflowConfig;
67
+ },
68
+ callbacks: S3WorkflowCallbacks<R, Options>,
69
+ ) {
70
+ return DBOS.registerWorkflow(async (fileDetails: R, content: string, objOptions?: Options) => {
71
+ try {
72
+ await DBOS.runStep(
73
+ async () => {
74
+ await callbacks.putS3Contents(fileDetails, content, objOptions);
75
+ },
76
+ { name: 'putS3Contents' },
77
+ );
78
+ } catch (e) {
79
+ try {
80
+ await DBOS.runStep(
81
+ async () => {
82
+ return callbacks.deleteS3Object(fileDetails);
83
+ },
84
+ { name: 'deleteS3Object' },
85
+ );
86
+ } catch (e2) {
87
+ DBOS.logger.debug(e2);
88
+ }
89
+ throw e;
90
+ }
91
+
92
+ await callbacks.newActiveFile(fileDetails);
93
+ return fileDetails;
94
+ }, options);
95
+ }
96
+
97
+ /**
98
+ * Create a workflow function for uploading S3 contents externally, via a presigned URL
99
+ * @param options - Registration options for the workflow
100
+ * @param callbacks - S3 operation implementation and database recordkeeping transactions
101
+ */
102
+ export function registerS3PresignedUploadWorkflow<R extends FileRecord, Options = unknown>(
103
+ options: {
104
+ name?: string;
105
+ ctorOrProto?: object;
106
+ className?: string;
107
+ config?: WorkflowConfig;
108
+ },
109
+ callbacks: S3WorkflowCallbacks<R, Options>,
110
+ ) {
111
+ return DBOS.registerWorkflow(async (fileDetails: R, timeoutSeconds: number, objOptions?: Options) => {
112
+ await callbacks.newPendingFile(fileDetails);
113
+
114
+ const upkey = await DBOS.runStep(
115
+ async () => {
116
+ return await callbacks.createPresignedPost(fileDetails, timeoutSeconds, objOptions);
117
+ },
118
+ { name: 'createPresignedPost' },
119
+ );
120
+ await DBOS.setEvent<PresignedPost>('uploadkey', upkey);
121
+
122
+ try {
123
+ const res = await DBOS.recv<boolean>('uploadfinish', timeoutSeconds + 60);
124
+
125
+ if (!res) {
126
+ throw new Error('S3 operation timed out or canceled');
127
+ }
128
+
129
+ // Validate the file, if we have code for that
130
+ if (callbacks.validateS3Upload) {
131
+ await DBOS.runStep(
132
+ async () => {
133
+ await callbacks.validateS3Upload!(fileDetails);
134
+ },
135
+ { name: 'validateFileUpload' },
136
+ );
137
+ }
138
+
139
+ await callbacks?.fileActivated(fileDetails);
140
+ } catch (e) {
141
+ try {
142
+ await callbacks.fileDeleted(fileDetails);
143
+ await DBOS.runStep(
144
+ async () => {
145
+ await callbacks.deleteS3Object(fileDetails);
146
+ },
147
+ { name: 'deleteS3Object' },
148
+ );
149
+ } catch (e2) {
150
+ DBOS.logger.debug(e2);
151
+ }
152
+ throw e;
153
+ }
154
+
155
+ return fileDetails;
156
+ }, options);
157
+ }
package/tsconfig.json ADDED
@@ -0,0 +1,9 @@
1
+ /* Visit https://aka.ms/tsconfig to read more about this file */
2
+ {
3
+ "extends": "../../tsconfig.shared.json",
4
+ "compilerOptions": {
5
+ "outDir": "./dist"
6
+ },
7
+ "include": [/* Specifies an array of filenames or patterns to include in the program. */ "."],
8
+ "exclude": ["dist"]
9
+ }