@dbos-inc/pgnotifier-receiver 3.0.38-preview.g8bb2030562

@@ -0,0 +1,188 @@
+ import { DBOS, DBOSConfig } from '@dbos-inc/dbos-sdk';
+ import { DBTrigger, TriggerOperation } from '../src';
+ import { ClientBase, Pool, PoolClient } from 'pg';
+
+ import { KnexDataSource } from '@dbos-inc/knex-datasource';
+
+ const testTableName = 'dbos_test_orders';
+
+ const config = {
+   host: process.env.PGHOST || 'localhost',
+   port: parseInt(process.env.PGPORT || '5432'),
+   database: process.env.PGDATABASE || 'postgres',
+   user: process.env.PGUSER || 'postgres',
+   password: process.env.PGPASSWORD || 'dbos',
+ };
+ const pool = new Pool(config);
+
+ const kconfig = { client: 'pg', connection: config };
+
+ const knexds = new KnexDataSource('app', kconfig);
+
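+ // Receiver wiring for DBTrigger: connect/disconnect lease and release a dedicated client from the
+ // shared pg pool, and query runs SQL through the pool and returns the resulting rows.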
+ const trig = new DBTrigger({
+   connect: async () => {
+     const conn = pool.connect();
+     return conn;
+   },
+   disconnect: async (c: ClientBase) => {
+     (c as PoolClient).release();
+     return Promise.resolve();
+   },
+   query: async <R>(sql: string, params?: unknown[]) => {
+     return (await pool.query(sql, params)).rows as R[];
+   },
+ });
+
+ function sleepms(ms: number) {
+   return new Promise((r) => setTimeout(r, ms));
+ }
+
+ class DBOSTriggerTestClass {
+   static nInserts = 0;
+   static nDeletes = 0;
+   static nUpdates = 0;
+   static recordMap: Map<number, TestTable> = new Map();
+
+   static nWFUpdates = 0;
+   static wfRecordMap: Map<number, TestTable> = new Map();
+
+   static reset() {
+     DBOSTriggerTestClass.nInserts = 0;
+     DBOSTriggerTestClass.nDeletes = 0;
+     DBOSTriggerTestClass.nUpdates = 0;
+     DBOSTriggerTestClass.recordMap = new Map();
+
+     DBOSTriggerTestClass.nWFUpdates = 0;
+     DBOSTriggerTestClass.wfRecordMap = new Map();
+   }
+
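+   // trig.trigger invokes this handler directly (no workflow) for each notification; installDBTrigger
+   // has the library create the notifying database trigger itself, since the test never creates one.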
+   @trig.trigger({ tableName: testTableName, recordIDColumns: ['order_id'], installDBTrigger: true })
+   static async triggerNonWF(op: TriggerOperation, key: number[], rec: unknown) {
+     if (op === TriggerOperation.RecordDeleted) {
+       ++DBOSTriggerTestClass.nDeletes;
+       DBOSTriggerTestClass.recordMap.delete(key[0]);
+     }
+     if (op === TriggerOperation.RecordInserted) {
+       DBOSTriggerTestClass.recordMap.set(key[0], rec as TestTable);
+       ++DBOSTriggerTestClass.nInserts;
+     }
+     if (op === TriggerOperation.RecordUpdated) {
+       DBOSTriggerTestClass.recordMap.set(key[0], rec as TestTable);
+       ++DBOSTriggerTestClass.nUpdates;
+     }
+     return Promise.resolve();
+   }
+
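+   // trig.triggerWorkflow starts a DBOS workflow per change, delivered as RecordUpserted. Per the
+   // assertions below, deletes do not start the workflow, and neither do plain updates when no
+   // sequence-number or timestamp marker column is configured.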
+   @trig.triggerWorkflow({ tableName: testTableName, recordIDColumns: ['order_id'], installDBTrigger: true })
+   @DBOS.workflow()
+   static async triggerWF(op: TriggerOperation, key: number[], rec: unknown) {
+     DBOS.logger.debug(`WF ${op} - ${JSON.stringify(key)} / ${JSON.stringify(rec)}`);
+     expect(op).toBe(TriggerOperation.RecordUpserted);
+     if (op === TriggerOperation.RecordUpserted) {
+       DBOSTriggerTestClass.wfRecordMap.set(key[0], rec as TestTable);
+       ++DBOSTriggerTestClass.nWFUpdates;
+     }
+     return Promise.resolve();
+   }
+
+   @knexds.transaction()
+   static async insertRecord(rec: TestTable) {
+     await knexds.client<TestTable>(testTableName).insert(rec);
+   }
+
+   @knexds.transaction()
+   static async deleteRecord(order_id: number) {
+     await knexds.client<TestTable>(testTableName).where({ order_id }).delete();
+   }
+
+   @knexds.transaction()
+   static async updateRecordStatus(order_id: number, status: string) {
+     await knexds.client<TestTable>(testTableName).where({ order_id }).update({ status });
+   }
+ }
+
+ interface TestTable {
+   order_id: number;
+   order_date: Date;
+   price: number;
+   item: string;
+   status: string;
+ }
+
+ describe('test-db-triggers', () => {
+   beforeAll(async () => {});
+
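+   // Per-test setup: install the DBOS/Knex schema, recreate the orders table, reset the counters,
+   // and launch DBOS so the trigger receivers are running.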
+   beforeEach(async () => {
+     await KnexDataSource.initializeDBOSSchema(kconfig);
+     const config: DBOSConfig = {
+       name: 'dbtrigg',
+     };
+     DBOS.setConfig(config);
+     await trig.db.query(`DROP TABLE IF EXISTS ${testTableName};`);
+     await trig.db.query(`
+       CREATE TABLE IF NOT EXISTS ${testTableName}(
+         order_id SERIAL PRIMARY KEY,
+         order_date TIMESTAMP,
+         price DECIMAL(10,2),
+         item TEXT,
+         status VARCHAR(10)
+       );`);
+     DBOSTriggerTestClass.reset();
+     await DBOS.launch();
+   });
+
+   afterEach(async () => {
+     await DBOS.shutdown();
+     await trig.db.query(`DROP TABLE IF EXISTS ${testTableName};`);
+   });
+
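+   // Two inserts, a delete, and an update drive the direct handler; the workflow handler fires only
+   // for the inserts (not for the delete or the marker-less update).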
+   test('trigger-nonwf', async () => {
+     await DBOSTriggerTestClass.insertRecord({
+       order_id: 1,
+       order_date: new Date(),
+       price: 10,
+       item: 'Spacely Sprocket',
+       status: 'Ordered',
+     });
+     while (DBOSTriggerTestClass.nInserts < 1) await sleepms(10);
+     expect(DBOSTriggerTestClass.nInserts).toBe(1);
+     expect(DBOSTriggerTestClass.recordMap.get(1)?.status).toBe('Ordered');
+     while (DBOSTriggerTestClass.nWFUpdates < 1) await sleepms(10);
+     expect(DBOSTriggerTestClass.nWFUpdates).toBe(1);
+     expect(DBOSTriggerTestClass.wfRecordMap.get(1)?.status).toBe('Ordered');
+
+     await DBOSTriggerTestClass.insertRecord({
+       order_id: 2,
+       order_date: new Date(),
+       price: 10,
+       item: 'Cogswell Cog',
+       status: 'Ordered',
+     });
+     while (DBOSTriggerTestClass.nInserts < 2) await sleepms(10);
+     expect(DBOSTriggerTestClass.nInserts).toBe(2);
+     expect(DBOSTriggerTestClass.nDeletes).toBe(0);
+     expect(DBOSTriggerTestClass.nUpdates).toBe(0);
+     expect(DBOSTriggerTestClass.recordMap.get(2)?.status).toBe('Ordered');
+     while (DBOSTriggerTestClass.nWFUpdates < 2) await sleepms(10);
+     expect(DBOSTriggerTestClass.nWFUpdates).toBe(2);
+     expect(DBOSTriggerTestClass.wfRecordMap.get(2)?.status).toBe('Ordered');
+
+     await DBOSTriggerTestClass.deleteRecord(2);
+     while (DBOSTriggerTestClass.nDeletes < 1) await sleepms(10);
+     expect(DBOSTriggerTestClass.nInserts).toBe(2);
+     expect(DBOSTriggerTestClass.nDeletes).toBe(1);
+     expect(DBOSTriggerTestClass.nUpdates).toBe(0);
+     expect(DBOSTriggerTestClass.recordMap.get(2)?.status).toBeUndefined();
+     expect(DBOSTriggerTestClass.nWFUpdates).toBe(2); // Workflow does not trigger on delete
+
+     await DBOSTriggerTestClass.updateRecordStatus(1, 'Shipped');
+     while (DBOSTriggerTestClass.nUpdates < 1) await sleepms(10);
+     expect(DBOSTriggerTestClass.nInserts).toBe(2);
+     expect(DBOSTriggerTestClass.nDeletes).toBe(1);
+     expect(DBOSTriggerTestClass.nUpdates).toBe(1);
+     expect(DBOSTriggerTestClass.recordMap.get(1)?.status).toBe('Shipped');
+     await sleepms(100);
+     // This update does not start a workflow as there is no update marker column.
+     expect(DBOSTriggerTestClass.nWFUpdates).toBe(2);
+   }, 15000);
+ });
@@ -0,0 +1,285 @@
+ import { DBOS, DBOSConfig, WorkflowQueue } from '@dbos-inc/dbos-sdk';
+
+ import { DBTrigger, TriggerOperation } from '../src';
+ import { ClientBase, Pool, PoolClient } from 'pg';
+ import { KnexDataSource } from '@dbos-inc/knex-datasource';
+
+ const config = {
+   host: process.env.PGHOST || 'localhost',
+   port: parseInt(process.env.PGPORT || '5432'),
+   database: process.env.PGDATABASE || 'postgres',
+   user: process.env.PGUSER || 'postgres',
+   password: process.env.PGPASSWORD || 'dbos',
+ };
+ const pool = new Pool(config);
+
+ const kconfig = { client: 'pg', connection: config };
+
+ const knexds = new KnexDataSource('app', kconfig);
+
+ const trig = new DBTrigger({
+   connect: async () => {
+     const conn = pool.connect();
+     return conn;
+   },
+   disconnect: async (c: ClientBase) => {
+     (c as PoolClient).release();
+     return Promise.resolve();
+   },
+   query: async <R>(sql: string, params?: unknown[]) => {
+     return (await pool.query(sql, params)).rows as R[];
+   },
+ });
+
+ function sleepms(ms: number) {
+   return new Promise((r) => setTimeout(r, ms));
+ }
+
+ const testTableName = 'dbos_test_trig_seq';
+
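+ // Workflows started by the sequence-number trigger below are dispatched onto this named queue.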
+ const q = new WorkflowQueue('schedQ');
+
+ class DBOSTriggerTestClassSN {
+   static nTSUpdates = 0;
+   static tsRecordMap: Map<number, TestTable> = new Map();
+
+   static nSNUpdates = 0;
+   static snRecordMap: Map<number, TestTable> = new Map();
+
+   static reset() {
+     DBOSTriggerTestClassSN.nTSUpdates = 0;
+     DBOSTriggerTestClassSN.tsRecordMap = new Map();
+
+     DBOSTriggerTestClassSN.nSNUpdates = 0;
+     DBOSTriggerTestClassSN.snRecordMap = new Map();
+   }
+
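+   // Polling trigger keyed on the seqnum column (sequenceNumJitter presumably widens the catch-up
+   // window to tolerate slightly out-of-order values); the table is polled on dbPollingInterval, and
+   // the workflows it starts run on the 'schedQ' queue.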
+   @trig.triggerWorkflow({
+     tableName: testTableName,
+     recordIDColumns: ['order_id'],
+     sequenceNumColumn: 'seqnum',
+     sequenceNumJitter: 2,
+     queueName: q.name,
+     dbPollingInterval: 1000,
+   })
+   @DBOS.workflow()
+   static async pollWFBySeq(op: TriggerOperation, key: number[], rec: unknown) {
+     DBOS.logger.debug(`WFSN Poll ${op} - ${JSON.stringify(key)} / ${JSON.stringify(rec)}`);
+     expect(op).toBe(TriggerOperation.RecordUpserted);
+     const trec = rec as TestTable;
+     if (
+       !DBOSTriggerTestClassSN.snRecordMap.has(key[0]) ||
+       trec.seqnum > DBOSTriggerTestClassSN.snRecordMap.get(key[0])!.seqnum
+     ) {
+       DBOSTriggerTestClassSN.snRecordMap.set(key[0], trec);
+       await DBOSTriggerTestClassSN.snUpdate();
+     }
+     return Promise.resolve();
+   }
+
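+   // Same idea keyed on the update_date timestamp column; timestampSkewMS presumably widens the
+   // catch-up window to tolerate clock skew between writers and the poller.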
+   @trig.triggerWorkflow({
+     tableName: testTableName,
+     recordIDColumns: ['order_id'],
+     timestampColumn: 'update_date',
+     timestampSkewMS: 60000,
+     dbPollingInterval: 1000,
+   })
+   @DBOS.workflow()
+   static async pollWFByTS(op: TriggerOperation, key: number[], rec: unknown) {
+     DBOS.logger.debug(`WFTS Poll ${op} - ${JSON.stringify(key)} / ${JSON.stringify(rec)}`);
+     expect(op).toBe(TriggerOperation.RecordUpserted);
+     if (op === TriggerOperation.RecordUpserted) {
+       const trec = rec as TestTable;
+       if (
+         !DBOSTriggerTestClassSN.tsRecordMap.has(key[0]) ||
+         trec.update_date > DBOSTriggerTestClassSN.tsRecordMap.get(key[0])!.update_date
+       ) {
+         DBOSTriggerTestClassSN.tsRecordMap.set(key[0], trec);
+         await DBOSTriggerTestClassSN.tsUpdate();
+       }
+     }
+     return Promise.resolve();
+   }
+
+   @DBOS.step()
+   static async snUpdate() {
+     ++DBOSTriggerTestClassSN.nSNUpdates;
+     return Promise.resolve();
+   }
+
+   @DBOS.step()
+   static async tsUpdate() {
+     ++DBOSTriggerTestClassSN.nTSUpdates;
+     return Promise.resolve();
+   }
+
+   @knexds.transaction()
+   static async insertRecord(rec: TestTable) {
+     await knexds.client<TestTable>(testTableName).insert(rec);
+   }
+
+   @knexds.transaction()
+   static async deleteRecord(order_id: number) {
+     await knexds.client<TestTable>(testTableName).where({ order_id }).delete();
+   }
+
+   @knexds.transaction()
+   static async updateRecordStatus(order_id: number, status: string, seqnum: number, update_date: Date) {
+     await knexds.client<TestTable>(testTableName).where({ order_id }).update({ status, seqnum, update_date });
+   }
+ }
+
+ interface TestTable {
+   order_id: number;
+   seqnum: number;
+   update_date: Date;
+   price: number;
+   item: string;
+   status: string;
+ }
+
+ describe('test-db-trigger-polling', () => {
+   beforeAll(async () => {});
+
+   beforeEach(async () => {
+     await KnexDataSource.initializeDBOSSchema(kconfig);
+     const config: DBOSConfig = {
+       name: 'dbtrig_poll',
+     };
+     DBOS.setConfig(config);
+     await trig.db.query(`DROP TABLE IF EXISTS ${testTableName};`);
+     await trig.db.query(`
+       CREATE TABLE IF NOT EXISTS ${testTableName}(
+         order_id SERIAL PRIMARY KEY,
+         seqnum INTEGER,
+         update_date TIMESTAMP,
+         price DECIMAL(10,2),
+         item TEXT,
+         status VARCHAR(10)
+       );`);
+     DBOSTriggerTestClassSN.reset();
+     await DBOS.launch();
+   });
+
+   afterEach(async () => {
+     await DBOS.shutdown();
+     await trig.db.query(`DROP TABLE IF EXISTS ${testTableName};`);
+   });
+
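+   // Exercises both polling triggers: live updates, then writes made while event receivers are shut
+   // down (including one far out of sequence/time that should never be delivered), then catch-up
+   // after the receivers are reinitialized.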
+   test('dbpoll-seqnum', async () => {
+     await DBOSTriggerTestClassSN.insertRecord({
+       order_id: 1,
+       seqnum: 1,
+       update_date: new Date('2024-01-01 11:11:11'),
+       price: 10,
+       item: 'Spacely Sprocket',
+       status: 'Ordered',
+     });
+     await DBOSTriggerTestClassSN.updateRecordStatus(1, 'Packed', 2, new Date('2024-01-01 11:11:12'));
+     while (DBOSTriggerTestClassSN.snRecordMap.get(1)?.status !== 'Packed') await sleepms(10);
+     while (DBOSTriggerTestClassSN.tsRecordMap.get(1)?.status !== 'Packed') await sleepms(10);
+     while (DBOSTriggerTestClassSN.nSNUpdates < 1 || DBOSTriggerTestClassSN.nTSUpdates < 1) await sleepms(10);
+
+     // If these occurred close together, we would not see the insert+update separately...
+     expect(DBOSTriggerTestClassSN.nSNUpdates).toBeGreaterThanOrEqual(1);
+     expect(DBOSTriggerTestClassSN.nSNUpdates).toBeLessThanOrEqual(2);
+     expect(DBOSTriggerTestClassSN.nTSUpdates).toBeGreaterThanOrEqual(1);
+     expect(DBOSTriggerTestClassSN.nTSUpdates).toBeLessThanOrEqual(2);
+     expect(DBOSTriggerTestClassSN.snRecordMap.get(1)?.status).toBe('Packed');
+     expect(DBOSTriggerTestClassSN.tsRecordMap.get(1)?.status).toBe('Packed');
+
+     await DBOSTriggerTestClassSN.insertRecord({
+       order_id: 2,
+       seqnum: 3,
+       update_date: new Date('2024-01-01 11:11:13'),
+       price: 10,
+       item: 'Cogswell Cog',
+       status: 'Ordered',
+     });
+     await DBOSTriggerTestClassSN.updateRecordStatus(1, 'Shipped', 5, new Date('2024-01-01 11:11:15'));
+     while (DBOSTriggerTestClassSN.snRecordMap.get(1)?.status !== 'Shipped') await sleepms(10);
+     while (DBOSTriggerTestClassSN.tsRecordMap.get(1)?.status !== 'Shipped') await sleepms(10);
+     while (DBOSTriggerTestClassSN.snRecordMap.get(2)?.status !== 'Ordered') await sleepms(10);
+     while (DBOSTriggerTestClassSN.tsRecordMap.get(2)?.status !== 'Ordered') await sleepms(10);
+
+     expect(DBOSTriggerTestClassSN.snRecordMap.get(1)?.status).toBe('Shipped');
+     expect(DBOSTriggerTestClassSN.tsRecordMap.get(1)?.status).toBe('Shipped');
+     expect(DBOSTriggerTestClassSN.snRecordMap.get(2)?.status).toBe('Ordered');
+     expect(DBOSTriggerTestClassSN.tsRecordMap.get(2)?.status).toBe('Ordered');
+
+     // Take down
+     await DBOS.deactivateEventReceivers();
+
+     // Do more stuff
+     // Invalid record, won't show up because it is well out of sequence
+     await DBOSTriggerTestClassSN.insertRecord({
+       order_id: 999,
+       seqnum: -999,
+       update_date: new Date('1900-01-01 11:11:13'),
+       price: 10,
+       item: 'Cogswell Cog',
+       status: 'Ordered',
+     });
+
+     // A few more valid records, back in time a little
+     await DBOSTriggerTestClassSN.insertRecord({
+       order_id: 3,
+       seqnum: 4,
+       update_date: new Date('2024-01-01 11:11:14'),
+       price: 10,
+       item: 'Griswold Gear',
+       status: 'Ordered',
+     });
+     await DBOSTriggerTestClassSN.insertRecord({
+       order_id: 4,
+       seqnum: 6,
+       update_date: new Date('2024-01-01 11:11:16'),
+       price: 10,
+       item: 'Wallace Wheel',
+       status: 'Ordered',
+     });
+     await DBOSTriggerTestClassSN.updateRecordStatus(4, 'Shipped', 7, new Date('2024-01-01 11:11:17'));
+
+     // Test restore
+     console.log(
+       '************************************************** Restart *****************************************************',
+     );
+     DBOSTriggerTestClassSN.reset();
+
+     await DBOS.initEventReceivers();
+
+     console.log(
+       '************************************************** Restarted *****************************************************',
+     );
+     DBOSTriggerTestClassSN.reset();
+
+     // Catchup may not be sequential, wait for the last update to get processed...
+     while (DBOSTriggerTestClassSN.snRecordMap.get(3)?.status !== 'Ordered') await sleepms(10);
+     while (DBOSTriggerTestClassSN.tsRecordMap.get(3)?.status !== 'Ordered') await sleepms(10);
+     while (DBOSTriggerTestClassSN.snRecordMap.get(4)?.status !== 'Shipped') await sleepms(10);
+     while (DBOSTriggerTestClassSN.tsRecordMap.get(4)?.status !== 'Shipped') await sleepms(10);
+     await sleepms(100);
+
+     console.log(
+       '************************************************** Catchup Complete *****************************************************',
+     );
+
+     expect(DBOSTriggerTestClassSN.snRecordMap.get(3)?.status).toBe('Ordered');
+     expect(DBOSTriggerTestClassSN.tsRecordMap.get(3)?.status).toBe('Ordered');
+     expect(DBOSTriggerTestClassSN.snRecordMap.get(4)?.status).toBe('Shipped');
+     expect(DBOSTriggerTestClassSN.tsRecordMap.get(4)?.status).toBe('Shipped');
+     expect(DBOSTriggerTestClassSN.snRecordMap.get(999)?.status).toBeUndefined();
+     expect(DBOSTriggerTestClassSN.tsRecordMap.get(999)?.status).toBeUndefined();
+
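+     // Verify that at least one pollWFBySeq workflow execution ran on the named queue.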
+     const wfs = await DBOS.getWorkflows({
+       workflowName: 'pollWFBySeq',
+     });
+
+     let foundQ = false;
+     for (const wfid of wfs.workflowUUIDs) {
+       const stat = await DBOS.retrieveWorkflow(wfid).getStatus();
+       if (stat?.queueName === q.name) foundQ = true;
+     }
+     expect(foundQ).toBeTruthy();
+   }, 15000);
+ });