donobu 5.27.4 → 5.28.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/apis/FileUploadsApi.d.ts +18 -0
- package/dist/apis/FileUploadsApi.js +25 -0
- package/dist/apis/ToolsApi.js +2 -19
- package/dist/clients/AnthropicGptClient.js +4 -0
- package/dist/clients/OpenAiGptClient.js +4 -0
- package/dist/esm/apis/FileUploadsApi.d.ts +18 -0
- package/dist/esm/apis/FileUploadsApi.js +25 -0
- package/dist/esm/apis/ToolsApi.js +2 -19
- package/dist/esm/clients/AnthropicGptClient.js +4 -0
- package/dist/esm/clients/OpenAiGptClient.js +4 -0
- package/dist/esm/init.d.ts +25 -0
- package/dist/esm/init.js +52 -0
- package/dist/esm/lib/test/testExtension.d.ts +15 -1
- package/dist/esm/lib/test/testExtension.js +279 -3
- package/dist/esm/main.d.ts +1 -0
- package/dist/esm/main.js +5 -1
- package/dist/esm/managers/AdminApiController.js +3 -0
- package/dist/esm/managers/DonobuFlow.d.ts +1 -1
- package/dist/esm/models/FlowHandle.d.ts +2 -1
- package/dist/esm/models/FlowMetadata.d.ts +1 -1
- package/dist/esm/models/FlowMetadata.js +4 -1
- package/dist/esm/persistence/DonobuSqliteDb.js +207 -0
- package/dist/esm/persistence/TestConfigHash.js +4 -2
- package/dist/esm/persistence/files/FileUploadCache.d.ts +123 -0
- package/dist/esm/persistence/files/FileUploadCache.js +315 -0
- package/dist/esm/persistence/files/FileUploadWorker.d.ts +66 -0
- package/dist/esm/persistence/files/FileUploadWorker.js +181 -0
- package/dist/esm/persistence/files/fileUploadWorkerRegistry.d.ts +42 -0
- package/dist/esm/persistence/files/fileUploadWorkerRegistry.js +73 -0
- package/dist/esm/persistence/flows/FlowsPersistenceDonobuApi.d.ts +32 -6
- package/dist/esm/persistence/flows/FlowsPersistenceDonobuApi.js +83 -13
- package/dist/esm/persistence/normalizeFlowMetadata.js +35 -0
- package/dist/esm/tools/toolConstants.d.ts +2 -0
- package/dist/esm/tools/toolConstants.js +22 -0
- package/dist/init.d.ts +25 -0
- package/dist/init.js +52 -0
- package/dist/lib/test/testExtension.d.ts +15 -1
- package/dist/lib/test/testExtension.js +279 -3
- package/dist/main.d.ts +1 -0
- package/dist/main.js +5 -1
- package/dist/managers/AdminApiController.js +3 -0
- package/dist/managers/DonobuFlow.d.ts +1 -1
- package/dist/models/FlowHandle.d.ts +2 -1
- package/dist/models/FlowMetadata.d.ts +1 -1
- package/dist/models/FlowMetadata.js +4 -1
- package/dist/persistence/DonobuSqliteDb.js +207 -0
- package/dist/persistence/TestConfigHash.js +4 -2
- package/dist/persistence/files/FileUploadCache.d.ts +123 -0
- package/dist/persistence/files/FileUploadCache.js +315 -0
- package/dist/persistence/files/FileUploadWorker.d.ts +66 -0
- package/dist/persistence/files/FileUploadWorker.js +181 -0
- package/dist/persistence/files/fileUploadWorkerRegistry.d.ts +42 -0
- package/dist/persistence/files/fileUploadWorkerRegistry.js +73 -0
- package/dist/persistence/flows/FlowsPersistenceDonobuApi.d.ts +32 -6
- package/dist/persistence/flows/FlowsPersistenceDonobuApi.js +83 -13
- package/dist/persistence/normalizeFlowMetadata.js +35 -0
- package/dist/tools/toolConstants.d.ts +2 -0
- package/dist/tools/toolConstants.js +22 -0
- package/package.json +1 -1
|
@@ -13,14 +13,21 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
|
|
|
13
13
|
var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
14
14
|
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
|
15
15
|
};
|
|
16
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
17
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
18
|
+
};
|
|
16
19
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
17
20
|
exports.test = void 0;
|
|
18
21
|
const test_1 = require("@playwright/test");
|
|
19
22
|
const async_hooks_1 = require("async_hooks");
|
|
20
23
|
const crypto_1 = require("crypto");
|
|
24
|
+
const fs_1 = require("fs");
|
|
25
|
+
const os_1 = require("os");
|
|
26
|
+
const path_1 = __importDefault(require("path"));
|
|
21
27
|
const v4_1 = require("zod/v4");
|
|
22
28
|
const envVars_1 = require("../../envVars");
|
|
23
29
|
const DonobuFlowsManager_1 = require("../../managers/DonobuFlowsManager");
|
|
30
|
+
const fileUploadWorkerRegistry_1 = require("../../persistence/files/fileUploadWorkerRegistry");
|
|
24
31
|
const BrowserUtils_1 = require("../../utils/BrowserUtils");
|
|
25
32
|
const FlowLogBuffer_1 = require("../../utils/FlowLogBuffer");
|
|
26
33
|
const Logger_1 = require("../../utils/Logger");
|
|
@@ -32,6 +39,181 @@ const tbd_1 = require("../page/tbd");
|
|
|
32
39
|
const selfHealing_1 = require("./utils/selfHealing");
|
|
33
40
|
const triageTestFailure_1 = require("./utils/triageTestFailure");
|
|
34
41
|
__exportStar(require("@playwright/test"), exports);
|
|
42
|
+
// ---------------------------------------------------------------------------
|
|
43
|
+
// Playwright-recorded video → flow persistence
|
|
44
|
+
// ---------------------------------------------------------------------------
|
|
45
|
+
// Playwright records videos when `use.video` is configured (`'on'`,
|
|
46
|
+
// `'retain-on-failure'`, `'on-first-retry'`). Those videos live in the
|
|
47
|
+
// Playwright `testInfo.outputDir` and ship with the Playwright HTML report,
|
|
48
|
+
// but without explicit wiring they never make it into Donobu's flow
|
|
49
|
+
// persistence (so the Studio UI's flow detail view shows no video).
|
|
50
|
+
//
|
|
51
|
+
// The integration here:
|
|
52
|
+
// 1. Inside `finalizeTest`, decide whether to persist based on the
|
|
53
|
+
// effective `video` option AND `testInfo.status`. The decision matrix
|
|
54
|
+
// mirrors Playwright's own retain semantics so we never persist a
|
|
55
|
+
// video the user told Playwright to discard (e.g.,
|
|
56
|
+
// `'retain-on-failure'` + passing test = no persist).
|
|
57
|
+
// 2. If we should persist, we kick off an async block that uses
|
|
58
|
+
// `Video.saveAs` (which internally waits for the BrowserContext to
|
|
59
|
+
// close before writing the .webm to a tmp path). After the file is
|
|
60
|
+
// finalized we read the bytes and forward them to `setVideo` on the
|
|
61
|
+
// flow's persistence layer (which routes through the FileUploadCache
|
|
62
|
+
// → cloud upload pipeline).
|
|
63
|
+
// 3. The deferred persist promise is registered in a module-level set so
|
|
64
|
+
// the worker-scoped drain guard can await it before the Playwright
|
|
65
|
+
// worker exits — without this, fast tests would beat the saveAs-after-
|
|
66
|
+
// close to the punch and the worker would terminate mid-write.
|
|
67
|
+
/**
|
|
68
|
+
* Maximum time the worker-scoped drain guard waits for in-flight video
|
|
69
|
+
* persists to settle before continuing on to the file-upload drain. Each
|
|
70
|
+
* video persist needs the BrowserContext to close (Playwright's video
|
|
71
|
+
* pipeline only finalizes the .webm at that point); for a normal test
|
|
72
|
+
* teardown that's near-instant.
|
|
73
|
+
*/
|
|
74
|
+
const VIDEO_PERSIST_TIMEOUT_MS = 30_000;
|
|
75
|
+
/**
|
|
76
|
+
* Promises tracking deferred video saves. Each entry resolves once the
|
|
77
|
+
* test's BrowserContext has closed AND we've copied bytes from the
|
|
78
|
+
* finalized .webm into the flow's persistence layer. The worker-scoped
|
|
79
|
+
* `donobuFileUploadDrainGuard` awaits all of these before draining cloud
|
|
80
|
+
* uploads, so we don't lose end-of-session videos.
|
|
81
|
+
*/
|
|
82
|
+
const pendingVideoPersists = new Set();
|
|
83
|
+
function trackVideoPersist(p) {
|
|
84
|
+
pendingVideoPersists.add(p);
|
|
85
|
+
void p.finally(() => {
|
|
86
|
+
pendingVideoPersists.delete(p);
|
|
87
|
+
});
|
|
88
|
+
}
|
|
89
|
+
async function waitForPendingVideoPersists(timeoutMs) {
|
|
90
|
+
const initial = pendingVideoPersists.size;
|
|
91
|
+
if (initial === 0) {
|
|
92
|
+
return { initial: 0, remaining: 0, timedOut: false };
|
|
93
|
+
}
|
|
94
|
+
const all = Array.from(pendingVideoPersists);
|
|
95
|
+
let timer;
|
|
96
|
+
const timeout = new Promise((resolve) => {
|
|
97
|
+
timer = setTimeout(() => resolve('timeout'), timeoutMs);
|
|
98
|
+
});
|
|
99
|
+
const settled = Promise.allSettled(all).then(() => 'done');
|
|
100
|
+
const winner = await Promise.race([timeout, settled]);
|
|
101
|
+
if (timer) {
|
|
102
|
+
clearTimeout(timer);
|
|
103
|
+
}
|
|
104
|
+
return {
|
|
105
|
+
initial,
|
|
106
|
+
remaining: pendingVideoPersists.size,
|
|
107
|
+
timedOut: winner === 'timeout',
|
|
108
|
+
};
|
|
109
|
+
}
|
|
110
|
+
/**
|
|
111
|
+
* Returns true iff Playwright would have RETAINED the video file for this
|
|
112
|
+
* test under the given `video` option, given the test's outcome. Mirrors
|
|
113
|
+
* Playwright's retain semantics (see Playwright `VideoMode` docs):
|
|
114
|
+
*
|
|
115
|
+
* - `'off'` / `undefined` → no video at all; never persist.
|
|
116
|
+
* - `'on'` → always retain → always persist.
|
|
117
|
+
* - `'retain-on-failure'` → retain only on non-passing → persist
|
|
118
|
+
* only when status !== 'passed'.
|
|
119
|
+
* - `'on-first-retry'` → recorded only on retries; if a video
|
|
120
|
+
* exists, we're on a retry → persist.
|
|
121
|
+
* - any unknown future mode → conservatively SKIP, with a warn log.
|
|
122
|
+
* Better to under-persist than violate
|
|
123
|
+
* user intent for a mode we don't yet
|
|
124
|
+
* understand.
|
|
125
|
+
*/
|
|
126
|
+
function shouldPersistVideo(videoOption, status) {
|
|
127
|
+
const mode = typeof videoOption === 'string' ? videoOption : videoOption?.mode;
|
|
128
|
+
switch (mode) {
|
|
129
|
+
case 'on':
|
|
130
|
+
return true;
|
|
131
|
+
case 'on-first-retry':
|
|
132
|
+
// When set, Playwright only records on retries; if a video exists
|
|
133
|
+
// it implies we're on a retry — always retain.
|
|
134
|
+
return true;
|
|
135
|
+
case 'retry-with-video':
|
|
136
|
+
// Deprecated alias for 'on-first-retry' that Playwright still
|
|
137
|
+
// accepts on the type. Same retain semantics.
|
|
138
|
+
return true;
|
|
139
|
+
case 'retain-on-failure':
|
|
140
|
+
return status !== 'passed';
|
|
141
|
+
case 'off':
|
|
142
|
+
case undefined:
|
|
143
|
+
return false;
|
|
144
|
+
default:
|
|
145
|
+
Logger_1.appLogger.warn(`Unknown Playwright video mode "${String(mode)}"; skipping video ` +
|
|
146
|
+
`persistence to avoid violating user intent. If this is a real ` +
|
|
147
|
+
`Playwright mode, add it to shouldPersistVideo() in testExtension.ts.`);
|
|
148
|
+
return false;
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
function describeVideoMode(videoOption) {
|
|
152
|
+
if (typeof videoOption === 'string') {
|
|
153
|
+
return videoOption;
|
|
154
|
+
}
|
|
155
|
+
if (videoOption && typeof videoOption === 'object' && 'mode' in videoOption) {
|
|
156
|
+
return videoOption.mode;
|
|
157
|
+
}
|
|
158
|
+
return 'off';
|
|
159
|
+
}
|
|
160
|
+
/**
|
|
161
|
+
* If Playwright recorded a video for this test AND the effective video
|
|
162
|
+
* mode says we should retain it, kicks off an async save+persist and
|
|
163
|
+
* tracks the promise so the worker-scoped drain guard can await it.
|
|
164
|
+
*
|
|
165
|
+
* This function does NOT block on the actual file write — `Video.saveAs`
|
|
166
|
+
* internally waits for the BrowserContext to close (which can't happen
|
|
167
|
+
* until our page fixture's teardown returns). Awaiting it here would
|
|
168
|
+
* deadlock. Instead we defer and let the worker-scoped fixture await all
|
|
169
|
+
* pending persists at end-of-worker.
|
|
170
|
+
*/
|
|
171
|
+
function persistVideoIfApplicable(page, testInfo, videoOption) {
|
|
172
|
+
const flowId = page._dnb.donobuFlowMetadata.id;
|
|
173
|
+
const video = page.video();
|
|
174
|
+
if (!video) {
|
|
175
|
+
// No video being recorded. Either video is 'off', or 'on-first-retry'
|
|
176
|
+
// and this isn't a retry, etc. Nothing to do.
|
|
177
|
+
return;
|
|
178
|
+
}
|
|
179
|
+
if (!shouldPersistVideo(videoOption, testInfo.status)) {
|
|
180
|
+
Logger_1.appLogger.info(`Skipping video persist for flow ${flowId}: video mode ` +
|
|
181
|
+
`"${describeVideoMode(videoOption)}" + status "${testInfo.status}" ` +
|
|
182
|
+
`means Playwright will discard the file and we'd be violating user ` +
|
|
183
|
+
`intent by keeping a copy.`);
|
|
184
|
+
return;
|
|
185
|
+
}
|
|
186
|
+
const persistence = page._dnb.persistence;
|
|
187
|
+
const persistPromise = (async () => {
|
|
188
|
+
const tmpPath = path_1.default.join((0, os_1.tmpdir)(), `donobu-test-video-${flowId}.webm`);
|
|
189
|
+
try {
|
|
190
|
+
// Video.saveAs blocks until the BrowserContext closes and the .webm
|
|
191
|
+
// is fully written. Playwright closes the context as part of its own
|
|
192
|
+
// fixture teardown, AFTER our page fixture body returns — so this
|
|
193
|
+
// promise unblocks once the test is fully torn down.
|
|
194
|
+
await video.saveAs(tmpPath);
|
|
195
|
+
const bytes = await fs_1.promises.readFile(tmpPath);
|
|
196
|
+
await persistence.setVideo(flowId, bytes);
|
|
197
|
+
Logger_1.appLogger.info(`Persisted video for flow ${flowId} ` +
|
|
198
|
+
`(${(bytes.length / 1024).toFixed(0)} KB) — queued for cloud upload.`);
|
|
199
|
+
}
|
|
200
|
+
catch (err) {
|
|
201
|
+
Logger_1.appLogger.warn(`Failed to persist video for flow ${flowId}: ${err.message}`);
|
|
202
|
+
}
|
|
203
|
+
finally {
|
|
204
|
+
await fs_1.promises.unlink(tmpPath).catch(() => undefined);
|
|
205
|
+
}
|
|
206
|
+
})();
|
|
207
|
+
trackVideoPersist(persistPromise);
|
|
208
|
+
}
|
|
209
|
+
/**
|
|
210
|
+
* Maximum time the worker-scoped post-test-session upload drain will wait
|
|
211
|
+
* for cloud uploads to complete before letting the worker exit. Bytes that
|
|
212
|
+
* aren't uploaded within this window stay on disk and are picked up by the
|
|
213
|
+
* next `donobu` process to start with the same data dir (via the file-upload
|
|
214
|
+
* cache's stale-claim reclaim).
|
|
215
|
+
*/
|
|
216
|
+
const UPLOAD_DRAIN_TIMEOUT_MS = 30_000;
|
|
35
217
|
exports.test = test_1.test.extend({
|
|
36
218
|
/**
|
|
37
219
|
* Establish a logging scope for the entire Playwright test *before* any other
|
|
@@ -75,13 +257,98 @@ exports.test = test_1.test.extend({
|
|
|
75
257
|
},
|
|
76
258
|
{ scope: 'test', auto: true },
|
|
77
259
|
],
|
|
260
|
+
/**
|
|
261
|
+
* Drain in-flight file uploads (videos, screenshots, etc.) to Donobu
|
|
262
|
+
* Cloud at the end of every Playwright worker's lifetime.
|
|
263
|
+
*
|
|
264
|
+
* The {@link FileUploadWorker} runs continuously while tests execute,
|
|
265
|
+
* uploading flow artifacts to the cloud asynchronously. When the worker
|
|
266
|
+
* process exits, any uploads still mid-flight are abandoned at whatever
|
|
267
|
+
* stage they reached. The bytes survive on disk as `.uploading.<token>`
|
|
268
|
+
* markers, but for ephemeral CI runners — where the box gets torn down
|
|
269
|
+
* after the test session — there's no future process to resume them.
|
|
270
|
+
*
|
|
271
|
+
* This auto fixture closes that gap by waiting for the queue to drain
|
|
272
|
+
* before letting the worker exit.
|
|
273
|
+
*/
|
|
274
|
+
donobuFileUploadDrainGuard: [
|
|
275
|
+
async ({}, use) => {
|
|
276
|
+
await use();
|
|
277
|
+
// ── Teardown — runs ONCE per Playwright worker, after every test ──
|
|
278
|
+
// First, wait for any deferred video persists from finalizeTest to
|
|
279
|
+
// complete. Each one is gated on a BrowserContext close (Playwright
|
|
280
|
+
// finalizes the .webm at that point) and then forwards the bytes to
|
|
281
|
+
// setVideo. Without this wait, fast tests would beat the
|
|
282
|
+
// saveAs-after-close to the punch and the worker would exit
|
|
283
|
+
// mid-write — meaning the cloud upload below would never be queued
|
|
284
|
+
// for the missing video.
|
|
285
|
+
const videoWait = await waitForPendingVideoPersists(VIDEO_PERSIST_TIMEOUT_MS);
|
|
286
|
+
if (videoWait.initial > 0) {
|
|
287
|
+
if (videoWait.timedOut) {
|
|
288
|
+
Logger_1.appLogger.warn(`donobuFileUploadDrainGuard: ${videoWait.remaining} of ` +
|
|
289
|
+
`${videoWait.initial} video persist(s) still pending after ` +
|
|
290
|
+
`${Math.round(VIDEO_PERSIST_TIMEOUT_MS / 1000)}s; proceeding ` +
|
|
291
|
+
`with upload drain anyway. Those video files (if any) will ` +
|
|
292
|
+
`not be uploaded this session.`);
|
|
293
|
+
}
|
|
294
|
+
else {
|
|
295
|
+
Logger_1.appLogger.info(`donobuFileUploadDrainGuard: completed ${videoWait.initial} ` +
|
|
296
|
+
`pending video persist(s) before upload drain.`);
|
|
297
|
+
}
|
|
298
|
+
}
|
|
299
|
+
let initialActive;
|
|
300
|
+
try {
|
|
301
|
+
const status = await (0, fileUploadWorkerRegistry_1.getFileUploadAggregateStatus)();
|
|
302
|
+
initialActive = status.totalPending + status.totalInFlight;
|
|
303
|
+
}
|
|
304
|
+
catch (err) {
|
|
305
|
+
Logger_1.appLogger.warn(`donobuFileUploadDrainGuard: failed to read upload status; ` +
|
|
306
|
+
`skipping drain. Error: ${err.message}`);
|
|
307
|
+
return;
|
|
308
|
+
}
|
|
309
|
+
if (initialActive === 0) {
|
|
310
|
+
Logger_1.appLogger.info('donobuFileUploadDrainGuard: no pending file uploads at end of test ' +
|
|
311
|
+
'session; worker is exiting.');
|
|
312
|
+
return;
|
|
313
|
+
}
|
|
314
|
+
Logger_1.appLogger.info(`donobuFileUploadDrainGuard: test session complete; ${initialActive} ` +
|
|
315
|
+
`file upload(s) still syncing to Donobu Cloud. ` +
|
|
316
|
+
`Waiting up to ${Math.round(UPLOAD_DRAIN_TIMEOUT_MS / 1000)}s for ` +
|
|
317
|
+
`the queue to drain before exit. ` +
|
|
318
|
+
`(Any uploads not finished within the timeout stay on disk and ` +
|
|
319
|
+
`resume on the next \`donobu\` process start.)`);
|
|
320
|
+
const startedAt = Date.now();
|
|
321
|
+
let result;
|
|
322
|
+
try {
|
|
323
|
+
result = await (0, fileUploadWorkerRegistry_1.shutdownFileUploadWorkers)(UPLOAD_DRAIN_TIMEOUT_MS);
|
|
324
|
+
}
|
|
325
|
+
catch (err) {
|
|
326
|
+
Logger_1.appLogger.warn(`donobuFileUploadDrainGuard: drain threw; letting worker exit. ` +
|
|
327
|
+
`Error: ${err.message}`);
|
|
328
|
+
return;
|
|
329
|
+
}
|
|
330
|
+
const elapsedMs = Date.now() - startedAt;
|
|
331
|
+
if (result.drained) {
|
|
332
|
+
Logger_1.appLogger.info(`donobuFileUploadDrainGuard: all ${initialActive} file upload(s) ` +
|
|
333
|
+
`completed in ${Math.round(elapsedMs / 1000)}s. Worker exiting.`);
|
|
334
|
+
}
|
|
335
|
+
else {
|
|
336
|
+
Logger_1.appLogger.warn(`donobuFileUploadDrainGuard: drain timed out after ` +
|
|
337
|
+
`${Math.round(elapsedMs / 1000)}s with ${result.totalRemaining} ` +
|
|
338
|
+
`upload(s) still pending. The bytes are on disk and will resume ` +
|
|
339
|
+
`the next time a \`donobu\` process starts with the same data ` +
|
|
340
|
+
`directory. Worker exiting now.`);
|
|
341
|
+
}
|
|
342
|
+
},
|
|
343
|
+
{ scope: 'worker', auto: true },
|
|
344
|
+
],
|
|
78
345
|
// 1) Declare `gptClient` as an "option" fixture with a default of `undefined`.
|
|
79
346
|
gptClient: [
|
|
80
347
|
undefined, // default
|
|
81
348
|
{ option: true }, // so that test.use({ gptClient: ... }) can override
|
|
82
349
|
],
|
|
83
350
|
// Override the default page fixture
|
|
84
|
-
page: async ({ page, gptClient, headless, flowLoggingContext }, use, testInfo) => {
|
|
351
|
+
page: async ({ page, gptClient, headless, flowLoggingContext, video }, use, testInfo) => {
|
|
85
352
|
Logger_1.appLogger.info(`Test started: "${testInfo.title}"`);
|
|
86
353
|
const overallObjective = testInfo.annotations.find((v) => v.type === 'objective')?.description ??
|
|
87
354
|
null;
|
|
@@ -132,7 +399,7 @@ exports.test = test_1.test.extend({
|
|
|
132
399
|
throw error;
|
|
133
400
|
}
|
|
134
401
|
finally {
|
|
135
|
-
await finalizeTest(extendedPage, testInfo, logBuffer);
|
|
402
|
+
await finalizeTest(extendedPage, testInfo, logBuffer, video);
|
|
136
403
|
}
|
|
137
404
|
},
|
|
138
405
|
});
|
|
@@ -244,8 +511,17 @@ async function attachStepScreenshots(sharedState, testInfo) {
|
|
|
244
511
|
contentType: 'application/json',
|
|
245
512
|
});
|
|
246
513
|
}
|
|
247
|
-
async function finalizeTest(page, testInfo, logBuffer) {
|
|
514
|
+
async function finalizeTest(page, testInfo, logBuffer, videoOption) {
|
|
248
515
|
const sharedState = page._dnb;
|
|
516
|
+
// Kick off video persistence early in teardown. The actual file copy is
|
|
517
|
+
// deferred (it can't run until the BrowserContext closes, which happens
|
|
518
|
+
// AFTER our fixture body returns), so this just queues a tracked promise.
|
|
519
|
+
// Doing it before the rest of finalizeTest's work means the deferred
|
|
520
|
+
// saveAs has already-finalized references to flowId / persistence / video
|
|
521
|
+
// by the time it runs, regardless of what subsequent teardown does.
|
|
522
|
+
if (videoOption !== undefined) {
|
|
523
|
+
persistVideoIfApplicable(page, testInfo, videoOption);
|
|
524
|
+
}
|
|
249
525
|
try {
|
|
250
526
|
sharedState.donobuFlowMetadata.state =
|
|
251
527
|
testInfo.status === 'failed' || testInfo.status === 'timedOut'
|
package/dist/esm/main.d.ts
CHANGED
|
@@ -54,6 +54,7 @@ export * from './models/ToolCallResult';
|
|
|
54
54
|
export * from './models/ToolSchema';
|
|
55
55
|
export type { VideoSegment } from './models/VideoSegment';
|
|
56
56
|
export type { EnvPersistence } from './persistence/env/EnvPersistence';
|
|
57
|
+
export { type FileUploadAggregateStatus, type FileUploadPlatformStatus, getFileUploadAggregateStatus, shutdownFileUploadWorkers, } from './persistence/files/fileUploadWorkerRegistry';
|
|
57
58
|
export type { FlowsPersistence } from './persistence/flows/FlowsPersistence';
|
|
58
59
|
export { type PersistencePlugin, PersistencePluginRegistry, } from './persistence/PersistencePlugin';
|
|
59
60
|
export type { SuitesPersistence } from './persistence/suites/SuitesPersistence';
|
package/dist/esm/main.js
CHANGED
|
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
|
14
14
|
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
|
15
15
|
};
|
|
16
16
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
17
|
-
exports.TargetRuntimePluginRegistry = exports.PersistencePluginRegistry = exports.createDefaultToolRegistry = exports.ToolManager = exports.PluginLoader = exports.InteractionVisualizer = exports.setupDonobuStack = exports.prepareToolCallsForRerun = exports.DonobuFlowsManager = exports.distillAllowedEnvVariableNames = exports.DonobuFlow = exports.AdminApiController = exports.env = exports.runGenerateSiteTests = exports.VercelAiGptClient = exports.OpenAiGptClient = exports.GptClientPluginRegistry = exports.GptClient = exports.GoogleGenerativeAiGptClient = exports.fixAssertFields = exports.DonobuGptClient = exports.AnthropicGptClient = void 0;
|
|
17
|
+
exports.TargetRuntimePluginRegistry = exports.PersistencePluginRegistry = exports.shutdownFileUploadWorkers = exports.getFileUploadAggregateStatus = exports.createDefaultToolRegistry = exports.ToolManager = exports.PluginLoader = exports.InteractionVisualizer = exports.setupDonobuStack = exports.prepareToolCallsForRerun = exports.DonobuFlowsManager = exports.distillAllowedEnvVariableNames = exports.DonobuFlow = exports.AdminApiController = exports.env = exports.runGenerateSiteTests = exports.VercelAiGptClient = exports.OpenAiGptClient = exports.GptClientPluginRegistry = exports.GptClient = exports.GoogleGenerativeAiGptClient = exports.fixAssertFields = exports.DonobuGptClient = exports.AnthropicGptClient = void 0;
|
|
18
18
|
exports.startDonobuServer = startDonobuServer;
|
|
19
19
|
const commander_1 = require("commander");
|
|
20
20
|
const v4_1 = require("zod/v4");
|
|
@@ -88,6 +88,9 @@ __exportStar(require("./models/TargetConfig"), exports);
|
|
|
88
88
|
__exportStar(require("./models/TestMetadata"), exports);
|
|
89
89
|
__exportStar(require("./models/ToolCallResult"), exports);
|
|
90
90
|
__exportStar(require("./models/ToolSchema"), exports);
|
|
91
|
+
var fileUploadWorkerRegistry_1 = require("./persistence/files/fileUploadWorkerRegistry");
|
|
92
|
+
Object.defineProperty(exports, "getFileUploadAggregateStatus", { enumerable: true, get: function () { return fileUploadWorkerRegistry_1.getFileUploadAggregateStatus; } });
|
|
93
|
+
Object.defineProperty(exports, "shutdownFileUploadWorkers", { enumerable: true, get: function () { return fileUploadWorkerRegistry_1.shutdownFileUploadWorkers; } });
|
|
91
94
|
var PersistencePlugin_1 = require("./persistence/PersistencePlugin");
|
|
92
95
|
Object.defineProperty(exports, "PersistencePluginRegistry", { enumerable: true, get: function () { return PersistencePlugin_1.PersistencePluginRegistry; } });
|
|
93
96
|
var TargetRuntimePlugin_1 = require("./targets/TargetRuntimePlugin");
|
|
@@ -177,6 +180,7 @@ if (require.main === module) {
|
|
|
177
180
|
// never when imported as a library (where they would hijack the host
|
|
178
181
|
// application's error handling).
|
|
179
182
|
(0, init_1.installCrashHandlers)();
|
|
183
|
+
(0, init_1.installShutdownHandlers)();
|
|
180
184
|
main(process.argv).catch((e) => {
|
|
181
185
|
Logger_1.appLogger.error('Unhandled error in main', e);
|
|
182
186
|
process.exit(1);
|
|
@@ -9,6 +9,7 @@ const v4_1 = require("zod/v4");
|
|
|
9
9
|
const AgentsApi_1 = require("../apis/AgentsApi");
|
|
10
10
|
const AskAiApi_1 = require("../apis/AskAiApi");
|
|
11
11
|
const EnvDataApi_1 = require("../apis/EnvDataApi");
|
|
12
|
+
const FileUploadsApi_1 = require("../apis/FileUploadsApi");
|
|
12
13
|
const FlowsAiQueriesApi_1 = require("../apis/FlowsAiQueriesApi");
|
|
13
14
|
const FlowsApi_1 = require("../apis/FlowsApi");
|
|
14
15
|
const FlowsFilesApi_1 = require("../apis/FlowsFilesApi");
|
|
@@ -246,6 +247,7 @@ class AdminApiController {
|
|
|
246
247
|
pingApi: new PingApi_1.PingApi(),
|
|
247
248
|
schemaApi: new SchemaApi_1.SchemaApi(),
|
|
248
249
|
targetsApi: new TargetsApi_1.TargetsApi(donobuStack.targetRuntimePlugins),
|
|
250
|
+
fileUploadsApi: new FileUploadsApi_1.FileUploadsApi(),
|
|
249
251
|
};
|
|
250
252
|
}
|
|
251
253
|
/**
|
|
@@ -348,6 +350,7 @@ class AdminApiController {
|
|
|
348
350
|
static registerUtilityRoutes(app, apis) {
|
|
349
351
|
app.get('/api/ping', this.asyncHandler(apis.pingApi.ping.bind(apis.pingApi)));
|
|
350
352
|
app.get('/api/schema', this.asyncHandler(apis.schemaApi.getSchema.bind(apis.schemaApi)));
|
|
353
|
+
app.get('/api/uploads/status', this.asyncHandler(apis.fileUploadsApi.getStatus.bind(apis.fileUploadsApi)));
|
|
351
354
|
}
|
|
352
355
|
static asyncHandler(fn) {
|
|
353
356
|
return (req, res, next) => {
|
|
@@ -77,7 +77,7 @@ export declare class DonobuFlow {
|
|
|
77
77
|
* {@link metadata.result}, or `null` when the flow ended without an
|
|
78
78
|
* explicit result.
|
|
79
79
|
*/
|
|
80
|
-
run(): Promise<
|
|
80
|
+
run(): Promise<FlowMetadata['result']>;
|
|
81
81
|
/**
|
|
82
82
|
* Delegates to the inspector to attempt recovery after the target is
|
|
83
83
|
* closed. If recovery fails, the flow is marked as failed.
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import type { DonobuFlow } from '../managers/DonobuFlow';
|
|
2
2
|
import type { FlowLogBuffer } from '../utils/FlowLogBuffer';
|
|
3
|
+
import type { FlowMetadata } from './FlowMetadata';
|
|
3
4
|
/**
|
|
4
5
|
* Handle that pairs an active DonobuFlow with its running job promise.
|
|
5
6
|
*
|
|
@@ -8,7 +9,7 @@ import type { FlowLogBuffer } from '../utils/FlowLogBuffer';
|
|
|
8
9
|
*/
|
|
9
10
|
export interface FlowHandle {
|
|
10
11
|
donobuFlow: DonobuFlow;
|
|
11
|
-
job: Promise<
|
|
12
|
+
job: Promise<FlowMetadata['result']>;
|
|
12
13
|
logBuffer: FlowLogBuffer;
|
|
13
14
|
}
|
|
14
15
|
//# sourceMappingURL=FlowHandle.d.ts.map
|
|
@@ -154,7 +154,7 @@ export declare const FlowMetadataSchema: z.ZodObject<{
|
|
|
154
154
|
DETERMINISTIC: "DETERMINISTIC";
|
|
155
155
|
}>;
|
|
156
156
|
isControlPanelEnabled: z.ZodBoolean;
|
|
157
|
-
result: z.ZodNullable<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
|
|
157
|
+
result: z.ZodNullable<z.ZodUnion<readonly [z.ZodRecord<z.ZodString, z.ZodUnknown>, z.ZodArray<z.ZodRecord<z.ZodString, z.ZodUnknown>>]>>;
|
|
158
158
|
inputTokensUsed: z.ZodNumber;
|
|
159
159
|
completionTokensUsed: z.ZodNumber;
|
|
160
160
|
startedAt: z.ZodNullable<z.ZodNumber>;
|
|
@@ -60,7 +60,10 @@ exports.FlowMetadataSchema = RunConfig_1.RunConfigSchema.extend({
|
|
|
60
60
|
.boolean()
|
|
61
61
|
.describe('Set to true if the control panel should be enabled.'),
|
|
62
62
|
result: v4_1.z
|
|
63
|
-
.
|
|
63
|
+
.union([
|
|
64
|
+
v4_1.z.record(v4_1.z.string(), v4_1.z.unknown()),
|
|
65
|
+
v4_1.z.array(v4_1.z.record(v4_1.z.string(), v4_1.z.unknown())),
|
|
66
|
+
])
|
|
64
67
|
.nullable()
|
|
65
68
|
.describe(`The final output of the flow, populated when the flow reaches a terminal state. The content
|
|
66
69
|
depends on how the flow completes:
|
|
@@ -50,6 +50,7 @@ const MiscUtils_1 = require("../utils/MiscUtils");
|
|
|
50
50
|
const normalizeFlowMetadata_1 = require("./normalizeFlowMetadata");
|
|
51
51
|
const TestConfigHash_1 = require("./TestConfigHash");
|
|
52
52
|
let instance = null;
|
|
53
|
+
const Hex32RegExp = /^[a-f0-9]{32}$/i;
|
|
53
54
|
/**
|
|
54
55
|
* ##################################################################
|
|
55
56
|
* # #
|
|
@@ -517,6 +518,212 @@ CREATE INDEX IF NOT EXISTS idx_ai_queries_flow_id_started_at ON ai_queries(flow_
|
|
|
517
518
|
}
|
|
518
519
|
},
|
|
519
520
|
},
|
|
521
|
+
{
|
|
522
|
+
version: 13,
|
|
523
|
+
up: (db) => {
|
|
524
|
+
// 1. Fetch all flows, oldest first.
|
|
525
|
+
const flows = v4_1.z
|
|
526
|
+
.array(v4_1.z.object({
|
|
527
|
+
id: v4_1.z.string(),
|
|
528
|
+
test_id: v4_1.z.string().nullable(),
|
|
529
|
+
metadata: v4_1.z.string(),
|
|
530
|
+
}))
|
|
531
|
+
.parse(db
|
|
532
|
+
.prepare('SELECT id, test_id, metadata FROM flow_metadata ORDER BY created_at ASC')
|
|
533
|
+
.all())
|
|
534
|
+
// We'll only consider those flows without tests, or those migrated by
|
|
535
|
+
// migration 12. Fortunately, only flows that were created as part of
|
|
536
|
+
// migration 12 have test IDs that are 32-character hexadecimals (tests
|
|
537
|
+
// generated in the app use UUIDs, and tests generated by the SDK use
|
|
538
|
+
// the Playwright testId, both of which contain hyphens).
|
|
539
|
+
.filter((flow) => flow.test_id === null || Hex32RegExp.test(flow.test_id));
|
|
540
|
+
if (flows.length === 0) {
|
|
541
|
+
return;
|
|
542
|
+
}
|
|
543
|
+
// 2. Build a config hash for each flow and group by hash.
|
|
544
|
+
const groups = new Map();
|
|
545
|
+
// If 2 flows previously hashed to different tests, but now have the same
|
|
546
|
+
// hash (due to the improvements to hashTestConfig), and the user executed
|
|
547
|
+
// the test that will now be merged into the other one, we'll need to
|
|
548
|
+
// correctly move over the new flow using the same mapping, even if the
|
|
549
|
+
// hash changes (because the user edited the test, say). So we'll need to
|
|
550
|
+
// keep track of the mapping of old hash to new hash.
|
|
551
|
+
const oldHashToNewHash = new Map();
|
|
552
|
+
// We'll also want to delete the old test if it's no longer referenced by
|
|
553
|
+
// any flows.
|
|
554
|
+
const testsToDelete = new Set();
|
|
555
|
+
for (const flow of flows) {
|
|
556
|
+
try {
|
|
557
|
+
const flowMetadata = FlowMetadata_1.FlowMetadataSchema.parse((0, normalizeFlowMetadata_1.normalizeFlowMetadata)(JSON.parse(flow.metadata)));
|
|
558
|
+
/*
|
|
559
|
+
* There are 7 possible scenarios, since the hashing algorithm was
|
|
560
|
+
* improved since migration 12, and some flows will have a new hash,
|
|
561
|
+
* matching them with flows that were previously added to a distinct
|
|
562
|
+
* test:
|
|
563
|
+
*
|
|
564
|
+
* 1. The flow is a new (orphaned) flow, with no testId: use the hash
|
|
565
|
+
* to find or create its test.
|
|
566
|
+
* 2. The flow was previously migrated by migration 12, and correctly:
|
|
567
|
+
* use the hash, which should match the existing testId, to keep it
|
|
568
|
+
* in the existing test.
|
|
569
|
+
* 3. The flow was previously migrated by migration 12, but
|
|
570
|
+
* incorrectly (old hash): use the new hash to add it to the
|
|
571
|
+
* correct test.
|
|
572
|
+
* 4. The flow was created from a previously-migrated test, which was
|
|
573
|
+
* created from a correct hash, and the test was not modified: use
|
|
574
|
+
* the hash, which should match the existing testId, to keep it in
|
|
575
|
+
* the existing test.
|
|
576
|
+
* 5. The flow was created from a previously-migrated test, which was
|
|
577
|
+
* created from a correct hash, and the test was modified: since
|
|
578
|
+
* the test was modified, this will be a brand-new hash, but we
|
|
579
|
+
* want to keep the flow in this test, so use the existing testId
|
|
580
|
+
* to keep it in the existing test.
|
|
581
|
+
* 6. The flow was created from a previously-migrated test, which was
|
|
582
|
+
* created from an incorrect hash, and the test was not modified:
|
|
583
|
+
* use the new hash to add it to the correct test.
|
|
584
|
+
* 7. The flow was created from a previously-migrated test, which was
|
|
585
|
+
* created from an incorrect hash, and the test was modified: since
|
|
586
|
+
* the test was modified, this will be a brand-new hash, but we
|
|
587
|
+
* want to keep it with the other flows from this test, so use a
|
|
588
|
+
* old-hash-to-new-hash map to assign it the correct test based on
|
|
589
|
+
* its current testId.
|
|
590
|
+
*
|
|
591
|
+
* We have no way of distinguishing between scenarios 5 and 7, since
|
|
592
|
+
* they both produce brand-new hashes, so we'll need to keep track of
|
|
593
|
+
* the mapping of old hash to new hash (based on the flows that
|
|
594
|
+
* existed prior to migration 12) to assign the flow to the correct
|
|
595
|
+
* test. Since flows are handled in oldest-to-newest order, the map
|
|
596
|
+
* should be populated by migration 12 flows prior to new flows being
|
|
597
|
+
* encountered.
|
|
598
|
+
*/
|
|
599
|
+
let hash = (0, TestConfigHash_1.hashTestConfig)(flowMetadata);
|
|
600
|
+
if (flowMetadata.testId && flowMetadata.testId !== hash) {
|
|
601
|
+
// Scenario 3, 5, 6
|
|
602
|
+
if (oldHashToNewHash.has(flowMetadata.testId)) {
|
|
603
|
+
hash = oldHashToNewHash.get(flowMetadata.testId);
|
|
604
|
+
}
|
|
605
|
+
else if (groups.has(flowMetadata.testId)) {
|
|
606
|
+
// Scenario 5
|
|
607
|
+
hash = flowMetadata.testId;
|
|
608
|
+
}
|
|
609
|
+
else {
|
|
610
|
+
// Scenario 3 (first time)
|
|
611
|
+
oldHashToNewHash.set(flowMetadata.testId, hash);
|
|
612
|
+
testsToDelete.add(flowMetadata.testId);
|
|
613
|
+
}
|
|
614
|
+
}
|
|
615
|
+
const existing = groups.get(hash);
|
|
616
|
+
if (existing) {
|
|
617
|
+
existing.push(flowMetadata);
|
|
618
|
+
}
|
|
619
|
+
else {
|
|
620
|
+
groups.set(hash, [flowMetadata]);
|
|
621
|
+
}
|
|
622
|
+
}
|
|
623
|
+
catch (error) {
|
|
624
|
+
// Fail open: skip flows we can't parse/normalize so one bad row
|
|
625
|
+
// doesn't block the rest of the migration.
|
|
626
|
+
Logger_1.appLogger.error(`Migration 13: skipping flow ${flow.id} during grouping:`, error);
|
|
627
|
+
}
|
|
628
|
+
}
|
|
629
|
+
Logger_1.appLogger.info(`Migration 13: found ${groups.size} test groups`);
|
|
630
|
+
// 3. Create a test for each group.
|
|
631
|
+
const insertTestStmt = db.prepare(`
|
|
632
|
+
INSERT OR IGNORE INTO test_metadata (id, name, metadata, created_at, suite_id, next_run_mode)
|
|
633
|
+
VALUES (@id, @name, @metadata, @createdAt, @suiteId, @nextRunMode)
|
|
634
|
+
`);
|
|
635
|
+
const updateFlowStmt = db.prepare('UPDATE flow_metadata SET test_id = ?, metadata = ? WHERE id = ?');
|
|
636
|
+
const deleteTestStmt = db.prepare('DELETE FROM test_metadata WHERE id = ?');
|
|
637
|
+
const countFlowsByTestIdStmt = db.prepare('SELECT COUNT(*) FROM flow_metadata WHERE test_id = ?');
|
|
638
|
+
let testsCreated = 0;
|
|
639
|
+
let testsSkipped = 0;
|
|
640
|
+
let flowsLinked = 0;
|
|
641
|
+
let failures = 0;
|
|
642
|
+
let testsDeleted = 0;
|
|
643
|
+
for (const [hash, flows] of groups) {
|
|
644
|
+
try {
|
|
645
|
+
// Flows were inserted oldest-first, so the last entry is newest.
|
|
646
|
+
const newestFlow = flows[flows.length - 1];
|
|
647
|
+
// `maxToolCalls` is null on every deterministic flow, so prefer the
|
|
648
|
+
// most recent autonomous flow's value. Fall back to the newest flow's
|
|
649
|
+
// value (likely null) if no autonomous flow exists in the group.
|
|
650
|
+
const newestAutonomous = [...flows]
|
|
651
|
+
.reverse()
|
|
652
|
+
.find((f) => f.runMode === 'AUTONOMOUS');
|
|
653
|
+
const maxToolCalls = newestAutonomous?.maxToolCalls ?? newestFlow.maxToolCalls;
|
|
654
|
+
const testName = (0, displayName_1.getDisplayName)(newestFlow, 'Untitled Test');
|
|
655
|
+
const testMetadata = {
|
|
656
|
+
id: hash,
|
|
657
|
+
name: testName,
|
|
658
|
+
target: newestFlow.target,
|
|
659
|
+
web: newestFlow.web,
|
|
660
|
+
envVars: newestFlow.envVars,
|
|
661
|
+
customTools: newestFlow.customTools,
|
|
662
|
+
overallObjective: newestFlow.overallObjective,
|
|
663
|
+
allowedTools: newestFlow.allowedTools,
|
|
664
|
+
resultJsonSchema: newestFlow.resultJsonSchema,
|
|
665
|
+
callbackUrl: newestFlow.callbackUrl,
|
|
666
|
+
videoDisabled: newestFlow.videoDisabled,
|
|
667
|
+
maxToolCalls,
|
|
668
|
+
metadataVersion: newestFlow.metadataVersion,
|
|
669
|
+
createdWithDonobuVersion: newestFlow.createdWithDonobuVersion,
|
|
670
|
+
suiteId: null,
|
|
671
|
+
nextRunMode: 'DETERMINISTIC',
|
|
672
|
+
};
|
|
673
|
+
const insertResult = insertTestStmt.run({
|
|
674
|
+
id: hash,
|
|
675
|
+
name: testName,
|
|
676
|
+
metadata: JSON.stringify(testMetadata),
|
|
677
|
+
createdAt: Date.now(),
|
|
678
|
+
suiteId: null,
|
|
679
|
+
nextRunMode: 'DETERMINISTIC',
|
|
680
|
+
});
|
|
681
|
+
if (insertResult.changes > 0) {
|
|
682
|
+
testsCreated++;
|
|
683
|
+
}
|
|
684
|
+
else {
|
|
685
|
+
testsSkipped++;
|
|
686
|
+
}
|
|
687
|
+
// 4. Link each flow to its test (both test_id column and testId in JSON).
|
|
688
|
+
for (const groupedFlow of flows) {
|
|
689
|
+
try {
|
|
690
|
+
groupedFlow.testId = hash;
|
|
691
|
+
updateFlowStmt.run(hash, JSON.stringify(groupedFlow), groupedFlow.id);
|
|
692
|
+
flowsLinked++;
|
|
693
|
+
}
|
|
694
|
+
catch (error) {
|
|
695
|
+
failures++;
|
|
696
|
+
Logger_1.appLogger.error(`Migration 13: failed to link flow ${groupedFlow.id} to test ${hash}:`, error);
|
|
697
|
+
}
|
|
698
|
+
}
|
|
699
|
+
}
|
|
700
|
+
catch (error) {
|
|
701
|
+
// Fail open: skip groups we can't materialize as tests.
|
|
702
|
+
failures++;
|
|
703
|
+
Logger_1.appLogger.error(`Migration 13: skipping test group ${hash}:`, error);
|
|
704
|
+
}
|
|
705
|
+
}
|
|
706
|
+
// 5. Delete any tests that are no longer referenced by any flows.
|
|
707
|
+
for (const testId of testsToDelete) {
|
|
708
|
+
// Verify that there are no flows still referencing the test.
|
|
709
|
+
const flowsReferencingTest = countFlowsByTestIdStmt.get(testId);
|
|
710
|
+
if (Number(flowsReferencingTest) > 0) {
|
|
711
|
+
Logger_1.appLogger.warn(`Migration 13: skipping deletion of test ${testId} because it is still referenced by ${flowsReferencingTest} flow(s).`);
|
|
712
|
+
continue;
|
|
713
|
+
}
|
|
714
|
+
deleteTestStmt.run(testId);
|
|
715
|
+
testsDeleted++;
|
|
716
|
+
}
|
|
717
|
+
Logger_1.appLogger.info(`Migration 13: done:
|
|
718
|
+
- Created ${testsCreated} test(s)
|
|
719
|
+
- Skipped ${testsSkipped} pre-existing test(s)
|
|
720
|
+
- Linked ${flowsLinked} flow(s)
|
|
721
|
+
- Deleted ${testsDeleted} test(s)`);
|
|
722
|
+
if (failures > 0) {
|
|
723
|
+
Logger_1.appLogger.warn(`Migration 13: ${failures} failure(s) (see errors above).`);
|
|
724
|
+
}
|
|
725
|
+
},
|
|
726
|
+
},
|
|
520
727
|
];
|
|
521
728
|
/**
|
|
522
729
|
* Create the SQL schema migrations table that can be used to manage table
|