codeflash 0.0.1 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +104 -0
- package/bin/codeflash-setup.js +13 -0
- package/bin/codeflash.js +131 -0
- package/package.json +84 -6
- package/runtime/capture.js +871 -0
- package/runtime/comparator.js +406 -0
- package/runtime/compare-results.js +331 -0
- package/runtime/index.d.ts +146 -0
- package/runtime/index.js +86 -0
- package/runtime/loop-runner.js +226 -0
- package/runtime/serializer.js +851 -0
- package/scripts/postinstall.js +265 -0
- package/index.js +0 -7
package/runtime/capture.js
@@ -0,0 +1,871 @@
/**
 * Codeflash Jest Helper - Unified Test Instrumentation
 *
 * This module provides a unified approach to instrumenting JavaScript tests
 * for both behavior verification and performance measurement.
 *
 * The instrumentation mirrors Python's codeflash implementation:
 * - Static identifiers (testModule, testFunction, lineId) are passed at instrumentation time
 * - Dynamic invocation counter increments only when the same call site is seen again (e.g., in loops)
 * - Uses hrtime for nanosecond precision timing
 * - SQLite for a consistent data format with the Python implementation
 *
 * Usage:
 *   const codeflash = require('@codeflash/jest-runtime');
 *
 *   // For behavior verification (writes to SQLite):
 *   const result = codeflash.capture('functionName', lineId, targetFunction, arg1, arg2);
 *
 *   // For performance benchmarking (stdout only):
 *   const result = codeflash.capturePerf('functionName', lineId, targetFunction, arg1, arg2);
 *
 * Environment Variables:
 *   CODEFLASH_OUTPUT_FILE    - Path to write the results SQLite file
 *   CODEFLASH_LOOP_INDEX     - Current benchmark loop iteration (default: 1)
 *   CODEFLASH_TEST_ITERATION - Test iteration number (default: 0)
 *   CODEFLASH_TEST_MODULE    - Test module path
 */
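For orientation, here is a minimal sketch of what a test instrumented with this helper could look like. The module under test, the test name, and the '12' line id are hypothetical; only the capture signature and package name come from the file itself.

const codeflash = require('@codeflash/jest-runtime');
const { add } = require('../src/math'); // hypothetical module under test

test('adds numbers', () => {
  // '12' stands in for the lineId the instrumenter would inject at this call site.
  const result = codeflash.capture('add', '12', add, 2, 2);
  expect(result).toBe(4);
});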
const fs = require('fs');
const path = require('path');

// Load the codeflash serializer for robust value serialization
const serializer = require('./serializer');

// Try to load better-sqlite3, fall back to JSON if not available
let Database = null;
let useSqlite = false;
try {
  Database = require('better-sqlite3');
  useSqlite = true;
} catch (e) {
  // better-sqlite3 is optional; results fall back to the JSON buffer below
  useSqlite = false;
}

// Configuration from environment
const OUTPUT_FILE = process.env.CODEFLASH_OUTPUT_FILE;
const LOOP_INDEX = parseInt(process.env.CODEFLASH_LOOP_INDEX || '1', 10);
const TEST_ITERATION = process.env.CODEFLASH_TEST_ITERATION;
const TEST_MODULE = process.env.CODEFLASH_TEST_MODULE;

// Performance loop configuration - controls batched looping in capturePerf
// Batched looping ensures fair distribution across all test invocations:
//   Batch 1: Test1(5 loops) → Test2(5 loops) → Test3(5 loops)
//   Batch 2: Test1(5 loops) → Test2(5 loops) → Test3(5 loops)
//   ...until time budget exhausted
const PERF_LOOP_COUNT = parseInt(process.env.CODEFLASH_PERF_LOOP_COUNT || '1', 10);
const PERF_MIN_LOOPS = parseInt(process.env.CODEFLASH_PERF_MIN_LOOPS || '5', 10);
const PERF_TARGET_DURATION_MS = parseInt(process.env.CODEFLASH_PERF_TARGET_DURATION_MS || '10000', 10);
const PERF_BATCH_SIZE = parseInt(process.env.CODEFLASH_PERF_BATCH_SIZE || '10', 10);
const PERF_STABILITY_CHECK = (process.env.CODEFLASH_PERF_STABILITY_CHECK || 'false').toLowerCase() === 'true';
// Current batch number - set by loop-runner before each batch
// This allows continuous loop indices even when Jest resets module state
const PERF_CURRENT_BATCH = parseInt(process.env.CODEFLASH_PERF_CURRENT_BATCH || '0', 10);

// Stability constants (matching Python's config_consts.py)
const STABILITY_WINDOW_SIZE = 0.35;
const STABILITY_CENTER_TOLERANCE = 0.0025;
const STABILITY_SPREAD_TOLERANCE = 0.0025;
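As a sketch of how these knobs are typically populated before Jest runs, here is an illustrative setup. The concrete values and output path are hypothetical; only the variable names come from this file, and the fallbacks shown above apply when a variable is unset.

// Illustrative environment for a perf run (values are hypothetical).
process.env.CODEFLASH_OUTPUT_FILE = '/tmp/codeflash_results.sqlite';
process.env.CODEFLASH_PERF_LOOP_COUNT = '100';            // at most 100 timed loops per call site
process.env.CODEFLASH_PERF_BATCH_SIZE = '10';             // 10 loops per capturePerf call, interleaved across tests
process.env.CODEFLASH_PERF_TARGET_DURATION_MS = '10000';  // stop after ~10 s once PERF_MIN_LOOPS is reached
process.env.CODEFLASH_PERF_CURRENT_BATCH = '1';           // bumped by the loop-runner before each batch
const codeflash = require('@codeflash/jest-runtime');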
// Shared state for coordinating batched looping across all capturePerf calls
// Uses process object to persist across Jest's module reloads per test file
const PERF_STATE_KEY = '__codeflash_perf_state__';
if (!process[PERF_STATE_KEY]) {
  process[PERF_STATE_KEY] = {
    startTime: null,           // When benchmarking started
    totalLoopsCompleted: 0,    // Total loops across all invocations
    shouldStop: false,         // Flag to stop all further looping
    currentBatch: 0,           // Current batch number (incremented by runner)
    invocationLoopCounts: {},  // Track loops per invocation: {invocationKey: loopCount}
  };
}
const sharedPerfState = process[PERF_STATE_KEY];

/**
 * Check if the shared time budget has been exceeded.
 * @returns {boolean} True if we should stop looping
 */
function checkSharedTimeLimit() {
  if (sharedPerfState.shouldStop) return true;
  if (sharedPerfState.startTime === null) {
    sharedPerfState.startTime = Date.now();
    return false;
  }
  const elapsed = Date.now() - sharedPerfState.startTime;
  if (elapsed >= PERF_TARGET_DURATION_MS && sharedPerfState.totalLoopsCompleted >= PERF_MIN_LOOPS) {
    sharedPerfState.shouldStop = true;
    return true;
  }
  return false;
}

/**
 * Get the current loop index for a specific invocation.
 * Each invocation tracks its own loop count independently within a batch.
 * The actual loop index is computed as: (batch - 1) * BATCH_SIZE + localIndex
 * This ensures continuous loop indices even when Jest resets module state.
 * @param {string} invocationKey - Unique key for this test invocation
 * @returns {number} The next global loop index for this invocation
 */
function getInvocationLoopIndex(invocationKey) {
  // Track local loop count within this batch (starts at 0)
  if (!sharedPerfState.invocationLoopCounts[invocationKey]) {
    sharedPerfState.invocationLoopCounts[invocationKey] = 0;
  }
  const localIndex = ++sharedPerfState.invocationLoopCounts[invocationKey];

  // Calculate global loop index using batch number from environment
  // PERF_CURRENT_BATCH is 1-based (set by loop-runner before each batch)
  const currentBatch = parseInt(process.env.CODEFLASH_PERF_CURRENT_BATCH || '1', 10);
  const globalIndex = (currentBatch - 1) * PERF_BATCH_SIZE + localIndex;

  return globalIndex;
}
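A worked example of the index arithmetic above, assuming PERF_BATCH_SIZE is 10 and the loop-runner has advanced CODEFLASH_PERF_CURRENT_BATCH to 3:

const PERF_BATCH_SIZE = 10;   // assumed batch size
const currentBatch = 3;       // set by the loop-runner (hypothetical)
const localIndex = 4;         // 4th loop of this invocation within the current batch
const globalIndex = (currentBatch - 1) * PERF_BATCH_SIZE + localIndex;
console.log(globalIndex);     // 24 — continues seamlessly from 20, the last index of batch 2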
/**
 * Increment the batch counter. Called by loop-runner between test file runs.
 */
function incrementBatch() {
  sharedPerfState.currentBatch++;
}

/**
 * Get current batch number.
 */
function getCurrentBatch() {
  return sharedPerfState.currentBatch;
}

// Random seed for reproducible test runs
// Both original and optimized runs use the same seed to get identical "random" values
const RANDOM_SEED = parseInt(process.env.CODEFLASH_RANDOM_SEED, 10);

/**
 * Seeded random number generator using mulberry32 algorithm.
 * This provides reproducible "random" numbers given a fixed seed.
 */
function createSeededRandom(seed) {
  let state = seed;
  return function() {
    state |= 0;
    state = state + 0x6D2B79F5 | 0;
    let t = Math.imul(state ^ state >>> 15, 1 | state);
    t = t + Math.imul(t ^ t >>> 7, 61 | t) ^ t;
    return ((t ^ t >>> 14) >>> 0) / 4294967296;
  };
}
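A quick sketch of the property the seeded generator is relied on for: two generators built from the same seed stay in lockstep, so the original and the optimized run observe identical "random" inputs. The seed value here is arbitrary.

// Determinism check for the mulberry32-based generator defined above.
const a = createSeededRandom(42);
const b = createSeededRandom(42);
console.log(a() === b()); // true
console.log(a() === b()); // true — the sequences match for any number of draws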
// Override non-deterministic APIs with seeded versions if seed is provided
// NOTE: We do NOT seed performance.now() or process.hrtime() as those are used
// internally by this script for timing measurements.
if (!Number.isNaN(RANDOM_SEED) && RANDOM_SEED !== 0) {
  // Seed Math.random
  const seededRandom = createSeededRandom(RANDOM_SEED);
  Math.random = seededRandom;

  // Seed Date.now() and new Date() - use fixed base timestamp that increments
  const SEEDED_BASE_TIME = 1700000000000; // Nov 14, 2023 - fixed reference point
  let dateOffset = 0;
  const OriginalDate = Date;
  const originalDateNow = Date.now;

  Date.now = function() {
    return SEEDED_BASE_TIME + (dateOffset++);
  };

  // Override Date constructor to use seeded time when called without arguments
  function SeededDate(...args) {
    if (args.length === 0) {
      // No arguments: use seeded current time
      return new OriginalDate(SEEDED_BASE_TIME + (dateOffset++));
    }
    // With arguments: use original behavior
    return new OriginalDate(...args);
  }
  SeededDate.prototype = OriginalDate.prototype;
  SeededDate.now = Date.now;
  SeededDate.parse = OriginalDate.parse;
  SeededDate.UTC = OriginalDate.UTC;
  global.Date = SeededDate;

  // Seed crypto.randomUUID() and crypto.getRandomValues()
  try {
    const crypto = require('crypto');
    const randomForCrypto = createSeededRandom(RANDOM_SEED + 1000); // Different seed to avoid correlation

    // Seed crypto.randomUUID()
    if (crypto.randomUUID) {
      const originalRandomUUID = crypto.randomUUID.bind(crypto);
      crypto.randomUUID = function() {
        // Generate a deterministic UUID v4 format
        const hex = () => Math.floor(randomForCrypto() * 16).toString(16);
        const bytes = Array.from({ length: 32 }, hex).join('');
        return `${bytes.slice(0, 8)}-${bytes.slice(8, 12)}-4${bytes.slice(13, 16)}-${(8 + Math.floor(randomForCrypto() * 4)).toString(16)}${bytes.slice(17, 20)}-${bytes.slice(20, 32)}`;
      };
    }

    // Seed crypto.getRandomValues() - used by uuid libraries
    const seededGetRandomValues = function(array) {
      for (let i = 0; i < array.length; i++) {
        if (array instanceof Uint8Array) {
          array[i] = Math.floor(randomForCrypto() * 256);
        } else if (array instanceof Uint16Array) {
          array[i] = Math.floor(randomForCrypto() * 65536);
        } else if (array instanceof Uint32Array) {
          array[i] = Math.floor(randomForCrypto() * 4294967296);
        } else {
          array[i] = Math.floor(randomForCrypto() * 256);
        }
      }
      return array;
    };

    if (crypto.getRandomValues) {
      crypto.getRandomValues = seededGetRandomValues;
    }

    // Also seed webcrypto if available (Node 18+)
    // Use the same seeded function to avoid circular references
    if (crypto.webcrypto) {
      if (crypto.webcrypto.getRandomValues) {
        crypto.webcrypto.getRandomValues = seededGetRandomValues;
      }
      if (crypto.webcrypto.randomUUID) {
        crypto.webcrypto.randomUUID = crypto.randomUUID;
      }
    }
  } catch (e) {
    // crypto module not available, skip seeding
  }
}

// Current test context (set by Jest hooks)
let currentTestName = null;
let currentTestPath = null; // Test file path from Jest

// Invocation counter map: tracks how many times each testId has been seen
// Key: testId (testModule:testClass:testFunction:lineId:loopIndex)
// Value: count (starts at 0, increments each time same key is seen)
const invocationCounterMap = new Map();

// Results buffer (for JSON fallback)
const results = [];

// SQLite database (lazy initialized)
let db = null;

/**
 * Check if performance has stabilized (for internal looping).
 * Matches Python's pytest_plugin.should_stop() logic.
 */
function shouldStopStability(runtimes, window, minWindowSize) {
  if (runtimes.length < window || runtimes.length < minWindowSize) {
    return false;
  }
  const recent = runtimes.slice(-window);
  const recentSorted = [...recent].sort((a, b) => a - b);
  const mid = Math.floor(window / 2);
  const median = window % 2 ? recentSorted[mid] : (recentSorted[mid - 1] + recentSorted[mid]) / 2;

  for (const r of recent) {
    if (Math.abs(r - median) / median > STABILITY_CENTER_TOLERANCE) {
      return false;
    }
  }
  const rMin = recentSorted[0];
  const rMax = recentSorted[recentSorted.length - 1];
  if (rMin === 0) return false;
  return (rMax - rMin) / rMin <= STABILITY_SPREAD_TOLERANCE;
}
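To make the tolerances concrete, a sketch of how the helper above behaves on two hypothetical runtime series (nanoseconds). With a 0.25% center tolerance and a 0.25% spread tolerance, only the nearly flat series is considered stable.

// Hypothetical runtime samples in nanoseconds.
const stable = [1000, 1001, 1000, 1001, 1000];     // within 0.25% of the median, spread 0.1%
const unstable = [1000, 1100, 950, 1200, 1000];    // far outside the 0.25% tolerances
console.log(shouldStopStability(stable, 5, 5));    // true  — looping may stop early
console.log(shouldStopStability(unstable, 5, 5));  // false — keep measuring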
/**
 * Get high-resolution time in nanoseconds.
 * Prefers process.hrtime.bigint() for nanosecond precision,
 * falls back to performance.now() * 1e6 for non-Node environments.
 *
 * @returns {bigint|number} - Time in nanoseconds
 */
function getTimeNs() {
  if (typeof process !== 'undefined' && process.hrtime && process.hrtime.bigint) {
    return process.hrtime.bigint();
  }
  // Fallback to performance.now() in milliseconds, converted to nanoseconds
  const { performance } = require('perf_hooks');
  return BigInt(Math.floor(performance.now() * 1_000_000));
}

/**
 * Calculate duration in nanoseconds.
 *
 * @param {bigint} start - Start time in nanoseconds
 * @param {bigint} end - End time in nanoseconds
 * @returns {number} - Duration in nanoseconds (as Number for SQLite compatibility)
 */
function getDurationNs(start, end) {
  const duration = end - start;
  // Convert to Number for SQLite storage (SQLite INTEGER is 64-bit)
  return Number(duration);
}

/**
 * Sanitize a string for use in test IDs.
 * Replaces special characters that could conflict with regex extraction
 * during stdout parsing.
 *
 * Characters replaced with '_': ! # : (space) ( ) [ ] { } | \ / * ? ^ $ . + -
 *
 * @param {string} str - String to sanitize
 * @returns {string} - Sanitized string safe for test IDs
 */
function sanitizeTestId(str) {
  if (!str) return str;
  // Replace characters that could conflict with our delimiter pattern (######)
  // or the colon-separated format, or general regex metacharacters
  return str.replace(/[!#: ()\[\]{}|\\/*?^$.+\-]/g, '_');
}
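An illustration of the sanitization; the input strings are hypothetical.

// Characters that could collide with the ###### delimiters or the colon format become '_'.
sanitizeTestId('tests/my test (unit).js');
// -> 'tests_my_test__unit__js'
sanitizeTestId('adds 2 + 2');
// -> 'adds_2___2'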
/**
 * Get or create invocation index for a testId.
 * This mirrors Python's index tracking per wrapper function.
 *
 * @param {string} testId - Unique test identifier
 * @returns {number} - Current invocation index (0-based)
 */
function getInvocationIndex(testId) {
  const currentIndex = invocationCounterMap.get(testId);
  if (currentIndex === undefined) {
    invocationCounterMap.set(testId, 0);
    return 0;
  }
  invocationCounterMap.set(testId, currentIndex + 1);
  return currentIndex + 1;
}

/**
 * Reset invocation counter for a test.
 * Called at the start of each test to ensure consistent indexing.
 */
function resetInvocationCounters() {
  invocationCounterMap.clear();
}
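A small sketch of the counter behavior that yields the lineId_index iteration ids; the testId string is hypothetical.

// Same call site seen three times within one test (e.g., inside a loop).
const id = 'tests_calc:null:adds_numbers:4:1'; // hypothetical testId
getInvocationIndex(id); // 0 -> iteration_id '4_0'
getInvocationIndex(id); // 1 -> iteration_id '4_1'
getInvocationIndex(id); // 2 -> iteration_id '4_2'
resetInvocationCounters(); // the next test starts counting from 0 again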
/**
 * Initialize the SQLite database.
 */
function initDatabase() {
  if (!useSqlite || db) return;

  try {
    db = new Database(OUTPUT_FILE);
    db.exec(`
      CREATE TABLE IF NOT EXISTS test_results (
        test_module_path TEXT,
        test_class_name TEXT,
        test_function_name TEXT,
        function_getting_tested TEXT,
        loop_index INTEGER,
        iteration_id TEXT,
        runtime INTEGER,
        return_value BLOB,
        verification_type TEXT
      )
    `);
  } catch (e) {
    console.error('[codeflash] Failed to initialize SQLite:', e.message);
    useSqlite = false;
  }
}
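For reference, a sketch of reading captured rows back out of the results database with better-sqlite3. The path is whatever CODEFLASH_OUTPUT_FILE pointed at; the one below is illustrative.

// Reading captured invocations back (sketch only).
const Database = require('better-sqlite3');
const resultsDb = new Database('/tmp/codeflash_results.sqlite', { readonly: true });
const rows = resultsDb.prepare(
  'SELECT test_function_name, function_getting_tested, iteration_id, runtime FROM test_results'
).all();
// Each row's return_value column is a BLOB produced by serializer.serialize().
resultsDb.close();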
/**
 * Safely serialize a value for storage.
 *
 * @param {any} value - Value to serialize
 * @returns {Buffer} - Serialized value as Buffer
 */
function safeSerialize(value) {
  try {
    return serializer.serialize(value);
  } catch (e) {
    console.warn('[codeflash] Serialization failed:', e.message);
    return Buffer.from(JSON.stringify({ __type: 'SerializationError', error: e.message }));
  }
}

/**
 * Safely deserialize a buffer back to a value.
 *
 * @param {Buffer|Uint8Array} buffer - Serialized buffer
 * @returns {any} - Deserialized value
 */
function safeDeserialize(buffer) {
  try {
    return serializer.deserialize(buffer);
  } catch (e) {
    console.warn('[codeflash] Deserialization failed:', e.message);
    return { __type: 'DeserializationError', error: e.message };
  }
}
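The comparison step relies on serialize and deserialize round-tripping values. A sketch of that expectation, assuming the bundled ./serializer restores plain arrays and objects faithfully:

// Round-trip sketch (assumes the bundled serializer preserves plain values).
const buf = safeSerialize([[1, 2], {}, { sum: 3 }]);   // args, kwargs (empty for JS), return value
const [args, kwargs, returnValue] = safeDeserialize(buf);
console.log(returnValue.sum); // 3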
/**
 * Record a test result to SQLite or JSON buffer.
 *
 * @param {string} testModulePath - Test module path
 * @param {string|null} testClassName - Test class name (null for Jest)
 * @param {string} testFunctionName - Test function name
 * @param {string} funcName - Name of the function being tested
 * @param {string} invocationId - Unique invocation identifier (lineId_index)
 * @param {Array} args - Arguments passed to the function
 * @param {any} returnValue - Return value from the function
 * @param {Error|null} error - Error thrown by the function (if any)
 * @param {number} durationNs - Execution time in nanoseconds
 */
function recordResult(testModulePath, testClassName, testFunctionName, funcName, invocationId, args, returnValue, error, durationNs) {
  // Serialize the return value (args, kwargs (empty for JS), return_value) like Python does
  const serializedValue = error
    ? safeSerialize(error)
    : safeSerialize([args, {}, returnValue]);

  if (useSqlite && db) {
    try {
      const stmt = db.prepare(`
        INSERT INTO test_results VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
      `);
      stmt.run(
        testModulePath,    // test_module_path
        testClassName,     // test_class_name
        testFunctionName,  // test_function_name
        funcName,          // function_getting_tested
        LOOP_INDEX,        // loop_index
        invocationId,      // iteration_id
        durationNs,        // runtime (nanoseconds) - no rounding
        serializedValue,   // return_value (serialized)
        'function_call'    // verification_type
      );
    } catch (e) {
      console.error('[codeflash] Failed to write to SQLite:', e.message);
      // Fall back to JSON
      results.push({
        testModulePath,
        testClassName,
        testFunctionName,
        funcName,
        loopIndex: LOOP_INDEX,
        iterationId: invocationId,
        durationNs,
        returnValue: error ? null : returnValue,
        error: error ? { name: error.name, message: error.message } : null,
        verificationType: 'function_call'
      });
    }
  } else {
    // JSON fallback
    results.push({
      testModulePath,
      testClassName,
      testFunctionName,
      funcName,
      loopIndex: LOOP_INDEX,
      iterationId: invocationId,
      durationNs,
      returnValue: error ? null : returnValue,
      error: error ? { name: error.name, message: error.message } : null,
      verificationType: 'function_call'
    });
  }
}

/**
 * Capture a function call with full behavior tracking.
 *
 * This is the main API for instrumenting function calls for BEHAVIOR verification.
 * It captures inputs, outputs, errors, and timing.
 * Results are written to SQLite for comparison between original and optimized code.
 *
 * Static parameters (funcName, lineId) are determined at instrumentation time.
 * The lineId enables tracking when the same call site is invoked multiple times (e.g., in loops).
 *
 * @param {string} funcName - Name of the function being tested (static)
 * @param {string} lineId - Line number identifier in test file (static)
 * @param {Function} fn - The function to call
 * @param {...any} args - Arguments to pass to the function
 * @returns {any} - The function's return value
 * @throws {Error} - Re-throws any error from the function
 */
function capture(funcName, lineId, fn, ...args) {
  // Validate that fn is actually a function
  if (typeof fn !== 'function') {
    const fnType = fn === null ? 'null' : (fn === undefined ? 'undefined' : typeof fn);
    throw new TypeError(
      `codeflash.capture: Expected function '${funcName}' but got ${fnType}. ` +
      `This usually means the function was not imported correctly. ` +
      `Check that the import statement matches how the module exports the function ` +
      `(e.g., default export vs named export, CommonJS vs ES modules).`
    );
  }

  // Initialize database on first capture
  initDatabase();

  // Get test context (raw values for SQLite storage)
  // Use TEST_MODULE env var if set, otherwise derive from test file path
  let testModulePath;
  if (TEST_MODULE) {
    testModulePath = TEST_MODULE;
  } else if (currentTestPath) {
    // Get relative path from cwd and convert to module-style path
    const path = require('path');
    const relativePath = path.relative(process.cwd(), currentTestPath);
    // Convert to Python module-style path (e.g., "tests/test_foo.test.js" -> "tests.test_foo.test")
    // This matches what Jest's junit XML produces
    testModulePath = relativePath
      .replace(/\\/g, '/')          // Handle Windows paths
      .replace(/\.js$/, '')         // Remove .js extension
      .replace(/\.test$/, '.test')  // Keep .test suffix
      .replace(/\//g, '.');         // Convert path separators to dots
  } else {
    testModulePath = currentTestName || 'unknown';
  }
  const testClassName = null; // Jest doesn't use classes like Python
  const testFunctionName = currentTestName || 'unknown';

  // Sanitized versions for stdout tags (avoid regex conflicts)
  const safeModulePath = sanitizeTestId(testModulePath);
  const safeTestFunctionName = sanitizeTestId(testFunctionName);

  // Create testId for invocation tracking (matches Python format)
  const testId = `${safeModulePath}:${testClassName}:${safeTestFunctionName}:${lineId}:${LOOP_INDEX}`;

  // Get invocation index (increments if same testId seen again)
  const invocationIndex = getInvocationIndex(testId);
  const invocationId = `${lineId}_${invocationIndex}`;

  // Format stdout tag (matches Python format, uses sanitized names)
  const testStdoutTag = `${safeModulePath}:${testClassName ? testClassName + '.' : ''}${safeTestFunctionName}:${funcName}:${LOOP_INDEX}:${invocationId}`;

  // Print start tag
  console.log(`!$######${testStdoutTag}######$!`);

  // Timing with nanosecond precision
  const startTime = getTimeNs();
  let returnValue;
  let error = null;

  try {
    returnValue = fn(...args);

    // Handle promises (async functions)
    if (returnValue instanceof Promise) {
      return returnValue.then(
        (resolved) => {
          const endTime = getTimeNs();
          const durationNs = getDurationNs(startTime, endTime);
          recordResult(testModulePath, testClassName, testFunctionName, funcName, invocationId, args, resolved, null, durationNs);
          // Print end tag (no duration for behavior mode)
          console.log(`!######${testStdoutTag}######!`);
          return resolved;
        },
        (err) => {
          const endTime = getTimeNs();
          const durationNs = getDurationNs(startTime, endTime);
          recordResult(testModulePath, testClassName, testFunctionName, funcName, invocationId, args, null, err, durationNs);
          console.log(`!######${testStdoutTag}######!`);
          throw err;
        }
      );
    }
  } catch (e) {
    error = e;
  }

  const endTime = getTimeNs();
  const durationNs = getDurationNs(startTime, endTime);
  recordResult(testModulePath, testClassName, testFunctionName, funcName, invocationId, args, returnValue, error, durationNs);

  // Print end tag (no duration for behavior mode, matching Python)
  console.log(`!######${testStdoutTag}######!`);

  if (error) throw error;
  return returnValue;
}
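Concretely, a behavior-mode call brackets the function execution with a start tag and an end tag on stdout and carries no duration (timing goes into the database instead). The test file, test name, and line id below are hypothetical; the tag shape follows the template in capture().

// Inside a hypothetical test 'adds numbers' in tests/calc.test.js:
codeflash.capture('add', '4', add, 2, 2);
// stdout, with LOOP_INDEX = 1 and the first invocation of line id '4':
//   !$######tests_calc_test:adds_numbers:add:1:4_0######$!
//   !######tests_calc_test:adds_numbers:add:1:4_0######!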
/**
 * Capture a function call for PERFORMANCE benchmarking only.
 *
 * This is a lightweight instrumentation that only measures timing.
 * It prints start/end tags to stdout (no SQLite writes, no serialization overhead).
 * Used when we've already verified behavior and just need accurate timing.
 *
 * When CODEFLASH_PERF_LOOP_COUNT > 1, this function loops internally to avoid
 * Jest environment overhead per iteration. This dramatically improves utilization
 * (time spent in actual function execution vs overhead).
 *
 * Output format matches Python's codeflash_performance wrapper:
 *   Start: !$######test_module:test_class.test_name:func_name:loop_index:invocation_id######$!
 *   End:   !######test_module:test_class.test_name:func_name:loop_index:invocation_id:duration_ns######!
 *
 * @param {string} funcName - Name of the function being tested (static)
 * @param {string} lineId - Line number identifier in test file (static)
 * @param {Function} fn - The function to call
 * @param {...any} args - Arguments to pass to the function
 * @returns {any} - The function's return value
 * @throws {Error} - Re-throws any error from the function
 */
function capturePerf(funcName, lineId, fn, ...args) {
  // Check if we should skip looping entirely (shared time budget exceeded)
  const shouldLoop = PERF_LOOP_COUNT > 1 && !checkSharedTimeLimit();

  // Get test context (computed once, reused across batch)
  let testModulePath;
  if (TEST_MODULE) {
    testModulePath = TEST_MODULE;
  } else if (currentTestPath) {
    const path = require('path');
    const relativePath = path.relative(process.cwd(), currentTestPath);
    testModulePath = relativePath
      .replace(/\\/g, '/')
      .replace(/\.js$/, '')
      .replace(/\.test$/, '.test')
      .replace(/\//g, '.');
  } else {
    testModulePath = currentTestName || 'unknown';
  }
  const testClassName = null;
  const testFunctionName = currentTestName || 'unknown';

  const safeModulePath = sanitizeTestId(testModulePath);
  const safeTestFunctionName = sanitizeTestId(testFunctionName);

  // Create unique key for this invocation (identifies this specific capturePerf call site)
  const invocationKey = `${safeModulePath}:${testClassName}:${safeTestFunctionName}:${funcName}:${lineId}`;

  // Check if we've already completed all loops for this invocation
  // If so, just execute the function once without timing (for test assertions)
  const peekLoopIndex = (sharedPerfState.invocationLoopCounts[invocationKey] || 0);
  const currentBatch = parseInt(process.env.CODEFLASH_PERF_CURRENT_BATCH || '1', 10);
  const nextGlobalIndex = (currentBatch - 1) * PERF_BATCH_SIZE + peekLoopIndex + 1;

  if (shouldLoop && nextGlobalIndex > PERF_LOOP_COUNT) {
    // All loops completed, just execute once for test assertion
    return fn(...args);
  }

  let lastReturnValue;
  let lastError = null;

  // Batched looping: run BATCH_SIZE loops per capturePerf call
  // This ensures fair distribution across all test invocations
  const batchSize = shouldLoop ? PERF_BATCH_SIZE : 1;

  for (let batchIndex = 0; batchIndex < batchSize; batchIndex++) {
    // Check shared time limit BEFORE each iteration
    if (shouldLoop && checkSharedTimeLimit()) {
      break;
    }

    // Get the global loop index for this invocation (increments across batches)
    const loopIndex = getInvocationLoopIndex(invocationKey);

    // Check if we've exceeded max loops for this invocation
    if (loopIndex > PERF_LOOP_COUNT) {
      break;
    }

    // Get invocation index for the timing marker
    const testId = `${safeModulePath}:${testClassName}:${safeTestFunctionName}:${lineId}:${loopIndex}`;
    const invocationIndex = getInvocationIndex(testId);
    const invocationId = `${lineId}_${invocationIndex}`;

    // Format stdout tag with current loop index
    const testStdoutTag = `${safeModulePath}:${testClassName ? testClassName + '.' : ''}${safeTestFunctionName}:${funcName}:${loopIndex}:${invocationId}`;

    // Timing with nanosecond precision
    let durationNs;
    try {
      const startTime = getTimeNs();
      lastReturnValue = fn(...args);
      const endTime = getTimeNs();
      durationNs = getDurationNs(startTime, endTime);

      // Handle promises - for async functions, run once and return
      if (lastReturnValue instanceof Promise) {
        return lastReturnValue.then(
          (resolved) => {
            const asyncEndTime = getTimeNs();
            const asyncDurationNs = getDurationNs(startTime, asyncEndTime);
            console.log(`!######${testStdoutTag}:${asyncDurationNs}######!`);
            sharedPerfState.totalLoopsCompleted++;
            return resolved;
          },
          (err) => {
            const asyncEndTime = getTimeNs();
            const asyncDurationNs = getDurationNs(startTime, asyncEndTime);
            console.log(`!######${testStdoutTag}:${asyncDurationNs}######!`);
            sharedPerfState.totalLoopsCompleted++;
            throw err;
          }
        );
      }

      lastError = null;
    } catch (e) {
      durationNs = 0;
      lastError = e;
    }

    // Print end tag with timing
    console.log(`!######${testStdoutTag}:${durationNs}######!`);

    // Update shared loop counter
    sharedPerfState.totalLoopsCompleted++;

    // If we had an error, stop looping
    if (lastError) {
      break;
    }
  }

  if (lastError) throw lastError;

  // If we never executed (e.g., hit loop limit on first iteration), run once for assertion
  if (lastReturnValue === undefined && !lastError) {
    return fn(...args);
  }

  return lastReturnValue;
}
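The perf end tag appends the measured duration as a final colon-separated field. A sketch of how such lines could be pulled back out of captured stdout; the delimiters and field order come from this file, while the parsing code itself is illustrative rather than the CLI's own parser.

// Extracting nanosecond timings from a perf-mode end tag (illustrative).
const line = '!######tests_calc_test:adds_numbers:add:7:4_0:1523######!';
const inner = line.slice('!######'.length, -'######!'.length); // tests_calc_test:adds_numbers:add:7:4_0:1523
const parts = inner.split(':');
const durationNs = Number(parts.pop());  // 1523
const invocationId = parts.pop();        // '4_0'
const loopIndex = Number(parts.pop());   // 7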
/**
 * Capture multiple invocations for benchmarking.
 *
 * @param {string} funcName - Name of the function being tested
 * @param {string} lineId - Line number identifier
 * @param {Function} fn - The function to call
 * @param {Array<Array>} argsList - List of argument arrays to test
 * @returns {Array} - Array of return values
 */
function captureMultiple(funcName, lineId, fn, argsList) {
  return argsList.map(args => capture(funcName, lineId, fn, ...args));
}

/**
 * Write remaining JSON results to file (fallback mode).
 * Called automatically via Jest afterAll hook.
 */
function writeResults() {
  // Close SQLite connection if open
  if (db) {
    try {
      db.close();
    } catch (e) {
      // Ignore close errors
    }
    db = null;
    return;
  }

  // Write JSON fallback if SQLite wasn't used
  if (results.length === 0) return;

  try {
    // Write as JSON for fallback parsing
    const jsonPath = OUTPUT_FILE.replace('.sqlite', '.json');
    const output = {
      version: '1.0.0',
      loopIndex: LOOP_INDEX,
      timestamp: Date.now(),
      results
    };
    fs.writeFileSync(jsonPath, JSON.stringify(output, null, 2));
  } catch (e) {
    console.error('[codeflash] Error writing JSON results:', e.message);
  }
}

/**
 * Reset shared performance state.
 * Should be called at the start of each test file to reset timing.
 */
function resetPerfState() {
  sharedPerfState.startTime = null;
  sharedPerfState.totalLoopsCompleted = 0;
  sharedPerfState.shouldStop = false;
}

/**
 * Clear all recorded results.
 * Useful for resetting between test files.
 */
function clearResults() {
  results.length = 0;
  resetInvocationCounters();
  resetPerfState();
}

/**
 * Get the current results buffer.
 * Useful for debugging or custom result handling.
 *
 * @returns {Array} - Current results buffer
 */
function getResults() {
  return results;
}

/**
 * Set the current test name.
 * Called automatically via Jest beforeEach hook.
 *
 * @param {string} name - Test name
 */
function setTestName(name) {
  currentTestName = name;
  resetInvocationCounters();
}

// Jest lifecycle hooks - these run automatically when this module is imported
if (typeof beforeEach !== 'undefined') {
  beforeEach(() => {
    // Get current test name and path from Jest's expect state
    try {
      const state = expect.getState();
      currentTestName = state.currentTestName || 'unknown';
      // testPath is the absolute path to the test file
      currentTestPath = state.testPath || null;
    } catch (e) {
      currentTestName = 'unknown';
      currentTestPath = null;
    }
    // Reset invocation counters for each test
    resetInvocationCounters();
  });
}

if (typeof afterAll !== 'undefined') {
  afterAll(() => {
    writeResults();
  });
}

// Export public API
module.exports = {
  capture,          // Behavior verification (writes to SQLite)
  capturePerf,      // Performance benchmarking (prints to stdout only)
  captureMultiple,
  writeResults,
  clearResults,
  getResults,
  setTestName,
  safeSerialize,
  safeDeserialize,
  initDatabase,
  resetInvocationCounters,
  getInvocationIndex,
  sanitizeTestId,   // Sanitize test names for stdout tags
  // Batch looping control (used by loop-runner)
  incrementBatch,
  getCurrentBatch,
  checkSharedTimeLimit,
  // Serializer info
  getSerializerType: serializer.getSerializerType,
  // Constants
  LOOP_INDEX,
  OUTPUT_FILE,
  TEST_ITERATION,
  // Batch configuration
  PERF_BATCH_SIZE,
  PERF_LOOP_COUNT,
};