@eagleoutice/flowr 2.0.8 → 2.0.10
This diff shows the contents of the publicly released package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the two versions.
- package/benchmark/stats/print.js +19 -15
- package/benchmark/summarizer/summarizer.js +9 -8
- package/cli/benchmark-app.js +7 -5
- package/cli/benchmark-helper-app.js +3 -0
- package/cli/repl/server/connection.d.ts +4 -0
- package/cli/repl/server/connection.js +25 -11
- package/dataflow/environments/environment.d.ts +2 -2
- package/dataflow/environments/environment.js +1 -1
- package/package.json +1 -1
- package/util/mermaid/dfg.js +3 -3
- package/util/mermaid/mermaid.js +2 -1
- package/util/version.js +1 -1
package/benchmark/stats/print.js
CHANGED
@@ -10,6 +10,9 @@ function formatNanoseconds(nanoseconds) {
     if (nanoseconds < 0) {
         return '??';
     }
+    else if (!Number.isFinite(nanoseconds)) {
+        return nanoseconds > 0 ? '∞' : '-∞';
+    }
     const wholeNanos = typeof nanoseconds === 'bigint' ? nanoseconds : BigInt(Math.round(nanoseconds));
     const nanos = wholeNanos % BigInt(1e+6);
     const wholeMillis = wholeNanos / BigInt(1e+6);
@@ -145,7 +148,21 @@ Dataflow:
 }
 exports.stats2string = stats2string;
 function ultimateStats2String(stats) {
-
+    const slice = stats.totalSlices > 0 ? `Slice summary for:
+Total: ${formatSummarizedTimeMeasure(stats.perSliceMeasurements.get('total'))}
+Slice creation: ${formatSummarizedTimeMeasure(stats.perSliceMeasurements.get('static slicing'))}
+Slice creation per token in slice: ${formatSummarizedTimeMeasure(stats.sliceTimePerToken.normalized)}
+Slice creation per R token in slice:${formatSummarizedTimeMeasure(stats.sliceTimePerToken.raw)}
+Reconstruction: ${formatSummarizedTimeMeasure(stats.perSliceMeasurements.get('reconstruct code'))}
+Reconstruction per token in slice: ${formatSummarizedTimeMeasure(stats.reconstructTimePerToken.normalized)}
+Reconstruction per R token in slice:${formatSummarizedTimeMeasure(stats.reconstructTimePerToken.raw)}
+Total per token in slice: ${formatSummarizedTimeMeasure(stats.totalPerSliceTimePerToken.normalized)}
+Total per R token in slice: ${formatSummarizedTimeMeasure(stats.totalPerSliceTimePerToken.raw)}
+Failed to Re-Parse: ${pad(stats.failedToRepParse)}/${stats.totalSlices}
+Times hit Threshold: ${pad(stats.timesHitThreshold)}/${stats.totalSlices}
+${reduction2String('Reductions', stats.reduction)}
+${reduction2String('Reductions without comments and empty lines', stats.reductionNoFluff)}` : 'No slices';
+    // Used Slice Criteria Sizes: ${formatSummarizedMeasure(stats.perSliceMeasurements.sliceCriteriaSizes)}
     return `
 Summarized: ${stats.totalRequests} requests and ${stats.totalSlices} slices
 Shell init time: ${formatSummarizedTimeMeasure(stats.commonMeasurements.get('initialize R session'))}
@@ -161,20 +178,7 @@ Dataflow creation per R token:${formatSummarizedTimeMeasure(stats.dataflowTimePe
 Total common time per token: ${formatSummarizedTimeMeasure(stats.totalCommonTimePerToken.normalized)}
 Total common time per R token:${formatSummarizedTimeMeasure(stats.totalCommonTimePerToken.raw)}

-
-Total: ${formatSummarizedTimeMeasure(stats.perSliceMeasurements.get('total'))}
-Slice creation: ${formatSummarizedTimeMeasure(stats.perSliceMeasurements.get('static slicing'))}
-Slice creation per token in slice: ${formatSummarizedTimeMeasure(stats.sliceTimePerToken.normalized)}
-Slice creation per R token in slice:${formatSummarizedTimeMeasure(stats.sliceTimePerToken.raw)}
-Reconstruction: ${formatSummarizedTimeMeasure(stats.perSliceMeasurements.get('reconstruct code'))}
-Reconstruction per token in slice: ${formatSummarizedTimeMeasure(stats.reconstructTimePerToken.normalized)}
-Reconstruction per R token in slice:${formatSummarizedTimeMeasure(stats.reconstructTimePerToken.raw)}
-Total per token in slice: ${formatSummarizedTimeMeasure(stats.totalPerSliceTimePerToken.normalized)}
-Total per R token in slice: ${formatSummarizedTimeMeasure(stats.totalPerSliceTimePerToken.raw)}
-Failed to Re-Parse: ${pad(stats.failedToRepParse)}/${stats.totalSlices}
-Times hit Threshold: ${pad(stats.timesHitThreshold)}/${stats.totalSlices}
-${reduction2String('Reductions', stats.reduction)}
-${reduction2String('Reductions without comments and empty lines', stats.reductionNoFluff)}
+${slice}

 Shell close: ${formatSummarizedTimeMeasure(stats.commonMeasurements.get('close R session'))}
 Total: ${formatSummarizedTimeMeasure(stats.commonMeasurements.get('total'))}
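The added branch in formatNanoseconds matters because BigInt() throws a RangeError for Infinity and NaN, so the guard has to run before the conversion; the second hunk moves the per-slice block of ultimateStats2String into a slice variable so summaries with totalSlices === 0 print 'No slices' instead of formatting measurements that do not exist. A minimal sketch of the guard, assuming a number-only input (the real function also accepts bigint) and a hypothetical helper name:

    // Sketch only, not the package's exact implementation: the non-finite check
    // must run before the BigInt conversion, since BigInt(Infinity) throws.
    function formatNs(nanoseconds: number): string {
        if (nanoseconds < 0) {
            return '??';
        } else if (!Number.isFinite(nanoseconds)) {
            return nanoseconds > 0 ? '∞' : '-∞';
        }
        return `${BigInt(Math.round(nanoseconds))} ns`;
    }

    formatNs(Infinity); // '∞' rather than a thrown RangeError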
package/benchmark/summarizer/summarizer.js
CHANGED

@@ -21,20 +21,21 @@ class BenchmarkSummarizer extends summarizer_1.Summarizer {
     async preparationPhase() {
         this.removeIfExists(this.summaryFile());
         this.removeIfExists(this.config.intermediateOutputPath);
-        fs_1.default.mkdirSync(this.config.intermediateOutputPath);
-
+        fs_1.default.mkdirSync(this.config.intermediateOutputPath, { recursive: true });
+        let fileNum = 0;
         const outputPathsPerRun = new defaultmap_1.DefaultMap(() => []);
-
-
-            const outputDir = path_1.default.join(this.config.intermediateOutputPath, path_1.default.
-            fs_1.default.mkdirSync(outputDir);
+        // recursively find all files in all the input path subdirectories
+        for await (const file of (0, files_1.getAllFiles)(this.config.inputPath)) {
+            const outputDir = path_1.default.join(this.config.intermediateOutputPath, path_1.default.relative(this.config.inputPath, file));
+            fs_1.default.mkdirSync(outputDir, { recursive: true });
             const textOutputPath = path_1.default.join(outputDir, 'summary.log');
             // generate measurements for each run
-            await (0, files_1.readLineByLine)(
+            await (0, files_1.readLineByLine)(file, (line, lineNumber) => {
                 const runOutputPath = path_1.default.join(outputDir, `run-${lineNumber}.json`);
                 outputPathsPerRun.get(lineNumber).push(runOutputPath);
-                return (0, input_1.processRunMeasurement)(line,
+                return (0, input_1.processRunMeasurement)(line, fileNum, lineNumber, textOutputPath, runOutputPath);
             });
+            fileNum++;
         }
         // generate combined measurements for each file per run
         for (const [run, paths] of outputPathsPerRun.entries()) {
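The preparation phase now walks every file below config.inputPath and mirrors its relative path under the intermediate output directory, which is why mkdirSync needs { recursive: true }. A small sketch of that mirroring pattern; the function and parameter names here are illustrative, not flowr's API:

    import fs from 'fs';
    import path from 'path';

    // Sketch only: mirror a source file's relative location below an output root.
    function intermediateDirFor(inputRoot: string, file: string, outputRoot: string): string {
        const outputDir = path.join(outputRoot, path.relative(inputRoot, file));
        // { recursive: true } creates missing parents and does not throw if the directory already exists
        fs.mkdirSync(outputDir, { recursive: true });
        return outputDir;
    }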
package/cli/benchmark-app.js
CHANGED
@@ -32,13 +32,15 @@ function removeIfExists(summarizedRaw) {
 }
 async function benchmark() {
     removeIfExists(options.output);
-    fs_1.default.mkdirSync(options.output);
+    fs_1.default.mkdirSync(options.output, { recursive: true });
     console.log(`Storing output in ${options.output}`);
     console.log(`Using ${options.parallel} parallel executors`);
     // we do not use the limit argument to be able to pick the limit randomly
     const files = [];
-    for
-
+    for (const input of options.input) {
+        for await (const file of (0, files_1.allRFiles)(input)) {
+            files.push({ request: file, baseDir: input });
+        }
     }
     if (options.limit) {
         log_1.log.info(`limiting to ${options.limit} files`);
@@ -48,9 +50,9 @@ async function benchmark() {
     const limit = options.limit ?? files.length;
     const verboseAdd = options.verbose ? ['--verbose'] : [];
     const args = files.map((f, i) => [
-        '--input', f.content,
+        '--input', f.request.content,
         '--file-id', `${i}`,
-        '--output', path_1.default.join(options.output,
+        '--output', path_1.default.join(options.output, path_1.default.relative(f.baseDir, `${f.request.content}.json`)),
         '--slice', options.slice, ...verboseAdd
     ]);
     const runs = options.runs ?? 1;
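options.input is now treated as a list of base directories, and each discovered R file carries its baseDir so that the per-file output lands under a path mirroring the input tree. Roughly how the '--output' argument is derived; the concrete paths below are made up for illustration:

    import path from 'path';

    // Illustration only: deriving the per-file output path from a request and its base directory.
    const f = { request: { content: '/data/r-scripts/pkg/analysis.R' }, baseDir: '/data/r-scripts' };
    const outputRoot = '/tmp/benchmark-out';
    const outPath = path.join(outputRoot, path.relative(f.baseDir, `${f.request.content}.json`));
    // on POSIX: '/tmp/benchmark-out/pkg/analysis.R.json'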
package/cli/benchmark-helper-app.js
CHANGED

@@ -10,6 +10,7 @@ const json_1 = require("../util/json");
 const script_1 = require("./common/script");
 const slicer_1 = require("../benchmark/slicer");
 const all_variables_1 = require("../slicing/criterion/filters/all-variables");
+const path_1 = __importDefault(require("path"));
 const options = (0, script_1.processCommandLineArgs)('benchmark-helper', [], {
     subtitle: 'Will slice for all possible variables, signal by exit code if slicing was successful, and can be run standalone',
     examples: [
@@ -30,6 +31,7 @@ async function benchmark() {
     // prefix for printing to console, includes file id and run number if present
     const prefix = `[${options.input}${options['file-id'] !== undefined ? ` (file ${options['file-id']}, run ${options['run-num']})` : ''}]`;
     console.log(`${prefix} Appending output to ${options.output}`);
+    fs_1.default.mkdirSync(path_1.default.parse(options.output).dir, { recursive: true });
     // ensure the file exists
     const fileStat = fs_1.default.statSync(options.input);
     (0, assert_1.guard)(fileStat.isFile(), `File ${options.input} does not exist or is no file`);
@@ -61,6 +63,7 @@ async function benchmark() {
         stats
     };
     // append line by line
+    console.log(`Appending benchmark of ${options.input} to ${options.output}`);
     fs_1.default.appendFileSync(options.output, `${JSON.stringify(output, json_1.jsonReplacer)}\n`);
 }
 catch (e) {
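Because the output file can now live in a nested, not-yet-existing directory (see the benchmark-app change above), the helper creates the parent directory before appending. A hedged sketch of that pattern with a made-up output path:

    import fs from 'fs';
    import path from 'path';

    // Sketch: ensure the parent directory of an append target exists before writing to it.
    const outputFile = '/tmp/benchmark-out/pkg/analysis.R.json'; // hypothetical path
    fs.mkdirSync(path.parse(outputFile).dir, { recursive: true });
    fs.appendFileSync(outputFile, JSON.stringify({ ok: true }) + '\n');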
package/cli/repl/server/connection.d.ts
CHANGED

@@ -1,5 +1,8 @@
 import type { Socket } from './net';
+import { DEFAULT_SLICING_PIPELINE } from '../../../core/steps/pipeline/default-pipelines';
 import type { RShell } from '../../../r-bridge/shell';
+import type { PipelineOutput } from '../../../core/steps/pipeline/pipeline';
+import type { DeepPartial } from 'ts-essentials';
 /**
  * Each connection handles a single client, answering to its requests.
  * There is no need to construct this class manually, {@link FlowRServer} will do it for you.
@@ -19,3 +22,4 @@ export declare class FlowRServerConnection {
     private handleSliceRequest;
     private handleRepl;
 }
+export declare function sanitizeAnalysisResults(results: Partial<PipelineOutput<typeof DEFAULT_SLICING_PIPELINE>>): DeepPartial<PipelineOutput<typeof DEFAULT_SLICING_PIPELINE>>;
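The typings now export sanitizeAnalysisResults alongside FlowRServerConnection. A hedged usage sketch; the deep import path below is an assumption and may not be part of the package's supported entry points:

    // Assumption: the compiled module is reachable by its file path inside the package.
    import { sanitizeAnalysisResults } from '@eagleoutice/flowr/cli/repl/server/connection';

    declare const results: Parameters<typeof sanitizeAnalysisResults>[0];
    const safe = sanitizeAnalysisResults(results); // DeepPartial copy intended for JSON serialization (see connection.js below)
    const payload = JSON.stringify(safe);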
package/cli/repl/server/connection.js
CHANGED

@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.FlowRServerConnection = void 0;
+exports.sanitizeAnalysisResults = exports.FlowRServerConnection = void 0;
 const send_1 = require("./send");
 const validate_1 = require("./validate");
 const analysis_1 = require("./messages/analysis");
@@ -103,6 +103,7 @@ class FlowRServerConnection {
             cfg = (0, cfg_1.extractCFG)(results.normalize);
         }
         const config = () => ({ context: message.filename ?? 'unknown', getId: (0, quads_1.defaultQuadIdGenerator)() });
+        const sanitizedResults = sanitizeAnalysisResults(results);
         if (message.format === 'n-quads') {
             (0, send_1.sendMessage)(this.socket, {
                 type: 'response-file-analysis',
@@ -110,9 +111,9 @@ class FlowRServerConnection {
                 id: message.id,
                 cfg: cfg ? (0, cfg_1.cfg2quads)(cfg, config()) : undefined,
                 results: {
-                    parse: await (0, print_1.printStepResult)(_00_parse_1.PARSE_WITH_R_SHELL_STEP,
-                    normalize: await (0, print_1.printStepResult)(_10_normalize_1.NORMALIZE,
-                    dataflow: await (0, print_1.printStepResult)(_20_dataflow_1.STATIC_DATAFLOW,
+                    parse: await (0, print_1.printStepResult)(_00_parse_1.PARSE_WITH_R_SHELL_STEP, sanitizedResults.parse, 5 /* StepOutputFormat.RdfQuads */, config()),
+                    normalize: await (0, print_1.printStepResult)(_10_normalize_1.NORMALIZE, sanitizedResults.normalize, 5 /* StepOutputFormat.RdfQuads */, config()),
+                    dataflow: await (0, print_1.printStepResult)(_20_dataflow_1.STATIC_DATAFLOW, sanitizedResults.dataflow, 5 /* StepOutputFormat.RdfQuads */, config())
                 }
             });
         }
@@ -122,13 +123,7 @@ class FlowRServerConnection {
                 format: 'json',
                 id: message.id,
                 cfg,
-                results: {
-                    ...results,
-                    normalize: {
-                        ...results.normalize,
-                        idMap: undefined
-                    }
-                }
+                results: sanitizedResults
             });
         }
     }
@@ -215,4 +210,23 @@ class FlowRServerConnection {
     }
 }
 exports.FlowRServerConnection = FlowRServerConnection;
+function sanitizeAnalysisResults(results) {
+    return {
+        ...results,
+        normalize: {
+            ...results.normalize,
+            idMap: undefined
+        },
+        dataflow: {
+            ...results.dataflow,
+            graph: {
+                ...results.dataflow?.graph,
+                functionCache: undefined,
+                // @ts-expect-error this is private, but we want to sanitize it for the purpose of json serialization
+                _idMap: undefined
+            }
+        }
+    };
+}
+exports.sanitizeAnalysisResults = sanitizeAnalysisResults;
 //# sourceMappingURL=connection.js.map
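sanitizeAnalysisResults centralizes what the JSON branch previously did inline: besides dropping normalize.idMap, it now also clears the dataflow graph's functionCache and private _idMap before the results are serialized for the response. A simplified sketch of the same spread-and-override pattern on stand-in types, not flowr's actual interfaces:

    // Stand-in types for illustration; the real signature uses PipelineOutput<typeof DEFAULT_SLICING_PIPELINE>.
    interface ResultsLike {
        normalize?: { idMap?: unknown };
        dataflow?: { graph?: { functionCache?: unknown; _idMap?: unknown } };
    }

    function sanitize(results: ResultsLike): ResultsLike {
        return {
            ...results,
            normalize: { ...results.normalize, idMap: undefined },
            dataflow: {
                ...results.dataflow,
                // overriding after the spread removes the heavyweight fields from the copy only
                graph: { ...results.dataflow?.graph, functionCache: undefined, _idMap: undefined }
            }
        };
    }

Setting the fields to undefined rather than deleting them is enough here because JSON.stringify omits properties whose value is undefined.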
package/dataflow/environments/environment.d.ts
CHANGED

@@ -12,7 +12,7 @@ export declare function makeAllMaybe(references: readonly IdentifierReference[]
 export type EnvironmentMemory = Map<Identifier, IdentifierDefinition[]>;
 export interface IEnvironment {
     /** unique and internally generated identifier -- will not be used for comparison but assists debugging for tracking identities */
-    readonly id:
+    readonly id: number;
     /** Lexical parent of the environment, if any (can be manipulated by R code) */
     parent: IEnvironment;
     /**
@@ -21,7 +21,7 @@ export interface IEnvironment {
     memory: EnvironmentMemory;
 }
 export declare class Environment implements IEnvironment {
-    readonly id:
+    readonly id: number;
     parent: IEnvironment;
     memory: Map<Identifier, IdentifierDefinition[]>;
     constructor(parent: IEnvironment);
package/dataflow/environments/environment.js
CHANGED

@@ -39,7 +39,7 @@ function makeAllMaybe(references, graph, environments, includeDefs, defaultCd =
 exports.makeAllMaybe = makeAllMaybe;
 let environmentIdCounter = 0;
 class Environment {
-    id =
+    id = environmentIdCounter++;
     parent;
     memory;
     constructor(parent) {
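The id field on IEnvironment and Environment is now declared as number, matching the module-level counter used in the implementation. A small sketch of the scheme; the class name is illustrative:

    // Each environment receives a unique, monotonically increasing numeric id;
    // per the doc comment it assists debugging/tracing and is not used for comparisons.
    let environmentIdCounter = 0;

    class EnvironmentSketch {
        readonly id: number = environmentIdCounter++;
        constructor(public parent?: EnvironmentSketch) {}
    }

    const outer = new EnvironmentSketch();
    const inner = new EnvironmentSketch(outer);
    // outer.id === 0, inner.id === 1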
package/package.json
CHANGED
package/util/mermaid/dfg.js
CHANGED
@@ -57,11 +57,11 @@ function printArg(arg) {
         return '[empty]';
     }
     else if ((0, graph_1.isNamedArgument)(arg)) {
-        const deps = arg.controlDependencies ? ', :
+        const deps = arg.controlDependencies ? ', :may:' + arg.controlDependencies.map(c => c.id + (c.when ? '+' : '-')).join(',') : '';
         return `${arg.name} (${arg.nodeId}${deps})`;
     }
     else if ((0, graph_1.isPositionalArgument)(arg)) {
-        const deps = arg.controlDependencies ? ' (:
+        const deps = arg.controlDependencies ? ' (:may:' + arg.controlDependencies.map(c => c.id + (c.when ? '+' : '-')).join(',') + ')' : '';
         return `${arg.nodeId}${deps}`;
     }
     else {
@@ -128,7 +128,7 @@ function vertexToMermaid(info, mermaid, id, idPrefix, mark) {
     const node = mermaid.rootGraph.idMap?.get(info.id);
     const lexeme = node?.lexeme ?? (node?.type === "RExpressionList" /* RType.ExpressionList */ ? node?.grouping?.[0]?.lexeme : '') ?? '??';
     const escapedName = (0, mermaid_1.escapeMarkdown)(node ? `[${node.type}] ${lexeme}` : '??');
-    const deps = info.controlDependencies ? ', :
+    const deps = info.controlDependencies ? ', :may:' + info.controlDependencies.map(c => c.id + (c.when ? '+' : '-')).join(',') : '';
     const n = node?.info.fullRange ?? node?.location ?? (node?.type === "RExpressionList" /* RType.ExpressionList */ ? node?.grouping?.[0].location : undefined);
     mermaid.nodeLines.push(` ${idPrefix}${id}${open}"\`${escapedName}${escapedName.length > 10 ? '\n ' : ' '}(${id}${deps})\n *${formatRange(n)}*${fCall ? displayFunctionArgMapping(info.args) : ''}\`"${close}`);
     if (mark?.has(id)) {
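In the updated label code, control dependencies of arguments and vertices are rendered as a ':may:' suffix listing each governing node's id followed by '+' when its when flag is truthy and '-' otherwise. A compact sketch of just that label-building step, with simplified types:

    interface ControlDependency { id: string | number; when?: boolean }

    // Builds the ', :may:...' suffix used in the mermaid node and argument labels.
    function depsLabel(controlDependencies?: readonly ControlDependency[]): string {
        return controlDependencies
            ? ', :may:' + controlDependencies.map(c => `${c.id}${c.when ? '+' : '-'}`).join(',')
            : '';
    }

    depsLabel([{ id: 3, when: true }, { id: 7 }]); // ', :may:3+,7-'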
package/util/mermaid/mermaid.js
CHANGED
package/util/version.js
CHANGED
@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.flowrVersion = void 0;
 const semver_1 = require("semver");
 // this is automatically replaced with the current version by release-it
-const version = '2.0.8';
+const version = '2.0.10';
 function flowrVersion() {
     return new semver_1.SemVer(version);
 }