@reliverse/dler 2.2.5 → 2.2.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +14 -14
- package/dist/cli.js +1 -1
- package/dist/cmds/biome/cmd.js +58 -0
- package/dist/cmds/biome/impl.d.ts +26 -0
- package/dist/cmds/biome/impl.js +272 -0
- package/dist/cmds/build/cmd.js +18 -10
- package/dist/cmds/clean/cmd.js +6 -6
- package/dist/cmds/clean/impl.js +16 -12
- package/dist/cmds/clean/presets.js +2 -2
- package/dist/cmds/publish/cmd.js +7 -7
- package/dist/cmds/senv/cmd.js +13 -15
- package/dist/cmds/tsc/cache.js +1 -1
- package/dist/cmds/tsc/cmd.js +11 -8
- package/dist/cmds/tsc/impl.js +132 -17
- package/dist/cmds/update/cmd.js +11 -10
- package/dist/cmds/update/impl.d.ts +4 -4
- package/dist/cmds/update/impl.js +10 -11
- package/dist/cmds/update/utils.d.ts +23 -4
- package/dist/cmds/update/utils.js +22 -16
- package/package.json +16 -13
- package/dist/cmds/perf/analysis/bundle.d.ts +0 -20
- package/dist/cmds/perf/analysis/bundle.js +0 -225
- package/dist/cmds/perf/analysis/filesystem.d.ts +0 -27
- package/dist/cmds/perf/analysis/filesystem.js +0 -245
- package/dist/cmds/perf/analysis/monorepo.d.ts +0 -30
- package/dist/cmds/perf/analysis/monorepo.js +0 -345
- package/dist/cmds/perf/benchmarks/command.d.ts +0 -21
- package/dist/cmds/perf/benchmarks/command.js +0 -162
- package/dist/cmds/perf/benchmarks/memory.d.ts +0 -41
- package/dist/cmds/perf/benchmarks/memory.js +0 -169
- package/dist/cmds/perf/benchmarks/runner.d.ts +0 -22
- package/dist/cmds/perf/benchmarks/runner.js +0 -157
- package/dist/cmds/perf/cmd.js +0 -240
- package/dist/cmds/perf/impl.d.ts +0 -24
- package/dist/cmds/perf/impl.js +0 -297
- package/dist/cmds/perf/reporters/console.d.ts +0 -12
- package/dist/cmds/perf/reporters/console.js +0 -257
- package/dist/cmds/perf/reporters/html.d.ts +0 -27
- package/dist/cmds/perf/reporters/html.js +0 -881
- package/dist/cmds/perf/reporters/json.d.ts +0 -9
- package/dist/cmds/perf/reporters/json.js +0 -32
- package/dist/cmds/perf/types.d.ts +0 -184
- package/dist/cmds/perf/types.js +0 -0
- package/dist/cmds/perf/utils/cache.d.ts +0 -23
- package/dist/cmds/perf/utils/cache.js +0 -172
- package/dist/cmds/perf/utils/formatter.d.ts +0 -17
- package/dist/cmds/perf/utils/formatter.js +0 -134
- package/dist/cmds/perf/utils/stats.d.ts +0 -15
- package/dist/cmds/perf/utils/stats.js +0 -101
- package/dist/cmds/port/cmd.d.ts +0 -2
- package/dist/cmds/port/cmd.js +0 -58
- package/dist/cmds/port/impl.d.ts +0 -5
- package/dist/cmds/port/impl.js +0 -280
- package/dist/cmds/shell/cmd.d.ts +0 -2
- package/dist/cmds/shell/cmd.js +0 -46
- package/dist/cmds/{perf → biome}/cmd.d.ts +0 -0
package/dist/cmds/perf/benchmarks/memory.js
DELETED
@@ -1,169 +0,0 @@
-export class MemoryProfiler {
-  snapshots = [];
-  startMemory = null;
-  peakMemory = null;
-  start(label) {
-    this.startMemory = process.memoryUsage();
-    this.peakMemory = { ...this.startMemory };
-    this.snapshots.push({
-      timestamp: Date.now(),
-      memory: this.startMemory,
-      label: label ?? "start"
-    });
-  }
-  snapshot(label) {
-    const current = process.memoryUsage();
-    this.snapshots.push({
-      timestamp: Date.now(),
-      memory: current,
-      label: label ?? `snapshot-${this.snapshots.length}`
-    });
-    if (!this.peakMemory) {
-      this.peakMemory = { ...current };
-    } else {
-      this.peakMemory = {
-        rss: Math.max(this.peakMemory.rss, current.rss),
-        heapTotal: Math.max(this.peakMemory.heapTotal, current.heapTotal),
-        heapUsed: Math.max(this.peakMemory.heapUsed, current.heapUsed),
-        external: Math.max(this.peakMemory.external, current.external),
-        arrayBuffers: Math.max(
-          this.peakMemory.arrayBuffers,
-          current.arrayBuffers
-        )
-      };
-    }
-  }
-  stop() {
-    if (!this.startMemory) {
-      return null;
-    }
-    const endMemory = process.memoryUsage();
-    const duration = this.snapshots.length > 0 ? this.snapshots[this.snapshots.length - 1].timestamp - this.snapshots[0].timestamp : 0;
-    const snapshot = {
-      before: this.startMemory,
-      after: endMemory,
-      peak: this.peakMemory ?? endMemory,
-      growth: endMemory.rss - this.startMemory.rss,
-      duration
-    };
-    this.startMemory = null;
-    this.peakMemory = null;
-    this.snapshots = [];
-    return snapshot;
-  }
-  getSnapshots() {
-    return [...this.snapshots];
-  }
-  getMemoryGrowth() {
-    if (this.snapshots.length < 2) return 0;
-    const first = this.snapshots[0].memory;
-    const last = this.snapshots[this.snapshots.length - 1].memory;
-    return last.rss - first.rss;
-  }
-  getPeakMemory() {
-    return this.peakMemory;
-  }
-  getAverageMemory() {
-    if (this.snapshots.length === 0) return null;
-    const sum = this.snapshots.reduce(
-      (acc, snapshot) => ({
-        rss: acc.rss + snapshot.memory.rss,
-        heapTotal: acc.heapTotal + snapshot.memory.heapTotal,
-        heapUsed: acc.heapUsed + snapshot.memory.heapUsed,
-        external: acc.external + snapshot.memory.external,
-        arrayBuffers: acc.arrayBuffers + snapshot.memory.arrayBuffers
-      }),
-      { rss: 0, heapTotal: 0, heapUsed: 0, external: 0, arrayBuffers: 0 }
-    );
-    const count = this.snapshots.length;
-    return {
-      rss: sum.rss / count,
-      heapTotal: sum.heapTotal / count,
-      heapUsed: sum.heapUsed / count,
-      external: sum.external / count,
-      arrayBuffers: sum.arrayBuffers / count
-    };
-  }
-}
-export const createMemoryProfiler = () => {
-  return new MemoryProfiler();
-};
-export const measureMemoryUsage = (fn) => {
-  return new Promise((resolve) => {
-    const profiler = createMemoryProfiler();
-    profiler.start("measurement");
-    const executeFn = async () => {
-      try {
-        await fn();
-      } finally {
-        const snapshot = profiler.stop();
-        resolve(snapshot);
-      }
-    };
-    executeFn();
-  });
-};
-export const getCurrentMemoryUsage = () => {
-  return process.memoryUsage();
-};
-export const getMemoryInfo = () => {
-  const usage = process.memoryUsage();
-  const total = usage.rss * 4;
-  const used = usage.rss;
-  const free = total - used;
-  const percentage = used / total * 100;
-  return {
-    total,
-    free,
-    used,
-    percentage: Math.min(percentage, 100)
-  };
-};
-export const formatMemoryUsage = (usage) => {
-  const format = (bytes) => {
-    if (bytes === 0) return "0 B";
-    const k = 1024;
-    const sizes = ["B", "KB", "MB", "GB"];
-    const i = Math.floor(Math.log(bytes) / Math.log(k));
-    return `${parseFloat((bytes / Math.pow(k, i)).toFixed(2))} ${sizes[i]}`;
-  };
-  return `RSS: ${format(usage.rss)}, Heap: ${format(usage.heapUsed)}/${format(usage.heapTotal)}, External: ${format(usage.external)}`;
-};
-export const detectMemoryLeaks = (snapshots) => {
-  if (snapshots.length < 3) {
-    return {
-      hasLeak: false,
-      severity: "low",
-      growthRate: 0,
-      suggestion: "Need more snapshots to detect leaks"
-    };
-  }
-  const rssValues = snapshots.map((s) => s.memory.rss);
-  const growthRate = (rssValues[rssValues.length - 1] - rssValues[0]) / snapshots.length;
-  const isConsistentGrowth = rssValues.every(
-    (val, i) => i === 0 || val >= rssValues[i - 1] * 0.95
-  );
-  const hasLeak = isConsistentGrowth && growthRate > 1024 * 1024;
-  let severity = "low";
-  let suggestion = "";
-  if (hasLeak) {
-    if (growthRate > 10 * 1024 * 1024) {
-      severity = "high";
-      suggestion = "Critical memory leak detected. Check for unclosed resources, event listeners, or circular references.";
-    } else if (growthRate > 5 * 1024 * 1024) {
-      severity = "medium";
-      suggestion = "Moderate memory leak detected. Monitor memory usage and consider garbage collection.";
-    } else {
-      severity = "low";
-      suggestion = "Minor memory growth detected. Monitor for patterns over time.";
-    }
-  } else {
-    suggestion = "No significant memory leaks detected.";
-  }
-  return {
-    hasLeak,
-    severity,
-    growthRate,
-    suggestion
-  };
-};
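
For context, a minimal usage sketch of the memory helpers removed above, based only on the exports shown in this hunk (`doWork` is a placeholder for any async workload, not part of the package):

  import { measureMemoryUsage, formatMemoryUsage } from "./memory.js";

  // Wraps one async task and resolves with { before, after, peak, growth, duration }.
  const snap = await measureMemoryUsage(async () => {
    await doWork(); // placeholder workload
  });
  console.log(formatMemoryUsage(snap.after), `growth: ${snap.growth} bytes`);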
package/dist/cmds/perf/benchmarks/runner.d.ts
DELETED
@@ -1,22 +0,0 @@
-import type { BenchmarkResult } from "../types.js";
-export interface BenchmarkRunnerOptions {
-  command: string;
-  runs: number;
-  warmup: number;
-  concurrency: number;
-  cwd?: string;
-  timeout?: number;
-  env?: Record<string, string>;
-  verbose?: boolean;
-}
-export declare class BenchmarkRunner {
-  readonly options: BenchmarkRunnerOptions;
-  constructor(options: BenchmarkRunnerOptions);
-  run(): Promise<BenchmarkResult>;
-  private runWarmup;
-  private runBenchmark;
-  private calculateStatistics;
-  private calculateMemoryStats;
-}
-export declare const runBenchmark: (options: BenchmarkRunnerOptions) => Promise<BenchmarkResult>;
-export declare const createBenchmarkRunner: (options: BenchmarkRunnerOptions) => BenchmarkRunner;
package/dist/cmds/perf/benchmarks/runner.js
DELETED
@@ -1,157 +0,0 @@
-import { logger } from "@reliverse/dler-logger";
-import pMap from "@reliverse/dler-mapper";
-import { formatProgress } from "../utils/formatter.js";
-import {
-  calculateMemoryAverage,
-  calculateStatistics,
-  findPeakMemory
-} from "../utils/stats.js";
-import { executeCommandWithMemoryTracking } from "./command.js";
-export class BenchmarkRunner {
-  options;
-  constructor(options) {
-    this.options = options;
-  }
-  async run() {
-    const { command, runs, warmup, concurrency, verbose } = this.options;
-    const startTime = Date.now();
-    if (verbose) {
-      logger.info(`\u{1F680} Starting benchmark for: ${command}`);
-      logger.info(
-        ` Runs: ${runs}, Warmup: ${warmup}, Concurrency: ${concurrency}`
-      );
-    }
-    if (warmup > 0) {
-      if (verbose) {
-        logger.info(`\u{1F525} Running ${warmup} warmup iterations...`);
-      }
-      await this.runWarmup();
-    }
-    if (verbose) {
-      logger.info(`\u{1F4CA} Running ${runs} benchmark iterations...`);
-    }
-    const measurements = await this.runBenchmark();
-    const statistics = this.calculateStatistics(measurements);
-    const memory = this.calculateMemoryStats(measurements);
-    const executionTime = Date.now() - startTime;
-    const failures = measurements.filter((m) => !m.success);
-    const success = failures.length === 0;
-    if (verbose && failures.length > 0) {
-      logger.warn(`\u26A0\uFE0F ${failures.length} out of ${runs} runs failed`);
-    }
-    return {
-      command,
-      runs,
-      warmup,
-      concurrency,
-      measurements,
-      statistics,
-      memory,
-      executionTime,
-      success,
-      error: failures.length > 0 ? `${failures.length} runs failed` : void 0
-    };
-  }
-  async runWarmup() {
-    const { command, warmup, cwd, timeout, env } = this.options;
-    for (let i = 0; i < warmup; i++) {
-      try {
-        await executeCommandWithMemoryTracking(command, {
-          cwd,
-          timeout,
-          env
-        });
-      } catch {
-      }
-    }
-  }
-  async runBenchmark() {
-    const { command, runs, concurrency, cwd, timeout, env, verbose } = this.options;
-    const runIndices = Array.from({ length: runs }, (_, i) => i);
-    const measurements = await pMap(
-      runIndices,
-      async (runIndex) => {
-        if (verbose) {
-          logger.info(formatProgress(runIndex + 1, runs));
-        }
-        const measurement = await executeCommandWithMemoryTracking(command, {
-          cwd,
-          timeout,
-          env
-        });
-        measurement.run = runIndex + 1;
-        return measurement;
-      },
-      {
-        concurrency,
-        stopOnError: false
-      }
-    );
-    return measurements;
-  }
-  calculateStatistics(measurements) {
-    const durations = measurements.filter((m) => m.success).map((m) => m.duration);
-    if (durations.length === 0) {
-      return calculateStatistics([]);
-    }
-    return calculateStatistics(durations);
-  }
-  calculateMemoryStats(measurements) {
-    const successfulMeasurements = measurements.filter((m) => m.success);
-    if (successfulMeasurements.length === 0) {
-      return {
-        peak: {
-          rss: 0,
-          heapTotal: 0,
-          heapUsed: 0,
-          external: 0,
-          arrayBuffers: 0
-        },
-        average: {
-          rss: 0,
-          heapTotal: 0,
-          heapUsed: 0,
-          external: 0,
-          arrayBuffers: 0
-        },
-        growth: 0
-      };
-    }
-    const rssValues = successfulMeasurements.map((m) => m.memory.rss);
-    const heapTotalValues = successfulMeasurements.map(
-      (m) => m.memory.heapTotal
-    );
-    const heapUsedValues = successfulMeasurements.map((m) => m.memory.heapUsed);
-    const externalValues = successfulMeasurements.map((m) => m.memory.external);
-    const arrayBuffersValues = successfulMeasurements.map(
-      (m) => m.memory.arrayBuffers
-    );
-    const peak = {
-      rss: findPeakMemory(rssValues),
-      heapTotal: findPeakMemory(heapTotalValues),
-      heapUsed: findPeakMemory(heapUsedValues),
-      external: findPeakMemory(externalValues),
-      arrayBuffers: findPeakMemory(arrayBuffersValues)
-    };
-    const average = {
-      rss: calculateMemoryAverage(rssValues),
-      heapTotal: calculateMemoryAverage(heapTotalValues),
-      heapUsed: calculateMemoryAverage(heapUsedValues),
-      external: calculateMemoryAverage(externalValues),
-      arrayBuffers: calculateMemoryAverage(arrayBuffersValues)
-    };
-    const growth = rssValues.length > 1 ? rssValues[rssValues.length - 1] - rssValues[0] : 0;
-    return {
-      peak,
-      average,
-      growth
-    };
-  }
-}
-export const runBenchmark = async (options) => {
-  const runner = new BenchmarkRunner(options);
-  return runner.run();
-};
-export const createBenchmarkRunner = (options) => {
-  return new BenchmarkRunner(options);
-};
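
Likewise, a hedged sketch of how the removed benchmark runner could be invoked; the option names follow BenchmarkRunnerOptions from runner.d.ts above:

  import { runBenchmark } from "./runner.js";

  // Runs the command 10 times after 2 warmup runs and returns timing + memory stats.
  const result = await runBenchmark({
    command: "bun dler tsc",
    runs: 10,
    warmup: 2,
    concurrency: 1,
    verbose: true
  });
  console.log(result.statistics, result.memory.peak.rss);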
package/dist/cmds/perf/cmd.js
DELETED
@@ -1,240 +0,0 @@
-import { defineArgs, defineCommand } from "@reliverse/dler-launcher";
-import { logger } from "@reliverse/dler-logger";
-import { runPerfAnalysis } from "./impl.js";
-import { createConsoleReporter } from "./reporters/console.js";
-import { createHtmlReporter } from "./reporters/html.js";
-import { createJsonReporter } from "./reporters/json.js";
-export default defineCommand({
-  meta: {
-    name: "perf",
-    description: "Comprehensive performance measurement and analysis tool. Benchmarks commands, analyzes bundles, profiles files/directories, and identifies monorepo bottlenecks.",
-    examples: [
-      "# Command Benchmarking:",
-      "dler perf build",
-      'dler perf "bun dler tsc"',
-      'dler perf "node --version"',
-      "dler perf build --runs 20 --warmup 5",
-      "dler perf build --concurrency 4 --verbose",
-      "dler perf build --compare --save",
-      "",
-      "# Bundle Analysis:",
-      "dler perf ./dist --type bundle",
-      "dler perf ./build --type bundle",
-      "dler perf ./dist --type bundle --verbose",
-      "",
-      "# File System Profiling:",
-      "dler perf ./src --type file",
-      "dler perf ./packages --type file",
-      "dler perf ./src --type file --verbose",
-      "",
-      "# Monorepo Analysis:",
-      "dler perf --type monorepo",
-      "dler perf --type monorepo --ignore '@reliverse/*'",
-      "dler perf --type monorepo --verbose",
-      "",
-      "# Auto-detection:",
-      "dler perf build # Auto-detects as command",
-      "dler perf ./dist # Auto-detects as bundle",
-      "dler perf ./src # Auto-detects as file system",
-      "dler perf # Auto-detects as monorepo",
-      "",
-      "# Output Formats:",
-      "dler perf build --output json",
-      "dler perf build --output html",
-      "dler perf build --output all",
-      "",
-      "# Baseline Comparison:",
-      "dler perf build --save # Save current as baseline",
-      "dler perf build --compare # Compare with baseline",
-      "dler perf build --compare --save # Compare and update baseline",
-      "",
-      "# Advanced Examples:",
-      "dler perf 'bun dler tsc' --runs 50 --warmup 10 --concurrency 2",
-      "dler perf ./dist --type bundle --output html --verbose",
-      "dler perf --type monorepo --ignore '@reliverse/*' --output json",
-      "dler perf build --runs 100 --compare --save --output all",
-      "",
-      "# Performance Monitoring:",
-      "dler perf build --runs 10 --save # Establish baseline",
-      "dler perf build --compare # Check for regressions",
-      "dler perf build --runs 20 --compare --save # Update baseline",
-      "",
-      "# Analysis Types:",
-      "dler perf --type command <cmd> # Benchmark command execution",
-      "dler perf --type bundle <path> # Analyze bundle size and structure",
-      "dler perf --type file <path> # Profile file system usage",
-      "dler perf --type monorepo # Analyze monorepo dependencies",
-      "dler perf --type auto <target> # Auto-detect analysis type",
-      "",
-      "# Command Examples:",
-      "dler perf 'dler build'",
-      "dler perf 'bun dler tsc --verbose'",
-      "dler perf 'node --version'",
-      "dler perf 'npm run build'",
-      "dler perf 'yarn test'",
-      "",
-      "# Bundle Analysis Examples:",
-      "dler perf ./dist --type bundle",
-      "dler perf ./build --type bundle",
-      "dler perf ./out --type bundle",
-      "dler perf ./lib --type bundle",
-      "",
-      "# File System Examples:",
-      "dler perf ./src --type file",
-      "dler perf ./packages --type file",
-      "dler perf ./apps --type file",
-      "dler perf ./examples --type file",
-      "",
-      "# Monorepo Examples:",
-      "dler perf --type monorepo",
-      "dler perf --type monorepo --cwd /path/to/monorepo",
-      "dler perf --type monorepo --ignore '@reliverse/*'",
-      "dler perf --type monorepo --ignore '@reliverse/* @company/*'",
-      "",
-      "# Output Examples:",
-      "dler perf build --output console # Console output (default)",
-      "dler perf build --output json # JSON output",
-      "dler perf build --output html # HTML report",
-      "dler perf build --output all # All formats",
-      "",
-      "# Baseline Management:",
-      "dler perf build --save # Save current run as baseline",
-      "dler perf build --compare # Compare with saved baseline",
-      "dler perf build --compare --save # Compare and update baseline",
-      "",
-      "# Verbose Output:",
-      "dler perf build --verbose # Show detailed progress",
-      "dler perf ./dist --type bundle --verbose",
-      "dler perf --type monorepo --verbose",
-      "",
-      "# Concurrency Control:",
-      "dler perf build --concurrency 1 # Sequential execution",
-      "dler perf build --concurrency 4 # Parallel execution",
-      "dler perf build --concurrency 8 # High parallelism",
-      "",
-      "# Iteration Control:",
-      "dler perf build --runs 5 # Few iterations (faster)",
-      "dler perf build --runs 50 # Many iterations (more accurate)",
-      "dler perf build --warmup 0 # No warmup",
-      "dler perf build --warmup 10 # More warmup runs",
-      "",
-      "# Working Directory:",
-      "dler perf build --cwd /path/to/project",
-      "dler perf ./dist --type bundle --cwd /path/to/project",
-      "dler perf --type monorepo --cwd /path/to/monorepo"
-    ]
-  },
-  args: defineArgs({
-    target: {
-      type: "string",
-      description: "What to measure (command name, file path, or directory)"
-    },
-    type: {
-      type: "string",
-      description: "Type of analysis: command, bundle, file, monorepo, or auto (default: auto)"
-    },
-    runs: {
-      type: "number",
-      description: "Number of benchmark iterations (default: 10)"
-    },
-    warmup: {
-      type: "number",
-      description: "Warmup runs before measurement (default: 2)"
-    },
-    concurrency: {
-      type: "number",
-      description: "For parallel benchmarks (default: 1)"
-    },
-    compare: {
-      type: "boolean",
-      description: "Compare with cached baseline (default: false)"
-    },
-    output: {
-      type: "string",
-      description: "Output format: console, json, html, or all (default: console)"
-    },
-    save: {
-      type: "boolean",
-      description: "Save results as baseline (default: false)"
-    },
-    verbose: {
-      type: "boolean",
-      description: "Detailed output (default: false)"
-    },
-    cwd: {
-      type: "string",
-      description: "Working directory (default: current directory)"
-    },
-    ignore: {
-      type: "string",
-      description: "Packages to ignore for monorepo analysis (supports wildcards like @reliverse/*)"
-    }
-  }),
-  run: async ({ args }) => {
-    try {
-      if (typeof process.versions.bun === "undefined") {
-        logger.error("\u274C This command requires Bun runtime. Sorry.");
-        process.exit(1);
-      }
-      const validTypes = [
-        "command",
-        "bundle",
-        "file",
-        "monorepo",
-        "auto"
-      ];
-      if (args.type && !validTypes.includes(args.type)) {
-        logger.error(
-          `\u274C Invalid type: ${args.type}. Must be one of: ${validTypes.join(", ")}`
-        );
-        process.exit(1);
-      }
-      const validOutputs = [
-        "console",
-        "json",
-        "html",
-        "all"
-      ];
-      if (args.output && !validOutputs.includes(args.output)) {
-        logger.error(
-          `\u274C Invalid output format: ${args.output}. Must be one of: ${validOutputs.join(", ")}`
-        );
-        process.exit(1);
-      }
-      const result = await runPerfAnalysis(args);
-      if (!result.success) {
-        logger.error(`\u274C Performance analysis failed: ${result.error}`);
-        process.exit(1);
-      }
-      const outputFormats = args.output === "all" ? ["console", "json", "html"] : [args.output ?? "console"];
-      for (const format of outputFormats) {
-        switch (format) {
-          case "console": {
-            const consoleReporter = createConsoleReporter(args.verbose);
-            consoleReporter.report(result.report);
-            break;
-          }
-          case "json": {
-            const jsonReporter = createJsonReporter();
-            jsonReporter.report(result.report);
-            break;
-          }
-          case "html": {
-            const htmlReporter = createHtmlReporter();
-            htmlReporter.report(result.report);
-            break;
-          }
-        }
-      }
-      logger.success("\n\u2705 Performance analysis completed successfully!");
-    } catch (error) {
-      logger.error("\n\u274C Performance analysis failed:");
-      if (error instanceof Error) {
-        logger.error(error.message);
-      } else {
-        logger.error(String(error));
-      }
-      process.exit(1);
-    }
-  }
-});
package/dist/cmds/perf/impl.d.ts
DELETED
@@ -1,24 +0,0 @@
-import type { PerfOptions, PerfReport } from "./types.js";
-export interface PerfAnalysisResult {
-  report: PerfReport;
-  success: boolean;
-  error?: string;
-}
-export declare class PerfAnalyzer {
-  private options;
-  private cache;
-  constructor(options: PerfOptions);
-  analyze(): Promise<PerfAnalysisResult>;
-  private detectType;
-  private detectTargetType;
-  private isCommand;
-  private isBundleFile;
-  private containsBundleFiles;
-  private runCommandBenchmark;
-  private runBundleAnalysis;
-  private runFileSystemAnalysis;
-  private runMonorepoAnalysis;
-  private compareWithBaseline;
-}
-export declare const runPerfAnalysis: (options: PerfOptions) => Promise<PerfAnalysisResult>;
-export declare const createPerfAnalyzer: (options: PerfOptions) => PerfAnalyzer;
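
Finally, a sketch of the programmatic entry point declared above, mirroring the options cmd.js passed through from the CLI args; the full PerfOptions shape lives in the also-removed types.d.ts, so the fields below are only the ones visible in this diff:

  import { runPerfAnalysis } from "./impl.js";

  // Rough equivalent of `dler perf build --runs 20 --warmup 5` without the CLI wrapper.
  const { success, report, error } = await runPerfAnalysis({
    target: "build",
    type: "command",
    runs: 20,
    warmup: 5
  });
  if (!success) throw new Error(error);
  console.log(report);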