bun-scikit 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +187 -0
- package/binding.gyp +21 -0
- package/docs/README.md +7 -0
- package/docs/native-abi.md +53 -0
- package/index.ts +1 -0
- package/package.json +76 -0
- package/scripts/build-node-addon.ts +26 -0
- package/scripts/build-zig-kernels.ts +50 -0
- package/scripts/check-api-docs-coverage.ts +52 -0
- package/scripts/check-benchmark-health.ts +140 -0
- package/scripts/install-native.ts +160 -0
- package/scripts/package-native-artifacts.ts +62 -0
- package/scripts/sync-benchmark-readme.ts +181 -0
- package/scripts/update-benchmark-history.ts +91 -0
- package/src/ensemble/RandomForestClassifier.ts +136 -0
- package/src/ensemble/RandomForestRegressor.ts +136 -0
- package/src/index.ts +32 -0
- package/src/linear_model/LinearRegression.ts +136 -0
- package/src/linear_model/LogisticRegression.ts +260 -0
- package/src/linear_model/SGDClassifier.ts +161 -0
- package/src/linear_model/SGDRegressor.ts +104 -0
- package/src/metrics/classification.ts +294 -0
- package/src/metrics/regression.ts +51 -0
- package/src/model_selection/GridSearchCV.ts +244 -0
- package/src/model_selection/KFold.ts +82 -0
- package/src/model_selection/RepeatedKFold.ts +49 -0
- package/src/model_selection/RepeatedStratifiedKFold.ts +50 -0
- package/src/model_selection/StratifiedKFold.ts +112 -0
- package/src/model_selection/StratifiedShuffleSplit.ts +211 -0
- package/src/model_selection/crossValScore.ts +165 -0
- package/src/model_selection/trainTestSplit.ts +82 -0
- package/src/naive_bayes/GaussianNB.ts +148 -0
- package/src/native/node-addon/bun_scikit_addon.cpp +450 -0
- package/src/native/zigKernels.ts +576 -0
- package/src/neighbors/KNeighborsClassifier.ts +85 -0
- package/src/pipeline/ColumnTransformer.ts +203 -0
- package/src/pipeline/FeatureUnion.ts +123 -0
- package/src/pipeline/Pipeline.ts +168 -0
- package/src/preprocessing/MinMaxScaler.ts +113 -0
- package/src/preprocessing/OneHotEncoder.ts +91 -0
- package/src/preprocessing/PolynomialFeatures.ts +158 -0
- package/src/preprocessing/RobustScaler.ts +149 -0
- package/src/preprocessing/SimpleImputer.ts +150 -0
- package/src/preprocessing/StandardScaler.ts +92 -0
- package/src/svm/LinearSVC.ts +117 -0
- package/src/tree/DecisionTreeClassifier.ts +394 -0
- package/src/tree/DecisionTreeRegressor.ts +407 -0
- package/src/types.ts +18 -0
- package/src/utils/linalg.ts +209 -0
- package/src/utils/validation.ts +78 -0
- package/zig/kernels.zig +1327 -0
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
import type { ClassificationModel, Matrix, Vector } from "../types";
|
|
2
|
+
import { accuracyScore } from "../metrics/classification";
|
|
3
|
+
import { dot } from "../utils/linalg";
|
|
4
|
+
import {
|
|
5
|
+
assertConsistentRowSize,
|
|
6
|
+
assertFiniteMatrix,
|
|
7
|
+
assertFiniteVector,
|
|
8
|
+
validateClassificationInputs,
|
|
9
|
+
} from "../utils/validation";
|
|
10
|
+
|
|
11
|
+
/** Loss functions supported by {@link SGDClassifier}: "hinge" (linear-SVM style) or "log_loss" (logistic). */
export type SGDClassifierLoss = "hinge" | "log_loss";

/** Construction options for {@link SGDClassifier}. All fields optional; defaults listed per field. */
export interface SGDClassifierOptions {
  /** Loss to optimize. Default: "hinge". */
  loss?: SGDClassifierLoss;
  /** Whether to learn a bias term. Default: true. */
  fitIntercept?: boolean;
  /** Step size for each gradient update. Default: 0.05. */
  learningRate?: number;
  /** Maximum number of gradient-descent iterations. Default: 10_000. */
  maxIter?: number;
  /** Early-stop threshold on the largest absolute parameter change per iteration. Default: 1e-6. */
  tolerance?: number;
  /** L2 regularization strength applied to the coefficients (not the intercept). Default: 0. */
  l2?: number;
}
|
|
21
|
+
|
|
22
|
+
function sigmoid(z: number): number {
|
|
23
|
+
if (z >= 0) {
|
|
24
|
+
const expNeg = Math.exp(-z);
|
|
25
|
+
return 1 / (1 + expNeg);
|
|
26
|
+
}
|
|
27
|
+
const expPos = Math.exp(z);
|
|
28
|
+
return expPos / (1 + expPos);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
/**
 * Binary linear classifier trained by gradient descent on either the hinge
 * loss ("hinge", linear-SVM style) or the logistic loss ("log_loss").
 *
 * NOTE(review): despite the "SGD" name, `fit` performs full-batch gradient
 * descent — each iteration averages the gradient over every sample before a
 * single parameter update. Confirm this matches the intended API contract.
 *
 * Labels are assumed binary 0/1 (`classes_` is fixed to [0, 1]); any y value
 * other than 1 is treated as the negative class by the hinge branch.
 */
export class SGDClassifier implements ClassificationModel {
  // Learned feature weights; empty until fit() is called.
  coef_: Vector = [];
  // Learned bias term (stays 0 when fitIntercept is false).
  intercept_ = 0;
  // Fixed binary label set.
  classes_: Vector = [0, 1];

  private readonly loss: SGDClassifierLoss;
  private readonly fitIntercept: boolean;
  private readonly learningRate: number;
  private readonly maxIter: number;
  private readonly tolerance: number;
  private readonly l2: number;
  private isFitted = false;

  constructor(options: SGDClassifierOptions = {}) {
    this.loss = options.loss ?? "hinge";
    this.fitIntercept = options.fitIntercept ?? true;
    this.learningRate = options.learningRate ?? 0.05;
    this.maxIter = options.maxIter ?? 10_000;
    this.tolerance = options.tolerance ?? 1e-6;
    this.l2 = options.l2 ?? 0;
  }

  /**
   * Fits the linear model on X (n samples x n features) and binary targets y.
   * Runs up to `maxIter` full-batch updates and stops early once the largest
   * absolute parameter change in an iteration drops below `tolerance`.
   * Returns `this` for chaining.
   */
  fit(X: Matrix, y: Vector): this {
    validateClassificationInputs(X, y);
    const nSamples = X.length;
    const nFeatures = X[0].length;
    // The hinge loss works with +/-1 targets; log_loss uses the raw labels.
    const ySigned = y.map((value) => (value === 1 ? 1 : -1));

    this.coef_ = new Array<number>(nFeatures).fill(0);
    this.intercept_ = 0;

    for (let iter = 0; iter < this.maxIter; iter += 1) {
      const gradients = new Array<number>(nFeatures).fill(0);
      let interceptGradient = 0;

      for (let i = 0; i < nSamples; i += 1) {
        const score = dot(X[i], this.coef_) + this.intercept_;

        if (this.loss === "hinge") {
          const margin = ySigned[i] * score;
          // Only samples inside the margin (or misclassified) contribute a
          // (sub)gradient for the hinge loss.
          if (margin < 1) {
            const factor = -ySigned[i];
            for (let j = 0; j < nFeatures; j += 1) {
              gradients[j] += factor * X[i][j];
            }
            if (this.fitIntercept) {
              interceptGradient += factor;
            }
          }
        } else {
          // Logistic loss: per-sample gradient is (p - y) * x.
          const p = sigmoid(score);
          const error = p - y[i];
          for (let j = 0; j < nFeatures; j += 1) {
            gradients[j] += error * X[i][j];
          }
          if (this.fitIntercept) {
            interceptGradient += error;
          }
        }
      }

      // Apply the averaged gradient plus L2 shrinkage; track the largest
      // single-parameter step for the convergence test below.
      let maxUpdate = 0;
      for (let j = 0; j < nFeatures; j += 1) {
        const grad = gradients[j] / nSamples + this.l2 * this.coef_[j];
        const delta = this.learningRate * grad;
        this.coef_[j] -= delta;
        const absDelta = Math.abs(delta);
        if (absDelta > maxUpdate) {
          maxUpdate = absDelta;
        }
      }

      // The intercept is deliberately not L2-regularized.
      if (this.fitIntercept) {
        const interceptDelta = this.learningRate * (interceptGradient / nSamples);
        this.intercept_ -= interceptDelta;
        const absInterceptDelta = Math.abs(interceptDelta);
        if (absInterceptDelta > maxUpdate) {
          maxUpdate = absInterceptDelta;
        }
      }

      if (maxUpdate < this.tolerance) {
        break;
      }
    }

    this.isFitted = true;
    return this;
  }

  /**
   * Returns [P(class 0), P(class 1)] for each row of X.
   * Only meaningful for the logistic objective, hence the loss guard.
   * Throws if the model is unfitted, X is ragged/non-finite, or the feature
   * count differs from the fitted coefficients.
   */
  predictProba(X: Matrix): Matrix {
    if (this.loss !== "log_loss") {
      throw new Error("predictProba is only available when loss='log_loss'.");
    }
    if (!this.isFitted) {
      throw new Error("SGDClassifier has not been fitted.");
    }
    assertConsistentRowSize(X);
    assertFiniteMatrix(X);
    if (X[0].length !== this.coef_.length) {
      throw new Error(`Feature size mismatch. Expected ${this.coef_.length}, got ${X[0].length}.`);
    }

    return X.map((row) => {
      const positive = sigmoid(dot(row, this.coef_) + this.intercept_);
      return [1 - positive, positive];
    });
  }

  /**
   * Predicts 0/1 labels. For log_loss this thresholds P(class 1) at 0.5;
   * for hinge it thresholds the raw decision score at 0 (ties go to 1).
   */
  predict(X: Matrix): Vector {
    if (!this.isFitted) {
      throw new Error("SGDClassifier has not been fitted.");
    }
    assertConsistentRowSize(X);
    assertFiniteMatrix(X);
    if (X[0].length !== this.coef_.length) {
      throw new Error(`Feature size mismatch. Expected ${this.coef_.length}, got ${X[0].length}.`);
    }

    if (this.loss === "log_loss") {
      return this.predictProba(X).map((pair) => (pair[1] >= 0.5 ? 1 : 0));
    }

    return X.map((row) => (dot(row, this.coef_) + this.intercept_ >= 0 ? 1 : 0));
  }

  /** Mean accuracy of `predict(X)` against y. */
  score(X: Matrix, y: Vector): number {
    assertFiniteVector(y);
    return accuracyScore(y, this.predict(X));
  }
}
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import type { Matrix, RegressionModel, Vector } from "../types";
|
|
2
|
+
import { r2Score } from "../metrics/regression";
|
|
3
|
+
import { dot } from "../utils/linalg";
|
|
4
|
+
import {
|
|
5
|
+
assertConsistentRowSize,
|
|
6
|
+
assertFiniteMatrix,
|
|
7
|
+
assertFiniteVector,
|
|
8
|
+
validateRegressionInputs,
|
|
9
|
+
} from "../utils/validation";
|
|
10
|
+
|
|
11
|
+
/** Construction options for {@link SGDRegressor}. All fields optional; defaults listed per field. */
export interface SGDRegressorOptions {
  /** Whether to learn a bias term. Default: true. */
  fitIntercept?: boolean;
  /** Step size for each gradient update. Default: 0.05. */
  learningRate?: number;
  /** Maximum number of gradient-descent iterations. Default: 10_000. */
  maxIter?: number;
  /** Early-stop threshold on the largest absolute parameter change per iteration. Default: 1e-6. */
  tolerance?: number;
  /** L2 regularization strength applied to the coefficients (not the intercept). Default: 0. */
  l2?: number;
}
|
|
18
|
+
|
|
19
|
+
/**
 * Linear least-squares regressor trained by gradient descent with optional
 * L2 shrinkage on the coefficients.
 *
 * NOTE(review): despite the "SGD" name, `fit` performs full-batch gradient
 * descent — each iteration averages the gradient over all samples before a
 * single parameter update. Confirm this matches the intended API contract.
 */
export class SGDRegressor implements RegressionModel {
  // Learned feature weights; empty until fit() is called.
  coef_: Vector = [];
  // Learned bias term (stays 0 when fitIntercept is false).
  intercept_ = 0;

  private readonly fitIntercept: boolean;
  private readonly learningRate: number;
  private readonly maxIter: number;
  private readonly tolerance: number;
  private readonly l2: number;
  private isFitted = false;

  constructor(options: SGDRegressorOptions = {}) {
    this.fitIntercept = options.fitIntercept ?? true;
    this.learningRate = options.learningRate ?? 0.05;
    this.maxIter = options.maxIter ?? 10_000;
    this.tolerance = options.tolerance ?? 1e-6;
    this.l2 = options.l2 ?? 0;
  }

  /**
   * Fits coefficients on X (n samples x n features) and targets y.
   * Runs up to `maxIter` full-batch updates and stops early once the largest
   * absolute parameter change in an iteration falls below `tolerance`.
   * Returns `this` for chaining.
   */
  fit(X: Matrix, y: Vector): this {
    validateRegressionInputs(X, y);
    const nSamples = X.length;
    const nFeatures = X[0].length;
    this.coef_ = new Array<number>(nFeatures).fill(0);
    this.intercept_ = 0;

    for (let iter = 0; iter < this.maxIter; iter += 1) {
      const gradients = new Array<number>(nFeatures).fill(0);
      let interceptGradient = 0;

      // Accumulate the squared-error gradient (residual * x) over all samples.
      for (let i = 0; i < nSamples; i += 1) {
        const prediction = dot(X[i], this.coef_) + this.intercept_;
        const error = prediction - y[i];
        for (let j = 0; j < nFeatures; j += 1) {
          gradients[j] += error * X[i][j];
        }
        if (this.fitIntercept) {
          interceptGradient += error;
        }
      }

      // Apply the averaged gradient plus L2 shrinkage; track the largest
      // single-parameter step for the convergence test below.
      let maxUpdate = 0;
      for (let j = 0; j < nFeatures; j += 1) {
        const grad = gradients[j] / nSamples + this.l2 * this.coef_[j];
        const delta = this.learningRate * grad;
        this.coef_[j] -= delta;
        const absDelta = Math.abs(delta);
        if (absDelta > maxUpdate) {
          maxUpdate = absDelta;
        }
      }
      // The intercept is deliberately not L2-regularized.
      if (this.fitIntercept) {
        const interceptDelta = this.learningRate * (interceptGradient / nSamples);
        this.intercept_ -= interceptDelta;
        const absInterceptDelta = Math.abs(interceptDelta);
        if (absInterceptDelta > maxUpdate) {
          maxUpdate = absInterceptDelta;
        }
      }

      if (maxUpdate < this.tolerance) {
        break;
      }
    }

    this.isFitted = true;
    return this;
  }

  /**
   * Predicts X . coef_ + intercept_ per row.
   * Throws if the model is unfitted, X is ragged/non-finite, or the feature
   * count differs from the fitted coefficients.
   */
  predict(X: Matrix): Vector {
    if (!this.isFitted) {
      throw new Error("SGDRegressor has not been fitted.");
    }
    assertConsistentRowSize(X);
    assertFiniteMatrix(X);
    if (X[0].length !== this.coef_.length) {
      throw new Error(`Feature size mismatch. Expected ${this.coef_.length}, got ${X[0].length}.`);
    }
    return X.map((row) => dot(row, this.coef_) + this.intercept_);
  }

  /** Coefficient of determination (R^2) of `predict(X)` against y. */
  score(X: Matrix, y: Vector): number {
    assertFiniteVector(y);
    return r2Score(y, this.predict(X));
  }
}
|
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
function validateInputs(yTrue: number[], yPred: number[]): void {
|
|
2
|
+
if (yTrue.length === 0 || yPred.length === 0) {
|
|
3
|
+
throw new Error("yTrue and yPred must be non-empty.");
|
|
4
|
+
}
|
|
5
|
+
|
|
6
|
+
if (yTrue.length !== yPred.length) {
|
|
7
|
+
throw new Error(`Length mismatch: yTrue=${yTrue.length}, yPred=${yPred.length}.`);
|
|
8
|
+
}
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
function validateBinaryTargets(yTrue: number[]): void {
|
|
12
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
13
|
+
const value = yTrue[i];
|
|
14
|
+
if (!(value === 0 || value === 1)) {
|
|
15
|
+
throw new Error(`Binary classification target expected (0/1). Found ${value} at index ${i}.`);
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
function clampProbability(value: number, eps: number): number {
|
|
21
|
+
if (!Number.isFinite(value)) {
|
|
22
|
+
throw new Error(`Probability must be finite. Got ${value}.`);
|
|
23
|
+
}
|
|
24
|
+
if (value < eps) {
|
|
25
|
+
return eps;
|
|
26
|
+
}
|
|
27
|
+
if (value > 1 - eps) {
|
|
28
|
+
return 1 - eps;
|
|
29
|
+
}
|
|
30
|
+
return value;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
function confusionCounts(yTrue: number[], yPred: number[], positiveLabel: number): {
|
|
34
|
+
tp: number;
|
|
35
|
+
fp: number;
|
|
36
|
+
fn: number;
|
|
37
|
+
tn: number;
|
|
38
|
+
} {
|
|
39
|
+
validateInputs(yTrue, yPred);
|
|
40
|
+
|
|
41
|
+
let tp = 0;
|
|
42
|
+
let fp = 0;
|
|
43
|
+
let fn = 0;
|
|
44
|
+
let tn = 0;
|
|
45
|
+
|
|
46
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
47
|
+
const truthPositive = yTrue[i] === positiveLabel;
|
|
48
|
+
const predPositive = yPred[i] === positiveLabel;
|
|
49
|
+
|
|
50
|
+
if (truthPositive && predPositive) {
|
|
51
|
+
tp += 1;
|
|
52
|
+
} else if (!truthPositive && predPositive) {
|
|
53
|
+
fp += 1;
|
|
54
|
+
} else if (truthPositive && !predPositive) {
|
|
55
|
+
fn += 1;
|
|
56
|
+
} else {
|
|
57
|
+
tn += 1;
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
return { tp, fp, fn, tn };
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
/** Output of {@link confusionMatrix}: the resolved label order plus the counts grid. */
export interface ConfusionMatrixResult {
  /** Class labels; index i corresponds to row/column i of `matrix`. */
  labels: number[];
  /** matrix[i][j] = number of samples with true label labels[i] predicted as labels[j]. */
  matrix: number[][];
}

/** Per-class metrics reported by {@link classificationReport}. */
export interface ClassificationReportLabelMetrics {
  precision: number;
  recall: number;
  f1Score: number;
  /** Number of true samples of this label (confusion-matrix row sum). */
  support: number;
}

/** Aggregate output of {@link classificationReport}. */
export interface ClassificationReportResult {
  /** Label order used throughout the report. */
  labels: number[];
  /** Metrics keyed by String(label). */
  perLabel: Record<string, ClassificationReportLabelMetrics>;
  accuracy: number;
  /** Unweighted mean of the per-label metrics. */
  macroAvg: ClassificationReportLabelMetrics;
  /** Support-weighted mean of the per-label metrics. */
  weightedAvg: ClassificationReportLabelMetrics;
}
|
|
83
|
+
|
|
84
|
+
export function accuracyScore(yTrue: number[], yPred: number[]): number {
|
|
85
|
+
validateInputs(yTrue, yPred);
|
|
86
|
+
let correct = 0;
|
|
87
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
88
|
+
if (yTrue[i] === yPred[i]) {
|
|
89
|
+
correct += 1;
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
return correct / yTrue.length;
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
export function precisionScore(
|
|
96
|
+
yTrue: number[],
|
|
97
|
+
yPred: number[],
|
|
98
|
+
positiveLabel = 1,
|
|
99
|
+
): number {
|
|
100
|
+
const { tp, fp } = confusionCounts(yTrue, yPred, positiveLabel);
|
|
101
|
+
const denominator = tp + fp;
|
|
102
|
+
if (denominator === 0) {
|
|
103
|
+
return 0;
|
|
104
|
+
}
|
|
105
|
+
return tp / denominator;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
export function recallScore(yTrue: number[], yPred: number[], positiveLabel = 1): number {
|
|
109
|
+
const { tp, fn } = confusionCounts(yTrue, yPred, positiveLabel);
|
|
110
|
+
const denominator = tp + fn;
|
|
111
|
+
if (denominator === 0) {
|
|
112
|
+
return 0;
|
|
113
|
+
}
|
|
114
|
+
return tp / denominator;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
export function f1Score(yTrue: number[], yPred: number[], positiveLabel = 1): number {
|
|
118
|
+
const precision = precisionScore(yTrue, yPred, positiveLabel);
|
|
119
|
+
const recall = recallScore(yTrue, yPred, positiveLabel);
|
|
120
|
+
const denominator = precision + recall;
|
|
121
|
+
if (denominator === 0) {
|
|
122
|
+
return 0;
|
|
123
|
+
}
|
|
124
|
+
return (2 * precision * recall) / denominator;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
export function confusionMatrix(
|
|
128
|
+
yTrue: number[],
|
|
129
|
+
yPred: number[],
|
|
130
|
+
labels?: number[],
|
|
131
|
+
): ConfusionMatrixResult {
|
|
132
|
+
validateInputs(yTrue, yPred);
|
|
133
|
+
const resolvedLabels =
|
|
134
|
+
labels && labels.length > 0
|
|
135
|
+
? labels.slice()
|
|
136
|
+
: Array.from(new Set([...yTrue, ...yPred])).sort((a, b) => a - b);
|
|
137
|
+
if (resolvedLabels.length === 0) {
|
|
138
|
+
throw new Error("confusionMatrix requires at least one label.");
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
const labelToIndex = new Map<number, number>();
|
|
142
|
+
for (let i = 0; i < resolvedLabels.length; i += 1) {
|
|
143
|
+
labelToIndex.set(resolvedLabels[i], i);
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
const matrix = Array.from({ length: resolvedLabels.length }, () =>
|
|
147
|
+
new Array<number>(resolvedLabels.length).fill(0),
|
|
148
|
+
);
|
|
149
|
+
|
|
150
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
151
|
+
const trueLabel = yTrue[i];
|
|
152
|
+
const predLabel = yPred[i];
|
|
153
|
+
const trueIndex = labelToIndex.get(trueLabel);
|
|
154
|
+
const predIndex = labelToIndex.get(predLabel);
|
|
155
|
+
if (trueIndex === undefined || predIndex === undefined) {
|
|
156
|
+
continue;
|
|
157
|
+
}
|
|
158
|
+
matrix[trueIndex][predIndex] += 1;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
return { labels: resolvedLabels, matrix };
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
export function logLoss(yTrue: number[], yPredProb: number[], eps = 1e-15): number {
|
|
165
|
+
validateInputs(yTrue, yPredProb);
|
|
166
|
+
validateBinaryTargets(yTrue);
|
|
167
|
+
if (!Number.isFinite(eps) || eps <= 0 || eps >= 0.5) {
|
|
168
|
+
throw new Error(`eps must be finite and in (0, 0.5). Got ${eps}.`);
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
let total = 0;
|
|
172
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
173
|
+
const p1 = clampProbability(yPredProb[i], eps);
|
|
174
|
+
const p0 = 1 - p1;
|
|
175
|
+
total += -(yTrue[i] * Math.log(p1) + (1 - yTrue[i]) * Math.log(p0));
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
return total / yTrue.length;
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
/**
 * Area under the ROC curve for binary 0/1 labels and continuous scores,
 * computed via the rank-sum (Mann–Whitney U) identity:
 * AUC = (R+ - n+(n+ + 1)/2) / (n+ * n-), where R+ is the sum of the
 * ascending-score ranks of the positive samples. Tied scores receive the
 * average of their rank range — the standard tie correction.
 * Throws when either class is absent, since AUC is undefined then.
 */
export function rocAucScore(yTrue: number[], yScore: number[]): number {
  validateInputs(yTrue, yScore);
  validateBinaryTargets(yTrue);

  let positiveCount = 0;
  for (let i = 0; i < yTrue.length; i += 1) {
    if (yTrue[i] === 1) {
      positiveCount += 1;
    }
  }
  const negativeCount = yTrue.length - positiveCount;
  if (positiveCount === 0 || negativeCount === 0) {
    throw new Error("rocAucScore requires both positive and negative samples.");
  }

  // Pair each score with its label, then rank by ascending score.
  const pairs = yScore.map((score, idx) => ({ score, label: yTrue[idx] }));
  pairs.sort((a, b) => a.score - b.score);

  // Average ranks for ties. Each run of equal scores [cursor, tieEnd) gets
  // the mean of the 1-based ranks it spans.
  const ranks = new Array<number>(pairs.length);
  let cursor = 0;
  while (cursor < pairs.length) {
    let tieEnd = cursor + 1;
    while (tieEnd < pairs.length && pairs[tieEnd].score === pairs[cursor].score) {
      tieEnd += 1;
    }
    const startRank = cursor + 1;
    const endRank = tieEnd;
    const averageRank = 0.5 * (startRank + endRank);
    for (let i = cursor; i < tieEnd; i += 1) {
      ranks[i] = averageRank;
    }
    cursor = tieEnd;
  }

  // Sum the ranks of the positive samples only.
  let rankSumPositives = 0;
  for (let i = 0; i < pairs.length; i += 1) {
    if (pairs[i].label === 1) {
      rankSumPositives += ranks[i];
    }
  }

  // U statistic normalized by the number of positive/negative pairs.
  const u = rankSumPositives - (positiveCount * (positiveCount + 1)) / 2;
  return u / (positiveCount * negativeCount);
}
|
|
226
|
+
|
|
227
|
+
/**
 * Per-label precision/recall/F1/support plus accuracy and macro/weighted
 * averages, derived from a single confusion matrix.
 *
 * Per-label values: precision uses the matrix column sum (predicted count),
 * recall uses the row sum (true count); zero denominators yield 0.
 *
 * NOTE(review): when an explicit `labels` subset excludes observed classes,
 * the weighted averages still divide by yTrue.length rather than the sum of
 * included supports — confirm this is the intended convention.
 */
export function classificationReport(
  yTrue: number[],
  yPred: number[],
  labels?: number[],
): ClassificationReportResult {
  validateInputs(yTrue, yPred);

  const { labels: resolvedLabels, matrix } = confusionMatrix(yTrue, yPred, labels);
  const perLabel: Record<string, ClassificationReportLabelMetrics> = {};

  // Running sums for the macro (unweighted) and weighted aggregates.
  let macroPrecision = 0;
  let macroRecall = 0;
  let macroF1 = 0;
  let weightedPrecision = 0;
  let weightedRecall = 0;
  let weightedF1 = 0;

  for (let labelIndex = 0; labelIndex < resolvedLabels.length; labelIndex += 1) {
    const label = resolvedLabels[labelIndex];
    // rowSum = true count (support); colSum = predicted count.
    let rowSum = 0;
    let colSum = 0;
    for (let j = 0; j < resolvedLabels.length; j += 1) {
      rowSum += matrix[labelIndex][j];
      colSum += matrix[j][labelIndex];
    }

    const tp = matrix[labelIndex][labelIndex];
    const precision = colSum === 0 ? 0 : tp / colSum;
    const recall = rowSum === 0 ? 0 : tp / rowSum;
    const denom = precision + recall;
    const f1 = denom === 0 ? 0 : (2 * precision * recall) / denom;

    perLabel[String(label)] = {
      precision,
      recall,
      f1Score: f1,
      support: rowSum,
    };

    macroPrecision += precision;
    macroRecall += recall;
    macroF1 += f1;
    weightedPrecision += precision * rowSum;
    weightedRecall += recall * rowSum;
    weightedF1 += f1 * rowSum;
  }

  const nLabels = resolvedLabels.length;
  const totalSupport = yTrue.length;

  return {
    labels: resolvedLabels,
    perLabel,
    accuracy: accuracyScore(yTrue, yPred),
    macroAvg: {
      precision: macroPrecision / nLabels,
      recall: macroRecall / nLabels,
      f1Score: macroF1 / nLabels,
      support: totalSupport,
    },
    weightedAvg: {
      precision: weightedPrecision / totalSupport,
      recall: weightedRecall / totalSupport,
      f1Score: weightedF1 / totalSupport,
      support: totalSupport,
    },
  };
}
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import { mean } from "../utils/linalg";
|
|
2
|
+
|
|
3
|
+
function validateInputs(yTrue: number[], yPred: number[]): void {
|
|
4
|
+
if (yTrue.length === 0 || yPred.length === 0) {
|
|
5
|
+
throw new Error("yTrue and yPred must be non-empty.");
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
if (yTrue.length !== yPred.length) {
|
|
9
|
+
throw new Error(`Length mismatch: yTrue=${yTrue.length}, yPred=${yPred.length}.`);
|
|
10
|
+
}
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
export function meanSquaredError(yTrue: number[], yPred: number[]): number {
|
|
14
|
+
validateInputs(yTrue, yPred);
|
|
15
|
+
let total = 0;
|
|
16
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
17
|
+
const diff = yTrue[i] - yPred[i];
|
|
18
|
+
total += diff * diff;
|
|
19
|
+
}
|
|
20
|
+
return total / yTrue.length;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
export function meanAbsoluteError(yTrue: number[], yPred: number[]): number {
|
|
24
|
+
validateInputs(yTrue, yPred);
|
|
25
|
+
let total = 0;
|
|
26
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
27
|
+
total += Math.abs(yTrue[i] - yPred[i]);
|
|
28
|
+
}
|
|
29
|
+
return total / yTrue.length;
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
export function r2Score(yTrue: number[], yPred: number[]): number {
|
|
33
|
+
validateInputs(yTrue, yPred);
|
|
34
|
+
|
|
35
|
+
const yMean = mean(yTrue);
|
|
36
|
+
let ssRes = 0;
|
|
37
|
+
let ssTot = 0;
|
|
38
|
+
|
|
39
|
+
for (let i = 0; i < yTrue.length; i += 1) {
|
|
40
|
+
const residual = yTrue[i] - yPred[i];
|
|
41
|
+
const centered = yTrue[i] - yMean;
|
|
42
|
+
ssRes += residual * residual;
|
|
43
|
+
ssTot += centered * centered;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
if (ssTot === 0) {
|
|
47
|
+
return ssRes === 0 ? 1 : 0;
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
return 1 - ssRes / ssTot;
|
|
51
|
+
}
|