classifier 1.4.4 → 2.1.0

This diff shows the contents of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
@@ -0,0 +1,387 @@
+ /*
+  * matrix.c
+  * Matrix implementation for Classifier native linear algebra
+  */
+
+ #include "linalg.h"
+
+ const rb_data_type_t cmatrix_type = {
+     .wrap_struct_name = "Classifier::Linalg::Matrix",
+     .function = {
+         .dmark = NULL,
+         .dfree = cmatrix_free,
+         .dsize = NULL,
+     },
+     .flags = RUBY_TYPED_FREE_IMMEDIATELY
+ };
+
+ /* Allocate a new CMatrix */
+ CMatrix *cmatrix_alloc(size_t rows, size_t cols)
+ {
+     CMatrix *m = ALLOC(CMatrix);
+     m->rows = rows;
+     m->cols = cols;
+     m->data = ALLOC_N(double, rows * cols);
+     memset(m->data, 0, rows * cols * sizeof(double));
+     return m;
+ }
+
+ /* Free a CMatrix */
+ void cmatrix_free(void *ptr)
+ {
+     CMatrix *m = (CMatrix *)ptr;
+     if (m) {
+         if (m->data) xfree(m->data);
+         xfree(m);
+     }
+ }
+
+ /* Transpose a matrix */
+ CMatrix *cmatrix_transpose(CMatrix *m)
+ {
+     CMatrix *result = cmatrix_alloc(m->cols, m->rows);
+     for (size_t i = 0; i < m->rows; i++) {
+         for (size_t j = 0; j < m->cols; j++) {
+             MAT_AT(result, j, i) = MAT_AT(m, i, j);
+         }
+     }
+     return result;
+ }
+
+ /* Matrix multiplication */
+ CMatrix *cmatrix_multiply(CMatrix *a, CMatrix *b)
+ {
+     if (a->cols != b->rows) {
+         rb_raise(rb_eArgError, "Matrix dimensions don't match for multiplication: %ldx%ld * %ldx%ld",
+                  (long)a->rows, (long)a->cols, (long)b->rows, (long)b->cols);
+     }
+
+     CMatrix *result = cmatrix_alloc(a->rows, b->cols);
+
+     for (size_t i = 0; i < a->rows; i++) {
+         for (size_t j = 0; j < b->cols; j++) {
+             double sum = 0.0;
+             for (size_t k = 0; k < a->cols; k++) {
+                 sum += MAT_AT(a, i, k) * MAT_AT(b, k, j);
+             }
+             MAT_AT(result, i, j) = sum;
+         }
+     }
+     return result;
+ }
+
+ /* Matrix-vector multiplication */
+ CVector *cmatrix_multiply_vector(CMatrix *m, CVector *v)
+ {
+     if (m->cols != v->size) {
+         rb_raise(rb_eArgError, "Matrix columns (%ld) must match vector size (%ld)",
+                  (long)m->cols, (long)v->size);
+     }
+
+     CVector *result = cvector_alloc(m->rows);
+
+     for (size_t i = 0; i < m->rows; i++) {
+         double sum = 0.0;
+         for (size_t j = 0; j < m->cols; j++) {
+             sum += MAT_AT(m, i, j) * v->data[j];
+         }
+         result->data[i] = sum;
+     }
+     return result;
+ }
+
+ /* Create diagonal matrix from vector */
+ CMatrix *cmatrix_diagonal(CVector *v)
+ {
+     CMatrix *result = cmatrix_alloc(v->size, v->size);
+     for (size_t i = 0; i < v->size; i++) {
+         MAT_AT(result, i, i) = v->data[i];
+     }
+     return result;
+ }
+
+ /* Ruby allocation function */
+ static VALUE rb_cmatrix_alloc_func(VALUE klass)
+ {
+     CMatrix *m = cmatrix_alloc(0, 0);
+     return TypedData_Wrap_Struct(klass, &cmatrix_type, m);
+ }
+
+ /*
+  * Matrix.alloc(*rows)
+  * Create a new matrix from nested arrays
+  */
+ static VALUE rb_cmatrix_s_alloc(int argc, VALUE *argv, VALUE klass)
+ {
+     CMatrix *m;
+     VALUE result;
+
+     if (argc == 0) {
+         rb_raise(rb_eArgError, "Matrix.alloc requires at least one row");
+     }
+
+     /* Handle single array argument containing rows */
+     VALUE rows_ary;
+     if (argc == 1 && RB_TYPE_P(argv[0], T_ARRAY)) {
+         VALUE first = rb_ary_entry(argv[0], 0);
+         if (RB_TYPE_P(first, T_ARRAY)) {
+             rows_ary = argv[0];
+         } else {
+             /* Single row */
+             rows_ary = rb_ary_new_from_values(argc, argv);
+         }
+     } else {
+         rows_ary = rb_ary_new_from_values(argc, argv);
+     }
+
+     long num_rows = RARRAY_LEN(rows_ary);
+     if (num_rows == 0) {
+         rb_raise(rb_eArgError, "Matrix cannot be empty");
+     }
+
+     VALUE first_row = rb_ary_entry(rows_ary, 0);
+     long num_cols = RARRAY_LEN(first_row);
+
+     m = cmatrix_alloc((size_t)num_rows, (size_t)num_cols);
+
+     for (long i = 0; i < num_rows; i++) {
+         VALUE row = rb_ary_entry(rows_ary, i);
+         if (RARRAY_LEN(row) != num_cols) {
+             cmatrix_free(m);
+             rb_raise(rb_eArgError, "All rows must have the same length");
+         }
+         for (long j = 0; j < num_cols; j++) {
+             MAT_AT(m, i, j) = NUM2DBL(rb_ary_entry(row, j));
+         }
+     }
+
+     result = TypedData_Wrap_Struct(klass, &cmatrix_type, m);
+     return result;
+ }
+
+ /*
+  * Matrix.diag(vector_or_array)
+  * Create diagonal matrix from vector
+  */
+ static VALUE rb_cmatrix_s_diag(VALUE klass, VALUE arg)
+ {
+     CVector *v;
+     CMatrix *m;
+     int free_v = 0;
+
+     if (rb_obj_is_kind_of(arg, cClassifierVector)) {
+         GET_CVECTOR(arg, v);
+     } else if (RB_TYPE_P(arg, T_ARRAY)) {
+         long len = RARRAY_LEN(arg);
+         v = cvector_alloc((size_t)len);
+         free_v = 1;
+         for (long i = 0; i < len; i++) {
+             v->data[i] = NUM2DBL(rb_ary_entry(arg, i));
+         }
+     } else {
+         rb_raise(rb_eTypeError, "Expected Vector or Array");
+         return Qnil;
+     }
+
+     m = cmatrix_diagonal(v);
+     if (free_v) cvector_free(v);
+
+     return TypedData_Wrap_Struct(klass, &cmatrix_type, m);
+ }
+
+ /* Matrix#size -> [rows, cols] */
+ static VALUE rb_cmatrix_size(VALUE self)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     return rb_ary_new_from_args(2, SIZET2NUM(m->rows), SIZET2NUM(m->cols));
+ }
+
+ /* Matrix#row_size */
+ static VALUE rb_cmatrix_row_size(VALUE self)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     return SIZET2NUM(m->rows);
+ }
+
+ /* Matrix#column_size */
+ static VALUE rb_cmatrix_column_size(VALUE self)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     return SIZET2NUM(m->cols);
+ }
+
+ /* Matrix#[](i, j) */
+ static VALUE rb_cmatrix_aref(VALUE self, VALUE row, VALUE col)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     long i = NUM2LONG(row);
+     long j = NUM2LONG(col);
+
+     if (i < 0) i += m->rows;
+     if (j < 0) j += m->cols;
+     if (i < 0 || (size_t)i >= m->rows || j < 0 || (size_t)j >= m->cols) {
+         rb_raise(rb_eIndexError, "index out of bounds");
+     }
+
+     return DBL2NUM(MAT_AT(m, i, j));
+ }
+
+ /* Matrix#[]=(i, j, val) */
+ static VALUE rb_cmatrix_aset(VALUE self, VALUE row, VALUE col, VALUE val)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     long i = NUM2LONG(row);
+     long j = NUM2LONG(col);
+
+     if (i < 0) i += m->rows;
+     if (j < 0) j += m->cols;
+     if (i < 0 || (size_t)i >= m->rows || j < 0 || (size_t)j >= m->cols) {
+         rb_raise(rb_eIndexError, "index out of bounds");
+     }
+
+     MAT_AT(m, i, j) = NUM2DBL(val);
+     return val;
+ }
+
+ /* Matrix#trans (transpose) */
+ static VALUE rb_cmatrix_trans(VALUE self)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     CMatrix *result = cmatrix_transpose(m);
+     return TypedData_Wrap_Struct(cClassifierMatrix, &cmatrix_type, result);
+ }
+
+ /* Matrix#column(n) -> Vector */
+ static VALUE rb_cmatrix_column(VALUE self, VALUE col_idx)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     long j = NUM2LONG(col_idx);
+
+     if (j < 0) j += m->cols;
+     if (j < 0 || (size_t)j >= m->cols) {
+         rb_raise(rb_eIndexError, "column index out of bounds");
+     }
+
+     CVector *v = cvector_alloc(m->rows);
+     v->is_col = 1;
+     for (size_t i = 0; i < m->rows; i++) {
+         v->data[i] = MAT_AT(m, i, j);
+     }
+
+     return TypedData_Wrap_Struct(cClassifierVector, &cvector_type, v);
+ }
+
+ /* Matrix#row(n) -> Vector */
+ static VALUE rb_cmatrix_row(VALUE self, VALUE row_idx)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     long i = NUM2LONG(row_idx);
+
+     if (i < 0) i += m->rows;
+     if (i < 0 || (size_t)i >= m->rows) {
+         rb_raise(rb_eIndexError, "row index out of bounds");
+     }
+
+     CVector *v = cvector_alloc(m->cols);
+     v->is_col = 0;
+     memcpy(v->data, &MAT_AT(m, i, 0), m->cols * sizeof(double));
+
+     return TypedData_Wrap_Struct(cClassifierVector, &cvector_type, v);
+ }
+
+ /* Matrix#to_a */
+ static VALUE rb_cmatrix_to_a(VALUE self)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     VALUE ary = rb_ary_new_capa((long)m->rows);
+
+     for (size_t i = 0; i < m->rows; i++) {
+         VALUE row = rb_ary_new_capa((long)m->cols);
+         for (size_t j = 0; j < m->cols; j++) {
+             rb_ary_push(row, DBL2NUM(MAT_AT(m, i, j)));
+         }
+         rb_ary_push(ary, row);
+     }
+     return ary;
+ }
+
+ /* Matrix#* - multiply with matrix or vector */
+ static VALUE rb_cmatrix_mul(VALUE self, VALUE other)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+
+     if (rb_obj_is_kind_of(other, cClassifierMatrix)) {
+         CMatrix *b;
+         GET_CMATRIX(other, b);
+         CMatrix *result = cmatrix_multiply(m, b);
+         return TypedData_Wrap_Struct(cClassifierMatrix, &cmatrix_type, result);
+     } else if (rb_obj_is_kind_of(other, cClassifierVector)) {
+         CVector *v;
+         GET_CVECTOR(other, v);
+         CVector *result = cmatrix_multiply_vector(m, v);
+         return TypedData_Wrap_Struct(cClassifierVector, &cvector_type, result);
+     } else if (RB_TYPE_P(other, T_FLOAT) || RB_TYPE_P(other, T_FIXNUM)) {
+         /* Scalar multiplication */
+         double scalar = NUM2DBL(other);
+         CMatrix *result = cmatrix_alloc(m->rows, m->cols);
+         for (size_t i = 0; i < m->rows * m->cols; i++) {
+             result->data[i] = m->data[i] * scalar;
+         }
+         return TypedData_Wrap_Struct(cClassifierMatrix, &cmatrix_type, result);
+     }
+
+     rb_raise(rb_eTypeError, "Cannot multiply Matrix with %s", rb_obj_classname(other));
+     return Qnil;
+ }
+
+ /* Matrix#_dump for Marshal */
+ static VALUE rb_cmatrix_dump(VALUE self, VALUE depth)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+     VALUE ary = rb_cmatrix_to_a(self);
+     return rb_marshal_dump(ary, Qnil);
+ }
+
+ /* Matrix._load for Marshal */
+ static VALUE rb_cmatrix_s_load(VALUE klass, VALUE str)
+ {
+     VALUE ary = rb_marshal_load(str);
+     long argc = RARRAY_LEN(ary);
+     VALUE *argv = RARRAY_PTR(ary);
+     return rb_cmatrix_s_alloc((int)argc, argv, klass);
+ }
+
+ void Init_matrix(void)
+ {
+     cClassifierMatrix = rb_define_class_under(mClassifierLinalg, "Matrix", rb_cObject);
+
+     rb_define_alloc_func(cClassifierMatrix, rb_cmatrix_alloc_func);
+     rb_define_singleton_method(cClassifierMatrix, "alloc", rb_cmatrix_s_alloc, -1);
+     rb_define_singleton_method(cClassifierMatrix, "diag", rb_cmatrix_s_diag, 1);
+     rb_define_singleton_method(cClassifierMatrix, "diagonal", rb_cmatrix_s_diag, 1);
+     rb_define_singleton_method(cClassifierMatrix, "_load", rb_cmatrix_s_load, 1);
+
+     rb_define_method(cClassifierMatrix, "size", rb_cmatrix_size, 0);
+     rb_define_method(cClassifierMatrix, "row_size", rb_cmatrix_row_size, 0);
+     rb_define_method(cClassifierMatrix, "column_size", rb_cmatrix_column_size, 0);
+     rb_define_method(cClassifierMatrix, "[]", rb_cmatrix_aref, 2);
+     rb_define_method(cClassifierMatrix, "[]=", rb_cmatrix_aset, 3);
+     rb_define_method(cClassifierMatrix, "trans", rb_cmatrix_trans, 0);
+     rb_define_alias(cClassifierMatrix, "transpose", "trans");
+     rb_define_method(cClassifierMatrix, "column", rb_cmatrix_column, 1);
+     rb_define_method(cClassifierMatrix, "row", rb_cmatrix_row, 1);
+     rb_define_method(cClassifierMatrix, "to_a", rb_cmatrix_to_a, 0);
+     rb_define_method(cClassifierMatrix, "*", rb_cmatrix_mul, 1);
+     rb_define_method(cClassifierMatrix, "_dump", rb_cmatrix_dump, 1);
+ }
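
Editor's note: the CMatrix struct and the MAT_AT accessor used throughout matrix.c are declared in linalg.h, which is not part of this diff. The flat indexing in the scalar branch of rb_cmatrix_mul and the single memcpy in rb_cmatrix_row suggest a contiguous row-major layout, data[i * cols + j]. The following standalone sketch is not package code; it assumes that layout (with a stand-in AT macro and Mat type) and mirrors the triple loop of cmatrix_multiply, without any Ruby dependencies.

    /* Minimal sketch, assuming MAT_AT(m, i, j) == m->data[i * m->cols + j]. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define AT(m, i, j) ((m)->data[(i) * (m)->cols + (j)])  /* assumed layout */

    typedef struct { size_t rows, cols; double *data; } Mat;

    static Mat mat_new(size_t rows, size_t cols)
    {
        Mat m = { rows, cols, calloc(rows * cols, sizeof(double)) };
        return m;
    }

    /* C = A * B, same i-j-k loop order as cmatrix_multiply */
    static Mat mat_mul(const Mat *a, const Mat *b)
    {
        Mat c = mat_new(a->rows, b->cols);
        for (size_t i = 0; i < a->rows; i++)
            for (size_t j = 0; j < b->cols; j++) {
                double sum = 0.0;
                for (size_t k = 0; k < a->cols; k++)
                    sum += AT(a, i, k) * AT(b, k, j);
                AT(&c, i, j) = sum;
            }
        return c;
    }

    int main(void)
    {
        Mat a = mat_new(2, 3), b = mat_new(3, 2);
        double av[] = {1, 2, 3, 4, 5, 6}, bv[] = {7, 8, 9, 10, 11, 12};
        memcpy(a.data, av, sizeof av);
        memcpy(b.data, bv, sizeof bv);
        Mat c = mat_mul(&a, &b);
        for (size_t i = 0; i < c.rows; i++) {
            for (size_t j = 0; j < c.cols; j++) printf("%g ", AT(&c, i, j));
            printf("\n");
        }
        free(a.data); free(b.data); free(c.data);
        return 0;
    }

Compiled with, for example, cc -O2 sketch.c, this prints 58 64 / 139 154, the product of the two example matrices; the extension's cmatrix_multiply performs the same computation on its own CMatrix type.
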
@@ -0,0 +1,208 @@
+ /*
+  * svd.c
+  * Jacobi SVD implementation for Classifier native linear algebra
+  *
+  * This is a port of the pure Ruby SVD implementation from
+  * lib/classifier/extensions/vector.rb
+  */
+
+ #include "linalg.h"
+
+ #define SVD_MAX_SWEEPS 20
+ #define SVD_CONVERGENCE_THRESHOLD 0.001
+
+ /* Helper: Create identity matrix */
+ static CMatrix *cmatrix_identity(size_t n)
+ {
+     CMatrix *m = cmatrix_alloc(n, n);
+     for (size_t i = 0; i < n; i++) {
+         MAT_AT(m, i, i) = 1.0;
+     }
+     return m;
+ }
+
+ /* Helper: Clone a matrix */
+ static CMatrix *cmatrix_clone(CMatrix *m)
+ {
+     CMatrix *result = cmatrix_alloc(m->rows, m->cols);
+     memcpy(result->data, m->data, m->rows * m->cols * sizeof(double));
+     return result;
+ }
+
+ /* Helper: Apply Jacobi rotation to matrix Q and accumulator V */
+ static void apply_jacobi_rotation(CMatrix *q, CMatrix *v, size_t p, size_t r,
+                                   double cosine, double sine)
+ {
+     size_t n = q->rows;
+
+     /* Apply rotation to Q: Q = R^T * Q * R */
+     /* First: Q = Q * R (affects columns p and r) */
+     for (size_t i = 0; i < n; i++) {
+         double qip = MAT_AT(q, i, p);
+         double qir = MAT_AT(q, i, r);
+         MAT_AT(q, i, p) = cosine * qip - sine * qir;
+         MAT_AT(q, i, r) = sine * qip + cosine * qir;
+     }
+
+     /* Second: Q = R^T * Q (affects rows p and r) */
+     for (size_t j = 0; j < n; j++) {
+         double qpj = MAT_AT(q, p, j);
+         double qrj = MAT_AT(q, r, j);
+         MAT_AT(q, p, j) = cosine * qpj - sine * qrj;
+         MAT_AT(q, r, j) = sine * qpj + cosine * qrj;
+     }
+
+     /* Accumulate rotation in V: V = V * R */
+     for (size_t i = 0; i < v->rows; i++) {
+         double vip = MAT_AT(v, i, p);
+         double vir = MAT_AT(v, i, r);
+         MAT_AT(v, i, p) = cosine * vip - sine * vir;
+         MAT_AT(v, i, r) = sine * vip + cosine * vir;
+     }
+ }
+
+ /*
+  * Jacobi SVD decomposition
+  *
+  * Computes A = U * S * V^T where:
+  * - U is m x m orthogonal
+  * - S is m x n diagonal (returned as vector of singular values)
+  * - V is n x n orthogonal
+  *
+  * Based on one-sided Jacobi algorithm from vector.rb
+  */
+ void jacobi_svd(CMatrix *a, CMatrix **u_out, CMatrix **v_out, CVector **s_out)
+ {
+     size_t m = a->rows;
+     size_t n = a->cols;
+     int transposed = 0;
+     CMatrix *work_matrix;
+
+     /* Ensure we work with a "tall" matrix for numerical stability */
+     if (m >= n) {
+         /* A^T * A */
+         CMatrix *at = cmatrix_transpose(a);
+         work_matrix = cmatrix_multiply(at, a);
+         cmatrix_free(at);
+     } else {
+         /* A * A^T */
+         CMatrix *at = cmatrix_transpose(a);
+         work_matrix = cmatrix_multiply(a, at);
+         cmatrix_free(at);
+         transposed = 1;
+     }
+
+     size_t size = work_matrix->rows; /* This is min(m, n) effectively */
+     CMatrix *q = cmatrix_clone(work_matrix);
+     CMatrix *v = cmatrix_identity(size);
+     CMatrix *prev_q = NULL;
+
+     /* Jacobi iteration */
+     for (int sweep = 0; sweep < SVD_MAX_SWEEPS; sweep++) {
+         /* Apply rotations to diagonalize Q */
+         for (size_t p = 0; p < size - 1; p++) {
+             for (size_t r = p + 1; r < size; r++) {
+                 double qpr = MAT_AT(q, p, r);
+                 double qpp = MAT_AT(q, p, p);
+                 double qrr = MAT_AT(q, r, r);
+
+                 /* Compute rotation angle */
+                 double numerator = 2.0 * qpr;
+                 double denominator = qpp - qrr;
+                 double angle;
+
+                 if (fabs(denominator) < CLASSIFIER_EPSILON) {
+                     angle = (numerator >= 0) ? M_PI / 4.0 : -M_PI / 4.0;
+                 } else {
+                     angle = atan(numerator / denominator) / 2.0;
+                 }
+
+                 double cosine = cos(angle);
+                 double sine = sin(angle);
+
+                 apply_jacobi_rotation(q, v, p, r, cosine, sine);
+             }
+         }
+
+         /* Check for convergence */
+         if (sweep == 0) {
+             prev_q = cmatrix_clone(q);
+         } else {
+             double sum_diff = 0.0;
+             for (size_t i = 0; i < size; i++) {
+                 double diff = fabs(MAT_AT(q, i, i) - MAT_AT(prev_q, i, i));
+                 if (diff > SVD_CONVERGENCE_THRESHOLD) {
+                     sum_diff += diff;
+                 }
+             }
+
+             /* Update prev_q for next iteration */
+             memcpy(prev_q->data, q->data, size * size * sizeof(double));
+
+             if (sum_diff <= SVD_CONVERGENCE_THRESHOLD) {
+                 break;
+             }
+         }
+     }
+
+     if (prev_q) cmatrix_free(prev_q);
+
+     /* Extract singular values (sqrt of diagonal elements of Q) */
+     CVector *s = cvector_alloc(size);
+     for (size_t i = 0; i < size; i++) {
+         double val = MAT_AT(q, i, i);
+         s->data[i] = (val > 0) ? sqrt(val) : 0.0;
+     }
+
+     /* Compute U = A * V * S^(-1) or handle transposed case */
+     /* Create S^(-1) diagonal matrix */
+     CMatrix *s_inv = cmatrix_alloc(size, size);
+     for (size_t i = 0; i < size; i++) {
+         double sv = s->data[i];
+         MAT_AT(s_inv, i, i) = (sv > CLASSIFIER_EPSILON) ? (1.0 / sv) : 0.0;
+     }
+
+     CMatrix *u;
+     CMatrix *source = transposed ? cmatrix_transpose(a) : cmatrix_clone(a);
+
+     /* U = source * V * S^(-1) */
+     CMatrix *temp = cmatrix_multiply(source, v);
+     u = cmatrix_multiply(temp, s_inv);
+
+     cmatrix_free(temp);
+     cmatrix_free(s_inv);
+     cmatrix_free(source);
+     cmatrix_free(q);
+     cmatrix_free(work_matrix);
+
+     *u_out = u;
+     *v_out = v;
+     *s_out = s;
+ }
+
+ /* Ruby wrapper: Matrix#SV_decomp */
+ static VALUE rb_cmatrix_sv_decomp(int argc, VALUE *argv, VALUE self)
+ {
+     CMatrix *m;
+     GET_CMATRIX(self, m);
+
+     /* Optional max_sweeps argument (ignored for now, using default) */
+     (void)argc;
+     (void)argv;
+
+     CMatrix *u, *v;
+     CVector *s;
+     jacobi_svd(m, &u, &v, &s);
+
+     VALUE rb_u = TypedData_Wrap_Struct(cClassifierMatrix, &cmatrix_type, u);
+     VALUE rb_v = TypedData_Wrap_Struct(cClassifierMatrix, &cmatrix_type, v);
+     VALUE rb_s = TypedData_Wrap_Struct(cClassifierVector, &cvector_type, s);
+
+     return rb_ary_new_from_args(3, rb_u, rb_v, rb_s);
+ }
+
+ void Init_svd(void)
+ {
+     rb_define_method(cClassifierMatrix, "SV_decomp", rb_cmatrix_sv_decomp, -1);
+     rb_define_alias(cClassifierMatrix, "svd", "SV_decomp");
+ }