numo-libsvm 1.1.2 → 2.1.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +13 -0
- data/LICENSE.txt +1 -1
- data/README.md +1 -5
- data/ext/numo/libsvm/extconf.rb +6 -16
- data/ext/numo/libsvm/libsvmext.cpp +220 -0
- data/ext/numo/libsvm/libsvmext.hpp +721 -0
- data/ext/numo/libsvm/src/COPYRIGHT +31 -0
- data/ext/numo/libsvm/{libsvm → src}/svm.cpp +134 -18
- data/ext/numo/libsvm/{libsvm → src}/svm.h +2 -1
- data/lib/numo/libsvm/version.rb +1 -1
- data/sig/numo/libsvm.rbs +1 -0
- metadata +12 -31
- data/.github/workflows/build.yml +0 -29
- data/.gitignore +0 -20
- data/.gitmodules +0 -3
- data/.rspec +0 -3
- data/CODE_OF_CONDUCT.md +0 -74
- data/Gemfile +0 -11
- data/Rakefile +0 -15
- data/Steepfile +0 -20
- data/ext/numo/libsvm/converter.c +0 -204
- data/ext/numo/libsvm/converter.h +0 -20
- data/ext/numo/libsvm/kernel_type.c +0 -22
- data/ext/numo/libsvm/kernel_type.h +0 -9
- data/ext/numo/libsvm/libsvmext.c +0 -578
- data/ext/numo/libsvm/libsvmext.h +0 -18
- data/ext/numo/libsvm/svm_model.c +0 -89
- data/ext/numo/libsvm/svm_model.h +0 -15
- data/ext/numo/libsvm/svm_parameter.c +0 -88
- data/ext/numo/libsvm/svm_parameter.h +0 -15
- data/ext/numo/libsvm/svm_problem.c +0 -90
- data/ext/numo/libsvm/svm_problem.h +0 -12
- data/ext/numo/libsvm/svm_type.c +0 -22
- data/ext/numo/libsvm/svm_type.h +0 -9
- data/numo-libsvm.gemspec +0 -47
data/ext/numo/libsvm/converter.c
DELETED
@@ -1,204 +0,0 @@
-
-#include "converter.h"
-
-VALUE int_vec_to_nary(int* const arr, int const size)
-{
-  int i;
-  size_t shape[1] = { size };
-  VALUE v = rb_narray_new(numo_cInt32, 1, shape);
-  int32_t* vp = (int32_t*)na_get_pointer_for_write(v);
-  for (i = 0; i < size; i++) { vp[i] = (int32_t)arr[i]; }
-  return v;
-}
-
-int* nary_to_int_vec(VALUE vec_val)
-{
-  int i;
-  int n_elements;
-  narray_t* vec_nary;
-  int32_t* vec_pt;
-  int* vec;
-
-  if (vec_val == Qnil) return NULL;
-
-  GetNArray(vec_val, vec_nary);
-  n_elements = (int)NA_SHAPE(vec_nary)[0];
-
-  vec = ALLOC_N(int, n_elements);
-  vec_pt = (int32_t*)na_get_pointer_for_read(vec_val);
-  for (i = 0; i < n_elements; i++) { vec[i] = (int)vec_pt[i]; }
-
-  RB_GC_GUARD(vec_val);
-
-  return vec;
-}
-
-VALUE dbl_vec_to_nary(double* const arr, int const size)
-{
-  int i;
-  size_t shape[1] = { size };
-  VALUE v = rb_narray_new(numo_cDFloat, 1, shape);
-  double* vp = (double*)na_get_pointer_for_write(v);
-  for (i = 0; i < size; i++) { vp[i] = arr[i]; }
-  return v;
-}
-
-double* nary_to_dbl_vec(VALUE vec_val)
-{
-  int n_elements;
-  narray_t* vec_nary;
-  double* vec_pt;
-  double* vec;
-
-  if (vec_val == Qnil) return NULL;
-
-  GetNArray(vec_val, vec_nary);
-  n_elements = (int)NA_SHAPE(vec_nary)[0];
-
-  vec = ALLOC_N(double, n_elements);
-  vec_pt = (double*)na_get_pointer_for_read(vec_val);
-  memcpy(vec, vec_pt, n_elements * sizeof(double));
-
-  RB_GC_GUARD(vec_val);
-
-  return vec;
-}
-
-VALUE dbl_mat_to_nary(double** const mat, int const n_rows, int const n_cols)
-{
-  int i, j;
-  size_t shape[2] = { n_rows, n_cols };
-  VALUE v = rb_narray_new(numo_cDFloat, 2, shape);
-  double* vp = (double*)na_get_pointer_for_write(v);
-
-  for (i = 0; i < n_rows; i++) {
-    for (j = 0; j < n_cols; j++) {
-      vp[i * n_cols + j] = mat[i][j];
-    }
-  }
-
-  return v;
-}
-
-double** nary_to_dbl_mat(VALUE mat_val)
-{
-  int i, j;
-  int n_rows, n_cols;
-  narray_t* mat_nary;
-  double* mat_pt;
-  double** mat;
-
-  if (mat_val == Qnil) return NULL;
-
-  GetNArray(mat_val, mat_nary);
-  n_rows = (int)NA_SHAPE(mat_nary)[0];
-  n_cols = (int)NA_SHAPE(mat_nary)[1];
-
-  mat_pt = (double*)na_get_pointer_for_read(mat_val);
-  mat = ALLOC_N(double*, n_rows);
-  for (i = 0; i < n_rows; i++) {
-    mat[i] = ALLOC_N(double, n_cols);
-    for (j = 0; j < n_cols; j++) {
-      mat[i][j] = mat_pt[i * n_cols + j];
-    }
-  }
-
-  RB_GC_GUARD(mat_val);
-
-  return mat;
-}
-
-VALUE svm_nodes_to_nary(struct svm_node** const support_vecs, const int n_support_vecs)
-{
-  int i, j;
-  int n_dimensions = 0;
-  size_t shape[2] = { n_support_vecs, 1 };
-  VALUE v;
-  double* vp;
-
-  for (i = 0; i < n_support_vecs; i++) {
-    for (j = 0; support_vecs[i][j].index != -1; j++) {
-      if (n_dimensions < support_vecs[i][j].index) {
-        n_dimensions = support_vecs[i][j].index;
-      }
-    }
-  }
-
-  shape[1] = n_dimensions;
-  v = rb_narray_new(numo_cDFloat, 2, shape);
-  vp = (double*)na_get_pointer_for_write(v);
-  memset(vp, 0, n_support_vecs * n_dimensions * sizeof(double));
-
-  for (i = 0; i < n_support_vecs; i++) {
-    for (j = 0; support_vecs[i][j].index != -1; j++) {
-      vp[i * n_dimensions + support_vecs[i][j].index - 1] = support_vecs[i][j].value;
-    }
-  }
-
-  return v;
-}
-
-struct svm_node** nary_to_svm_nodes(VALUE nary_val)
-{
-  int i, j, k;
-  int n_rows, n_cols, n_nonzero_cols;
-  narray_t* nary;
-  double* nary_pt;
-  struct svm_node** support_vecs;
-
-  if (nary_val == Qnil) return NULL;
-
-  GetNArray(nary_val, nary);
-  n_rows = (int)NA_SHAPE(nary)[0];
-  n_cols = (int)NA_SHAPE(nary)[1];
-
-  nary_pt = (double*)na_get_pointer_for_read(nary_val);
-  support_vecs = ALLOC_N(struct svm_node*, n_rows);
-  for (i = 0; i < n_rows; i++) {
-    n_nonzero_cols = 0;
-    for (j = 0; j < n_cols; j++) {
-      if (nary_pt[i * n_cols + j] != 0) {
-        n_nonzero_cols++;
-      }
-    }
-    support_vecs[i] = ALLOC_N(struct svm_node, n_nonzero_cols + 1);
-    for (j = 0, k = 0; j < n_cols; j++) {
-      if (nary_pt[i * n_cols + j] != 0) {
-        support_vecs[i][k].index = j + 1;
-        support_vecs[i][k].value = nary_pt[i * n_cols + j];
-        k++;
-      }
-    }
-    support_vecs[i][n_nonzero_cols].index = -1;
-    support_vecs[i][n_nonzero_cols].value = 0.0;
-  }
-
-  RB_GC_GUARD(nary_val);
-
-  return support_vecs;
-}
-
-struct svm_node* dbl_vec_to_svm_node(double* const arr, int const size)
-{
-  int i, j;
-  int n_nonzero_elements;
-  struct svm_node* node;
-
-  n_nonzero_elements = 0;
-  for (i = 0; i < size; i++) {
-    if (arr[i] != 0.0) n_nonzero_elements++;
-  }
-
-  node = ALLOC_N(struct svm_node, n_nonzero_elements + 1);
-  for (i = 0, j = 0; i < size; i++) {
-    if (arr[i] != 0.0) {
-      node[j].index = i + 1;
-      node[j].value = arr[i];
-      j++;
-    }
-  }
-  node[n_nonzero_elements].index = -1;
-  node[n_nonzero_elements].value = 0.0;
-
-  return node;
-}
data/ext/numo/libsvm/converter.h
DELETED
@@ -1,20 +0,0 @@
-#ifndef NUMO_LIBSVM_CONVERTER_H
-#define NUMO_LIBSVM_CONVERTER_H 1
-
-#include <string.h>
-#include <svm.h>
-#include <ruby.h>
-#include <numo/narray.h>
-#include <numo/template.h>
-
-VALUE int_vec_to_nary(int* const arr, int const size);
-int* nary_to_int_vec(VALUE vec_val);
-VALUE dbl_vec_to_nary(double* const arr, int const size);
-double* nary_to_dbl_vec(VALUE vec_val);
-VALUE dbl_mat_to_nary(double** const mat, int const n_rows, int const n_cols);
-double** nary_to_dbl_mat(VALUE mat_val);
-VALUE svm_nodes_to_nary(struct svm_node** const support_vecs, const int n_support_vecs);
-struct svm_node** nary_to_svm_nodes(VALUE nary_val);
-struct svm_node* dbl_vec_to_svm_node(double* const arr, int const size);
-
-#endif /* NUMO_LIBSVM_CONVERTER_H */
data/ext/numo/libsvm/kernel_type.c
DELETED
@@ -1,22 +0,0 @@
-#include "kernel_type.h"
-
-RUBY_EXTERN VALUE mLibsvm;
-
-void rb_init_kernel_type_module()
-{
-  /**
-   * Document-module: Numo::Libsvm::KernelType
-   * The module consisting of constants for kernel type that used for parameter of LIBSVM.
-   */
-  VALUE mKernelType = rb_define_module_under(mLibsvm, "KernelType");
-  /* Linear kernel; u' * v */
-  rb_define_const(mKernelType, "LINEAR", INT2NUM(LINEAR));
-  /* Polynomial kernel; (gamma * u' * v + coef0)^degree */
-  rb_define_const(mKernelType, "POLY", INT2NUM(POLY));
-  /* RBF kernel; exp(-gamma * ||u - v||^2) */
-  rb_define_const(mKernelType, "RBF", INT2NUM(RBF));
-  /* Sigmoid kernel; tanh(gamma * u' * v + coef0) */
-  rb_define_const(mKernelType, "SIGMOID", INT2NUM(SIGMOID));
-  /* Precomputed kernel */
-  rb_define_const(mKernelType, "PRECOMPUTED", INT2NUM(PRECOMPUTED));
-}
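Note: the constants registered above are thin wrappers over LIBSVM's own kernel enumerators (LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED from svm.h), which end up in the kernel_type field of struct svm_parameter. The sketch below shows, under that assumption, how such a parameter struct is typically filled on the C side before calling svm_train; field names come from LIBSVM's public header, and the make_rbf_c_svc_param helper is illustrative rather than gem code.

#include <stdio.h>
#include <svm.h>  /* LIBSVM public header: struct svm_parameter, kernel/SVM type enums */

/* Build an RBF C-SVC parameter set with explicit gamma and C.
 * Sketch only; the gem performs this translation inside its extension. */
static struct svm_parameter make_rbf_c_svc_param(double gamma, double c) {
  struct svm_parameter param = { 0 };
  param.svm_type = C_SVC;       /* classification */
  param.kernel_type = RBF;      /* exp(-gamma * ||u - v||^2) */
  param.gamma = gamma;
  param.C = c;
  param.cache_size = 100;       /* kernel cache size in MB */
  param.eps = 1e-3;             /* stopping tolerance */
  param.shrinking = 1;
  param.probability = 0;
  return param;
}

int main(void) {
  struct svm_parameter param = make_rbf_c_svc_param(0.5, 1.0);
  printf("kernel_type=%d (RBF=%d)\n", param.kernel_type, RBF);
  return 0;
}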