kmat 0.0.3 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/.rspec +1 -0
- data/CHANGELOG.md +10 -0
- data/README.md +11 -11
- data/ext/kmat/arith/binary.c +161 -139
- data/ext/kmat/arith/math.c +1 -1
- data/ext/kmat/arith/statistics.c +11 -11
- data/ext/kmat/arith/unary.c +6 -6
- data/ext/kmat/extconf.rb +3 -0
- data/ext/kmat/km_util.h +34 -13
- data/ext/kmat/kmat.h +3 -3
- data/ext/kmat/linalg/dla.c +185 -133
- data/ext/kmat/linalg/linalg.c +33 -17
- data/ext/kmat/linalg/norm.c +83 -69
- data/ext/kmat/linalg/vla.c +23 -23
- data/ext/kmat/linalg/working.c +42 -38
- data/ext/kmat/main.c +4 -4
- data/ext/kmat/smat/accessor.c +104 -104
- data/ext/kmat/smat/array.c +3 -3
- data/ext/kmat/smat/boxmuller.c +5 -5
- data/ext/kmat/smat/constructer.c +52 -52
- data/ext/kmat/smat/convert.c +21 -21
- data/ext/kmat/smat/elem.c +7 -7
- data/ext/kmat/smat/fund.c +37 -37
- data/ext/kmat/smat/share.c +28 -27
- data/ext/kmat/smat/smat.c +58 -42
- data/ext/kmat/smat/sort.c +148 -146
- data/kmat.gemspec +5 -4
- data/lib/kmat/accessor.rb +5 -5
- data/lib/kmat/linalg.rb +1 -2
- data/lib/kmat/random.rb +2 -2
- data/lib/kmat/version.rb +1 -1
- data/lib/kmat.rb +9 -9
- metadata +25 -10
data/ext/kmat/linalg/working.c
CHANGED
@@ -3,7 +3,7 @@
 void *
 km_alloc_and_copy(SMAT *smat)
 {
-	void *ret = ruby_xcalloc(
+	void *ret = ruby_xcalloc(LENGTH(smat), km_sizeof_vt(smat->vtype));
 	km_copy2work(ret, smat->m, smat);
 	return ret;
 }
@@ -18,7 +18,7 @@ km_alloc_if_needed(SMAT *smat, LAWORK *lawork)
 		lawork->ld = smat->ld;
 		lawork->need_to_free = false;
 	} else {
-		lawork->body = ruby_xcalloc(
+		lawork->body = ruby_xcalloc(smat->m*smat->n, km_sizeof_vt(smat->vtype));
 		lawork->ld = smat->m;
 		lawork->need_to_free = true;
 	}
@@ -41,23 +41,23 @@ km_alloc_if_needed_and_0clear(SMAT *smat, LAWORK *lawork)
 	km_alloc_if_needed(smat, lawork);
 	if ( lawork->need_to_free ) { return; }
 	if ( smat->vtype == VT_DOUBLE ) {
-		for (
+		for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) {
 			lawork->d[i+j*lawork->ld] = 0.0;
 		} }
 	} else if ( smat->vtype == VT_COMPLEX ) {
-		for (
+		for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) {
 			lawork->z[i+j*lawork->ld] = cpack(0.0, 0.0);
 		} }
 	} else if ( smat->vtype == VT_INT ) {
-		for (
+		for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) {
 			lawork->i[i+j*lawork->ld] = 0;
 		} }
 	} else if ( smat->vtype == VT_BOOL ) {
-		for (
+		for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) {
 			lawork->b[i+j*lawork->ld] = false;
 		} }
 	} else {
-		for (
+		for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) {
 			lawork->v[i+j*lawork->ld] = Qfalse;
 		} }
 	}
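(An aside for readers of these hunks: kmat stores matrices in column-major order with an explicit leading dimension, so entry (i, j) lives at offset i + j*ld. A minimal standalone sketch of that addressing follows; the names are illustrative, not kmat code.)

#include <stdio.h>

/* Zero an m-by-n block inside a column-major buffer whose allocated
 * column stride (leading dimension) is ld, with ld >= m. */
static void zero_block(double *a, size_t m, size_t n, size_t ld)
{
	for ( size_t i = 0; i < m; i++ ) { for ( size_t j = 0; j < n; j++ ) {
		a[i + j*ld] = 0.0;   /* entry (i, j) of the block */
	} }
}

int main(void)
{
	double buf[4*3];                            /* ld = 4 buffer holding a 3x3 block */
	for ( size_t k = 0; k < 12; k++ ) { buf[k] = 1.0; }
	zero_block(buf, 3, 3, 4);                   /* rows 0..2 cleared, row 3 untouched */
	for ( size_t k = 0; k < 12; k++ ) { printf("%g ", buf[k]); }
	printf("\n");
	return 0;
}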
@@ -72,68 +72,70 @@ struct km_c2w_arg {
 		bool *bwork;
 		VALUE *vwork;
 	};
-
+	size_t ld;
 };
 void
-km_c2w_func_d(double *ent,
+km_c2w_func_d(double *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	arg->dwork[i+j*arg->ld] = *ent;
 }
 void
-km_c2w_func_z(COMPLEX *ent,
+km_c2w_func_z(COMPLEX *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	arg->zwork[i+j*arg->ld] = *ent;
 }
 void
-km_c2w_func_i(int *ent,
+km_c2w_func_i(int *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	arg->iwork[i+j*arg->ld] = *ent;
 }
 void
-km_c2w_func_b(bool *ent,
+km_c2w_func_b(bool *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	arg->bwork[i+j*arg->ld] = *ent;
 }
 void
-km_c2w_func_v(VALUE *ent,
+km_c2w_func_v(VALUE *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	arg->vwork[i+j*arg->ld] = *ent;
 }
 void
-km_copy2work(void *work,
+km_copy2work(void *work, size_t ldw, SMAT *smat)
 {
 	if ( smat->stype == ST_FULL && !(smat->trans) && ldw==smat->ld ) {
-		memcpy(work, smat->body, km_sizeof_vt(smat->vtype)*
+		memcpy(work, smat->body, km_sizeof_vt(smat->vtype)*LENGTH(smat));
 	} else {
 		struct km_c2w_arg data = {{work}, ldw};
 		if ( smat->vtype == VT_DOUBLE ) {
 			if ( smat->stype == ST_RSUB ) {
 				km_smat_each_with_index_d(smat, km_c2w_func_d, &data);
 			} else if ( smat->trans ) {
-				const int one=1;
-				for (
-					dcopy_(&
+				const int one=1, n=s2i(smat->n), ldw_i=s2i(ldw);
+				for ( size_t i=0; i<smat->m; i++ ) {
+					dcopy_(&n, smat->dbody+(i*smat->ld), &one, data.dwork+i, &ldw_i);
 				}
 			} else {
 				char str_a[] = "A";
-
+				int m=s2i(smat->m), n=s2i(smat->n), ld=s2i(smat->ld), ldw_i=s2i(ldw);
+				dlacpy_(str_a, &m, &n, smat->dbody, &ld, work, &ldw_i);
 			}
 		} else if ( smat->vtype == VT_COMPLEX ) {
 			if ( smat->stype == ST_RSUB ) {
 				km_smat_each_with_index_z(smat, km_c2w_func_z, &data);
 			} else if ( smat->trans ) {
-				const int one=1;
-				for (
-					zcopy_(&
+				const int one=1, n=s2i(smat->n), ldw_i=s2i(ldw);
+				for ( size_t i=0; i<smat->m; i++ ) {
+					zcopy_(&n, smat->zbody+(i*smat->ld), &one, data.zwork+i, &ldw_i);
 				}
 			} else {
 				char str_a[] = "A";
-
+				int m=s2i(smat->m), n=s2i(smat->n), ld=s2i(smat->ld), ldw_i=s2i(ldw);
+				zlacpy_(str_a, &m, &n, smat->zbody, &ld, work, &ldw_i);
 			}
 		} else if ( smat->vtype == VT_INT ) {
 			km_smat_each_with_index_i(smat, km_c2w_func_i, &data);
@@ -147,65 +149,67 @@ km_copy2work(void *work, int ldw, SMAT *smat)
 	}
 }
 void
-km_cfw_func_d(double *ent,
+km_cfw_func_d(double *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	*ent = arg->dwork[i+j*arg->ld];
 }
 void
-km_cfw_func_z(COMPLEX *ent,
+km_cfw_func_z(COMPLEX *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	*ent = arg->zwork[i+j*arg->ld];
 }
 void
-km_cfw_func_i(int *ent,
+km_cfw_func_i(int *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	*ent = arg->iwork[i+j*arg->ld];
 }
 void
-km_cfw_func_b(bool *ent,
+km_cfw_func_b(bool *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	*ent = arg->bwork[i+j*arg->ld];
 }
 void
-km_cfw_func_v(VALUE *ent,
+km_cfw_func_v(VALUE *ent, size_t i, size_t j, void *data)
 {
 	struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
 	*ent = arg->vwork[i+j*arg->ld];
 }
 void
-km_copy_from_work(SMAT *smat, void *work,
+km_copy_from_work(SMAT *smat, void *work, size_t ldw)
 {
 	if ( smat->stype == ST_FULL && !(smat->trans) && ldw == smat->ld ) {
-		memcpy(smat->body, work, km_sizeof_vt(smat->vtype)*
+		memcpy(smat->body, work, km_sizeof_vt(smat->vtype)*LENGTH(smat));
 	} else {
 		struct km_c2w_arg data = {{work}, ldw};
 		if ( smat->vtype == VT_DOUBLE ) {
 			if ( smat->stype == ST_RSUB ) {
 				km_smat_each_with_index_d(smat, km_cfw_func_d, &data);
 			} else if ( smat->trans ) {
-				const int one=1;
-				for (
-					dcopy_(&
+				const int one=1, n=s2i(smat->n), ldw_i=s2i(ldw);
+				for ( size_t i=0; i<smat->m; i++ ) {
+					dcopy_(&n, data.dwork+i, &ldw_i, smat->dbody+(i*smat->ld), &one);
 				}
 			} else {
 				char str_a[] = "A";
-
+				int m=s2i(smat->m), n=s2i(smat->n), ld=s2i(smat->ld), ldw_i=s2i(ldw);
+				dlacpy_(str_a, &m, &n, work, &ldw_i, smat->dbody, &ld);
 			}
 		} else if ( smat->vtype == VT_COMPLEX ) {
 			if ( smat->stype == ST_RSUB ) {
 				km_smat_each_with_index_z(smat, km_cfw_func_z, &data);
 			} else if ( smat->trans ) {
-				const int one=1;
-				for (
-					zcopy_(&
+				const int one=1, n=s2i(smat->n), ldw_i=s2i(ldw);
+				for ( size_t i=0; i<smat->m; i++ ) {
+					zcopy_(&n, data.zwork+i, &ldw_i, smat->zbody+(i*smat->ld), &one);
 				}
 			} else {
 				char str_a[] = "A";
-
+				int m=s2i(smat->m), n=s2i(smat->n), ld=s2i(smat->ld), ldw_i=s2i(ldw);
+				zlacpy_(str_a, &m, &n, work, &ldw_i, smat->zbody, &ld);
 			}
 		} else if ( smat->vtype == VT_INT ) {
 			km_smat_each_with_index_i(smat, km_cfw_func_i, &data);
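Taken together, the working.c hunks keep the existing copy strategy: memcpy when the matrix is ST_FULL, untransposed, and the leading dimensions match; per-element callbacks for ST_RSUB; strided dcopy_/zcopy_ loops for transposed views; and dlacpy_/zlacpy_ otherwise. What changes is that sizes and indices become size_t, narrowed to int only at the BLAS/LAPACK call sites via s2i() (presumably a checked narrowing helper; that reading is an assumption). Below is a minimal standalone sketch of the strided transposed-copy idiom, assuming the conventional Fortran dcopy_ symbol from an LP64 (32-bit-integer) BLAS and linking with -lblas; the helper name and demo matrix are illustrative, not kmat's.

#include <stdio.h>

/* Reference-BLAS Fortran symbol: copies n elements with the given strides. */
extern void dcopy_(const int *n, const double *x, const int *incx,
                   double *y, const int *incy);

/* Copy the transpose of an m-by-n column-major matrix a (leading dimension lda)
 * into an n-by-m column-major buffer work (leading dimension ldw >= n):
 * stored column j of a becomes row j of work, written with stride ldw. */
static void copy_transposed(size_t m, size_t n, const double *a, size_t lda,
                            double *work, size_t ldw)
{
	const int one = 1, mi = (int)m, ldw_i = (int)ldw;   /* narrow for Fortran BLAS */
	for ( size_t j = 0; j < n; j++ ) {
		dcopy_(&mi, a + j*lda, &one, work + j, &ldw_i);
	}
}

int main(void)
{
	const double a[6] = { 1, 2, 3, 4, 5, 6 };   /* 2x3 column-major: [1 3 5; 2 4 6] */
	double work[6];                             /* receives the 3x2 transpose, ldw = 3 */

	copy_transposed(2, 3, a, 2, work, 3);
	for ( int k = 0; k < 6; k++ ) { printf("%g ", work[k]); }
	printf("\n");                               /* prints: 1 3 5 2 4 6 */
	return 0;
}

km_copy_from_work reverses the same calls, swapping the source and destination arguments, as the last hunk above shows.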
data/ext/kmat/main.c
CHANGED
@@ -24,7 +24,7 @@ VALUE rb_sMath;
 static VALUE
 kmm_obj_value(VALUE self)
 {
-	return
+	return LONG2NUM((long)self);
 }
 
 #include "method_definitions.c"
@@ -66,8 +66,8 @@ VALUE
 km_gc_escape(VALUE (*func)(), VALUE data)
 {
 	int status;
-	VALUE old = rb_gc_disable();
-	VALUE ret = rb_protect(func, data, &status);
+	const VALUE old = rb_gc_disable();
+	const VALUE ret = rb_protect(func, data, &status);
 	if ( old == Qfalse ) {
 		rb_gc_enable();
 	}
@@ -85,7 +85,7 @@ VALUE
 km_ensure(VALUE (* b_proc)(ANYARGS), VALUE data1, VALUE (* e_proc)(ANYARGS), VALUE data2)
 {
 	int status;
-	VALUE ret = rb_protect(b_proc, data1, &status);
+	const VALUE ret = rb_protect(b_proc, data1, &status);
 	(*e_proc)(data2);
 	if ( status != 0 ) {
 		rb_jump_tag(status);
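The main.c hunks only add const qualifiers, but they touch a pattern worth restating: disable GC, run the callback under rb_protect so a raised exception is captured instead of unwinding through C frames, restore the previous GC state, and re-raise with rb_jump_tag. Below is a standalone sketch combining the two idioms shown above, using only the public Ruby C API; call_without_gc is an illustrative name, not a kmat function.

#include <ruby.h>

/* Sketch (not kmat source): run `func` with GC disabled and exceptions
 * captured by rb_protect, restore the previous GC state, then propagate
 * any captured exception outside the protected call. */
static VALUE
call_without_gc(VALUE (*func)(VALUE), VALUE data)
{
	int status;
	const VALUE old = rb_gc_disable();            /* Qtrue if GC was already disabled */
	const VALUE ret = rb_protect(func, data, &status);
	if ( old == Qfalse ) {
		rb_gc_enable();                           /* only undo what we changed */
	}
	if ( status != 0 ) {
		rb_jump_tag(status);                      /* re-raise the captured exception */
	}
	return ret;
}

In the gem itself this corresponds to km_gc_escape and km_ensure; the const additions in this release do not change the control flow.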