kmat 0.0.3 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/.rspec +1 -0
- data/CHANGELOG.md +10 -0
- data/README.md +11 -11
- data/ext/kmat/arith/binary.c +161 -139
- data/ext/kmat/arith/math.c +1 -1
- data/ext/kmat/arith/statistics.c +11 -11
- data/ext/kmat/arith/unary.c +6 -6
- data/ext/kmat/extconf.rb +3 -0
- data/ext/kmat/km_util.h +34 -13
- data/ext/kmat/kmat.h +3 -3
- data/ext/kmat/linalg/dla.c +185 -133
- data/ext/kmat/linalg/linalg.c +33 -17
- data/ext/kmat/linalg/norm.c +83 -69
- data/ext/kmat/linalg/vla.c +23 -23
- data/ext/kmat/linalg/working.c +42 -38
- data/ext/kmat/main.c +4 -4
- data/ext/kmat/smat/accessor.c +104 -104
- data/ext/kmat/smat/array.c +3 -3
- data/ext/kmat/smat/boxmuller.c +5 -5
- data/ext/kmat/smat/constructer.c +52 -52
- data/ext/kmat/smat/convert.c +21 -21
- data/ext/kmat/smat/elem.c +7 -7
- data/ext/kmat/smat/fund.c +37 -37
- data/ext/kmat/smat/share.c +28 -27
- data/ext/kmat/smat/smat.c +58 -42
- data/ext/kmat/smat/sort.c +148 -146
- data/kmat.gemspec +5 -4
- data/lib/kmat/accessor.rb +5 -5
- data/lib/kmat/linalg.rb +1 -2
- data/lib/kmat/random.rb +2 -2
- data/lib/kmat/version.rb +1 -1
- data/lib/kmat.rb +9 -9
- metadata +25 -10
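
The bulk of the C changes below migrate matrix dimensions and indices from `int` to `size_t`. Conversions at the Ruby/C boundary go through helpers named in the diffs (`NUM2ZU`, `ZU2NUM`, `s2l`, `s2i`, `i2s`, `km_check_size2`), which are defined in `data/ext/kmat/km_util.h` and are not shown in this excerpt. A minimal sketch of what such helpers could look like, assuming Ruby's standard `NUM2SIZET`/`SIZET2NUM` conversions; the real kmat definitions may differ:

```c
/* Illustrative sketch only -- not the actual km_util.h definitions. */
#include <ruby.h>
#include <stddef.h>

#define NUM2ZU(v)  NUM2SIZET(v)   /* Ruby Integer -> size_t */
#define ZU2NUM(s)  SIZET2NUM(s)   /* size_t -> Ruby Integer */

static inline long   s2l(size_t s) { return (long)s;   } /* e.g. rb_ary_new2 capacity */
static inline int    s2i(size_t s) { return (int)s;    } /* e.g. int-typed index buffers */
static inline size_t i2s(int i)    { return (size_t)i; } /* back to size_t for INDEX() */
```

Read this way, the repeated edits below (loop counters becoming `size_t`, `rb_ary_new2(s2l(...))`, `%zu` format specifiers in error messages) are one mechanical change applied across the extension.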
data/ext/kmat/smat/fund.c
CHANGED
@@ -2,7 +2,7 @@
 
 // make a (m, n)-matrix with value type vt
 VALUE
-km_Mat(int m, int n, VTYPE vt)
+km_Mat(size_t m, size_t n, VTYPE vt)
 {
 VALUE ret = km_Mat_alloc(km_cMat);
 SMAT *smat = km_mat2smat(ret);
@@ -16,9 +16,9 @@ km_Mat(int m, int n, VTYPE vt)
 // otherwise, the return is filled by 0
 // if a block given, the (i, j)-th element is a return of yield(i, j)
 #define DEFINE_INIT_LOOP_FUNC(id, type, func) static void \
-km_initialize_loop_func_##id(type *elm,
+km_initialize_loop_func_##id(type *elm, size_t i, size_t j, void *null) \
 {\
-*elm = func( rb_yield( rb_ary_new3(2,
+*elm = func( rb_yield( rb_ary_new3(2, ZU2NUM(i), ZU2NUM(j)) ) ); \
 }
 DEFINE_INIT_LOOP_FUNC(d, double, NUM2DBL)
 DEFINE_INIT_LOOP_FUNC(z, COMPLEX, km_v2c)
@@ -34,7 +34,7 @@ static VALUE
 km_initialize_loop(VALUE varg)
 {
 struct km_initialize_loop_arg *arg = (struct km_initialize_loop_arg *)varg;
-km_smat_alloc_body(arg->smat,
+km_smat_alloc_body(arg->smat, NUM2ZU(arg->argv[0]), NUM2ZU(arg->argv[1]), arg->vt);
 if ( rb_block_given_p() ) {
 VT_SWITCH( arg->vt,
 km_smat_each_with_index_d(arg->smat, km_initialize_loop_func_d, NULL);,
@@ -45,8 +45,8 @@ km_initialize_loop(VALUE varg)
 );
 } else {
 if ( arg->vt == VT_VALUE ) {
-for (
-for (
+for ( size_t i=0; i<arg->smat->m; i++ ) {
+for ( size_t j=0; j<arg->smat->n; j++ ) {
 arg->smat->vbody[i+j*arg->smat->ld] = Qnil;
 }
 }
@@ -104,15 +104,15 @@ kmm_mat_reshape(VALUE self, VALUE vm, VALUE vn)
 if ( smat->stype == ST_SSUB ) {
 rb_raise(km_eShare, "can't reshape submatrix. try detach before reshaping");
 }
-
-
+const size_t m = NUM2ZU(vm), n = NUM2ZU(vn);
+km_check_size2(m, n);
 if ( m*n == LENGTH(smat) ) {
 smat->trans = false;
 smat->m = m; smat->n = n;
 smat->ld = m;
 return self;
 } else {
-rb_raise(km_eDim, "the length of the matrix must no be changed, (%
+rb_raise(km_eDim, "the length of the matrix must no be changed, (%zu, %zu) -> (%zu, %zu) is unavailable",
 smat->m, smat->n, m, n);
 }
 }
@@ -129,7 +129,7 @@ kmm_mat_resize(VALUE self, VALUE vm, VALUE vn)
 } else if ( kmm_mat_have_submatrix_p(self) ) {
 rb_raise(km_eShare, "can't resize supermatrix. try detach before reshaping");
 }
-
+const size_t m = NUM2ZU(vm), n = NUM2ZU(vn);
 if ( m*n == LENGTH(smat) ) {
 return kmm_mat_reshape(self, vm, vn);
 } else {
@@ -146,7 +146,7 @@ kmm_mat_transpose_dest(VALUE self)
 km_check_frozen(self);
 SMAT *smat = km_mat2smat(self);
 smat->trans = !(smat->trans);
-SWAP(
+SWAP(size_t, smat->m, smat->n);
 return self;
 }
 
@@ -200,7 +200,7 @@ kmm_mat_zero(VALUE self)
 type one; \
 }; \
 static void \
-km_eye_func_##id(type *elm,
+km_eye_func_##id(type *elm, size_t i, size_t j, void *data_) \
 {\
 struct km_eye_##id *data = (struct km_eye_##id *)data_; \
 if ( i == j ) { \
@@ -293,11 +293,11 @@ kmm_mat__randn0(VALUE self, VALUE random)
 km_fill_normal(LENGTH(smat), smat->dbody, random);
 } else if ( smat->stype == ST_SSUB ) {
 if ( smat->trans ) {
-for (
+for ( size_t i=0; i<smat->m; i++ ) {
 km_fill_normal(smat->n, smat->dbody+i*smat->ld, random);
 }
 } else {
-for (
+for ( size_t i=0; i<smat->n; i++ ) {
 km_fill_normal(smat->m, smat->dbody+i*smat->ld, random);
 }
 }
@@ -341,9 +341,9 @@ kmm_mat_each(VALUE self)
 
 // invoke yield(element, i, j) for all the elements
 #define DEFINE_EACH_WI2_FUNC(id, type, func) static void \
-km_each_with_index2_func_##id(type *elm,
+km_each_with_index2_func_##id(type *elm, size_t i, size_t j, void *null) \
 { \
-rb_yield(rb_ary_new3(3, func(*elm),
+rb_yield(rb_ary_new3(3, func(*elm), ZU2NUM(i), ZU2NUM(j))); \
 }
 DEFINE_EACH_WI2_FUNC(d, double, rb_float_new)
 DEFINE_EACH_WI2_FUNC(z, COMPLEX, km_c2v)
@@ -401,9 +401,9 @@ kmm_mat_mmap_dest(VALUE self)
 
 // invoke yield(element, i, j) and replace the element by the return for all the elements
 #define DEFINE_MMAP_WI2_FUNC(id, type, func, func2) static void \
-km_mmap_with_index2_func_##id(type *elm,
+km_mmap_with_index2_func_##id(type *elm, size_t i, size_t j, void *null) \
 { \
-*elm = func2(rb_yield(rb_ary_new3(3, func(*elm),
+*elm = func2(rb_yield(rb_ary_new3(3, func(*elm), ZU2NUM(i), ZU2NUM(j)))); \
 }
 DEFINE_MMAP_WI2_FUNC(d, double, rb_float_new, NUM2DBL)
 DEFINE_MMAP_WI2_FUNC(z, COMPLEX, km_c2v, km_v2c)
@@ -430,9 +430,9 @@ kmm_mat_mmap_with_index2_dest(VALUE self)
 }
 
 // make a row-major Array of Arrays (Mat#to_a is Enumerable#to_a and it is not the same as this)
-#define TA_LOOP(id, func) for (
-VALUE row = rb_ary_new2(smat->n); \
-for (
+#define TA_LOOP(id, func) for ( size_t i=0; i<smat->m; i++ ) { \
+VALUE row = rb_ary_new2(s2l(smat->n)); \
+for ( size_t j=0; j<smat->n; j++ ) { \
 rb_ary_push(row, func(smat->id##body[INDEX(smat, i, j)])); \
 } \
 rb_ary_push(ret, row); \
@@ -441,7 +441,7 @@ VALUE
 kmm_mat_to_ary(VALUE self)
 {
 SMAT *smat = km_mat2smat(self);
-VALUE ret = rb_ary_new2(smat->m);
+VALUE ret = rb_ary_new2(s2l(smat->m));
 VT_SWITCH( smat->vtype,
 TA_LOOP(d, rb_float_new),
 TA_LOOP(z, km_c2v),
@@ -453,9 +453,9 @@ kmm_mat_to_ary(VALUE self)
 }
 
 // make a String
-#define TS_LOOP(tid, func) for (
-VALUE row = rb_ary_new2(smat->n); \
-for (
+#define TS_LOOP(tid, func) for ( size_t i=0; i<smat->m; i++ ) { \
+VALUE row = rb_ary_new2(s2l(smat->n)); \
+for ( size_t j=0; j<smat->n; j++ ) { \
 rb_ary_push(row, rb_funcall(func(ENTITY(smat, tid, i, j)), id, 0)); \
 } \
 rb_str_cat2(ret, " ["); \
@@ -481,9 +481,9 @@ km_smat_to_s(const SMAT *smat, ID id, VALUE vtsym, VALUE stsym)
 rb_str_cat2(ret, ")");
 return ret;
 }
-#define TSP_LOOP(tid, func) for (
-VALUE row = rb_ary_new2(smat->n); \
-for (
+#define TSP_LOOP(tid, func) for ( size_t i=0; i<smat->m; i++ ) { \
+VALUE row = rb_ary_new2(s2l(smat->n)); \
+for ( size_t j=0; j<smat->n; j++ ) { \
 rb_ary_push(row, rb_funcall(format, id_op_percent, 1, func(ENTITY(smat, tid, i, j)))); \
 } \
 rb_str_cat2(ret, " ["); \
@@ -536,15 +536,15 @@ kmm_mat_symmetrize_dest(VALUE self)
 rb_raise(km_eDim, "non-square matrix cannot be symmetrized");
 }
 if ( smat->vtype == VT_DOUBLE ) {
-for (
-for (
+for ( size_t i=0; i<smat->m-1; i++ ) {
+for ( size_t j=i+1; j<smat->m; j++ ) {
 smat->dbody[i+j*smat->ld] = ( smat->dbody[i+j*smat->ld] + smat->dbody[j+i*smat->ld] ) * 0.5;
 smat->dbody[j+i*smat->ld] = smat->dbody[i+j*smat->ld];
 }
 }
 } else if ( smat->vtype == VT_COMPLEX ) {
-for (
-for (
+for ( size_t i=0; i<smat->m-1; i++ ) {
+for ( size_t j=i+1; j<smat->m; j++ ) {
 smat->zbody[i+j*smat->ld] = ( smat->zbody[i+j*smat->ld] + smat->zbody[j+i*smat->ld] ) * 0.5;
 smat->zbody[j+i*smat->ld] = smat->zbody[i+j*smat->ld];
 }
@@ -587,8 +587,8 @@ km_near_v(VALUE a, VALUE b)
 return RTEST(rb_funcall(a, id_op_eq, 1, b));
 }
 }
-#define SYM_BODY(id, func) for (
-for (
+#define SYM_BODY(id, func) for ( size_t i=0; i<(smat->n)-1; i++ ) { \
+for ( size_t j=i+1; j<(smat->n); j++ ) { \
 if ( !func(smat->id##body[i+j*(smat->ld)], smat->id##body[j+i*(smat->ld)]) ) { \
 return Qfalse; \
 } \
@@ -622,7 +622,7 @@ kmm_mat_symmetry_p(int argc, VALUE *argv, VALUE self)
 
 // judge whether `self' is near to `other'
 // consider a is near to b iff |a-b| < tol
-// the default value of tol is (
+// the default value of tol is (maximum absolute value of each element) * max(m, n) * eps
 static bool
 km_near_abs_d(double a, double b, double tol)
 {
@@ -687,7 +687,7 @@ kmm_mat_near_p(int argc, VALUE *argv, VALUE self)
 struct km_near_arg data = {true, 0.0};
 if ( argc == 1 ) {
 double ns = NUM2DBL(kmm_mat_norm_einf(self)), no = NUM2DBL(kmm_mat_norm_einf(argv[0]));
-data.tol = MAX(ns, no) * MAX(ss->m, ss->n) * DBL_EPSILON;
+data.tol = MAX(ns, no) * (double)MAX(ss->m, ss->n) * DBL_EPSILON;
 } else {
 data.tol = NUM2DBL(argv[1]);
 }
@@ -698,5 +698,5 @@ kmm_mat_near_p(int argc, VALUE *argv, VALUE self)
 km_smat_each2_b(ss, so, km_near_func_b, (void *)&data);,
 km_smat_each2_v(ss, so, km_near_func_v, (void *)&data);
 );
-return data.ret;
+return TF2V(data.ret);
 }
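
The `TA_LOOP`/`kmm_mat_to_ary` hunks above keep the same row-major Array-of-Arrays construction; only the loop counters and the capacities passed to `rb_ary_new2` change. A stand-alone sketch of that construction with the Ruby C API, using a hypothetical column-major `double` buffer in place of the `SMAT` fields (this is not kmat code):

```c
#include <ruby.h>

/* Build [[row0...], [row1...], ...] from an m-by-n column-major buffer. */
static VALUE
to_ary_sketch(const double *body, size_t m, size_t n)
{
	VALUE ret = rb_ary_new2((long)m);         /* outer Array: one slot per row */
	for (size_t i = 0; i < m; i++) {
		VALUE row = rb_ary_new2((long)n);     /* inner Array: one slot per column */
		for (size_t j = 0; j < n; j++) {
			rb_ary_push(row, rb_float_new(body[i + j * m]));
		}
		rb_ary_push(ret, row);
	}
	return ret;
}
```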
data/ext/kmat/smat/share.c
CHANGED
@@ -18,6 +18,7 @@ km_mat_deep_freeze0(VALUE self)
 if ( smat->vtype == VT_VALUE ) {
 km_smat_each_v(smat, km_mat_deep_freeze_func, NULL);
 }
+rb_obj_freeze(self);
 }
 VALUE
 kmm_mat_deep_freeze(VALUE self)
@@ -85,7 +86,7 @@ bool
 km_smat_have_submatrix_p(SMAT *smat)
 {
 if ( smat->may_have_sub ) {
-VALUE v = km_smat_find_value(smat);
+const VALUE v = km_smat_find_value(smat);
 if ( v == Qnil ) {
 rb_raise(km_eInternal, "km_smat_have_submatrix_p has been called with out-of-ruby-controlled Mat struct");
 } else {
@@ -177,7 +178,7 @@ VALUE
 kmm_mat_detach(int argc, VALUE *argv, VALUE self)
 {
 rb_check_arity(argc, 0, 1);
-bool check_other = ( ( argc == 1 ) && RTEST(argv[0]) );
+const bool check_other = ( ( argc == 1 ) && RTEST(argv[0]) );
 SMAT *smat = km_mat2smat(self);
 if ( smat->stype == ST_FULL ) {
 VALUE list = kmm_mat_submatricies(self);
@@ -191,7 +192,7 @@ kmm_mat_detach(int argc, VALUE *argv, VALUE self)
 smat->may_have_sub = false;
 return self;
 } else {
-VALUE old_parent = smat->parent;
+const VALUE old_parent = smat->parent;
 kmm_mat_replace(self, rb_obj_dup(self));
 if ( check_other ) {
 kmm_mat_submatricies(old_parent);
@@ -204,7 +205,7 @@ kmm_mat_detach(int argc, VALUE *argv, VALUE self)
 // if `self' is a submatrix of a supermatrix, detach from the supermatrix
 // in any case, `self' is replaced by (0, 0)-matrix, and return `self'
 static VALUE
-km_kill_func(
+km_kill_func(RB_BLOCK_CALL_FUNC_ARGLIST(elm, nil))
 {
 SMAT *ssub = km_mat2smat(elm);
 if ( ssub->stype != ST_SSUB ) {
@@ -223,7 +224,7 @@ kmm_mat__kill(VALUE self)
 SMAT *smat = km_mat2smat(self);
 if ( smat->stype == ST_FULL ) {
 VALUE list = kmm_mat_submatricies(self);
-
+rb_block_call(list, id_each, 0, (VALUE *)0, km_kill_func, Qnil);
 }
 if ( smat->stype != ST_SSUB ) {
 ruby_xfree(smat->body);
@@ -277,9 +278,9 @@ kmm_mat_replace(VALUE self, VALUE val)
 if ( dest->stype != ST_SSUB ) {
 ruby_xfree(dest->body);
 }
-dest->body = ruby_xcalloc(
+dest->body = ruby_xcalloc(LENGTH(src), sizeof(void*));
 }
-memcpy(dest->body, src->body, sizeof(void*)*
+memcpy(dest->body, src->body, sizeof(void*)*LENGTH(src));
 dest->ld = src->ld; dest->m = src->m; dest->n = src->n;
 dest->vtype = src->vtype; dest->stype = ST_RSUB;
 dest->trans = src->trans;
@@ -291,11 +292,11 @@ kmm_mat_replace(VALUE self, VALUE val)
 }
 
 VALUE
-km_Mat_ssub(int i, int j, int m, int n, VALUE super)
+km_Mat_ssub(size_t i, size_t j, size_t m, size_t n, VALUE super)
 {
 SMAT *ssup = km_mat2smat(super);
 if ( ssup->m < i+m || ssup->n < j+n ) {
-rb_raise(rb_eIndexError, "given index+size (%
+rb_raise(rb_eIndexError, "given index+size (%zu+%zu, %zu+%zu) is out of range (%zu, %zu)", i, m, j, n, ssup->m, ssup->n);
 }
 SMAT *sret;
 if ( ssup->stype == ST_FULL ) {
@@ -318,9 +319,9 @@ km_Mat_ssub(int i, int j, int m, int n, VALUE super)
 );
 sret->parent = ssup->parent;
 } else if ( ssup->stype == ST_RSUB ) {
-int is[m]
-for (
-for (
+int is[m], js[n];
+for ( size_t k=0; k<m; k++ ) { is[k] = s2i(i+k); }
+for ( size_t k=0; k<n; k++ ) { js[k] = s2i(j+k); }
 return km_Mat_rsub1(m, n, is, js, super);
 } else {
 rb_raise(km_eInternal, "unknown storage type");
@@ -332,24 +333,24 @@ km_Mat_ssub(int i, int j, int m, int n, VALUE super)
 }
 
 static void
-km_rsub_check_range(SMAT *ssup,
+km_rsub_check_range(SMAT *ssup, size_t ii, size_t jj)
 {
-if (
-rb_raise(rb_eIndexError, "given index (%
+if ( ssup->m <= ii || ssup->n <= jj ) {
+rb_raise(rb_eIndexError, "given index (%zu, %zu) is out of range (%zu, %zu)", ii, jj, ssup->m, ssup->n);
 }
 }
-#define RSUB1_LOOPr(id) for (
-km_rsub_check_range(ssup, is[i], js[j]); \
-sret->id##pbody[i+j*m] = ssup->id##pbody[INDEX(ssup, is[i], js[j])]; \
+#define RSUB1_LOOPr(id) for ( size_t i=0; i<m; i++ ) { for ( size_t j=0; j<n; j++ ) { \
+km_rsub_check_range(ssup, i2s(is[i]), i2s(js[j])); \
+sret->id##pbody[i+j*m] = ssup->id##pbody[INDEX(ssup, i2s(is[i]), i2s(js[j]))]; \
 } }
-#define RSUB1_LOOP(id) for (
-km_rsub_check_range(ssup, is[i], js[j]); \
-sret->id##pbody[i+j*m] = ssup->id##body + INDEX(ssup, is[i], js[j]); \
+#define RSUB1_LOOP(id) for ( size_t i=0; i<m; i++ ) { for ( size_t j=0; j<n; j++ ) { \
+km_rsub_check_range(ssup, i2s(is[i]), i2s(js[j])); \
+sret->id##pbody[i+j*m] = ssup->id##body + INDEX(ssup, i2s(is[i]), i2s(js[j])); \
 } }
 VALUE
-km_Mat_rsub1(int m, int n, int *is, int *js, VALUE super)
+km_Mat_rsub1(size_t m, size_t n, const int *is, const int *js, VALUE super)
 {
-
+km_check_size2(m, n);
 VALUE ret = km_Mat_alloc(km_cMat);
 SMAT *ssup = km_mat2smat(super);
 SMAT *sret = km_mat2smat(ret);
@@ -382,18 +383,18 @@ km_Mat_rsub1(int m, int n, int *is, int *js, VALUE super)
 return ret;
 }
 
-#define RSUB2_LOOPr(id) for(
+#define RSUB2_LOOPr(id) for( size_t i=0; i<m; i++ ) { for ( size_t j=0; j<n; j++ ) { \
 km_rsub_check_range(ssup, is[i+j*m], js[i+j*m]); \
 sret->id##pbody[i+j*m] = ssup->id##pbody[INDEX(ssup, is[i+j*m], js[i+j*m])]; \
 } }
-#define RSUB2_LOOP(id) for(
+#define RSUB2_LOOP(id) for( size_t i=0; i<m; i++ ) { for ( size_t j=0; j<n; j++ ) { \
 km_rsub_check_range(ssup, is[i+j*m], js[i+j*m]); \
 sret->id##pbody[i+j*m] = ssup->id##body+INDEX(ssup, is[i+j*m], js[i+j*m]); \
 } }
 VALUE
-km_Mat_rsub2(
+km_Mat_rsub2(size_t m, size_t n, const size_t *is, const size_t *js, VALUE super)
 {
-
+km_check_size2(m, n);
 VALUE ret = km_Mat_alloc(km_cMat);
 SMAT *ssup = km_mat2smat(super);
 SMAT *sret = km_mat2smat(ret);
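
One share.c change worth calling out: `kmm_mat__kill` now walks the submatrix list with `rb_block_call` and a callback declared via `RB_BLOCK_CALL_FUNC_ARGLIST`. A stand-alone sketch of that Ruby C-API pattern, with illustrative names (not kmat code):

```c
#include <ruby.h>

/* Called once per value yielded by #each; `data` is the last rb_block_call argument. */
static VALUE
each_entry_cb(RB_BLOCK_CALL_FUNC_ARGLIST(elm, data))
{
	rb_p(elm);    /* inspect the yielded element */
	return Qnil;
}

/* Iterate any enumerable Ruby object from C by invoking #each with a C block. */
static void
iterate_from_c(VALUE enumerable)
{
	rb_block_call(enumerable, rb_intern("each"), 0, NULL, each_entry_cb, Qnil);
}
```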
data/ext/kmat/smat/smat.c
CHANGED
@@ -35,9 +35,9 @@ km_smat_size(const void *_data)
 size_t ret = sizeof(SMAT);
 if ( data->body != NULL ) {
 if ( data->stype == ST_FULL ) {
-ret += km_sizeof_vt(data->vtype)*
+ret += km_sizeof_vt(data->vtype)*(data->m*data->n);
 } else if ( data->stype == ST_RSUB ) {
-ret += sizeof(void *)*
+ret += sizeof(void *)*(data->m*data->n);
 }
 }
 return ret;
@@ -46,6 +46,22 @@ km_smat_size(const void *_data)
 }
 }
 
+static void
+km_rb_gc_location_wrap(VALUE *obj, void *null)
+{
+(*obj) = rb_gc_location(*obj);
+}
+static void
+km_smat_compact(void *_data)
+{
+SMAT *data = (SMAT *)_data;
+data->parent = rb_gc_location(data->parent);
+if ( data->vtype == VT_VALUE && data->body != NULL ) {
+km_smat_each_v(data, km_rb_gc_location_wrap, NULL);
+}
+}
+
+
 // functions for allocation
 const rb_data_type_t km_mat_data_type = {
 "kmat-Mat",
@@ -53,7 +69,8 @@ const rb_data_type_t km_mat_data_type = {
 km_smat_mark,
 km_smat_free,
 km_smat_size,
-
+km_smat_compact,
+{ (void *)0 }
 },
 (rb_data_type_t *)0,
 (void *)0,
@@ -68,9 +85,9 @@ km_Mat_alloc(VALUE klass)
 // calloc a side of SMAT
 // return->body is the argument `body'
 SMAT *
-km_smat_alloc_with(int m, int n, VTYPE vt, void *body)
+km_smat_alloc_with(size_t m, size_t n, VTYPE vt, void *body)
 {
-
+km_check_size2(m, n);
 SMAT *data = ZALLOC(SMAT);
 data->body = body; data->ld = data->m = m; data->n = n;
 data->vtype = vt; data->stype = ST_FULL; data->trans = false;
@@ -79,19 +96,19 @@ km_smat_alloc_with(int m, int n, VTYPE vt, void *body)
 }
 
 SMAT *
-km_smat_alloc(
+km_smat_alloc(size_t m, size_t n, VTYPE vt)
 {
-
-void *body = ruby_xcalloc(
+km_check_size2(m, n);
+void *body = ruby_xcalloc(n*m, km_sizeof_vt(vt));
 return km_smat_alloc_with(m, n, vt, body);
 }
 
 // calloc a SMAT of (m, n)-matrix
 // return->body will be calloc-ed
 void
-km_smat_alloc_body(SMAT *data, int m, int n, VTYPE vt)
+km_smat_alloc_body(SMAT *data, size_t m, size_t n, VTYPE vt)
 {
-
+km_check_size2(m, n);
 if ( data->stype != ST_FULL ) {
 rb_raise(km_eShare, "can't re-alloc submatrix body");
 } else if ( km_smat_have_submatrix_p(data) ) {
@@ -99,12 +116,12 @@ km_smat_alloc_body(SMAT *data, int m, int n, VTYPE vt)
 }
 data->ld = data->m = m; data->n = n; data->vtype = vt; data->stype = ST_FULL;
 ruby_xfree(data->body);
-data->body = ruby_xcalloc(
+data->body = ruby_xcalloc(n*m, km_sizeof_vt(vt));
 }
 void
-km_smat_alloc_pbody(SMAT *data, int m, int n, VTYPE vt)
+km_smat_alloc_pbody(SMAT *data, size_t m, size_t n, VTYPE vt)
 {
-
+km_check_size2(m, n);
 if ( data->stype != ST_FULL ) {
 rb_raise(km_eShare, "can't re-alloc submatrix body");
 } else if ( km_smat_have_submatrix_p(data) ) {
@@ -112,7 +129,7 @@ km_smat_alloc_pbody(SMAT *data, int m, int n, VTYPE vt)
 }
 data->ld = data->m = m; data->n = n; data->vtype = vt; data->stype = ST_RSUB;
 ruby_xfree(data->body);
-data->body = ruby_xcalloc(
+data->body = ruby_xcalloc(n*m, sizeof(void*));
 }
 
 
@@ -152,7 +169,7 @@ km_smat_copy(SMAT *dest, const SMAT *src)
 rb_raise(km_eShare, "can't copy to value-type mismatched or dimension mismatched submatrix");
 }
 ruby_xfree(dest->body);
-dest->body = ruby_xcalloc(
+dest->body = ruby_xcalloc(LENGTH(src), km_sizeof_vt(src->vtype));
 dest->ld = src->m; dest->vtype = src->vtype;
 } else if ( dest->m != src->m ) { // need not to resize but reshape is needed
 if ( dest->stype != ST_FULL ) { // the destination must not be a submatrix
@@ -163,7 +180,7 @@ km_smat_copy(SMAT *dest, const SMAT *src)
 dest->m = src->m; dest->n = src->n;
 if ( dest->stype==ST_FULL && src->stype==ST_FULL ) {
 dest->ld = src->ld; dest->trans = src->trans;
-memcpy(dest->body, src->body, km_sizeof_vt(src->vtype)*
+memcpy(dest->body, src->body, km_sizeof_vt(src->vtype)*LENGTH(src));
 } else {
 VT_SWITCH( dest->vtype,
 km_smat_each2_dcd(dest, src, km_smat_copy_d, NULL);,
@@ -189,15 +206,15 @@ kmm_mat_marshal_dump(VALUE self)
 if ( smat->stype != ST_FULL ) {
 smat = km_mat2smat(rb_obj_dup(self));
 }
-VALUE headder = rb_str_new((char *)&(smat->vtype), sizeof(VTYPE));
+VALUE headder = rb_str_new((char *)&(smat->vtype), s2l(sizeof(VTYPE)));
 VALUE body;
 if ( smat->vtype == VT_VALUE ) {
 body = kmm_mat_to_ary(self);
 } else {
-rb_str_cat(headder, (char *)&(smat->m), sizeof(
-rb_str_cat(headder, (char *)&(smat->n), sizeof(
+rb_str_cat(headder, (char *)&(smat->m), sizeof(size_t));
+rb_str_cat(headder, (char *)&(smat->n), sizeof(size_t));
 rb_str_cat(headder, (char *)&(smat->trans), sizeof(bool));
-body = rb_str_new((char *)(smat->body), (
+body = rb_str_new((char *)(smat->body), s2l(km_sizeof_vt(smat->vtype)*LENGTH(smat)));
 }
 return rb_ary_new3(2, headder, body);
 }
@@ -212,13 +229,13 @@ kmm_mat_marshal_load(VALUE self, VALUE dump)
 km_smat_copy(smat, km_mat2smat(kmm_ary_to_omat(body)));
 } else {
 hptr += sizeof(VTYPE);
-
-memcpy(&m, hptr, sizeof(
-memcpy(&n, hptr, sizeof(
+size_t m, n; bool t;
+memcpy(&m, hptr, sizeof(size_t)); hptr += sizeof(size_t);
+memcpy(&n, hptr, sizeof(size_t)); hptr += sizeof(size_t);
 memcpy(&t, hptr, sizeof(bool)); hptr += sizeof(bool);
 km_smat_alloc_body(smat, m, n, smat->vtype);
 smat->trans = t;
-size_t sb = km_sizeof_vt(smat->vtype)*
+size_t sb = km_sizeof_vt(smat->vtype)*LENGTH(smat);
 if ( RSTRING_LEN(body) != (long)sb ) {
 rb_raise(rb_eArgError, "wrong object given");
 }
@@ -233,21 +250,21 @@ km_smat_each_##id(SMAT *smat, void (*func)(type *, void *), void *data) \
 { \
 if ( smat->stype == ST_RSUB ) { \
 if ( smat->trans ) { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(smat->id##pbody[j+i*(smat->ld)], data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(smat->id##pbody[i+j*(smat->ld)], data); \
 } } \
 } \
 } else { \
 if ( smat->trans ) { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(&(smat->id##body[j+i*(smat->ld)]), data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(&(smat->id##body[i+j*(smat->ld)]), data); \
 } } \
 } \
@@ -261,25 +278,25 @@ DEFINE_KM_SMAT_EACH_ID(v, VALUE)
 
 // call `func'(&element, `data', i, j) for each elements of `smat'
 #define DEFINE_KM_SMAT_EACH_WI_ID(id, type) void \
-km_smat_each_with_index_##id(SMAT *smat, void (*func)(type *,
+km_smat_each_with_index_##id(SMAT *smat, void (*func)(type *, size_t, size_t, void *), void *data) \
 { \
 if ( smat->stype == ST_RSUB ) { \
 if ( smat->trans ) { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(smat->id##pbody[j+i*(smat->ld)], i, j, data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(smat->id##pbody[i+j*(smat->ld)], i, j, data); \
 } } \
 } \
 } else { \
 if ( smat->trans ) { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(&(smat->id##body[j+i*(smat->ld)]), i, j, data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<smat->m; i++ ) { for ( size_t j=0; j<smat->n; j++ ) { \
 func(&(smat->id##body[i+j*(smat->ld)]), i, j, data); \
 } } \
 } \
@@ -296,21 +313,21 @@ DEFINE_KM_SMAT_EACH_WI_ID(v, VALUE)
 // SEGV will occur if `sb' is smaller than `sa'
 #define KM_SMAT_EACH2_BLOOP(elma, id) if ( sb->stype == ST_RSUB ) { \
 if ( sb->trans ) { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, sb->id##pbody[j+i*(sb->ld)], data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, sb->id##pbody[i+j*(sb->ld)], data); \
 } } \
 } \
 } else { \
 if ( sb->trans ) { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, &(sb->id##body[j+i*(sb->ld)]), data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, &(sb->id##body[i+j*(sb->ld)]), data); \
 } } \
 } \
@@ -370,21 +387,21 @@ DEFINE_KM_SMAT_EACH2_ID_CONST_ID(d, double, z, COMPLEX)
 // SEGV will occur if `sb' or `sc' is smaller than `sa'
 #define KM_SMAT_EACH3_CLOOP(elma, elmb, id) if ( sc->stype == ST_RSUB ) { \
 if ( sc->trans ) { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, elmb, sc->id##pbody[j+i*(sc->ld)], data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, elmb, sc->id##pbody[i+j*(sc->ld)], data); \
 } } \
 } \
 } else { \
 if ( sc->trans ) { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, elmb, &(sc->id##body[j+i*(sc->ld)]), data); \
 } } \
 } else { \
-for (
+for ( size_t i=0; i<sa->m; i++ ) { for ( size_t j=0; j<sa->n; j++ ) { \
 func(elma, elmb, &(sc->id##body[i+j*(sc->ld)]), data); \
 } } \
 } \
@@ -527,4 +544,3 @@ km_smat_each3_bcvcv(SMAT *sa, const SMAT *sb, const SMAT *sc, void (*func)(bool
 }
 }
 }
-
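
The largest smat.c addition is GC-compaction support: `km_smat_compact` is registered as the `dcompact` entry of `km_mat_data_type` and uses `rb_gc_location` to re-resolve the wrapped struct's `VALUE` references after objects move (available since Ruby 2.7). A generic sketch of that TypedData pattern with a hypothetical one-field wrapper, not kmat's actual struct:

```c
#include <ruby.h>

typedef struct { VALUE held; } Wrapper;   /* hypothetical wrapped struct */

static void wrapper_mark(void *p)    { rb_gc_mark_movable(((Wrapper *)p)->held); }
static void wrapper_compact(void *p) { ((Wrapper *)p)->held = rb_gc_location(((Wrapper *)p)->held); }
static void wrapper_free(void *p)    { ruby_xfree(p); }
static size_t wrapper_size(const void *p) { return p ? sizeof(Wrapper) : 0; }

static const rb_data_type_t wrapper_type = {
	"Wrapper",
	{ wrapper_mark, wrapper_free, wrapper_size, wrapper_compact, { NULL } },
	NULL, NULL,
	RUBY_TYPED_FREE_IMMEDIATELY
};
```

Marking with `rb_gc_mark_movable` (rather than `rb_gc_mark`) is what allows the GC to move the referenced objects; the `dcompact` callback then records their new addresses. The new `km_smat_compact` above follows the same shape, additionally walking a `VT_VALUE` body with `km_smat_each_v`.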