kmat 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. checksums.yaml +7 -0
  2. data/.gitattributes +3 -0
  3. data/.gitignore +15 -0
  4. data/CHANGELOG.md +15 -0
  5. data/Gemfile +4 -0
  6. data/LICENSE.md +675 -0
  7. data/README.md +224 -0
  8. data/Rakefile +26 -0
  9. data/bin/console +14 -0
  10. data/bin/setup +8 -0
  11. data/ext/kmat/arith/binary.c +1121 -0
  12. data/ext/kmat/arith/logical.c +332 -0
  13. data/ext/kmat/arith/math.c +34 -0
  14. data/ext/kmat/arith/statistics.c +173 -0
  15. data/ext/kmat/arith/unary.c +165 -0
  16. data/ext/kmat/auto_collect.rb +118 -0
  17. data/ext/kmat/elementwise_function.rb +149 -0
  18. data/ext/kmat/extconf.rb +75 -0
  19. data/ext/kmat/id.txt +80 -0
  20. data/ext/kmat/id_sym.rb +40 -0
  21. data/ext/kmat/km_util.h +97 -0
  22. data/ext/kmat/kmat.h +96 -0
  23. data/ext/kmat/lapack_headers/blas.h +354 -0
  24. data/ext/kmat/lapack_headers/lapacke.h +19455 -0
  25. data/ext/kmat/lapack_headers/lapacke_config.h +119 -0
  26. data/ext/kmat/lapack_headers/lapacke_mangling.h +17 -0
  27. data/ext/kmat/lapack_headers/lapacke_utils.h +579 -0
  28. data/ext/kmat/linalg/dla.c +1629 -0
  29. data/ext/kmat/linalg/linalg.c +267 -0
  30. data/ext/kmat/linalg/norm.c +727 -0
  31. data/ext/kmat/linalg/vla.c +102 -0
  32. data/ext/kmat/linalg/working.c +240 -0
  33. data/ext/kmat/main.c +95 -0
  34. data/ext/kmat/smat/accessor.c +719 -0
  35. data/ext/kmat/smat/array.c +108 -0
  36. data/ext/kmat/smat/boxmuller.c +72 -0
  37. data/ext/kmat/smat/constructer.c +302 -0
  38. data/ext/kmat/smat/convert.c +375 -0
  39. data/ext/kmat/smat/elem.c +171 -0
  40. data/ext/kmat/smat/fund.c +702 -0
  41. data/ext/kmat/smat/share.c +427 -0
  42. data/ext/kmat/smat/smat.c +530 -0
  43. data/ext/kmat/smat/sort.c +1156 -0
  44. data/ext/kmat/sym.txt +34 -0
  45. data/kmat.gemspec +46 -0
  46. data/lib/kmat.rb +20 -0
  47. data/lib/kmat/accessor.rb +164 -0
  48. data/lib/kmat/arith.rb +189 -0
  49. data/lib/kmat/linalg.rb +279 -0
  50. data/lib/kmat/logical.rb +150 -0
  51. data/lib/kmat/misc.rb +122 -0
  52. data/lib/kmat/random.rb +106 -0
  53. data/lib/kmat/statistics.rb +98 -0
  54. data/lib/kmat/version.rb +3 -0
  55. metadata +156 -0
data/ext/kmat/linalg/vla.c
@@ -0,0 +1,102 @@
1
+ #include "../kmat.h"
2
+
3
+ struct km_vmat_solve_arg {
4
+ SMAT *sx, *sa, *sb;
5
+ VALUE *a;
6
+ LAWORK x;
7
+ };
8
+ static VALUE
9
+ km_vmat_solve_body(VALUE data)
10
+ {
11
+ struct km_vmat_solve_arg *a = (struct km_vmat_solve_arg *)data;
12
+ int n=a->sx->m, nrhs=a->sx->n;
13
+ km_check_size(4, a->sa->m,n, a->sa->n,n, a->sb->m,n, a->sb->n,nrhs);
14
+
15
+ KALLOCc(a->a, a->sa);
16
+ km_alloc_if_needed(a->sx, &(a->x));
17
+ km_copy2work(a->x.v, a->x.ld, a->sb);
18
+ VALUE zero=INT2NUM(0);
19
+
20
+ // forward elimination
21
+ for ( int k=0; k<n; k++ ) {
22
+ if ( rb_funcall((a->a)[k+k*n], id_op_eq, 1, zero) ) {
23
+ for ( int i=k+1; i<n; i++ ) {
24
+ if ( !rb_funcall((a->a)[i+k*n], id_op_eq, 1, zero) ) {
25
+ for ( int j=k; j<n; j++ ) {
26
+ SWAP(VALUE, (a->a)[k+j*n], (a->a)[i+j*n]);
27
+ }
28
+ for ( int j=0; j<nrhs; j++ ) {
29
+ SWAP(VALUE, (a->x.v)[k+j*(a->x.ld)], (a->x.v)[i+j*(a->x.ld)]);
30
+ }
31
+ goto nonsingular;
32
+ }
33
+ }
34
+ rb_raise(km_eUncomp, "matrix is singular");
35
+ nonsingular: ;
36
+ }
37
+ VALUE akk = (a->a)[k+k*n];
38
+ for ( int j=k+1; j<n; j++ ) {
39
+ (a->a)[k+j*n] = rb_funcall((a->a)[k+j*n], id_quo, 1, akk);
40
+ }
41
+ for ( int j=0; j<nrhs; j++ ) {
42
+ (a->x.v)[k+j*(a->x.ld)] = rb_funcall((a->x.v)[k+j*(a->x.ld)], id_quo, 1, akk);
43
+ }
44
+ for( int i=k+1; i<n; i++ ) {
45
+ VALUE aik = (a->a)[i+k*n];
46
+ for ( int j=k+1; j<n; j++ ) {
47
+ VALUE tmp = rb_funcall(aik, id_op_mul, 1, (a->a)[k+j*n]);
48
+ (a->a)[i+j*n] = rb_funcall((a->a)[i+j*n], id_op_minus, 1, tmp);
49
+ }
50
+ for ( int j=0; j<nrhs; j++ ) {
51
+ VALUE tmp = rb_funcall(aik, id_op_mul, 1, (a->x.v)[k+j*(a->x.ld)]);
52
+ (a->x.v)[i+j*(a->x.ld)] = rb_funcall((a->x.v)[i+j*(a->x.ld)], id_op_minus, 1, tmp);
53
+ }
54
+ }
55
+ }
56
+
57
+ // back substitution
58
+ for ( int k=n-1; k>0; k-- ) {
59
+ for ( int i=0; i<k; i++ ) {
60
+ VALUE aik = (a->a)[i+k*n];
61
+ for ( int j=0; j<nrhs; j++ ) {
62
+ VALUE tmp = rb_funcall(aik, id_op_mul, 1, (a->x.v)[k+j*(a->x.ld)]);
63
+ (a->x.v)[i+j*(a->x.ld)] = rb_funcall((a->x.v)[i+j*(a->x.ld)], id_op_minus, 1, tmp);
64
+ }
65
+ }
66
+ }
67
+
68
+ return Qnil;
69
+ }
70
+ static VALUE
71
+ km_vmat_solve_ensure(VALUE data)
72
+ {
73
+ struct km_vmat_solve_arg *a = (struct km_vmat_solve_arg *)data;
74
+
75
+ ruby_xfree(a->a);
76
+ km_copy_and_free_if_needed(a->sx, &(a->x));
77
+
78
+ return Qnil;
79
+ }
80
+ VALUE
81
+ km_vmat_solve(VALUE self, VALUE va, VALUE vb)
82
+ {
83
+ struct km_vmat_solve_arg a; memset(&a, 0, sizeof(a));
84
+ a.sx=km_mat2smat(self); a.sa=km_mat2smat(va); a.sb=km_mat2smat(vb);
85
+ km_check_value(3, a.sx, a.sa, a.sb);
86
+
87
+ km_ensure(km_vmat_solve_body, (VALUE)(&a), km_vmat_solve_ensure, (VALUE)(&a));
88
+
89
+ return self;
90
+ }
91
+
92
+ VALUE
93
+ km_vmat_inverse(VALUE self, VALUE va)
94
+ {
95
+ SMAT *sx = km_mat2smat(self), *sa = km_mat2smat(va);
96
+ km_check_value(2, sx, sa);
97
+ int n = sa->m;
98
+ km_check_size(3, sx->m,n, sx->n,n, sa->n,n);
99
+ VALUE ident = km_Mat(n, n, VT_VALUE);
100
+ kmm_mat_eye(ident);
101
+ return km_vmat_solve(self, va, ident);
102
+ }
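
The hunk above solves A*X = B for object-element (VT_VALUE) matrices, doing every arithmetic step through rb_funcall: normalize the pivot row, eliminate below the pivot, then back-substitute, swapping rows only when a pivot is exactly zero. For reference, a minimal standalone sketch of the same scheme on plain doubles (toy_solve is an illustrative name; column-major storage with leading dimension n is assumed, x holds B on entry and the solution on return, and no row swapping is attempted):

static int
toy_solve(double *a, double *x, int n, int nrhs)
{
	// a: n*n column-major; x: n*nrhs with leading dimension n
	// forward elimination with unit-diagonal normalization
	for ( int k=0; k<n; k++ ) {
		double akk = a[k+k*n];
		if ( akk == 0.0 ) { return -1; }  // singular; the object version above searches for a nonzero pivot instead
		for ( int j=k+1; j<n; j++ ) { a[k+j*n] /= akk; }
		for ( int j=0; j<nrhs; j++ ) { x[k+j*n] /= akk; }
		for ( int i=k+1; i<n; i++ ) {
			double aik = a[i+k*n];
			for ( int j=k+1; j<n; j++ ) { a[i+j*n] -= aik*a[k+j*n]; }
			for ( int j=0; j<nrhs; j++ ) { x[i+j*n] -= aik*x[k+j*n]; }
		}
	}
	// back substitution (the diagonal is already 1)
	for ( int k=n-1; k>0; k-- ) {
		for ( int i=0; i<k; i++ ) {
			double aik = a[i+k*n];
			for ( int j=0; j<nrhs; j++ ) { x[i+j*n] -= aik*x[k+j*n]; }
		}
	}
	return 0;
}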
data/ext/kmat/linalg/working.c
@@ -0,0 +1,240 @@
1
+ #include "../kmat.h"
2
+
3
+ void *
4
+ km_alloc_and_copy(SMAT *smat)
5
+ {
6
+ void *ret = ruby_xcalloc(int2size_t(LENGTH(smat)), km_sizeof_vt(smat->vtype));
7
+ km_copy2work(ret, smat->m, smat);
8
+ return ret;
9
+ }
10
+
11
+ // use `smat'->body if it can be used
12
+ // otherwise, calloc()
13
+ void
14
+ km_alloc_if_needed(SMAT *smat, LAWORK *lawork)
15
+ {
16
+ if ( smat->stype != ST_RSUB && !(smat->trans) ) {
17
+ lawork->body = smat->body;
18
+ lawork->ld = smat->ld;
19
+ lawork->need_to_free = false;
20
+ } else {
21
+ lawork->body = ruby_xcalloc(int2size_t(smat->m*smat->n), km_sizeof_vt(smat->vtype));
22
+ lawork->ld = smat->m;
23
+ lawork->need_to_free = true;
24
+ }
25
+ }
26
+
27
+ // km_alloc_if_needed() and copy from `smat'->body if calloc() is called
28
+ void
29
+ km_alloc_and_copy_if_needed(SMAT *smat, LAWORK *lawork)
30
+ {
31
+ km_alloc_if_needed(smat, lawork);
32
+ if ( lawork->need_to_free ) {
33
+ km_copy2work(lawork->body, lawork->ld, smat);
34
+ }
35
+ }
36
+
37
+ // km_alloc_if_needed() and zero-clear in any case
38
+ void
39
+ km_alloc_if_needed_and_0clear(SMAT *smat, LAWORK *lawork)
40
+ {
41
+ km_alloc_if_needed(smat, lawork);
42
+ if ( lawork->need_to_free ) { return; }
43
+ if ( smat->vtype == VT_DOUBLE ) {
44
+ for ( int i=0; i<smat->m; i++ ) { for ( int j=0; j<smat->n; j++ ) {
45
+ lawork->d[i+j*lawork->ld] = 0.0;
46
+ } }
47
+ } else if ( smat->vtype == VT_COMPLEX ) {
48
+ for ( int i=0; i<smat->m; i++ ) { for ( int j=0; j<smat->n; j++ ) {
49
+ lawork->z[i+j*lawork->ld] = cpack(0.0, 0.0);
50
+ } }
51
+ } else if ( smat->vtype == VT_INT ) {
52
+ for ( int i=0; i<smat->m; i++ ) { for ( int j=0; j<smat->n; j++ ) {
53
+ lawork->i[i+j*lawork->ld] = 0;
54
+ } }
55
+ } else if ( smat->vtype == VT_BOOL ) {
56
+ for ( int i=0; i<smat->m; i++ ) { for ( int j=0; j<smat->n; j++ ) {
57
+ lawork->b[i+j*lawork->ld] = false;
58
+ } }
59
+ } else {
60
+ for ( int i=0; i<smat->m; i++ ) { for ( int j=0; j<smat->n; j++ ) {
61
+ lawork->v[i+j*lawork->ld] = Qfalse;
62
+ } }
63
+ }
64
+ }
65
+
66
+ struct km_c2w_arg {
67
+ union {
68
+ void *work;
69
+ double *dwork;
70
+ COMPLEX *zwork;
71
+ int *iwork;
72
+ bool *bwork;
73
+ VALUE *vwork;
74
+ };
75
+ int ld;
76
+ };
77
+ void
78
+ km_c2w_func_d(double *ent, int i, int j, void *data)
79
+ {
80
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
81
+ arg->dwork[i+j*arg->ld] = *ent;
82
+ }
83
+ void
84
+ km_c2w_func_z(COMPLEX *ent, int i, int j, void *data)
85
+ {
86
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
87
+ arg->zwork[i+j*arg->ld] = *ent;
88
+ }
89
+ void
90
+ km_c2w_func_i(int *ent, int i, int j, void *data)
91
+ {
92
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
93
+ arg->iwork[i+j*arg->ld] = *ent;
94
+ }
95
+ void
96
+ km_c2w_func_b(bool *ent, int i, int j, void *data)
97
+ {
98
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
99
+ arg->bwork[i+j*arg->ld] = *ent;
100
+ }
101
+ void
102
+ km_c2w_func_v(VALUE *ent, int i, int j, void *data)
103
+ {
104
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
105
+ arg->vwork[i+j*arg->ld] = *ent;
106
+ }
107
+ void
108
+ km_copy2work(void *work, int ldw, SMAT *smat)
109
+ {
110
+ if ( smat->stype == ST_FULL && !(smat->trans) && ldw==smat->ld ) {
111
+ memcpy(work, smat->body, km_sizeof_vt(smat->vtype)*LENGTHs(smat));
112
+ } else {
113
+ struct km_c2w_arg data = {{work}, ldw};
114
+ if ( smat->vtype == VT_DOUBLE ) {
115
+ if ( smat->stype == ST_RSUB ) {
116
+ km_smat_each_with_index_d(smat, km_c2w_func_d, &data);
117
+ } else if ( smat->trans ) {
118
+ const int one=1;
119
+ for ( int i=0; i<smat->m; i++ ) {
120
+ dcopy_(&(smat->n), smat->dbody+(i*smat->ld), &one, data.dwork+i, &ldw);
121
+ }
122
+ } else {
123
+ char str_a[] = "A";
124
+ dlacpy_(str_a, &(smat->m), &(smat->n), smat->dbody, &(smat->ld), work, &ldw);
125
+ }
126
+ } else if ( smat->vtype == VT_COMPLEX ) {
127
+ if ( smat->stype == ST_RSUB ) {
128
+ km_smat_each_with_index_z(smat, km_c2w_func_z, &data);
129
+ } else if ( smat->trans ) {
130
+ const int one=1;
131
+ for ( int i=0; i<smat->m; i++ ) {
132
+ zcopy_(&(smat->n), smat->zbody+(i*smat->ld), &one, data.zwork+i, &ldw);
133
+ }
134
+ } else {
135
+ char str_a[] = "A";
136
+ zlacpy_(str_a, &(smat->m), &(smat->n), smat->zbody, &(smat->ld), work, &ldw);
137
+ }
138
+ } else if ( smat->vtype == VT_INT ) {
139
+ km_smat_each_with_index_i(smat, km_c2w_func_i, &data);
140
+ } else if ( smat->vtype == VT_BOOL ) {
141
+ km_smat_each_with_index_b(smat, km_c2w_func_b, &data);
142
+ } else if ( smat->vtype == VT_VALUE ) {
143
+ km_smat_each_with_index_v(smat, km_c2w_func_v, &data);
144
+ } else {
145
+ rb_raise(km_eInternal, "unknown value type");
146
+ }
147
+ }
148
+ }
149
+ void
150
+ km_cfw_func_d(double *ent, int i, int j, void *data)
151
+ {
152
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
153
+ *ent = arg->dwork[i+j*arg->ld];
154
+ }
155
+ void
156
+ km_cfw_func_z(COMPLEX *ent, int i, int j, void *data)
157
+ {
158
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
159
+ *ent = arg->zwork[i+j*arg->ld];
160
+ }
161
+ void
162
+ km_cfw_func_i(int *ent, int i, int j, void *data)
163
+ {
164
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
165
+ *ent = arg->iwork[i+j*arg->ld];
166
+ }
167
+ void
168
+ km_cfw_func_b(bool *ent, int i, int j, void *data)
169
+ {
170
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
171
+ *ent = arg->bwork[i+j*arg->ld];
172
+ }
173
+ void
174
+ km_cfw_func_v(VALUE *ent, int i, int j, void *data)
175
+ {
176
+ struct km_c2w_arg *arg = (struct km_c2w_arg *)data;
177
+ *ent = arg->vwork[i+j*arg->ld];
178
+ }
179
+ void
180
+ km_copy_from_work(SMAT *smat, void *work, int ldw)
181
+ {
182
+ if ( smat->stype == ST_FULL && !(smat->trans) && ldw == smat->ld ) {
183
+ memcpy(smat->body, work, km_sizeof_vt(smat->vtype)*LENGTHs(smat));
184
+ } else {
185
+ struct km_c2w_arg data = {{work}, ldw};
186
+ if ( smat->vtype == VT_DOUBLE ) {
187
+ if ( smat->stype == ST_RSUB ) {
188
+ km_smat_each_with_index_d(smat, km_cfw_func_d, &data);
189
+ } else if ( smat->trans ) {
190
+ const int one=1;
191
+ for ( int i=0; i<smat->m; i++ ) {
192
+ dcopy_(&(smat->n), data.dwork+i, &ldw, smat->dbody+(i*smat->ld), &one);
193
+ }
194
+ } else {
195
+ char str_a[] = "A";
196
+ dlacpy_(str_a, &(smat->m), &(smat->n), work, &ldw, smat->dbody, &(smat->ld));
197
+ }
198
+ } else if ( smat->vtype == VT_COMPLEX ) {
199
+ if ( smat->stype == ST_RSUB ) {
200
+ km_smat_each_with_index_z(smat, km_cfw_func_z, &data);
201
+ } else if ( smat->trans ) {
202
+ const int one=1;
203
+ for ( int i=0; i<smat->m; i++ ) {
204
+ zcopy_(&(smat->n), data.zwork+i, &ldw, smat->zbody+(i*smat->ld), &one);
205
+ }
206
+ } else {
207
+ char str_a[] = "A";
208
+ zlacpy_(str_a, &(smat->m), &(smat->n), work, &ldw, smat->zbody, &(smat->ld));
209
+ }
210
+ } else if ( smat->vtype == VT_INT ) {
211
+ km_smat_each_with_index_i(smat, km_cfw_func_i, &data);
212
+ } else if ( smat->vtype == VT_BOOL ) {
213
+ km_smat_each_with_index_b(smat, km_cfw_func_b, &data);
214
+ } else if ( smat->vtype == VT_VALUE ) {
215
+ km_smat_each_with_index_v(smat, km_cfw_func_v, &data);
216
+ } else {
217
+ rb_raise(km_eInternal, "unknown value type");
218
+ }
219
+ }
220
+ }
221
+
222
+ void
223
+ km_free_if_needed(LAWORK *work)
224
+ {
225
+ if ( work->need_to_free ) {
226
+ ruby_xfree(work->body);
227
+ work->body = NULL;
228
+ work->need_to_free = false;
229
+ }
230
+ }
231
+ void
232
+ km_copy_and_free_if_needed(SMAT *smat, LAWORK *work)
233
+ {
234
+ if ( work->need_to_free ) {
235
+ km_copy_from_work(smat, work->body, work->ld);
236
+ ruby_xfree(work->body);
237
+ work->body = NULL;
238
+ work->need_to_free = false;
239
+ }
240
+ }
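
The helpers in this hunk manage LAWORK buffers: km_alloc_if_needed reuses smat->body when it is already a plain, untransposed column-major block, and otherwise allocates a temporary that km_copy_and_free_if_needed later writes back and frees. A minimal sketch of that lifecycle (toy_scale_in_work is an illustrative name; a VT_DOUBLE matrix is assumed):

static void
toy_scale_in_work(SMAT *smat)
{
	LAWORK w;
	km_alloc_and_copy_if_needed(smat, &w);  // points w at smat->body when possible, else copies into a temporary
	for ( int j=0; j<smat->n; j++ ) {
		for ( int i=0; i<smat->m; i++ ) {
			w.d[i+j*w.ld] *= 2.0;           // work on the dense (i, j) layout via w.ld
		}
	}
	km_copy_and_free_if_needed(smat, &w);   // writes back and frees only if a temporary was used
}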
data/ext/kmat/main.c
@@ -0,0 +1,95 @@
1
+ #include "kmat.h"
2
+
3
+ // km_global_variables_begin
4
+
5
+ VALUE km_cMat;
6
+ VALUE km_sMat;
7
+ VALUE km_eDim;
8
+ VALUE km_eVT;
9
+ VALUE km_eUncomp;
10
+ VALUE km_eInternal;
11
+ VALUE km_eShare;
12
+ VALUE km_eNotImp;
13
+
14
+ VALUE rb_mMarshal;
15
+ VALUE rb_mObjSpace;
16
+ VALUE rb_sMath;
17
+
18
+ // km_global_variables_end
19
+
20
+ #include "id_sym.c"
21
+
22
+ // return VALUE as Integer
23
+ // this is useful for debugging
24
+ static VALUE
25
+ kmm_obj_value(VALUE self)
26
+ {
27
+ return INT2NUM((int)self);
28
+ }
29
+
30
+ #include "method_definitions.c"
31
+ #include "elementwise_function.c"
32
+ #include "elementwise_function_definitions.c"
33
+
34
+ void
35
+ Init_kmat(void)
36
+ {
37
+ rb_mMarshal = rb_const_get(rb_cObject, rb_intern("Marshal"));
38
+ rb_mObjSpace = rb_const_get(rb_cObject, rb_intern("ObjectSpace"));
39
+ rb_sMath = rb_singleton_class(rb_mMath);
40
+
41
+ // km_cMat = rb_define_class("Mat", rb_cObject); # Mat is defined in /lib/kmat.rb
42
+ km_cMat = rb_const_get(rb_cObject, rb_intern("Mat"));
43
+ km_sMat = rb_singleton_class(km_cMat);
44
+ rb_undef_alloc_func(km_cMat);
45
+ rb_define_alloc_func(km_cMat, km_Mat_alloc);
46
+
47
+ km_eDim = rb_define_class_under(km_cMat, "MismatchedDimensionError", rb_eStandardError);
48
+ km_eVT = rb_define_class_under(km_cMat, "ValueTypeError", rb_eStandardError);
49
+ km_eUncomp = rb_define_class_under(km_cMat, "UncomputableMatrixError", rb_eStandardError);
50
+ km_eInternal = rb_define_class_under(km_cMat, "InternalError", rb_eException);
51
+ km_eShare = rb_define_class_under(km_cMat, "SharingError", rb_eStandardError);
52
+ km_eNotImp = rb_define_class_under(km_cMat, "NotImplementedYetError", rb_eStandardError);
53
+
54
+ km_init_id(); km_init_sym();
55
+
56
+ rb_define_alias(km_cMat, "+@", "itself");
57
+ km_define_methods();
58
+ km_define_efs();
59
+ km_Mat_rand_init();
60
+ }
61
+
62
+ // invoke `func'(`data') disabling GC
63
+ // the state of GC will be restored before return
64
+ // return the return value of `func'(`data') unless an exception is raised
65
+ VALUE
66
+ km_gc_escape(VALUE (*func)(), VALUE data)
67
+ {
68
+ int status;
69
+ VALUE old = rb_gc_disable();
70
+ VALUE ret = rb_protect(func, data, &status);
71
+ if ( old == Qfalse ) {
72
+ rb_gc_enable();
73
+ }
74
+ if ( status != 0 ) {
75
+ rb_jump_tag(status);
76
+ } else {
77
+ return ret;
78
+ }
79
+ }
80
+
81
+ // invoke `b_proc'(`data1') and ensure invoking `e_proc'(`data2')
82
+ // if an exception is raised in `b_proc', the exception is re-raised after invoking `e_proc'
83
+ // otherwise, return the return value of `b_proc'
84
+ VALUE
85
+ km_ensure(VALUE (* b_proc)(ANYARGS), VALUE data1, VALUE (* e_proc)(ANYARGS), VALUE data2)
86
+ {
87
+ int status;
88
+ VALUE ret = rb_protect(b_proc, data1, &status);
89
+ (*e_proc)(data2);
90
+ if ( status != 0 ) {
91
+ rb_jump_tag(status);
92
+ } else {
93
+ return ret;
94
+ }
95
+ }
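
km_ensure above is the extension's plain-function-pointer counterpart of rb_ensure; km_vmat_solve in the first hunk uses it so its work buffers are released even when the body raises. A minimal sketch of that body/ensure pattern (the toy_* names are illustrative):

struct toy_arg { VALUE *buf; };
static VALUE
toy_body(VALUE data)
{
	struct toy_arg *a = (struct toy_arg *)data;
	a->buf = ruby_xcalloc(16, sizeof(VALUE));  // anything after this may raise
	// ... work that can raise a Ruby exception ...
	return Qnil;
}
static VALUE
toy_ensure(VALUE data)
{
	struct toy_arg *a = (struct toy_arg *)data;
	ruby_xfree(a->buf);                        // runs whether or not toy_body raised
	return Qnil;
}
// call site: struct toy_arg a = {NULL}; km_ensure(toy_body, (VALUE)&a, toy_ensure, (VALUE)&a);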
data/ext/kmat/smat/accessor.c
@@ -0,0 +1,719 @@
1
+ #include "../kmat.h"
2
+
3
+ static int
4
+ mod(int a, int b)
5
+ {
6
+ int r = a % b;
7
+ if ( r < 0 ) {
8
+ return r+b;
9
+ } else {
10
+ return r;
11
+ }
12
+ }
13
+
14
+ // returns (i, j)-th element
15
+ VALUE
16
+ kmm_mat_get_value(VALUE self, VALUE vi, VALUE vj)
17
+ {
18
+ SMAT *smat = km_mat2smat(self);
19
+ int i = NUM2INT(vi);
20
+ int j = NUM2INT(vj);
21
+ if ( i < 0 || smat->m <= i || j < 0 || smat->n <= j ) {
22
+ rb_raise(rb_eIndexError, "index (%d, %d) is out of range (%d, %d)", i, j, smat->m, smat->n);
23
+ }
24
+ VT_SWITCH( smat->vtype,
25
+ return rb_float_new(ENTITY(smat, d, i, j));,
26
+ return km_c2v(ENTITY(smat, z, i, j));,
27
+ return INT2NUM(ENTITY(smat, i, i, j));,
28
+ return TF2V(ENTITY(smat, b, i, j));,
29
+ return ENTITY(smat, v, i, j);
30
+ );
31
+ }
32
+
33
+ // make a serial-submatrix with shape (`vm', `vn')
34
+ // (0, 0)-th element of the return is (`vi', `vj')-th element of `self'
35
+ VALUE
36
+ kmm_mat_get_ssub(VALUE self, VALUE vi, VALUE vj, VALUE vm, VALUE vn)
37
+ {
38
+ return km_Mat_ssub(NUM2INT(vi), NUM2INT(vj), NUM2INT(vm), NUM2INT(vn), self);
39
+ }
40
+
41
+ // make a random-submatrix by :ivec indices `vi', `vj'
42
+ // (i, j)-th element of the return is (`vi'[i], `vj'[j])-th element of `self'
43
+ // if both `vi' and `vj' are serial, a serial-submatrix is made
44
+ VALUE
45
+ kmm_mat_get_rsub(VALUE self, VALUE vi, VALUE vj)
46
+ {
47
+ SMAT *si = km_mat2smat(vi);
48
+ SMAT *sj = km_mat2smat(vj);
49
+ if ( si->vtype != VT_INT || sj->vtype != VT_INT ) {
50
+ rb_raise(km_eVT, "index arguments must be int vectors");
51
+ }
52
+ if ( !VECTOR_P(si) || !VECTOR_P(sj) ) {
53
+ rb_raise(km_eDim, "index arguments must be int vectors");
54
+ }
55
+ if ( si->stype != ST_FULL ) {
56
+ si = km_mat2smat(rb_obj_dup(vi));
57
+ }
58
+ if ( sj->stype != ST_FULL ) {
59
+ sj = km_mat2smat(rb_obj_dup(vj));
60
+ }
61
+ int *is = si->ibody, *js = sj->ibody;
62
+ bool flg = true;
63
+ int m = LENGTH(si), n = LENGTH(sj);
64
+ for (int i=0; i<m-1; i++) {
65
+ if ( is[i+1]-is[i] != 1 ) {
66
+ flg = false;
67
+ break;
68
+ }
69
+ }
70
+ if ( flg ) {
71
+ for (int i=0; i<n-1; i++) {
72
+ if ( js[i+1]-js[i] != 1 ) {
73
+ flg = false;
74
+ break;
75
+ }
76
+ }
77
+ }
78
+ if ( flg ) {
79
+ return km_Mat_ssub(is[0], js[0], m, n, self);
80
+ } else {
81
+ return km_Mat_rsub1(m, n, is, js, self);
82
+ }
83
+ }
84
+
85
+ // make a random-submatrix by :vmat index `vi'
86
+ // (i, j)-th element of the return is (`vi'[i, j][0], `vi'[i, j][1])-th element of `self'
87
+ VALUE
88
+ kmm_mat_get_rsub2(VALUE self, VALUE vi)
89
+ {
90
+ SMAT *smat = km_mat2smat(self);
91
+ SMAT *vmat = km_mat2smat(vi);
92
+ if ( vmat->vtype != VT_VALUE ) {
93
+ rb_raise(km_eVT, "index argument must be an object matrix");
94
+ }
95
+ int m = vmat->m, n = vmat->n, idx;
96
+ int m2 = smat->m, n2 = smat->n;
97
+ int *is, *js;
98
+ is = ALLOCA_N(int, int2size_t(n*m));
99
+ js = ALLOCA_N(int, int2size_t(n*m));
100
+ VALUE ij;
101
+ for ( int i=0; i<m; i++ ) { for ( int j=0; j<n; j++ ) {
102
+ ij = ENTITY(vmat, v, i, j);
103
+ if ( (TYPE(ij) != T_ARRAY) || (RARRAY_LEN(ij) != 2) ) {
104
+ rb_raise(rb_eIndexError, "value of vmat-index must be an Array with 2 elements");
105
+ }
106
+ idx = i+j*m;
107
+ is[idx] = mod( NUM2INT(rb_ary_entry(ij, 0)), m2 );
108
+ js[idx] = mod( NUM2INT(rb_ary_entry(ij, 1)), n2 );
109
+ } }
110
+ return km_Mat_rsub2(m, n, is, js, self);
111
+ }
112
+
113
+ // make a random-submatrix by :bmat index `vi'
114
+ // if `vi'[i, j] is true, the return contains `self'[i, j]
115
+ // if `vi'[i, j] is false, the return does not contain `self'[i, j]
116
+ // the return is a vector whose length is the number of true entries in `vi'
117
+ static void
118
+ km_gbb_len(bool *eb, void *data)
119
+ {
120
+ if ( *eb ) { *((int *)data) += 1; };
121
+ }
122
+ struct km_gbb_arg {
123
+ int l;
124
+ union {
125
+ double **dpbody;
126
+ COMPLEX **zpbody;
127
+ int **ipbody;
128
+ bool **bpbody;
129
+ VALUE **vpbody;
130
+ };
131
+ };
132
+ static void
133
+ km_gbb_func_d(double *ea, const bool *eb, void *data_)
134
+ {
135
+ struct km_gbb_arg *data = (struct km_gbb_arg *)data_;
136
+ if ( *eb ) {
137
+ data->dpbody[data->l] = ea;
138
+ data->l += 1;
139
+ }
140
+ }
141
+ static void
142
+ km_gbb_func_z(COMPLEX *ea, const bool *eb, void *data_)
143
+ {
144
+ struct km_gbb_arg *data = (struct km_gbb_arg *)data_;
145
+ if ( *eb ) {
146
+ data->zpbody[data->l] = ea;
147
+ data->l += 1;
148
+ }
149
+ }
150
+ static void
151
+ km_gbb_func_i(int *ea, const bool *eb, void *data_)
152
+ {
153
+ struct km_gbb_arg *data = (struct km_gbb_arg *)data_;
154
+ if ( *eb ) {
155
+ data->ipbody[data->l] = ea;
156
+ data->l += 1;
157
+ }
158
+ }
159
+ static void
160
+ km_gbb_func_b(bool *ea, const bool *eb, void *data_)
161
+ {
162
+ struct km_gbb_arg *data = (struct km_gbb_arg *)data_;
163
+ if ( *eb ) {
164
+ data->bpbody[data->l] = ea;
165
+ data->l += 1;
166
+ }
167
+ }
168
+ static void
169
+ km_gbb_func_v(VALUE *ea, const bool *eb, void *data_)
170
+ {
171
+ struct km_gbb_arg *data = (struct km_gbb_arg *)data_;
172
+ if ( *eb ) {
173
+ data->vpbody[data->l] = ea;
174
+ data->l += 1;
175
+ }
176
+ }
177
+ VALUE
178
+ kmm_mat_get_by_bmat(VALUE self, VALUE vi)
179
+ {
180
+ SMAT *smat = km_mat2smat(self);
181
+ SMAT *bmat = km_mat2smat(vi);
182
+ struct km_gbb_arg data = {0, {NULL}};
183
+ km_smat_each_b(bmat, km_gbb_len, (void *)&(data.l));
184
+ VALUE ret = km_Mat_alloc(km_cMat);
185
+ SMAT *sr = km_mat2smat(ret);
186
+ km_smat_alloc_pbody(sr, data.l, 1, smat->vtype);
187
+ data.l = 0;
188
+ VT_SWITCH( smat->vtype,
189
+ data.dpbody = sr->dpbody; km_smat_each2_dcb(smat, bmat, km_gbb_func_d, &data);,
190
+ data.zpbody = sr->zpbody; km_smat_each2_zcb(smat, bmat, km_gbb_func_z, &data);,
191
+ data.ipbody = sr->ipbody; km_smat_each2_icb(smat, bmat, km_gbb_func_i, &data);,
192
+ data.bpbody = sr->bpbody; km_smat_each2_bcb(smat, bmat, km_gbb_func_b, &data);,
193
+ data.vpbody = sr->vpbody; km_smat_each2_vcb(smat, bmat, km_gbb_func_v, &data);
194
+ );
195
+ if ( smat->stype == ST_FULL ) {
196
+ smat->may_have_sub = true;
197
+ sr->parent = self;
198
+ } else {
199
+ sr->parent = smat->parent;
200
+ }
201
+ km_infect_frozen(self, ret);
202
+ return ret;
203
+ }
204
+
205
+ // set val to (i, j)-th element
206
+ VALUE
207
+ kmm_mat_set_value(VALUE self, VALUE vi, VALUE vj, VALUE val)
208
+ {
209
+ km_check_frozen(self);
210
+ SMAT *smat = km_mat2smat(self);
211
+ int i = NUM2INT(vi), j = NUM2INT(vj);
212
+ if ( i<0 || smat->m<=i || j<0 || smat->n<=j ) {
213
+ rb_raise(rb_eIndexError, "index (%d, %d) is out of range (%d, %d)", i, j, smat->m, smat->n);
214
+ }
215
+ if ( smat->stype == ST_RSUB ) {
216
+ VT_SWITCH( smat->vtype,
217
+ ENTITYr0(smat, d, INDEX(smat, i, j)) = NUM2DBL(val);,
218
+ ENTITYr0(smat, z, INDEX(smat, i, j)) = km_v2c(val);,
219
+ ENTITYr0(smat, i, INDEX(smat, i, j)) = NUM2INT(val);,
220
+ ENTITYr0(smat, b, INDEX(smat, i, j)) = RTEST(val);,
221
+ ENTITYr0(smat, v, INDEX(smat, i, j)) = val;
222
+ );
223
+ } else {
224
+ VT_SWITCH( smat->vtype,
225
+ ENTITYd0(smat, d, INDEX(smat, i, j)) = NUM2DBL(val);,
226
+ ENTITYd0(smat, z, INDEX(smat, i, j)) = km_v2c(val);,
227
+ ENTITYd0(smat, i, INDEX(smat, i, j)) = NUM2INT(val);,
228
+ ENTITYd0(smat, b, INDEX(smat, i, j)) = RTEST(val);,
229
+ ENTITYd0(smat, v, INDEX(smat, i, j)) = val;
230
+ );
231
+ }
232
+ return self;
233
+ }
234
+
235
+ // copy the values of `other' to `self'
236
+ VALUE
237
+ kmm_mat_copy_from(VALUE self, VALUE other)
238
+ {
239
+ km_check_frozen(self);
240
+ SMAT *dest = km_mat2smat(self), *src = km_mat2smat(other);
241
+ CHECK_SAME_SIZE(dest, src);
242
+ if ( dest->vtype == src->vtype ) {
243
+ if ( ( dest->stype != ST_FULL && ( dest->parent == src->parent || dest->parent == other ) ) || src->parent == self ) {
244
+ km_smat_copy(dest, km_mat2smat(rb_obj_dup(other)));
245
+ } else {
246
+ km_smat_copy(dest, src);
247
+ }
248
+ } else {
249
+ VT_SWITCH( dest->vtype,
250
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_fmat(other)));,
251
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_cmat(other)));,
252
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_imat(other)));,
253
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_bmat(other)));,
254
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_omat(other)));
255
+ );
256
+ }
257
+ return self;
258
+ }
259
+ VALUE
260
+ kmm_mat_tcopy_from(VALUE self, VALUE other)
261
+ {
262
+ km_check_frozen(self);
263
+ SMAT *dest = km_mat2smat(self), *src = km_mat2smat(other);
264
+ if ( dest->m!=src->n || dest->n!=src->m ) {
265
+ rb_raise(km_eDim, "transposed sizes must be the same, (%d, %d) != (%d, %d)", (dest)->m, (dest)->n, (src)->n, (src)->m);
266
+ }
267
+ dest->trans = !dest->trans;
268
+ SWAP(int, dest->m, dest->n);
269
+ if ( dest->vtype == src->vtype ) {
270
+ if ( ( dest->stype != ST_FULL && ( dest->parent == src->parent || dest->parent == other ) ) || src->parent == self ) {
271
+ km_smat_copy(dest, km_mat2smat(rb_obj_dup(other)));
272
+ } else {
273
+ km_smat_copy(dest, src);
274
+ }
275
+ } else {
276
+ VT_SWITCH( dest->vtype,
277
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_fmat(other)));,
278
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_cmat(other)));,
279
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_imat(other)));,
280
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_bmat(other)));,
281
+ km_smat_copy(dest, km_mat2smat(kmm_mat_to_omat(other)));
282
+ );
283
+ }
284
+ dest->trans = !dest->trans;
285
+ SWAP(int, dest->m, dest->n);
286
+ return self;
287
+ }
288
+
289
+ // make a random-submatrix vector which contains diagonal elements of `self'
290
+ VALUE
291
+ kmm_mat__diag(VALUE self)
292
+ {
293
+ SMAT *smat = km_mat2smat(self);
294
+ VALUE ret = km_Mat_alloc(km_cMat);
295
+ SMAT *sr = km_mat2smat(ret);
296
+ km_smat_alloc_pbody(sr, MIN(smat->m, smat->n), 1, smat->vtype);
297
+ if ( smat->stype == ST_RSUB ) {
298
+ for ( int i=0; i<sr->m; i++ ) {
299
+ sr->pbody[i] = smat->pbody[i+i*(smat->ld)];
300
+ }
301
+ } else {
302
+ VT_SWITCH( smat->vtype,
303
+ for ( int i=0; i<sr->m; i++ ) {
304
+ sr->pbody[i] = smat->dbody+(i+i*smat->ld);
305
+ },
306
+ for ( int i=0; i<sr->m; i++ ) {
307
+ sr->pbody[i] = smat->zbody+(i+i*smat->ld);
308
+ },
309
+ for ( int i=0; i<sr->m; i++ ) {
310
+ sr->pbody[i] = smat->ibody+(i+i*smat->ld);
311
+ },
312
+ for ( int i=0; i<sr->m; i++ ) {
313
+ sr->pbody[i] = smat->bbody+(i+i*smat->ld);
314
+ },
315
+ for ( int i=0; i<sr->m; i++ ) {
316
+ sr->pbody[i] = smat->vbody+(i+i*smat->ld);
317
+ }
318
+ );
319
+ }
320
+ if ( smat->stype == ST_FULL ) {
321
+ sr->parent = self;
322
+ smat->may_have_sub = true;
323
+ } else {
324
+ sr->parent = smat->parent;
325
+ }
326
+ km_infect_frozen(self, ret);
327
+ return ret;
328
+ }
329
+
330
+ VALUE
331
+ kmm_mat__diag_ul(VALUE self, VALUE vk)
332
+ {
333
+ SMAT *smat = km_mat2smat(self);
334
+ VALUE ret = km_Mat_alloc(km_cMat);
335
+ SMAT *sr = km_mat2smat(ret);
336
+ int k = NUM2INT(vk);
337
+ if ( k == 0 ) { return kmm_mat__diag(self); }
338
+ int len, i_s, j_s;
339
+ if ( 0 < k ) {
340
+ if ( k < (smat->n)-(smat->m) ) {
341
+ len = smat->m;
342
+ } else {
343
+ len = smat->n-k;
344
+ }
345
+ if ( smat->trans ) {
346
+ i_s = k; j_s = 0;
347
+ } else {
348
+ i_s = 0; j_s = k;
349
+ }
350
+ } else {
351
+ if ( k < (smat->n)-(smat->m) ) {
352
+ len = smat->m+k;
353
+ } else {
354
+ len = smat->n;
355
+ }
356
+ if ( smat->trans ) {
357
+ i_s = 0; j_s = -k;
358
+ } else {
359
+ i_s = -k; j_s = 0;
360
+ }
361
+ }
362
+ if ( len <= 0 ) {
363
+ rb_raise(rb_eArgError, "given offset %d exceeds range [-%d, %d]", k, smat->m-1, smat->n-1);
364
+ }
365
+ km_smat_alloc_pbody(sr, len, 1, smat->vtype);
366
+ if ( smat->stype == ST_RSUB ) {
367
+ for ( int i=0; i<len; i++ ) {
368
+ sr->pbody[i] = smat->pbody[i_s+i+(j_s+i)*smat->ld];
369
+ }
370
+ } else {
371
+ VT_SWITCH( smat->vtype,
372
+ for ( int i=0; i<len; i++ ) {
373
+ sr->pbody[i] = smat->dbody+(i_s+i+(j_s+i)*smat->ld);
374
+ },
375
+ for ( int i=0; i<len; i++ ) {
376
+ sr->pbody[i] = smat->zbody+(i_s+i+(j_s+i)*smat->ld);
377
+ },
378
+ for ( int i=0; i<len; i++ ) {
379
+ sr->pbody[i] = smat->ibody+(i_s+i+(j_s+i)*smat->ld);
380
+ },
381
+ for ( int i=0; i<len; i++ ) {
382
+ sr->pbody[i] = smat->bbody+(i_s+i+(j_s+i)*smat->ld);
383
+ },
384
+ for ( int i=0; i<len; i++ ) {
385
+ sr->pbody[i] = smat->vbody+(i_s+i+(j_s+i)*smat->ld);
386
+ }
387
+ );
388
+ }
389
+ if ( smat->stype == ST_FULL ) {
390
+ sr->parent = self;
391
+ smat->may_have_sub = true;
392
+ } else {
393
+ sr->parent = smat->parent;
394
+ }
395
+ km_infect_frozen(self, ret);
396
+ return ret;
397
+ }
398
+
399
+ // Mat#[]
400
+ // get an element or make a submatrix
401
+ typedef enum {
402
+ IT_SERIAL,
403
+ IT_IVEC,
404
+ IT_BMAT,
405
+ IT_INT,
406
+ IT_ZERO,
407
+ IT_VMAT,
408
+ IT_END
409
+ } ITSYM;
410
+ static void
411
+ km_index_treatment(VALUE *oidx, ITSYM *itsym, VALUE iidx, int size, bool convert)
412
+ {
413
+ if ( iidx == Qnil ) {
414
+ *oidx = rb_ary_new3(2, INT2NUM(0), INT2NUM(size));
415
+ *itsym = IT_SERIAL;
416
+ } else if ( rb_obj_is_kind_of(iidx, km_cMat) ) {
417
+ SMAT *si = km_mat2smat(iidx);
418
+ if ( si->vtype == VT_INT ) {
419
+ if ( VECTOR_P(si) ) {
420
+ if ( LENGTH(si) == 0 ) {
421
+ *oidx = Qnil;
422
+ *itsym = IT_ZERO;
423
+ } else {
424
+ *oidx = iidx;
425
+ *itsym = IT_IVEC;
426
+ }
427
+ } else {
428
+ rb_raise(rb_eArgError, "int-Mat-index must be a vector");
429
+ }
430
+ } else if ( si->vtype == VT_BOOL ) {
431
+ if ( convert && VECTOR_P(si) ) {
432
+ if ( LENGTH(si) == size ) {
433
+ int oi_size = 0;
434
+ if ( si->stype == ST_RSUB ) {
435
+ for ( int i=0; i<size; i++ ) {
436
+ if ( *((si->bpbody)[i]) ) {
437
+ oi_size++;
438
+ }
439
+ }
440
+ *oidx = km_Mat(oi_size,1, VT_INT);
441
+ SMAT *oi = km_mat2smat(*oidx);
442
+ oi_size = 0;
443
+ for ( int i=0; i<size; i++ ) {
444
+ if ( *((si->bpbody)[i]) ) {
445
+ oi->ibody[oi_size] = i;
446
+ oi_size++;
447
+ }
448
+ }
449
+ } else {
450
+ for ( int i=0; i<size; i++ ) {
451
+ if ( (si->bbody)[i] ) {
452
+ oi_size++;
453
+ }
454
+ }
455
+ *oidx = km_Mat(oi_size,1, VT_INT);
456
+ SMAT *oi = km_mat2smat(*oidx);
457
+ oi_size = 0;
458
+ for ( int i=0; i<size; i++ ) {
459
+ if ( (si->bbody)[i] ) {
460
+ oi->ibody[oi_size] = i;
461
+ oi_size++;
462
+ }
463
+ }
464
+ }
465
+ *itsym = IT_IVEC;
466
+ } else {
467
+ rb_raise(km_eDim, "boolean-Mat-index length must match with object size");
468
+ }
469
+ } else {
470
+ *oidx = iidx;
471
+ *itsym = IT_BMAT;
472
+ }
473
+ } else if ( si->vtype == VT_VALUE ) {
474
+ *oidx = iidx;
475
+ *itsym = IT_VMAT;
476
+ } else {
477
+ rb_raise(km_eVT, "float or complex matrix cannot be an index");
478
+ }
479
+ } else if ( rb_obj_is_kind_of(iidx, rb_cArray) ) {
480
+ int ii_size = (int)RARRAY_LEN(iidx);
481
+ if ( ii_size == 0 ) {
482
+ *oidx = rb_ary_new3(2, INT2NUM(0), INT2NUM(size));
483
+ *itsym = IT_SERIAL;
484
+ } else if ( rb_ary_entry(iidx, 0) == Qtrue || rb_ary_entry(iidx, 0) == Qfalse ) {
485
+ if ( ii_size == size ) {
486
+ int oi_size = 0;
487
+ for ( int i=0; i<size; i++ ) {
488
+ if ( RTEST(rb_ary_entry(iidx, i)) ) {
489
+ oi_size++;
490
+ }
491
+ }
492
+ *oidx = km_Mat(oi_size, 1, VT_INT);
493
+ SMAT *oi = km_mat2smat(*oidx);
494
+ oi_size = 0;
495
+ for ( int i=0; i<size; i++ ) {
496
+ if ( RTEST(rb_ary_entry(iidx, i)) ) {
497
+ oi->ibody[oi_size] = i;
498
+ oi_size++;
499
+ }
500
+ }
501
+ *itsym = IT_IVEC;
502
+ } else {
503
+ rb_raise(km_eDim, "boolean-Array-index length must match with object size");
504
+ }
505
+ } else {
506
+ *oidx = km_Mat(ii_size, 1, VT_INT);
507
+ SMAT *oi = km_mat2smat(*oidx);
508
+ for ( int i=0; i<ii_size; i++ ) {
509
+ (oi->ibody)[i] = mod(NUM2INT(rb_ary_entry(iidx, i)), size);
510
+ }
511
+ *itsym = IT_IVEC;
512
+ }
513
+ } else if ( rb_obj_is_kind_of(iidx, rb_cInteger) ) {
514
+ *oidx = INT2NUM(mod(NUM2INT(iidx), size));
515
+ *itsym = IT_INT;
516
+ } else if ( rb_obj_is_kind_of(iidx, rb_cRange) ) {
517
+ int f = mod(NUM2INT(rb_funcall(iidx, id_first, 0)), size);
518
+ int l = NUM2INT(rb_funcall(iidx, id_last, 0));
519
+ if ( RTEST(rb_funcall(iidx, id_exclude_end_p, 0)) ) {
520
+ l--;
521
+ }
522
+ l = mod(l, size);
523
+ *oidx = rb_ary_new3(2, INT2NUM(f), INT2NUM(l-f+1));
524
+ *itsym = IT_SERIAL;
525
+ } else {
526
+ rb_raise(rb_eArgError, "unknown index type");
527
+ }
528
+ }
529
+
530
+ VALUE
531
+ kmm_mat_bracket(int argc, VALUE *argv, VALUE self)
532
+ {
533
+ rb_check_arity(argc, 0, 2);
534
+ SMAT *smat = km_mat2smat(self);
535
+ if ( LENGTH(smat) == 0 ) {
536
+ rb_raise(km_eDim, "Mat#[] is not available for 0-size matricies");
537
+ }
538
+ if ( argc == 2 ) {
539
+ VALUE ri, ci;
540
+ ITSYM rit, cit;
541
+ km_index_treatment(&ri, &rit, argv[0], smat->m, true);
542
+ km_index_treatment(&ci, &cit, argv[1], smat->n, true);
543
+ if ( rit == IT_INT ) {
544
+ if ( cit == IT_INT ) {
545
+ return kmm_mat_get_value(self, ri, ci);
546
+ } else if ( cit == IT_SERIAL ) {
547
+ return km_Mat_ssub(NUM2INT(ri), NUM2INT(rb_ary_entry(ci, 0)), 1, NUM2INT(rb_ary_entry(ci, 1)), self);
548
+ } else if ( cit == IT_IVEC ) {
549
+ VALUE riv = km_Mat(1, 1, VT_INT);
550
+ (km_mat2smat(riv)->ibody)[0] = NUM2INT(ri);
551
+ return kmm_mat_get_rsub(self, riv, ci);
552
+ }
553
+ } else if ( rit == IT_SERIAL ) {
554
+ if ( cit == IT_INT ) {
555
+ return km_Mat_ssub(NUM2INT(rb_ary_entry(ri, 0)), NUM2INT(ci), NUM2INT(rb_ary_entry(ri, 1)), 1, self);
556
+ } else if ( cit == IT_SERIAL ) {
557
+ return kmm_mat_get_ssub(self, rb_ary_entry(ri, 0), rb_ary_entry(ci, 0), rb_ary_entry(ri, 1), rb_ary_entry(ci, 1));
558
+ } else if ( cit == IT_IVEC ) {
559
+ int ri1 = NUM2INT(rb_ary_entry(ri, 1));
560
+ VALUE riv = km_Mat(ri1, 1, VT_INT);
561
+ int ri0 = NUM2INT(rb_ary_entry(ri, 0));
562
+ SMAT *sri = km_mat2smat(riv);
563
+ for ( int i=0; i<ri1; i++ ) {
564
+ (sri->ibody)[i] = i+ri0;
565
+ }
566
+ return kmm_mat_get_rsub(self, riv, ci);
567
+ }
568
+ } else if ( rit == IT_IVEC ) {
569
+ if ( cit == IT_INT ) {
570
+ VALUE civ = km_Mat(1, 1, VT_INT);
571
+ (km_mat2smat(civ)->ibody)[0] = NUM2INT(ci);
572
+ return kmm_mat_get_rsub(self, ri, civ);
573
+ } else if ( cit == IT_SERIAL ) {
574
+ int ci1 = NUM2INT(rb_ary_entry(ci, 1));
575
+ VALUE civ = km_Mat(ci1, 1, VT_INT);
576
+ int ci0 = NUM2INT(rb_ary_entry(ci, 0));
577
+ SMAT *sci = km_mat2smat(civ);
578
+ for ( int i=0; i<ci1; i++ ) {
579
+ (sci->ibody)[i] = i+ci0;
580
+ }
581
+ return kmm_mat_get_rsub(self, ri, civ);
582
+ } else if ( cit == IT_IVEC ) {
583
+ return kmm_mat_get_rsub(self, ri, ci);
584
+ }
585
+ } else if ( rit == IT_ZERO ) {
586
+ int cs;
587
+ if ( cit == IT_INT ) {
588
+ cs = 1;
589
+ } else if ( cit == IT_SERIAL ) {
590
+ cs = NUM2INT(rb_ary_entry(ci, 1));
591
+ } else if ( cit == IT_IVEC ) {
592
+ cs = LENGTH(km_mat2smat(ci));
593
+ } else if ( cit == IT_ZERO ) {
594
+ cs = 0;
595
+ } else {
596
+ rb_raise(rb_eArgError, "illegal index type combination [%d, %d]", rit, cit);
597
+ }
598
+ return km_Mat(0, cs, smat->vtype);
599
+ }
600
+ rb_raise(rb_eArgError, "illegal index type combination [%d, %d]", rit, cit);
601
+ } else if ( argc == 1 ) {
602
+ VALUE i;
603
+ ITSYM it;
604
+ km_index_treatment(&i, &it, argv[0], LENGTH(smat), false);
605
+ if ( it == IT_BMAT ) {
606
+ SMAT *si = km_mat2smat(i);
607
+ if ( SAME_SIZE(smat, si) ) {
608
+ return kmm_mat_get_by_bmat(self, i);
609
+ } else {
610
+ rb_raise(km_eDim, "boolean index size (%d, %d) must be the same as matrix size (%d, %d)", si->m, si->n, smat->m, smat->n);
611
+ }
612
+ } else if ( it == IT_INT ) {
613
+ if ( smat->n == 1 ) {
614
+ return kmm_mat_get_value(self, i, INT2NUM(0));
615
+ } else if ( smat->m == 1 ) {
616
+ return kmm_mat_get_value(self, INT2NUM(0), i);
617
+ } else {
618
+ rb_raise(rb_eArgError, "single integer index is available only for vectors, not for (%d, %d) matricies", smat->m, smat->n);
619
+ }
620
+ } else if ( it == IT_SERIAL ) {
621
+ if ( smat->n == 1 ) {
622
+ return km_Mat_ssub(NUM2INT(rb_ary_entry(i, 0)), 0, NUM2INT(rb_ary_entry(i, 1)), 1, self);
623
+ } else if ( smat->m == 1 ) {
624
+ return km_Mat_ssub(0, NUM2INT(rb_ary_entry(i, 0)), 1, NUM2INT(rb_ary_entry(i, 1)), self);
625
+ } else {
626
+ rb_raise(rb_eArgError, "single serial index is available only for vectors, not for (%d, %d) matricies", smat->m, smat->n);
627
+ }
628
+ } else if ( it == IT_IVEC ) {
629
+ if ( smat->n == 1 ) {
630
+ VALUE i2 = km_Mat(1, 1, VT_INT);
631
+ (km_mat2smat(i2)->ibody)[0] = 0;
632
+ return kmm_mat_get_rsub(self, i, i2);
633
+ } else if ( smat->m == 1 ) {
634
+ VALUE i2 = km_Mat(1, 1, VT_INT);
635
+ (km_mat2smat(i2)->ibody)[0] = 0;
636
+ return kmm_mat_get_rsub(self, i2, i);
637
+ } else {
638
+ rb_raise(rb_eArgError, "single ivec index is available only for vectors, not for (%d, %d) matricies", smat->m, smat->n);
639
+ }
640
+ } else if ( it == IT_ZERO ) {
641
+ if ( smat->n == 1 ) {
642
+ return km_Mat(1, 0, smat->vtype);
643
+ } else if ( smat->m == 1 ) {
644
+ return km_Mat(0, 1, smat->vtype);
645
+ } else {
646
+ rb_raise(rb_eArgError, "single serial index is available only for vectors, not for (%d, %d) matricies", smat->m, smat->n);
647
+ }
648
+ } else if ( it == IT_VMAT ) {
649
+ return kmm_mat_get_rsub2(self, i);
650
+ } else {
651
+ rb_raise(km_eInternal, "unknown index type %d", it);
652
+ }
653
+ } else {
654
+ return km_Mat_ssub(0, 0, smat->m, smat->n, self);
655
+ }
656
+ }
657
+
658
+ // alias []=
659
+ VALUE
660
+ kmm_mat_bracket_set(int argc, VALUE *argv, VALUE self)
661
+ {
662
+ rb_check_arity(argc, 1, 3);
663
+ km_check_frozen(self);
664
+ SMAT *smat = km_mat2smat(self);
665
+ if ( LENGTH(smat) == 0 ) {
666
+ rb_raise(km_eDim, "Mat#[]= is not avialable for 0-size matricies");
667
+ }
668
+ if ( argc == 3 ) {
669
+ if ( rb_obj_is_kind_of(argv[0], rb_cInteger) && rb_obj_is_kind_of(argv[1], rb_cInteger) ) {
670
+ kmm_mat_set_value(self, INT2NUM(mod(NUM2INT(argv[0]), smat->m)), INT2NUM(mod(NUM2INT(argv[1]), smat->n)), argv[2]);
671
+ } else {
672
+ bool flg = smat->may_have_sub;
673
+ VALUE target = kmm_mat_bracket(2, argv, self);
674
+ if ( rb_obj_is_kind_of(argv[2], km_cMat) ) {
675
+ kmm_mat_copy_from(target, argv[2]);
676
+ } else {
677
+ kmm_mat_fill(target, argv[2]);
678
+ }
679
+ kmm_mat__kill(target);
680
+ smat->may_have_sub = flg;
681
+ }
682
+ } else if ( argc == 2 ) {
683
+ if ( rb_obj_is_kind_of(argv[0], rb_cInteger) ) {
684
+ if ( smat->n == 1 ) {
685
+ kmm_mat_set_value(self, INT2NUM(mod(NUM2INT(argv[0]), smat->m)), INT2NUM(0), argv[1]);
686
+ } else if ( smat->m == 1 ) {
687
+ kmm_mat_set_value(self, INT2NUM(0), INT2NUM(mod(NUM2INT(argv[0]), smat->n)), argv[1]);
688
+ } else {
689
+ rb_raise(rb_eArgError, "setting value with single integer index is available only for vectors, not (%d, %d) matricies", smat->m, smat->n);
690
+ }
691
+ } else {
692
+ bool flg = smat->may_have_sub;
693
+ VALUE target = kmm_mat_bracket(1, argv, self);
694
+ if ( rb_obj_is_kind_of(argv[1], km_cMat) ) {
695
+ if ( rb_obj_is_kind_of(argv[0], km_cMat) && km_mat2smat(argv[0])->vtype == VT_BOOL ) {
696
+ SMAT *smat2 = km_mat2smat(argv[1]);
697
+ bool flg2 = smat2->may_have_sub;
698
+ VALUE vsrc = kmm_mat_bracket(1, argv, argv[1]);
699
+ kmm_mat_copy_from(target, vsrc);
700
+ kmm_mat__kill(vsrc);
701
+ smat2->may_have_sub = flg2;
702
+ } else {
703
+ kmm_mat_copy_from(target, argv[1]);
704
+ }
705
+ } else {
706
+ kmm_mat_fill(target, argv[1]);
707
+ }
708
+ kmm_mat__kill(target);
709
+ smat->may_have_sub = flg;
710
+ }
711
+ } else {
712
+ if ( rb_obj_is_kind_of(argv[0], km_cMat) ) {
713
+ kmm_mat_copy_from(self, argv[0]);
714
+ } else {
715
+ kmm_mat_fill(self, argv[0]);
716
+ }
717
+ }
718
+ return argv[argc-1];
719
+ }
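
The static mod() at the top of this hunk is what gives Mat#[] and Mat#[]= their Ruby-style index wrapping for Integer, Array and Range indices. A small in-file check of its behaviour (toy_check_mod is an illustrative name; it has to live in the same translation unit because mod() is static):

#include <assert.h>
static void
toy_check_mod(void)
{
	assert(mod(-1, 5) == 4);  // v[-1] addresses the last entry of a length-5 vector
	assert(mod( 7, 5) == 2);  // out-of-range non-negative indices wrap as well
	assert(mod(-7, 5) == 3);
}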