kmat 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. checksums.yaml +7 -0
  2. data/.gitattributes +3 -0
  3. data/.gitignore +15 -0
  4. data/CHANGELOG.md +15 -0
  5. data/Gemfile +4 -0
  6. data/LICENSE.md +675 -0
  7. data/README.md +224 -0
  8. data/Rakefile +26 -0
  9. data/bin/console +14 -0
  10. data/bin/setup +8 -0
  11. data/ext/kmat/arith/binary.c +1121 -0
  12. data/ext/kmat/arith/logical.c +332 -0
  13. data/ext/kmat/arith/math.c +34 -0
  14. data/ext/kmat/arith/statistics.c +173 -0
  15. data/ext/kmat/arith/unary.c +165 -0
  16. data/ext/kmat/auto_collect.rb +118 -0
  17. data/ext/kmat/elementwise_function.rb +149 -0
  18. data/ext/kmat/extconf.rb +75 -0
  19. data/ext/kmat/id.txt +80 -0
  20. data/ext/kmat/id_sym.rb +40 -0
  21. data/ext/kmat/km_util.h +97 -0
  22. data/ext/kmat/kmat.h +96 -0
  23. data/ext/kmat/lapack_headers/blas.h +354 -0
  24. data/ext/kmat/lapack_headers/lapacke.h +19455 -0
  25. data/ext/kmat/lapack_headers/lapacke_config.h +119 -0
  26. data/ext/kmat/lapack_headers/lapacke_mangling.h +17 -0
  27. data/ext/kmat/lapack_headers/lapacke_utils.h +579 -0
  28. data/ext/kmat/linalg/dla.c +1629 -0
  29. data/ext/kmat/linalg/linalg.c +267 -0
  30. data/ext/kmat/linalg/norm.c +727 -0
  31. data/ext/kmat/linalg/vla.c +102 -0
  32. data/ext/kmat/linalg/working.c +240 -0
  33. data/ext/kmat/main.c +95 -0
  34. data/ext/kmat/smat/accessor.c +719 -0
  35. data/ext/kmat/smat/array.c +108 -0
  36. data/ext/kmat/smat/boxmuller.c +72 -0
  37. data/ext/kmat/smat/constructer.c +302 -0
  38. data/ext/kmat/smat/convert.c +375 -0
  39. data/ext/kmat/smat/elem.c +171 -0
  40. data/ext/kmat/smat/fund.c +702 -0
  41. data/ext/kmat/smat/share.c +427 -0
  42. data/ext/kmat/smat/smat.c +530 -0
  43. data/ext/kmat/smat/sort.c +1156 -0
  44. data/ext/kmat/sym.txt +34 -0
  45. data/kmat.gemspec +46 -0
  46. data/lib/kmat.rb +20 -0
  47. data/lib/kmat/accessor.rb +164 -0
  48. data/lib/kmat/arith.rb +189 -0
  49. data/lib/kmat/linalg.rb +279 -0
  50. data/lib/kmat/logical.rb +150 -0
  51. data/lib/kmat/misc.rb +122 -0
  52. data/lib/kmat/random.rb +106 -0
  53. data/lib/kmat/statistics.rb +98 -0
  54. data/lib/kmat/version.rb +3 -0
  55. metadata +156 -0
@@ -0,0 +1,267 @@
1
+ #include "../kmat.h"
2
+
3
+ // `pairc' specifies the number of int pairs passed after it (so the number of arguments is 1 + pairc*2)
4
+ // raise MismatchedDimensionError if the two values of any pair differ
5
+
6
+ // the number of int pairs is specified by `pairc'
7
+ // check whether the two ints of each pair are equal
8
+ // raise Mat::MismatchedDimensionError if at least one pair does not match
9
+ void
10
+ km_check_size(int pairc, ...)
11
+ {
12
+ va_list argp;
13
+ va_start(argp, pairc);
14
+ for (int i=0; i<pairc; i++ ) {
15
+ int a = va_arg(argp, int); int b = va_arg(argp, int);
16
+ if ( a != b ) {
17
+ va_end(argp);
18
+ rb_raise(km_eDim, "dimension mismatched (%d != %d)", a, b);
19
+ }
20
+ }
21
+ va_end(argp);
22
+ }
23
+
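km_check_size() above takes a pair count followed by 2*pairc ints and raises on the first mismatched pair; the same varargs pattern is used for the other checkers in this file. A minimal standalone sketch of the pattern outside the Ruby runtime (the name `check_pairs` is illustrative, not part of kmat):

```c
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Return 0 if every (a, b) pair matches, otherwise report the first mismatch.
   Mirrors km_check_size(), but prints instead of raising a Ruby exception. */
static int
check_pairs(int pairc, ...)
{
    va_list argp;
    va_start(argp, pairc);
    for (int i = 0; i < pairc; i++) {
        int a = va_arg(argp, int);
        int b = va_arg(argp, int);
        if (a != b) {
            va_end(argp);
            fprintf(stderr, "dimension mismatched (%d != %d)\n", a, b);
            return 1;
        }
    }
    va_end(argp);
    return 0;
}

int
main(void)
{
    /* e.g. checking that a 3x4 matrix is conformable with another 3x4 matrix */
    return check_pairs(2, 3, 3, 4, 4) ? EXIT_FAILURE : EXIT_SUCCESS;
}
```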
24
+ // the number of SMATs is specified by `argc'
25
+ // check whether the value types of the arguments are VT_DOUBLE or not
26
+ // raise Mat::ValueTypeError if at least one of the arguments is not a float matrix
27
+ void
28
+ km_check_double(int argc, ...)
29
+ {
30
+ va_list argp;
31
+ va_start(argp, argc);
32
+ for ( int i=0; i<argc; i++ ) {
33
+ SMAT *smat = va_arg(argp, SMAT *);
34
+ if ( smat->vtype != VT_DOUBLE ) {
35
+ va_end(argp);
36
+ rb_raise(km_eVT, "float matrix is expected");
37
+ }
38
+ }
39
+ va_end(argp);
40
+ }
41
+
42
+ // the number of SMATs is specified by `argc'
43
+ // check whether the value types of the arguments are VT_COMPLEX or not
44
+ // raise Mat::ValueTypeError if at least one of the arguments is not a complex matrix
45
+ void
46
+ km_check_complex(int argc, ...)
47
+ {
48
+ va_list argp;
49
+ va_start(argp, argc);
50
+ for ( int i=0; i<argc; i++ ) {
51
+ SMAT *smat = va_arg(argp, SMAT *);
52
+ if ( smat->vtype != VT_COMPLEX ) {
53
+ va_end(argp);
54
+ rb_raise(km_eVT, "complex matrix is expected");
55
+ }
56
+ }
57
+ va_end(argp);
58
+ }
59
+
60
+ // the number of SMATs is specified by `argc'
61
+ // check whether the value types of the arguments are VT_VALUE or not
62
+ // raise Mat::ValueTypeError if at least one of the arguments is not a ruby-object matrix
63
+ void
64
+ km_check_value(int argc, ...)
65
+ {
66
+ va_list argp;
67
+ va_start(argp, argc);
68
+ for ( int i=0; i<argc; i++ ) {
69
+ SMAT *smat = va_arg(argp, SMAT *);
70
+ if ( smat->vtype != VT_VALUE ) {
71
+ va_end(argp);
72
+ rb_raise(km_eVT, "Ruby-object matrix is expected");
73
+ }
74
+ }
75
+ va_end(argp);
76
+ }
77
+
78
+ // check whether LAPACK routine's output `info' is 0 or not
79
+ // raise Mat::InternalError if `info' < 0
80
+ // raise `klass' with message `mess' if `info' > 0
81
+ // if `klass' is Qnil, no exception will be raised when `info' > 0
82
+ // on entry, `func' is the name of the LAPACK routine that produced `info'
83
+ void
84
+ km_check_info(int info, VALUE klass, const char *mess, const char *func)
85
+ {
86
+ if ( info == 0 ) {
87
+ return;
88
+ } else if ( info > 0 ) {
89
+ if ( klass != Qnil ) {
90
+ rb_raise(klass, "%s", mess);
91
+ } else {
92
+ return;
93
+ }
94
+ } else {
95
+ rb_raise(km_eInternal, "%d-th argument of %s_() is illegal", -info, func);
96
+ }
97
+ }
98
+
99
+ // compute X which satisfies AX=B. `self' is the output
100
+ VALUE
101
+ kmm_mat_solve_destl(VALUE self, VALUE va, VALUE vb)
102
+ {
103
+ km_check_frozen(self);
104
+ SMAT *smat = km_mat2smat(self);
105
+ VT_SWITCH( smat->vtype,
106
+ return km_dmat_solve(self, va, vb);,
107
+ rb_raise(km_eNotImp, "solve for complex matricies is not available yet");,
108
+ rb_raise(km_eVT, "can't solve int matricies");,
109
+ rb_raise(km_eVT, "can't solve boolean matricies");,
110
+ return km_vmat_solve(self, va, vb);
111
+ );
112
+ }
113
+ VALUE
114
+ kmm_mat_solve(VALUE va, VALUE vb)
115
+ {
116
+ SMAT *sb = km_mat2smat(vb);
117
+ return kmm_mat_solve_destl(km_Mat(sb->m, sb->n, sb->vtype), va, vb);
118
+ }
119
+
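km_dmat_solve() and km_vmat_solve() are defined elsewhere in the gem (linalg/dla.c and linalg/vla.c), so they do not appear in this hunk. As a rough standalone illustration only, solving AX=B for a column-major double matrix with LAPACK's dgesv looks like the sketch below; it assumes a Fortran LAPACK with the trailing-underscore convention used throughout this extension and a link flag such as `-llapack`:

```c
#include <stdio.h>

/* Fortran LAPACK prototype (assumed available at link time) */
extern void dgesv_(int *n, int *nrhs, double *a, int *lda,
                   int *ipiv, double *b, int *ldb, int *info);

int
main(void)
{
    /* Solve AX = B for a 2x2 A and a single right-hand side, column-major. */
    double a[4] = { 3.0, 1.0,   /* column 0 */
                    1.0, 2.0 }; /* column 1: A = [[3, 1], [1, 2]] */
    double b[2] = { 9.0, 8.0 }; /* overwritten with the solution X */
    int n = 2, nrhs = 1, ipiv[2], info;

    dgesv_(&n, &nrhs, a, &n, ipiv, b, &n, &info);
    if (info != 0) {   /* same convention km_check_info() encodes:        */
        return 1;      /* info < 0: bad argument, info > 0: singular A    */
    }
    printf("x = [%g, %g]\n", b[0], b[1]);  /* expect [2, 3] */
    return 0;
}
```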
120
+ // compute X which satisfies A'X=B'. `self' is the output
121
+ VALUE
122
+ km_recover_trans(VALUE vab)
123
+ {
124
+ SMAT *sa = km_mat2smat(rb_ary_entry(vab, 0));
125
+ SMAT *sb = km_mat2smat(rb_ary_entry(vab, 1));
126
+ sa->trans = !sa->trans; SWAP(int, sa->m, sa->n);
127
+ sb->trans = !sb->trans; SWAP(int, sb->m, sb->n);
128
+ return vab;
129
+ }
130
+ static VALUE
131
+ km_mat_solve_wrap(VALUE data)
132
+ {
133
+ return kmm_mat_solve_destl(rb_ary_entry(data, 0), rb_ary_entry(data, 1), rb_ary_entry(data, 2));
134
+ }
135
+ VALUE
136
+ kmm_mat_tsolve_destl(VALUE self, VALUE va, VALUE vb)
137
+ {
138
+ VALUE vab = rb_ary_new3(2, va, vb);
139
+ km_recover_trans(vab);
140
+ km_ensure(km_mat_solve_wrap, rb_ary_new3(3, self, va, vb), km_recover_trans, vab);
141
+ return self;
142
+ }
143
+ VALUE
144
+ kmm_mat_tsolve(VALUE va, VALUE vb)
145
+ {
146
+ SMAT *sb = km_mat2smat(vb);
147
+ return kmm_mat_tsolve_destl(km_Mat(sb->n, sb->m, sb->vtype), va, vb);
148
+ }
149
+
150
+ // compute matrix inverse of `self'. `self' is replaced by the result
151
+ // alias inv!
152
+ VALUE
153
+ kmm_mat_inverse_destl(VALUE self, VALUE va)
154
+ {
155
+ km_check_frozen(self);
156
+ SMAT *smat = km_mat2smat(self);
157
+ VT_SWITCH( smat->vtype,
158
+ return km_dmat_inverse(self, va);,
159
+ rb_raise(km_eNotImp, "inverse for complex matrix is not available yet");,
160
+ rb_raise(km_eVT, "can't inverse int matricies");,
161
+ rb_raise(km_eVT, "can't inverse boolean matricies");,
162
+ return km_vmat_inverse(self, va);
163
+ );
164
+ }
165
+ // alias inv
166
+ VALUE
167
+ kmm_mat_inverse(VALUE va)
168
+ {
169
+ SMAT *sa = km_mat2smat(va);
170
+ return kmm_mat_inverse_destl(km_Mat(sa->m, sa->m, sa->vtype), va);
171
+ }
172
+
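km_dmat_inverse() likewise lives in linalg/dla.c. The customary LAPACK route for a dense double inverse is an LU factorization (dgetrf_) followed by dgetri_, the same pair km__rcondf_body() uses in norm.c below. The following is only a hedged standalone sketch of that route, not kmat's actual implementation (assumes `-llapack`):

```c
#include <stdio.h>

extern void dgetrf_(int *m, int *n, double *a, int *lda, int *ipiv, int *info);
extern void dgetri_(int *n, double *a, int *lda, int *ipiv,
                    double *work, int *lwork, int *info);

int
main(void)
{
    /* invert a 2x2 column-major matrix in place */
    double a[4] = { 4.0, 2.0, 7.0, 6.0 };  /* [[4, 7], [2, 6]], det = 10 */
    int n = 2, ipiv[2], info;
    double work[8];
    int lwork = 8;  /* fixed small workspace; production code queries lwork = -1 */

    dgetrf_(&n, &n, a, &n, ipiv, &info);
    if (info != 0) return 1;               /* info > 0: exactly singular */
    dgetri_(&n, a, &n, ipiv, work, &lwork, &info);
    if (info != 0) return 1;

    printf("inv = [[%g, %g], [%g, %g]]\n", a[0], a[2], a[1], a[3]);
    /* expect [[0.6, -0.7], [-0.2, 0.4]] */
    return 0;
}
```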
173
+ // matrix product
174
+ void
175
+ km_dmprod(int m, int n, int k, double *a, int lda, double *b, int ldb, double *r, int ldr)
176
+ {
177
+ static char trans[] = "N";
178
+ static double alpha=1.0, beta=0.0;
179
+ dgemm_(trans, trans, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, r, &ldr);
180
+ }
181
+ struct km_mprod_arg {
182
+ SMAT *sr, *sa, *sb;
183
+ LAWORK r, a, b;
184
+ };
185
+ static VALUE
186
+ km_mprod_body(VALUE data)
187
+ {
188
+ struct km_mprod_arg *a = (struct km_mprod_arg *)data;
189
+
190
+ int m = a->sr->m, n = a->sr->n, k = a->sa->n;
191
+ km_check_size(3, a->sb->m,k, a->sa->m,m, a->sb->n,n);
192
+
193
+ KALLOCz(a->r, a->sr);
194
+ KALLOCn(a->a, a->sa);
195
+ KALLOCn(a->b, a->sb);
196
+
197
+ VTYPE vt = a->sr->vtype;
198
+ if ( vt == VT_DOUBLE ) {
199
+ char trans[] = "N";
200
+ double alpha=1.0, beta=0.0;
201
+ dgemm_(trans, trans, &m, &n, &k, &alpha, a->a.d, &(a->a.ld), a->b.d, &(a->b.ld), &beta, a->r.d, &(a->r.ld));
202
+ } else if ( vt == VT_COMPLEX ) {
203
+ char trans[] = "N";
204
+ COMPLEX alpha=cpack(1.0, 0.0), beta=cpack(0.0, 0.0);
205
+ zgemm_(trans, trans, &m, &n, &k, &alpha, a->a.z, &(a->a.ld), a->b.z, &(a->b.ld), &beta, a->r.z, &(a->r.ld));
206
+ } else if ( vt == VT_INT ) {
207
+ for ( int i=0; i<m; i++ ) { for ( int j=0; j<n; j++ ) {
208
+ int *foo=(a->r.i)+(i+j*(a->r.ld));
209
+ int jldb = j*(a->b.ld);
210
+ for ( int l=0; l<k; l++ ) {
211
+ *foo += (a->a.i)[i+l*(a->a.ld)] * (a->b.i)[l+jldb];
212
+ }
213
+ } }
214
+ } else if ( vt == VT_BOOL ) {
215
+ for ( int i=0; i<m; i++ ) { for ( int j=0; j<n; j++ ) {
216
+ bool *foo=(a->r.b)+(i+j*(a->r.ld));
217
+ int jldb = j*(a->b.ld);
218
+ for ( int l=0; l<k; l++ ) {
219
+ bool bar = ( (a->a.b)[i+l*(a->a.ld)] && (a->b.b)[l+jldb] );
220
+ *foo = XOR(*foo, bar);
221
+ }
222
+ } }
223
+ } else if ( vt == VT_VALUE ) {
224
+ for ( int i=0; i<m; i++ ) { for ( int j=0; j<n; j++ ) {
225
+ VALUE *foo=(a->r.v)+(i+j*(a->r.ld));
226
+ *foo = INT2NUM(0);
227
+ int jldb = j*(a->b.ld);
228
+ for ( int l=0; l<k; l++ ) {
229
+ VALUE bar = rb_funcall( (a->a.v)[i+l*(a->a.ld)], id_op_mul, 1, (a->b.v)[l+jldb] );
230
+ *foo = rb_funcall(*foo, id_op_plus, 1, bar);
231
+ }
232
+ } }
233
+ }
234
+
235
+ return Qnil;
236
+ }
237
+ static VALUE
238
+ km_mprod_ensure(VALUE data)
239
+ {
240
+ struct km_mprod_arg *a = (struct km_mprod_arg *)data;
241
+
242
+ km_free_if_needed(&(a->b));
243
+ km_free_if_needed(&(a->a));
244
+ km_copy_and_free_if_needed(a->sr, &(a->r));
245
+
246
+ return Qnil;
247
+ }
248
+ VALUE
249
+ kmm_mat_mprod_destl(VALUE self, VALUE va, VALUE vb)
250
+ {
251
+ km_check_frozen(self);
252
+ struct km_mprod_arg a;
253
+ a.sr = km_mat2smat(self); a.sa = km_mat2smat(va); a.sb = km_mat2smat(vb);
254
+ if ( a.sr->vtype != a.sa->vtype || a.sr->vtype != a.sb->vtype ) {
255
+ rb_raise(km_eVT, "value types must be same");
256
+ }
257
+
258
+ km_ensure(km_mprod_body, (VALUE)&a, km_mprod_ensure, (VALUE)&a);
259
+
260
+ return self;
261
+ }
262
+ VALUE
263
+ kmm_mat_mprod(VALUE va, VALUE vb)
264
+ {
265
+ SMAT *sa = km_mat2smat(va), *sb = km_mat2smat(vb);
266
+ return kmm_mat_mprod_destl(km_Mat(sa->m, sb->n, sa->vtype), va, vb);
267
+ }
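All of the non-BLAS branches in km_mprod_body() index column-major storage as base[i + j*ld], where ld is the leading dimension: the allocated column stride, which can exceed the row count for submatrix views. The sketch below shows that indexing with hypothetical names, mirroring the VT_INT branch (except that it zeroes the destination itself instead of relying on KALLOCz):

```c
#include <stdio.h>

/* r(m x n) = a(m x k) * b(k x n), all column-major with leading dimensions. */
static void
mprod_int(int m, int n, int k,
          const int *a, int lda, const int *b, int ldb, int *r, int ldr)
{
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            int *dst = &r[i + j * ldr];
            *dst = 0;
            for (int l = 0; l < k; l++) {
                *dst += a[i + l * lda] * b[l + j * ldb];
            }
        }
    }
}

int
main(void)
{
    /* 2x2 times 2x2, stored in larger buffers (ld = 3) to show ld != m */
    int a[6] = { 1, 3, 0,  2, 4, 0 };   /* [[1, 2], [3, 4]] with lda = 3 */
    int b[6] = { 5, 7, 0,  6, 8, 0 };   /* [[5, 6], [7, 8]] with ldb = 3 */
    int r[6] = { 0 };

    mprod_int(2, 2, 2, a, 3, b, 3, r, 3);
    printf("[[%d, %d], [%d, %d]]\n", r[0], r[3], r[1], r[4]);  /* [[19, 22], [43, 50]] */
    return 0;
}
```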
@@ -0,0 +1,727 @@
1
+ #include "../kmat.h"
2
+
3
+ #define SET_LD(ldx, smat) int ldx; \
4
+ if ( smat->n == 1 ) { \
5
+ if ( smat->trans ) { \
6
+ ldx = smat->ld; \
7
+ } else { \
8
+ ldx = 1; \
9
+ } \
10
+ } else { \
11
+ if ( smat->trans ) { \
12
+ ldx = 1; \
13
+ } else { \
14
+ ldx = smat->ld; \
15
+ } \
16
+ }
17
+ #define DEF_IPROD(id, type, mul, add, zero) static type \
18
+ km_##id##mat_iprod(SMAT *sa, SMAT *sb) { \
19
+ type ret; \
20
+ SET_LD(lda, sa) \
21
+ SET_LD(ldb, sb) \
22
+ int len = LENGTH(sa); \
23
+ if ( len == 0 ) { return zero; } \
24
+ if ( sa->stype == ST_RSUB ) { \
25
+ if ( sb->stype == ST_RSUB ) { \
26
+ ret = mul( *(sa->id##pbody[0]), *(sb->id##pbody[0]) ); \
27
+ for ( int i=1; i<len; i++ ) { \
28
+ type temp = mul( *(sa->id##pbody[i*lda]), *(sb->id##pbody[i*ldb]) ); \
29
+ ret = add(ret, temp); \
30
+ } \
31
+ } else { \
32
+ ret = mul( *(sa->id##pbody[0]), (sb->id##body[0]) ); \
33
+ for ( int i=1; i<len; i++ ) { \
34
+ type temp = mul( *(sa->id##pbody[i*lda]), (sb->id##body[i*ldb]) ); \
35
+ ret = add(ret, temp); \
36
+ } \
37
+ } \
38
+ } else { \
39
+ if ( sb->stype == ST_RSUB ) { \
40
+ ret = mul( (sa->id##body[0]), *(sb->id##pbody[0]) ); \
41
+ for ( int i=1; i<len; i++ ) { \
42
+ type temp = mul( (sa->id##body[i*lda]), *(sb->id##pbody[i*ldb]) ); \
43
+ ret = add(ret, temp); \
44
+ } \
45
+ } else { \
46
+ ret = mul( (sa->id##body[0]), (sb->id##body[0]) ); \
47
+ for ( int i=1; i<len; i++ ) { \
48
+ type temp = mul( (sa->id##body[i*lda]), (sb->id##body[i*ldb]) ); \
49
+ ret = add(ret, temp); \
50
+ } \
51
+ } \
52
+ } \
53
+ return ret; \
54
+ }
55
+ #define NMUL(a, b) ((a)*(b))
56
+ #define NADD(a, b) ((a)+(b))
57
+ #define BMUL(a, b) ((a)&&(b))
58
+ #define VMUL(a, b) rb_funcall((a), id_op_mul, 1, (b))
59
+ #define VADD(a, b) rb_funcall((a), id_op_plus, 1, (b))
60
+ DEF_IPROD(d, double, NMUL, NADD, 0.0)
61
+ DEF_IPROD(z, COMPLEX, NMUL, NADD, cpack(0.0, 0.0))
62
+ DEF_IPROD(i, int, NMUL, NADD, 0)
63
+ DEF_IPROD(b, bool, BMUL, XOR, false)
64
+ DEF_IPROD(v, VALUE, VMUL, VADD, INT2NUM(0))
65
+ static double
66
+ km_dmat_iprodb(SMAT *sa, SMAT *sb)
67
+ {
68
+ if ( sa->stype == ST_RSUB || sb->stype == ST_RSUB ) {
69
+ return km_dmat_iprod(sa, sb);
70
+ }
71
+ SET_LD(lda, sa)
72
+ SET_LD(ldb, sb)
73
+ int n = LENGTH(sa);
74
+ return ddot_(&n, sa->dbody, &lda, sb->dbody, &ldb);
75
+ }
76
+ static COMPLEX
77
+ km_zmat_iprodb(SMAT *sa, SMAT *sb)
78
+ {
79
+ if ( sa->stype == ST_RSUB || sb->stype == ST_RSUB ) {
80
+ return km_zmat_iprod(sa, sb);
81
+ }
82
+ SET_LD(lda, sa)
83
+ SET_LD(ldb, sb)
84
+ int n = LENGTH(sa);
85
+ return zdotu_(&n, sa->zbody, &lda, sb->zbody, &ldb);
86
+ }
87
+ VALUE
88
+ kmm_mat__iprod(VALUE self, VALUE vb)
89
+ {
90
+ SMAT *sa = km_mat2smat(self), *sb = km_mat2smat(vb);
91
+ if ( !VECTOR_P(sa) || !VECTOR_P(sb) ) {
92
+ rb_raise(km_eDim, "both self and the argument must be vectors");
93
+ } else if ( LENGTH(sa) != LENGTH(sb) ) {
94
+ rb_raise(km_eDim, "dimensions must be the same (%d != %d)", LENGTH(sa), LENGTH(sb));
95
+ } else if ( sa->vtype != sb->vtype ) {
96
+ rb_raise(km_eVT, "value types must be the same");
97
+ }
98
+ VT_SWITCH( sa->vtype,
99
+ return rb_float_new(km_dmat_iprodb(sa, sb));,
100
+ return km_c2v(km_zmat_iprodb(sa, sb));,
101
+ return INT2NUM(km_imat_iprod(sa, sb));,
102
+ return TF2V(km_bmat_iprod(sa, sb));,
103
+ return km_vmat_iprod(sa, sb);
104
+ );
105
+ }
106
+
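km_dmat_iprodb() hands contiguous (non-ST_RSUB) vectors to BLAS ddot_, passing the value computed by SET_LD as the stride. A small standalone example of ddot_ with a non-unit increment; it assumes a Fortran BLAS is linked (e.g. `-lblas`) and the data layout is only illustrative:

```c
#include <stdio.h>

/* BLAS dot product with strides, the routine km_dmat_iprodb() falls back to */
extern double ddot_(int *n, double *x, int *incx, double *y, int *incy);

int
main(void)
{
    /* treat row 0 of a column-major 2x3 matrix as a vector: stride = ld = 2 */
    double a[6] = { 1.0, 9.0,  2.0, 9.0,  3.0, 9.0 };  /* row 0 is (1, 2, 3) */
    double y[3] = { 4.0, 5.0, 6.0 };
    int n = 3, inca = 2, incy = 1;

    printf("%g\n", ddot_(&n, a, &inca, y, &incy));  /* 1*4 + 2*5 + 3*6 = 32 */
    return 0;
}
```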
107
+ // Frobenius norm
108
+ static void
109
+ km_normf_func_d(double *ent, void *data)
110
+ {
111
+ double *ret = (double *)data;
112
+ *ret = hypot(*ret, *ent);
113
+ }
114
+ static void
115
+ km_normf_func_z(COMPLEX *ent, void *data)
116
+ {
117
+ double *ret = (double *)data;
118
+ *ret = hypot(*ret, cabs(*ent));
119
+ }
120
+ static void
121
+ km_normf_func_i(int *ent, void *data)
122
+ {
123
+ double *ret = (double *)data;
124
+ *ret = hypot(*ret, (double)(*ent));
125
+ }
126
+ static void
127
+ km_normf_func_b(bool *ent, void *data)
128
+ {
129
+ bool *ret = (bool *)data;
130
+ *ret = XOR(*ret, *ent);
131
+ }
132
+ static void
133
+ km_normf_func_v(VALUE *ent, void *data)
134
+ {
135
+ VALUE *ret = (VALUE *)data;
136
+ *ret = rb_funcall(rb_mMath, id_hypot, 2, *ret, *ent);
137
+ }
138
+ VALUE
139
+ kmm_mat_normf(VALUE self)
140
+ {
141
+ SMAT *smat = km_mat2smat(self);
142
+ if ( smat->vtype == VT_DOUBLE ) {
143
+ if ( smat->stype == ST_FULL ) {
144
+ int ione=1, n = LENGTH(smat);
145
+ return rb_float_new(dnrm2_(&n, smat->dbody, &ione));
146
+ } else if ( smat->stype == ST_SSUB ) {
147
+ int ione=1, lps, size; double ret=0.0;
148
+ if ( smat->trans ) {
149
+ lps = smat->m; size = smat->n;
150
+ } else {
151
+ lps = smat->n; size = smat->m;
152
+ }
153
+ for ( int i=0; i<lps; i++ ) {
154
+ ret = hypot(ret, dnrm2_(&size, smat->dbody+i*(smat->ld), &ione));
155
+ }
156
+ return rb_float_new(ret);
157
+ } else {
158
+ double ret = 0.0;
159
+ km_smat_each_d(smat, km_normf_func_d, (void *)(&ret));
160
+ return rb_float_new(ret);
161
+ }
162
+ } else if ( smat->vtype == VT_COMPLEX ) {
163
+ if ( smat->stype == ST_FULL ) {
164
+ int ione=1, n = LENGTH(smat);
165
+ return rb_float_new(dznrm2_(&n, smat->zbody, &ione));
166
+ } else if ( smat->stype == ST_SSUB ) {
167
+ int ione=1, lps, size; double ret=0.0;
168
+ if ( smat->trans ) {
169
+ lps = smat->m; size = smat->n;
170
+ } else {
171
+ lps = smat->n; size = smat->m;
172
+ }
173
+ for ( int i=0; i<lps; i++ ) {
174
+ ret = hypot(ret, dznrm2_(&size, smat->zbody+i*(smat->ld), &ione));
175
+ }
176
+ return rb_float_new(ret);
177
+ } else {
178
+ double ret = 0.0;
179
+ km_smat_each_z(smat, km_normf_func_z, (void *)(&ret));
180
+ return rb_float_new(ret);
181
+ }
182
+ } else if ( smat->vtype == VT_INT ) {
183
+ double ret = 0.0;
184
+ km_smat_each_i(smat, km_normf_func_i, (void *)(&ret));
185
+ return rb_float_new(ret);
186
+ } else if ( smat->vtype == VT_BOOL ) {
187
+ bool ret = false;
188
+ km_smat_each_b(smat, km_normf_func_b, (void *)(&ret));
189
+ return TF2V(ret);
190
+ } else if ( smat->vtype == VT_VALUE ) {
191
+ VALUE ret = INT2NUM(0);
192
+ km_smat_each_v(smat, km_normf_func_v, (void *)(&ret));
193
+ return ret;
194
+ } else {
195
+ rb_raise(km_eInternal, "unknown value type");
196
+ }
197
+ }
198
+
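km_normf_func_d() accumulates the Frobenius norm with hypot() rather than summing squares, which avoids overflow and underflow for entries near the extremes of double range. A standalone sketch of the same accumulation over a column-major matrix (the helper name is illustrative; compile with `-lm`):

```c
#include <math.h>
#include <stdio.h>

/* Frobenius norm of a column-major m x n matrix with leading dimension ld,
   accumulated with hypot() as km_normf_func_d() does element by element. */
static double
frobenius(const double *a, int m, int n, int ld)
{
    double ret = 0.0;
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < m; i++) {
            ret = hypot(ret, a[i + j * ld]);
        }
    }
    return ret;
}

int
main(void)
{
    double a[4] = { 3.0, 0.0, 0.0, 4.0 };  /* diag(3, 4) */
    printf("%g\n", frobenius(a, 2, 2, 2)); /* sqrt(9 + 16) = 5 */
    return 0;
}
```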
199
+ // L-1, L-infinity norm
200
+ #define NORM1(type, id, m_ent, m, n, m_abs, m_add, m_cmp, x2v) \
201
+ type ret = m_abs(m_ent(smat, id, 0)); \
202
+ for ( int i=1; i<m; i++ ) { \
203
+ type foo = m_abs(m_ent(smat, id, i)); \
204
+ ret = m_add(ret, foo); \
205
+ } \
206
+ for ( int j=1; j<n; j++ ) { \
207
+ type bar = m_abs(m_ent(smat, id, j*(smat->ld))); \
208
+ for ( int i=1; i<m; i++ ) { \
209
+ type foo = m_abs(m_ent(smat, id, i+j*(smat->ld))); \
210
+ bar = m_add(bar, foo); \
211
+ } \
212
+ if ( m_cmp(ret, bar) ) { ret = bar; } \
213
+ } \
214
+ return x2v(ret)
215
+ #define NORM1_DZ(id, bid, m, n, m_abs) do { \
216
+ if ( smat->stype == ST_RSUB ) { \
217
+ NORM1(double, id, ENTITYr0, m, n, m_abs, NUM_ADD, NUM_CMP, rb_float_new); \
218
+ } else { \
219
+ const int ione=1; \
220
+ double ret = bid##asum_(&(m), smat->id##body, &ione); \
221
+ for ( int j=1; j<n; j++ ) { \
222
+ double temp = bid##asum_(&(m), smat->id##body+(j*(smat->ld)), &ione); \
223
+ if ( ret < temp ) { ret = temp; } \
224
+ } \
225
+ return rb_float_new(ret); \
226
+ } \
227
+ } while (0)
228
+ #define NORMi(type, id, m_ent, m, n, m_abs, m_add, m_cmp, x2v) \
229
+ type ret = m_abs(m_ent(smat, id, 0)); \
230
+ for ( int j=1; j<n; j++ ) { \
231
+ type foo = m_abs(m_ent(smat, id, j*(smat->ld))); \
232
+ ret = m_add(ret, foo); \
233
+ } \
234
+ for ( int i=1; i<m; i++ ) { \
235
+ type bar = m_abs(m_ent(smat, id, i)); \
236
+ for ( int j=1; j<n; j++ ) { \
237
+ type foo = m_abs(m_ent(smat, id, i+j*(smat->ld))); \
238
+ bar = m_add(bar, foo); \
239
+ } \
240
+ if ( m_cmp(ret, bar) ) { ret = bar; } \
241
+ } \
242
+ return x2v(ret)
243
+ #define NORMi_DZ(id, bid, m, n, m_abs) do { \
244
+ if ( smat->stype == ST_RSUB ) { \
245
+ NORMi(double, id, ENTITYr0, m, n, m_abs, NUM_ADD, NUM_CMP, rb_float_new); \
246
+ } else { \
247
+ double ret = bid##asum_(&(n), smat->id##body, &(smat->ld)); \
248
+ for ( int i=1; i<m; i++ ) { \
249
+ double temp = bid##asum_(&(n), smat->id##body+i, &(smat->ld)); \
250
+ if ( ret < temp ) { ret = temp; } \
251
+ } \
252
+ return rb_float_new(ret); \
253
+ } \
254
+ } while (0)
255
+ #define NORM1i_OTHER(oi, type, id, m, n, m_abs, m_add, m_cmp, x2v) do { \
256
+ if ( smat->stype == ST_RSUB ) { \
257
+ NORM##oi(type, id, ENTITYr0, m, n, m_abs, m_add, m_cmp, x2v); \
258
+ } else { \
259
+ NORM##oi(type, id, ENTITYd0, m, n, m_abs, m_add, m_cmp, x2v); \
260
+ } \
261
+ } while (0)
262
+ #define NUM_ADD(a, b) a+b
263
+ #define NUM_CMP(a, b) a<b
264
+ #define VAL_ABS(a) rb_funcall(a, id_abs, 0)
265
+ #define VAL_ADD(a, b) rb_funcall(a, id_op_plus, 1, b)
266
+ #define VAL_CMP(a, b) RTEST(rb_funcall(a, id_op_lt, 1, b))
267
+
268
+ // induced L-1 norm (the maximum absolute column sum)
269
+ VALUE
270
+ kmm_mat_norm1(VALUE self)
271
+ {
272
+ SMAT *smat = km_mat2smat(self);
273
+ if ( smat->trans ) {
274
+ VT_SWITCH( smat->vtype,
275
+ NORMi_DZ(d, d, smat->n, smat->m, fabs);,
276
+ NORMi_DZ(z, dz, smat->n, smat->m, cabs);,
277
+ NORM1i_OTHER(i, int, i, smat->n, smat->m, ABS, NUM_ADD, NUM_CMP, INT2NUM);,
278
+ NORM1i_OTHER(i, bool, b, smat->n, smat->m, ITSELF, XOR, NUM_CMP, TF2V);,
279
+ NORM1i_OTHER(i, VALUE, v, smat->n, smat->m, VAL_ABS, VAL_ADD, VAL_CMP, ITSELF);
280
+ );
281
+ } else {
282
+ VT_SWITCH( smat->vtype,
283
+ NORM1_DZ(d, d, smat->m, smat->n, fabs);,
284
+ NORM1_DZ(z, dz, smat->m, smat->n, cabs);,
285
+ NORM1i_OTHER(1, int, i, smat->m, smat->n, ABS, NUM_ADD, NUM_CMP, INT2NUM);,
286
+ NORM1i_OTHER(1, bool, b, smat->m, smat->n, ITSELF, XOR, NUM_CMP, TF2V);,
287
+ NORM1i_OTHER(1, VALUE, v, smat->m, smat->n, VAL_ABS, VAL_ADD, VAL_CMP, ITSELF);
288
+ );
289
+ }
290
+ }
291
+ // induced L-infinity norm (the maximum absolute row sum)
292
+ VALUE
293
+ kmm_mat_normi(VALUE self)
294
+ {
295
+ SMAT *smat = km_mat2smat(self);
296
+ if ( smat->trans ) {
297
+ VT_SWITCH( smat->vtype,
298
+ NORM1_DZ(d, d, smat->n, smat->m, fabs);,
299
+ NORM1_DZ(z, dz, smat->n, smat->m, cabs);,
300
+ NORM1i_OTHER(1, int, i, smat->n, smat->m, ABS, NUM_ADD, NUM_CMP, INT2NUM);,
301
+ NORM1i_OTHER(1, bool, b, smat->n, smat->m, ITSELF, XOR, NUM_CMP, TF2V);,
302
+ NORM1i_OTHER(1, VALUE, v, smat->n, smat->m, VAL_ABS, VAL_ADD, VAL_CMP, ITSELF);
303
+ );
304
+ } else {
305
+ VT_SWITCH( smat->vtype,
306
+ NORMi_DZ(d, d, smat->m, smat->n, fabs);,
307
+ NORMi_DZ(z, dz, smat->m, smat->n, cabs);,
308
+ NORM1i_OTHER(i, int, i, smat->m, smat->n, ABS, NUM_ADD, NUM_CMP, INT2NUM);,
309
+ NORM1i_OTHER(i, bool, b, smat->m, smat->n, ITSELF, XOR, NUM_CMP, TF2V);,
310
+ NORM1i_OTHER(i, VALUE, v, smat->m, smat->n, VAL_ABS, VAL_ADD, VAL_CMP, ITSELF);
311
+ );
312
+ }
313
+ }
314
+
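For reference, the NORM1/NORMi macros used by kmm_mat_norm1() and kmm_mat_normi() compute the induced 1-norm as the largest absolute column sum and the induced infinity-norm as the largest absolute row sum. A plain double-only sketch of both, with illustrative names and without the BLAS dasum_ fast path:

```c
#include <math.h>
#include <stdio.h>

/* max absolute column sum of a column-major m x n matrix (induced 1-norm) */
static double
norm1(const double *a, int m, int n, int ld)
{
    double ret = 0.0;
    for (int j = 0; j < n; j++) {
        double colsum = 0.0;
        for (int i = 0; i < m; i++) {
            colsum += fabs(a[i + j * ld]);
        }
        if (ret < colsum) { ret = colsum; }
    }
    return ret;
}

/* max absolute row sum (induced infinity-norm) */
static double
normi(const double *a, int m, int n, int ld)
{
    double ret = 0.0;
    for (int i = 0; i < m; i++) {
        double rowsum = 0.0;
        for (int j = 0; j < n; j++) {
            rowsum += fabs(a[i + j * ld]);
        }
        if (ret < rowsum) { ret = rowsum; }
    }
    return ret;
}

int
main(void)
{
    double a[4] = { 1.0, -3.0, 2.0, 4.0 };  /* [[1, 2], [-3, 4]] */
    printf("%g %g\n", norm1(a, 2, 2, 2), normi(a, 2, 2, 2));  /* 6 7 */
    return 0;
}
```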
315
+ // element-wise L-1 norm (the sum of the absolute values of all elements)
316
+ static void
317
+ km_asum_d(double *ent, void *data)
318
+ {
319
+ double *ret = (double *)data;
320
+ *ret += fabs(*ent);
321
+ }
322
+ static void
323
+ km_asum_z(COMPLEX *ent, void *data)
324
+ {
325
+ double *ret = (double *)data;
326
+ *ret += cabs(*ent);
327
+ }
328
+ static void
329
+ km_asum_i(int *ent, void *data)
330
+ {
331
+ int *ret = (int *)data;
332
+ *ret += ABS(*ent);
333
+ }
334
+ static void
335
+ km_asum_b(bool *ent, void *data)
336
+ {
337
+ bool *ret = (bool *)data;
338
+ *ret = XOR(*ret, *ent);
339
+ }
340
+ static void
341
+ km_asum_v(VALUE *ent, void *data)
342
+ {
343
+ VALUE *ret = (VALUE *)data;
344
+ VALUE temp = rb_funcall(*ent, id_abs, 0);
345
+ *ret = rb_funcall(*ret, id_op_plus, 1, temp);
346
+ }
347
+ VALUE
348
+ kmm_mat_norm_e1(VALUE self)
349
+ {
350
+ SMAT *smat = km_mat2smat(self);
351
+ if ( smat->vtype == VT_DOUBLE ) {
352
+ if ( smat->stype == ST_FULL ) {
353
+ int ione = 1, n = LENGTH(smat);
354
+ return rb_float_new(dasum_(&n, smat->dbody, &ione));
355
+ } else if ( smat->stype == ST_SSUB ) {
356
+ int ione = 1, lps, size; double ret = 0.0;
357
+ if ( smat->trans ) {
358
+ lps = smat->m; size = smat->n;
359
+ } else {
360
+ lps = smat->n; size = smat->m;
361
+ }
362
+ for ( int i=0; i<lps; i++ ) {
363
+ ret += dasum_(&size, smat->dbody+(i*(smat->ld)), &ione);
364
+ }
365
+ return rb_float_new(ret);
366
+ } else {
367
+ double ret = 0.0;
368
+ km_smat_each_d(smat, km_asum_d, (void *)&ret);
369
+ return rb_float_new(ret);
370
+ }
371
+ } else if ( smat->vtype == VT_COMPLEX ) {
372
+ if ( smat->stype == ST_FULL ) {
373
+ int ione = 1, n = LENGTH(smat);
374
+ return rb_float_new(dzasum_(&n, smat->zbody, &ione));
375
+ } else if ( smat->stype == ST_SSUB ) {
376
+ int ione = 1, lps, size; double ret = 0.0;
377
+ if ( smat->trans ) {
378
+ lps = smat->m; size = smat->n;
379
+ } else {
380
+ lps = smat->n; size = smat->m;
381
+ }
382
+ for ( int i=0; i<lps; i++ ) {
383
+ ret += dzasum_(&size, smat->zbody+(i*(smat->ld)), &ione);
384
+ }
385
+ return rb_float_new(ret);
386
+ } else {
387
+ double ret = 0.0;
388
+ km_smat_each_z(smat, km_asum_z, (void *)&ret);
389
+ return rb_float_new(ret);
390
+ }
391
+ } else if ( smat->vtype == VT_INT ) {
392
+ int ret = 0;
393
+ km_smat_each_i(smat, km_asum_i, (void *)&ret);
394
+ return INT2NUM(ret);
395
+ } else if ( smat->vtype == VT_BOOL ) {
396
+ bool ret = false;
397
+ km_smat_each_b(smat, km_asum_b, (void *)&ret);
398
+ return TF2V(ret);
399
+ } else if ( smat->vtype == VT_VALUE ) {
400
+ VALUE ret = INT2NUM(0);
401
+ km_smat_each_v(smat, km_asum_v, (void *)&ret);
402
+ return ret;
403
+ } else {
404
+ rb_raise(km_eInternal, "unknown value type");
405
+ }
406
+ }
407
+
408
+ // element-wise L-infinity norm (the largest absolute value among all elements)
409
+ static void
410
+ km_amax_d(double *ent, void *data)
411
+ {
412
+ double *ret = (double *)data;
413
+ double temp = fabs(*ent);
414
+ if ( *ret < temp ) {
415
+ *ret = temp;
416
+ }
417
+ }
418
+ static void
419
+ km_amax_z(COMPLEX *ent, void *data)
420
+ {
421
+ double *ret = (double *)data;
422
+ double temp = cabs(*ent);
423
+ if ( *ret < temp ) {
424
+ *ret = temp;
425
+ }
426
+ }
427
+ static void
428
+ km_amax_i(int *ent, void *data)
429
+ {
430
+ int *ret = (int *)data;
431
+ int temp = ABS(*ent);
432
+ if ( *ret < temp ) {
433
+ *ret = temp;
434
+ }
435
+ }
436
+ static void
437
+ km_amax_b(bool *ent, void *data)
438
+ {
439
+ bool *ret = (bool *)data;
440
+ *ret = ( *ret || *ent );
441
+ }
442
+ static void
443
+ km_amax_v(VALUE *ent, void *data)
444
+ {
445
+ VALUE *ret = (VALUE *)data;
446
+ VALUE temp = rb_funcall(*ent, id_abs, 0);
447
+ if ( RTEST(rb_funcall(*ret, id_op_lt, 1, temp)) ) {
448
+ *ret = temp;
449
+ }
450
+ }
451
+ VALUE
452
+ kmm_mat_norm_einf(VALUE self)
453
+ {
454
+ SMAT *smat = km_mat2smat(self);
455
+ if ( smat->vtype == VT_DOUBLE ) {
456
+ if ( smat->stype == ST_FULL ) {
457
+ int ione=1, n=LENGTH(smat);
458
+ return rb_float_new(fabs(smat->dbody[idamax_(&n, smat->dbody, &ione)-1]));
459
+ } else if ( smat->stype == ST_SSUB ) {
460
+ int ione=1, lps, size; double ret = 0.0;
461
+ if ( smat->trans ) {
462
+ lps = smat->m; size = smat->n;
463
+ } else {
464
+ lps = smat->n; size = smat->m;
465
+ }
466
+ for ( int i=0; i<lps; i++ ) {
467
+ double temp = fabs(smat->dbody[idamax_(&size, smat->dbody+(i*(smat->ld)), &ione)-1]);
468
+ if ( ret < temp ) { ret = temp; }
469
+ }
470
+ return rb_float_new(ret);
471
+ } else {
472
+ double ret = 0.0;
473
+ km_smat_each_d(smat, km_amax_d, (void *)&ret);
474
+ return rb_float_new(ret);
475
+ }
476
+ } else if ( smat->vtype == VT_COMPLEX ) {
477
+ if ( smat->stype == ST_FULL ) {
478
+ int ione=1, n=LENGTH(smat);
479
+ return rb_float_new(cabs(smat->zbody[izamax_(&n, smat->zbody, &ione)-1]));
480
+ } else if ( smat->stype == ST_SSUB ) {
481
+ int ione=1, lps, size; double ret = 0.0;
482
+ if ( smat->trans ) {
483
+ lps = smat->m; size = smat->n;
484
+ } else {
485
+ lps = smat->n; size = smat->m;
486
+ }
487
+ for ( int i=0; i<lps; i++ ) {
488
+ double temp = cabs(smat->zbody[izamax_(&size, smat->zbody+(i*(smat->ld)), &ione)-1]);
489
+ if ( ret < temp ) { ret = temp; }
490
+ }
491
+ return rb_float_new(ret);
492
+ } else {
493
+ double ret = 0.0;
494
+ km_smat_each_z(smat, km_amax_z, (void *)&ret);
495
+ return rb_float_new(ret);
496
+ }
497
+ } else if ( smat->vtype == VT_INT ) {
498
+ int ret = 0;
499
+ km_smat_each_i(smat, km_amax_i, (void *)&ret);
500
+ return INT2NUM(ret);
501
+ } else if ( smat->vtype == VT_BOOL ) {
502
+ bool ret = false;
503
+ km_smat_each_b(smat, km_amax_b, (void *)&ret);
504
+ return TF2V(ret);
505
+ } else if ( smat->vtype == VT_VALUE ) {
506
+ VALUE ret = INT2NUM(0);
507
+ km_smat_each_v(smat, km_amax_v, (void *)&ret);
508
+ return ret;
509
+ } else {
510
+ rb_raise(km_eInternal, "unknown value type");
511
+ }
512
+ }
513
+
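kmm_mat_norm_einf() subtracts 1 from the results of idamax_/izamax_ because BLAS index routines return 1-based (Fortran) indices. A minimal standalone check of that convention, assuming a Fortran BLAS is linked:

```c
#include <math.h>
#include <stdio.h>

/* BLAS: index (1-based) of the element with the largest absolute value */
extern int idamax_(int *n, double *x, int *incx);

int
main(void)
{
    double x[4] = { 1.0, -7.0, 3.0, 5.0 };
    int n = 4, ione = 1;

    int k = idamax_(&n, x, &ione);                        /* returns 2, not 1 */
    printf("index %d, |max| = %g\n", k, fabs(x[k - 1]));  /* 7 */
    return 0;
}
```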
514
+ // induced L-2 norm (the largest singular value)
515
+ struct km_norm2_arg {
516
+ SMAT *sa;
517
+ double *a, *s, *work;
518
+ int *iwork;
519
+ };
520
+ static VALUE
521
+ km_norm2_body(VALUE data)
522
+ {
523
+ struct km_norm2_arg *a = (struct km_norm2_arg *)data;
524
+
525
+ int m = a->sa->m, n = a->sa->n;
526
+
527
+ double opt; int lwork=-1, info;
528
+ char jobz[] = "N";
529
+ dgesdd_(jobz, &m, &n, NULL, &m, NULL, NULL, &m, NULL, &n, &opt, &lwork, NULL, &info);
530
+ km_check_info(info, rb_eRuntimeError, "error occured while computing optimal lwork", "dgesdd");
531
+ lwork = (int)opt;
532
+
533
+ KALLOCc(a->a, a->sa);
534
+ KALLOC(a->s, MIN(m, n));
535
+ KALLOC(a->work, lwork);
536
+ KALLOC(a->iwork, 8*MIN(m, n));
537
+
538
+ dgesdd_(jobz, &m, &n, a->a, &m, a->s, NULL, &m, NULL, &n, a->work, &lwork, a->iwork, &info);
539
+ km_check_info(info, rb_eRuntimeError, "DBDSDC did not converge", "dgesdd");
540
+
541
+ return rb_float_new((a->s)[0]);
542
+ }
543
+ static VALUE
544
+ km_norm2_ensure(VALUE data)
545
+ {
546
+ struct km_norm2_arg *a = (struct km_norm2_arg *)data;
547
+
548
+ ruby_xfree(a->iwork);
549
+ ruby_xfree(a->work);
550
+ ruby_xfree(a->s);
551
+ ruby_xfree(a->a);
552
+
553
+ return Qnil;
554
+ }
555
+ VALUE
556
+ kmm_mat_norm2(VALUE self)
557
+ {
558
+ struct km_norm2_arg a;
559
+ a.sa = km_mat2smat(self);
560
+ km_check_double(1, a.sa);
561
+
562
+ return km_ensure(km_norm2_body, (VALUE)&a, km_norm2_ensure, (VALUE)&a);
563
+ }
564
+
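km_norm2_body() above, and the rcond helpers below, follow the standard two-pass LAPACK workspace query: call the routine with lwork = -1 so the optimal size comes back in work[0], allocate, then call again. A hedged standalone sketch with dgesdd_ computing singular values only (jobz = "N"); since the singular values come back in descending order, it also shows why norm2 is s[0] and _rcond2 is s[n-1]/s[0] (assumes `-llapack`):

```c
#include <stdio.h>
#include <stdlib.h>

extern void dgesdd_(char *jobz, int *m, int *n, double *a, int *lda,
                    double *s, double *u, int *ldu, double *vt, int *ldvt,
                    double *work, int *lwork, int *iwork, int *info);

int
main(void)
{
    double a[4] = { 3.0, 0.0, 0.0, -2.0 };  /* diag(3, -2), singular values 3 and 2 */
    int m = 2, n = 2, info, lwork = -1;
    double s[2], opt;
    int iwork[8 * 2];
    char jobz[] = "N";

    /* pass 1: workspace query, the optimal lwork is returned in opt */
    dgesdd_(jobz, &m, &n, a, &m, s, NULL, &m, NULL, &n, &opt, &lwork, iwork, &info);
    if (info != 0) return 1;
    lwork = (int)opt;

    /* pass 2: the real factorization */
    double *work = malloc(sizeof(double) * (size_t)lwork);
    dgesdd_(jobz, &m, &n, a, &m, s, NULL, &m, NULL, &n, work, &lwork, iwork, &info);
    free(work);
    if (info != 0) return 1;                /* info > 0: DBDSDC did not converge */

    printf("norm2 = %g, rcond2 = %g\n", s[0], s[1] / s[0]);  /* 3 and 2/3 */
    return 0;
}
```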
565
+ // estimate of the reciprocal condition number of `self'
566
+ struct km__rcond2_arg {
567
+ SMAT *sa;
568
+ double *a, *s, *work;
569
+ int *iwork;
570
+ };
571
+ static VALUE
572
+ km__rcond2_body(VALUE data)
573
+ {
574
+ struct km__rcond2_arg *a = (struct km__rcond2_arg *)data;
575
+
576
+ int n = a->sa->m;
577
+ km_check_size(1, a->sa->n,n);
578
+
579
+ double opt; int lwork=-1, info;
580
+ char jobz[]="N";
581
+ dgesdd_(jobz, &n, &n, NULL, &n, NULL, NULL, &n, NULL, &n, &opt, &lwork, NULL, &info);
582
+ km_check_info(info, rb_eRuntimeError, "error occured while computing optimal lwork", "dgesdd");
583
+ lwork = (int)opt;
584
+
585
+ KALLOCc(a->a, a->sa);
586
+ KALLOC(a->s, n);
587
+ KALLOC(a->work, lwork);
588
+ KALLOC(a->iwork, 8*n);
589
+
590
+ dgesdd_(jobz, &n, &n, a->a, &n, a->s, NULL, &n, NULL, &n, a->work, &lwork, a->iwork, &info);
591
+ km_check_info(info, rb_eRuntimeError, "DBDSDC did not converge", "dgesdd");
592
+
593
+ return rb_float_new((a->s)[n-1]/(a->s)[0]);
594
+ }
595
+ static VALUE
596
+ km__rcond2_ensure(VALUE data)
597
+ {
598
+ struct km__rcond2_arg *a = (struct km__rcond2_arg *)data;
599
+
600
+ ruby_xfree(a->iwork);
601
+ ruby_xfree(a->work);
602
+ ruby_xfree(a->s);
603
+ ruby_xfree(a->a);
604
+
605
+ return Qnil;
606
+ }
607
+ VALUE
608
+ kmm_mat__rcond2(VALUE self)
609
+ {
610
+ struct km__rcond2_arg a;
611
+ a.sa = km_mat2smat(self);
612
+ km_check_double(1, a.sa);
613
+
614
+ return km_ensure(km__rcond2_body, (VALUE)&a, km__rcond2_ensure, (VALUE)&a);
615
+ }
616
+
617
+ struct km__rcondf_arg {
618
+ SMAT *sa;
619
+ double *a, *work;
620
+ int *ipiv;
621
+ double normf;
622
+ };
623
+ static VALUE
624
+ km__rcondf_body(VALUE data)
625
+ {
626
+ struct km__rcondf_arg *a = (struct km__rcondf_arg *)data;
627
+
628
+ int n = a->sa->m;
629
+ km_check_size(1, a->sa->n,n);
630
+
631
+ double opt; int lwork=-1, info;
632
+ dgetri_(&n, NULL, &n, NULL, &opt, &lwork, &info);
633
+ km_check_info(info, rb_eRuntimeError, "error occured while computing optimal lwork", "dgetri");
634
+ lwork = (int)opt;
635
+ KALLOCc(a->a, a->sa);
636
+ KALLOC(a->ipiv, n);
637
+ KALLOC(a->work, lwork);
638
+
639
+ dgetrf_(&n, &n, a->a, &n, a->ipiv, &info);
640
+ km_check_info(info, km_eUncomp, "the matrix is exactly singular", "dgetrf");
641
+ dgetri_(&n, a->a, &n, a->ipiv, a->work, &lwork, &info);
642
+ km_check_info(info, rb_eRuntimeError, "unexpected info value", "dgetri");
643
+ int len = n*n, ione = 1;
644
+ return rb_float_new(1.0/((a->normf)*dnrm2_(&len, a->a, &ione)));
645
+ }
646
+ static VALUE
647
+ km__rcondf_ensure(VALUE data)
648
+ {
649
+ struct km__rcondf_arg *a = (struct km__rcondf_arg *)data;
650
+
651
+ ruby_xfree(a->work);
652
+ ruby_xfree(a->ipiv);
653
+ ruby_xfree(a->a);
654
+
655
+ return Qnil;
656
+ }
657
+ VALUE
658
+ kmm_mat__rcondf(VALUE self)
659
+ {
660
+ struct km__rcondf_arg a;
661
+ a.sa = km_mat2smat(self);
662
+ km_check_double(1, a.sa);
663
+
664
+ a.normf = NUM2DBL(kmm_mat_normf(self));
665
+
666
+ return km_ensure(km__rcondf_body, (VALUE)&a, km__rcondf_ensure, (VALUE)&a);
667
+ }
668
+
669
+ struct km__rcondoi_arg {
670
+ SMAT *sa;
671
+ char oi[2];
672
+ double *a, *work;
673
+ int *iwork;
674
+ double anorm;
675
+ };
676
+ static VALUE
677
+ km__rcondoi_body(VALUE data)
678
+ {
679
+ struct km__rcondoi_arg *a = (struct km__rcondoi_arg *)data;
680
+
681
+ int n = a->sa->m;
682
+ km_check_size(1, a->sa->n,n);
683
+
684
+ KALLOCc(a->a, a->sa);
685
+ KALLOC(a->iwork, n);
686
+ double rcond;
687
+ int info;
688
+ dgetrf_(&n, &n, a->a, &n, a->iwork, &info);
689
+ km_check_info(info, km_eUncomp, "the matrix is exactly singular", "dgetrf");
690
+ KALLOC(a->work, 4*n);
691
+ dgecon_(a->oi, &n, a->a, &n, &(a->anorm), &rcond, a->work, a->iwork, &info);
692
+ km_check_info(info, rb_eRuntimeError, "unexpected info value", "dgecon");
693
+
694
+ return rb_float_new(rcond);
695
+ }
696
+ static VALUE
697
+ km__rcondoi_ensure(VALUE data)
698
+ {
699
+ struct km__rcondoi_arg *a = (struct km__rcondoi_arg *)data;
700
+
701
+ ruby_xfree(a->iwork);
702
+ ruby_xfree(a->work);
703
+ ruby_xfree(a->a);
704
+
705
+ return Qnil;
706
+ }
707
+ VALUE
708
+ kmm_mat__rcondoi(VALUE self, VALUE sym)
709
+ {
710
+ struct km__rcondoi_arg a;
711
+ a.sa = km_mat2smat(self);
712
+ km_check_double(1, a.sa);
713
+ if ( sym == sym_one ) {
714
+ strcpy(a.oi, "O");
715
+ a.anorm = NUM2DBL(kmm_mat_norm1(self));
716
+ } else if ( sym == sym_infinity ) {
717
+ strcpy(a.oi, "I");
718
+ a.anorm = NUM2DBL(kmm_mat_normi(self));
719
+ } else {
720
+ rb_raise(rb_eArgError, "unknown norm type");
721
+ }
722
+
723
+ km_ensure(km__rcondoi_body, (VALUE)&a, km__rcondoi_ensure, (VALUE)&a);
724
+
725
+ return self;
726
+ }
727
+
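kmm_mat__rcondoi() wraps the dgetrf_ + dgecon_ pair, passing dgecon_ the matrix norm computed beforehand ("O" for the 1-norm, "I" for the infinity-norm). A hedged standalone sketch of that call sequence, with an illustrative 2x2 input and assuming a Fortran LAPACK is linked:

```c
#include <stdio.h>

extern void dgetrf_(int *m, int *n, double *a, int *lda, int *ipiv, int *info);
extern void dgecon_(char *norm, int *n, double *a, int *lda, double *anorm,
                    double *rcond, double *work, int *iwork, int *info);

int
main(void)
{
    /* diag(4, 1): 1-norm is 4, its inverse has 1-norm 1, so rcond is 1/4 */
    double a[4] = { 4.0, 0.0, 0.0, 1.0 };
    double anorm = 4.0;              /* kmat computes this with kmm_mat_norm1() */
    double rcond, work[4 * 2];
    int n = 2, ipiv[2], iwork[2], info;
    char norm[] = "O";               /* "O" = 1-norm, "I" = infinity-norm */

    dgetrf_(&n, &n, a, &n, ipiv, &info);   /* dgecon_ wants the LU factors */
    if (info != 0) return 1;
    dgecon_(norm, &n, a, &n, &anorm, &rcond, work, iwork, &info);
    if (info != 0) return 1;

    printf("rcond = %g\n", rcond);   /* expect about 0.25 */
    return 0;
}
```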