numo-linalg-alt 0.2.0 → 0.4.0

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. The information is provided for informational purposes only.
Files changed (55):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -1
  3. data/README.md +3 -1
  4. data/ext/numo/linalg/blas/dot.c +59 -59
  5. data/ext/numo/linalg/blas/dot_sub.c +58 -58
  6. data/ext/numo/linalg/blas/gemm.c +157 -148
  7. data/ext/numo/linalg/blas/gemv.c +131 -127
  8. data/ext/numo/linalg/blas/nrm2.c +50 -50
  9. data/ext/numo/linalg/lapack/gees.c +276 -0
  10. data/ext/numo/linalg/lapack/gees.h +15 -0
  11. data/ext/numo/linalg/lapack/geev.c +127 -110
  12. data/ext/numo/linalg/lapack/gelsd.c +81 -70
  13. data/ext/numo/linalg/lapack/geqrf.c +52 -51
  14. data/ext/numo/linalg/lapack/gerqf.c +70 -0
  15. data/ext/numo/linalg/lapack/gerqf.h +15 -0
  16. data/ext/numo/linalg/lapack/gesdd.c +96 -86
  17. data/ext/numo/linalg/lapack/gesv.c +80 -78
  18. data/ext/numo/linalg/lapack/gesvd.c +140 -129
  19. data/ext/numo/linalg/lapack/getrf.c +51 -50
  20. data/ext/numo/linalg/lapack/getri.c +64 -63
  21. data/ext/numo/linalg/lapack/getrs.c +92 -88
  22. data/ext/numo/linalg/lapack/gges.c +214 -0
  23. data/ext/numo/linalg/lapack/gges.h +15 -0
  24. data/ext/numo/linalg/lapack/heev.c +54 -52
  25. data/ext/numo/linalg/lapack/heevd.c +54 -52
  26. data/ext/numo/linalg/lapack/heevr.c +109 -98
  27. data/ext/numo/linalg/lapack/hegv.c +77 -74
  28. data/ext/numo/linalg/lapack/hegvd.c +77 -74
  29. data/ext/numo/linalg/lapack/hegvx.c +132 -120
  30. data/ext/numo/linalg/lapack/hetrf.c +54 -50
  31. data/ext/numo/linalg/lapack/lange.c +45 -44
  32. data/ext/numo/linalg/lapack/orgqr.c +63 -62
  33. data/ext/numo/linalg/lapack/orgrq.c +78 -0
  34. data/ext/numo/linalg/lapack/orgrq.h +15 -0
  35. data/ext/numo/linalg/lapack/potrf.c +49 -48
  36. data/ext/numo/linalg/lapack/potri.c +49 -48
  37. data/ext/numo/linalg/lapack/potrs.c +74 -72
  38. data/ext/numo/linalg/lapack/syev.c +54 -52
  39. data/ext/numo/linalg/lapack/syevd.c +54 -52
  40. data/ext/numo/linalg/lapack/syevr.c +107 -98
  41. data/ext/numo/linalg/lapack/sygv.c +77 -73
  42. data/ext/numo/linalg/lapack/sygvd.c +77 -73
  43. data/ext/numo/linalg/lapack/sygvx.c +132 -120
  44. data/ext/numo/linalg/lapack/sytrf.c +54 -50
  45. data/ext/numo/linalg/lapack/trtrs.c +79 -75
  46. data/ext/numo/linalg/lapack/ungqr.c +63 -62
  47. data/ext/numo/linalg/lapack/ungrq.c +78 -0
  48. data/ext/numo/linalg/lapack/ungrq.h +15 -0
  49. data/ext/numo/linalg/linalg.c +21 -10
  50. data/ext/numo/linalg/linalg.h +5 -0
  51. data/ext/numo/linalg/util.c +8 -0
  52. data/ext/numo/linalg/util.h +1 -0
  53. data/lib/numo/linalg/version.rb +1 -1
  54. data/lib/numo/linalg.rb +322 -0
  55. metadata +14 -4
@@ -4,84 +4,86 @@ struct _gesv_option {
4
4
  int matrix_layout;
5
5
  };
6
6
 
7
- #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
8
- static void _iter_##fLapackFunc(na_loop_t* const lp) { \
9
- tDType* a = (tDType*)NDL_PTR(lp, 0); \
10
- tDType* b = (tDType*)NDL_PTR(lp, 1); \
11
- int* ipiv = (int*)NDL_PTR(lp, 2); \
12
- int* info = (int*)NDL_PTR(lp, 3); \
13
- struct _gesv_option* opt = (struct _gesv_option*)(lp->opt_ptr); \
14
- const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
15
- const lapack_int nhrs = lp->args[1].ndim == 1 ? 1 : (lapack_int)NDL_SHAPE(lp, 1)[1]; \
16
- const lapack_int lda = n; \
17
- const lapack_int ldb = nhrs; \
18
- const lapack_int i = LAPACKE_##fLapackFunc(opt->matrix_layout, n, nhrs, a, lda, ipiv, b, ldb); \
19
- *info = (int)i; \
20
- } \
21
- \
22
- static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
23
- VALUE a_vnary = Qnil; \
24
- VALUE b_vnary = Qnil; \
25
- VALUE kw_args = Qnil; \
26
- \
27
- rb_scan_args(argc, argv, "2:", &a_vnary, &b_vnary, &kw_args); \
28
- \
29
- ID kw_table[1] = { rb_intern("order") }; \
30
- VALUE kw_values[1] = { Qundef }; \
31
- \
32
- rb_get_kwargs(kw_args, kw_table, 0, 1, kw_values); \
33
- \
34
- const int matrix_layout = kw_values[0] != Qundef ? get_matrix_layout(kw_values[0]) : LAPACK_ROW_MAJOR; \
35
- \
36
- if (CLASS_OF(a_vnary) != tNAryClass) { \
37
- a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
38
- } \
39
- if (!RTEST(nary_check_contiguous(a_vnary))) { \
40
- a_vnary = nary_dup(a_vnary); \
41
- } \
42
- if (CLASS_OF(b_vnary) != tNAryClass) { \
43
- b_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, b_vnary); \
44
- } \
45
- if (!RTEST(nary_check_contiguous(b_vnary))) { \
46
- b_vnary = nary_dup(b_vnary); \
47
- } \
48
- \
49
- narray_t* a_nary = NULL; \
50
- narray_t* b_nary = NULL; \
51
- GetNArray(a_vnary, a_nary); \
52
- GetNArray(b_vnary, b_nary); \
53
- const int a_n_dims = NA_NDIM(a_nary); \
54
- const int b_n_dims = NA_NDIM(b_nary); \
55
- if (a_n_dims != 2) { \
56
- rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
57
- return Qnil; \
58
- } \
59
- if (b_n_dims != 1 && b_n_dims != 2) { \
60
- rb_raise(rb_eArgError, "input array b must be 1- or 2-dimensional"); \
61
- return Qnil; \
62
- } \
63
- \
64
- lapack_int n = (lapack_int)NA_SHAPE(a_nary)[0]; \
65
- lapack_int nb = (lapack_int)(b_n_dims == 1 ? NA_SHAPE(b_nary)[0] : NA_SHAPE(b_nary)[0]); \
66
- if (n != nb) { \
67
- rb_raise(nary_eShapeError, "shape1[1](=%d) != shape2[0](=%d)", n, nb); \
68
- } \
69
- \
70
- lapack_int nhrs = b_n_dims == 1 ? 1 : (lapack_int)NA_SHAPE(b_nary)[1]; \
71
- size_t shape[2] = { (size_t)n, (size_t)nhrs }; \
72
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 2 }, { OVERWRITE, b_n_dims } }; \
73
- ndfunc_arg_out_t aout[2] = { { numo_cInt32, 1, shape }, { numo_cInt32, 0 } }; \
74
- \
75
- ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 2, 2, ain, aout }; \
76
- struct _gesv_option opt = { matrix_layout }; \
77
- VALUE res = na_ndloop3(&ndf, &opt, 2, a_vnary, b_vnary); \
78
- \
79
- VALUE ret = rb_ary_concat(rb_assoc_new(a_vnary, b_vnary), res); \
80
- \
81
- RB_GC_GUARD(a_vnary); \
82
- RB_GC_GUARD(b_vnary); \
83
- \
84
- return ret; \
7
+ #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
8
+ static void _iter_##fLapackFunc(na_loop_t* const lp) { \
9
+ tDType* a = (tDType*)NDL_PTR(lp, 0); \
10
+ tDType* b = (tDType*)NDL_PTR(lp, 1); \
11
+ int* ipiv = (int*)NDL_PTR(lp, 2); \
12
+ int* info = (int*)NDL_PTR(lp, 3); \
13
+ struct _gesv_option* opt = (struct _gesv_option*)(lp->opt_ptr); \
14
+ const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
15
+ const lapack_int nhrs = lp->args[1].ndim == 1 ? 1 : (lapack_int)NDL_SHAPE(lp, 1)[1]; \
16
+ const lapack_int lda = n; \
17
+ const lapack_int ldb = nhrs; \
18
+ const lapack_int i = \
19
+ LAPACKE_##fLapackFunc(opt->matrix_layout, n, nhrs, a, lda, ipiv, b, ldb); \
20
+ *info = (int)i; \
21
+ } \
22
+ \
23
+ static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
24
+ VALUE a_vnary = Qnil; \
25
+ VALUE b_vnary = Qnil; \
26
+ VALUE kw_args = Qnil; \
27
+ \
28
+ rb_scan_args(argc, argv, "2:", &a_vnary, &b_vnary, &kw_args); \
29
+ \
30
+ ID kw_table[1] = { rb_intern("order") }; \
31
+ VALUE kw_values[1] = { Qundef }; \
32
+ \
33
+ rb_get_kwargs(kw_args, kw_table, 0, 1, kw_values); \
34
+ \
35
+ const int matrix_layout = \
36
+ kw_values[0] != Qundef ? get_matrix_layout(kw_values[0]) : LAPACK_ROW_MAJOR; \
37
+ \
38
+ if (CLASS_OF(a_vnary) != tNAryClass) { \
39
+ a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
40
+ } \
41
+ if (!RTEST(nary_check_contiguous(a_vnary))) { \
42
+ a_vnary = nary_dup(a_vnary); \
43
+ } \
44
+ if (CLASS_OF(b_vnary) != tNAryClass) { \
45
+ b_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, b_vnary); \
46
+ } \
47
+ if (!RTEST(nary_check_contiguous(b_vnary))) { \
48
+ b_vnary = nary_dup(b_vnary); \
49
+ } \
50
+ \
51
+ narray_t* a_nary = NULL; \
52
+ narray_t* b_nary = NULL; \
53
+ GetNArray(a_vnary, a_nary); \
54
+ GetNArray(b_vnary, b_nary); \
55
+ const int a_n_dims = NA_NDIM(a_nary); \
56
+ const int b_n_dims = NA_NDIM(b_nary); \
57
+ if (a_n_dims != 2) { \
58
+ rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
59
+ return Qnil; \
60
+ } \
61
+ if (b_n_dims != 1 && b_n_dims != 2) { \
62
+ rb_raise(rb_eArgError, "input array b must be 1- or 2-dimensional"); \
63
+ return Qnil; \
64
+ } \
65
+ \
66
+ lapack_int n = (lapack_int)NA_SHAPE(a_nary)[0]; \
67
+ lapack_int nb = (lapack_int)(b_n_dims == 1 ? NA_SHAPE(b_nary)[0] : NA_SHAPE(b_nary)[0]); \
68
+ if (n != nb) { \
69
+ rb_raise(nary_eShapeError, "shape1[1](=%d) != shape2[0](=%d)", n, nb); \
70
+ } \
71
+ \
72
+ lapack_int nhrs = b_n_dims == 1 ? 1 : (lapack_int)NA_SHAPE(b_nary)[1]; \
73
+ size_t shape[2] = { (size_t)n, (size_t)nhrs }; \
74
+ ndfunc_arg_in_t ain[2] = { { OVERWRITE, 2 }, { OVERWRITE, b_n_dims } }; \
75
+ ndfunc_arg_out_t aout[2] = { { numo_cInt32, 1, shape }, { numo_cInt32, 0 } }; \
76
+ \
77
+ ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 2, 2, ain, aout }; \
78
+ struct _gesv_option opt = { matrix_layout }; \
79
+ VALUE res = na_ndloop3(&ndf, &opt, 2, a_vnary, b_vnary); \
80
+ \
81
+ VALUE ret = rb_ary_concat(rb_assoc_new(a_vnary, b_vnary), res); \
82
+ \
83
+ RB_GC_GUARD(a_vnary); \
84
+ RB_GC_GUARD(b_vnary); \
85
+ \
86
+ return ret; \
85
87
  }
86
88
 
87
89
  DEF_LINALG_FUNC(double, numo_cDFloat, dgesv)
@@ -6,135 +6,146 @@ struct _gesvd_option {
6
6
  char jobvt;
7
7
  };
8
8
 
9
- #define DEF_LINALG_FUNC(tDType, tRtDType, tNAryClass, tRtNAryClass, fLapackFunc) \
10
- static void _iter_##fLapackFunc(na_loop_t* const lp) { \
11
- tDType* a = (tDType*)NDL_PTR(lp, 0); \
12
- tRtDType* s = (tRtDType*)NDL_PTR(lp, 1); \
13
- tDType* u = (tDType*)NDL_PTR(lp, 2); \
14
- tDType* vt = (tDType*)NDL_PTR(lp, 3); \
15
- int* info = (int*)NDL_PTR(lp, 4); \
16
- struct _gesvd_option* opt = (struct _gesvd_option*)(lp->opt_ptr); \
17
- \
18
- const lapack_int m = (lapack_int)(opt->matrix_order == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[0] : NDL_SHAPE(lp, 0)[1]); \
19
- const lapack_int n = (lapack_int)(opt->matrix_order == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[1] : NDL_SHAPE(lp, 0)[0]); \
20
- const lapack_int min_mn = m < n ? m : n; \
21
- const lapack_int lda = n; \
22
- const lapack_int ldu = opt->jobu == 'A' ? m : min_mn; \
23
- const lapack_int ldvt = n; \
24
- \
25
- tRtDType* superb = (tRtDType*)ruby_xmalloc(min_mn * sizeof(tRtDType)); \
26
- \
27
- lapack_int i = LAPACKE_##fLapackFunc(opt->matrix_order, opt->jobu, opt->jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb); \
28
- *info = (int)i; \
29
- \
30
- ruby_xfree(superb); \
31
- } \
32
- \
33
- static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
34
- VALUE a_vnary = Qnil; \
35
- VALUE kw_args = Qnil; \
36
- \
37
- rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
38
- \
39
- ID kw_table[3] = { rb_intern("jobu"), rb_intern("jobvt"), rb_intern("order") }; \
40
- VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
41
- \
42
- rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
43
- \
44
- const char jobu = kw_values[0] == Qundef ? 'A' : StringValueCStr(kw_values[0])[0]; \
45
- const char jobvt = kw_values[1] == Qundef ? 'A' : StringValueCStr(kw_values[1])[0]; \
46
- const char order = kw_values[2] == Qundef ? 'R' : StringValueCStr(kw_values[2])[0]; \
47
- \
48
- if (jobu == 'O' && jobvt == 'O') { \
49
- rb_raise(rb_eArgError, "jobu and jobvt cannot be both 'O'"); \
50
- return Qnil; \
51
- } \
52
- if (CLASS_OF(a_vnary) != tNAryClass) { \
53
- rb_raise(rb_eTypeError, "type of input array is invalid for overwriting"); \
54
- return Qnil; \
55
- } \
56
- \
57
- if (CLASS_OF(a_vnary) != tNAryClass) { \
58
- a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
59
- } \
60
- if (!RTEST(nary_check_contiguous(a_vnary))) { \
61
- a_vnary = nary_dup(a_vnary); \
62
- } \
63
- \
64
- narray_t* a_nary = NULL; \
65
- GetNArray(a_vnary, a_nary); \
66
- const int n_dims = NA_NDIM(a_nary); \
67
- if (n_dims != 2) { \
68
- rb_raise(rb_eArgError, "input array must be 2-dimensional"); \
69
- return Qnil; \
70
- } \
71
- \
72
- const int matrix_order = order == 'C' ? LAPACK_COL_MAJOR : LAPACK_ROW_MAJOR; \
73
- const size_t m = matrix_order == LAPACK_ROW_MAJOR ? NA_SHAPE(a_nary)[0] : NA_SHAPE(a_nary)[1]; \
74
- const size_t n = matrix_order == LAPACK_ROW_MAJOR ? NA_SHAPE(a_nary)[1] : NA_SHAPE(a_nary)[0]; \
75
- \
76
- const size_t min_mn = m < n ? m : n; \
77
- size_t shape_s[1] = { min_mn }; \
78
- size_t shape_u[2] = { m, m }; \
79
- size_t shape_vt[2] = { n, n }; \
80
- \
81
- ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
82
- ndfunc_arg_out_t aout[4] = { { tRtNAryClass, 1, shape_s }, { tNAryClass, 2, shape_u }, { tNAryClass, 2, shape_vt }, { numo_cInt32, 0 } }; \
83
- \
84
- switch (jobu) { \
85
- case 'A': \
86
- break; \
87
- case 'S': \
88
- shape_u[matrix_order == LAPACK_ROW_MAJOR ? 1 : 0] = min_mn; \
89
- break; \
90
- case 'O': \
91
- case 'N': \
92
- aout[1].dim = 0; \
93
- break; \
94
- default: \
95
- rb_raise(rb_eArgError, "jobu must be 'A', 'S', 'O', or 'N'"); \
96
- return Qnil; \
97
- } \
98
- \
99
- switch (jobvt) { \
100
- case 'A': \
101
- break; \
102
- case 'S': \
103
- shape_vt[matrix_order == LAPACK_ROW_MAJOR ? 0 : 1] = min_mn; \
104
- break; \
105
- case 'O': \
106
- case 'N': \
107
- aout[2].dim = 0; \
108
- break; \
109
- default: \
110
- rb_raise(rb_eArgError, "jobvt must be 'A', 'S', 'O', or 'N'"); \
111
- return Qnil; \
112
- } \
113
- \
114
- ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 4, ain, aout }; \
115
- struct _gesvd_option opt = { matrix_order, jobu, jobvt }; \
116
- VALUE ret = na_ndloop3(&ndf, &opt, 1, a_vnary); \
117
- \
118
- switch (jobu) { \
119
- case 'O': \
120
- rb_ary_store(ret, 1, a_vnary); \
121
- break; \
122
- case 'N': \
123
- rb_ary_store(ret, 1, Qnil); \
124
- break; \
125
- } \
126
- \
127
- switch (jobvt) { \
128
- case 'O': \
129
- rb_ary_store(ret, 2, a_vnary); \
130
- break; \
131
- case 'N': \
132
- rb_ary_store(ret, 2, Qnil); \
133
- break; \
134
- } \
135
- \
136
- RB_GC_GUARD(a_vnary); \
137
- return ret; \
9
+ #define DEF_LINALG_FUNC(tDType, tRtDType, tNAryClass, tRtNAryClass, fLapackFunc) \
10
+ static void _iter_##fLapackFunc(na_loop_t* const lp) { \
11
+ tDType* a = (tDType*)NDL_PTR(lp, 0); \
12
+ tRtDType* s = (tRtDType*)NDL_PTR(lp, 1); \
13
+ tDType* u = (tDType*)NDL_PTR(lp, 2); \
14
+ tDType* vt = (tDType*)NDL_PTR(lp, 3); \
15
+ int* info = (int*)NDL_PTR(lp, 4); \
16
+ struct _gesvd_option* opt = (struct _gesvd_option*)(lp->opt_ptr); \
17
+ \
18
+ const lapack_int m = \
19
+ (lapack_int)(opt->matrix_order == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[0] \
20
+ : NDL_SHAPE(lp, 0)[1]); \
21
+ const lapack_int n = \
22
+ (lapack_int)(opt->matrix_order == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[1] \
23
+ : NDL_SHAPE(lp, 0)[0]); \
24
+ const lapack_int min_mn = m < n ? m : n; \
25
+ const lapack_int lda = n; \
26
+ const lapack_int ldu = opt->jobu == 'A' ? m : min_mn; \
27
+ const lapack_int ldvt = n; \
28
+ \
29
+ tRtDType* superb = (tRtDType*)ruby_xmalloc(min_mn * sizeof(tRtDType)); \
30
+ \
31
+ lapack_int i = LAPACKE_##fLapackFunc( \
32
+ opt->matrix_order, opt->jobu, opt->jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb \
33
+ ); \
34
+ *info = (int)i; \
35
+ \
36
+ ruby_xfree(superb); \
37
+ } \
38
+ \
39
+ static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
40
+ VALUE a_vnary = Qnil; \
41
+ VALUE kw_args = Qnil; \
42
+ \
43
+ rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
44
+ \
45
+ ID kw_table[3] = { rb_intern("jobu"), rb_intern("jobvt"), rb_intern("order") }; \
46
+ VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
47
+ \
48
+ rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
49
+ \
50
+ const char jobu = kw_values[0] == Qundef ? 'A' : StringValueCStr(kw_values[0])[0]; \
51
+ const char jobvt = kw_values[1] == Qundef ? 'A' : StringValueCStr(kw_values[1])[0]; \
52
+ const char order = kw_values[2] == Qundef ? 'R' : StringValueCStr(kw_values[2])[0]; \
53
+ \
54
+ if (jobu == 'O' && jobvt == 'O') { \
55
+ rb_raise(rb_eArgError, "jobu and jobvt cannot be both 'O'"); \
56
+ return Qnil; \
57
+ } \
58
+ if (CLASS_OF(a_vnary) != tNAryClass) { \
59
+ rb_raise(rb_eTypeError, "type of input array is invalid for overwriting"); \
60
+ return Qnil; \
61
+ } \
62
+ \
63
+ if (CLASS_OF(a_vnary) != tNAryClass) { \
64
+ a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
65
+ } \
66
+ if (!RTEST(nary_check_contiguous(a_vnary))) { \
67
+ a_vnary = nary_dup(a_vnary); \
68
+ } \
69
+ \
70
+ narray_t* a_nary = NULL; \
71
+ GetNArray(a_vnary, a_nary); \
72
+ const int n_dims = NA_NDIM(a_nary); \
73
+ if (n_dims != 2) { \
74
+ rb_raise(rb_eArgError, "input array must be 2-dimensional"); \
75
+ return Qnil; \
76
+ } \
77
+ \
78
+ const int matrix_order = order == 'C' ? LAPACK_COL_MAJOR : LAPACK_ROW_MAJOR; \
79
+ const size_t m = \
80
+ matrix_order == LAPACK_ROW_MAJOR ? NA_SHAPE(a_nary)[0] : NA_SHAPE(a_nary)[1]; \
81
+ const size_t n = \
82
+ matrix_order == LAPACK_ROW_MAJOR ? NA_SHAPE(a_nary)[1] : NA_SHAPE(a_nary)[0]; \
83
+ \
84
+ const size_t min_mn = m < n ? m : n; \
85
+ size_t shape_s[1] = { min_mn }; \
86
+ size_t shape_u[2] = { m, m }; \
87
+ size_t shape_vt[2] = { n, n }; \
88
+ \
89
+ ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
90
+ ndfunc_arg_out_t aout[4] = { { tRtNAryClass, 1, shape_s }, \
91
+ { tNAryClass, 2, shape_u }, \
92
+ { tNAryClass, 2, shape_vt }, \
93
+ { numo_cInt32, 0 } }; \
94
+ \
95
+ switch (jobu) { \
96
+ case 'A': \
97
+ break; \
98
+ case 'S': \
99
+ shape_u[matrix_order == LAPACK_ROW_MAJOR ? 1 : 0] = min_mn; \
100
+ break; \
101
+ case 'O': \
102
+ case 'N': \
103
+ aout[1].dim = 0; \
104
+ break; \
105
+ default: \
106
+ rb_raise(rb_eArgError, "jobu must be 'A', 'S', 'O', or 'N'"); \
107
+ return Qnil; \
108
+ } \
109
+ \
110
+ switch (jobvt) { \
111
+ case 'A': \
112
+ break; \
113
+ case 'S': \
114
+ shape_vt[matrix_order == LAPACK_ROW_MAJOR ? 0 : 1] = min_mn; \
115
+ break; \
116
+ case 'O': \
117
+ case 'N': \
118
+ aout[2].dim = 0; \
119
+ break; \
120
+ default: \
121
+ rb_raise(rb_eArgError, "jobvt must be 'A', 'S', 'O', or 'N'"); \
122
+ return Qnil; \
123
+ } \
124
+ \
125
+ ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 4, ain, aout }; \
126
+ struct _gesvd_option opt = { matrix_order, jobu, jobvt }; \
127
+ VALUE ret = na_ndloop3(&ndf, &opt, 1, a_vnary); \
128
+ \
129
+ switch (jobu) { \
130
+ case 'O': \
131
+ rb_ary_store(ret, 1, a_vnary); \
132
+ break; \
133
+ case 'N': \
134
+ rb_ary_store(ret, 1, Qnil); \
135
+ break; \
136
+ } \
137
+ \
138
+ switch (jobvt) { \
139
+ case 'O': \
140
+ rb_ary_store(ret, 2, a_vnary); \
141
+ break; \
142
+ case 'N': \
143
+ rb_ary_store(ret, 2, Qnil); \
144
+ break; \
145
+ } \
146
+ \
147
+ RB_GC_GUARD(a_vnary); \
148
+ return ret; \
138
149
  }
139
150
 
140
151
  DEF_LINALG_FUNC(double, double, numo_cDFloat, numo_cDFloat, dgesvd)