numo-linalg-alt 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -1
  3. data/README.md +3 -1
  4. data/ext/numo/linalg/blas/dot.c +59 -59
  5. data/ext/numo/linalg/blas/dot_sub.c +58 -58
  6. data/ext/numo/linalg/blas/gemm.c +157 -148
  7. data/ext/numo/linalg/blas/gemv.c +131 -127
  8. data/ext/numo/linalg/blas/nrm2.c +50 -50
  9. data/ext/numo/linalg/lapack/gees.c +276 -0
  10. data/ext/numo/linalg/lapack/gees.h +15 -0
  11. data/ext/numo/linalg/lapack/geev.c +127 -110
  12. data/ext/numo/linalg/lapack/gelsd.c +81 -70
  13. data/ext/numo/linalg/lapack/geqrf.c +52 -51
  14. data/ext/numo/linalg/lapack/gerqf.c +70 -0
  15. data/ext/numo/linalg/lapack/gerqf.h +15 -0
  16. data/ext/numo/linalg/lapack/gesdd.c +96 -86
  17. data/ext/numo/linalg/lapack/gesv.c +80 -78
  18. data/ext/numo/linalg/lapack/gesvd.c +140 -129
  19. data/ext/numo/linalg/lapack/getrf.c +51 -50
  20. data/ext/numo/linalg/lapack/getri.c +64 -63
  21. data/ext/numo/linalg/lapack/getrs.c +92 -88
  22. data/ext/numo/linalg/lapack/gges.c +214 -0
  23. data/ext/numo/linalg/lapack/gges.h +15 -0
  24. data/ext/numo/linalg/lapack/heev.c +54 -52
  25. data/ext/numo/linalg/lapack/heevd.c +54 -52
  26. data/ext/numo/linalg/lapack/heevr.c +109 -98
  27. data/ext/numo/linalg/lapack/hegv.c +77 -74
  28. data/ext/numo/linalg/lapack/hegvd.c +77 -74
  29. data/ext/numo/linalg/lapack/hegvx.c +132 -120
  30. data/ext/numo/linalg/lapack/hetrf.c +54 -50
  31. data/ext/numo/linalg/lapack/lange.c +45 -44
  32. data/ext/numo/linalg/lapack/orgqr.c +63 -62
  33. data/ext/numo/linalg/lapack/orgrq.c +78 -0
  34. data/ext/numo/linalg/lapack/orgrq.h +15 -0
  35. data/ext/numo/linalg/lapack/potrf.c +49 -48
  36. data/ext/numo/linalg/lapack/potri.c +49 -48
  37. data/ext/numo/linalg/lapack/potrs.c +74 -72
  38. data/ext/numo/linalg/lapack/syev.c +54 -52
  39. data/ext/numo/linalg/lapack/syevd.c +54 -52
  40. data/ext/numo/linalg/lapack/syevr.c +107 -98
  41. data/ext/numo/linalg/lapack/sygv.c +77 -73
  42. data/ext/numo/linalg/lapack/sygvd.c +77 -73
  43. data/ext/numo/linalg/lapack/sygvx.c +132 -120
  44. data/ext/numo/linalg/lapack/sytrf.c +54 -50
  45. data/ext/numo/linalg/lapack/trtrs.c +79 -75
  46. data/ext/numo/linalg/lapack/ungqr.c +63 -62
  47. data/ext/numo/linalg/lapack/ungrq.c +78 -0
  48. data/ext/numo/linalg/lapack/ungrq.h +15 -0
  49. data/ext/numo/linalg/linalg.c +21 -10
  50. data/ext/numo/linalg/linalg.h +5 -0
  51. data/ext/numo/linalg/util.c +8 -0
  52. data/ext/numo/linalg/util.h +1 -0
  53. data/lib/numo/linalg/version.rb +1 -1
  54. data/lib/numo/linalg.rb +322 -0
  55. metadata +14 -4
data/ext/numo/linalg/lapack/potrs.c
@@ -5,78 +5,80 @@ struct _potrs_option {
   char uplo;
 };
 
- #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
- static void _iter_##fLapackFunc(na_loop_t* const lp) { \
- tDType* a = (tDType*)NDL_PTR(lp, 0); \
- tDType* b = (tDType*)NDL_PTR(lp, 1); \
- int* info = (int*)NDL_PTR(lp, 2); \
- struct _potrs_option* opt = (struct _potrs_option*)(lp->opt_ptr); \
- const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
- const lapack_int nrhs = lp->args[1].ndim == 1 ? 1 : (lapack_int)NDL_SHAPE(lp, 1)[1]; \
- const lapack_int lda = n; \
- const lapack_int ldb = nrhs; \
- const lapack_int i = LAPACKE_##fLapackFunc(opt->matrix_layout, opt->uplo, n, nrhs, a, lda, b, ldb); \
- *info = (int)i; \
- } \
- \
- static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
- VALUE a_vnary = Qnil; \
- VALUE b_vnary = Qnil; \
- VALUE kw_args = Qnil; \
- rb_scan_args(argc, argv, "2:", &a_vnary, &b_vnary, &kw_args); \
- ID kw_table[2] = { rb_intern("order"), rb_intern("uplo") }; \
- VALUE kw_values[2] = { Qundef, Qundef }; \
- rb_get_kwargs(kw_args, kw_table, 0, 2, kw_values); \
- const int matrix_layout = kw_values[0] != Qundef ? get_matrix_layout(kw_values[0]) : LAPACK_ROW_MAJOR; \
- const char uplo = kw_values[1] != Qundef ? get_uplo(kw_values[1]) : 'U'; \
- \
- if (CLASS_OF(a_vnary) != tNAryClass) { \
- a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
- } \
- if (!RTEST(nary_check_contiguous(a_vnary))) { \
- a_vnary = nary_dup(a_vnary); \
- } \
- if (CLASS_OF(b_vnary) != tNAryClass) { \
- b_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, b_vnary); \
- } \
- if (!RTEST(nary_check_contiguous(b_vnary))) { \
- b_vnary = nary_dup(b_vnary); \
- } \
- \
- narray_t* a_nary = NULL; \
- GetNArray(a_vnary, a_nary); \
- if (NA_NDIM(a_nary) != 2) { \
- rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
- return Qnil; \
- } \
- if (NA_SHAPE(a_nary)[0] != NA_SHAPE(a_nary)[1]) { \
- rb_raise(rb_eArgError, "input array a must be square"); \
- return Qnil; \
- } \
- narray_t* b_nary = NULL; \
- GetNArray(b_vnary, b_nary); \
- const int b_n_dims = NA_NDIM(b_nary); \
- if (b_n_dims != 1 && b_n_dims != 2) { \
- rb_raise(rb_eArgError, "input array b must be 1- or 2-dimensional"); \
- return Qnil; \
- } \
- \
- lapack_int n = (lapack_int)NA_SHAPE(a_nary)[0]; \
- lapack_int nb = (lapack_int)NA_SHAPE(b_nary)[0]; \
- if (n != nb) { \
- rb_raise(nary_eShapeError, "shape1[0](=%d) != shape2[0](=%d)", n, nb); \
- } \
- \
- ndfunc_arg_in_t ain[2] = { { tNAryClass, 2 }, { OVERWRITE, b_n_dims } }; \
- ndfunc_arg_out_t aout[1] = { { numo_cInt32, 0 } }; \
- ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 2, 1, ain, aout }; \
- struct _potrs_option opt = { matrix_layout, uplo }; \
- VALUE res = na_ndloop3(&ndf, &opt, 2, a_vnary, b_vnary); \
- VALUE ret = rb_ary_new3(2, b_vnary, res); \
- \
- RB_GC_GUARD(a_vnary); \
- RB_GC_GUARD(b_vnary); \
- return ret; \
+ #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
+ static void _iter_##fLapackFunc(na_loop_t* const lp) { \
+ tDType* a = (tDType*)NDL_PTR(lp, 0); \
+ tDType* b = (tDType*)NDL_PTR(lp, 1); \
+ int* info = (int*)NDL_PTR(lp, 2); \
+ struct _potrs_option* opt = (struct _potrs_option*)(lp->opt_ptr); \
+ const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
+ const lapack_int nrhs = lp->args[1].ndim == 1 ? 1 : (lapack_int)NDL_SHAPE(lp, 1)[1]; \
+ const lapack_int lda = n; \
+ const lapack_int ldb = nrhs; \
+ const lapack_int i = \
+ LAPACKE_##fLapackFunc(opt->matrix_layout, opt->uplo, n, nrhs, a, lda, b, ldb); \
+ *info = (int)i; \
+ } \
+ \
+ static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
+ VALUE a_vnary = Qnil; \
+ VALUE b_vnary = Qnil; \
+ VALUE kw_args = Qnil; \
+ rb_scan_args(argc, argv, "2:", &a_vnary, &b_vnary, &kw_args); \
+ ID kw_table[2] = { rb_intern("order"), rb_intern("uplo") }; \
+ VALUE kw_values[2] = { Qundef, Qundef }; \
+ rb_get_kwargs(kw_args, kw_table, 0, 2, kw_values); \
+ const int matrix_layout = \
+ kw_values[0] != Qundef ? get_matrix_layout(kw_values[0]) : LAPACK_ROW_MAJOR; \
+ const char uplo = kw_values[1] != Qundef ? get_uplo(kw_values[1]) : 'U'; \
+ \
+ if (CLASS_OF(a_vnary) != tNAryClass) { \
+ a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
+ } \
+ if (!RTEST(nary_check_contiguous(a_vnary))) { \
+ a_vnary = nary_dup(a_vnary); \
+ } \
+ if (CLASS_OF(b_vnary) != tNAryClass) { \
+ b_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, b_vnary); \
+ } \
+ if (!RTEST(nary_check_contiguous(b_vnary))) { \
+ b_vnary = nary_dup(b_vnary); \
+ } \
+ \
+ narray_t* a_nary = NULL; \
+ GetNArray(a_vnary, a_nary); \
+ if (NA_NDIM(a_nary) != 2) { \
+ rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
+ return Qnil; \
+ } \
+ if (NA_SHAPE(a_nary)[0] != NA_SHAPE(a_nary)[1]) { \
+ rb_raise(rb_eArgError, "input array a must be square"); \
+ return Qnil; \
+ } \
+ narray_t* b_nary = NULL; \
+ GetNArray(b_vnary, b_nary); \
+ const int b_n_dims = NA_NDIM(b_nary); \
+ if (b_n_dims != 1 && b_n_dims != 2) { \
+ rb_raise(rb_eArgError, "input array b must be 1- or 2-dimensional"); \
+ return Qnil; \
+ } \
+ \
+ lapack_int n = (lapack_int)NA_SHAPE(a_nary)[0]; \
+ lapack_int nb = (lapack_int)NA_SHAPE(b_nary)[0]; \
+ if (n != nb) { \
+ rb_raise(nary_eShapeError, "shape1[0](=%d) != shape2[0](=%d)", n, nb); \
+ } \
+ \
+ ndfunc_arg_in_t ain[2] = { { tNAryClass, 2 }, { OVERWRITE, b_n_dims } }; \
+ ndfunc_arg_out_t aout[1] = { { numo_cInt32, 0 } }; \
+ ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 2, 1, ain, aout }; \
+ struct _potrs_option opt = { matrix_layout, uplo }; \
+ VALUE res = na_ndloop3(&ndf, &opt, 2, a_vnary, b_vnary); \
+ VALUE ret = rb_ary_new3(2, b_vnary, res); \
+ \
+ RB_GC_GUARD(a_vnary); \
+ RB_GC_GUARD(b_vnary); \
+ return ret; \
  }
 
  DEF_LINALG_FUNC(double, numo_cDFloat, dpotrs)
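
For orientation, the only functional statement in the iterator above is the LAPACKE_dpotrs call; everything else is argument checking and NArray plumbing. The following is a minimal standalone C sketch of that call (illustrative only, not part of the gem or this diff), assuming a system LAPACKE installation linked with -llapacke:

    /* Illustrative only: solve A*x = b given the Cholesky factor of A.
     * A = [[4, 2], [2, 3]] has upper factor U = [[2, 1], [0, sqrt(2)]],
     * and b = A * [1, 1]^T = [6, 5]^T, so the expected solution is x = [1, 1]. */
    #include <stdio.h>
    #include <lapacke.h>

    int main(void) {
      double u[4] = { 2.0, 1.0, 0.0, 1.4142135623730951 }; /* row-major upper factor */
      double b[2] = { 6.0, 5.0 };
      /* Same argument order the macro uses: layout, uplo, n, nrhs, a, lda, b, ldb. */
      lapack_int info = LAPACKE_dpotrs(LAPACK_ROW_MAJOR, 'U', 2, 1, u, 2, b, 1);
      printf("info = %d, x = [%g, %g]\n", (int)info, b[0], b[1]);
      return (int)info;
    }

As in the wrapper, b is overwritten in place with the solution and the LAPACK return code is surfaced as info.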
data/ext/numo/linalg/lapack/syev.c
@@ -6,58 +6,60 @@ struct _syev_option {
   char uplo;
 };
 
- #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
- static void _iter_##fLapackFunc(na_loop_t* const lp) { \
- tDType* a = (tDType*)NDL_PTR(lp, 0); \
- tDType* w = (tDType*)NDL_PTR(lp, 1); \
- int* info = (int*)NDL_PTR(lp, 2); \
- struct _syev_option* opt = (struct _syev_option*)(lp->opt_ptr); \
- const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[1]; \
- const lapack_int lda = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
- const lapack_int i = LAPACKE_##fLapackFunc(opt->matrix_layout, opt->jobz, opt->uplo, n, a, lda, w); \
- *info = (int)i; \
- } \
- \
- static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
- VALUE a_vnary = Qnil; \
- VALUE kw_args = Qnil; \
- rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
- ID kw_table[3] = { rb_intern("jobz"), rb_intern("uplo"), rb_intern("order") }; \
- VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
- rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
- const char jobz = kw_values[0] != Qundef ? get_jobz(kw_values[0]) : 'V'; \
- const char uplo = kw_values[1] != Qundef ? get_uplo(kw_values[1]) : 'U'; \
- const int matrix_layout = kw_values[2] != Qundef ? get_matrix_layout(kw_values[2]) : LAPACK_ROW_MAJOR; \
- \
- if (CLASS_OF(a_vnary) != tNAryClass) { \
- a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
- } \
- if (!RTEST(nary_check_contiguous(a_vnary))) { \
- a_vnary = nary_dup(a_vnary); \
- } \
- \
- narray_t* a_nary = NULL; \
- GetNArray(a_vnary, a_nary); \
- if (NA_NDIM(a_nary) != 2) { \
- rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
- return Qnil; \
- } \
- if (NA_SHAPE(a_nary)[0] != NA_SHAPE(a_nary)[1]) { \
- rb_raise(rb_eArgError, "input array a must be square"); \
- return Qnil; \
- } \
- \
- const size_t n = NA_SHAPE(a_nary)[1]; \
- size_t shape[1] = { n }; \
- ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
- ndfunc_arg_out_t aout[2] = { { tNAryClass, 1, shape }, { numo_cInt32, 0 } }; \
- ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 2, ain, aout }; \
- struct _syev_option opt = { matrix_layout, jobz, uplo }; \
- VALUE res = na_ndloop3(&ndf, &opt, 1, a_vnary); \
- VALUE ret = rb_ary_new3(3, a_vnary, rb_ary_entry(res, 0), rb_ary_entry(res, 1)); \
- \
- RB_GC_GUARD(a_vnary); \
- return ret; \
+ #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
+ static void _iter_##fLapackFunc(na_loop_t* const lp) { \
+ tDType* a = (tDType*)NDL_PTR(lp, 0); \
+ tDType* w = (tDType*)NDL_PTR(lp, 1); \
+ int* info = (int*)NDL_PTR(lp, 2); \
+ struct _syev_option* opt = (struct _syev_option*)(lp->opt_ptr); \
+ const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[1]; \
+ const lapack_int lda = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
+ const lapack_int i = \
+ LAPACKE_##fLapackFunc(opt->matrix_layout, opt->jobz, opt->uplo, n, a, lda, w); \
+ *info = (int)i; \
+ } \
+ \
+ static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
+ VALUE a_vnary = Qnil; \
+ VALUE kw_args = Qnil; \
+ rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
+ ID kw_table[3] = { rb_intern("jobz"), rb_intern("uplo"), rb_intern("order") }; \
+ VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
+ rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
+ const char jobz = kw_values[0] != Qundef ? get_jobz(kw_values[0]) : 'V'; \
+ const char uplo = kw_values[1] != Qundef ? get_uplo(kw_values[1]) : 'U'; \
+ const int matrix_layout = \
+ kw_values[2] != Qundef ? get_matrix_layout(kw_values[2]) : LAPACK_ROW_MAJOR; \
+ \
+ if (CLASS_OF(a_vnary) != tNAryClass) { \
+ a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
+ } \
+ if (!RTEST(nary_check_contiguous(a_vnary))) { \
+ a_vnary = nary_dup(a_vnary); \
+ } \
+ \
+ narray_t* a_nary = NULL; \
+ GetNArray(a_vnary, a_nary); \
+ if (NA_NDIM(a_nary) != 2) { \
+ rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
+ return Qnil; \
+ } \
+ if (NA_SHAPE(a_nary)[0] != NA_SHAPE(a_nary)[1]) { \
+ rb_raise(rb_eArgError, "input array a must be square"); \
+ return Qnil; \
+ } \
+ \
+ const size_t n = NA_SHAPE(a_nary)[1]; \
+ size_t shape[1] = { n }; \
+ ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
+ ndfunc_arg_out_t aout[2] = { { tNAryClass, 1, shape }, { numo_cInt32, 0 } }; \
+ ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 2, ain, aout }; \
+ struct _syev_option opt = { matrix_layout, jobz, uplo }; \
+ VALUE res = na_ndloop3(&ndf, &opt, 1, a_vnary); \
+ VALUE ret = rb_ary_new3(3, a_vnary, rb_ary_entry(res, 0), rb_ary_entry(res, 1)); \
+ \
+ RB_GC_GUARD(a_vnary); \
+ return ret; \
  }
 
  DEF_LINALG_FUNC(double, numo_cDFloat, dsyev)
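
The dsyev instantiation of this macro reduces to the LAPACKE_dsyev call sketched below (illustrative only, not part of the gem or this diff; assumes a system LAPACKE installation):

    /* Illustrative only: eigendecomposition of the symmetric matrix
     * A = [[2, 1], [1, 2]], whose eigenvalues are 1 and 3. */
    #include <stdio.h>
    #include <lapacke.h>

    int main(void) {
      double a[4] = { 2.0, 1.0, 1.0, 2.0 }; /* row-major; overwritten with eigenvectors */
      double w[2];                          /* eigenvalues, ascending order */
      lapack_int info = LAPACKE_dsyev(LAPACK_ROW_MAJOR, 'V', 'U', 2, a, 2, w);
      printf("info = %d, w = [%g, %g]\n", (int)info, w[0], w[1]);
      return (int)info;
    }

This mirrors the wrapper's defaults (jobz 'V', uplo 'U', row-major layout) and its in-place overwrite of a with the eigenvectors.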
data/ext/numo/linalg/lapack/syevd.c
@@ -6,58 +6,60 @@ struct _syevd_option {
   char uplo;
 };
 
- #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
- static void _iter_##fLapackFunc(na_loop_t* const lp) { \
- tDType* a = (tDType*)NDL_PTR(lp, 0); \
- tDType* w = (tDType*)NDL_PTR(lp, 1); \
- int* info = (int*)NDL_PTR(lp, 2); \
- struct _syevd_option* opt = (struct _syevd_option*)(lp->opt_ptr); \
- const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[1]; \
- const lapack_int lda = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
- const lapack_int i = LAPACKE_##fLapackFunc(opt->matrix_layout, opt->jobz, opt->uplo, n, a, lda, w); \
- *info = (int)i; \
- } \
- \
- static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
- VALUE a_vnary = Qnil; \
- VALUE kw_args = Qnil; \
- rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
- ID kw_table[3] = { rb_intern("jobz"), rb_intern("uplo"), rb_intern("order") }; \
- VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
- rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
- const char jobz = kw_values[0] != Qundef ? get_jobz(kw_values[0]) : 'V'; \
- const char uplo = kw_values[1] != Qundef ? get_uplo(kw_values[1]) : 'U'; \
- const int matrix_layout = kw_values[2] != Qundef ? get_matrix_layout(kw_values[2]) : LAPACK_ROW_MAJOR; \
- \
- if (CLASS_OF(a_vnary) != tNAryClass) { \
- a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
- } \
- if (!RTEST(nary_check_contiguous(a_vnary))) { \
- a_vnary = nary_dup(a_vnary); \
- } \
- \
- narray_t* a_nary = NULL; \
- GetNArray(a_vnary, a_nary); \
- if (NA_NDIM(a_nary) != 2) { \
- rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
- return Qnil; \
- } \
- if (NA_SHAPE(a_nary)[0] != NA_SHAPE(a_nary)[1]) { \
- rb_raise(rb_eArgError, "input array a must be square"); \
- return Qnil; \
- } \
- \
- const size_t n = NA_SHAPE(a_nary)[1]; \
- size_t shape[1] = { n }; \
- ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
- ndfunc_arg_out_t aout[2] = { { tNAryClass, 1, shape }, { numo_cInt32, 0 } }; \
- ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 2, ain, aout }; \
- struct _syevd_option opt = { matrix_layout, jobz, uplo }; \
- VALUE res = na_ndloop3(&ndf, &opt, 1, a_vnary); \
- VALUE ret = rb_ary_new3(3, a_vnary, rb_ary_entry(res, 0), rb_ary_entry(res, 1)); \
- \
- RB_GC_GUARD(a_vnary); \
- return ret; \
+ #define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
+ static void _iter_##fLapackFunc(na_loop_t* const lp) { \
+ tDType* a = (tDType*)NDL_PTR(lp, 0); \
+ tDType* w = (tDType*)NDL_PTR(lp, 1); \
+ int* info = (int*)NDL_PTR(lp, 2); \
+ struct _syevd_option* opt = (struct _syevd_option*)(lp->opt_ptr); \
+ const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[1]; \
+ const lapack_int lda = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
+ const lapack_int i = \
+ LAPACKE_##fLapackFunc(opt->matrix_layout, opt->jobz, opt->uplo, n, a, lda, w); \
+ *info = (int)i; \
+ } \
+ \
+ static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
+ VALUE a_vnary = Qnil; \
+ VALUE kw_args = Qnil; \
+ rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
+ ID kw_table[3] = { rb_intern("jobz"), rb_intern("uplo"), rb_intern("order") }; \
+ VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
+ rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
+ const char jobz = kw_values[0] != Qundef ? get_jobz(kw_values[0]) : 'V'; \
+ const char uplo = kw_values[1] != Qundef ? get_uplo(kw_values[1]) : 'U'; \
+ const int matrix_layout = \
+ kw_values[2] != Qundef ? get_matrix_layout(kw_values[2]) : LAPACK_ROW_MAJOR; \
+ \
+ if (CLASS_OF(a_vnary) != tNAryClass) { \
+ a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
+ } \
+ if (!RTEST(nary_check_contiguous(a_vnary))) { \
+ a_vnary = nary_dup(a_vnary); \
+ } \
+ \
+ narray_t* a_nary = NULL; \
+ GetNArray(a_vnary, a_nary); \
+ if (NA_NDIM(a_nary) != 2) { \
+ rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
+ return Qnil; \
+ } \
+ if (NA_SHAPE(a_nary)[0] != NA_SHAPE(a_nary)[1]) { \
+ rb_raise(rb_eArgError, "input array a must be square"); \
+ return Qnil; \
+ } \
+ \
+ const size_t n = NA_SHAPE(a_nary)[1]; \
+ size_t shape[1] = { n }; \
+ ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
+ ndfunc_arg_out_t aout[2] = { { tNAryClass, 1, shape }, { numo_cInt32, 0 } }; \
+ ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 2, ain, aout }; \
+ struct _syevd_option opt = { matrix_layout, jobz, uplo }; \
+ VALUE res = na_ndloop3(&ndf, &opt, 1, a_vnary); \
+ VALUE ret = rb_ary_new3(3, a_vnary, rb_ary_entry(res, 0), rb_ary_entry(res, 1)); \
+ \
+ RB_GC_GUARD(a_vnary); \
+ return ret; \
  }
 
  DEF_LINALG_FUNC(double, numo_cDFloat, dsyevd)
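
Likewise, the dsyevd instantiation ends in LAPACKE_dsyevd, the divide-and-conquer counterpart of dsyev with the same argument list. A minimal standalone sketch (illustrative only, not part of the gem or this diff), here requesting eigenvalues without eigenvectors:

    /* Illustrative only: eigenvalues of a 3x3 symmetric matrix via the
     * divide-and-conquer driver, without eigenvectors (jobz = 'N'). */
    #include <stdio.h>
    #include <lapacke.h>

    int main(void) {
      double a[9] = { 4.0, 1.0, 0.0,
                      1.0, 3.0, 1.0,
                      0.0, 1.0, 2.0 }; /* row-major; with uplo 'U' only the upper triangle is read */
      double w[3];
      lapack_int info = LAPACKE_dsyevd(LAPACK_ROW_MAJOR, 'N', 'U', 3, a, 3, w);
      printf("info = %d, w = [%g, %g, %g]\n", (int)info, w[0], w[1], w[2]);
      return (int)info;
    }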