numo-linalg-alt 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/ext/numo/linalg/blas/dot.c +59 -59
- data/ext/numo/linalg/blas/dot_sub.c +58 -58
- data/ext/numo/linalg/blas/gemm.c +157 -148
- data/ext/numo/linalg/blas/gemv.c +131 -127
- data/ext/numo/linalg/blas/nrm2.c +50 -50
- data/ext/numo/linalg/lapack/gees.c +239 -220
- data/ext/numo/linalg/lapack/geev.c +127 -110
- data/ext/numo/linalg/lapack/gelsd.c +81 -70
- data/ext/numo/linalg/lapack/geqrf.c +52 -51
- data/ext/numo/linalg/lapack/gerqf.c +70 -0
- data/ext/numo/linalg/lapack/gerqf.h +15 -0
- data/ext/numo/linalg/lapack/gesdd.c +96 -86
- data/ext/numo/linalg/lapack/gesv.c +80 -78
- data/ext/numo/linalg/lapack/gesvd.c +140 -129
- data/ext/numo/linalg/lapack/getrf.c +51 -50
- data/ext/numo/linalg/lapack/getri.c +64 -63
- data/ext/numo/linalg/lapack/getrs.c +92 -88
- data/ext/numo/linalg/lapack/gges.c +214 -0
- data/ext/numo/linalg/lapack/gges.h +15 -0
- data/ext/numo/linalg/lapack/heev.c +54 -52
- data/ext/numo/linalg/lapack/heevd.c +54 -52
- data/ext/numo/linalg/lapack/heevr.c +109 -98
- data/ext/numo/linalg/lapack/hegv.c +77 -74
- data/ext/numo/linalg/lapack/hegvd.c +77 -74
- data/ext/numo/linalg/lapack/hegvx.c +132 -120
- data/ext/numo/linalg/lapack/hetrf.c +54 -50
- data/ext/numo/linalg/lapack/lange.c +45 -44
- data/ext/numo/linalg/lapack/orgqr.c +63 -62
- data/ext/numo/linalg/lapack/orgrq.c +78 -0
- data/ext/numo/linalg/lapack/orgrq.h +15 -0
- data/ext/numo/linalg/lapack/potrf.c +49 -48
- data/ext/numo/linalg/lapack/potri.c +49 -48
- data/ext/numo/linalg/lapack/potrs.c +74 -72
- data/ext/numo/linalg/lapack/syev.c +54 -52
- data/ext/numo/linalg/lapack/syevd.c +54 -52
- data/ext/numo/linalg/lapack/syevr.c +107 -98
- data/ext/numo/linalg/lapack/sygv.c +77 -73
- data/ext/numo/linalg/lapack/sygvd.c +77 -73
- data/ext/numo/linalg/lapack/sygvx.c +132 -120
- data/ext/numo/linalg/lapack/sytrf.c +54 -50
- data/ext/numo/linalg/lapack/trtrs.c +79 -75
- data/ext/numo/linalg/lapack/ungqr.c +63 -62
- data/ext/numo/linalg/lapack/ungrq.c +78 -0
- data/ext/numo/linalg/lapack/ungrq.h +15 -0
- data/ext/numo/linalg/linalg.c +20 -10
- data/ext/numo/linalg/linalg.h +4 -0
- data/ext/numo/linalg/util.c +8 -0
- data/ext/numo/linalg/util.h +1 -0
- data/lib/numo/linalg/version.rb +1 -1
- data/lib/numo/linalg.rb +139 -3
- metadata +10 -2
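Among the additions are new LAPACK wrappers for the RQ factorization (gerqf, orgrq, ungrq) and the generalized Schur decomposition (gges), plus a substantially expanded data/lib/numo/linalg.rb. As a purely hypothetical sketch of how one of the new low-level bindings might be called, assuming numo-linalg-alt exposes it as Numo::Linalg::Lapack.dgerqf with the same return convention as the dgeqrf wrapper shown in the last hunk below:

    require 'numo/linalg'

    # Hypothetical call: the method name and its [a, tau, info] return convention
    # are assumed by analogy with dgeqrf; neither is visible in this listing.
    a = Numo::DFloat.new(4, 6).rand
    rq, tau, info = Numo::Linalg::Lapack.dgerqf(a.dup)
    # rq: the input overwritten with the packed RQ factors,
    # tau: the Householder scalars, info: the LAPACK status code (0 on success).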
data/ext/numo/linalg/lapack/geev.c

@@ -22,118 +22,135 @@ char _get_jobvr(VALUE val) {
   return jobvr;
 }
 
-#define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc)
-  static void _iter_##fLapackFunc(na_loop_t* const lp) {
-    tDType* a = (tDType*)NDL_PTR(lp, 0);
-    tDType* wr = (tDType*)NDL_PTR(lp, 1);
-    tDType* wi = (tDType*)NDL_PTR(lp, 2);
-    tDType* vl = (tDType*)NDL_PTR(lp, 3);
-    tDType* vr = (tDType*)NDL_PTR(lp, 4);
-    int* info = (int*)NDL_PTR(lp, 5);
-    struct _geev_option* opt = (struct _geev_option*)(lp->opt_ptr);
-    const lapack_int n =
  [… remaining removed lines truncated in the source rendering …]
+#define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
+  static void _iter_##fLapackFunc(na_loop_t* const lp) { \
+    tDType* a = (tDType*)NDL_PTR(lp, 0); \
+    tDType* wr = (tDType*)NDL_PTR(lp, 1); \
+    tDType* wi = (tDType*)NDL_PTR(lp, 2); \
+    tDType* vl = (tDType*)NDL_PTR(lp, 3); \
+    tDType* vr = (tDType*)NDL_PTR(lp, 4); \
+    int* info = (int*)NDL_PTR(lp, 5); \
+    struct _geev_option* opt = (struct _geev_option*)(lp->opt_ptr); \
+    const lapack_int n = \
+      (lapack_int)(opt->matrix_layout == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[0] \
+                                                          : NDL_SHAPE(lp, 0)[1]); \
+    const lapack_int lda = n; \
+    const lapack_int ldvl = (opt->jobvl == 'N') ? 1 : n; \
+    const lapack_int ldvr = (opt->jobvr == 'N') ? 1 : n; \
+    lapack_int i = LAPACKE_##fLapackFunc( \
+      opt->matrix_layout, opt->jobvl, opt->jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr \
+    ); \
+    *info = (int)i; \
+  } \
+  \
+  static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
+    VALUE a_vnary = Qnil; \
+    VALUE kw_args = Qnil; \
+    rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
+    ID kw_table[3] = { rb_intern("order"), rb_intern("jobvl"), rb_intern("jobvr") }; \
+    VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
+    rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
+    const int matrix_layout = \
+      kw_values[0] != Qundef ? get_matrix_layout(kw_values[0]) : LAPACK_ROW_MAJOR; \
+    const char jobvl = kw_values[1] != Qundef ? _get_jobvl(kw_values[1]) : 'V'; \
+    const char jobvr = kw_values[2] != Qundef ? _get_jobvr(kw_values[2]) : 'V'; \
+    \
+    if (CLASS_OF(a_vnary) != tNAryClass) { \
+      a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
+    } \
+    if (!RTEST(nary_check_contiguous(a_vnary))) { \
+      a_vnary = nary_dup(a_vnary); \
+    } \
+    \
+    narray_t* a_nary = NULL; \
+    GetNArray(a_vnary, a_nary); \
+    const int n_dims = NA_NDIM(a_nary); \
+    if (n_dims != 2) { \
+      rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
+      return Qnil; \
+    } \
+    \
+    size_t n = matrix_layout == LAPACK_ROW_MAJOR ? NA_SHAPE(a_nary)[0] : NA_SHAPE(a_nary)[1]; \
+    size_t shape_wr[1] = { n }; \
+    size_t shape_wi[1] = { n }; \
+    size_t shape_vl[2] = { n, (jobvl == 'N') ? 1 : n }; \
+    size_t shape_vr[2] = { n, (jobvr == 'N') ? 1 : n }; \
+    ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
+    ndfunc_arg_out_t aout[5] = { { tNAryClass, 1, shape_wr }, \
+                                 { tNAryClass, 1, shape_wi }, \
+                                 { tNAryClass, 2, shape_vl }, \
+                                 { tNAryClass, 2, shape_vr }, \
+                                 { numo_cInt32, 0 } }; \
+    ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 5, ain, aout }; \
+    struct _geev_option opt = { matrix_layout, jobvl, jobvr }; \
+    VALUE ret = na_ndloop3(&ndf, &opt, 1, a_vnary); \
+    \
+    RB_GC_GUARD(a_vnary); \
+    return ret; \
   }
 
-#define DEF_LINALG_FUNC_COMPLEX(tDType, tNAryClass, fLapackFunc)
-  static void _iter_##fLapackFunc(na_loop_t* const lp) {
-    tDType* a = (tDType*)NDL_PTR(lp, 0);
-    tDType* w = (tDType*)NDL_PTR(lp, 1);
-    tDType* vl = (tDType*)NDL_PTR(lp, 2);
-    tDType* vr = (tDType*)NDL_PTR(lp, 3);
-    int* info = (int*)NDL_PTR(lp, 4);
-    struct _geev_option* opt = (struct _geev_option*)(lp->opt_ptr);
-    const lapack_int n =
  [… remaining removed lines truncated in the source rendering …]
+#define DEF_LINALG_FUNC_COMPLEX(tDType, tNAryClass, fLapackFunc) \
+  static void _iter_##fLapackFunc(na_loop_t* const lp) { \
+    tDType* a = (tDType*)NDL_PTR(lp, 0); \
+    tDType* w = (tDType*)NDL_PTR(lp, 1); \
+    tDType* vl = (tDType*)NDL_PTR(lp, 2); \
+    tDType* vr = (tDType*)NDL_PTR(lp, 3); \
+    int* info = (int*)NDL_PTR(lp, 4); \
+    struct _geev_option* opt = (struct _geev_option*)(lp->opt_ptr); \
+    const lapack_int n = \
+      (lapack_int)(opt->matrix_layout == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[0] \
+                                                          : NDL_SHAPE(lp, 0)[1]); \
+    const lapack_int lda = n; \
+    const lapack_int ldvl = (opt->jobvl == 'N') ? 1 : n; \
+    const lapack_int ldvr = (opt->jobvr == 'N') ? 1 : n; \
+    lapack_int i = LAPACKE_##fLapackFunc( \
+      opt->matrix_layout, opt->jobvl, opt->jobvr, n, a, lda, w, vl, ldvl, vr, ldvr \
+    ); \
+    *info = (int)i; \
+  } \
+  \
+  static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
+    VALUE a_vnary = Qnil; \
+    VALUE kw_args = Qnil; \
+    rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
+    ID kw_table[3] = { rb_intern("order"), rb_intern("jobvl"), rb_intern("jobvr") }; \
+    VALUE kw_values[3] = { Qundef, Qundef, Qundef }; \
+    rb_get_kwargs(kw_args, kw_table, 0, 3, kw_values); \
+    const int matrix_layout = \
+      kw_values[0] != Qundef ? get_matrix_layout(kw_values[0]) : LAPACK_ROW_MAJOR; \
+    const char jobvl = kw_values[1] != Qundef ? _get_jobvl(kw_values[1]) : 'V'; \
+    const char jobvr = kw_values[2] != Qundef ? _get_jobvr(kw_values[2]) : 'V'; \
+    \
+    if (CLASS_OF(a_vnary) != tNAryClass) { \
+      a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
+    } \
+    if (!RTEST(nary_check_contiguous(a_vnary))) { \
+      a_vnary = nary_dup(a_vnary); \
+    } \
+    \
+    narray_t* a_nary = NULL; \
+    GetNArray(a_vnary, a_nary); \
+    const int n_dims = NA_NDIM(a_nary); \
+    if (n_dims != 2) { \
+      rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
+      return Qnil; \
+    } \
+    \
+    size_t n = matrix_layout == LAPACK_ROW_MAJOR ? NA_SHAPE(a_nary)[0] : NA_SHAPE(a_nary)[1]; \
+    size_t shape_w[1] = { n }; \
+    size_t shape_vl[2] = { n, (jobvl == 'N') ? 1 : n }; \
+    size_t shape_vr[2] = { n, (jobvr == 'N') ? 1 : n }; \
+    ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
+    ndfunc_arg_out_t aout[4] = { { tNAryClass, 1, shape_w }, \
+                                 { tNAryClass, 2, shape_vl }, \
+                                 { tNAryClass, 2, shape_vr }, \
+                                 { numo_cInt32, 0 } }; \
+    ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 4, ain, aout }; \
+    struct _geev_option opt = { matrix_layout, jobvl, jobvr }; \
+    VALUE ret = na_ndloop3(&ndf, &opt, 1, a_vnary); \
+    \
+    RB_GC_GUARD(a_vnary); \
+    return ret; \
   }
 
 DEF_LINALG_FUNC(double, numo_cDFloat, dgeev)
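Per the rewritten macro, each generated binding (dgeev, sgeev, and the complex variants) takes the matrix as its only positional argument, overwrites it, and returns the ndloop outputs in order. A minimal usage sketch, assuming the binding is exposed as Numo::Linalg::Lapack.dgeev (the registration code is outside this hunk):

    require 'numo/linalg'

    # Assumed entry point: Numo::Linalg::Lapack.dgeev.
    # Per the aout array above, the return value is [wr, wi, vl, vr, info];
    # the input is marked OVERWRITE, hence the dup.
    a = Numo::DFloat[[3.0, 1.0], [0.0, 2.0]]
    wr, wi, vl, vr, info = Numo::Linalg::Lapack.dgeev(a.dup)
    # wr/wi: real and imaginary parts of the eigenvalues;
    # vl/vr: left and right eigenvectors (jobvl/jobvr default to 'V').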
data/ext/numo/linalg/lapack/gelsd.c

@@ -5,76 +5,87 @@ struct _gelsd_option {
   double rcond;
 };
 
-#define DEF_LINALG_FUNC(tDType, tRtDType, tNAryClass, tRtNAryClass, fLapackFunc)
-  static void _iter_##fLapackFunc(na_loop_t* const lp) {
-    tDType* a = (tDType*)NDL_PTR(lp, 0);
-    tDType* b = (tDType*)NDL_PTR(lp, 1);
-    tRtDType* s = (tRtDType*)NDL_PTR(lp, 2);
-    int* rank = (int*)NDL_PTR(lp, 3);
-    int* info = (int*)NDL_PTR(lp, 4);
-    struct _gelsd_option* opt = (struct _gelsd_option*)(lp->opt_ptr);
-    const lapack_int m =
  [… remaining removed lines truncated in the source rendering …]
+#define DEF_LINALG_FUNC(tDType, tRtDType, tNAryClass, tRtNAryClass, fLapackFunc) \
+  static void _iter_##fLapackFunc(na_loop_t* const lp) { \
+    tDType* a = (tDType*)NDL_PTR(lp, 0); \
+    tDType* b = (tDType*)NDL_PTR(lp, 1); \
+    tRtDType* s = (tRtDType*)NDL_PTR(lp, 2); \
+    int* rank = (int*)NDL_PTR(lp, 3); \
+    int* info = (int*)NDL_PTR(lp, 4); \
+    struct _gelsd_option* opt = (struct _gelsd_option*)(lp->opt_ptr); \
+    const lapack_int m = \
+      (lapack_int)(opt->matrix_layout == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[0] \
+                                                          : NDL_SHAPE(lp, 0)[1]); \
+    const lapack_int n = \
+      (lapack_int)(opt->matrix_layout == LAPACK_ROW_MAJOR ? NDL_SHAPE(lp, 0)[1] \
+                                                          : NDL_SHAPE(lp, 0)[0]); \
+    const lapack_int nrhs = lp->args[1].ndim == 1 ? 1 : (lapack_int)NDL_SHAPE(lp, 1)[1]; \
+    const lapack_int lda = n; \
+    const lapack_int ldb = nrhs; \
+    lapack_int r = 0; \
+    lapack_int i = LAPACKE_##fLapackFunc( \
+      opt->matrix_layout, m, n, nrhs, a, lda, b, ldb, s, (tRtDType)(opt->rcond), &r \
+    ); \
+    *rank = (int)r; \
+    *info = (int)i; \
+  } \
+  \
+  static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
+    VALUE a_vnary = Qnil; \
+    VALUE b_vnary = Qnil; \
+    VALUE kw_args = Qnil; \
+    rb_scan_args(argc, argv, "2:", &a_vnary, &b_vnary, &kw_args); \
+    ID kw_table[2] = { rb_intern("matrix_layout"), rb_intern("rcond") }; \
+    VALUE kw_values[2] = { Qundef, Qundef }; \
+    rb_get_kwargs(kw_args, kw_table, 0, 2, kw_values); \
+    const int matrix_layout = kw_values[0] != Qundef && kw_values[0] != Qnil \
+                                ? get_matrix_layout(kw_values[0]) \
+                                : LAPACK_ROW_MAJOR; \
+    const double rcond = \
+      kw_values[1] != Qundef && kw_values[1] != Qnil ? NUM2DBL(kw_values[1]) : -1.0; \
+    \
+    if (CLASS_OF(a_vnary) != tNAryClass) { \
+      a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
+    } \
+    if (!RTEST(nary_check_contiguous(a_vnary))) { \
+      a_vnary = nary_dup(a_vnary); \
+    } \
+    if (CLASS_OF(b_vnary) != tNAryClass) { \
+      b_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, b_vnary); \
+    } \
+    if (!RTEST(nary_check_contiguous(b_vnary))) { \
+      b_vnary = nary_dup(b_vnary); \
+    } \
+    \
+    narray_t* a_nary = NULL; \
+    GetNArray(a_vnary, a_nary); \
+    const int n_dims = NA_NDIM(a_nary); \
+    if (n_dims != 2) { \
+      rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
+      return Qnil; \
+    } \
+    narray_t* b_nary = NULL; \
+    GetNArray(b_vnary, b_nary); \
+    const int b_n_dims = NA_NDIM(b_nary); \
+    if (b_n_dims != 1 && b_n_dims != 2) { \
+      rb_raise(rb_eArgError, "input array b must be 1 or 2-dimensional"); \
+      return Qnil; \
+    } \
+    \
+    const size_t m = NA_SHAPE(a_nary)[0]; \
+    const size_t n = NA_SHAPE(a_nary)[1]; \
+    size_t shape_s[1] = { m < n ? m : n }; \
+    ndfunc_arg_in_t ain[2] = { { tNAryClass, 2 }, { OVERWRITE, b_n_dims } }; \
+    ndfunc_arg_out_t aout[3] = { { tRtNAryClass, 1, shape_s }, \
+                                 { numo_cInt32, 0 }, \
+                                 { numo_cInt32, 0 } }; \
+    ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 2, 3, ain, aout }; \
+    struct _gelsd_option opt = { matrix_layout, rcond }; \
+    VALUE ret = na_ndloop3(&ndf, &opt, 2, a_vnary, b_vnary); \
+    \
+    RB_GC_GUARD(a_vnary); \
+    RB_GC_GUARD(b_vnary); \
+    return ret; \
   }
 
 DEF_LINALG_FUNC(double, double, numo_cDFloat, numo_cDFloat, dgelsd)
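Here b is the OVERWRITE argument, so after the call it carries the least-squares solution while the return value is [s, rank, info]. A sketch under the same module-path assumption as above:

    require 'numo/linalg'

    # Assumed entry point: Numo::Linalg::Lapack.dgelsd.
    # b is overwritten in place (its first n entries hold the solution);
    # the return value is [s, rank, info] per the aout array above.
    a = Numo::DFloat.new(5, 3).rand
    b = Numo::DFloat.new(5).rand
    s, rank, info = Numo::Linalg::Lapack.dgelsd(a.dup, b, rcond: 1e-10)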
data/ext/numo/linalg/lapack/geqrf.c

@@ -4,57 +4,58 @@ struct _geqrf_option {
   int matrix_layout;
 };
 
-#define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc)
-  static void _iter_##fLapackFunc(na_loop_t* const lp) {
-    tDType* a = (tDType*)NDL_PTR(lp, 0);
-    tDType* tau = (tDType*)NDL_PTR(lp, 1);
-    int* info = (int*)NDL_PTR(lp, 2);
-    struct _geqrf_option* opt = (struct _geqrf_option*)(lp->opt_ptr);
-    const lapack_int m = (lapack_int)NDL_SHAPE(lp, 0)[0];
-    const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[1];
-    const lapack_int lda = n;
-    const lapack_int i = LAPACKE_##fLapackFunc(opt->matrix_layout, m, n, a, lda, tau);
-    *info = (int)i;
-  }
-
-  static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) {
-    VALUE a_vnary = Qnil;
-    VALUE kw_args = Qnil;
-    rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args);
-    ID kw_table[1] = { rb_intern("order") };
-    VALUE kw_values[1] = { Qundef };
-    rb_get_kwargs(kw_args, kw_table, 0, 1, kw_values);
-    const int matrix_layout =
  [… remaining removed lines truncated in the source rendering …]
+#define DEF_LINALG_FUNC(tDType, tNAryClass, fLapackFunc) \
+  static void _iter_##fLapackFunc(na_loop_t* const lp) { \
+    tDType* a = (tDType*)NDL_PTR(lp, 0); \
+    tDType* tau = (tDType*)NDL_PTR(lp, 1); \
+    int* info = (int*)NDL_PTR(lp, 2); \
+    struct _geqrf_option* opt = (struct _geqrf_option*)(lp->opt_ptr); \
+    const lapack_int m = (lapack_int)NDL_SHAPE(lp, 0)[0]; \
+    const lapack_int n = (lapack_int)NDL_SHAPE(lp, 0)[1]; \
+    const lapack_int lda = n; \
+    const lapack_int i = LAPACKE_##fLapackFunc(opt->matrix_layout, m, n, a, lda, tau); \
+    *info = (int)i; \
+  } \
+  \
+  static VALUE _linalg_lapack_##fLapackFunc(int argc, VALUE* argv, VALUE self) { \
+    VALUE a_vnary = Qnil; \
+    VALUE kw_args = Qnil; \
+    rb_scan_args(argc, argv, "1:", &a_vnary, &kw_args); \
+    ID kw_table[1] = { rb_intern("order") }; \
+    VALUE kw_values[1] = { Qundef }; \
+    rb_get_kwargs(kw_args, kw_table, 0, 1, kw_values); \
+    const int matrix_layout = \
+      kw_values[0] != Qundef ? get_matrix_layout(kw_values[0]) : LAPACK_ROW_MAJOR; \
+    \
+    if (CLASS_OF(a_vnary) != tNAryClass) { \
+      a_vnary = rb_funcall(tNAryClass, rb_intern("cast"), 1, a_vnary); \
+    } \
+    if (!RTEST(nary_check_contiguous(a_vnary))) { \
+      a_vnary = nary_dup(a_vnary); \
+    } \
+    \
+    narray_t* a_nary = NULL; \
+    GetNArray(a_vnary, a_nary); \
+    const int n_dims = NA_NDIM(a_nary); \
+    if (n_dims != 2) { \
+      rb_raise(rb_eArgError, "input array a must be 2-dimensional"); \
+      return Qnil; \
+    } \
+    \
+    size_t m = NA_SHAPE(a_nary)[0]; \
+    size_t n = NA_SHAPE(a_nary)[1]; \
+    size_t shape[1] = { m < n ? m : n }; \
+    ndfunc_arg_in_t ain[1] = { { OVERWRITE, 2 } }; \
+    ndfunc_arg_out_t aout[2] = { { tNAryClass, 1, shape }, { numo_cInt32, 0 } }; \
+    ndfunc_t ndf = { _iter_##fLapackFunc, NO_LOOP | NDF_EXTRACT, 1, 2, ain, aout }; \
+    struct _geqrf_option opt = { matrix_layout }; \
+    VALUE res = na_ndloop3(&ndf, &opt, 1, a_vnary); \
+    \
+    VALUE ret = rb_ary_concat(rb_ary_new3(1, a_vnary), res); \
+    \
+    RB_GC_GUARD(a_vnary); \
+    \
+    return ret; \
   }
 
 DEF_LINALG_FUNC(double, numo_cDFloat, dgeqrf)
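Unlike geev and gelsd above, the geqrf wrapper prepends the overwritten input to the ndloop result, so the caller gets [a, tau, info] back. A sketch under the same module-path assumption:

    require 'numo/linalg'

    # Assumed entry point: Numo::Linalg::Lapack.dgeqrf.
    # Returns [a, tau, info]; a is overwritten with R in its upper triangle and
    # the Householder reflectors below the diagonal.
    a = Numo::DFloat.new(4, 3).rand
    qr, tau, info = Numo::Linalg::Lapack.dgeqrf(a.dup)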