snow-math 0.0.1

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,2 @@
1
+ #include "maths_local.h"
2
+ #define __SNOW__MATHS_C__
@@ -0,0 +1,257 @@
1
+ /*
2
+ 3D math types & macros
3
+ Written by Noel Cower
4
+
5
+ See COPYING for license information
6
+ */
7
+
8
+ #ifndef __SNOW__MATHS_H__
9
+ #define __SNOW__MATHS_H__
10
+
11
+ #ifdef __cplusplus
12
+ #include <cmath>
13
+ #else
14
+ #include <math.h>
15
+ #endif
16
+
17
+ #ifdef __SNOW__MATHS_C__
18
+ #define S_INLINE
19
+ #else
20
+ #define S_INLINE inline
21
+ #endif
22
+
23
+ #if defined(__cplusplus)
24
+ extern "C" {
25
+ #endif /* __cplusplus */
26
+
27
+ /* Typedefs and macros for specific floating point types */
28
+ #ifdef USE_FLOAT
29
+ typedef float s_float_t;
30
+ #define s_cos cosf
31
+ #define s_sin sinf
32
+ #define s_tan tanf
33
+ #define s_acos acosf
34
+ #define s_asin asinf
35
+ #define s_atan atanf
36
+ #define s_fabs fabsf
37
+ #else
38
+ typedef double s_float_t;
39
+ #define s_cos cos
40
+ #define s_sin sin
41
+ #define s_tan tan
42
+ #define s_acos acos
43
+ #define s_asin asin
44
+ #define s_atan atan
45
+ #define s_fabs fabs
46
+ #endif
47
+
48
+ typedef s_float_t mat4_t[16];
49
+ typedef s_float_t vec4_t[4];
50
+ typedef s_float_t vec3_t[3];
51
+ typedef s_float_t vec2_t[2];
52
+ typedef s_float_t quat_t[4];
53
+
54
+
55
+ /*!
56
+ * Floating point epsilon for double comparisons. This is, more or less, the
57
+ * limit to accuracy. If the difference between two floating point values is
58
+ * less than the epsilon, they are for all intents and purposes the same.
59
+ *
60
+ * It should be stressed that this is absolutely not an accurate epsilon.
61
+ */
62
+ #define S_FLOAT_EPSILON (1.0e-9)
63
+
64
+ #define S_DEG2RAD (0.01745329)
65
+ #define S_RAD2DEG (57.2957795)
66
+
67
+
68
+
69
+ /* Float comparison functions */
70
+
71
+ S_INLINE int float_is_zero(const s_float_t x)
72
+ {
73
+ return (s_fabs(x) < S_FLOAT_EPSILON);
74
+ }
75
+
76
+ S_INLINE int float_equals(const s_float_t x, const s_float_t y)
77
+ {
78
+ return float_is_zero(x - y);
79
+ }
80
+
81
+
82
+
83
+ /*==============================================================================
84
+
85
+ 3-Component Vector (vec3_t)
86
+
87
+ ==============================================================================*/
88
+
89
+ extern const vec3_t g_vec3_zero;
90
+ extern const vec3_t g_vec3_one;
91
+
92
+ void vec3_copy(const vec3_t in, vec3_t out);
93
+ void vec3_set(s_float_t x, s_float_t y, s_float_t z, vec3_t v);
94
+
95
+ /*!
96
+ * Gets the squared length of a vector. Useful for approximations and when
97
+ * you don't need the actual magnitude.
98
+ */
99
+ s_float_t vec3_length_squared(const vec3_t v);
100
+ /*!
101
+ * Gets the length/magnitude of a vector.
102
+ */
103
+ s_float_t vec3_length(const vec3_t v);
104
+ void vec3_normalize(const vec3_t in, vec3_t out);
105
+
106
+ void vec3_subtract(const vec3_t left, const vec3_t right, vec3_t out);
107
+ void vec3_add(const vec3_t left, const vec3_t right, vec3_t out);
108
+ void vec3_multiply(const vec3_t left, const vec3_t right, vec3_t out);
109
+ void vec3_negate(const vec3_t v, vec3_t out);
110
+ void vec3_inverse(const vec3_t v, vec3_t out);
111
+
112
+ void vec3_cross_product(const vec3_t left, const vec3_t right, vec3_t out);
113
+ s_float_t vec3_dot_product(const vec3_t left, const vec3_t right);
114
+
115
+ void vec3_scale(const vec3_t v, s_float_t scalar, vec3_t out);
116
+ int vec3_divide(const vec3_t v, s_float_t divisor, vec3_t out);
117
+
118
+ int vec3_equals(const vec3_t left, const vec3_t right);
119
+
120
+
121
+
122
+ /*==============================================================================
123
+
124
+ 4-Component Vector (vec4_t)
125
+
126
+ ==============================================================================*/
127
+
128
+ extern const vec4_t g_vec4_zero;
129
+ extern const vec4_t g_vec4_one;
130
+ extern const vec4_t g_vec4_identity;
131
+
132
+ void vec4_copy(const vec4_t in, vec4_t out);
133
+ void vec4_set(s_float_t x, s_float_t y, s_float_t z, s_float_t w, vec4_t v);
134
+
135
+ /*!
136
+ * Gets the squared length of a vector. Useful for approximations and when
137
+ * you don't need the actual magnitude.
138
+ */
139
+ s_float_t vec4_length_squared(const vec4_t v);
140
+ /*!
141
+ * Gets the length/magnitude of a vector.
142
+ */
143
+ s_float_t vec4_length(const vec4_t v);
144
+ void vec4_normalize(const vec4_t in, vec4_t out);
145
+
146
+ void vec4_subtract(const vec4_t left, const vec4_t right, vec4_t out);
147
+ void vec4_add(const vec4_t left, const vec4_t right, vec4_t out);
148
+ void vec4_multiply(const vec4_t left, const vec4_t right, vec4_t out);
149
+ void vec4_negate(const vec4_t v, vec4_t out);
150
+ void vec4_inverse(const vec4_t v, vec4_t out);
151
+
152
+ s_float_t vec4_dot_product(const vec4_t left, const vec4_t right);
153
+
154
+ void vec4_scale(const vec4_t v, s_float_t scalar, vec4_t out);
155
+ int vec4_divide(const vec4_t v, s_float_t divisor, vec4_t out);
156
+
157
+ int vec4_equals(const vec4_t left, const vec4_t right);
158
+
159
+
160
+
161
+ /*==============================================================================
162
+
163
+ 4x4 Matrix (mat4_t)
164
+
165
+ ==============================================================================*/
166
+
167
+ extern const mat4_t g_mat4_identity;
168
+
169
+ void mat4_identity(mat4_t out);
170
+ void mat4_copy(const mat4_t in, mat4_t out);
171
+ void mat4_set(
172
+ s_float_t m00, s_float_t m01, s_float_t m02, s_float_t m03,
173
+ s_float_t m04, s_float_t m05, s_float_t m06, s_float_t m07,
174
+ s_float_t m08, s_float_t m09, s_float_t m10, s_float_t m11,
175
+ s_float_t m12, s_float_t m13, s_float_t m14, s_float_t m15,
176
+ mat4_t out);
177
+
178
+ void mat4_set_axes3(const vec3_t x, const vec3_t y, const vec3_t z, const vec3_t w, mat4_t out);
179
+ void mat4_get_axes3(const mat4_t m, vec3_t x, vec3_t y, vec3_t z, vec3_t w);
180
+ void mat4_set_axes4(const vec4_t x, const vec4_t y, const vec4_t z, const vec4_t w, mat4_t out);
181
+ void mat4_get_axes4(const mat4_t m, vec4_t x, vec4_t y, vec4_t z, vec4_t w);
182
+
183
+ /*! Builds a rotation matrix with the given angle and axis. */
184
+ void mat4_rotation(s_float_t angle, s_float_t x, s_float_t y, s_float_t z, mat4_t out);
185
+ void mat4_frustum(s_float_t left, s_float_t right, s_float_t bottom, s_float_t top, s_float_t near, s_float_t far, mat4_t out);
186
+ void mat4_orthographic(s_float_t left, s_float_t right, s_float_t bottom, s_float_t top, s_float_t near, s_float_t far, mat4_t out);
187
+ void mat4_perspective(s_float_t fov_y, s_float_t aspect, s_float_t near, s_float_t far, mat4_t out);
188
+ void mat4_look_at(const vec3_t eye, const vec3_t center, const vec3_t up, mat4_t out);
189
+ void mat4_from_quat(const quat_t quat, mat4_t out);
190
+
191
+ void mat4_get_row4(const mat4_t in, int row, vec4_t out);
192
+ void mat4_get_row3(const mat4_t in, int row, vec3_t out);
193
+ void mat4_get_column4(const mat4_t in, int column, vec4_t out);
194
+ void mat4_get_column3(const mat4_t in, int column, vec3_t out);
195
+
196
+ void mat4_set_row4(int row, const vec4_t value, mat4_t inout);
197
+ void mat4_set_row3(int row, const vec3_t value, mat4_t inout);
198
+ void mat4_set_column4(int column, const vec4_t value, mat4_t inout);
199
+ void mat4_set_column3(int column, const vec3_t value, mat4_t inout);
200
+
201
+ int mat4_equals(const mat4_t left, const mat4_t right);
202
+
203
+ void mat4_transpose(const mat4_t in, mat4_t out);
204
+ void mat4_inverse_orthogonal(const mat4_t in, mat4_t out);
205
+ /*!
206
+ * Writes the inverse affine of the input matrix to the output matrix.
207
+ * \returns Non-zero if an inverse affine matrix can be created, otherwise
208
+ * zero if not. If zero, the output matrix is the identity matrix.
209
+ */
210
+ int mat4_inverse_affine(const mat4_t in, mat4_t out);
211
+ void mat4_adjoint(const mat4_t in, mat4_t out);
212
+ s_float_t mat4_determinant(const mat4_t m);
213
+ int mat4_inverse_general(const mat4_t in, mat4_t out);
214
+
215
+ /*! Translates the given matrix by <X, Y, Z>. */
216
+ void mat4_translate(s_float_t x, s_float_t y, s_float_t z, const mat4_t in, mat4_t out);
217
+ void mat4_translation(s_float_t x, s_float_t y, s_float_t z, mat4_t out);
218
+ void mat4_multiply(const mat4_t left, const mat4_t right, mat4_t out);
219
+ void mat4_multiply_vec4(const mat4_t left, const vec4_t right, vec4_t out);
220
+ void mat4_transform_vec3(const mat4_t left, const vec3_t right, vec3_t out);
221
+ void mat4_rotate_vec3(const mat4_t left, const vec3_t right, vec3_t out);
222
+ void mat4_inv_rotate_vec3(const mat4_t left, const vec3_t right, vec3_t out);
223
+ void mat4_scale(const mat4_t in, s_float_t x, s_float_t y, s_float_t z, mat4_t out);
224
+
225
+
226
+
227
+ /*==============================================================================
228
+
229
+ Quaternion (quat_t)
230
+
231
+ ==============================================================================*/
232
+
233
+ extern const quat_t g_quat_identity;
234
+
235
+ /* note: all methods assume that input quaternions are unit quaternions */
236
+
237
+ void quat_set(s_float_t x, s_float_t y, s_float_t z, s_float_t w, quat_t out);
238
+ void quat_copy(const quat_t in, quat_t out);
239
+ void quat_identity(quat_t q);
240
+
241
+ void quat_inverse(const quat_t in, quat_t out);
242
+ void quat_negate(const quat_t in, quat_t out);
243
+
244
+ void quat_multiply(const quat_t left, const quat_t right, quat_t out);
245
+ void quat_multiply_vec3(const quat_t left, const vec3_t right, vec3_t out);
246
+
247
+ void quat_from_angle_axis(s_float_t angle, s_float_t x, s_float_t y, s_float_t z, quat_t out);
248
+ void quat_from_mat4(const mat4_t mat, quat_t out);
249
+
250
+ void quat_slerp(const quat_t from, const quat_t to, s_float_t delta, quat_t out);
251
+
252
+ #if defined(__cplusplus)
253
+ }
254
+ #endif /* __cplusplus */
255
+
256
+ #endif /* end of include guard: __SNOW__MATHS_H__ */
257
+
@@ -0,0 +1,187 @@
1
+ /*
2
+ Quaternion maths
3
+ Written by Noel Cower
4
+
5
+ See COPYING for license information
6
+ */
7
+
8
+ #define __SNOW__QUAT_C__
9
+
10
+ #include "maths_local.h"
11
+
12
+ #if defined(__cplusplus)
13
+ extern "C"
14
+ {
15
+ #endif /* __cplusplus */
16
+
17
+ const quat_t g_quat_identity = {0.0, 0.0, 0.0, 1.0};
18
+
19
+ void quat_set(s_float_t x, s_float_t y, s_float_t z, s_float_t w, quat_t out)
20
+ {
21
+ out[0] = x;
22
+ out[1] = y;
23
+ out[2] = z;
24
+ out[3] = w;
25
+ }
26
+
27
+ void quat_copy(const quat_t in, quat_t out)
28
+ {
29
+ if (in == out) return;
30
+
31
+ out[0] = in[0];
32
+ out[1] = in[1];
33
+ out[2] = in[2];
34
+ out[3] = in[3];
35
+ }
36
+
37
+ void quat_identity(quat_t q)
38
+ {
39
+ q[0] = q[1] = q[2] = 0.0;
40
+ q[3] = 1.0;
41
+ }
42
+
43
+ void quat_inverse(const quat_t in, quat_t out)
44
+ {
45
+ out[0] = -in[0];
46
+ out[1] = -in[1];
47
+ out[2] = -in[2];
48
+ out[3] = in[3];
49
+ }
50
+
51
+ void quat_negate(const quat_t in, quat_t out)
52
+ {
53
+ out[0] = -in[0];
54
+ out[1] = -in[1];
55
+ out[2] = -in[2];
56
+ out[3] = -in[3];
57
+ }
58
+
59
/*
 * Composes two quaternions and writes the product to out. The result is
 * accumulated in temporaries first, so out may alias left or right.
 *
 * Scalar part: w1*w2 - dot(v1, v2). Vector part: w1*v2 + w2*v1 + (v2 x v1).
 * NOTE(review): the cross product is taken as cross(right, left), which is
 * the reverse of the textbook Hamilton product's cross(left, right) -- this
 * flips the composition order. Presumably intentional for this library's
 * convention; confirm against callers before changing.
 */
void quat_multiply(const quat_t left, const quat_t right, quat_t out)
{
  s_float_t w;
  s_float_t w1, w2;
  vec3_t t1, t2;
  w1 = left[3];
  w2 = right[3];

  /* Scalar part: product of scalars minus dot of vector parts. */
  w = (w1 * w2) - vec3_dot_product(left, right);
  /* t1 = w1 * v2 */
  vec3_copy(right, t1);
  vec3_scale(t1, w1, t1);
  /* t2 = w2 * v1 */
  vec3_copy(left, t2);
  vec3_scale(t2, w2, t2);
  vec3_add(t1, t2, t1);
  /* t1 += cross(v2, v1) -- see reversed-order note above. */
  vec3_cross_product(right, left, t2);
  vec3_add(t1, t2, t1);
  /* Commit the result only after all reads of left/right are done. */
  vec3_copy(t1, out);
  out[3] = w;
}
78
+
79
/*
 * Rotates the vector right by the quaternion left, writing the result to
 * out, using the cross-product expansion:
 *   v' = v + 2*w*(q x v) + 2*(q x (q x v))
 * Assumes left is a unit quaternion (per the note in maths_local.h);
 * the result is not normalized here.
 */
void quat_multiply_vec3(const quat_t left, const vec3_t right, vec3_t out)
{
  vec3_t lxr_cross, lxlr_cross;
  vec3_cross_product(left, right, lxr_cross);        /* q x v */
  vec3_cross_product(left, lxr_cross, lxlr_cross);   /* q x (q x v) */
  vec3_scale(lxr_cross, 2.0 * left[3], lxr_cross);   /* 2w(q x v) */
  vec3_scale(lxlr_cross, 2.0, lxlr_cross);           /* 2(q x (q x v)) */
  vec3_add(lxr_cross, lxlr_cross, lxr_cross);
  vec3_add(right, lxr_cross, out);
}
89
+
90
/*
 * Builds a rotation quaternion from an angle in degrees and an axis
 * <x, y, z>. The axis does not need to be pre-normalized; it is
 * normalized here before use.
 */
void quat_from_angle_axis(s_float_t angle, s_float_t x, s_float_t y, s_float_t z, quat_t out)
{
  vec3_t v = {x, y, z};
  vec3_normalize(v, v);

  /* Convert degrees to radians and halve: quaternions encode half-angles. */
  angle *= (S_DEG2RAD * 0.5);
  const s_float_t s = s_sin(angle);

  out[0] = v[0] * s;
  out[1] = v[1] * s;
  out[2] = v[2] * s;
  out[3] = s_cos(angle);
}
103
+
104
+ void quat_from_mat4(const mat4_t mat, quat_t out)
105
+ {
106
+ s_float_t trace, r;
107
+ int index;
108
+
109
+ trace = mat[0] + mat[5] + mat[10];
110
+ if (trace > 0) {
111
+ r = sqrtf(trace + 1.0);
112
+ out[3] = r * 0.5;
113
+ r = 0.5 / r;
114
+ out[0] = (mat[9] - mat[6]) * r;
115
+ out[1] = (mat[2] - mat[8]) * r;
116
+ out[2] = (mat[4] - mat[1]) * r;
117
+ } else {
118
+ index = 0;
119
+ if (mat[5] > mat[0]) index = 1;
120
+ if (mat[10] > mat[index * 5]) index = 2;
121
+
122
+ r = sqrtf(mat[index * 5] - (mat[((index + 1)) % 3 * 5] + mat[((index + 2) % 3) * 5]) + 1.0);
123
+ out[index] = r * 0.5;
124
+
125
+ if (r) r = 0.5 / r;
126
+
127
+ switch (index)
128
+ {
129
+ case 0:
130
+ out[1] = (mat[1] + mat[4]) * r;
131
+ out[2] = (mat[2] + mat[8]) * r;
132
+ out[3] = (mat[9] - mat[6]) * r;
133
+ break;
134
+
135
+ case 1:
136
+ out[3] = (mat[2] + mat[8]) * r;
137
+ out[2] = (mat[9] + mat[6]) * r;
138
+ out[0] = (mat[1] - mat[4]) * r;
139
+ break;
140
+
141
+ case 2:
142
+ out[3] = (mat[4] + mat[1]) * r;
143
+ out[0] = (mat[2] + mat[8]) * r;
144
+ out[1] = (mat[6] - mat[9]) * r;
145
+ break;
146
+ }
147
+ }
148
+ }
149
+
150
+ void quat_slerp(const quat_t from, const quat_t to, s_float_t delta, quat_t out)
151
+ {
152
+ s_float_t dot, scale0, scale1, angle, inverse_sin;
153
+ s_float_t dx, dy, dz, dw;
154
+
155
+ dot = vec4_dot_product((const s_float_t *)from, (const s_float_t *)to);
156
+
157
+ if (dot < 0.0) {
158
+ dot = -dot;
159
+ dx = -to[0];
160
+ dy = -to[1];
161
+ dz = -to[2];
162
+ dw = -to[3];
163
+ } else {
164
+ dx = to[0];
165
+ dy = to[1];
166
+ dz = to[2];
167
+ dw = to[3];
168
+ }
169
+
170
+ delta = fminf(fmaxf(delta, 0.0), 1.0);
171
+
172
+ angle = s_acos(dot);
173
+ inverse_sin = 1.0 / s_sin(dot);
174
+
175
+ scale0 = s_sin((1.0 - delta) * angle) * inverse_sin;
176
+ scale1 = s_sin(delta * angle) * inverse_sin;
177
+
178
+ out[0] = (from[0] * scale0) + (dx * scale1);
179
+ out[1] = (from[1] * scale0) + (dy * scale1);
180
+ out[2] = (from[2] * scale0) + (dz * scale1);
181
+ out[3] = (from[3] * scale0) + (dw * scale1);
182
+ }
183
+
184
+ #if defined(__cplusplus)
185
+ }
186
+ #endif /* __cplusplus */
187
+
@@ -0,0 +1,2117 @@
1
+ /*
2
+ Maths bindings for Ruby
3
+ Written by Noel Cower
4
+
5
+ See COPYING for license information
6
+ */
7
+
8
+ #include "maths_local.h"
9
+ #include "ruby.h"
10
+
11
+ /*
12
+ Generates a label within the current function with the given name. Should be
13
+ as unique as anyone needs.
14
+ */
15
+ #define SM_LABEL(NAME) __FUNCTION__##NAME
16
+
17
+ /*
18
+ Returns the Ruby class value's identifier
19
+ */
20
+ #define SM_KLASS(TYPE) s_sm_##TYPE##_klass
21
+
22
+ /*
23
+ Returns the wrap identifier for a given type. Only useful in functions
24
+ generated by macros. The wrapping function will always be sm_wrap_TYPE.
25
+ */
26
+ #define SM_WRAP(TYPE) sm_wrap_##TYPE
27
+
28
+ /*
29
+ Same as SM_WRAP but for unwrapping. Will always be sm_unwrap_TYPE
30
+ */
31
+ #define SM_UNWRAP(TYPE) sm_unwrap_##TYPE
32
+
33
+ /*
34
+ Defines the static class value for a type.
35
+ */
36
+ #define DEF_SM_TYPE_KLASS(TYPE) static VALUE SM_KLASS(TYPE) = Qnil
37
+
38
+ /*
39
+ Returns whether a given ruby value is a kind of RB_TYPE (a Ruby value for a
40
+ class).
41
+ */
42
+ #define SM_RB_IS_A(RB_VALUE, RB_TYPE) (RTEST(rb_obj_is_kind_of((RB_VALUE), (RB_TYPE))))
43
+
44
+ /*
45
+ Wrapper around SM_RB_IS_A that checks for SM_RB_IS_A(value, SM_KLASS(type)).
46
+ Vaguely convenient.
47
+ */
48
+ #define SM_IS_A(SM_VALUE, SM_TYPE) SM_RB_IS_A(SM_VALUE, SM_KLASS(SM_TYPE))
49
+
50
+ /*
51
+ Convenience macro to raise an exception if a value isn't of a given type. Only
52
+ useful for a few things, as vec3, vec4, and quat are semi-compatible due to
53
+ having similar sizes. So any vec4/quat can be used as a vec3 in some cases and
54
+ any quat can be used in place of a vec4 except in the case of a few binary ops
55
+ and such (because the binary op macro doesn't check for much -- so that might
56
+ need rewriting later).
57
+ */
58
+ #define SM_RAISE_IF_NOT_TYPE(SM_VALUE, SM_TYPE) do { \
59
+ if (!SM_IS_A(SM_VALUE, SM_TYPE)) { \
60
+ rb_raise(rb_eTypeError, "Expected %s, got %s", \
61
+ rb_class2name(SM_KLASS(SM_TYPE)), \
62
+ rb_obj_classname((SM_VALUE))); \
63
+ } } while (0)
64
+
65
+ /* Declares the wrap function for a type. */
66
+ #define DECL_SM_WRAP_OP(TYPE) \
67
+ static VALUE SM_WRAP(TYPE) (const TYPE##_t value, VALUE klass)
68
+
69
+ /* Declares the unwrap function for a type. */
70
+ #define DECL_SM_UNWRAP_OP(TYPE) \
71
+ static TYPE##_t * SM_UNWRAP(TYPE) (VALUE sm_value, TYPE##_t store)
72
+
73
+
74
+ /*
75
+ Note about generated wrap / unwrap functions:
76
+ Neither function actually cares about its input. It will not verify that a
77
+ quat is passed to sm_unwrap_quat and you can similarly pass a vec4 to
78
+ sm_wrap_vec3 or sm_wrap_quat, so the types are mostly interchangeable provided
79
+ they're of the same size.
80
+
81
+ This allows some flexibility when passing Quats to Vec4 functions and so on,
82
+ though it's a better idea to simply create a new Vec4 or Quat when you need to
83
+ pass one to another's function. The conversion is easy enough, so it's not a
84
+ huge deal.
85
+ */
86
+
87
+ /* Defines the wrap function for a type. */
88
+ #define DEF_SM_WRAP_OP(TYPE) \
89
+ DECL_SM_WRAP_OP(TYPE) \
90
+ { \
91
+ TYPE##_t *copy; \
92
+ if (!RTEST(klass)) { \
93
+ klass = SM_KLASS(TYPE); \
94
+ } \
95
+ VALUE sm_wrapped = Data_Make_Struct(klass, TYPE##_t, 0, free, copy); \
96
+ if (value) { \
97
+ TYPE##_copy(value, *copy); \
98
+ } \
99
+ return sm_wrapped; \
100
+ }
101
+
102
+ /* Defines the unwrap function for a type. */
103
+ #define DEF_SM_UNWRAP_OP(TYPE) \
104
+ DECL_SM_UNWRAP_OP(TYPE) \
105
+ { \
106
+ TYPE##_t *value; \
107
+ Data_Get_Struct(sm_value, TYPE##_t, value); \
108
+ if(store) TYPE##_copy(*value, store); \
109
+ return value; \
110
+ }
111
+
112
+ /*
113
+ Defines a binary op for a given type (defined as
114
+ TYPE_FUNC(input_rhs, input_lhs, output) where input_rhs is self, input_lhs
115
+ is an arbitrary value, and output is either nil or a value of OTYPE to store
116
+ the result of the op in).
117
+
118
+ Binary ops are strict about the input/output types. You cannot pass a vec4 to
119
+ a quat function and vice versa. This is slightly contrary to other functions
120
+ that are hand-written for the type because I obviously know more about the
121
+ input than I do in the case of a macro-generated function. In the future, I'll
122
+ probably replace all generated functions with hand-written ones, but in the
123
+ meantime, just be aware that you can get exceptions for otherwise compatible
124
+ types.
125
+
126
+ By default, if no output object is provided, a new one is created and
127
+ returned. If an output object is provided, it is returned.
128
+ */
129
+ #define DEF_SM_BINARY_OP(FUNC, TYPE, RHSTYPE, OTYPE) \
130
+ static VALUE sm_##TYPE##_##FUNC (int argc, VALUE *argv, VALUE sm_self) \
131
+ { \
132
+ VALUE sm_rhs; \
133
+ VALUE sm_out; \
134
+ TYPE##_t *self; \
135
+ RHSTYPE##_t *rhs; \
136
+ rb_scan_args(argc, argv, "11", &sm_rhs, &sm_out); \
137
+ self = SM_UNWRAP(TYPE)(sm_self, NULL); \
138
+ SM_RAISE_IF_NOT_TYPE(sm_rhs, RHSTYPE); \
139
+ rhs = SM_UNWRAP(RHSTYPE)(sm_rhs, NULL); \
140
+ if (argc == 2) { \
141
+ if (!RTEST(sm_out)) { \
142
+ goto SM_LABEL(skip_output); \
143
+ } \
144
+ SM_RAISE_IF_NOT_TYPE(sm_out, OTYPE); \
145
+ OTYPE##_t *output = SM_UNWRAP(OTYPE)(sm_out, NULL); \
146
+ TYPE##_##FUNC (*self, *rhs, *output); \
147
+ } else if (argc == 1) { \
148
+ SM_LABEL(skip_output): { \
149
+ OTYPE##_t output; \
150
+ TYPE##_##FUNC (*self, *rhs, output); \
151
+ sm_out = SM_WRAP(OTYPE)(output, (SM_KLASS(OTYPE) == SM_KLASS(TYPE) \
152
+ ? rb_obj_class(sm_self) \
153
+ : (SM_KLASS(OTYPE) == SM_KLASS(RHSTYPE) \
154
+ ? rb_obj_class(sm_rhs) \
155
+ : SM_KLASS(OTYPE)))); \
156
+ rb_obj_call_init(sm_out, 0, 0); \
157
+ }} else { \
158
+ rb_raise(rb_eArgError, "Invalid number of arguments to " #FUNC); \
159
+ } \
160
+ return sm_out; \
161
+ }
162
+
163
+ /*
164
+ Defines a unary op for the given type (defined as TYPE_FUNC(input, output)
165
+ where input is self and output is either nil or an arbitrary value of type
166
+ OTYPE to store the result in).
167
+
168
+ By default, if no output object is provided, a new one is created and
169
+ returned. If an output object is provided, it is returned.
170
+ */
171
+ #define DEF_SM_UNARY_OP(FUNC, TYPE, OTYPE) \
172
+ static VALUE sm_##TYPE##_##FUNC (int argc, VALUE *argv, VALUE sm_self) \
173
+ { \
174
+ VALUE sm_out; \
175
+ TYPE##_t *self; \
176
+ rb_scan_args(argc, argv, "01", &sm_out); \
177
+ self = SM_UNWRAP(TYPE)(sm_self, NULL); \
178
+ if (argc == 1) { \
179
+ if (!RTEST(sm_out)) { \
180
+ goto SM_LABEL(skip_output); \
181
+ } \
182
+ SM_RAISE_IF_NOT_TYPE(sm_out, OTYPE); \
183
+ OTYPE##_t *output = SM_UNWRAP(OTYPE)(sm_out, NULL); \
184
+ TYPE##_##FUNC (*self, *output); \
185
+ } else if (argc == 0) { \
186
+ SM_LABEL(skip_output): { \
187
+ OTYPE##_t output; \
188
+ TYPE##_##FUNC (*self, output); \
189
+ sm_out = SM_WRAP(OTYPE)(output, (SM_KLASS(OTYPE) == SM_KLASS(TYPE) \
190
+ ? rb_obj_class(sm_self) \
191
+ : SM_KLASS(OTYPE))); \
192
+ rb_obj_call_init(sm_out, 0, 0); \
193
+ }} else { \
194
+ rb_raise(rb_eArgError, "Invalid number of arguments to " #FUNC); \
195
+ } \
196
+ return sm_out; \
197
+ }
198
+
199
+ /* Deref/fetch op for a given type. Does bounds-checking. */
200
+ #define DEF_SM_FETCH_OP(TYPE) \
201
+ static VALUE sm_##TYPE##_fetch (VALUE sm_self, VALUE sm_index) \
202
+ { \
203
+ static const int max_index = sizeof(TYPE##_t) / sizeof(s_float_t); \
204
+ const TYPE##_t *self = SM_UNWRAP(TYPE)(sm_self, NULL); \
205
+ int index = NUM2INT(sm_index); \
206
+ if (index < 0 || index >= max_index) { \
207
+ rb_raise(rb_eRangeError, \
208
+ "Index %d is out of bounds, must be from 0 through %d", index, max_index - 1); \
209
+ } \
210
+ return rb_float_new(self[0][NUM2INT(sm_index)]); \
211
+ }
212
+
213
+ /* Deref-assignment/store op for a given type. Does bounds-checking. */
214
+ #define DEF_SM_STORE_OP(TYPE) \
215
+ static VALUE sm_##TYPE##_store (VALUE sm_self, VALUE sm_index, VALUE sm_value) \
216
+ { \
217
+ static const int max_index = sizeof(TYPE##_t) / sizeof(s_float_t); \
218
+ TYPE##_t *self = SM_UNWRAP(TYPE)(sm_self, NULL); \
219
+ int index = NUM2INT(sm_index); \
220
+ if (index < 0 || index >= max_index) { \
221
+ rb_raise(rb_eRangeError, \
222
+ "Index %d is out of bounds, must be from 0 through %d", index, max_index - 1); \
223
+ } \
224
+ self[0][index] = (s_float_t)rb_num2dbl(sm_value); \
225
+ return sm_value; \
226
+ }
227
+
228
+ /* Size op. Returns the size in bytes of a given type. */
229
+ #define DEF_SM_SIZE_OP(TYPE) \
230
+ static VALUE sm_##TYPE##_size (VALUE self) \
231
+ { \
232
+ return SIZET2NUM(sizeof(TYPE##_t)); \
233
+ }
234
+
235
+ /* Length op. Returns the size in s_float_t elements of the given type. */
236
+ #define DEF_SM_LENGTH_OP(TYPE) \
237
+ static VALUE sm_##TYPE##_length (VALUE self) \
238
+ { \
239
+ return SIZET2NUM(sizeof(TYPE##_t) / sizeof(s_float_t)); \
240
+ }
241
+
242
+
243
+
244
+ /*
245
+ Array types -- optional if BUILD_ARRAY_TYPE isn't defined.
246
+
247
+ All array types are defined as something like this in Ruby:
248
+
249
+ class TypeArray
250
+ def fetch(index) -> Type
251
+ Returns an Object of Type that references array data (non-const)
252
+
253
+ def store(index, value) -> value
254
+ Copies value object of Type's data to the array's data. This is a nop if
255
+ the value already references the array's data.
256
+ end
257
+ */
258
+ #define BUILD_ARRAY_TYPE
259
+ #ifdef BUILD_ARRAY_TYPE
260
+
261
+ static ID kRB_IVAR_MATHARRAY_LENGTH;
262
+ static ID kRB_IVAR_MATHARRAY_SOURCE;
263
+
264
/*
 * Returns the element count of a math-type array object, as recorded in
 * its length instance variable when the array was constructed
 * (see sm_*_array_new). Shared by all array classes as their #length.
 */
static VALUE sm_mathtype_array_length(VALUE sm_self)
{
  return rb_ivar_get(sm_self, kRB_IVAR_MATHARRAY_LENGTH);
}
268
+
269
+ #define SM_ARR_KLASS(ELEM_TYPE) SM_KLASS(ELEM_TYPE##_array)
270
+
271
+ #define REG_SM_ARR_TYPE(ELEM_TYPE, NAME) do { \
272
+ VALUE klass_ = \
273
+ SM_ARR_KLASS(ELEM_TYPE) = rb_define_class_under(s_sm_snowmath_mod, NAME, rb_cObject); \
274
+ rb_define_singleton_method(klass_, "new", sm_##ELEM_TYPE##_array_new, 1); \
275
+ rb_define_method(klass_, "fetch", sm_##ELEM_TYPE##_array_fetch, 1); \
276
+ rb_define_method(klass_, "store", sm_##ELEM_TYPE##_array_store, 2); \
277
+ rb_define_method(klass_, "size", sm_##ELEM_TYPE##_array_size, 0); \
278
+ rb_define_method(klass_, "length", sm_mathtype_array_length, 0); \
279
+ rb_define_method(klass_, "address", sm_get_address, 0); \
280
+ } while (0)
281
+
282
+ #define DEF_SM_ARR_TYPE(ELEM_TYPE) \
283
+ static VALUE SM_ARR_KLASS(ELEM_TYPE) = Qnil; \
284
+ \
285
+ static VALUE sm_##ELEM_TYPE##_array_new(VALUE sm_self, VALUE sm_length) \
286
+ { \
287
+ int length = 0; \
288
+ ELEM_TYPE##_t *arr; \
289
+ VALUE sm_type_array; \
290
+ int copy_array = 0; \
291
+ if ((copy_array = SM_IS_A(sm_length, ELEM_TYPE##_array))) { \
292
+ length = NUM2INT(sm_mathtype_array_length(sm_length)); \
293
+ } else { \
294
+ length = NUM2INT(sm_length); \
295
+ } \
296
+ if (length <= 0) { \
297
+ return Qnil; \
298
+ } \
299
+ arr = ALLOC_N(ELEM_TYPE##_t, length); \
300
+ if (copy_array) { \
301
+ const ELEM_TYPE##_t *source; \
302
+ Data_Get_Struct(sm_length, ELEM_TYPE##_t, source); \
303
+ MEMCPY(arr, source, ELEM_TYPE##_t, length); \
304
+ sm_length = sm_mathtype_array_length(sm_length); \
305
+ } \
306
+ sm_type_array = Data_Wrap_Struct(sm_self, 0, free, arr); \
307
+ rb_ivar_set(sm_type_array, kRB_IVAR_MATHARRAY_LENGTH, sm_length); \
308
+ rb_obj_call_init(sm_type_array, 0, 0); \
309
+ return sm_type_array; \
310
+ } \
311
+ \
312
+ static VALUE sm_##ELEM_TYPE##_array_fetch(VALUE sm_self, VALUE sm_index) \
313
+ { \
314
+ ELEM_TYPE##_t *arr; \
315
+ int length = NUM2INT(sm_mathtype_array_length(sm_self)); \
316
+ int index = NUM2INT(sm_index); \
317
+ VALUE sm_inner; \
318
+ if (index < 0 || index >= length) { \
319
+ rb_raise(rb_eRangeError, \
320
+ "Index %d out of bounds for array with length %d", \
321
+ index, length); \
322
+ } \
323
+ Data_Get_Struct(sm_self, ELEM_TYPE##_t, arr); \
324
+ sm_inner = Data_Wrap_Struct(SM_KLASS(ELEM_TYPE), 0, 0, arr[index]); \
325
+ rb_ivar_set(sm_inner, kRB_IVAR_MATHARRAY_SOURCE, sm_self); \
326
+ return sm_inner; \
327
+ } \
328
+ \
329
+ static VALUE sm_##ELEM_TYPE##_array_store(VALUE sm_self, VALUE sm_index, VALUE sm_value) \
330
+ { \
331
+ ELEM_TYPE##_t *arr; \
332
+ ELEM_TYPE##_t *value; \
333
+ int length = NUM2INT(sm_mathtype_array_length(sm_self)); \
334
+ int index = NUM2INT(sm_index); \
335
+ if (index < 0 || index >= length) { \
336
+ rb_raise(rb_eRangeError, \
337
+ "Index %d out of bounds for array with length %d", \
338
+ index, length); \
339
+ } else if (!SM_IS_A(sm_value, ELEM_TYPE)) { \
340
+ rb_raise(rb_eTypeError, \
341
+ "Invalid value to store: expected %s, got %s", \
342
+ rb_class2name(SM_KLASS(ELEM_TYPE)), \
343
+ rb_obj_classname(sm_value)); \
344
+ } \
345
+ Data_Get_Struct(sm_self, ELEM_TYPE##_t, arr); \
346
+ value = SM_UNWRAP(ELEM_TYPE)(sm_value, NULL); \
347
+ if ((void *)value >= (void *)arr && (void *)value < (void *)(&arr[length])) { \
348
+ return sm_value; \
349
+ } \
350
+ ELEM_TYPE##_copy(*value, arr[index]); \
351
+ return sm_value; \
352
+ } \
353
+ \
354
+ static VALUE sm_##ELEM_TYPE##_array_size(VALUE sm_self) \
355
+ { \
356
+ int length = NUM2INT(sm_mathtype_array_length(sm_self)); \
357
+ return SIZET2NUM(length * sizeof(ELEM_TYPE##_t)); \
358
+ }
359
+
360
+ #endif
361
+
362
+
363
+ /*==============================================================================
364
+
365
+ Static Ruby class / module values
366
+
367
+ ==============================================================================*/
368
+
369
+ static VALUE s_sm_snowmath_mod = Qnil;
370
+ DEF_SM_TYPE_KLASS(vec3);
371
+ DEF_SM_TYPE_KLASS(vec4);
372
+ DEF_SM_TYPE_KLASS(quat);
373
+ DEF_SM_TYPE_KLASS(mat4);
374
+
375
+ // Declare wrapping operations
376
+ DECL_SM_UNWRAP_OP(vec3);
377
+ DECL_SM_UNWRAP_OP(vec4);
378
+ DECL_SM_UNWRAP_OP(quat);
379
+ DECL_SM_UNWRAP_OP(mat4);
380
+
381
+ // Declare unwrapping operations
382
+ DECL_SM_WRAP_OP(vec3);
383
+ DECL_SM_WRAP_OP(vec4);
384
+ DECL_SM_WRAP_OP(quat);
385
+ DECL_SM_WRAP_OP(mat4);
386
+
387
+
388
+
389
+ /*==============================================================================
390
+
391
+ Array types
392
+
393
+ ==============================================================================*/
394
+
395
+ #ifdef BUILD_ARRAY_TYPE
396
+ DEF_SM_ARR_TYPE(vec3);
397
+ DEF_SM_ARR_TYPE(vec4);
398
+ DEF_SM_ARR_TYPE(quat);
399
+ DEF_SM_ARR_TYPE(mat4);
400
+ #endif
401
+
402
+
403
+
404
+ /*==============================================================================
405
+
406
+ vec3_t functions
407
+
408
+ ==============================================================================*/
409
+
410
+ DEF_SM_WRAP_OP(vec3);
411
+ DEF_SM_UNWRAP_OP(vec3);
412
+ DEF_SM_SIZE_OP(vec3);
413
+ DEF_SM_LENGTH_OP(vec3);
414
+ DEF_SM_FETCH_OP(vec3);
415
+ DEF_SM_STORE_OP(vec3);
416
+ DEF_SM_UNARY_OP(copy, vec3, vec3);
417
+ DEF_SM_UNARY_OP(normalize, vec3, vec3);
418
+ DEF_SM_UNARY_OP(inverse, vec3, vec3);
419
+ DEF_SM_UNARY_OP(negate, vec3, vec3);
420
+ DEF_SM_BINARY_OP(cross_product, vec3, vec3, vec3);
421
+ DEF_SM_BINARY_OP(multiply, vec3, vec3, vec3);
422
+ DEF_SM_BINARY_OP(add, vec3, vec3, vec3);
423
+ DEF_SM_BINARY_OP(subtract, vec3, vec3, vec3);
424
+
425
/*
 * call-seq: dot_product(other) -> Float
 *
 * Returns the 3-component dot product of self and other. Accepts a Vec3,
 * Vec4, or Quat for other (the types share layout for their first three
 * components -- see the wrap/unwrap note above); raises ArgumentError for
 * anything else. The rb_raise does not return, so the `return Qnil` after
 * it is unreachable defensive code.
 */
static VALUE sm_vec3_dot_product(VALUE sm_self, VALUE sm_other)
{
  if (!SM_IS_A(sm_other, vec3) &&
    !SM_IS_A(sm_other, vec4) &&
    !SM_IS_A(sm_other, quat)) {
    rb_raise(rb_eArgError,
      "Expected a Quat, Vec3, or Vec4, got %s",
      rb_obj_classname(sm_other));
    return Qnil;
  }
  return rb_float_new(
    vec3_dot_product(
      *sm_unwrap_vec3(sm_self, NULL),
      *sm_unwrap_vec3(sm_other, NULL)));
}
440
+
441
+
442
+
443
/*
 * Allocator for Vec3 (and subclasses: `self` is the class being
 * instantiated). Wraps a zero-initialized vec3_t and forwards all
 * arguments to #initialize (sm_vec3_init).
 */
static VALUE sm_vec3_new(int argc, VALUE *argv, VALUE self)
{
  VALUE sm_vec = sm_wrap_vec3(g_vec3_zero, self);
  rb_obj_call_init(sm_vec, argc, argv);
  return sm_vec;
}
449
+
450
+
451
+
452
/*
 * call-seq:
 *   initialize()                     -> zero vector (from allocator)
 *   initialize(vec3 | vec4 | quat)   -> copy of the first 3 components
 *   initialize(array, offset = 0)    -> 3 Numerics read from array[offset..]
 *   initialize(x, y, z)              -> components set explicitly
 *
 * Initializes a Vec3 from the given arguments. Raises ArgumentError for
 * any unsupported arity or argument type.
 */
static VALUE sm_vec3_init(int argc, VALUE *argv, VALUE sm_self)
{
  vec3_t *self = sm_unwrap_vec3(sm_self, NULL);
  size_t arr_index = 0;

  switch(argc) {

  // Default value
  case 0: { break; }

  // Copy or by-array
  case 1: {
    if (SM_IS_A(argv[0], vec3) ||
      SM_IS_A(argv[0], vec4) ||
      SM_IS_A(argv[0], quat)) {
      sm_unwrap_vec3(argv[0], *self);
      break;
    }

    // Optional offset into array provided.
    // NOTE: the `if (0) { case 2: ... }` construct is deliberate -- the
    // two-argument form enters here via the case label to read the
    // offset, then falls through to the shared array-handling code.
    if (0) {
  case 2:
      arr_index = NUM2SIZET(argv[1]);
    }

    // Array of values
    if (SM_RB_IS_A(argv[0], rb_cArray)) {
      VALUE arrdata = argv[0];
      const size_t arr_end = arr_index + 3;
      s_float_t *vec_elem = *self;
      for (; arr_index < arr_end; ++arr_index, ++vec_elem) {
        *vec_elem = (s_float_t)rb_num2dbl(rb_ary_entry(arrdata, (long)arr_index));
      }
      break;
    }

    rb_raise(rb_eArgError, "Expected either an array of Numerics or a Vec3");
    break;
  }

  // X, Y, Z
  case 3: {
    self[0][0] = (s_float_t)rb_num2dbl(argv[0]);
    self[0][1] = (s_float_t)rb_num2dbl(argv[1]);
    self[0][2] = (s_float_t)rb_num2dbl(argv[2]);
    break;
  }

  default: {
    rb_raise(rb_eArgError, "Invalid arguments to Vec3.initialize");
    break;
  }
  } // switch (argc)

  return sm_self;
}
508
+
509
+
510
+
511
+ static VALUE sm_vec3_to_s(VALUE self)
512
+ {
513
+ const s_float_t *v;
514
+ v = (const s_float_t *)*sm_unwrap_vec3(self, NULL);
515
+ return rb_sprintf(
516
+ "{ "
517
+ "%f, %f, %f"
518
+ " }",
519
+ v[0], v[1], v[2]);
520
+ }
521
+
522
+
523
+
524
+ static VALUE sm_vec3_magnitude_squared(VALUE sm_self)
525
+ {
526
+ return rb_float_new(vec3_length_squared(*sm_unwrap_vec3(sm_self, NULL)));
527
+ }
528
+
529
+
530
+
531
+ static VALUE sm_vec3_magnitude(VALUE sm_self)
532
+ {
533
+ return rb_float_new(vec3_length(*sm_unwrap_vec3(sm_self, NULL)));
534
+ }
535
+
536
+
537
+
538
+ static VALUE sm_vec3_scale(int argc, VALUE *argv, VALUE sm_self)
539
+ {
540
+ VALUE sm_out;
541
+ VALUE sm_scalar;
542
+ s_float_t scalar;
543
+ vec3_t *self = sm_unwrap_vec3(sm_self, NULL);
544
+
545
+ rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
546
+ scalar = rb_num2dbl(sm_scalar);
547
+
548
+ if (SM_IS_A(sm_out, vec3)) {
549
+ vec3_scale(*self, scalar, *sm_unwrap_vec3(sm_out, NULL));
550
+ } else {
551
+ vec3_t out;
552
+ vec3_scale(*self, scalar, out);
553
+ sm_out = sm_wrap_vec3(out, rb_obj_class(sm_self));
554
+ rb_obj_call_init(sm_out, 0, 0);
555
+ }
556
+
557
+ return sm_out;
558
+ }
559
+
560
+
561
+
562
+ static VALUE sm_vec3_divide(int argc, VALUE *argv, VALUE sm_self)
563
+ {
564
+ VALUE sm_out;
565
+ VALUE sm_scalar;
566
+ s_float_t scalar;
567
+ vec3_t *self = sm_unwrap_vec3(sm_self, NULL);
568
+
569
+ rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
570
+ scalar = rb_num2dbl(sm_scalar);
571
+
572
+ if (SM_IS_A(sm_out, vec3)) {
573
+ vec3_divide(*self, scalar, *sm_unwrap_vec3(sm_out, NULL));
574
+ } else {
575
+ vec3_t out;
576
+ vec3_divide(*self, scalar, out);
577
+ sm_out = sm_wrap_vec3(out, rb_obj_class(sm_self));
578
+ rb_obj_call_init(sm_out, 0, 0);
579
+ }
580
+
581
+ return sm_out;
582
+ }
583
+
584
+
585
+
586
+ static VALUE sm_vec3_equals(VALUE sm_self, VALUE sm_other)
587
+ {
588
+ if (!RTEST(sm_other)) {
589
+ return Qfalse;
590
+ } else if (!SM_IS_A(sm_other, vec3) && !SM_IS_A(sm_other, vec4) && !SM_IS_A(sm_other, quat)) {
591
+ rb_raise(rb_eTypeError,
592
+ "Expected Vec3, Vec4, or Quat, got %s",
593
+ rb_obj_classname(sm_other));
594
+ }
595
+
596
+ return vec3_equals(*sm_unwrap_vec3(sm_self, NULL), *sm_unwrap_vec3(sm_other, NULL)) ? Qtrue : Qfalse;
597
+ }
598
+
599
+
600
+
601
/*==============================================================================

  vec4_t functions

==============================================================================*/

// Boilerplate wrap/unwrap/accessor bindings and the simple unary/binary
// Ruby methods for Vec4, generated by the DEF_SM_* macros.
DEF_SM_WRAP_OP(vec4);
DEF_SM_UNWRAP_OP(vec4);
DEF_SM_SIZE_OP(vec4);
DEF_SM_LENGTH_OP(vec4);
DEF_SM_FETCH_OP(vec4);
DEF_SM_STORE_OP(vec4);
DEF_SM_UNARY_OP(copy, vec4, vec4);
DEF_SM_UNARY_OP(normalize, vec4, vec4);
DEF_SM_UNARY_OP(inverse, vec4, vec4);
DEF_SM_UNARY_OP(negate, vec4, vec4);
DEF_SM_BINARY_OP(multiply, vec4, vec4, vec4);
DEF_SM_BINARY_OP(add, vec4, vec4, vec4);
DEF_SM_BINARY_OP(subtract, vec4, vec4, vec4);
620
+
621
+ static VALUE sm_vec4_dot_product(VALUE sm_self, VALUE sm_other)
622
+ {
623
+ if (!SM_IS_A(sm_other, vec4) &&
624
+ !SM_IS_A(sm_other, quat)) {
625
+ rb_raise(rb_eArgError,
626
+ "Expected a Quat or Vec4, got %s",
627
+ rb_obj_classname(sm_other));
628
+ return Qnil;
629
+ }
630
+ return rb_float_new(
631
+ vec4_dot_product(
632
+ *sm_unwrap_vec4(sm_self, NULL),
633
+ *sm_unwrap_vec4(sm_other, NULL)));
634
+ }
635
+
636
+
637
+
638
+ static VALUE sm_vec4_new(int argc, VALUE *argv, VALUE self)
639
+ {
640
+ VALUE sm_vec = sm_wrap_vec4(g_vec4_identity, self);
641
+ rb_obj_call_init(sm_vec, argc, argv);
642
+ return sm_vec;
643
+ }
644
+
645
+
646
+
647
/*
 * call-seq:
 *   Vec4.new()             -> identity vector (set by the allocator)
 *   Vec4.new(vec)          -> copy of a Vec4/Quat, or of a Vec3 (w untouched)
 *   Vec4.new(ary, off = 0) -> vector from 4 Numerics in ary starting at off
 *   Vec4.new(x, y, z)      -> components with w left as-is
 *   Vec4.new(x, y, z, w)   -> vector with the given components
 */
static VALUE sm_vec4_init(int argc, VALUE *argv, VALUE sm_self)
{
  vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
  size_t arr_index = 0;

  switch(argc) {

    // Default value
    case 0: { break; }

    // Copy or by-array
    case 1: {
      if (SM_IS_A(argv[0], quat) ||
        SM_IS_A(argv[0], vec4)) {
        // quat_t and vec4_t share the same 4-float layout.
        sm_unwrap_quat(argv[0], *self);
        break;
      }

      if (SM_IS_A(argv[0], vec3)) {
        // Copies x, y, z only; w keeps its current value.
        sm_unwrap_vec3(argv[0], *self);
        break;
      }

      // Optional offset into array provided
      // `if (0)` skips this when argc == 1; a two-argument call jumps to
      // `case 2:` inside it, then falls through to the array handling below.
      if (0) {
        case 2:
        arr_index = NUM2SIZET(argv[1]);
      }

      // Array of values
      if (SM_RB_IS_A(argv[0], rb_cArray)) {
        VALUE arrdata = argv[0];
        const size_t arr_end = arr_index + 4;
        s_float_t *vec_elem = *self;
        for (; arr_index < arr_end; ++arr_index, ++vec_elem) {
          *vec_elem = (s_float_t)rb_num2dbl(rb_ary_entry(arrdata, (long)arr_index));
        }
        break;
      }

      rb_raise(rb_eArgError, "Expected either an array of Numerics or a Vec4");
      break;
    }

    // W
    case 4: {
      self[0][3] = (s_float_t)rb_num2dbl(argv[3]);
      // Deliberate fallthrough: the 4-argument form also runs the X, Y, Z
      // assignments below.
      case 3: // X, Y, Z
      self[0][0] = (s_float_t)rb_num2dbl(argv[0]);
      self[0][1] = (s_float_t)rb_num2dbl(argv[1]);
      self[0][2] = (s_float_t)rb_num2dbl(argv[2]);
      break;
    }

    default: {
      rb_raise(rb_eArgError, "Invalid arguments to Vec4.initialize");
      break;
    }
  } // switch (argc)

  return sm_self;
}
709
+
710
+
711
+
712
+ static VALUE sm_vec4_to_s(VALUE self)
713
+ {
714
+ const s_float_t *v;
715
+ v = (const s_float_t *)*sm_unwrap_vec4(self, NULL);
716
+ return rb_sprintf(
717
+ "{ "
718
+ "%f, %f, %f, %f"
719
+ " }",
720
+ v[0], v[1], v[2], v[3]);
721
+ }
722
+
723
+
724
+
725
+ static VALUE sm_vec4_magnitude_squared(VALUE sm_self)
726
+ {
727
+ return rb_float_new(vec4_length_squared(*sm_unwrap_vec4(sm_self, NULL)));
728
+ }
729
+
730
+
731
+
732
+ static VALUE sm_vec4_magnitude(VALUE sm_self)
733
+ {
734
+ return rb_float_new(vec4_length(*sm_unwrap_vec4(sm_self, NULL)));
735
+ }
736
+
737
+
738
+
739
+ static VALUE sm_vec4_scale(int argc, VALUE *argv, VALUE sm_self)
740
+ {
741
+ VALUE sm_out;
742
+ VALUE sm_scalar;
743
+ s_float_t scalar;
744
+ vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
745
+
746
+ rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
747
+ scalar = rb_num2dbl(sm_scalar);
748
+
749
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
750
+ vec4_scale(*self, scalar, *sm_unwrap_vec4(sm_out, NULL));
751
+ } else {
752
+ vec4_t out;
753
+ vec4_scale(*self, scalar, out);
754
+ sm_out = sm_wrap_vec4(out, rb_obj_class(sm_self));
755
+ rb_obj_call_init(sm_out, 0, 0);
756
+ }
757
+
758
+ return sm_out;
759
+ }
760
+
761
+
762
+
763
+ static VALUE sm_vec4_divide(int argc, VALUE *argv, VALUE sm_self)
764
+ {
765
+ VALUE sm_out;
766
+ VALUE sm_scalar;
767
+ s_float_t scalar;
768
+ vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
769
+
770
+ rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
771
+ scalar = rb_num2dbl(sm_scalar);
772
+
773
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
774
+ vec4_divide(*self, scalar, *sm_unwrap_vec4(sm_out, NULL));
775
+ } else {
776
+ vec4_t out;
777
+ vec4_divide(*self, scalar, out);
778
+ sm_out = sm_wrap_vec4(out, rb_obj_class(sm_self));
779
+ rb_obj_call_init(sm_out, 0, 0);
780
+ }
781
+
782
+ return sm_out;
783
+ }
784
+
785
+
786
+
787
+ static VALUE sm_vec4_equals(VALUE sm_self, VALUE sm_other)
788
+ {
789
+ if (!RTEST(sm_other)) {
790
+ return Qfalse;
791
+ } else if (!SM_IS_A(sm_other, vec4) && !SM_IS_A(sm_other, quat)) {
792
+ rb_raise(rb_eTypeError,
793
+ "Expected Vec4 or Quat, got %s",
794
+ rb_obj_classname(sm_other));
795
+ }
796
+
797
+ return vec4_equals(*sm_unwrap_vec4(sm_self, NULL), *sm_unwrap_vec4(sm_other, NULL)) ? Qtrue : Qfalse;
798
+ }
799
+
800
+
801
+
802
+ /*==============================================================================
803
+
804
+ quat_t functions
805
+
806
+ ==============================================================================*/
807
+
808
+ DEF_SM_WRAP_OP(quat);
809
+ DEF_SM_UNWRAP_OP(quat);
810
+ DEF_SM_SIZE_OP(quat);
811
+ DEF_SM_LENGTH_OP(quat);
812
+ DEF_SM_FETCH_OP(quat);
813
+ DEF_SM_STORE_OP(quat);
814
+ DEF_SM_UNARY_OP(copy, quat, quat);
815
+ DEF_SM_UNARY_OP(inverse, quat, quat);
816
+ DEF_SM_UNARY_OP(negate, quat, quat);
817
+ DEF_SM_BINARY_OP(multiply, quat, quat, quat);
818
+ DEF_SM_BINARY_OP(multiply_vec3, quat, vec3, vec3);
819
+
820
+ static VALUE sm_quat_new(int argc, VALUE *argv, VALUE self)
821
+ {
822
+ VALUE sm_quat = sm_wrap_quat(g_quat_identity, self);
823
+ rb_obj_call_init(sm_quat, argc, argv);
824
+ return sm_quat;
825
+ }
826
+
827
+
828
+
829
+ static VALUE sm_quat_init(int argc, VALUE *argv, VALUE sm_self)
830
+ {
831
+ quat_t *self = sm_unwrap_quat(sm_self, NULL);
832
+ size_t arr_index = 0;
833
+
834
+ switch(argc) {
835
+
836
+ // Default value
837
+ case 0: { break; }
838
+
839
+ // Copy or by-array
840
+ case 1: {
841
+ if (SM_IS_A(argv[0], vec3)) {
842
+ sm_unwrap_vec3(argv[0], *self);
843
+ break;
844
+ }
845
+
846
+ if (SM_IS_A(argv[0], quat) ||
847
+ SM_IS_A(argv[0], vec4)) {
848
+ sm_unwrap_quat(argv[0], *self);
849
+ break;
850
+ }
851
+
852
+ if (SM_IS_A(argv[0], mat4)) {
853
+ const mat4_t *mat = sm_unwrap_mat4(argv[0], NULL);
854
+ quat_from_mat4(*mat, *self);
855
+ break;
856
+ }
857
+
858
+ // Optional offset into array provided
859
+ if (0) {
860
+ case 2:
861
+ arr_index = NUM2SIZET(argv[1]);
862
+ }
863
+
864
+ // Array of values
865
+ if (SM_RB_IS_A(argv[0], rb_cArray)) {
866
+ VALUE arrdata = argv[0];
867
+ const size_t arr_end = arr_index + 3;
868
+ s_float_t *vec_elem = *self;
869
+ for (; arr_index < arr_end; ++arr_index, ++vec_elem) {
870
+ *vec_elem = (s_float_t)rb_num2dbl(rb_ary_entry(arrdata, (long)arr_index));
871
+ }
872
+ break;
873
+ }
874
+
875
+ rb_raise(rb_eArgError, "Expected either an array of Numerics or a Quat");
876
+ break;
877
+ }
878
+
879
+ // W
880
+ case 4: {
881
+ self[0][3] = (s_float_t)rb_num2dbl(argv[3]);
882
+ case 3: // X, Y, Z
883
+ self[0][0] = (s_float_t)rb_num2dbl(argv[0]);
884
+ self[0][1] = (s_float_t)rb_num2dbl(argv[1]);
885
+ self[0][2] = (s_float_t)rb_num2dbl(argv[2]);
886
+ break;
887
+ }
888
+
889
+ default: {
890
+ rb_raise(rb_eArgError, "Invalid arguments to Quat.initialize");
891
+ break;
892
+ }
893
+ } // switch (argc)
894
+
895
+ return sm_self;
896
+ }
897
+
898
+
899
+
900
+ static VALUE sm_quat_to_s(VALUE self)
901
+ {
902
+ const s_float_t *v;
903
+ v = (const s_float_t *)*sm_unwrap_quat(self, NULL);
904
+ return rb_sprintf(
905
+ "{ "
906
+ "%f, %f, %f, %f"
907
+ " }",
908
+ v[0], v[1], v[2], v[3]);
909
+ }
910
+
911
+
912
+
913
+
914
+
915
+
916
+
917
/*
 * call-seq: Quat.angle_axis(angle, axis, out = nil) -> out or new Quat
 *
 * Builds a rotation quaternion from an angle and a Vec3 axis. Writes into
 * out when it is a Quat or Vec4, otherwise allocates a new Quat.
 * Raises TypeError if axis is not a Vec3.
 */
static VALUE sm_quat_angle_axis(int argc, VALUE *argv, VALUE self)
{
  VALUE sm_angle;
  VALUE sm_axis;
  VALUE sm_out;
  s_float_t angle;
  const vec3_t *axis;

  rb_scan_args(argc, argv, "21", &sm_angle, &sm_axis, &sm_out);
  SM_RAISE_IF_NOT_TYPE(sm_axis, vec3);

  angle = (s_float_t)rb_num2dbl(sm_angle);
  axis = sm_unwrap_vec3(sm_axis, NULL);

  if (SM_IS_A(sm_out, quat) || SM_IS_A(sm_out, vec4)) {
    quat_t *out = sm_unwrap_quat(sm_out, NULL);
    quat_from_angle_axis(angle, (*axis)[0], (*axis)[1], (*axis)[2], *out);
  } else {
    quat_t out;
    quat_from_angle_axis(angle, (*axis)[0], (*axis)[1], (*axis)[2], out);
    // This is a class method: `self` is the class being constructed.
    sm_out = sm_wrap_quat(out, self);
    rb_obj_call_init(sm_out, 0, 0);
  }

  return sm_out;
}
943
+
944
+
945
+
946
+ static VALUE sm_quat_identity(VALUE sm_self)
947
+ {
948
+ quat_t *self = sm_unwrap_quat(sm_self, NULL);
949
+ quat_identity(*self);
950
+ return sm_self;
951
+ }
952
+
953
+
954
+
955
+ static VALUE sm_quat_scale(int argc, VALUE *argv, VALUE sm_self)
956
+ {
957
+ VALUE sm_out;
958
+ VALUE sm_scalar;
959
+ s_float_t scalar;
960
+ vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
961
+
962
+ rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
963
+ scalar = rb_num2dbl(sm_scalar);
964
+
965
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
966
+ vec4_scale(*self, scalar, *sm_unwrap_vec4(sm_out, NULL));
967
+ } else {
968
+ vec4_t out;
969
+ vec4_scale(*self, scalar, out);
970
+ sm_out = sm_wrap_quat(out, rb_obj_class(sm_self));
971
+ rb_obj_call_init(sm_out, 0, 0);
972
+ }
973
+
974
+ return sm_out;
975
+ }
976
+
977
+
978
+
979
+ static VALUE sm_quat_divide(int argc, VALUE *argv, VALUE sm_self)
980
+ {
981
+ VALUE sm_out;
982
+ VALUE sm_scalar;
983
+ s_float_t scalar;
984
+ vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
985
+
986
+ rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
987
+ scalar = rb_num2dbl(sm_scalar);
988
+
989
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
990
+ vec4_divide(*self, scalar, *sm_unwrap_vec4(sm_out, NULL));
991
+ } else {
992
+ vec4_t out;
993
+ vec4_divide(*self, scalar, out);
994
+ sm_out = sm_wrap_quat(out, rb_obj_class(sm_self));
995
+ rb_obj_call_init(sm_out, 0, 0);
996
+ }
997
+
998
+ return sm_out;
999
+ }
1000
+
1001
+
1002
+
1003
+ static VALUE sm_quat_slerp(int argc, VALUE *argv, VALUE sm_self)
1004
+ {
1005
+ VALUE sm_out;
1006
+ VALUE sm_destination;
1007
+ VALUE sm_alpha;
1008
+ quat_t *destination;
1009
+ quat_t *self = sm_unwrap_vec4(sm_self, NULL);
1010
+ s_float_t alpha;
1011
+
1012
+ rb_scan_args(argc, argv, "21", &sm_destination, &sm_alpha, &sm_out);
1013
+ alpha = rb_num2dbl(sm_alpha);
1014
+
1015
+ if (!SM_IS_A(sm_destination, vec4) && !SM_IS_A(sm_destination, quat)) {
1016
+ rb_raise(rb_eTypeError,
1017
+ "Expected either Vec4 or Quat, got %s",
1018
+ rb_obj_classname(sm_destination));
1019
+ return Qnil;
1020
+ }
1021
+
1022
+ destination = sm_unwrap_quat(sm_destination, NULL);
1023
+
1024
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
1025
+ quat_slerp(*self, *destination, alpha, *sm_unwrap_quat(sm_out, NULL));
1026
+ } else {
1027
+ quat_t out;
1028
+ quat_slerp(*self, *destination, alpha, out);
1029
+ sm_out = sm_wrap_quat(out, rb_obj_class(sm_self));
1030
+ rb_obj_call_init(sm_out, 0, 0);
1031
+ }
1032
+
1033
+ return sm_out;
1034
+ }
1035
+
1036
+
1037
+
1038
+ static VALUE sm_quat_normalize(int argc, VALUE *argv, VALUE sm_self)
1039
+ {
1040
+ VALUE sm_out;
1041
+ VALUE sm_scalar;
1042
+ s_float_t scalar;
1043
+ vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
1044
+
1045
+ rb_scan_args(argc, argv, "01", &sm_scalar, &sm_out);
1046
+ scalar = rb_num2dbl(sm_scalar);
1047
+
1048
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
1049
+ vec4_normalize(*self, *sm_unwrap_vec4(sm_out, NULL));
1050
+ } else {
1051
+ vec4_t out;
1052
+ vec4_normalize(*self, out);
1053
+ sm_out = sm_wrap_quat(out, rb_obj_class(sm_self));
1054
+ rb_obj_call_init(sm_out, 0, 0);
1055
+ }
1056
+
1057
+ return sm_out;
1058
+ }
1059
+
1060
+
1061
+
1062
+ static VALUE sm_quat_add(int argc, VALUE *argv, VALUE sm_self)
1063
+ {
1064
+ VALUE sm_out;
1065
+ VALUE sm_left;
1066
+ vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
1067
+
1068
+ rb_scan_args(argc, argv, "11", &sm_left, &sm_out);
1069
+
1070
+ if (!SM_IS_A(sm_left, vec4) && !SM_IS_A(sm_left, quat)) {
1071
+ rb_raise(rb_eTypeError,
1072
+ "Expected either Vec4 or Quat, got %s",
1073
+ rb_obj_classname(sm_left));
1074
+ return Qnil;
1075
+ }
1076
+
1077
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
1078
+ vec4_add(*self, *sm_unwrap_vec4(sm_left, NULL), *sm_unwrap_vec4(sm_out, NULL));
1079
+ } else {
1080
+ vec4_t out;
1081
+ vec4_add(*self, *sm_unwrap_vec4(sm_left, NULL), out);
1082
+ sm_out = sm_wrap_quat(out, rb_obj_class(sm_self));
1083
+ rb_obj_call_init(sm_out, 0, 0);
1084
+ }
1085
+
1086
+ return sm_out;
1087
+ }
1088
+
1089
+
1090
+
1091
+ static VALUE sm_quat_subtract(int argc, VALUE *argv, VALUE sm_self)
1092
+ {
1093
+ VALUE sm_out;
1094
+ VALUE sm_left;
1095
+ vec4_t *self = sm_unwrap_vec4(sm_self, NULL);
1096
+
1097
+ rb_scan_args(argc, argv, "11", &sm_left, &sm_out);
1098
+
1099
+ if (!SM_IS_A(sm_left, vec4) && !SM_IS_A(sm_left, quat)) {
1100
+ rb_raise(rb_eTypeError,
1101
+ "Expected either Vec4 or Quat, got %s",
1102
+ rb_obj_classname(sm_left));
1103
+ return Qnil;
1104
+ }
1105
+
1106
+ if ((SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat))) {
1107
+ vec4_subtract(*self, *sm_unwrap_vec4(sm_left, NULL), *sm_unwrap_vec4(sm_out, NULL));
1108
+ } else {
1109
+ vec4_t out;
1110
+ vec4_subtract(*self, *sm_unwrap_vec4(sm_left, NULL), out);
1111
+ sm_out = sm_wrap_quat(out, rb_obj_class(sm_self));
1112
+ rb_obj_call_init(sm_out, 0, 0);
1113
+ }
1114
+
1115
+ return sm_out;
1116
+ }
1117
+
1118
+
1119
+
1120
/*==============================================================================

  mat4_t functions

==============================================================================*/

// Boilerplate wrap/unwrap/accessor bindings and the simple unary/binary
// Ruby methods for Mat4, generated by the DEF_SM_* macros.
DEF_SM_WRAP_OP(mat4);
DEF_SM_UNWRAP_OP(mat4);
DEF_SM_SIZE_OP(mat4);
DEF_SM_LENGTH_OP(mat4);
DEF_SM_FETCH_OP(mat4);
DEF_SM_STORE_OP(mat4);
DEF_SM_UNARY_OP(copy, mat4, mat4);
DEF_SM_UNARY_OP(transpose, mat4, mat4);
DEF_SM_UNARY_OP(inverse_orthogonal, mat4, mat4);
DEF_SM_UNARY_OP(adjoint, mat4, mat4);
DEF_SM_BINARY_OP(multiply, mat4, mat4, mat4);
DEF_SM_BINARY_OP(multiply_vec4, mat4, vec4, vec4);
DEF_SM_BINARY_OP(transform_vec3, mat4, vec3, vec3);
DEF_SM_BINARY_OP(rotate_vec3, mat4, vec3, vec3);
DEF_SM_BINARY_OP(inv_rotate_vec3, mat4, vec3, vec3);
1142
/*
 * call-seq: Mat4#inverse_affine(out = nil) -> out, new Mat4, or nil
 *
 * Inverts self assuming it is an affine transform. Writes into out when
 * given (must be a Mat4), otherwise allocates a new Mat4 of self's class.
 * Returns nil if the matrix could not be inverted.
 */
static VALUE sm_mat4_inverse_affine(int argc, VALUE *argv, VALUE sm_self)
{
  VALUE sm_out = Qnil;
  mat4_t *self;

  rb_scan_args(argc, argv, "01", &sm_out);
  self = sm_unwrap_mat4(sm_self, NULL);

  if (argc == 1) {
    mat4_t *output;

    // A nil/false out argument means "allocate a result": jump into the
    // argc == 0 branch below.
    if (!RTEST(sm_out)) {
      goto SM_LABEL(output_lbl);
    }

    if (!SM_IS_A(sm_out, mat4)) {
      rb_raise(rb_eTypeError,
        "Invalid argument to output of inverse_affine: expected %s, got %s",
        rb_class2name(SM_KLASS(mat4)),
        rb_obj_classname(sm_out));
      return Qnil;
    }

    output = sm_unwrap_mat4(sm_out, NULL);
    if (!mat4_inverse_affine(*self, *output)) {
      return Qnil;
    }

  } else if (argc == 0) {
    SM_LABEL(output_lbl): {
      mat4_t output;
      if (!mat4_inverse_affine(*self, output)) {
        return Qnil;
      }

      sm_out = sm_wrap_mat4(output, rb_obj_class(sm_self));
      rb_obj_call_init(sm_out, 0, 0);
    }
  } else {
    rb_raise(rb_eArgError, "Invalid number of arguments to inverse_affine");
  }

  return sm_out;
}
1186
+
1187
+
1188
+
1189
/*
 * call-seq: Mat4#inverse_general(out = nil) -> out, new Mat4, or nil
 *
 * Inverts self using the general (adjoint/determinant) method. Writes into
 * out when given (must be a Mat4), otherwise allocates a new Mat4 of self's
 * class. Returns nil if the matrix is singular.
 */
static VALUE sm_mat4_inverse_general(int argc, VALUE *argv, VALUE sm_self)
{
  VALUE sm_out = Qnil;
  mat4_t *self;

  rb_scan_args(argc, argv, "01", &sm_out);
  self = sm_unwrap_mat4(sm_self, NULL);

  if (argc == 1) {
    mat4_t *output;

    // A nil/false out argument means "allocate a result": jump into the
    // argc == 0 branch below.
    if (!RTEST(sm_out)) {
      goto SM_LABEL(skip_output);
    }

    if (!SM_IS_A(sm_out, mat4)) {
      rb_raise(rb_eTypeError,
        "Invalid argument to output of inverse_general: expected %s, got %s",
        rb_class2name(SM_KLASS(mat4)),
        rb_obj_classname(sm_out));
      return Qnil;
    }

    output = sm_unwrap_mat4(sm_out, NULL);
    if (!mat4_inverse_general(*self, *output)) {
      return Qnil;
    }

  } else if (argc == 0) {
    SM_LABEL(skip_output): {
      mat4_t output;
      if (!mat4_inverse_general(*self, output)) {
        return Qnil;
      }

      sm_out = sm_wrap_mat4(output, rb_obj_class(sm_self));
      rb_obj_call_init(sm_out, 0, 0);
    }
  } else {
    rb_raise(rb_eArgError, "Invalid number of arguments to inverse_general");
  }

  return sm_out;
}
1233
+
1234
+
1235
+
1236
+ static VALUE sm_mat4_determinant(VALUE sm_self)
1237
+ {
1238
+ return mat4_determinant(*sm_unwrap_mat4(sm_self, NULL));
1239
+ }
1240
+
1241
+
1242
+
1243
/*
 * call-seq:
 *   Mat4#translate(vec3, out = nil)    -> out or new Mat4
 *   Mat4#translate(x, y, z, out = nil) -> out or new Mat4
 *
 * Multiplies self by a translation and returns the result. The trailing
 * optional argument, when truthy, must be a Mat4 to write into.
 */
static VALUE sm_mat4_translate(int argc, VALUE *argv, VALUE sm_self)
{
  VALUE sm_out = Qnil;
  mat4_t *self = sm_unwrap_mat4(sm_self, NULL);
  vec3_t xyz;

  // First pass peels a trailing output argument off (2 -> 1, 4 -> 3 args)
  // and loops back to dispatch on the remaining count.
  SM_LABEL(argc_reconfig):
  switch (argc) {
    case 2: case 4: {
      sm_out = argv[--argc];
      if (RTEST(sm_out)) {
        SM_RAISE_IF_NOT_TYPE(sm_out, mat4);
      }
      goto SM_LABEL(argc_reconfig);
    }

    case 1: {
      sm_unwrap_vec3(argv[0], xyz);
      goto SM_LABEL(get_output);
    }

    case 3: {
      xyz[0] = rb_num2dbl(argv[0]);
      xyz[1] = rb_num2dbl(argv[1]);
      xyz[2] = rb_num2dbl(argv[2]);

      SM_LABEL(get_output):
      if (RTEST(sm_out)) {
        mat4_t *out = sm_unwrap_mat4(sm_out, NULL);
        mat4_translate(xyz[0], xyz[1], xyz[2], *self, *out);
      } else {
        mat4_t out;
        mat4_translate(xyz[0], xyz[1], xyz[2], *self, out);
        sm_out = sm_wrap_mat4(out, rb_obj_class(sm_self));
        rb_obj_call_init(sm_out, 0, 0);
      }
    }
    // NOTE(review): no default case — an argc of 0 or > 4 silently returns
    // nil rather than raising ArgumentError; confirm whether this is
    // intentional.
  }

  return sm_out;
}
1284
+
1285
+
1286
+
1287
/*
 * call-seq:
 *   Mat4.translation(vec3, out = nil)    -> out or new Mat4
 *   Mat4.translation(x, y, z, out = nil) -> out or new Mat4
 *
 * Builds a pure translation matrix. The trailing optional argument, when
 * truthy, must be a Mat4 to write into. Class method: `sm_self` is the
 * class being constructed.
 */
static VALUE sm_mat4_translation(int argc, VALUE *argv, VALUE sm_self)
{
  VALUE sm_out = Qnil;
  vec3_t xyz;

  // First pass peels a trailing output argument off (2 -> 1, 4 -> 3 args)
  // and loops back to dispatch on the remaining count.
  SM_LABEL(argc_reconfig):
  switch (argc) {
    case 2: case 4: {
      sm_out = argv[--argc];
      if (RTEST(sm_out)) {
        SM_RAISE_IF_NOT_TYPE(sm_out, mat4);
      }
      goto SM_LABEL(argc_reconfig);
    }

    case 1: {
      sm_unwrap_vec3(argv[0], xyz);
      goto SM_LABEL(get_output);
    }

    case 3: {
      xyz[0] = rb_num2dbl(argv[0]);
      xyz[1] = rb_num2dbl(argv[1]);
      xyz[2] = rb_num2dbl(argv[2]);

      SM_LABEL(get_output):
      if (RTEST(sm_out)) {
        mat4_t *out = sm_unwrap_mat4(sm_out, NULL);
        mat4_translation(xyz[0], xyz[1], xyz[2], *out);
      } else {
        mat4_t out;
        mat4_translation(xyz[0], xyz[1], xyz[2], out);
        sm_out = sm_wrap_mat4(out, sm_self);
        rb_obj_call_init(sm_out, 0, 0);
      }
    }
    // NOTE(review): no default case — an argc of 0 or > 4 silently returns
    // nil rather than raising ArgumentError; confirm whether this is
    // intentional.
  }

  return sm_out;
}
1327
+
1328
+
1329
+
1330
+ static VALUE sm_mat4_new(int argc, VALUE *argv, VALUE self)
1331
+ {
1332
+ VALUE sm_mat = sm_wrap_mat4(g_mat4_identity, self);
1333
+ rb_obj_call_init(sm_mat, argc, argv);
1334
+ return sm_mat;
1335
+ }
1336
+
1337
+
1338
+
1339
/*
 * call-seq:
 *   Mat4.new()                    -> identity matrix (set by the allocator)
 *   Mat4.new(mat4)                -> copy of mat4
 *   Mat4.new(quat)                -> rotation matrix built from a Quat
 *   Mat4.new(ary, off = 0)        -> matrix from 16 Numerics in ary at off
 *   Mat4.new(v0, v1, v2, v3)      -> rows from four Vec4/Quat values
 *   Mat4.new(m00, ..., m15)       -> matrix from 16 Numerics
 */
static VALUE sm_mat4_init(int argc, VALUE *argv, VALUE sm_self)
{
  mat4_t *self = sm_unwrap_mat4(sm_self, NULL);
  size_t arr_index = 0;

  switch (argc) {

    case 0: {
      // Identity (handled in _new)
      break;
    }

    // Copy Mat4 or provided [Numeric..]
    case 1: {
      // Copy Mat4
      if (SM_IS_A(argv[0], mat4)) {
        sm_unwrap_mat4(argv[0], *self);
        break;
      }

      // Build from Quaternion
      if (SM_IS_A(argv[0], quat)) {
        mat4_from_quat(*sm_unwrap_quat(argv[0], NULL), *self);
        break;
      }

      // Optional offset into array provided
      // `if (0)` skips this when argc == 1; a two-argument call jumps to
      // `case 2:` inside it, then falls through to the array handling below.
      if (0) {
        case 2:
        arr_index = NUM2SIZET(argv[1]);
      }

      // Array of values
      if (SM_RB_IS_A(argv[0], rb_cArray)) {
        VALUE arrdata = argv[0];
        const size_t arr_end = arr_index + 16;
        s_float_t *mat_elem = *self;
        for (; arr_index < arr_end; ++arr_index, ++mat_elem) {
          *mat_elem = rb_num2dbl(rb_ary_entry(arrdata, (long)arr_index));
        }
        break;
      }

      rb_raise(rb_eArgError, "Expected either an array of Numerics or a Mat4");
      break;
    }

    // Mat4(Vec4, Vec4, Vec4, Vec4)
    case 4: {
      size_t arg_index;
      s_float_t *mat_elem = *self;
      // Each Vec4/Quat argument fills one 4-float row of the matrix.
      for (arg_index = 0; arg_index < 4; ++arg_index, mat_elem += 4) {
        if (!SM_IS_A(argv[arg_index], vec4) && !SM_IS_A(argv[arg_index], quat)) {
          rb_raise(
            rb_eArgError,
            "Argument %d must be a Vec4 or Quat when supplying four arguments to Mat4.initialize",
            (int)(arg_index + 1));
        }

        sm_unwrap_vec4(argv[arg_index], mat_elem);
      }
      break;
    }

    // Mat4(Numeric m00 .. m16)
    case 16: {
      s_float_t *mat_elem = *self;
      VALUE *argv_p = argv;
      for (; argc; --argc, ++argv_p, ++mat_elem) {
        *mat_elem = (s_float_t)rb_num2dbl(*argv_p);
      }
      break;
    }

    default: {
      rb_raise(rb_eArgError, "Invalid arguments to Mat4.initialize");
      break;
    }
  } // swtich (argc)

  return sm_self;
}
1421
+
1422
+
1423
+
1424
+ static VALUE sm_mat4_to_s(VALUE self)
1425
+ {
1426
+ const s_float_t *v;
1427
+ v = (const s_float_t *)*sm_unwrap_mat4(self, NULL);
1428
+ return rb_sprintf(
1429
+ "{ "
1430
+ "%f, %f, %f, %f" ", "
1431
+ "%f, %f, %f, %f" ", "
1432
+ "%f, %f, %f, %f" ", "
1433
+ "%f, %f, %f, %f"
1434
+ " }",
1435
+ v[0], v[1], v[2], v[3],
1436
+ v[4], v[5], v[6], v[7],
1437
+ v[8], v[9], v[10], v[11],
1438
+ v[12], v[13], v[14], v[15]);
1439
+ }
1440
+
1441
+
1442
+
1443
/*
 * call-seq: Mat4.angle_axis(angle, axis, out = nil) -> out or new Mat4
 *
 * Builds a rotation matrix from an angle and a Vec3 axis. Writes into out
 * when it is a Mat4, otherwise allocates a new Mat4. Raises TypeError if
 * axis is not a Vec3.
 */
static VALUE sm_mat4_angle_axis(int argc, VALUE *argv, VALUE self)
{
  VALUE sm_angle;
  VALUE sm_axis;
  VALUE sm_out;
  s_float_t angle;
  const vec3_t *axis;

  rb_scan_args(argc, argv, "21", &sm_angle, &sm_axis, &sm_out);
  SM_RAISE_IF_NOT_TYPE(sm_axis, vec3);

  angle = (s_float_t)rb_num2dbl(sm_angle);
  axis = sm_unwrap_vec3(sm_axis, NULL);

  if (SM_IS_A(sm_out, mat4)) {
    mat4_t *out = sm_unwrap_mat4(sm_out, NULL);
    mat4_rotation(angle, (*axis)[0], (*axis)[1], (*axis)[2], *out);
  } else {
    mat4_t out;
    mat4_rotation(angle, (*axis)[0], (*axis)[1], (*axis)[2], out);
    // This is a class method: `self` is the class being constructed.
    sm_out = sm_wrap_mat4(out, self);
    rb_obj_call_init(sm_out, 0, 0);
  }

  return sm_out;
}
1469
+
1470
+
1471
+
1472
/*
 * call-seq: Mat4#get_row3(index, out = nil) -> out or new Vec3
 *
 * Returns the first three elements of row `index` (0..3). Writes into out
 * when it is a truthy Vec3, otherwise allocates a new Vec3. Raises
 * RangeError for an out-of-range index.
 */
static VALUE sm_mat4_get_row3(int argc, VALUE *argv, VALUE sm_self)
{
  mat4_t *self;
  int index;
  VALUE sm_out;

  self = sm_unwrap_mat4(sm_self, NULL);
  index = NUM2INT(argv[0]);
  sm_out = Qnil;

  if (index < 0 || index > 3) {
    rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
    return Qnil;
  }

  switch (argc) {
    case 2: {
      vec3_t *out;

      sm_out = argv[1];

      if (RTEST(sm_out)) {
        SM_RAISE_IF_NOT_TYPE(sm_out, vec3);
      } else {
        // nil/false output: allocate one instead (shared with the 1-arg
        // case below).
        goto SM_LABEL(no_output);
      }

      out = sm_unwrap_vec3(sm_out, NULL);
      mat4_get_row3(*self, index, *out);

      break;
    }

    case 1: SM_LABEL(no_output): {
      vec3_t out;
      mat4_get_row3(*self, index, out);
      sm_out = sm_wrap_vec3(out, Qnil);
      rb_obj_call_init(sm_out, 0, 0);
      break;
    }

    default: {
      rb_raise(rb_eArgError, "Invalid number of arguments to get_row3 - expected 1 or 2");
      break;
    }
  }

  return sm_out;
}
1521
+
1522
+
1523
+
1524
/*
 * call-seq: Mat4#get_row4(index, out = nil) -> out or new Vec4
 *
 * Returns row `index` (0..3) as a Vec4. Writes into out when it is a truthy
 * Vec4, otherwise allocates a new Vec4. Raises RangeError for an
 * out-of-range index.
 */
static VALUE sm_mat4_get_row4(int argc, VALUE *argv, VALUE sm_self)
{
  mat4_t *self;
  int index;
  VALUE sm_out;

  self = sm_unwrap_mat4(sm_self, NULL);
  index = NUM2INT(argv[0]);
  sm_out = Qnil;

  if (index < 0 || index > 3) {
    rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
    return Qnil;
  }

  switch (argc) {
    case 2: {
      vec4_t *out;

      sm_out = argv[1];

      if (RTEST(sm_out)) {
        SM_RAISE_IF_NOT_TYPE(sm_out, vec4);
      } else {
        // nil/false output: allocate one instead (shared with the 1-arg
        // case below).
        goto SM_LABEL(no_output);
      }

      out = sm_unwrap_vec4(sm_out, NULL);
      mat4_get_row4(*self, index, *out);

      break;
    }

    case 1: SM_LABEL(no_output): {
      vec4_t out;
      mat4_get_row4(*self, index, out);
      sm_out = sm_wrap_vec4(out, Qnil);
      rb_obj_call_init(sm_out, 0, 0);
      break;
    }

    default: {
      rb_raise(rb_eArgError, "Invalid number of arguments to get_row4 - expected 1 or 2");
      break;
    }
  }

  return sm_out;
}
1573
+
1574
+
1575
+
1576
/*
 * call-seq: Mat4#get_column3(index, out = nil) -> out or new Vec3
 *
 * Returns the first three elements of column `index` (0..3). Writes into
 * out when it is a truthy Vec3, otherwise allocates a new Vec3. Raises
 * RangeError for an out-of-range index.
 */
static VALUE sm_mat4_get_column3(int argc, VALUE *argv, VALUE sm_self)
{
  mat4_t *self;
  int index;
  VALUE sm_out;

  self = sm_unwrap_mat4(sm_self, NULL);
  index = NUM2INT(argv[0]);
  sm_out = Qnil;

  if (index < 0 || index > 3) {
    rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
    return Qnil;
  }

  switch (argc) {
    case 2: {
      vec3_t *out;

      sm_out = argv[1];

      if (RTEST(sm_out)) {
        SM_RAISE_IF_NOT_TYPE(sm_out, vec3);
      } else {
        // nil/false output: allocate one instead (shared with the 1-arg
        // case below).
        goto SM_LABEL(no_output);
      }

      out = sm_unwrap_vec3(sm_out, NULL);
      mat4_get_column3(*self, index, *out);

      break;
    }

    case 1: SM_LABEL(no_output): {
      vec3_t out;
      mat4_get_column3(*self, index, out);
      sm_out = sm_wrap_vec3(out, Qnil);
      rb_obj_call_init(sm_out, 0, 0);
      break;
    }

    default: {
      rb_raise(rb_eArgError, "Invalid number of arguments to get_column3 - expected 1 or 2");
      break;
    }
  }

  return sm_out;
}
1625
+
1626
+
1627
+
1628
/*
 * call-seq: Mat4#get_column4(index, out = nil) -> out or new Vec4
 *
 * Returns column `index` (0..3) as a Vec4. Writes into out when it is a
 * truthy Vec4, otherwise allocates a new Vec4. Raises RangeError for an
 * out-of-range index.
 */
static VALUE sm_mat4_get_column4(int argc, VALUE *argv, VALUE sm_self)
{
  mat4_t *self;
  int index;
  VALUE sm_out;

  self = sm_unwrap_mat4(sm_self, NULL);
  index = NUM2INT(argv[0]);
  sm_out = Qnil;

  if (index < 0 || index > 3) {
    rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
    return Qnil;
  }

  switch (argc) {
    case 2: {
      vec4_t *out;

      sm_out = argv[1];

      if (RTEST(sm_out)) {
        SM_RAISE_IF_NOT_TYPE(sm_out, vec4);
      } else {
        // nil/false output: allocate one instead (shared with the 1-arg
        // case below).
        goto SM_LABEL(no_output);
      }

      out = sm_unwrap_vec4(sm_out, NULL);
      mat4_get_column4(*self, index, *out);

      break;
    }

    case 1: SM_LABEL(no_output): {
      vec4_t out;
      mat4_get_column4(*self, index, out);
      sm_out = sm_wrap_vec4(out, Qnil);
      rb_obj_call_init(sm_out, 0, 0);
      break;
    }

    default: {
      rb_raise(rb_eArgError, "Invalid number of arguments to get_column4 - expected 1 or 2");
      break;
    }
  }

  return sm_out;
}
1677
+
1678
+
1679
+
1680
+ static VALUE sm_mat4_set_row3(VALUE sm_self, VALUE sm_index, VALUE sm_value)
1681
+ {
1682
+ const vec3_t *value;
1683
+ int index;
1684
+ mat4_t *self;
1685
+
1686
+ SM_RAISE_IF_NOT_TYPE(sm_value, vec3);
1687
+
1688
+ self = sm_unwrap_mat4(sm_self, NULL);
1689
+ value = sm_unwrap_vec3(sm_value, NULL);
1690
+ index = NUM2INT(sm_index);
1691
+
1692
+ if (index < 0 || index > 3) {
1693
+ rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
1694
+ return Qnil;
1695
+ }
1696
+
1697
+ mat4_set_row3(index, *value, *self);
1698
+
1699
+ return sm_self;
1700
+ }
1701
+
1702
+
1703
+
1704
+ static VALUE sm_mat4_set_column3(VALUE sm_self, VALUE sm_index, VALUE sm_value)
1705
+ {
1706
+ const vec3_t *value;
1707
+ int index;
1708
+ mat4_t *self;
1709
+
1710
+ SM_RAISE_IF_NOT_TYPE(sm_value, vec3);
1711
+
1712
+ self = sm_unwrap_mat4(sm_self, NULL);
1713
+ value = sm_unwrap_vec3(sm_value, NULL);
1714
+ index = NUM2INT(sm_index);
1715
+
1716
+ if (index < 0 || index > 3) {
1717
+ rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
1718
+ return Qnil;
1719
+ }
1720
+
1721
+ mat4_set_column3(index, *value, *self);
1722
+
1723
+ return sm_self;
1724
+ }
1725
+
1726
+
1727
+
1728
+ static VALUE sm_mat4_set_row4(VALUE sm_self, VALUE sm_index, VALUE sm_value)
1729
+ {
1730
+ const vec4_t *value;
1731
+ int index;
1732
+ mat4_t *self;
1733
+
1734
+ SM_RAISE_IF_NOT_TYPE(sm_value, vec4);
1735
+
1736
+ self = sm_unwrap_mat4(sm_self, NULL);
1737
+ value = sm_unwrap_vec4(sm_value, NULL);
1738
+ index = NUM2INT(sm_index);
1739
+
1740
+ if (index < 0 || index > 3) {
1741
+ rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
1742
+ return Qnil;
1743
+ }
1744
+
1745
+ mat4_set_row4(index, *value, *self);
1746
+
1747
+ return sm_self;
1748
+ }
1749
+
1750
+
1751
+
1752
+ static VALUE sm_mat4_set_column4(VALUE sm_self, VALUE sm_index, VALUE sm_value)
1753
+ {
1754
+ const vec4_t *value;
1755
+ int index;
1756
+ mat4_t *self;
1757
+
1758
+ SM_RAISE_IF_NOT_TYPE(sm_value, vec4);
1759
+
1760
+ self = sm_unwrap_mat4(sm_self, NULL);
1761
+ value = sm_unwrap_vec4(sm_value, NULL);
1762
+ index = NUM2INT(sm_index);
1763
+
1764
+ if (index < 0 || index > 3) {
1765
+ rb_raise(rb_eRangeError, "Index %d is out of range, must be (0 .. 3)", index);
1766
+ return Qnil;
1767
+ }
1768
+
1769
+ mat4_set_column4(index, *value, *self);
1770
+
1771
+ return sm_self;
1772
+ }
1773
+
1774
+
1775
+
1776
+ static VALUE sm_mat4_identity(VALUE sm_self)
1777
+ {
1778
+ mat4_t *self = sm_unwrap_mat4(sm_self, NULL);
1779
+ mat4_identity(*self);
1780
+ return sm_self;
1781
+ }
1782
+
1783
+
1784
+
1785
+ static VALUE sm_mat4_frustum(int argc, VALUE *argv, VALUE self)
1786
+ {
1787
+ VALUE sm_left;
1788
+ VALUE sm_right;
1789
+ VALUE sm_bottom;
1790
+ VALUE sm_top;
1791
+ VALUE sm_z_near;
1792
+ VALUE sm_z_far;
1793
+ VALUE sm_out;
1794
+ s_float_t left;
1795
+ s_float_t right;
1796
+ s_float_t bottom;
1797
+ s_float_t top;
1798
+ s_float_t z_near;
1799
+ s_float_t z_far;
1800
+
1801
+ rb_scan_args(argc, argv, "61", &sm_left, &sm_right, &sm_bottom, &sm_top, &sm_z_near, &sm_z_far, &sm_out);
1802
+
1803
+ left = (s_float_t)rb_num2dbl(sm_left);
1804
+ right = (s_float_t)rb_num2dbl(sm_right);
1805
+ bottom = (s_float_t)rb_num2dbl(sm_bottom);
1806
+ top = (s_float_t)rb_num2dbl(sm_top);
1807
+ z_near = (s_float_t)rb_num2dbl(sm_z_near);
1808
+ z_far = (s_float_t)rb_num2dbl(sm_z_far);
1809
+
1810
+ if (SM_IS_A(sm_out, mat4)) {
1811
+ mat4_t *out = sm_unwrap_mat4(sm_out, NULL);
1812
+ mat4_frustum(left, right, bottom, top, z_near, z_far, *out);
1813
+ } else {
1814
+ mat4_t out;
1815
+ mat4_frustum(left, right, bottom, top, z_near, z_far, out);
1816
+ sm_out = sm_wrap_mat4(out, Qnil);
1817
+ rb_obj_call_init(sm_out, 0, 0);
1818
+ }
1819
+
1820
+ return sm_out;
1821
+ }
1822
+
1823
+
1824
+
1825
+ static VALUE sm_mat4_orthographic(int argc, VALUE *argv, VALUE self)
1826
+ {
1827
+ VALUE sm_left;
1828
+ VALUE sm_right;
1829
+ VALUE sm_bottom;
1830
+ VALUE sm_top;
1831
+ VALUE sm_z_near;
1832
+ VALUE sm_z_far;
1833
+ VALUE sm_out;
1834
+ s_float_t left;
1835
+ s_float_t right;
1836
+ s_float_t bottom;
1837
+ s_float_t top;
1838
+ s_float_t z_near;
1839
+ s_float_t z_far;
1840
+
1841
+ rb_scan_args(argc, argv, "61", &sm_left, &sm_right, &sm_bottom, &sm_top, &sm_z_near, &sm_z_far, &sm_out);
1842
+
1843
+ left = (s_float_t)rb_num2dbl(sm_left);
1844
+ right = (s_float_t)rb_num2dbl(sm_right);
1845
+ bottom = (s_float_t)rb_num2dbl(sm_bottom);
1846
+ top = (s_float_t)rb_num2dbl(sm_top);
1847
+ z_near = (s_float_t)rb_num2dbl(sm_z_near);
1848
+ z_far = (s_float_t)rb_num2dbl(sm_z_far);
1849
+
1850
+ if (SM_IS_A(sm_out, mat4)) {
1851
+ mat4_t *out = sm_unwrap_mat4(sm_out, NULL);
1852
+ mat4_orthographic(left, right, bottom, top, z_near, z_far, *out);
1853
+ } else {
1854
+ mat4_t out;
1855
+ mat4_orthographic(left, right, bottom, top, z_near, z_far, out);
1856
+ sm_out = sm_wrap_mat4(out, self);
1857
+ rb_obj_call_init(sm_out, 0, 0);
1858
+ }
1859
+
1860
+ return sm_out;
1861
+ }
1862
+
1863
+
1864
+
1865
+ static VALUE sm_mat4_perspective(int argc, VALUE *argv, VALUE self)
1866
+ {
1867
+ VALUE sm_fov_y;
1868
+ VALUE sm_aspect;
1869
+ VALUE sm_z_near;
1870
+ VALUE sm_z_far;
1871
+ VALUE sm_out;
1872
+ s_float_t fov_y;
1873
+ s_float_t aspect;
1874
+ s_float_t z_near;
1875
+ s_float_t z_far;
1876
+
1877
+ rb_scan_args(argc, argv, "41", &sm_fov_y, &sm_aspect, &sm_z_near, &sm_z_far, &sm_out);
1878
+
1879
+ fov_y = (s_float_t)rb_num2dbl(sm_fov_y);
1880
+ aspect = (s_float_t)rb_num2dbl(sm_aspect);
1881
+ z_near = (s_float_t)rb_num2dbl(sm_z_near);
1882
+ z_far = (s_float_t)rb_num2dbl(sm_z_far);
1883
+
1884
+ if (SM_IS_A(sm_out, mat4)) {
1885
+ mat4_t *out = sm_unwrap_mat4(sm_out, NULL);
1886
+ mat4_perspective(fov_y, aspect, z_near, z_far, *out);
1887
+ } else {
1888
+ mat4_t out;
1889
+ mat4_perspective(fov_y, aspect, z_near, z_far, out);
1890
+ sm_out = sm_wrap_mat4(out, self);
1891
+ rb_obj_call_init(sm_out, 0, 0);
1892
+ }
1893
+
1894
+ return sm_out;
1895
+ }
1896
+
1897
+
1898
+
1899
+ static VALUE sm_mat4_look_at(int argc, VALUE *argv, VALUE self)
1900
+ {
1901
+ VALUE sm_eye;
1902
+ VALUE sm_center;
1903
+ VALUE sm_up;
1904
+ VALUE sm_out;
1905
+ const vec3_t *eye;
1906
+ const vec3_t *center;
1907
+ const vec3_t *up;
1908
+
1909
+ rb_scan_args(argc, argv, "31", &sm_eye, &sm_center, &sm_up, &sm_out);
1910
+
1911
+ eye = sm_unwrap_vec3(sm_eye, NULL);
1912
+ center = sm_unwrap_vec3(sm_center, NULL);
1913
+ up = sm_unwrap_vec3(sm_up, NULL);
1914
+
1915
+ if (SM_IS_A(sm_out, mat4)) {
1916
+ mat4_t *out = sm_unwrap_mat4(sm_out, NULL);
1917
+ mat4_look_at(*eye, *center, *up, *out);
1918
+ } else {
1919
+ mat4_t out;
1920
+ mat4_look_at(*eye, *center, *up, out);
1921
+ sm_out = sm_wrap_mat4(out, self);
1922
+ rb_obj_call_init(sm_out, 0, 0);
1923
+ }
1924
+
1925
+ return sm_out;
1926
+ }
1927
+
1928
+
1929
+
1930
+ static VALUE sm_mat4_scale(int argc, VALUE *argv, VALUE sm_self)
1931
+ {
1932
+ VALUE sm_out;
1933
+ VALUE sm_x, sm_y, sm_z;
1934
+ s_float_t x, y, z;
1935
+ mat4_t *self = sm_unwrap_mat4(sm_self, NULL);
1936
+
1937
+ rb_scan_args(argc, argv, "31", &sm_x, &sm_y, &sm_z, &sm_out);
1938
+ x = rb_num2dbl(sm_x);
1939
+ y = rb_num2dbl(sm_y);
1940
+ z = rb_num2dbl(sm_z);
1941
+
1942
+ if (SM_IS_A(sm_out, mat4)) {
1943
+ mat4_scale(*self, x, y, z, *sm_unwrap_mat4(sm_out, NULL));
1944
+ } else {
1945
+ mat4_t out;
1946
+ mat4_scale(*self, x, y, z, out);
1947
+ sm_out = sm_wrap_mat4(out, rb_obj_class(sm_self));
1948
+ rb_obj_call_init(sm_out, 0, 0);
1949
+ }
1950
+
1951
+ return sm_out;
1952
+ }
1953
+
1954
+
1955
+
1956
+ static VALUE sm_mat4_equals(VALUE sm_self, VALUE sm_other)
1957
+ {
1958
+ if (!RTEST(sm_other)) {
1959
+ return Qfalse;
1960
+ } else {
1961
+ SM_RAISE_IF_NOT_TYPE(sm_other, mat4);
1962
+ }
1963
+
1964
+ return mat4_equals(*sm_unwrap_mat4(sm_self, NULL), *sm_unwrap_mat4(sm_other, NULL)) ? Qtrue : Qfalse;
1965
+ }
1966
+
1967
+
1968
+
1969
+ /*==============================================================================
1970
+
1971
+ General-purpose functions
1972
+
1973
+ ==============================================================================*/
1974
+
1975
+ static VALUE sm_get_address(VALUE sm_self)
1976
+ {
1977
+ void *data_ptr = NULL;
1978
+ Data_Get_Struct(sm_self, void, data_ptr);
1979
+ return ULL2NUM((unsigned long long)data_ptr);
1980
+ }
1981
+
1982
+
1983
+
1984
+ void Init_bindings()
1985
+ {
1986
+ kRB_IVAR_MATHARRAY_LENGTH = rb_intern("__length");
1987
+ kRB_IVAR_MATHARRAY_SOURCE = rb_intern("__source");
1988
+
1989
+ s_sm_snowmath_mod = rb_define_module("Snow");
1990
+ SM_KLASS(vec3) = rb_define_class_under(s_sm_snowmath_mod, "Vec3", rb_cObject);
1991
+ SM_KLASS(vec4) = rb_define_class_under(s_sm_snowmath_mod, "Vec4", rb_cObject);
1992
+ SM_KLASS(quat) = rb_define_class_under(s_sm_snowmath_mod, "Quat", rb_cObject);
1993
+ SM_KLASS(mat4) = rb_define_class_under(s_sm_snowmath_mod, "Mat4", rb_cObject);
1994
+
1995
+ rb_define_singleton_method(SM_KLASS(vec3), "new", sm_vec3_new, -1);
1996
+ rb_define_method(SM_KLASS(vec3), "initialize", sm_vec3_init, -1);
1997
+ rb_define_method(SM_KLASS(vec3), "set", sm_vec3_init, -1);
1998
+ rb_define_method(SM_KLASS(vec3), "fetch", sm_vec3_fetch, 1);
1999
+ rb_define_method(SM_KLASS(vec3), "store", sm_vec3_store, 2);
2000
+ rb_define_method(SM_KLASS(vec3), "size", sm_vec3_size, 0);
2001
+ rb_define_method(SM_KLASS(vec3), "length", sm_vec3_length, 0);
2002
+ rb_define_method(SM_KLASS(vec3), "to_s", sm_vec3_to_s, 0);
2003
+ rb_define_method(SM_KLASS(vec3), "address", sm_get_address, 0);
2004
+ rb_define_method(SM_KLASS(vec3), "copy", sm_vec3_copy, -1);
2005
+ rb_define_method(SM_KLASS(vec3), "normalize", sm_vec3_normalize, -1);
2006
+ rb_define_method(SM_KLASS(vec3), "inverse", sm_vec3_inverse, -1);
2007
+ rb_define_method(SM_KLASS(vec3), "negate", sm_vec3_negate, -1);
2008
+ rb_define_method(SM_KLASS(vec3), "cross_product", sm_vec3_cross_product, -1);
2009
+ rb_define_method(SM_KLASS(vec3), "multiply_vec3", sm_vec3_multiply, -1);
2010
+ rb_define_method(SM_KLASS(vec3), "add", sm_vec3_add, -1);
2011
+ rb_define_method(SM_KLASS(vec3), "subtract", sm_vec3_subtract, -1);
2012
+ rb_define_method(SM_KLASS(vec3), "dot_product", sm_vec3_dot_product, 1);
2013
+ rb_define_method(SM_KLASS(vec3), "magnitude_squared", sm_vec3_magnitude_squared, 0);
2014
+ rb_define_method(SM_KLASS(vec3), "magnitude", sm_vec3_magnitude, 0);
2015
+ rb_define_method(SM_KLASS(vec3), "scale", sm_vec3_scale, -1);
2016
+ rb_define_method(SM_KLASS(vec3), "divide", sm_vec3_divide, -1);
2017
+ rb_define_method(SM_KLASS(vec3), "==", sm_vec3_equals, 1);
2018
+
2019
+ rb_define_singleton_method(SM_KLASS(vec4), "new", sm_vec4_new, -1);
2020
+ rb_define_method(SM_KLASS(vec4), "initialize", sm_vec4_init, -1);
2021
+ rb_define_method(SM_KLASS(vec4), "set", sm_vec4_init, -1);
2022
+ rb_define_method(SM_KLASS(vec4), "fetch", sm_vec4_fetch, 1);
2023
+ rb_define_method(SM_KLASS(vec4), "store", sm_vec4_store, 2);
2024
+ rb_define_method(SM_KLASS(vec4), "size", sm_vec4_size, 0);
2025
+ rb_define_method(SM_KLASS(vec4), "length", sm_vec4_length, 0);
2026
+ rb_define_method(SM_KLASS(vec4), "to_s", sm_vec4_to_s, 0);
2027
+ rb_define_method(SM_KLASS(vec4), "address", sm_get_address, 0);
2028
+ rb_define_method(SM_KLASS(vec4), "copy", sm_vec4_copy, -1);
2029
+ rb_define_method(SM_KLASS(vec4), "normalize", sm_vec4_normalize, -1);
2030
+ rb_define_method(SM_KLASS(vec4), "inverse", sm_vec4_inverse, -1);
2031
+ rb_define_method(SM_KLASS(vec4), "negate", sm_vec4_negate, -1);
2032
+ rb_define_method(SM_KLASS(vec4), "multiply_vec4", sm_vec4_multiply, -1);
2033
+ rb_define_method(SM_KLASS(vec4), "add", sm_vec4_add, -1);
2034
+ rb_define_method(SM_KLASS(vec4), "subtract", sm_vec4_subtract, -1);
2035
+ rb_define_method(SM_KLASS(vec4), "dot_product", sm_vec4_dot_product, 1);
2036
+ rb_define_method(SM_KLASS(vec4), "magnitude_squared", sm_vec4_magnitude_squared, 0);
2037
+ rb_define_method(SM_KLASS(vec4), "magnitude", sm_vec4_magnitude, 0);
2038
+ rb_define_method(SM_KLASS(vec4), "scale", sm_vec4_scale, -1);
2039
+ rb_define_method(SM_KLASS(vec4), "divide", sm_vec4_divide, -1);
2040
+ rb_define_method(SM_KLASS(vec4), "==", sm_vec4_equals, 1);
2041
+
2042
+ rb_define_singleton_method(SM_KLASS(quat), "new", sm_quat_new, -1);
2043
+ rb_define_singleton_method(SM_KLASS(quat), "angle_axis", sm_quat_angle_axis, -1);
2044
+ rb_define_method(SM_KLASS(quat), "initialize", sm_quat_init, -1);
2045
+ rb_define_method(SM_KLASS(quat), "set", sm_quat_init, -1);
2046
+ rb_define_method(SM_KLASS(quat), "load_identity", sm_quat_identity, 0);
2047
+ rb_define_method(SM_KLASS(quat), "fetch", sm_quat_fetch, 1);
2048
+ rb_define_method(SM_KLASS(quat), "store", sm_quat_store, 2);
2049
+ rb_define_method(SM_KLASS(quat), "size", sm_quat_size, 0);
2050
+ rb_define_method(SM_KLASS(quat), "length", sm_quat_length, 0);
2051
+ rb_define_method(SM_KLASS(quat), "to_s", sm_quat_to_s, 0);
2052
+ rb_define_method(SM_KLASS(quat), "address", sm_get_address, 0);
2053
+ rb_define_method(SM_KLASS(quat), "copy", sm_quat_copy, -1);
2054
+ rb_define_method(SM_KLASS(quat), "inverse", sm_quat_inverse, -1);
2055
+ rb_define_method(SM_KLASS(quat), "negate", sm_quat_negate, -1);
2056
+ rb_define_method(SM_KLASS(quat), "multiply_quat", sm_quat_multiply, -1);
2057
+ rb_define_method(SM_KLASS(quat), "multiply_vec3", sm_quat_multiply_vec3, -1);
2058
+ rb_define_method(SM_KLASS(quat), "normalize", sm_quat_normalize, -1);
2059
+ rb_define_method(SM_KLASS(quat), "scale", sm_quat_scale, -1);
2060
+ rb_define_method(SM_KLASS(quat), "divide", sm_quat_divide, -1);
2061
+ rb_define_method(SM_KLASS(quat), "add", sm_quat_add, -1);
2062
+ rb_define_method(SM_KLASS(quat), "subtract", sm_quat_subtract, -1);
2063
+ rb_define_method(SM_KLASS(quat), "slerp", sm_quat_slerp, -1);
2064
+ // Borrow some functions from vec4
2065
+ rb_define_method(SM_KLASS(quat), "dot_product", sm_vec4_dot_product, 1);
2066
+ rb_define_method(SM_KLASS(quat), "magnitude_squared", sm_vec4_magnitude_squared, 0);
2067
+ rb_define_method(SM_KLASS(quat), "magnitude", sm_vec4_magnitude, 0);
2068
+ rb_define_method(SM_KLASS(quat), "==", sm_vec4_equals, 1);
2069
+
2070
+ rb_define_singleton_method(SM_KLASS(mat4), "new", sm_mat4_new, -1);
2071
+ rb_define_singleton_method(SM_KLASS(mat4), "translation", sm_mat4_translation, -1);
2072
+ rb_define_singleton_method(SM_KLASS(mat4), "angle_axis", sm_mat4_angle_axis, -1);
2073
+ rb_define_singleton_method(SM_KLASS(mat4), "frustum", sm_mat4_frustum, -1);
2074
+ rb_define_singleton_method(SM_KLASS(mat4), "perspective", sm_mat4_perspective, -1);
2075
+ rb_define_singleton_method(SM_KLASS(mat4), "orthographic", sm_mat4_orthographic, -1);
2076
+ rb_define_singleton_method(SM_KLASS(mat4), "look_at", sm_mat4_look_at, -1);
2077
+ rb_define_method(SM_KLASS(mat4), "initialize", sm_mat4_init, -1);
2078
+ rb_define_method(SM_KLASS(mat4), "set", sm_mat4_init, -1);
2079
+ rb_define_method(SM_KLASS(mat4), "load_identity", sm_mat4_identity, 0);
2080
+ rb_define_method(SM_KLASS(mat4), "fetch", sm_mat4_fetch, 1);
2081
+ rb_define_method(SM_KLASS(mat4), "store", sm_mat4_store, 2);
2082
+ rb_define_method(SM_KLASS(mat4), "size", sm_mat4_size, 0);
2083
+ rb_define_method(SM_KLASS(mat4), "length", sm_mat4_length, 0);
2084
+ rb_define_method(SM_KLASS(mat4), "to_s", sm_mat4_to_s, 0);
2085
+ rb_define_method(SM_KLASS(mat4), "address", sm_get_address, 0);
2086
+ rb_define_method(SM_KLASS(mat4), "copy", sm_mat4_copy, -1);
2087
+ rb_define_method(SM_KLASS(mat4), "transpose", sm_mat4_transpose, -1);
2088
+ rb_define_method(SM_KLASS(mat4), "inverse_orthogonal", sm_mat4_inverse_orthogonal, -1);
2089
+ rb_define_method(SM_KLASS(mat4), "adjoint", sm_mat4_adjoint, -1);
2090
+ rb_define_method(SM_KLASS(mat4), "scale", sm_mat4_scale, -1);
2091
+ rb_define_method(SM_KLASS(mat4), "multiply_mat4", sm_mat4_multiply, -1);
2092
+ rb_define_method(SM_KLASS(mat4), "multiply_vec4", sm_mat4_multiply_vec4, -1);
2093
+ rb_define_method(SM_KLASS(mat4), "transform_vec3", sm_mat4_transform_vec3, -1);
2094
+ rb_define_method(SM_KLASS(mat4), "rotate_vec3", sm_mat4_rotate_vec3, -1);
2095
+ rb_define_method(SM_KLASS(mat4), "inverse_rotate_vec3", sm_mat4_inv_rotate_vec3, -1);
2096
+ rb_define_method(SM_KLASS(mat4), "inverse_affine", sm_mat4_inverse_affine, -1);
2097
+ rb_define_method(SM_KLASS(mat4), "inverse_general", sm_mat4_inverse_general, -1);
2098
+ rb_define_method(SM_KLASS(mat4), "determinant", sm_mat4_determinant, 0);
2099
+ rb_define_method(SM_KLASS(mat4), "translate", sm_mat4_translate, -1);
2100
+ rb_define_method(SM_KLASS(mat4), "set_row3", sm_mat4_set_row3, 2);
2101
+ rb_define_method(SM_KLASS(mat4), "set_row4", sm_mat4_set_row4, 2);
2102
+ rb_define_method(SM_KLASS(mat4), "get_row3", sm_mat4_get_row3, -1);
2103
+ rb_define_method(SM_KLASS(mat4), "get_row4", sm_mat4_get_row4, -1);
2104
+ rb_define_method(SM_KLASS(mat4), "set_column3", sm_mat4_set_column3, 2);
2105
+ rb_define_method(SM_KLASS(mat4), "set_column4", sm_mat4_set_column4, 2);
2106
+ rb_define_method(SM_KLASS(mat4), "get_column3", sm_mat4_get_column3, -1);
2107
+ rb_define_method(SM_KLASS(mat4), "get_column4", sm_mat4_get_column4, -1);
2108
+ rb_define_method(SM_KLASS(mat4), "==", sm_mat4_equals, 1);
2109
+
2110
+ #ifdef BUILD_ARRAY_TYPE
2111
+ REG_SM_ARR_TYPE(vec3, "Vec3Array");
2112
+ REG_SM_ARR_TYPE(vec4, "Vec4Array");
2113
+ REG_SM_ARR_TYPE(quat, "QuatArray");
2114
+ REG_SM_ARR_TYPE(mat4, "Mat4Array");
2115
+ #endif
2116
+
2117
+ }