snow-math 1.3.1 → 1.4.2
- checksums.yaml +4 -4
- data/README.md +49 -0
- data/ext/snow-math/maths_local.h +39 -0
- data/ext/snow-math/snow-math.c +1175 -187
- data/ext/snow-math/vec2.c +134 -0
- data/lib/snow-math.rb +2 -1
- data/lib/snow-math/inspect.rb +7 -1
- data/lib/snow-math/marshal.rb +6 -0
- data/lib/snow-math/mat3.rb +4 -0
- data/lib/snow-math/mat4.rb +5 -1
- data/lib/snow-math/ptr.rb +11 -1
- data/lib/snow-math/quat.rb +24 -0
- data/lib/snow-math/swizzle.rb +14 -7
- data/lib/snow-math/to_a.rb +29 -1
- data/lib/snow-math/vec2.rb +168 -0
- data/lib/snow-math/vec3.rb +17 -1
- data/lib/snow-math/vec4.rb +17 -1
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9a85f485d607f507213ddfc79b09c0f7429364bb
+  data.tar.gz: 3e517d63c5721f56bd5455de3ed62c0d6854581b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3f9ec0589e102f5c9dac662f77fc09cb00fa7878d4a4b9081363db47ab23748c2bef7ee64fab4b3b0425112bb3c8dcb0948ba4d6804c2f88b0cea63de545e699
+  data.tar.gz: 3f72346489f5b5ff2314fcf0c561a27279e57d881db65ee69f4ee8ab9cab74e9e1800c59dc4a1cd527b3e5c59108fcc81d6792665a23d24ec8d5d69cf42c6ad4
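
These digests can be recomputed locally to verify a downloaded copy of the gem. A minimal Ruby sketch, assuming the gem archive has already been unpacked (a .gem file is a tar archive whose members include the metadata.gz and data.tar.gz files that checksums.yaml describes; the local paths here are purely illustrative):

    require 'digest'

    # Assumed paths: the two members extracted from snow-math-1.4.2.gem,
    # e.g. via `tar -xf snow-math-1.4.2.gem`.
    %w[metadata.gz data.tar.gz].each do |member|
      puts "#{member}: #{Digest::SHA512.file(member).hexdigest}"
    end

Compare the printed values against the SHA512 entries above (and Digest::SHA1 output against the SHA1 entries).
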
data/README.md
CHANGED
@@ -10,6 +10,7 @@ snow-math is a small, fairly simple library of 3D math routines implemented in
 C with Ruby bindings. It's intended for use with OpenGL and such. Currently, it
 provides four 3D math types:
 
+- Snow::Vec2
 - Snow::Vec3
 - Snow::Vec4
 - Snow::Quat
@@ -29,6 +30,28 @@ Like so:
 
 If you prefer shorter command-line options, `-F` is a synonym for `--use-float`.
 
+All options:
+
+- `--use-float` or `-F` -- Compiles the extension using 32-bit floats instead
+  of 64-bit doubles. This is only really useful if you're concerned about
+  reducing memory usage by math types or don't want the additional precision.
+  You can specify you want doubles by passing `--use-double` or `-NF`, which
+  is essentially a no-op unless it follows a previous argument specifying
+  enabling the use of floats. It is not possible to compile the gem for both
+  types.
+
+- `--debug` or `-D` -- Compiles the extension with debugging symbols. If you
+  want to explicitly indicate that you want a release built (meaning
+  optimization and such), you can pass `--release` or `-ND`
+
+- `--use-fast-math` or `-FM` -- When not compiled for debugging, the extension
+  is built with `-ffast-math` enabled. This may mean that the math code is
+  not entirely IEEE-compliant but may produce slightly faster code. That
+  said, this is generally not a huge benefit and IEEE-compliance is often
+  better than negligible performance improvements in areas that aren't a
+  bottleneck. If you would like to explicitly disable it, you may also pass
+  `--no-fast-math` or `-NFM`.
+
 
 ## Usage
 
@@ -126,20 +149,33 @@ bodies below except where their behaviour is notably different.
 object to the block and returns an object of the same type with the results.
 In the second form, returns an Enumerator.
 
+  Provided by the Snow::ArraySupport module.
+
 - `map!(&block)` and `map!` - Same as the above, but operates on self rather
   than creating a new object.
 
+  Provided by the Snow::ArraySupport module.
+
 - `each(&object)` and `each` - Does what you think it does. Second form returns
   an Enumerator.
 
+  Provided by the Snow::ArraySupport module.
+
 - `to_a` - Returns an array of all components in the given object.
 
+  Provided by the Snow::ArraySupport module.
+
 - `to_ptr` - You have to `require 'snow-math/ptr'` for this. Returns a new
   `Fiddle::Pointer` pointing to the object's address.
 
+  Provided by the Snow::FiddlePointerSupport module.
+
 - `to_s` - Converts the object to a string that looks more or less like
   `"{ fetch(0), fetch(1), ..., fetch(length - 1) }"`.
 
+- `inspect` - does what you think it does. This is provided under the
+  Snow::InspectSupport module.
+
 
 #### Swizzling
 
@@ -149,21 +185,34 @@ that returns a new Vec3 with components Z, Y, and X of the original vector, in
 that order. The components you can use for swizzling on each type are fairly
 obvious but are as follows:
 
+- __Vec2__
+  Components: X and Y.
+  Swizzling two components returns a Vec2.
+  Swizzling three components returns a Vec3.
+  Swizzling four components returns a Vec4.
+
 - __Vec3__
   Components: X, Y, and Z.
+  Swizzling two components returns a Vec2.
   Swizzling three components returns a Vec3.
   Swizzling four components returns a Vec4.
 
 - __Vec4__
   Components: X, Y, Z, and W.
+  Swizzling two components returns a Vec2.
   Swizzling three components returns a Vec3.
   Swizzling four components returns a Vec4.
 
 - __Quat__
   Components: X, Y, Z, and W.
+  Swizzling two components returns a Vec2.
   Swizzling three components returns a Vec3.
   Swizzling four components returns a Quat.
 
+Swizzling is provided by the Snow::SwizzleSupport module. If you find yourself
+using swizzling heavily and the generated methods aren't peformant enough, it
+might be worth your time to explicitly define the ones you use the most to
+ensure they're not wasting too many cycles for you.
 
 
 ## License
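
The README additions above describe a Snow::Vec2 type that participates in the same build flags, per-component accessors, and swizzling as the existing vector types. A short usage sketch under those assumptions (the `snow-math/swizzle` require path is inferred from this gem's file list; the comments describe results loosely rather than exact output formatting):

    # gem install snow-math -- --use-float    # optional: 32-bit float build
    require 'snow-math'
    require 'snow-math/swizzle'

    v = Snow::Vec2.new(3.0, 4.0)
    v.length            # => 2 (component count)
    v.magnitude         # => 5.0
    v.dot_product(v)    # => 25.0

    # Per the swizzling rules above: two components -> Vec2, three -> Vec3,
    # four -> Vec4.
    v.yx                # a Vec2 with the components swapped
    v.xxyy.class        # => Snow::Vec4
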
data/ext/snow-math/maths_local.h
CHANGED
@@ -89,6 +89,45 @@ S_INLINE int float_equals(const s_float_t x, const s_float_t y)
 }
 
 
+/*==============================================================================
+
+  2-Component Vector (vec2_t)
+
+==============================================================================*/
+
+extern const vec2_t g_vec2_zero;
+extern const vec2_t g_vec2_one;
+
+void vec2_copy(const vec2_t in, vec2_t out);
+void vec2_set(s_float_t x, s_float_t y, vec2_t v);
+
+/*!
+ * Gets the squared length of a vector. Useful for approximations and when
+ * you don't need the actual magnitude.
+ */
+s_float_t vec2_length_squared(const vec2_t v);
+/*!
+ * Gets the length/magnitude of a vector.
+ */
+s_float_t vec2_length(const vec2_t v);
+void vec2_normalize(const vec2_t in, vec2_t out);
+
+void vec2_subtract(const vec2_t left, const vec2_t right, vec2_t out);
+void vec2_add(const vec2_t left, const vec2_t right, vec2_t out);
+void vec2_multiply(const vec2_t left, const vec2_t right, vec2_t out);
+void vec2_negate(const vec2_t v, vec2_t out);
+void vec2_inverse(const vec2_t v, vec2_t out);
+
+void vec2_project(const vec2_t in, const vec2_t normal, vec2_t out);
+void vec2_reflect(const vec2_t in, const vec2_t normal, vec2_t out);
+s_float_t vec2_dot_product(const vec2_t left, const vec2_t right);
+
+void vec2_scale(const vec2_t v, s_float_t scalar, vec2_t out);
+int vec2_divide(const vec2_t v, s_float_t divisor, vec2_t out);
+
+int vec2_equals(const vec2_t left, const vec2_t right);
+
+
 
 /*==============================================================================
 
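
The projection and reflection declarations above follow the usual conventions for those operations. As a plain-Ruby reference for what vec2_project and vec2_reflect conventionally compute (the C bodies are not part of this diff, so treat this as the textbook definition rather than a statement about the gem's exact behaviour, e.g. whether `normal` must already be unit length):

    def dot(a, b)
      a[0] * b[0] + a[1] * b[1]
    end

    def project(v, n)          # component of v along n
      s = dot(v, n) / dot(n, n)
      [n[0] * s, n[1] * s]
    end

    def reflect(v, n)          # mirror v about the line with unit normal n
      s = 2.0 * dot(v, n)
      [v[0] - s * n[0], v[1] - s * n[1]]
    end

    project([1.0, 1.0], [0.0, 1.0])   # => [0.0, 1.0]
    reflect([1.0, 1.0], [0.0, 1.0])   # => [1.0, -1.0]
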
data/ext/snow-math/snow-math.c
CHANGED
@@ -8,6 +8,7 @@ See COPYING for license information
 #include "maths_local.h"
 #include "ruby.h"
 
+#define kSM_WANT_TWO_TO_FOUR_FORMAT_LIT ("Expected a Vec2, Vec3, Vec4, or Quat, got %s")
 #define kSM_WANT_THREE_OR_FOUR_FORMAT_LIT ("Expected a Vec3, Vec4, or Quat, got %s")
 #define kSM_WANT_FOUR_FORMAT_LIT ("Expected a Vec4 or Quat, got %s")
 
@@ -95,6 +96,7 @@ static VALUE sm_mathtype_array_length(VALUE sm_self)
 ==============================================================================*/
 
 static VALUE s_sm_snowmath_mod = Qnil;
+static VALUE s_sm_vec2_klass = Qnil;
 static VALUE s_sm_vec3_klass = Qnil;
 static VALUE s_sm_vec4_klass = Qnil;
 static VALUE s_sm_quat_klass = Qnil;
@@ -114,6 +116,8 @@ static VALUE s_sm_mat4_klass = Qnil;
 pass one to another's function. The conversion is easy enough, so it's not a
 huge deal.
 */
+static VALUE sm_wrap_vec2(const vec2_t value, VALUE klass);
+static vec2_t * sm_unwrap_vec2(VALUE sm_value, vec2_t store);
 static VALUE sm_wrap_vec3(const vec3_t value, VALUE klass);
 static vec3_t * sm_unwrap_vec3(VALUE sm_value, vec3_t store);
 static VALUE sm_wrap_vec4(const vec4_t value, VALUE klass);
@@ -135,6 +139,197 @@ static mat4_t * sm_unwrap_mat4(VALUE sm_value, mat4_t store);
 
 #if BUILD_ARRAY_TYPE
 
+/*==============================================================================
+
+  Snow::Vec2Array methods (s_sm_vec2_array_klass)
+
+==============================================================================*/
+
+static VALUE s_sm_vec2_array_klass = Qnil;
+
+/*
+ * In the first form, a new typed array of Vec2 elements is allocated and
+ * returned. In the second form, a copy of a typed array of Vec2 objects is
+ * made and returned. Copied arrays do not share data.
+ *
+ * call-seq:
+ *    new(size)       -> new vec2_array
+ *    new(vec2_array) -> copy of vec2_array
+ */
+static VALUE sm_vec2_array_new(VALUE sm_self, VALUE sm_length_or_copy)
+{
+  size_t length = 0;
+  vec2_t *arr;
+  VALUE sm_type_array;
+  int copy_array = 0;
+  if ((copy_array = SM_IS_A(sm_length_or_copy, vec2_array))) {
+    length = NUM2SIZET(sm_mathtype_array_length(sm_length_or_copy));
+  } else {
+    length = NUM2SIZET(sm_length_or_copy);
+  }
+  if (length <= 0) {
+    return Qnil;
+  }
+  arr = ALLOC_N(vec2_t, length);
+  if (copy_array) {
+    const vec2_t *source;
+    Data_Get_Struct(sm_length_or_copy, vec2_t, source);
+    MEMCPY(arr, source, vec2_t, length);
+    sm_length_or_copy = sm_mathtype_array_length(sm_length_or_copy);
+    sm_self = rb_obj_class(sm_length_or_copy);
+  }
+  sm_type_array = Data_Wrap_Struct(sm_self, 0, free, arr);
+  rb_ivar_set(sm_type_array, kRB_IVAR_MATHARRAY_LENGTH, sm_length_or_copy);
+  rb_ivar_set(sm_type_array, kRB_IVAR_MATHARRAY_CACHE, rb_ary_new2((long)length));
+  rb_obj_call_init(sm_type_array, 0, 0);
+  return sm_type_array;
+}
+
+
+
+/*
+ * Resizes the array to new_length and returns self.
+ *
+ * If resizing to a length smaller than the previous length, excess array
+ * elements are discarded and the array is truncated. Otherwise, when resizing
+ * the array to a greater length than previous, new elements in the array will
+ * contain garbage values.
+ *
+ * If new_length is equal to self.length, the call does nothing to the array.
+ *
+ * Attempting to resize an array to a new length of zero or less will raise a
+ * RangeError. Do not try to resize arrays to zero or less. Do not be that
+ * person.
+ *
+ * call-seq:
+ *    resize!(new_length) -> self
+ */
+static VALUE sm_vec2_array_resize(VALUE sm_self, VALUE sm_new_length)
+{
+  size_t new_length;
+  size_t old_length;
+
+  old_length = NUM2SIZET(sm_mathtype_array_length(sm_self));
+  new_length = NUM2SIZET(sm_new_length);
+
+  if (old_length == new_length) {
+    /* No change, done */
+    return sm_self;
+  } else if (new_length < 1) {
+    /* Someone decided to be that person. */
+    rb_raise(rb_eRangeError,
+      "Cannot resize array to length less than or equal to 0.");
+    return sm_self;
+  }
+
+  REALLOC_N(RDATA(sm_self)->data, vec2_t, new_length);
+  rb_ivar_set(sm_self, kRB_IVAR_MATHARRAY_LENGTH, sm_new_length);
+  rb_ary_clear(rb_ivar_get(sm_self, kRB_IVAR_MATHARRAY_CACHE));
+
+  return sm_self;
+}
+
+
+
+/*
+ * Fetches a Vec2 from the array at the index and returns it. The returned Vec2
+ * may be a cached object. In all cases, values returned from a typed array are
+ * associated with the memory of the array and not given their own memory. So,
+ * modifying a Vec2 fetched from an array modifies the array's data.
+ *
+ * As a result, objects returned by a Vec2Array should not be considered
+ * thread-safe, nor should manipulating a Vec2Array be considered thread-safe
+ * either. If you want to work with data returned from an array without altering
+ * the array data, you should call Vec2#dup or Vec2#copy to get a new Vec2 with a
+ * copy of the array object's data.
+ *
+ * call-seq: fetch(index) -> vec2
+ */
+static VALUE sm_vec2_array_fetch(VALUE sm_self, VALUE sm_index)
+{
+  vec2_t *arr;
+  size_t length = NUM2SIZET(sm_mathtype_array_length(sm_self));
+  size_t index = NUM2SIZET(sm_index);
+  VALUE sm_inner;
+  VALUE sm_cache;
+  if (index >= length) {
+    rb_raise(rb_eRangeError,
+      "Index %zu out of bounds for array with length %zu",
+      index, length);
+  }
+
+  sm_cache = rb_ivar_get(sm_self, kRB_IVAR_MATHARRAY_CACHE);
+  if (!RTEST(sm_cache)) {
+    rb_raise(rb_eRuntimeError, "No cache available");
+  }
+  sm_inner = rb_ary_entry(sm_cache, (long)index);
+
+  if (!RTEST(sm_inner)) {
+    /* No cached value, create one. */
+    Data_Get_Struct(sm_self, vec2_t, arr);
+    sm_inner = Data_Wrap_Struct(s_sm_vec2_klass, 0, 0, arr[index]);
+    rb_ivar_set(sm_inner, kRB_IVAR_MATHARRAY_SOURCE, sm_self);
+    /* Store the Vec2 in the cache */
+    rb_ary_store(sm_cache, (long)index, sm_inner);
+  }
+
+  return sm_inner;
+}
+
+
+
+/*
+ * Stores a Vec2 at the given index. If the provided Vec2 is a member of the
+ * array and stored at the index, then no copy is done, otherwise the Vec2 is
+ * copied to the array.
+ *
+ * call-seq: store(index, value) -> value
+ */
+static VALUE sm_vec2_array_store(VALUE sm_self, VALUE sm_index, VALUE sm_value)
+{
+  vec2_t *arr;
+  vec2_t *value;
+  size_t length = NUM2SIZET(sm_mathtype_array_length(sm_self));
+  size_t index = NUM2SIZET(sm_index);
+
+  if (index >= length) {
+    rb_raise(rb_eRangeError,
+      "Index %zu out of bounds for array with length %zu",
+      index, length);
+  } else if (!SM_IS_A(sm_value, vec2) && !SM_IS_A(sm_value, vec3) && !SM_IS_A(sm_value, vec4) && !SM_IS_A(sm_value, quat)) {
+    rb_raise(rb_eTypeError,
+      "Invalid value to store: expected Vec2, Vec3, Vec4, or Quat, got %s",
+      rb_obj_classname(sm_value));
+  }
+
+  Data_Get_Struct(sm_self, vec2_t, arr);
+  value = sm_unwrap_vec2(sm_value, NULL);
+
+  if (value == &arr[index]) {
+    /* The object's part of the array, don't bother copying */
+    return sm_value;
+  }
+
+  vec2_copy(*value, arr[index]);
+  return sm_value;
+}
+
+
+
+/*
+ * Returns the length of the array.
+ *
+ * call-seq: length -> fixnum
+ */
+static VALUE sm_vec2_array_size(VALUE sm_self)
+{
+  size_t length = NUM2SIZET(sm_mathtype_array_length(sm_self));
+  return SIZET2NUM(length * sizeof(vec2_t));
+}
+
+
+
+
 
 /*==============================================================================
 
   Snow::Vec3Array methods (s_sm_vec3_array_klass)
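
The Vec2Array bindings above mirror the existing typed arrays: contiguous storage, cached fetch objects that alias the array's memory, and resize!. A small Ruby sketch of how that is typically used, assuming the gem was built with the typed-array support enabled (the BUILD_ARRAY_TYPE guard) and the class is exposed as Snow::Vec2Array like the other array types; the aliasing behaviour is taken from the fetch documentation above:

    require 'snow-math'

    arr = Snow::Vec2Array.new(4)            # 4 uninitialized Vec2 elements
    arr.store(0, Snow::Vec2.new(1.0, 2.0))

    v = arr.fetch(0)                        # aliases the array's memory
    v.store(1, 5.0)                         # writes through to arr's data
    arr.fetch(0).fetch(1)                   # => 5.0

    copy = v.copy                           # detached copy, safe to keep around
    arr.resize!(8)                          # grows in place; new slots hold garbage,
                                            # so re-fetch any elements grabbed earlier
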
@@ -883,220 +1078,950 @@ static VALUE sm_mat3_array_store(VALUE sm_self, VALUE sm_index, VALUE sm_value)
   } else {
     mat4_to_mat3(*sm_unwrap_mat4(sm_value, NULL), arr[index]);
   }
-  return sm_value;
+  return sm_value;
+}
+
+
+
+/*
+ * Returns the length of the array.
+ *
+ * call-seq: length -> fixnum
+ */
+static VALUE sm_mat3_array_size(VALUE sm_self)
+{
+  size_t length = NUM2SIZET(sm_mathtype_array_length(sm_self));
+  return SIZET2NUM(length * sizeof(mat3_t));
+}
+
+
+
|
1099
|
+
/*==============================================================================
|
1100
|
+
|
1101
|
+
Snow::Mat4Array methods (s_sm_mat4_array_klass)
|
1102
|
+
|
1103
|
+
==============================================================================*/
|
1104
|
+
|
1105
|
+
static VALUE s_sm_mat4_array_klass = Qnil;
|
1106
|
+
|
1107
|
+
/*
|
1108
|
+
* In the first form, a new typed array of Mat4 elements is allocated and
|
1109
|
+
* returned. In the second form, a copy of a typed array of Mat4 objects is
|
1110
|
+
* made and returned. Copied arrays do not share data.
|
1111
|
+
*
|
1112
|
+
* call-seq:
|
1113
|
+
* new(size) -> new mat4_array
|
1114
|
+
* new(mat4_array) -> copy of mat4_array
|
1115
|
+
*/
|
1116
|
+
static VALUE sm_mat4_array_new(VALUE sm_self, VALUE sm_length_or_copy)
|
1117
|
+
{
|
1118
|
+
size_t length = 0;
|
1119
|
+
mat4_t *arr;
|
1120
|
+
VALUE sm_type_array;
|
1121
|
+
int copy_array = 0;
|
1122
|
+
if ((copy_array = SM_IS_A(sm_length_or_copy, mat4_array))) {
|
1123
|
+
length = NUM2SIZET(sm_mathtype_array_length(sm_length_or_copy));
|
1124
|
+
} else {
|
1125
|
+
length = NUM2SIZET(sm_length_or_copy);
|
1126
|
+
}
|
1127
|
+
if (length <= 0) {
|
1128
|
+
return Qnil;
|
1129
|
+
}
|
1130
|
+
arr = ALLOC_N(mat4_t, length);
|
1131
|
+
if (copy_array) {
|
1132
|
+
const mat4_t *source;
|
1133
|
+
Data_Get_Struct(sm_length_or_copy, mat4_t, source);
|
1134
|
+
MEMCPY(arr, source, mat4_t, length);
|
1135
|
+
sm_length_or_copy = sm_mathtype_array_length(sm_length_or_copy);
|
1136
|
+
sm_self = rb_obj_class(sm_length_or_copy);
|
1137
|
+
}
|
1138
|
+
sm_type_array = Data_Wrap_Struct(sm_self, 0, free, arr);
|
1139
|
+
rb_ivar_set(sm_type_array, kRB_IVAR_MATHARRAY_LENGTH, sm_length_or_copy);
|
1140
|
+
rb_ivar_set(sm_type_array, kRB_IVAR_MATHARRAY_CACHE, rb_ary_new2((long)length));
|
1141
|
+
rb_obj_call_init(sm_type_array, 0, 0);
|
1142
|
+
return sm_type_array;
|
1143
|
+
}
|
1144
|
+
|
1145
|
+
|
1146
|
+
|
1147
|
+
/*
|
1148
|
+
* Resizes the array to new_length and returns self.
|
1149
|
+
*
|
1150
|
+
* If resizing to a length smaller than the previous length, excess array
|
1151
|
+
* elements are discarded and the array is truncated. Otherwise, when resizing
|
1152
|
+
* the array to a greater length than previous, new elements in the array will
|
1153
|
+
* contain garbage values.
|
1154
|
+
*
|
1155
|
+
* If new_length is equal to self.length, the call does nothing to the array.
|
1156
|
+
*
|
1157
|
+
* Attempting to resize an array to a new length of zero or less will raise a
|
1158
|
+
* RangeError. Do not try to resize arrays to zero or less. Do not be that
|
1159
|
+
* person.
|
1160
|
+
*
|
1161
|
+
* call-seq:
|
1162
|
+
* resize!(new_length) -> self
|
1163
|
+
*/
|
1164
|
+
static VALUE sm_mat4_array_resize(VALUE sm_self, VALUE sm_new_length)
|
1165
|
+
{
|
1166
|
+
size_t new_length;
|
1167
|
+
size_t old_length;
|
1168
|
+
|
1169
|
+
old_length = NUM2SIZET(sm_mathtype_array_length(sm_self));
|
1170
|
+
new_length = NUM2SIZET(sm_new_length);
|
1171
|
+
|
1172
|
+
if (old_length == new_length) {
|
1173
|
+
/* No change, done */
|
1174
|
+
return sm_self;
|
1175
|
+
} else if (new_length < 1) {
|
1176
|
+
/* Someone decided to be that person. */
|
1177
|
+
rb_raise(rb_eRangeError,
|
1178
|
+
"Cannot resize array to length less than or equal to 0.");
|
1179
|
+
return sm_self;
|
1180
|
+
}
|
1181
|
+
|
1182
|
+
REALLOC_N(RDATA(sm_self)->data, mat4_t, new_length);
|
1183
|
+
rb_ivar_set(sm_self, kRB_IVAR_MATHARRAY_LENGTH, sm_new_length);
|
1184
|
+
rb_ary_clear(rb_ivar_get(sm_self, kRB_IVAR_MATHARRAY_CACHE));
|
1185
|
+
|
1186
|
+
return sm_self;
|
1187
|
+
}
|
1188
|
+
|
1189
|
+
|
1190
|
+
|
1191
|
+
/*
|
1192
|
+
* Fetches a Mat4 from the array at the index and returns it. The returned Mat4
|
1193
|
+
* may be a cached object. In all cases, values returned from a typed array are
|
1194
|
+
* associated with the memory of the array and not given their own memory. So,
|
1195
|
+
* modifying a Mat4 fetched from an array modifies the array's data.
|
1196
|
+
*
|
1197
|
+
* As a result, objects returned by a Mat4Array should not be considered
|
1198
|
+
* thread-safe, nor should manipulating a Mat4Array be considered thread-safe
|
1199
|
+
* either. If you want to work with data returned from an array without altering
|
1200
|
+
* the array data, you should call Mat4#dup or Mat4#copy to get a new Mat4 with a
|
1201
|
+
* copy of the array object's data.
|
1202
|
+
*
|
1203
|
+
* call-seq: fetch(index) -> mat4
|
1204
|
+
*/
|
1205
|
+
static VALUE sm_mat4_array_fetch(VALUE sm_self, VALUE sm_index)
|
1206
|
+
{
|
1207
|
+
mat4_t *arr;
|
1208
|
+
size_t length = NUM2SIZET(sm_mathtype_array_length(sm_self));
|
1209
|
+
size_t index = NUM2SIZET(sm_index);
|
1210
|
+
VALUE sm_inner;
|
1211
|
+
VALUE sm_cache;
|
1212
|
+
if (index >= length) {
|
1213
|
+
rb_raise(rb_eRangeError,
|
1214
|
+
"Index %zu out of bounds for array with length %zu",
|
1215
|
+
index, length);
|
1216
|
+
}
|
1217
|
+
|
1218
|
+
sm_cache = rb_ivar_get(sm_self, kRB_IVAR_MATHARRAY_CACHE);
|
1219
|
+
if (!RTEST(sm_cache)) {
|
1220
|
+
rb_raise(rb_eRuntimeError, "No cache available");
|
1221
|
+
}
|
1222
|
+
sm_inner = rb_ary_entry(sm_cache, (long)index);
|
1223
|
+
|
1224
|
+
if (!RTEST(sm_inner)) {
|
1225
|
+
/* No cached value, create one. */
|
1226
|
+
Data_Get_Struct(sm_self, mat4_t, arr);
|
1227
|
+
sm_inner = Data_Wrap_Struct(s_sm_mat4_klass, 0, 0, arr[index]);
|
1228
|
+
rb_ivar_set(sm_inner, kRB_IVAR_MATHARRAY_SOURCE, sm_self);
|
1229
|
+
/* Store the Mat4 in the cache */
|
1230
|
+
rb_ary_store(sm_cache, (long)index, sm_inner);
|
1231
|
+
}
|
1232
|
+
|
1233
|
+
return sm_inner;
|
1234
|
+
}
|
1235
|
+
|
1236
|
+
|
1237
|
+
|
1238
|
+
/*
|
1239
|
+
* Stores a Mat4 at the given index. If the provided Mat4 is a member of the
|
1240
|
+
* array and stored at the index, then no copy is done, otherwise the Mat4 is
|
1241
|
+
* copied to the array.
|
1242
|
+
*
|
1243
|
+
* If the value stored is a Mat3, it will be converted to a Mat4 for storage,
|
1244
|
+
* though this will not modify the value directly.
|
1245
|
+
*
|
1246
|
+
* call-seq: store(index, value) -> value
|
1247
|
+
*/
|
1248
|
+
static VALUE sm_mat4_array_store(VALUE sm_self, VALUE sm_index, VALUE sm_value)
|
1249
|
+
{
|
1250
|
+
mat4_t *arr;
|
1251
|
+
size_t length = NUM2SIZET(sm_mathtype_array_length(sm_self));
|
1252
|
+
size_t index = NUM2SIZET(sm_index);
|
1253
|
+
int is_mat4 = 0;
|
1254
|
+
|
1255
|
+
if (index >= length) {
|
1256
|
+
rb_raise(rb_eRangeError,
|
1257
|
+
"Index %zu out of bounds for array with length %zu",
|
1258
|
+
index, length);
|
1259
|
+
} else if (!(is_mat4 = SM_IS_A(sm_value, mat4)) && !SM_IS_A(sm_value, mat3)) {
|
1260
|
+
rb_raise(rb_eTypeError,
|
1261
|
+
"Invalid value to store: expected Mat3 or Mat4, got %s",
|
1262
|
+
rb_obj_classname(sm_value));
|
1263
|
+
}
|
1264
|
+
|
1265
|
+
Data_Get_Struct(sm_self, mat4_t, arr);
|
1266
|
+
|
1267
|
+
if (is_mat4) {
|
1268
|
+
mat4_t *value = sm_unwrap_mat4(sm_value, NULL);
|
1269
|
+
if (value == &arr[index]) {
|
1270
|
+
/* The object's part of the array, don't bother copying */
|
1271
|
+
return sm_value;
|
1272
|
+
}
|
1273
|
+
mat4_copy(*value, arr[index]);
|
1274
|
+
} else {
|
1275
|
+
mat3_to_mat4(*sm_unwrap_mat3(sm_value, NULL), arr[index]);
|
1276
|
+
}
|
1277
|
+
return sm_value;
|
1278
|
+
}
|
1279
|
+
|
1280
|
+
|
1281
|
+
|
1282
|
+
/*
|
1283
|
+
* Returns the length of the array.
|
1284
|
+
*
|
1285
|
+
* call-seq: length -> fixnum
|
1286
|
+
*/
|
1287
|
+
static VALUE sm_mat4_array_size(VALUE sm_self)
|
1288
|
+
{
|
1289
|
+
size_t length = NUM2SIZET(sm_mathtype_array_length(sm_self));
|
1290
|
+
return SIZET2NUM(length * sizeof(mat4_t));
|
1291
|
+
}
|
1292
|
+
|
1293
|
+
|
1294
|
+
#endif /* BUILD_ARRAY_TYPE */
|
1295
|
+
|
1296
|
+
|
1297
|
+
|
1298
|
+
/*==============================================================================
|
1299
|
+
|
1300
|
+
vec2_t functions
|
1301
|
+
|
1302
|
+
==============================================================================*/
|
1303
|
+
|
1304
|
+
static VALUE sm_wrap_vec2(const vec2_t value, VALUE klass)
|
1305
|
+
{
|
1306
|
+
vec2_t *copy;
|
1307
|
+
VALUE sm_wrapped = Qnil;
|
1308
|
+
if (!RTEST(klass)) {
|
1309
|
+
klass = s_sm_vec2_klass;
|
1310
|
+
}
|
1311
|
+
sm_wrapped = Data_Make_Struct(klass, vec2_t, 0, free, copy);
|
1312
|
+
if (value) {
|
1313
|
+
vec2_copy(value, *copy);
|
1314
|
+
}
|
1315
|
+
return sm_wrapped;
|
1316
|
+
}
|
1317
|
+
|
1318
|
+
|
1319
|
+
|
1320
|
+
static vec2_t *sm_unwrap_vec2(VALUE sm_value, vec2_t store)
|
1321
|
+
{
|
1322
|
+
vec2_t *value;
|
1323
|
+
Data_Get_Struct(sm_value, vec2_t, value);
|
1324
|
+
if(store) vec2_copy(*value, store);
|
1325
|
+
return value;
|
1326
|
+
}
|
1327
|
+
|
1328
|
+
|
1329
|
+
|
1330
|
+
/*
|
1331
|
+
* Gets the component of the Vec2 at the given index.
|
1332
|
+
*
|
1333
|
+
* call-seq: fetch(index) -> float
|
1334
|
+
*/
|
1335
|
+
static VALUE sm_vec2_fetch (VALUE sm_self, VALUE sm_index)
|
1336
|
+
{
|
1337
|
+
static const int max_index = sizeof(vec2_t) / sizeof(s_float_t);
|
1338
|
+
const vec2_t *self = sm_unwrap_vec2(sm_self, NULL);
|
1339
|
+
int index = NUM2INT(sm_index);
|
1340
|
+
if (index < 0 || index >= max_index) {
|
1341
|
+
rb_raise(rb_eRangeError,
|
1342
|
+
"Index %d is out of bounds, must be from 0 through %d", index, max_index - 1);
|
1343
|
+
}
|
1344
|
+
return rb_float_new(self[0][NUM2INT(sm_index)]);
|
1345
|
+
}
|
1346
|
+
|
1347
|
+
|
1348
|
+
|
1349
|
+
/*
|
1350
|
+
* Sets the Vec2's component at the index to the value.
|
1351
|
+
*
|
1352
|
+
* call-seq: store(index, value) -> value
|
1353
|
+
*/
|
1354
|
+
static VALUE sm_vec2_store (VALUE sm_self, VALUE sm_index, VALUE sm_value)
|
1355
|
+
{
|
1356
|
+
static const int max_index = sizeof(vec2_t) / sizeof(s_float_t);
|
1357
|
+
vec2_t *self = sm_unwrap_vec2(sm_self, NULL);
|
1358
|
+
int index = NUM2INT(sm_index);
|
1359
|
+
if (index < 0 || index >= max_index) {
|
1360
|
+
rb_raise(rb_eRangeError,
|
1361
|
+
"Index %d is out of bounds, must be from 0 through %d", index, max_index - 1);
|
1362
|
+
}
|
1363
|
+
self[0][index] = (s_float_t)rb_num2dbl(sm_value);
|
1364
|
+
return sm_value;
|
1365
|
+
}
|
1366
|
+
|
1367
|
+
|
1368
|
+
|
1369
|
+
/*
|
1370
|
+
* Returns the length in bytes of the Vec2. When compiled to use doubles as the
|
1371
|
+
* base type, this is always 16. Otherwise, when compiled to use floats, it's
|
1372
|
+
* always 8.
|
1373
|
+
*
|
1374
|
+
* call-seq: size -> fixnum
|
1375
|
+
*/
|
1376
|
+
static VALUE sm_vec2_size (VALUE self)
|
1377
|
+
{
|
1378
|
+
return SIZET2NUM(sizeof(vec2_t));
|
1379
|
+
}
|
1380
|
+
|
1381
|
+
|
1382
|
+
|
1383
|
+
/*
|
1384
|
+
* Returns the length of the Vec2 in components. Result is always 2.
|
1385
|
+
*
|
1386
|
+
* call-seq: length -> fixnum
|
1387
|
+
*/
|
1388
|
+
static VALUE sm_vec2_length (VALUE self)
|
1389
|
+
{
|
1390
|
+
return SIZET2NUM(sizeof(vec2_t) / sizeof(s_float_t));
|
1391
|
+
}
|
1392
|
+
|
1393
|
+
|
1394
|
+
|
1395
|
+
/*
|
1396
|
+
* Returns a copy of self.
|
1397
|
+
*
|
1398
|
+
* call-seq:
|
1399
|
+
* copy(output = nil) -> output or new vec2
|
1400
|
+
*/
|
1401
|
+
static VALUE sm_vec2_copy(int argc, VALUE *argv, VALUE sm_self)
|
1402
|
+
{
|
1403
|
+
VALUE sm_out;
|
1404
|
+
vec2_t *self;
|
1405
|
+
rb_scan_args(argc, argv, "01", &sm_out);
|
1406
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1407
|
+
if (argc == 1) {
|
1408
|
+
if (!RTEST(sm_out)) {
|
1409
|
+
goto SM_LABEL(skip_output);
|
1410
|
+
}{
|
1411
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1412
|
+
rb_raise(rb_eTypeError,
|
1413
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1414
|
+
rb_obj_classname(sm_out));
|
1415
|
+
return Qnil;
|
1416
|
+
}
|
1417
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1418
|
+
vec2_copy (*self, *output);
|
1419
|
+
}} else if (argc == 0) {
|
1420
|
+
SM_LABEL(skip_output): {
|
1421
|
+
vec2_t output;
|
1422
|
+
vec2_copy (*self, output);
|
1423
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1424
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1425
|
+
}} else {
|
1426
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to copy");
|
1427
|
+
}
|
1428
|
+
return sm_out;
|
1429
|
+
}
|
1430
|
+
|
1431
|
+
|
1432
|
+
|
1433
|
+
/*
|
1434
|
+
* Returns a vector whose components are the multiplicative inverse of this
|
1435
|
+
* vector's.
|
1436
|
+
*
|
1437
|
+
* call-seq:
|
1438
|
+
* normalize(output = nil) -> output or new vec2
|
1439
|
+
*/
|
1440
|
+
static VALUE sm_vec2_normalize(int argc, VALUE *argv, VALUE sm_self)
|
1441
|
+
{
|
1442
|
+
VALUE sm_out;
|
1443
|
+
vec2_t *self;
|
1444
|
+
rb_scan_args(argc, argv, "01", &sm_out);
|
1445
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1446
|
+
if (argc == 1) {
|
1447
|
+
if (!RTEST(sm_out)) {
|
1448
|
+
goto SM_LABEL(skip_output);
|
1449
|
+
}{
|
1450
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1451
|
+
rb_raise(rb_eTypeError,
|
1452
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1453
|
+
rb_obj_classname(sm_out));
|
1454
|
+
return Qnil;
|
1455
|
+
}
|
1456
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1457
|
+
vec2_normalize (*self, *output);
|
1458
|
+
}} else if (argc == 0) {
|
1459
|
+
SM_LABEL(skip_output): {
|
1460
|
+
vec2_t output;
|
1461
|
+
vec2_normalize (*self, output);
|
1462
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1463
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1464
|
+
}} else {
|
1465
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to normalize");
|
1466
|
+
}
|
1467
|
+
return sm_out;
|
1468
|
+
}
|
1469
|
+
|
1470
|
+
|
1471
|
+
|
1472
|
+
/*
|
1473
|
+
* Returns a vector whose components are the multiplicative inverse of this
|
1474
|
+
* vector's.
|
1475
|
+
*
|
1476
|
+
* call-seq:
|
1477
|
+
* inverse(output = nil) -> output or new vec2
|
1478
|
+
*/
|
1479
|
+
static VALUE sm_vec2_inverse(int argc, VALUE *argv, VALUE sm_self)
|
1480
|
+
{
|
1481
|
+
VALUE sm_out;
|
1482
|
+
vec2_t *self;
|
1483
|
+
rb_scan_args(argc, argv, "01", &sm_out);
|
1484
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1485
|
+
if (argc == 1) {
|
1486
|
+
if (!RTEST(sm_out)) {
|
1487
|
+
goto SM_LABEL(skip_output);
|
1488
|
+
}{
|
1489
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1490
|
+
rb_raise(rb_eTypeError,
|
1491
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1492
|
+
rb_obj_classname(sm_out));
|
1493
|
+
return Qnil;
|
1494
|
+
}
|
1495
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1496
|
+
vec2_inverse (*self, *output);
|
1497
|
+
}} else if (argc == 0) {
|
1498
|
+
SM_LABEL(skip_output): {
|
1499
|
+
vec2_t output;
|
1500
|
+
vec2_inverse (*self, output);
|
1501
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1502
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1503
|
+
}} else {
|
1504
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to inverse");
|
1505
|
+
}
|
1506
|
+
return sm_out;
|
1507
|
+
}
|
1508
|
+
|
1509
|
+
|
1510
|
+
|
1511
|
+
/*
|
1512
|
+
* Negates this vector's components and returns the result.
|
1513
|
+
*
|
1514
|
+
* call-seq:
|
1515
|
+
* negate(output = nil) -> output or new vec2
|
1516
|
+
*/
|
1517
|
+
static VALUE sm_vec2_negate(int argc, VALUE *argv, VALUE sm_self)
|
1518
|
+
{
|
1519
|
+
VALUE sm_out;
|
1520
|
+
vec2_t *self;
|
1521
|
+
rb_scan_args(argc, argv, "01", &sm_out);
|
1522
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1523
|
+
if (argc == 1) {
|
1524
|
+
if (!RTEST(sm_out)) {
|
1525
|
+
goto SM_LABEL(skip_output);
|
1526
|
+
}{
|
1527
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1528
|
+
rb_raise(rb_eTypeError,
|
1529
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1530
|
+
rb_obj_classname(sm_out));
|
1531
|
+
return Qnil;
|
1532
|
+
}
|
1533
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1534
|
+
vec2_negate (*self, *output);
|
1535
|
+
}} else if (argc == 0) {
|
1536
|
+
SM_LABEL(skip_output): {
|
1537
|
+
vec2_t output;
|
1538
|
+
vec2_negate (*self, output);
|
1539
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1540
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1541
|
+
}} else {
|
1542
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to negate");
|
1543
|
+
}
|
1544
|
+
return sm_out;
|
1545
|
+
}
|
1546
|
+
|
1547
|
+
|
1548
|
+
|
1549
|
+
/*
|
1550
|
+
* Projects this vector onto a normal vector and returns the result.
|
1551
|
+
*
|
1552
|
+
* call-seq:
|
1553
|
+
* project(normal, output = nil) -> output or new vec2
|
1554
|
+
*/
|
1555
|
+
static VALUE sm_vec2_project(int argc, VALUE *argv, VALUE sm_self)
|
1556
|
+
{
|
1557
|
+
VALUE sm_rhs;
|
1558
|
+
VALUE sm_out;
|
1559
|
+
vec2_t *self;
|
1560
|
+
vec2_t *rhs;
|
1561
|
+
rb_scan_args(argc, argv, "11", &sm_rhs, &sm_out);
|
1562
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1563
|
+
if (!SM_IS_A(sm_rhs, vec2) && !SM_IS_A(sm_rhs, vec3) && !SM_IS_A(sm_rhs, vec4) && !SM_IS_A(sm_rhs, quat)) {
|
1564
|
+
rb_raise(rb_eTypeError,
|
1565
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1566
|
+
rb_obj_classname(sm_rhs));
|
1567
|
+
return Qnil;
|
1568
|
+
}
|
1569
|
+
rhs = sm_unwrap_vec2(sm_rhs, NULL);
|
1570
|
+
if (argc == 2) {
|
1571
|
+
if (!RTEST(sm_out)) {
|
1572
|
+
goto SM_LABEL(skip_output);
|
1573
|
+
}{
|
1574
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1575
|
+
rb_raise(rb_eTypeError,
|
1576
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1577
|
+
rb_obj_classname(sm_out));
|
1578
|
+
return Qnil;
|
1579
|
+
}
|
1580
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1581
|
+
vec2_project(*self, *rhs, *output);
|
1582
|
+
}} else if (argc == 1) {
|
1583
|
+
SM_LABEL(skip_output): {
|
1584
|
+
vec2_t output;
|
1585
|
+
vec2_project(*self, *rhs, output);
|
1586
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1587
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1588
|
+
}} else {
|
1589
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to project");
|
1590
|
+
}
|
1591
|
+
return sm_out;
|
1592
|
+
}
|
1593
|
+
|
1594
|
+
|
1595
|
+
|
1596
|
+
/*
|
1597
|
+
* Reflects this vector against a normal vector and returns the result.
|
1598
|
+
*
|
1599
|
+
* call-seq:
|
1600
|
+
* reflect(normal, output = nil) -> output or new vec2
|
1601
|
+
*/
|
1602
|
+
static VALUE sm_vec2_reflect(int argc, VALUE *argv, VALUE sm_self)
|
1603
|
+
{
|
1604
|
+
VALUE sm_rhs;
|
1605
|
+
VALUE sm_out;
|
1606
|
+
vec2_t *self;
|
1607
|
+
vec2_t *rhs;
|
1608
|
+
rb_scan_args(argc, argv, "11", &sm_rhs, &sm_out);
|
1609
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1610
|
+
if (!SM_IS_A(sm_rhs, vec2) && !SM_IS_A(sm_rhs, vec3) && !SM_IS_A(sm_rhs, vec4) && !SM_IS_A(sm_rhs, quat)) {
|
1611
|
+
rb_raise(rb_eTypeError,
|
1612
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1613
|
+
rb_obj_classname(sm_rhs));
|
1614
|
+
return Qnil;
|
1615
|
+
}
|
1616
|
+
rhs = sm_unwrap_vec2(sm_rhs, NULL);
|
1617
|
+
if (argc == 2) {
|
1618
|
+
if (!RTEST(sm_out)) {
|
1619
|
+
goto SM_LABEL(skip_output);
|
1620
|
+
}{
|
1621
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1622
|
+
rb_raise(rb_eTypeError,
|
1623
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1624
|
+
rb_obj_classname(sm_out));
|
1625
|
+
return Qnil;
|
1626
|
+
}
|
1627
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1628
|
+
vec2_reflect(*self, *rhs, *output);
|
1629
|
+
}} else if (argc == 1) {
|
1630
|
+
SM_LABEL(skip_output): {
|
1631
|
+
vec2_t output;
|
1632
|
+
vec2_reflect(*self, *rhs, output);
|
1633
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1634
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1635
|
+
}} else {
|
1636
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to reflect");
|
1637
|
+
}
|
1638
|
+
return sm_out;
|
1639
|
+
}
|
1640
|
+
|
1641
|
+
|
1642
|
+
|
1643
|
+
/*
|
1644
|
+
* Multiplies this and another vector's components together and returns the
|
1645
|
+
* result.
|
1646
|
+
*
|
1647
|
+
* call-seq:
|
1648
|
+
* multiply_vec2(vec2, output = nil) -> output or new vec2
|
1649
|
+
*/
|
1650
|
+
static VALUE sm_vec2_multiply(int argc, VALUE *argv, VALUE sm_self)
|
1651
|
+
{
|
1652
|
+
VALUE sm_rhs;
|
1653
|
+
VALUE sm_out;
|
1654
|
+
vec2_t *self;
|
1655
|
+
vec2_t *rhs;
|
1656
|
+
rb_scan_args(argc, argv, "11", &sm_rhs, &sm_out);
|
1657
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1658
|
+
if (!SM_IS_A(sm_rhs, vec2) && !SM_IS_A(sm_rhs, vec3) && !SM_IS_A(sm_rhs, vec4) && !SM_IS_A(sm_rhs, quat)) {
|
1659
|
+
rb_raise(rb_eTypeError,
|
1660
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1661
|
+
rb_obj_classname(sm_rhs));
|
1662
|
+
return Qnil;
|
1663
|
+
}
|
1664
|
+
rhs = sm_unwrap_vec2(sm_rhs, NULL);
|
1665
|
+
if (argc == 2) {
|
1666
|
+
if (!RTEST(sm_out)) {
|
1667
|
+
goto SM_LABEL(skip_output);
|
1668
|
+
}{
|
1669
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1670
|
+
rb_raise(rb_eTypeError,
|
1671
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1672
|
+
rb_obj_classname(sm_out));
|
1673
|
+
return Qnil;
|
1674
|
+
}
|
1675
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1676
|
+
vec2_multiply(*self, *rhs, *output);
|
1677
|
+
}} else if (argc == 1) {
|
1678
|
+
SM_LABEL(skip_output): {
|
1679
|
+
vec2_t output;
|
1680
|
+
vec2_multiply(*self, *rhs, output);
|
1681
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1682
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1683
|
+
}} else {
|
1684
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to multiply");
|
1685
|
+
}
|
1686
|
+
return sm_out;
|
1687
|
+
}
|
1688
|
+
|
1689
|
+
|
1690
|
+
|
1691
|
+
/*
|
1692
|
+
* Adds this and another vector's components together and returns the result.
|
1693
|
+
*
|
1694
|
+
* call-seq:
|
1695
|
+
* add(vec2, output = nil) -> output or new vec2
|
1696
|
+
*/
|
1697
|
+
static VALUE sm_vec2_add(int argc, VALUE *argv, VALUE sm_self)
|
1698
|
+
{
|
1699
|
+
VALUE sm_rhs;
|
1700
|
+
VALUE sm_out;
|
1701
|
+
vec2_t *self;
|
1702
|
+
vec2_t *rhs;
|
1703
|
+
rb_scan_args(argc, argv, "11", &sm_rhs, &sm_out);
|
1704
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1705
|
+
if (!SM_IS_A(sm_rhs, vec2) && !SM_IS_A(sm_rhs, vec3) && !SM_IS_A(sm_rhs, vec4) && !SM_IS_A(sm_rhs, quat)) {
|
1706
|
+
rb_raise(rb_eTypeError,
|
1707
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1708
|
+
rb_obj_classname(sm_rhs));
|
1709
|
+
return Qnil;
|
1710
|
+
}
|
1711
|
+
rhs = sm_unwrap_vec2(sm_rhs, NULL);
|
1712
|
+
if (argc == 2) {
|
1713
|
+
if (!RTEST(sm_out)) {
|
1714
|
+
goto SM_LABEL(skip_output);
|
1715
|
+
}{
|
1716
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1717
|
+
rb_raise(rb_eTypeError,
|
1718
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1719
|
+
rb_obj_classname(sm_out));
|
1720
|
+
return Qnil;
|
1721
|
+
}
|
1722
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1723
|
+
vec2_add(*self, *rhs, *output);
|
1724
|
+
}} else if (argc == 1) {
|
1725
|
+
SM_LABEL(skip_output): {
|
1726
|
+
vec2_t output;
|
1727
|
+
vec2_add(*self, *rhs, output);
|
1728
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1729
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1730
|
+
}} else {
|
1731
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to add");
|
1732
|
+
}
|
1733
|
+
return sm_out;
|
1734
|
+
}
|
1735
|
+
|
1736
|
+
|
1737
|
+
|
1738
|
+
/*
|
1739
|
+
* Subtracts another vector's components from this vector's and returns the
|
1740
|
+
* result.
|
1741
|
+
*
|
1742
|
+
* call-seq:
|
1743
|
+
* subtract(vec2, output = nil) -> output or new vec2
|
1744
|
+
*/
|
1745
|
+
static VALUE sm_vec2_subtract(int argc, VALUE *argv, VALUE sm_self)
|
1746
|
+
{
|
1747
|
+
VALUE sm_rhs;
|
1748
|
+
VALUE sm_out;
|
1749
|
+
vec2_t *self;
|
1750
|
+
vec2_t *rhs;
|
1751
|
+
rb_scan_args(argc, argv, "11", &sm_rhs, &sm_out);
|
1752
|
+
self = sm_unwrap_vec2(sm_self, NULL);
|
1753
|
+
if (!SM_IS_A(sm_rhs, vec2) && !SM_IS_A(sm_rhs, vec3) && !SM_IS_A(sm_rhs, vec4) && !SM_IS_A(sm_rhs, quat)) {
|
1754
|
+
rb_raise(rb_eTypeError,
|
1755
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1756
|
+
rb_obj_classname(sm_rhs));
|
1757
|
+
return Qnil;
|
1758
|
+
}
|
1759
|
+
rhs = sm_unwrap_vec2(sm_rhs, NULL);
|
1760
|
+
if (argc == 2) {
|
1761
|
+
if (!RTEST(sm_out)) {
|
1762
|
+
goto SM_LABEL(skip_output);
|
1763
|
+
}{
|
1764
|
+
if (!SM_IS_A(sm_out, vec2) && !SM_IS_A(sm_out, vec3) && !SM_IS_A(sm_out, vec4) && !SM_IS_A(sm_out, quat)) {
|
1765
|
+
rb_raise(rb_eTypeError,
|
1766
|
+
kSM_WANT_TWO_TO_FOUR_FORMAT_LIT,
|
1767
|
+
rb_obj_classname(sm_out));
|
1768
|
+
return Qnil;
|
1769
|
+
}
|
1770
|
+
vec2_t *output = sm_unwrap_vec2(sm_out, NULL);
|
1771
|
+
vec2_subtract(*self, *rhs, *output);
|
1772
|
+
}} else if (argc == 1) {
|
1773
|
+
SM_LABEL(skip_output): {
|
1774
|
+
vec2_t output;
|
1775
|
+
vec2_subtract(*self, *rhs, output);
|
1776
|
+
sm_out = sm_wrap_vec2(output, rb_obj_class(sm_self));
|
1777
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1778
|
+
}} else {
|
1779
|
+
rb_raise(rb_eArgError, "Invalid number of arguments to subtract");
|
1780
|
+
}
|
1781
|
+
return sm_out;
|
1782
|
+
}
|
1783
|
+
|
1784
|
+
|
1785
|
+
|
1786
|
+
/*
|
1787
|
+
* Returns the dot product of this and another Vec2 or the XY components of a
|
1788
|
+
* Vec3, Vec4, or Quat.
|
1789
|
+
*
|
1790
|
+
* call-seq:
|
1791
|
+
* dot_product(vec2) -> float
|
1792
|
+
* dot_product(vec3) -> float
|
1793
|
+
* dot_product(vec4) -> float
|
1794
|
+
* dot_product(quat) -> float
|
1795
|
+
*/
|
1796
|
+
static VALUE sm_vec2_dot_product(VALUE sm_self, VALUE sm_other)
|
1797
|
+
{
|
1798
|
+
if (!SM_IS_A(sm_other, vec2) &&
|
1799
|
+
!SM_IS_A(sm_other, vec3) &&
|
1800
|
+
!SM_IS_A(sm_other, vec4) &&
|
1801
|
+
!SM_IS_A(sm_other, quat)) {
|
1802
|
+
rb_raise(rb_eArgError,
|
1803
|
+
"Expected a Vec2, Vec3, Vec4, or Quat, got %s",
|
1804
|
+
rb_obj_classname(sm_other));
|
1805
|
+
return Qnil;
|
1806
|
+
}
|
1807
|
+
return rb_float_new(
|
1808
|
+
vec2_dot_product(
|
1809
|
+
*sm_unwrap_vec2(sm_self, NULL),
|
1810
|
+
*sm_unwrap_vec2(sm_other, NULL)));
|
887
1811
|
}
|
888
1812
|
|
889
1813
|
|
890
1814
|
|
891
1815
|
/*
|
892
|
-
*
|
1816
|
+
* Allocates a Vec2.
|
893
1817
|
*
|
894
|
-
* call-seq:
|
1818
|
+
* call-seq:
|
1819
|
+
* new() -> vec2 with components [0, 0]
|
1820
|
+
* new(x, y) -> vec2 with components [x, y]
|
1821
|
+
* new([x, y]) -> vec2 with components [x, y]
|
1822
|
+
* new(vec2) -> copy of vec3
|
1823
|
+
* new(vec3) -> vec2 of vec3's x and y components
|
1824
|
+
* new(vec4) -> vec2 of vec4's x and y components
|
1825
|
+
* new(quat) -> vec2 of quat's x and y components
|
895
1826
|
*/
|
896
|
-
static VALUE
|
1827
|
+
static VALUE sm_vec2_new(int argc, VALUE *argv, VALUE self)
|
897
1828
|
{
|
898
|
-
|
899
|
-
|
1829
|
+
VALUE sm_vec = sm_wrap_vec2(g_vec2_zero, self);
|
1830
|
+
rb_obj_call_init(sm_vec, argc, argv);
|
1831
|
+
return sm_vec;
|
900
1832
|
}
|
901
1833
|
|
902
1834
|
|
903
1835
|
|
904
|
-
/*==============================================================================
|
905
|
-
|
906
|
-
Snow::Mat4Array methods (s_sm_mat4_array_klass)
|
907
|
-
|
908
|
-
==============================================================================*/
|
909
|
-
|
910
|
-
static VALUE s_sm_mat4_array_klass = Qnil;
|
911
|
-
|
912
1836
|
/*
|
913
|
-
*
|
914
|
-
* returned. In the second form, a copy of a typed array of Mat4 objects is
|
915
|
-
* made and returned. Copied arrays do not share data.
|
1837
|
+
* Sets the Vec2's components.
|
916
1838
|
*
|
917
1839
|
* call-seq:
|
918
|
-
*
|
919
|
-
*
|
1840
|
+
* set(x, y) -> vec2 with components [x, y]
|
1841
|
+
* set([x, y]) -> vec2 with components [x, y]
|
1842
|
+
* set(vec2) -> copy of vec2
|
1843
|
+
* set(vec3) -> vec2 with components [vec3.xy]
|
1844
|
+
* set(vec4) -> vec2 with components [vec4.xy]
|
1845
|
+
* set(quat) -> vec2 with components [quat.xy]
|
920
1846
|
*/
|
921
|
-
static VALUE
|
1847
|
+
static VALUE sm_vec2_init(int argc, VALUE *argv, VALUE sm_self)
|
922
1848
|
{
|
923
|
-
|
924
|
-
|
925
|
-
|
926
|
-
|
927
|
-
|
928
|
-
|
929
|
-
|
930
|
-
|
931
|
-
|
932
|
-
|
933
|
-
|
1849
|
+
vec2_t *self = sm_unwrap_vec2(sm_self, NULL);
|
1850
|
+
size_t arr_index = 0;
|
1851
|
+
|
1852
|
+
switch(argc) {
|
1853
|
+
|
1854
|
+
// Default value
|
1855
|
+
case 0: { break; }
|
1856
|
+
|
1857
|
+
// Copy or by-array
|
1858
|
+
case 1: {
|
1859
|
+
if (SM_IS_A(argv[0], vec2) ||
|
1860
|
+
SM_IS_A(argv[0], vec3) ||
|
1861
|
+
SM_IS_A(argv[0], vec4) ||
|
1862
|
+
SM_IS_A(argv[0], quat)) {
|
1863
|
+
sm_unwrap_vec2(argv[0], *self);
|
1864
|
+
break;
|
1865
|
+
}
|
1866
|
+
|
1867
|
+
// Optional offset into array provided
|
1868
|
+
if (0) {
|
1869
|
+
case 2:
|
1870
|
+
if (!SM_RB_IS_A(argv[0], rb_cArray)) {
|
1871
|
+
self[0][0] = (s_float_t)rb_num2dbl(argv[0]);
|
1872
|
+
self[0][1] = (s_float_t)rb_num2dbl(argv[1]);
|
1873
|
+
break;
|
1874
|
+
}
|
1875
|
+
arr_index = NUM2SIZET(argv[1]);
|
1876
|
+
}
|
1877
|
+
|
1878
|
+
// Array of values
|
1879
|
+
VALUE arrdata = argv[0];
|
1880
|
+
const size_t arr_end = arr_index + 2;
|
1881
|
+
s_float_t *vec_elem = *self;
|
1882
|
+
for (; arr_index < arr_end; ++arr_index, ++vec_elem) {
|
1883
|
+
*vec_elem = (s_float_t)rb_num2dbl(rb_ary_entry(arrdata, (long)arr_index));
|
1884
|
+
}
|
1885
|
+
break;
|
934
1886
|
}
|
935
|
-
|
936
|
-
|
937
|
-
|
938
|
-
|
939
|
-
MEMCPY(arr, source, mat4_t, length);
|
940
|
-
sm_length_or_copy = sm_mathtype_array_length(sm_length_or_copy);
|
941
|
-
sm_self = rb_obj_class(sm_length_or_copy);
|
1887
|
+
|
1888
|
+
default: {
|
1889
|
+
rb_raise(rb_eArgError, "Invalid arguments to initialize/set");
|
1890
|
+
break;
|
942
1891
|
}
|
943
|
-
|
944
|
-
|
945
|
-
|
946
|
-
rb_obj_call_init(sm_type_array, 0, 0);
|
947
|
-
return sm_type_array;
|
1892
|
+
} // switch (argc)
|
1893
|
+
|
1894
|
+
return sm_self;
|
948
1895
|
}
|
949
1896
|
|
950
1897
|
|
951
1898
|
|
952
1899
|
/*
|
953
|
-
*
|
954
|
-
*
|
955
|
-
* If resizing to a length smaller than the previous length, excess array
|
956
|
-
* elements are discarded and the array is truncated. Otherwise, when resizing
|
957
|
-
* the array to a greater length than previous, new elements in the array will
|
958
|
-
* contain garbage values.
|
959
|
-
*
|
960
|
-
* If new_length is equal to self.length, the call does nothing to the array.
|
1900
|
+
* Returns a string representation of self.
|
961
1901
|
*
|
962
|
-
*
|
963
|
-
* RangeError. Do not try to resize arrays to zero or less. Do not be that
|
964
|
-
* person.
|
1902
|
+
* Vec2[].to_s # => "{ 0.0, 0.0 }"
|
965
1903
|
*
|
966
1904
|
* call-seq:
|
967
|
-
*
|
1905
|
+
* to_s -> string
|
968
1906
|
*/
|
969
|
-
static VALUE
|
1907
|
+
static VALUE sm_vec2_to_s(VALUE self)
|
970
1908
|
{
|
971
|
-
|
972
|
-
|
973
|
-
|
974
|
-
|
975
|
-
|
1909
|
+
const s_float_t *v;
|
1910
|
+
v = (const s_float_t *)*sm_unwrap_vec2(self, NULL);
|
1911
|
+
return rb_sprintf(
|
1912
|
+
"{ "
|
1913
|
+
"%f, %f"
|
1914
|
+
" }",
|
1915
|
+
v[0], v[1]);
|
1916
|
+
}
|
976
1917
|
|
977
|
-
if (old_length == new_length) {
|
978
|
-
/* No change, done */
|
979
|
-
return sm_self;
|
980
|
-
} else if (new_length < 1) {
|
981
|
-
/* Someone decided to be that person. */
|
982
|
-
rb_raise(rb_eRangeError,
|
983
|
-
"Cannot resize array to length less than or equal to 0.");
|
984
|
-
return sm_self;
|
985
|
-
}
|
986
1918
|
|
987
|
-
REALLOC_N(RDATA(sm_self)->data, mat4_t, new_length);
|
988
|
-
rb_ivar_set(sm_self, kRB_IVAR_MATHARRAY_LENGTH, sm_new_length);
|
989
|
-
rb_ary_clear(rb_ivar_get(sm_self, kRB_IVAR_MATHARRAY_CACHE));
|
990
1919
|
|
991
|
-
|
1920
|
+
/*
|
1921
|
+
* Returns the squared magnitude of self.
|
1922
|
+
*
|
1923
|
+
* call-seq:
|
1924
|
+
* magnitude_squared -> float
|
1925
|
+
*/
|
1926
|
+
static VALUE sm_vec2_magnitude_squared(VALUE sm_self)
|
1927
|
+
{
|
1928
|
+
return rb_float_new(vec2_length_squared(*sm_unwrap_vec2(sm_self, NULL)));
|
992
1929
|
}
|
993
1930
|
|
994
1931
|
|
995
1932
|
|
996
1933
|
/*
|
997
|
-
*
|
998
|
-
* may be a cached object. In all cases, values returned from a typed array are
|
999
|
-
* associated with the memory of the array and not given their own memory. So,
|
1000
|
-
* modifying a Mat4 fetched from an array modifies the array's data.
|
1934
|
+
* Returns the magnitude of self.
|
1001
1935
|
*
|
1002
|
-
*
|
1003
|
-
*
|
1004
|
-
|
1005
|
-
|
1006
|
-
|
1936
|
+
* call-seq:
|
1937
|
+
* magnitude -> float
|
1938
|
+
*/
|
1939
|
+
static VALUE sm_vec2_magnitude(VALUE sm_self)
|
1940
|
+
{
|
1941
|
+
return rb_float_new(vec2_length(*sm_unwrap_vec2(sm_self, NULL)));
|
1942
|
+
}
|
1943
|
+
|
1944
|
+
|
1945
|
+
|
1946
|
+
/*
|
1947
|
+
* Scales this vector's components by a scalar value and returns the result.
|
1007
1948
|
*
|
1008
|
-
* call-seq:
|
1949
|
+
* call-seq:
|
1950
|
+
* scale(scalar, output = nil) -> output or new vec2
|
1009
1951
|
*/
|
1010
|
-
static VALUE
|
1952
|
+
static VALUE sm_vec2_scale(int argc, VALUE *argv, VALUE sm_self)
|
1011
1953
|
{
|
1012
|
-
|
1013
|
-
|
1014
|
-
|
1015
|
-
|
1016
|
-
VALUE sm_cache;
|
1017
|
-
if (index >= length) {
|
1018
|
-
rb_raise(rb_eRangeError,
|
1019
|
-
"Index %zu out of bounds for array with length %zu",
|
1020
|
-
index, length);
|
1021
|
-
}
|
1954
|
+
VALUE sm_out;
|
1955
|
+
VALUE sm_scalar;
|
1956
|
+
s_float_t scalar;
|
1957
|
+
vec2_t *self = sm_unwrap_vec2(sm_self, NULL);
|
1022
1958
|
|
1023
|
-
|
1024
|
-
|
1025
|
-
rb_raise(rb_eRuntimeError, "No cache available");
|
1026
|
-
}
|
1027
|
-
sm_inner = rb_ary_entry(sm_cache, (long)index);
|
1959
|
+
rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
|
1960
|
+
scalar = rb_num2dbl(sm_scalar);
|
1028
1961
|
|
1029
|
-
if (
|
1030
|
-
|
1031
|
-
|
1032
|
-
|
1033
|
-
|
1034
|
-
|
1035
|
-
|
1962
|
+
if (SM_IS_A(sm_out, vec2) || SM_IS_A(sm_out, vec3) || SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat)) {
|
1963
|
+
vec2_scale(*self, scalar, *sm_unwrap_vec2(sm_out, NULL));
|
1964
|
+
} else {
|
1965
|
+
vec2_t out;
|
1966
|
+
vec2_scale(*self, scalar, out);
|
1967
|
+
sm_out = sm_wrap_vec2(out, rb_obj_class(sm_self));
|
1968
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1036
1969
|
}
|
1037
1970
|
|
1038
|
-
return
|
1971
|
+
return sm_out;
|
1039
1972
|
}
|
1040
1973
|
|
1041
1974
|
|
1042
1975
|
|
1043
1976
|
/*
|
1044
|
-
*
|
1045
|
-
* array and stored at the index, then no copy is done, otherwise the Mat4 is
|
1046
|
-
* copied to the array.
|
1047
|
-
*
|
1048
|
-
* If the value stored is a Mat3, it will be converted to a Mat4 for storage,
|
1049
|
-
* though this will not modify the value directly.
|
1977
|
+
* Divides this vector's components by a scalar value and returns the result.
|
1050
1978
|
*
|
1051
|
-
* call-seq:
|
1979
|
+
* call-seq:
|
1980
|
+
* divide(scalar, output = nil) -> output or new vec2
|
1052
1981
|
*/
|
1053
|
-
static VALUE
|
1982
|
+
static VALUE sm_vec2_divide(int argc, VALUE *argv, VALUE sm_self)
|
1054
1983
|
{
|
1055
|
-
|
1056
|
-
|
1057
|
-
|
1058
|
-
|
1059
|
-
|
1060
|
-
if (index >= length) {
|
1061
|
-
rb_raise(rb_eRangeError,
|
1062
|
-
"Index %zu out of bounds for array with length %zu",
|
1063
|
-
index, length);
|
1064
|
-
} else if (!(is_mat4 = SM_IS_A(sm_value, mat4)) && !SM_IS_A(sm_value, mat3)) {
|
1065
|
-
rb_raise(rb_eTypeError,
|
1066
|
-
"Invalid value to store: expected Mat3 or Mat4, got %s",
|
1067
|
-
rb_obj_classname(sm_value));
|
1068
|
-
}
|
1984
|
+
VALUE sm_out;
|
1985
|
+
VALUE sm_scalar;
|
1986
|
+
s_float_t scalar;
|
1987
|
+
vec2_t *self = sm_unwrap_vec2(sm_self, NULL);
|
1069
1988
|
|
1070
|
-
|
1989
|
+
rb_scan_args(argc, argv, "11", &sm_scalar, &sm_out);
|
1990
|
+
scalar = rb_num2dbl(sm_scalar);
|
1071
1991
|
|
1072
|
-
if (
|
1073
|
-
|
1074
|
-
if (value == &arr[index]) {
|
1075
|
-
/* The object's part of the array, don't bother copying */
|
1076
|
-
return sm_value;
|
1077
|
-
}
|
1078
|
-
mat4_copy(*value, arr[index]);
|
1992
|
+
if (SM_IS_A(sm_out, vec2) || SM_IS_A(sm_out, vec3) || SM_IS_A(sm_out, vec4) || SM_IS_A(sm_out, quat)) {
|
1993
|
+
vec2_divide(*self, scalar, *sm_unwrap_vec2(sm_out, NULL));
|
1079
1994
|
} else {
|
1080
|
-
|
1995
|
+
vec2_t out;
|
1996
|
+
vec2_divide(*self, scalar, out);
|
1997
|
+
sm_out = sm_wrap_vec2(out, rb_obj_class(sm_self));
|
1998
|
+
rb_obj_call_init(sm_out, 0, 0);
|
1081
1999
|
}
|
1082
|
-
|
2000
|
+
|
2001
|
+
return sm_out;
|
1083
2002
|
}
|
1084
2003
|
|
1085
2004
|
|
1086
2005
|
|
1087
2006
|
/*
|
1088
|
-
*
|
2007
|
+
* Tests whether a Vec2 is equivalent to another Vec2, a Vec3, Vec4, or a Quat.
|
2008
|
+
* When testing for equivalency against 4-component objects, only the first two
|
2009
|
+
* components are compared.
|
1089
2010
|
*
|
1090
|
-
* call-seq:
|
2011
|
+
* call-seq:
|
2012
|
+
* vec2 == other_vec2 -> bool
|
2013
|
+
* vec2 == vec3 -> bool
|
2014
|
+
* vec2 == vec4 -> bool
|
2015
|
+
* vec2 == quat -> bool
|
1091
2016
|
*/
|
1092
|
-
static VALUE
|
2017
|
+
static VALUE sm_vec2_equals(VALUE sm_self, VALUE sm_other)
|
1093
2018
|
{
|
1094
|
-
|
1095
|
-
|
1096
|
-
}
|
1097
|
-
|
2019
|
+
if (!RTEST(sm_other) || (!SM_IS_A(sm_other, vec2) && !SM_IS_A(sm_other, vec3) && !SM_IS_A(sm_other, vec4) && !SM_IS_A(sm_other, quat))) {
|
2020
|
+
return Qfalse;
|
2021
|
+
}
|
1098
2022
|
|
1099
|
-
|
2023
|
+
return vec2_equals(*sm_unwrap_vec2(sm_self, NULL), *sm_unwrap_vec2(sm_other, NULL)) ? Qtrue : Qfalse;
|
2024
|
+
}
|
1100
2025
|
|
1101
2026
|
|
1102
2027
|
|
@@ -1391,7 +2316,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to project");
   }
   return sm_out;
 }
@@ -1438,7 +2363,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to reflect");
   }
   return sm_out;
 }
@@ -1485,7 +2410,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to cross_product");
   }
   return sm_out;
 }
@@ -1497,7 +2422,7 @@ SM_LABEL(skip_output): {
  * result.
  *
  * call-seq:
- *
+ *   multiply_vec3(vec3, output = nil) -> output or new vec3
  */
 static VALUE sm_vec3_multiply(int argc, VALUE *argv, VALUE sm_self)
 {
@@ -1533,7 +2458,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to multiply_vec3");
   }
   return sm_out;
 }
@@ -1580,7 +2505,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to add");
   }
   return sm_out;
 }
@@ -1628,7 +2553,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to subtract");
   }
   return sm_out;
 }
@@ -1688,9 +2613,10 @@ static VALUE sm_vec3_new(int argc, VALUE *argv, VALUE self)
  * call-seq:
  *   set(x, y, z) -> vec3 with components [x, y, z]
  *   set([x, y, z]) -> vec3 with components [x, y, z]
+ *   set(vec2) -> vec3 with components [vec2.xy, 0]
  *   set(vec3) -> copy of vec3
- *   set(vec4) -> vec3
- *   set(quat) -> vec3
+ *   set(vec4) -> vec3 with components [vec4.xyz]
+ *   set(quat) -> vec3 with components [quat.xyz]
  */
 static VALUE sm_vec3_init(int argc, VALUE *argv, VALUE sm_self)
 {
@@ -1711,6 +2637,12 @@ static VALUE sm_vec3_init(int argc, VALUE *argv, VALUE sm_self)
       break;
     }

+    if (SM_IS_A(argv[0], vec2)) {
+      sm_unwrap_vec2(argv[0], *self);
+      self[0][2] = s_float_lit(0.0);
+      break;
+    }
+
     // Optional offset into array provided
     if (0) {
   case 2:
@@ -1741,7 +2673,7 @@ static VALUE sm_vec3_init(int argc, VALUE *argv, VALUE sm_self)
     }

   default: {
-    rb_raise(rb_eArgError, "Invalid arguments to
+    rb_raise(rb_eArgError, "Invalid arguments to initialize/set");
     break;
   }
   } // switch (argc)
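As the updated call-seq above documents, Vec3#set (and Vec3.new) now promotes a Vec2 by zero-filling z, and takes only the first three components of a Vec4 or Quat. A minimal sketch, with the component-wise constructors assumed:

    v2 = Snow::Vec2.new(1.0, 2.0)     # assumed constructor form
    v3 = Snow::Vec3.new(v2)           # components [1.0, 2.0, 0.0]
    v3.set(v2)                        # same promotion via #set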
@@ -2169,7 +3101,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec4(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to project");
   }
   return sm_out;
 }
@@ -2216,7 +3148,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec4(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to reflect");
   }
   return sm_out;
 }
@@ -2228,7 +3160,7 @@ SM_LABEL(skip_output): {
  * result.
  *
  * call-seq:
- *
+ *   multiply_vec4(vec4, output = nil) -> output or new vec4
  */
 static VALUE sm_vec4_multiply(int argc, VALUE *argv, VALUE sm_self)
 {
@@ -2264,7 +3196,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec4(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to multiply_vec4");
   }
   return sm_out;
 }
@@ -2312,7 +3244,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec4(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to add");
   }
   return sm_out;
 }
@@ -2360,7 +3292,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec4(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to subtract");
   }
   return sm_out;
 }
@@ -2418,8 +3350,9 @@ static VALUE sm_vec4_new(int argc, VALUE *argv, VALUE self)
  * call-seq:
  *   set(x, y, z, w = 1) -> new vec4 with components [x, y, z, w]
  *   set([x, y, z, w]) -> new vec4 with components [x, y, z, w]
+ *   set(vec3) -> vec4 with components [vec3.xyz, 1]
+ *   set(vec2) -> vec4 with components [vec2.xy, 0, 1]
  *   set(vec4) -> copy of vec4
- *   set(vec3) -> copy of vec3 with w component of 1
  *   set(quat) -> copy of quat as vec4
  */
 static VALUE sm_vec4_init(int argc, VALUE *argv, VALUE sm_self)
@@ -2442,6 +3375,14 @@ static VALUE sm_vec4_init(int argc, VALUE *argv, VALUE sm_self)

     if (SM_IS_A(argv[0], vec3)) {
       sm_unwrap_vec3(argv[0], *self);
+      self[0][3] = s_float_lit(1.0);
+      break;
+    }
+
+    if (SM_IS_A(argv[0], vec2)) {
+      sm_unwrap_vec2(argv[0], *self);
+      self[0][2] = s_float_lit(0.0);
+      self[0][3] = s_float_lit(1.0);
       break;
     }

@@ -2477,7 +3418,7 @@ static VALUE sm_vec4_init(int argc, VALUE *argv, VALUE sm_self)
     }

   default: {
-    rb_raise(rb_eArgError, "Invalid arguments to
+    rb_raise(rb_eArgError, "Invalid arguments to initialize/set");
     break;
   }
   } // swtich (argc)
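Vec4 gains the matching promotions: a Vec3 argument is padded with w = 1, and a Vec2 argument with z = 0 and w = 1. Sketch (constructor forms assumed, as above):

    v2 = Snow::Vec2.new(1.0, 2.0)
    v3 = Snow::Vec3.new(1.0, 2.0, 3.0)

    Snow::Vec4.new(v3)                # components [1.0, 2.0, 3.0, 1.0]
    Snow::Vec4.new(v2)                # components [1.0, 2.0, 0.0, 1.0]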
@@ -2792,7 +3733,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_quat(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to multiply_quat");
   }
   return sm_out;
 }
@@ -2839,7 +3780,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_rhs));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to multiply_vec3");
   }
   return sm_out;
 }
@@ -2875,6 +3816,7 @@ static VALUE sm_quat_new(int argc, VALUE *argv, VALUE self)
  *   set(x, y, z, w = 1) -> new quaternion with components [x, y, z, w]
  *   set([x, y, z, w]) -> new quaternion with components [x, y, z, w]
  *   set(quat) -> copy of quat
+ *   set(vec2) -> new quaternion with the components [vec2.xy, 0, 1]
  *   set(vec3) -> new quaternion with the components [vec3.xyz, 1]
  *   set(vec4) -> new quaternion with the components of vec4
  *   set(mat3) -> new quaternion from mat3
@@ -2892,17 +3834,25 @@ static VALUE sm_quat_init(int argc, VALUE *argv, VALUE sm_self)

   // Copy or by-array
   case 1: {
-    if (SM_IS_A(argv[0], vec3)) {
-      sm_unwrap_vec3(argv[0], *self);
-      break;
-    }
-
     if (SM_IS_A(argv[0], quat) ||
         SM_IS_A(argv[0], vec4)) {
       sm_unwrap_quat(argv[0], *self);
       break;
     }

+    if (SM_IS_A(argv[0], vec2)) {
+      sm_unwrap_vec2(argv[0], *self);
+      self[0][2] = s_float_lit(0.0);
+      self[0][3] = s_float_lit(1.0);
+      break;
+    }
+
+    if (SM_IS_A(argv[0], vec3)) {
+      sm_unwrap_vec3(argv[0], *self);
+      self[0][3] = s_float_lit(1.0);
+      break;
+    }
+
     if (SM_IS_A(argv[0], mat4)) {
       const mat4_t *mat = sm_unwrap_mat4(argv[0], NULL);
       quat_from_mat4(*mat, *self);
@@ -2947,7 +3897,7 @@ static VALUE sm_quat_init(int argc, VALUE *argv, VALUE sm_self)
   }

   default: {
-    rb_raise(rb_eArgError, "Invalid arguments to
+    rb_raise(rb_eArgError, "Invalid arguments to initialize/set");
     break;
   }
   } // switch (argc)
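Quat#set follows suit: a Vec2 becomes [vec2.xy, 0, 1], and a Vec3 now has its w component explicitly set to 1. Sketch (constructor forms assumed):

    Snow::Quat.new(Snow::Vec2.new(1.0, 2.0))        # [1.0, 2.0, 0.0, 1.0]
    Snow::Quat.new(Snow::Vec3.new(1.0, 2.0, 3.0))   # [1.0, 2.0, 3.0, 1.0]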
@@ -3368,7 +4318,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_mat4(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to multiply_mat4");
   }
   return sm_out;
 }
@@ -3415,7 +4365,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec4(output, rb_obj_class(sm_rhs));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to multiply_vec4");
   }
   return sm_out;
 }
@@ -3462,7 +4412,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_rhs));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to transform_vec3");
   }
   return sm_out;
 }
@@ -3510,7 +4460,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_rhs));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to rotate_vec3");
   }
   return sm_out;
 }
@@ -3558,7 +4508,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_rhs));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to inverse_rotate_vec3");
   }
   return sm_out;
 }
@@ -3881,7 +4831,7 @@ static VALUE sm_mat4_init(int argc, VALUE *argv, VALUE sm_self)
     if (!SM_IS_A(argv[arg_index], vec4) && !SM_IS_A(argv[arg_index], quat)) {
       rb_raise(
         rb_eArgError,
-        "Argument %d must be a Vec4 or Quat when supplying four arguments to
+        "Argument %d must be a Vec4 or Quat when supplying four arguments to initialize/set",
         (int)(arg_index + 1));
     }

@@ -3901,7 +4851,7 @@ static VALUE sm_mat4_init(int argc, VALUE *argv, VALUE sm_self)
     }

   default: {
-    rb_raise(rb_eArgError, "Invalid arguments to
+    rb_raise(rb_eArgError, "Invalid arguments to initialize/set");
     break;
   }
   } // swtich (argc)
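The reworded message above spells out the existing rule that the four-argument form of Mat4#initialize/#set takes one Vec4 or Quat per argument. Illustration only; the constructor forms and values are assumed:

    rows = [
      Snow::Vec4.new(1.0, 0.0, 0.0, 0.0),
      Snow::Vec4.new(0.0, 1.0, 0.0, 0.0),
      Snow::Vec4.new(0.0, 0.0, 1.0, 0.0),
      Snow::Vec4.new(0.0, 0.0, 0.0, 1.0)
    ]
    Snow::Mat4.new(*rows)                            # ok: four Vec4 (or Quat) arguments
    Snow::Mat4.new(rows[0], rows[1], rows[2], 4.0)   # raises ArgumentError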
@@ -4932,7 +5882,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_mat3(output, rb_obj_class(sm_self));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to multiply_mat3");
   }
   return sm_out;
 }
@@ -4979,7 +5929,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_rhs));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to rotate_vec3");
   }
   return sm_out;
 }
@@ -5027,7 +5977,7 @@ SM_LABEL(skip_output): {
     sm_out = sm_wrap_vec3(output, rb_obj_class(sm_rhs));
     rb_obj_call_init(sm_out, 0, 0);
   }} else {
-    rb_raise(rb_eArgError, "Invalid number of arguments to
+    rb_raise(rb_eArgError, "Invalid number of arguments to inverse_rotate_vec3");
   }
   return sm_out;
 }
@@ -5190,10 +6140,10 @@ static VALUE sm_mat3_init(int argc, VALUE *argv, VALUE sm_self)
     size_t arg_index;
     s_float_t *mat_elem = *self;
     for (arg_index = 0; arg_index < 3; ++arg_index, mat_elem += 3) {
-      if (!SM_IS_A(argv[arg_index], vec3)) {
+      if (!SM_IS_A(argv[arg_index], vec3) && !SM_IS_A(argv[arg_index], vec4) && !SM_IS_A(argv[arg_index], quat)) {
         rb_raise(
           rb_eArgError,
-          "Argument %d must be a Vec3 when supplying three arguments to
+          "Argument %d must be a Vec3, Vec4, or Quat when supplying three arguments to initialize/set",
           (int)(arg_index + 1));
       }

@@ -5213,7 +6163,7 @@ static VALUE sm_mat3_init(int argc, VALUE *argv, VALUE sm_self)
     }

   default: {
-    rb_raise(rb_eArgError, "Invalid arguments to
+    rb_raise(rb_eArgError, "Invalid arguments to initialize/set");
     break;
   }
   } // swtich (argc)
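Mat3's three-argument form previously insisted on Vec3 arguments; it now also accepts Vec4 or Quat, of which only the first three components appear to be used (the loop above advances mat_elem by three per argument). Sketch, constructors assumed:

    x = Snow::Vec3.new(1.0, 0.0, 0.0)
    y = Snow::Vec3.new(0.0, 1.0, 0.0)
    z = Snow::Vec4.new(0.0, 0.0, 1.0, 0.0)   # Vec4/Quat arguments now allowed
    Snow::Mat3.new(x, y, z)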
@@ -5588,6 +6538,7 @@ void Init_bindings()
  */

   s_sm_snowmath_mod = rb_define_module("Snow");
+  s_sm_vec2_klass = rb_define_class_under(s_sm_snowmath_mod, "Vec2", rb_cObject);
   s_sm_vec3_klass = rb_define_class_under(s_sm_snowmath_mod, "Vec3", rb_cObject);
   s_sm_vec4_klass = rb_define_class_under(s_sm_snowmath_mod, "Vec4", rb_cObject);
   s_sm_quat_klass = rb_define_class_under(s_sm_snowmath_mod, "Quat", rb_cObject);
@@ -5595,17 +6546,44 @@ void Init_bindings()
   s_sm_mat4_klass = rb_define_class_under(s_sm_snowmath_mod, "Mat4", rb_cObject);

   rb_const_set(s_sm_snowmath_mod, kRB_CONST_FLOAT_SIZE, INT2FIX(sizeof(s_float_t)));
+  rb_const_set(s_sm_vec2_klass, kRB_CONST_SIZE, INT2FIX(sizeof(vec2_t)));
   rb_const_set(s_sm_vec3_klass, kRB_CONST_SIZE, INT2FIX(sizeof(vec3_t)));
   rb_const_set(s_sm_vec4_klass, kRB_CONST_SIZE, INT2FIX(sizeof(vec4_t)));
   rb_const_set(s_sm_quat_klass, kRB_CONST_SIZE, INT2FIX(sizeof(quat_t)));
   rb_const_set(s_sm_mat3_klass, kRB_CONST_SIZE, INT2FIX(sizeof(mat3_t)));
   rb_const_set(s_sm_mat4_klass, kRB_CONST_SIZE, INT2FIX(sizeof(mat4_t)));
+  rb_const_set(s_sm_vec2_klass, kRB_CONST_LENGTH, INT2FIX(sizeof(vec2_t) / sizeof(s_float_t)));
   rb_const_set(s_sm_vec3_klass, kRB_CONST_LENGTH, INT2FIX(sizeof(vec3_t) / sizeof(s_float_t)));
   rb_const_set(s_sm_vec4_klass, kRB_CONST_LENGTH, INT2FIX(sizeof(vec4_t) / sizeof(s_float_t)));
   rb_const_set(s_sm_quat_klass, kRB_CONST_LENGTH, INT2FIX(sizeof(quat_t) / sizeof(s_float_t)));
   rb_const_set(s_sm_mat3_klass, kRB_CONST_LENGTH, INT2FIX(sizeof(mat3_t) / sizeof(s_float_t)));
   rb_const_set(s_sm_mat4_klass, kRB_CONST_LENGTH, INT2FIX(sizeof(mat4_t) / sizeof(s_float_t)));

+  rb_define_singleton_method(s_sm_vec2_klass, "new", sm_vec2_new, -1);
+  rb_define_method(s_sm_vec2_klass, "initialize", sm_vec2_init, -1);
+  rb_define_method(s_sm_vec2_klass, "set", sm_vec2_init, -1);
+  rb_define_method(s_sm_vec2_klass, "fetch", sm_vec2_fetch, 1);
+  rb_define_method(s_sm_vec2_klass, "store", sm_vec2_store, 2);
+  rb_define_method(s_sm_vec2_klass, "size", sm_vec2_size, 0);
+  rb_define_method(s_sm_vec2_klass, "length", sm_vec2_length, 0);
+  rb_define_method(s_sm_vec2_klass, "to_s", sm_vec2_to_s, 0);
+  rb_define_method(s_sm_vec2_klass, "address", sm_get_address, 0);
+  rb_define_method(s_sm_vec2_klass, "copy", sm_vec2_copy, -1);
+  rb_define_method(s_sm_vec2_klass, "normalize", sm_vec2_normalize, -1);
+  rb_define_method(s_sm_vec2_klass, "inverse", sm_vec2_inverse, -1);
+  rb_define_method(s_sm_vec2_klass, "negate", sm_vec2_negate, -1);
+  rb_define_method(s_sm_vec2_klass, "multiply_vec2", sm_vec2_multiply, -1);
+  rb_define_method(s_sm_vec2_klass, "add", sm_vec2_add, -1);
+  rb_define_method(s_sm_vec2_klass, "subtract", sm_vec2_subtract, -1);
+  rb_define_method(s_sm_vec2_klass, "reflect", sm_vec2_reflect, -1);
+  rb_define_method(s_sm_vec2_klass, "project", sm_vec2_project, -1);
+  rb_define_method(s_sm_vec2_klass, "dot_product", sm_vec2_dot_product, 1);
+  rb_define_method(s_sm_vec2_klass, "magnitude_squared", sm_vec2_magnitude_squared, 0);
+  rb_define_method(s_sm_vec2_klass, "magnitude", sm_vec2_magnitude, 0);
+  rb_define_method(s_sm_vec2_klass, "scale", sm_vec2_scale, -1);
+  rb_define_method(s_sm_vec2_klass, "divide", sm_vec2_divide, -1);
+  rb_define_method(s_sm_vec2_klass, "==", sm_vec2_equals, 1);
+
   rb_define_singleton_method(s_sm_vec3_klass, "new", sm_vec3_new, -1);
   rb_define_method(s_sm_vec3_klass, "initialize", sm_vec3_init, -1);
   rb_define_method(s_sm_vec3_klass, "set", sm_vec3_init, -1);
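With the bindings above, Snow::Vec2 exposes the same surface as the other vector types: SIZE and LENGTH constants (names assumed from the kRB_CONST_* symbols) plus new/set, fetch/store, copy, normalize, inverse, negate, multiply_vec2, add, subtract, reflect, project, dot_product, magnitude_squared, magnitude, scale, divide, ==, to_s, and address. A hedged usage sketch, assuming the vec2_* math mirrors its vec3/vec4 counterparts:

    Snow::Vec2::LENGTH                # => 2
    a = Snow::Vec2.new(3.0, 4.0)      # assumed constructor form
    a.magnitude                       # => 5.0
    a.dot_product(a)                  # => 25.0
    a.normalize                       # => new unit-length Vec2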
@@ -5757,6 +6735,16 @@ void Init_bindings()

 #if BUILD_ARRAY_TYPE

+  s_sm_vec2_array_klass = rb_define_class_under(s_sm_snowmath_mod, "Vec2Array", rb_cObject);
+  rb_const_set(s_sm_vec2_array_klass, kRB_CONST_TYPE, s_sm_vec2_klass);
+  rb_define_singleton_method(s_sm_vec2_array_klass, "new", sm_vec2_array_new, 1);
+  rb_define_method(s_sm_vec2_array_klass, "fetch", sm_vec2_array_fetch, 1);
+  rb_define_method(s_sm_vec2_array_klass, "store", sm_vec2_array_store, 2);
+  rb_define_method(s_sm_vec2_array_klass, "resize!", sm_vec2_array_resize, 1);
+  rb_define_method(s_sm_vec2_array_klass, "size", sm_vec2_array_size, 0);
+  rb_define_method(s_sm_vec2_array_klass, "length", sm_mathtype_array_length, 0);
+  rb_define_method(s_sm_vec2_array_klass, "address", sm_get_address, 0);
+
   s_sm_vec3_array_klass = rb_define_class_under(s_sm_snowmath_mod, "Vec3Array", rb_cObject);
   rb_const_set(s_sm_vec3_array_klass, kRB_CONST_TYPE, s_sm_vec3_klass);
   rb_define_singleton_method(s_sm_vec3_array_klass, "new", sm_vec3_array_new, 1);