ray 0.0.0.pre1 → 0.0.0.pre2

Files changed (110)
  1. data/.gitignore +15 -0
  2. data/.gitmodules +3 -0
  3. data/Rakefile +148 -5
  4. data/VERSION +1 -1
  5. data/ext/event.c +535 -0
  6. data/ext/extconf.rb +7 -1
  7. data/ext/image.c +110 -18
  8. data/ext/joystick.c +145 -0
  9. data/ext/ray.c +288 -35
  10. data/ext/ray.h +54 -2
  11. data/lib/ray/config.rb +84 -0
  12. data/lib/ray/dsl.rb +19 -0
  13. data/lib/ray/dsl/converter.rb +65 -0
  14. data/lib/ray/dsl/event.rb +52 -0
  15. data/lib/ray/dsl/event_raiser.rb +21 -0
  16. data/lib/ray/dsl/event_runner.rb +39 -0
  17. data/lib/ray/dsl/event_translator.rb +38 -0
  18. data/lib/ray/dsl/handler.rb +71 -0
  19. data/lib/ray/dsl/listener.rb +30 -0
  20. data/lib/ray/dsl/matcher.rb +60 -0
  21. data/lib/ray/dsl/type.rb +58 -0
  22. data/lib/ray/game.rb +107 -0
  23. data/lib/ray/helper.rb +17 -0
  24. data/lib/ray/image.rb +11 -0
  25. data/lib/ray/ray.rb +8 -0
  26. data/lib/ray/scene.rb +102 -0
  27. data/psp/SDL_psp_main.c +84 -0
  28. data/psp/bigdecimal/README +60 -0
  29. data/psp/bigdecimal/bigdecimal.c +4697 -0
  30. data/psp/bigdecimal/bigdecimal.h +216 -0
  31. data/psp/bigdecimal/lib/bigdecimal/jacobian.rb +85 -0
  32. data/psp/bigdecimal/lib/bigdecimal/ludcmp.rb +84 -0
  33. data/psp/bigdecimal/lib/bigdecimal/math.rb +235 -0
  34. data/psp/bigdecimal/lib/bigdecimal/newton.rb +77 -0
  35. data/psp/bigdecimal/lib/bigdecimal/util.rb +65 -0
  36. data/psp/digest/bubblebabble/bubblebabble.c +142 -0
  37. data/psp/digest/defs.h +20 -0
  38. data/psp/digest/digest.c +643 -0
  39. data/psp/digest/digest.h +32 -0
  40. data/psp/digest/lib/digest.rb +50 -0
  41. data/psp/digest/lib/md5.rb +27 -0
  42. data/psp/digest/lib/sha1.rb +27 -0
  43. data/psp/digest/md5/md5.c +420 -0
  44. data/psp/digest/md5/md5.h +80 -0
  45. data/psp/digest/md5/md5init.c +40 -0
  46. data/psp/digest/rmd160/rmd160.c +457 -0
  47. data/psp/digest/rmd160/rmd160.h +56 -0
  48. data/psp/digest/rmd160/rmd160init.c +40 -0
  49. data/psp/digest/sha1/sha1.c +269 -0
  50. data/psp/digest/sha1/sha1.h +39 -0
  51. data/psp/digest/sha1/sha1init.c +40 -0
  52. data/psp/digest/sha2/lib/sha2.rb +73 -0
  53. data/psp/digest/sha2/sha2.c +919 -0
  54. data/psp/digest/sha2/sha2.h +109 -0
  55. data/psp/digest/sha2/sha2init.c +52 -0
  56. data/psp/enumerator/enumerator.c +298 -0
  57. data/psp/etc/etc.c +559 -0
  58. data/psp/ext.c +285 -0
  59. data/psp/fcntl/fcntl.c +187 -0
  60. data/psp/lib/rbconfig.rb +178 -0
  61. data/psp/nkf/lib/kconv.rb +367 -0
  62. data/psp/nkf/nkf-utf8/config.h +88 -0
  63. data/psp/nkf/nkf-utf8/nkf.c +6040 -0
  64. data/psp/nkf/nkf-utf8/utf8tbl.c +8500 -0
  65. data/psp/nkf/nkf-utf8/utf8tbl.h +34 -0
  66. data/psp/nkf/nkf.c +654 -0
  67. data/psp/socket/addrinfo.h +173 -0
  68. data/psp/socket/getaddrinfo.c +676 -0
  69. data/psp/socket/getnameinfo.c +270 -0
  70. data/psp/socket/pspsocket.c +71 -0
  71. data/psp/socket/pspsocket.h +28 -0
  72. data/psp/socket/socket.c +4662 -0
  73. data/psp/socket/sockport.h +76 -0
  74. data/psp/stringio/stringio.c +1306 -0
  75. data/psp/strscan/strscan.c +1320 -0
  76. data/psp/syck/bytecode.c +1166 -0
  77. data/psp/syck/emitter.c +1242 -0
  78. data/psp/syck/gram.c +1894 -0
  79. data/psp/syck/gram.h +79 -0
  80. data/psp/syck/handler.c +174 -0
  81. data/psp/syck/implicit.c +2990 -0
  82. data/psp/syck/node.c +408 -0
  83. data/psp/syck/rubyext.c +2367 -0
  84. data/psp/syck/syck.c +504 -0
  85. data/psp/syck/syck.h +456 -0
  86. data/psp/syck/token.c +2725 -0
  87. data/psp/syck/yaml2byte.c +257 -0
  88. data/psp/syck/yamlbyte.h +170 -0
  89. data/psp/thread/thread.c +1175 -0
  90. data/psp/zlib/zlib.c +3547 -0
  91. data/script.rb +10 -0
  92. data/spec/ray/config_spec.rb +90 -0
  93. data/spec/ray/conversion_spec.rb +43 -0
  94. data/spec/ray/event_spec.rb +191 -0
  95. data/spec/ray/image_spec.rb +43 -1
  96. data/spec/ray/joystick_spec.rb +17 -0
  97. data/spec/ray/matcher_spec.rb +73 -0
  98. data/spec/ray/ray_spec.rb +72 -1
  99. data/spec/ray/type_spec.rb +17 -0
  100. data/spec/res/aqua.bmp +0 -0
  101. data/spec/res/aqua.png +0 -0
  102. data/spec/res/not_a_jpeg.jpeg +0 -0
  103. data/spec_runner.rb +4 -0
  104. metadata +101 -9
  105. data/ext/Makefile +0 -189
  106. data/ext/ray +0 -0
  107. data/ext/ray.bundle +0 -0
  108. data/ext/ray_ext.bundle +0 -0
  109. data/ext/ray_ext.so +0 -0
  110. data/ext/test.rb +0 -21
data/psp/syck/yaml2byte.c
@@ -0,0 +1,257 @@
+ /*
+  * yaml2byte.c
+  *
+  * $Author: shyouhei $
+  * $Date: 2007-02-13 08:01:19 +0900 (Tue, 13 Feb 2007) $
+  *
+  * Copyright (C) 2003 why the lucky stiff, clark evans
+  *
+  * WARNING WARNING WARNING --- THIS IS *NOT JUST* PLAYING
+  * ANYMORE! -- WHY HAS EMBRACED THIS AS THE REAL THING!
+  */
+ #include <ruby/ruby.h>
+ #include "syck.h"
+ #include <assert.h>
+ #define YAMLBYTE_UTF8
+ #include "yamlbyte.h"
+
+ #include <stdio.h>
+ #define TRACE0(a) \
+     do { printf(a); printf("\n"); fflush(stdout); } while(0)
+ #define TRACE1(a,b) \
+     do { printf(a,b); printf("\n"); fflush(stdout); } while(0)
+ #define TRACE2(a,b,c) \
+     do { printf(a,b,c); printf("\n"); fflush(stdout); } while(0)
+ #define TRACE3(a,b,c,d) \
+     do { printf(a,b,c,d); printf("\n"); fflush(stdout); } while(0)
+
+ /* Reinvent the wheel... */
+ #define CHUNKSIZE 64
+ #define HASH ((long)0xCAFECAFE)
+ typedef struct {
+     long hash;
+     char *buffer;
+     long length;
+     long remaining;
+     int printed;
+ } bytestring_t;
+ bytestring_t *bytestring_alloc() {
+     bytestring_t *ret;
+     /*TRACE0("bytestring_alloc()");*/
+     ret = S_ALLOC(bytestring_t);
+     ret->hash = HASH;
+     ret->length = CHUNKSIZE;
+     ret->remaining = ret->length;
+     ret->buffer = S_ALLOC_N(char, ret->length + 1 );
+     ret->buffer[0] = 0;
+     ret->printed = 0;
+     return ret;
+ }
+ void bytestring_append(bytestring_t *str, char code,
+                        char *start, char *finish)
+ {
+     long grow;
+     long length = 2;   /* CODE + LF */
+     char *curr;
+     assert(str && HASH == str->hash);
+     /*TRACE0("bytestring_append()");*/
+     if(start) {
+         if(!finish)
+             finish = start + strlen(start);
+         length += (finish-start);
+     }
+     if(length > str->remaining) {
+         grow = (length - str->remaining) + CHUNKSIZE;
+         str->remaining += grow;
+         str->length += grow;
+         str->buffer = S_REALLOC_N( str->buffer, char, str->length + 1 );
+         assert(str->buffer);
+     }
+     curr = str->buffer + (str->length - str->remaining);
+     *curr = code;
+     curr += 1;
+     if(start)
+         while(start < finish)
+             *curr ++ = *start ++;
+     *curr = '\n';
+     curr += 1;
+     *curr = 0;
+     str->remaining = str->remaining - length;
+     assert( (str->buffer + str->length) - str->remaining );
+ }
+ void bytestring_extend(bytestring_t *str, bytestring_t *ext)
+ {
+     char *from;
+     char *curr;
+     char *stop;
+     long grow;
+     long length;
+     assert(str && HASH == str->hash);
+     assert(ext && HASH == ext->hash);
+     if(ext->printed) {
+         assert(ext->buffer[0] ==YAMLBYTE_ANCHOR);
+         curr = ext->buffer;
+         while( '\n' != *curr)
+             curr++;
+         bytestring_append(str, YAMLBYTE_ALIAS, ext->buffer + 1, curr);
+     } else {
+         ext->printed = 1;
+         length = (ext->length - ext->remaining);
+         if(length > str->remaining) {
+             grow = (length - str->remaining) + CHUNKSIZE;
+             str->remaining += grow;
+             str->length += grow;
+             str->buffer = S_REALLOC_N( str->buffer, char, str->length + 1 );
+         }
+         curr = str->buffer + (str->length - str->remaining);
+         from = ext->buffer;
+         stop = ext->buffer + length;
+         while( from < stop )
+             *curr ++ = *from ++;
+         *curr = 0;
+         str->remaining = str->remaining - length;
+         assert( (str->buffer + str->length) - str->remaining );
+     }
+ }
+
+ /* convert SyckNode into yamlbyte_buffer_t objects */
+ SYMID
+ syck_yaml2byte_handler(p, n)
+     SyckParser *p;
+     SyckNode *n;
+ {
+     SYMID oid;
+     long i;
+     char ch;
+     char nextcode;
+     char *start;
+     char *current;
+     char *finish;
+     bytestring_t *val = NULL;
+     bytestring_t *sav = NULL;
+     /*TRACE0("syck_yaml2byte_handler()");*/
+     val = bytestring_alloc();
+     if(n->anchor) bytestring_append(val,YAMLBYTE_ANCHOR, n->anchor, NULL);
+     if ( n->type_id )
+     {
+         if ( p->taguri_expansion )
+         {
+             bytestring_append(val,YAMLBYTE_TRANSFER, n->type_id, NULL);
+         }
+         else
+         {
+             char *type_tag = S_ALLOC_N( char, strlen( n->type_id ) + 1 );
+             type_tag[0] = '\0';
+             strcat( type_tag, "!" );
+             strcat( type_tag, n->type_id );
+             bytestring_append( val, YAMLBYTE_TRANSFER, type_tag, NULL);
+             S_FREE(type_tag);
+         }
+     }
+     switch (n->kind)
+     {
+         case syck_str_kind:
+             nextcode = YAMLBYTE_SCALAR;
+             start = n->data.str->ptr;
+             finish = start + n->data.str->len - 1;
+             current = start;
+             /*TRACE2("SCALAR: %s %d", start, n->data.str->len); */
+             while(1) {
+                 ch = *current;
+                 if('\n' == ch || 0 == ch || current > finish) {
+                     if(current >= start) {
+                         bytestring_append(val, nextcode, start, current);
+                         nextcode = YAMLBYTE_CONTINUE;
+                     }
+                     start = current + 1;
+                     if(current > finish)
+                     {
+                         break;
+                     }
+                     else if('\n' == ch )
+                     {
+                         bytestring_append(val,YAMLBYTE_NEWLINE,NULL,NULL);
+                     }
+                     else if(0 == ch)
+                     {
+                         bytestring_append(val,YAMLBYTE_NULLCHAR,NULL,NULL);
+                     }
+                     else
+                     {
+                         assert("oops");
+                     }
+                 }
+                 current += 1;
+             }
+             break;
+         case syck_seq_kind:
+             bytestring_append(val,YAMLBYTE_SEQUENCE,NULL,NULL);
+             for ( i = 0; i < n->data.list->idx; i++ )
+             {
+                 oid = syck_seq_read( n, i );
+                 syck_lookup_sym( p, oid, (char **)&sav );
+                 bytestring_extend(val, sav);
+             }
+             bytestring_append(val,YAMLBYTE_END_BRANCH,NULL,NULL);
+             break;
+         case syck_map_kind:
+             bytestring_append(val,YAMLBYTE_MAPPING,NULL,NULL);
+             for ( i = 0; i < n->data.pairs->idx; i++ )
+             {
+                 oid = syck_map_read( n, map_key, i );
+                 syck_lookup_sym( p, oid, (char **)&sav );
+                 bytestring_extend(val, sav);
+                 oid = syck_map_read( n, map_value, i );
+                 syck_lookup_sym( p, oid, (char **)&sav );
+                 bytestring_extend(val, sav);
+             }
+             bytestring_append(val,YAMLBYTE_END_BRANCH,NULL,NULL);
+             break;
+     }
+     oid = syck_add_sym( p, (char *) val );
+     /*TRACE1("Saving: %s", val->buffer );*/
+     return oid;
+ }
+
+ char *
+ syck_yaml2byte(char *yamlstr)
+ {
+     SYMID oid;
+     char *ret;
+     bytestring_t *sav;
+
+     SyckParser *parser = syck_new_parser();
+     syck_parser_str_auto( parser, yamlstr, NULL );
+     syck_parser_handler( parser, syck_yaml2byte_handler );
+     syck_parser_error_handler( parser, NULL );
+     syck_parser_implicit_typing( parser, 1 );
+     syck_parser_taguri_expansion( parser, 1 );
+     oid = syck_parse( parser );
+
+     if ( syck_lookup_sym( parser, oid, (char **)&sav ) == 1 ) {
+         ret = S_ALLOC_N( char, strlen( sav->buffer ) + 3 );
+         ret[0] = '\0';
+         strcat( ret, "D\n" );
+         strcat( ret, sav->buffer );
+     }
+     else
+     {
+         ret = NULL;
+     }
+
+     syck_free_parser( parser );
+     return ret;
+ }
+
+ #ifdef TEST_YBEXT
+ #include <stdio.h>
+ int main() {
+     char *yaml = "test: 1\nand: \"with new\\nline\\n\"\nalso: &3 three\nmore: *3";
+     printf("--- # YAML \n");
+     printf(yaml);
+     printf("\n...\n");
+     printf(syck_yaml2byte(yaml));
+     return 0;
+ }
+ #endif
+
data/psp/syck/yamlbyte.h
@@ -0,0 +1,170 @@
+ /* yamlbyte.h
+  *
+  * The YAML bytecode "C" interface header file. See the YAML bytecode
+  * reference for bytecode sequence rules and for the meaning of each
+  * bytecode.
+  */
+
+ #ifndef YAMLBYTE_H
+ #define YAMLBYTE_H
+ #include <stddef.h>
+
+ /* define what a character is */
+ typedef unsigned char yamlbyte_utf8_t;
+ typedef unsigned short yamlbyte_utf16_t;
+ #ifdef YAMLBYTE_UTF8
+   #ifdef YAMLBYTE_UTF16
+     #error Must only define YAMLBYTE_UTF8 or YAMLBYTE_UTF16
+   #endif
+   typedef yamlbyte_utf8_t yamlbyte_char_t;
+ #else
+   #ifdef YAMLBYTE_UTF16
+     typedef yamlbyte_utf16_t yamlbyte_char_t;
+   #else
+     #error Must define YAMLBYTE_UTF8 or YAMLBYTE_UTF16
+   #endif
+ #endif
+
+ /* specify list of bytecodes */
+ #define YAMLBYTE_FINISH ((yamlbyte_char_t) 0)
+ #define YAMLBYTE_DOCUMENT ((yamlbyte_char_t)'D')
+ #define YAMLBYTE_DIRECTIVE ((yamlbyte_char_t)'V')
+ #define YAMLBYTE_PAUSE ((yamlbyte_char_t)'P')
+ #define YAMLBYTE_MAPPING ((yamlbyte_char_t)'M')
+ #define YAMLBYTE_SEQUENCE ((yamlbyte_char_t)'Q')
+ #define YAMLBYTE_END_BRANCH ((yamlbyte_char_t)'E')
+ #define YAMLBYTE_SCALAR ((yamlbyte_char_t)'S')
+ #define YAMLBYTE_CONTINUE ((yamlbyte_char_t)'C')
+ #define YAMLBYTE_NEWLINE ((yamlbyte_char_t)'N')
+ #define YAMLBYTE_NULLCHAR ((yamlbyte_char_t)'Z')
+ #define YAMLBYTE_ANCHOR ((yamlbyte_char_t)'A')
+ #define YAMLBYTE_ALIAS ((yamlbyte_char_t)'R')
+ #define YAMLBYTE_TRANSFER ((yamlbyte_char_t)'T')
+ /* formatting bytecodes */
+ #define YAMLBYTE_COMMENT ((yamlbyte_char_t)'c')
+ #define YAMLBYTE_INDENT ((yamlbyte_char_t)'i')
+ #define YAMLBYTE_STYLE ((yamlbyte_char_t)'s')
+ /* other bytecodes */
+ #define YAMLBYTE_LINE_NUMBER ((yamlbyte_char_t)'#')
+ #define YAMLBYTE_WHOLE_SCALAR ((yamlbyte_char_t)'<')
+ #define YAMLBYTE_NOTICE ((yamlbyte_char_t)'!')
+ #define YAMLBYTE_SPAN ((yamlbyte_char_t)')')
+ #define YAMLBYTE_ALLOC ((yamlbyte_char_t)'@')
+
+ /* second level style bytecodes, ie "s>" */
+ #define YAMLBYTE_FLOW ((yamlbyte_char_t)'>')
+ #define YAMLBYTE_LITERAL ((yamlbyte_char_t)'|')
+ #define YAMLBYTE_BLOCK ((yamlbyte_char_t)'b')
+ #define YAMLBYTE_PLAIN ((yamlbyte_char_t)'p')
+ #define YAMLBYTE_INLINE_MAPPING ((yamlbyte_char_t)'{')
+ #define YAMLBYTE_INLINE_SEQUENCE ((yamlbyte_char_t)'[')
+ #define YAMLBYTE_SINGLE_QUOTED ((yamlbyte_char_t)39)
+ #define YAMLBYTE_DOUBLE_QUOTED ((yamlbyte_char_t)'"')
+
+ /*
+  * The "C" API has two variants, one based on instructions,
+  * with events delivered via pointers; and the other one
+  * is character based where one or more instructions are
+  * serialized into a buffer.
+  *
+  * Note: In the instruction based API, WHOLE_SCALAR does
+  * not have the '<here' marshalling stuff.
+  */
+
+ typedef void * yamlbyte_consumer_t;
+ typedef void * yamlbyte_producer_t;
+
+ /* push and pull APIs need a way to communicate results */
+ typedef enum {
+     YAMLBYTE_OK = 0,         /* proceed */
+     YAMLBYTE_E_MEMORY = 'M', /* could not allocate memory */
+     YAMLBYTE_E_READ = 'R',   /* input stream read error */
+     YAMLBYTE_E_WRITE = 'W',  /* output stream write error */
+     YAMLBYTE_E_OTHER = '?',  /* some other error condition */
+     YAMLBYTE_E_PARSE = 'P',  /* parse error, check bytecodes */
+ } yamlbyte_result_t;
+
+ typedef const yamlbyte_char_t *yamlbyte_buff_t;
+
+ /*
+  * The "Instruction" API
+  */
+
+ typedef struct yaml_instruction {
+     yamlbyte_char_t bytecode;
+     yamlbyte_buff_t start;
+     yamlbyte_buff_t finish; /* open range, *finish is _not_ part */
+ } *yamlbyte_inst_t;
+
+ /* producer pushes the instruction with one bytecode event to the
+  * consumer; if the consumer's result is not YAMLBYTE_OK, then
+  * the producer should stop */
+ typedef
+ yamlbyte_result_t
+ (*yamlbyte_push_t)(
+     yamlbyte_consumer_t self,
+     yamlbyte_inst_t inst
+ );
+
+ /* consumer pulls a bytecode instruction from the producer; in this
+  * case the instruction (and is buffer) are owned by the producer and
+  * will remain valid till the pull function is called once again;
+  * if the instruction is NULL, then there are no more results; and
+  * it is important to call the pull function till it returns NULL so
+  * that the producer can clean up its memory allocations */
+ typedef
+ yamlbyte_result_t
+ (*yamlbyte_pull_t)(
+     yamlbyte_producer_t self,
+     yamlbyte_inst_t *inst /* to be filled in by the producer */
+ );
+
+ /*
+  * Buffer based API
+  */
+
+ /* producer pushes a null terminated buffer filled with one or more
+  * bytecode events to the consumer; if the consumer's result is not
+  * YAMLBYTE_OK, then the producer should stop */
+ typedef
+ yamlbyte_result_t
+ (*yamlbyte_pushbuff_t)(
+     yamlbyte_consumer_t self,
+     yamlbyte_buff_t buff
+ );
+
+ /* consumer pulls bytecode events from the producer; in this case
+  * the buffer is owned by the producer, and will remain valid till
+  * the pull function is called once again; if the buffer pointer
+  * is set to NULL, then there are no more results; it is important
+  * to call the pull function till it returns NULL so that the
+  * producer can clean up its memory allocations */
+ typedef
+ yamlbyte_result_t
+ (*yamlbyte_pullbuff_t)(
+     yamlbyte_producer_t self,
+     yamlbyte_buff_t *buff /* to be filled in by the producer */
+ );
+
+ /* convert a pull interface to a push interface; the reverse process
+  * requires threads and thus is language dependent */
+ #define YAMLBYTE_PULL2PUSH(pull,producer,push,consumer,result) \
+     do { \
+         yamlbyte_pullbuff_t _pull = (pull); \
+         yamlbyte_pushbuff_t _push = (push); \
+         yamlbyte_result_t _result = YAMLBYTE_OK; \
+         yamlbyte_producer_t _producer = (producer); \
+         yamlbyte_consumer_t _consumer = (consumer); \
+         while(1) { \
+             yamlbyte_buff_t buff = NULL; \
+             _result = _pull(_producer,&buff); \
+             if(YAMLBYTE_OK != result || NULL == buff) \
+                 break; \
+             _result = _push(_consumer,buff); \
+             if(YAMLBYTE_OK != result) \
+                 break; \
+         } \
+         (result) = _result; \
+     } while(0)
+
+ #endif
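
Aside (not part of the gem diff): yamlbyte.h above only declares the buffer-based producer/consumer typedefs and the YAMLBYTE_PULL2PUSH macro, without showing how they plug together. The following is a minimal sketch written against those typedefs; print_push, one_shot_pull, and the sample bytecode string are hypothetical names for illustration, not anything shipped in the gem. Note that the macro's loop condition reads the caller's result variable, so it is initialized before the macro is invoked.

#define YAMLBYTE_UTF8
#include <stdio.h>
#include "yamlbyte.h"

/* consumer: print each null-terminated bytecode buffer pushed to it */
static yamlbyte_result_t
print_push(yamlbyte_consumer_t self, yamlbyte_buff_t buff)
{
    (void)self;
    fputs((const char *)buff, stdout);
    return YAMLBYTE_OK;
}

/* producer: hand out one static buffer, then signal end-of-stream with NULL */
static yamlbyte_result_t
one_shot_pull(yamlbyte_producer_t self, yamlbyte_buff_t *buff)
{
    /* hypothetical bytecode stream for "key: value": document, mapping,
     * scalar "key", scalar "value", end branch */
    static const yamlbyte_utf8_t bytecodes[] = "D\nM\nSkey\nSvalue\nE\n";
    int *done = (int *)self;
    *buff = *done ? NULL : bytecodes;
    *done = 1;
    return YAMLBYTE_OK;
}

int main(void)
{
    int done = 0;
    yamlbyte_result_t result = YAMLBYTE_OK; /* the macro tests this variable */
    YAMLBYTE_PULL2PUSH(one_shot_pull, &done, print_push, NULL, result);
    return result == YAMLBYTE_OK ? 0 : 1;
}
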
data/psp/thread/thread.c
@@ -0,0 +1,1175 @@
+ /*
+  * Optimized Ruby Mutex implementation, loosely based on thread.rb by
+  * Yukihiro Matsumoto <matz@ruby-lang.org>
+  *
+  * Copyright 2006-2007 MenTaLguY <mental@rydia.net>
+  *
+  * RDoc taken from original.
+  *
+  * This file is made available under the same terms as Ruby.
+  */
+
+ #include <ruby/ruby.h>
+ #include <ruby/intern.h>
+ #include <ruby/rubysig.h>
+
+ static VALUE rb_cMutex;
+ static VALUE rb_cConditionVariable;
+ static VALUE rb_cQueue;
+ static VALUE rb_cSizedQueue;
+
+ static VALUE set_critical(VALUE value);
+
+ static VALUE
+ thread_exclusive_do(void)
+ {
+     rb_thread_critical = 1;
+
+     return rb_yield(Qundef);
+ }
+
+ /*
+  * call-seq:
+  *   Thread.exclusive { block } => obj
+  *
+  * Wraps a block in Thread.critical, restoring the original value
+  * upon exit from the critical section, and returns the value of the
+  * block.
+  */
+
+ static VALUE
+ rb_thread_exclusive(void)
+ {
+     return rb_ensure(thread_exclusive_do, Qundef, set_critical, rb_thread_critical);
+ }
+
+ typedef struct _Entry {
+     VALUE value;
+     struct _Entry *next;
+ } Entry;
+
+ typedef struct _List {
+     Entry *entries;
+     Entry *last_entry;
+     Entry *entry_pool;
+     unsigned long size;
+ } List;
+
+ static void
+ init_list(List *list)
+ {
+     list->entries = NULL;
+     list->last_entry = NULL;
+     list->entry_pool = NULL;
+     list->size = 0;
+ }
+
+ static void
+ mark_list(List *list)
+ {
+     Entry *entry;
+     for (entry = list->entries; entry; entry = entry->next) {
+         rb_gc_mark(entry->value);
+     }
+ }
+
+ static void
+ free_entries(Entry *first)
+ {
+     Entry *next;
+     while (first) {
+         next = first->next;
+         xfree(first);
+         first = next;
+     }
+ }
+
+ static void
+ finalize_list(List *list)
+ {
+     free_entries(list->entries);
+     free_entries(list->entry_pool);
+ }
+
+ static void
+ push_list(List *list, VALUE value)
+ {
+     Entry *entry;
+
+     if (list->entry_pool) {
+         entry = list->entry_pool;
+         list->entry_pool = entry->next;
+     } else {
+         entry = ALLOC(Entry);
+     }
+
+     entry->value = value;
+     entry->next = NULL;
+
+     if (list->last_entry) {
+         list->last_entry->next = entry;
+     } else {
+         list->entries = entry;
+     }
+     list->last_entry = entry;
+
+     ++list->size;
+ }
+
+ static void
+ push_multiple_list(List *list, VALUE *values, unsigned count)
+ {
+     unsigned i;
+     for (i = 0; i < count; i++) {
+         push_list(list, values[i]);
+     }
+ }
+
+ static void
+ recycle_entries(List *list, Entry *first_entry, Entry *last_entry)
+ {
+ #ifdef USE_MEM_POOLS
+     last_entry->next = list->entry_pool;
+     list->entry_pool = first_entry;
+ #else
+     last_entry->next = NULL;
+     free_entries(first_entry);
+ #endif
+ }
+
+ static VALUE
+ shift_list(List *list)
+ {
+     Entry *entry;
+     VALUE value;
+
+     entry = list->entries;
+     if (!entry) return Qundef;
+
+     list->entries = entry->next;
+     if (entry == list->last_entry) {
+         list->last_entry = NULL;
+     }
+
+     --list->size;
+
+     value = entry->value;
+     recycle_entries(list, entry, entry);
+
+     return value;
+ }
+
+ static void
+ remove_one(List *list, VALUE value)
+ {
+     Entry **ref;
+     Entry *entry;
+
+     for (ref = &list->entries, entry = list->entries;
+          entry != NULL;
+          ref = &entry->next, entry = entry->next) {
+         if (entry->value == value) {
+             *ref = entry->next;
+             recycle_entries(list, entry, entry);
+             break;
+         }
+     }
+ }
+
+ static void
+ clear_list(List *list)
+ {
+     if (list->last_entry) {
+         recycle_entries(list, list->entries, list->last_entry);
+         list->entries = NULL;
+         list->last_entry = NULL;
+         list->size = 0;
+     }
+ }
+
+ static VALUE
+ array_from_list(List const *list)
+ {
+     VALUE ary;
+     Entry *entry;
+     ary = rb_ary_new();
+     for (entry = list->entries; entry; entry = entry->next) {
+         rb_ary_push(ary, entry->value);
+     }
+     return ary;
+ }
+
+ static VALUE
+ wake_thread(VALUE thread)
+ {
+     return rb_rescue2(rb_thread_wakeup, thread,
+                       NULL, Qundef, rb_eThreadError, 0);
+ }
+
+ static VALUE
+ run_thread(VALUE thread)
+ {
+     return rb_rescue2(rb_thread_run, thread,
+                       NULL, Qundef, rb_eThreadError, 0);
+ }
+
+ static VALUE
+ wake_one(List *list)
+ {
+     VALUE waking;
+
+     waking = Qnil;
+     while (list->entries && !RTEST(waking)) {
+         waking = wake_thread(shift_list(list));
+     }
+
+     return waking;
+ }
+
+ static VALUE
+ wake_all(List *list)
+ {
+     while (list->entries) {
+         wake_one(list);
+     }
+     return Qnil;
+ }
+
+ static VALUE
+ wait_list_inner(List *list)
+ {
+     push_list(list, rb_thread_current());
+     rb_thread_stop();
+     return Qnil;
+ }
+
+ static VALUE
+ wait_list_cleanup(List *list)
+ {
+     /* cleanup in case of spurious wakeups */
+     remove_one(list, rb_thread_current());
+     return Qnil;
+ }
+
+ static void
+ wait_list(List *list)
+ {
+     rb_ensure(wait_list_inner, (VALUE)list, wait_list_cleanup, (VALUE)list);
+ }
+
+ static void
+ assert_no_survivors(List *waiting, const char *label, void *addr)
+ {
+     Entry *entry;
+     for (entry = waiting->entries; entry; entry = entry->next) {
+         if (RTEST(wake_thread(entry->value))) {
+             rb_bug("%s %p freed with live thread(s) waiting", label, addr);
+         }
+     }
+ }
+
+ /*
+  * Document-class: Mutex
+  *
+  * Mutex implements a simple semaphore that can be used to coordinate access to
+  * shared data from multiple concurrent threads.
+  *
+  * Example:
+  *
+  *   require 'thread'
+  *   semaphore = Mutex.new
+  *
+  *   a = Thread.new {
+  *     semaphore.synchronize {
+  *       # access shared resource
+  *     }
+  *   }
+  *
+  *   b = Thread.new {
+  *     semaphore.synchronize {
+  *       # access shared resource
+  *     }
+  *   }
+  *
+  */
+
+ typedef struct _Mutex {
+     VALUE owner;
+     List waiting;
+ } Mutex;
+
+ static void
+ mark_mutex(Mutex *mutex)
+ {
+     rb_gc_mark(mutex->owner);
+     mark_list(&mutex->waiting);
+ }
+
+ static void
+ finalize_mutex(Mutex *mutex)
+ {
+     finalize_list(&mutex->waiting);
+ }
+
+ static void
+ free_mutex(Mutex *mutex)
+ {
+     assert_no_survivors(&mutex->waiting, "mutex", mutex);
+     finalize_mutex(mutex);
+     xfree(mutex);
+ }
+
+ static void
+ init_mutex(Mutex *mutex)
+ {
+     mutex->owner = Qnil;
+     init_list(&mutex->waiting);
+ }
+
+ /*
+  * Document-method: new
+  * call-seq: Mutex.new
+  *
+  * Creates a new Mutex
+  *
+  */
+
+ static VALUE
+ rb_mutex_alloc(VALUE klass)
+ {
+     Mutex *mutex;
+     mutex = ALLOC(Mutex);
+     init_mutex(mutex);
+     return Data_Wrap_Struct(klass, mark_mutex, free_mutex, mutex);
+ }
+
+ /*
+  * Document-method: locked?
+  * call-seq: locked?
+  *
+  * Returns +true+ if this lock is currently held by some thread.
+  *
+  */
+
+ static VALUE
+ rb_mutex_locked_p(VALUE self)
+ {
+     Mutex *mutex;
+     Data_Get_Struct(self, Mutex, mutex);
+     return RTEST(mutex->owner) ? Qtrue : Qfalse;
+ }
+
+ /*
+  * Document-method: try_lock
+  * call-seq: try_lock
+  *
+  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
+  * lock was granted.
+  *
+  */
+
+ static VALUE
+ rb_mutex_try_lock(VALUE self)
+ {
+     Mutex *mutex;
+
+     Data_Get_Struct(self, Mutex, mutex);
+
+     if (RTEST(mutex->owner))
+         return Qfalse;
+
+     mutex->owner = rb_thread_current();
+     return Qtrue;
+ }
+
+ /*
+  * Document-method: lock
+  * call-seq: lock
+  *
+  * Attempts to grab the lock and waits if it isn't available.
+  *
+  */
+
+ static void
+ lock_mutex(Mutex *mutex)
+ {
+     VALUE current;
+     current = rb_thread_current();
+
+     rb_thread_critical = 1;
+
+     while (RTEST(mutex->owner)) {
+         wait_list(&mutex->waiting);
+         rb_thread_critical = 1;
+     }
+     mutex->owner = current;
+
+     rb_thread_critical = 0;
+ }
+
+ static VALUE
+ rb_mutex_lock(VALUE self)
+ {
+     Mutex *mutex;
+     Data_Get_Struct(self, Mutex, mutex);
+     lock_mutex(mutex);
+     return self;
+ }
+
+ /*
+  * Document-method: unlock
+  *
+  * Releases the lock. Returns +nil+ if ref wasn't locked.
+  *
+  */
+
+ static VALUE
+ unlock_mutex_inner(Mutex *mutex)
+ {
+     VALUE waking;
+
+     if (!RTEST(mutex->owner)) {
+         return Qundef;
+     }
+     mutex->owner = Qnil;
+     waking = wake_one(&mutex->waiting);
+
+     return waking;
+ }
+
+ static VALUE
+ set_critical(VALUE value)
+ {
+     rb_thread_critical = (int)value;
+     return Qundef;
+ }
+
+ static VALUE
+ unlock_mutex(Mutex *mutex)
+ {
+     VALUE waking;
+
+     rb_thread_critical = 1;
+     waking = rb_ensure(unlock_mutex_inner, (VALUE)mutex, set_critical, 0);
+
+     if (waking == Qundef) {
+         return Qfalse;
+     }
+
+     if (RTEST(waking)) {
+         run_thread(waking);
+     }
+
+     return Qtrue;
+ }
+
+ static VALUE
+ rb_mutex_unlock(VALUE self)
+ {
+     Mutex *mutex;
+     Data_Get_Struct(self, Mutex, mutex);
+
+     if (RTEST(unlock_mutex(mutex))) {
+         return self;
+     } else {
+         return Qnil;
+     }
+ }
+
+ /*
+  * Document-method: exclusive_unlock
+  * call-seq: exclusive_unlock { ... }
+  *
+  * If the mutex is locked, unlocks the mutex, wakes one waiting thread, and
+  * yields in a critical section.
+  *
+  */
+
+ static VALUE
+ rb_mutex_exclusive_unlock_inner(Mutex *mutex)
+ {
+     VALUE waking;
+     waking = unlock_mutex_inner(mutex);
+     rb_yield(Qundef);
+     return waking;
+ }
+
+ static VALUE
+ rb_mutex_exclusive_unlock(VALUE self)
+ {
+     Mutex *mutex;
+     VALUE waking;
+     Data_Get_Struct(self, Mutex, mutex);
+
+     rb_thread_critical = 1;
+     waking = rb_ensure(rb_mutex_exclusive_unlock_inner, (VALUE)mutex, set_critical, 0);
+
+     if (waking == Qundef) {
+         return Qnil;
+     }
+
+     if (RTEST(waking)) {
+         run_thread(waking);
+     }
+
+     return self;
+ }
+
+ /*
+  * Document-method: synchronize
+  * call-seq: synchronize { ... }
+  *
+  * Obtains a lock, runs the block, and releases the lock when the block
+  * completes. See the example under Mutex.
+  *
+  */
+
+ static VALUE
+ rb_mutex_synchronize(VALUE self)
+ {
+     rb_mutex_lock(self);
+     return rb_ensure(rb_yield, Qundef, rb_mutex_unlock, self);
+ }
+
+ /*
+  * Document-class: ConditionVariable
+  *
+  * ConditionVariable objects augment class Mutex. Using condition variables,
+  * it is possible to suspend while in the middle of a critical section until a
+  * resource becomes available.
+  *
+  * Example:
+  *
+  *   require 'thread'
+  *
+  *   mutex = Mutex.new
+  *   resource = ConditionVariable.new
+  *
+  *   a = Thread.new {
+  *     mutex.synchronize {
+  *       # Thread 'a' now needs the resource
+  *       resource.wait(mutex)
+  *       # 'a' can now have the resource
+  *     }
+  *   }
+  *
+  *   b = Thread.new {
+  *     mutex.synchronize {
+  *       # Thread 'b' has finished using the resource
+  *       resource.signal
+  *     }
+  *   }
+  *
+  */
+
+ typedef struct _ConditionVariable {
+     List waiting;
+ } ConditionVariable;
+
+ static void
+ mark_condvar(ConditionVariable *condvar)
+ {
+     mark_list(&condvar->waiting);
+ }
+
+ static void
+ finalize_condvar(ConditionVariable *condvar)
+ {
+     finalize_list(&condvar->waiting);
+ }
+
+ static void
+ free_condvar(ConditionVariable *condvar)
+ {
+     assert_no_survivors(&condvar->waiting, "condition variable", condvar);
+     finalize_condvar(condvar);
+     xfree(condvar);
+ }
+
+ static void
+ init_condvar(ConditionVariable *condvar)
+ {
+     init_list(&condvar->waiting);
+ }
+
+ /*
+  * Document-method: new
+  * call-seq: ConditionVariable.new
+  *
+  * Creates a new ConditionVariable
+  *
+  */
+
+ static VALUE
+ rb_condvar_alloc(VALUE klass)
+ {
+     ConditionVariable *condvar;
+
+     condvar = ALLOC(ConditionVariable);
+     init_condvar(condvar);
+
+     return Data_Wrap_Struct(klass, mark_condvar, free_condvar, condvar);
+ }
+
+ /*
+  * Document-method: wait
+  * call-seq: wait
+  *
+  * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
+  *
+  */
+
+ static void
+ wait_condvar(ConditionVariable *condvar, Mutex *mutex)
+ {
+     rb_thread_critical = 1;
+     if (!RTEST(mutex->owner)) {
+         rb_thread_critical = 0;
+         return;
+     }
+     if (mutex->owner != rb_thread_current()) {
+         rb_thread_critical = 0;
+         rb_raise(rb_eThreadError, "Not owner");
+     }
+     mutex->owner = Qnil;
+     wait_list(&condvar->waiting);
+
+     lock_mutex(mutex);
+ }
+
+ static VALUE
+ legacy_exclusive_unlock(VALUE mutex)
+ {
+     return rb_funcall(mutex, rb_intern("exclusive_unlock"), 0);
+ }
+
+ typedef struct {
+     ConditionVariable *condvar;
+     VALUE mutex;
+ } legacy_wait_args;
+
+ static VALUE
+ legacy_wait(VALUE unused, legacy_wait_args *args)
+ {
+     wait_list(&args->condvar->waiting);
+     rb_funcall(args->mutex, rb_intern("lock"), 0);
+     return Qnil;
+ }
+
+ static VALUE
+ rb_condvar_wait(VALUE self, VALUE mutex_v)
+ {
+     ConditionVariable *condvar;
+     Data_Get_Struct(self, ConditionVariable, condvar);
+
+     if (CLASS_OF(mutex_v) != rb_cMutex) {
+         /* interoperate with legacy mutex */
+         legacy_wait_args args;
+         args.condvar = condvar;
+         args.mutex = mutex_v;
+         rb_iterate(legacy_exclusive_unlock, mutex_v, legacy_wait, (VALUE)&args);
+     } else {
+         Mutex *mutex;
+         Data_Get_Struct(mutex_v, Mutex, mutex);
+         wait_condvar(condvar, mutex);
+     }
+
+     return self;
+ }
+
+ /*
+  * Document-method: broadcast
+  * call-seq: broadcast
+  *
+  * Wakes up all threads waiting for this condition.
+  *
+  */
+
+ static VALUE
+ rb_condvar_broadcast(VALUE self)
+ {
+     ConditionVariable *condvar;
+
+     Data_Get_Struct(self, ConditionVariable, condvar);
+
+     rb_thread_critical = 1;
+     rb_ensure(wake_all, (VALUE)&condvar->waiting, set_critical, 0);
+     rb_thread_schedule();
+
+     return self;
+ }
+
+ /*
+  * Document-method: signal
+  * call-seq: signal
+  *
+  * Wakes up the first thread in line waiting for this condition.
+  *
+  */
+
+ static void
+ signal_condvar(ConditionVariable *condvar)
+ {
+     VALUE waking;
+     rb_thread_critical = 1;
+     waking = rb_ensure(wake_one, (VALUE)&condvar->waiting, set_critical, 0);
+     if (RTEST(waking)) {
+         run_thread(waking);
+     }
+ }
+
+ static VALUE
+ rb_condvar_signal(VALUE self)
+ {
+     ConditionVariable *condvar;
+     Data_Get_Struct(self, ConditionVariable, condvar);
+     signal_condvar(condvar);
+     return self;
+ }
+
+ /*
+  * Document-class: Queue
+  *
+  * This class provides a way to synchronize communication between threads.
+  *
+  * Example:
+  *
+  *   require 'thread'
+  *
+  *   queue = Queue.new
+  *
+  *   producer = Thread.new do
+  *     5.times do |i|
+  *       sleep rand(i) # simulate expense
+  *       queue << i
+  *       puts "#{i} produced"
+  *     end
+  *   end
+  *
+  *   consumer = Thread.new do
+  *     5.times do |i|
+  *       value = queue.pop
+  *       sleep rand(i/2) # simulate expense
+  *       puts "consumed #{value}"
+  *     end
+  *   end
+  *
+  *   consumer.join
+  *
+  */
+
+ typedef struct _Queue {
+     Mutex mutex;
+     ConditionVariable value_available;
+     ConditionVariable space_available;
+     List values;
+     unsigned long capacity;
+ } Queue;
+
+ static void
+ mark_queue(Queue *queue)
+ {
+     mark_mutex(&queue->mutex);
+     mark_condvar(&queue->value_available);
+     mark_condvar(&queue->space_available);
+     mark_list(&queue->values);
+ }
+
+ static void
+ finalize_queue(Queue *queue)
+ {
+     finalize_mutex(&queue->mutex);
+     finalize_condvar(&queue->value_available);
+     finalize_condvar(&queue->space_available);
+     finalize_list(&queue->values);
+ }
+
+ static void
+ free_queue(Queue *queue)
+ {
+     assert_no_survivors(&queue->mutex.waiting, "queue", queue);
+     assert_no_survivors(&queue->space_available.waiting, "queue", queue);
+     assert_no_survivors(&queue->value_available.waiting, "queue", queue);
+     finalize_queue(queue);
+     xfree(queue);
+ }
+
+ static void
+ init_queue(Queue *queue)
+ {
+     init_mutex(&queue->mutex);
+     init_condvar(&queue->value_available);
+     init_condvar(&queue->space_available);
+     init_list(&queue->values);
+     queue->capacity = 0;
+ }
+
+ /*
+  * Document-method: new
+  * call-seq: new
+  *
+  * Creates a new queue.
+  *
+  */
+
+ static VALUE
+ rb_queue_alloc(VALUE klass)
+ {
+     Queue *queue;
+     queue = ALLOC(Queue);
+     init_queue(queue);
+     return Data_Wrap_Struct(klass, mark_queue, free_queue, queue);
+ }
+
+ static VALUE
+ rb_queue_marshal_load(VALUE self, VALUE data)
+ {
+     Queue *queue;
+     VALUE array;
+     Data_Get_Struct(self, Queue, queue);
+
+     array = rb_marshal_load(data);
+     if (TYPE(array) != T_ARRAY) {
+         rb_raise(rb_eRuntimeError, "expected Array of queue data");
+     }
+     if (RARRAY(array)->len < 1) {
+         rb_raise(rb_eRuntimeError, "missing capacity value");
+     }
+     queue->capacity = NUM2ULONG(rb_ary_shift(array));
+     push_multiple_list(&queue->values, RARRAY(array)->ptr, (unsigned)RARRAY(array)->len);
+
+     return self;
+ }
+
+ static VALUE
+ rb_queue_marshal_dump(VALUE self)
+ {
+     Queue *queue;
+     VALUE array;
+     Data_Get_Struct(self, Queue, queue);
+
+     array = array_from_list(&queue->values);
+     rb_ary_unshift(array, ULONG2NUM(queue->capacity));
+     return rb_marshal_dump(array, Qnil);
+ }
+
+ /*
+  * Document-method: clear
+  * call-seq: clear
+  *
+  * Removes all objects from the queue.
+  *
+  */
+
+ static VALUE
+ rb_queue_clear(VALUE self)
+ {
+     Queue *queue;
+     Data_Get_Struct(self, Queue, queue);
+
+     lock_mutex(&queue->mutex);
+     clear_list(&queue->values);
+     signal_condvar(&queue->space_available);
+     unlock_mutex(&queue->mutex);
+
+     return self;
+ }
+
+ /*
+  * Document-method: empty?
+  * call-seq: empty?
+  *
+  * Returns +true+ if the queue is empty.
+  *
+  */
+
+ static VALUE
+ rb_queue_empty_p(VALUE self)
+ {
+     Queue *queue;
+     VALUE result;
+     Data_Get_Struct(self, Queue, queue);
+
+     lock_mutex(&queue->mutex);
+     result = queue->values.size == 0 ? Qtrue : Qfalse;
+     unlock_mutex(&queue->mutex);
+
+     return result;
+ }
+
+ /*
+  * Document-method: length
+  * call-seq: length
+  *
+  * Returns the length of the queue.
+  *
+  */
+
+ static VALUE
+ rb_queue_length(VALUE self)
+ {
+     Queue *queue;
+     VALUE result;
+     Data_Get_Struct(self, Queue, queue);
+
+     lock_mutex(&queue->mutex);
+     result = ULONG2NUM(queue->values.size);
+     unlock_mutex(&queue->mutex);
+
+     return result;
+ }
+
+ /*
+  * Document-method: num_waiting
+  * call-seq: num_waiting
+  *
+  * Returns the number of threads waiting on the queue.
+  *
+  */
+
+ static VALUE
+ rb_queue_num_waiting(VALUE self)
+ {
+     Queue *queue;
+     VALUE result;
+     Data_Get_Struct(self, Queue, queue);
+
+     lock_mutex(&queue->mutex);
+     result = ULONG2NUM(queue->value_available.waiting.size +
+                        queue->space_available.waiting.size);
+     unlock_mutex(&queue->mutex);
+
+     return result;
+ }
+
+ /*
+  * Document-method: pop
+  * call_seq: pop(non_block=false)
+  *
+  * Retrieves data from the queue. If the queue is empty, the calling thread is
+  * suspended until data is pushed onto the queue. If +non_block+ is true, the
+  * thread isn't suspended, and an exception is raised.
+  *
+  */
+
+ static VALUE
+ rb_queue_pop(int argc, VALUE *argv, VALUE self)
+ {
+     Queue *queue;
+     int should_block;
+     VALUE result;
+     Data_Get_Struct(self, Queue, queue);
+
+     if (argc == 0) {
+         should_block = 1;
+     } else if (argc == 1) {
+         should_block = !RTEST(argv[0]);
+     } else {
+         rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", argc);
+     }
+
+     lock_mutex(&queue->mutex);
+     if (!queue->values.entries && !should_block) {
+         unlock_mutex(&queue->mutex);
+         rb_raise(rb_eThreadError, "queue empty");
+     }
+
+     while (!queue->values.entries) {
+         wait_condvar(&queue->value_available, &queue->mutex);
+     }
+
+     result = shift_list(&queue->values);
+     if (queue->capacity && queue->values.size < queue->capacity) {
+         signal_condvar(&queue->space_available);
+     }
+     unlock_mutex(&queue->mutex);
+
+     return result;
+ }
+
+ /*
+  * Document-method: push
+  * call-seq: push(obj)
+  *
+  * Pushes +obj+ to the queue.
+  *
+  */
+
+ static VALUE
+ rb_queue_push(VALUE self, VALUE value)
+ {
+     Queue *queue;
+     Data_Get_Struct(self, Queue, queue);
+
+     lock_mutex(&queue->mutex);
+     while (queue->capacity && queue->values.size >= queue->capacity) {
+         wait_condvar(&queue->space_available, &queue->mutex);
+     }
+     push_list(&queue->values, value);
+     signal_condvar(&queue->value_available);
+     unlock_mutex(&queue->mutex);
+
+     return self;
+ }
+
+ /*
+  * Document-class: SizedQueue
+  *
+  * This class represents queues of specified size capacity. The push operation
+  * may be blocked if the capacity is full.
+  *
+  * See Queue for an example of how a SizedQueue works.
+  *
+  */
+
+ /*
+  * Document-method: new
+  * call-seq: new
+  *
+  * Creates a fixed-length queue with a maximum size of +max+.
+  *
+  */
+
+ /*
+  * Document-method: max
+  * call-seq: max
+  *
+  * Returns the maximum size of the queue.
+  *
+  */
+
+ static VALUE
+ rb_sized_queue_max(VALUE self)
+ {
+     Queue *queue;
+     VALUE result;
+     Data_Get_Struct(self, Queue, queue);
+
+     lock_mutex(&queue->mutex);
+     result = ULONG2NUM(queue->capacity);
+     unlock_mutex(&queue->mutex);
+
+     return result;
+ }
+
+ /*
+  * Document-method: max=
+  * call-seq: max=(size)
+  *
+  * Sets the maximum size of the queue.
+  *
+  */
+
+ static VALUE
+ rb_sized_queue_max_set(VALUE self, VALUE value)
+ {
+     Queue *queue;
+     unsigned long new_capacity;
+     unsigned long difference;
+     Data_Get_Struct(self, Queue, queue);
+
+     new_capacity = NUM2ULONG(value);
+
+     if (new_capacity < 1) {
+         rb_raise(rb_eArgError, "value must be positive");
+     }
+
+     lock_mutex(&queue->mutex);
+     if (queue->capacity && new_capacity > queue->capacity) {
+         difference = new_capacity - queue->capacity;
+     } else {
+         difference = 0;
+     }
+     queue->capacity = new_capacity;
+     for (; difference > 0; --difference) {
+         signal_condvar(&queue->space_available);
+     }
+     unlock_mutex(&queue->mutex);
+
+     return self;
+ }
+
+ /*
+  * Document-method: push
+  * call-seq: push(obj)
+  *
+  * Pushes +obj+ to the queue. If there is no space left in the queue, waits
+  * until space becomes available.
+  *
+  */
+
+ /*
+  * Document-method: pop
+  * call-seq: pop(non_block=false)
+  *
+  * Retrieves data from the queue and runs a waiting thread, if any.
+  *
+  */
+
+ /* for marshalling mutexes and condvars */
+
+ static VALUE
+ dummy_load(VALUE self, VALUE string)
+ {
+     return Qnil;
+ }
+
+ static VALUE
+ dummy_dump(VALUE self)
+ {
+     return rb_str_new2("");
+ }
+
+ void
+ Init_thread(void)
+ {
+     rb_define_singleton_method(rb_cThread, "exclusive", rb_thread_exclusive, 0);
+
+     rb_cMutex = rb_define_class("Mutex", rb_cObject);
+     rb_define_alloc_func(rb_cMutex, rb_mutex_alloc);
+     rb_define_method(rb_cMutex, "marshal_load", dummy_load, 1);
+     rb_define_method(rb_cMutex, "marshal_dump", dummy_dump, 0);
+     rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
+     rb_define_method(rb_cMutex, "try_lock", rb_mutex_try_lock, 0);
+     rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
+     rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
+     rb_define_method(rb_cMutex, "exclusive_unlock", rb_mutex_exclusive_unlock, 0);
+     rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize, 0);
+
+     rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject);
+     rb_define_alloc_func(rb_cConditionVariable, rb_condvar_alloc);
+     rb_define_method(rb_cConditionVariable, "marshal_load", dummy_load, 1);
+     rb_define_method(rb_cConditionVariable, "marshal_dump", dummy_dump, 0);
+     rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, 1);
+     rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
+     rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
+
+     rb_cQueue = rb_define_class("Queue", rb_cObject);
+     rb_define_alloc_func(rb_cQueue, rb_queue_alloc);
+     rb_define_method(rb_cQueue, "marshal_load", rb_queue_marshal_load, 1);
+     rb_define_method(rb_cQueue, "marshal_dump", rb_queue_marshal_dump, 0);
+     rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
+     rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
+     rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
+     rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
+     rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
+     rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
+     rb_alias(rb_cQueue, rb_intern("enq"), rb_intern("push"));
+     rb_alias(rb_cQueue, rb_intern("<<"), rb_intern("push"));
+     rb_alias(rb_cQueue, rb_intern("deq"), rb_intern("pop"));
+     rb_alias(rb_cQueue, rb_intern("shift"), rb_intern("pop"));
+     rb_alias(rb_cQueue, rb_intern("size"), rb_intern("length"));
+
+     rb_cSizedQueue = rb_define_class("SizedQueue", rb_cQueue);
+     rb_define_method(rb_cSizedQueue, "initialize", rb_sized_queue_max_set, 1);
+     rb_define_method(rb_cSizedQueue, "num_waiting", rb_queue_num_waiting, 0);
+     rb_define_method(rb_cSizedQueue, "pop", rb_queue_pop, -1);
+     rb_define_method(rb_cSizedQueue, "push", rb_queue_push, 1);
+     rb_define_method(rb_cSizedQueue, "max", rb_sized_queue_max, 0);
+     rb_define_method(rb_cSizedQueue, "max=", rb_sized_queue_max_set, 1);
+     rb_alias(rb_cSizedQueue, rb_intern("enq"), rb_intern("push"));
+     rb_alias(rb_cSizedQueue, rb_intern("<<"), rb_intern("push"));
+     rb_alias(rb_cSizedQueue, rb_intern("deq"), rb_intern("pop"));
+     rb_alias(rb_cSizedQueue, rb_intern("shift"), rb_intern("pop"));
+ }
+