polyphony 0.43.1 → 0.43.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +1 -1
  3. data/CHANGELOG.md +37 -0
  4. data/Gemfile.lock +2 -2
  5. data/README.md +2 -1
  6. data/docs/_includes/head.html +40 -0
  7. data/docs/_includes/title.html +1 -0
  8. data/docs/_user-guide/web-server.md +11 -11
  9. data/docs/getting-started/overview.md +2 -2
  10. data/docs/index.md +4 -3
  11. data/docs/main-concepts/design-principles.md +23 -34
  12. data/docs/main-concepts/fiber-scheduling.md +1 -1
  13. data/docs/polyphony-logo.png +0 -0
  14. data/examples/adapters/concurrent-ruby.rb +9 -0
  15. data/examples/adapters/redis_blpop.rb +12 -0
  16. data/examples/core/xx-daemon.rb +14 -0
  17. data/examples/io/xx-happy-eyeballs.rb +21 -22
  18. data/examples/io/xx-zip.rb +19 -0
  19. data/examples/performance/mem-usage.rb +34 -28
  20. data/examples/performance/messaging.rb +29 -0
  21. data/examples/performance/multi_snooze.rb +11 -9
  22. data/examples/xx-spin.rb +32 -0
  23. data/ext/polyphony/libev_agent.c +242 -145
  24. data/ext/polyphony/libev_queue.c +129 -57
  25. data/ext/polyphony/polyphony.h +12 -5
  26. data/ext/polyphony/ring_buffer.c +120 -0
  27. data/ext/polyphony/ring_buffer.h +28 -0
  28. data/ext/polyphony/thread.c +13 -7
  29. data/lib/polyphony.rb +29 -10
  30. data/lib/polyphony/adapters/redis.rb +3 -2
  31. data/lib/polyphony/core/global_api.rb +5 -3
  32. data/lib/polyphony/core/resource_pool.rb +19 -9
  33. data/lib/polyphony/core/thread_pool.rb +1 -1
  34. data/lib/polyphony/extensions/core.rb +40 -0
  35. data/lib/polyphony/extensions/fiber.rb +9 -14
  36. data/lib/polyphony/extensions/io.rb +17 -16
  37. data/lib/polyphony/extensions/openssl.rb +8 -0
  38. data/lib/polyphony/extensions/socket.rb +12 -0
  39. data/lib/polyphony/version.rb +1 -1
  40. data/test/q.rb +24 -0
  41. data/test/test_agent.rb +13 -7
  42. data/test/test_fiber.rb +3 -3
  43. data/test/test_global_api.rb +50 -17
  44. data/test/test_io.rb +10 -2
  45. data/test/test_queue.rb +26 -1
  46. data/test/test_resource_pool.rb +12 -0
  47. data/test/test_socket.rb +43 -0
  48. data/test/test_throttler.rb +6 -5
  49. metadata +13 -2
data/ext/polyphony/libev_queue.c

@@ -1,4 +1,5 @@
  #include "polyphony.h"
+ #include "ring_buffer.h"

  struct async_watcher {
    ev_async async;
@@ -6,48 +7,65 @@ struct async_watcher {
    VALUE fiber;
  };

- struct async_queue {
+ struct async_watcher_queue {
    struct async_watcher **queue;
-   unsigned int len;
+   unsigned int length;
    unsigned int count;
    unsigned int push_idx;
-   unsigned int pop_idx;
+   unsigned int shift_idx;
  };

- void async_queue_init(struct async_queue *queue) {
-   queue->len = 4;
+ void async_watcher_queue_init(struct async_watcher_queue *queue) {
+   queue->length = 1;
    queue->count = 0;
-   queue->queue = malloc(sizeof(struct async_watcher *) * queue->len);
+   queue->queue = malloc(sizeof(struct async_watcher *) * queue->length);
    queue->push_idx = 0;
-   queue->pop_idx = 0;
+   queue->shift_idx = 0;
  }

- void async_queue_free(struct async_queue *queue) {
+ void async_watcher_queue_free(struct async_watcher_queue *queue) {
    free(queue->queue);
  }

- void async_queue_push(struct async_queue *queue, struct async_watcher *watcher) {
-   if (queue->push_idx == queue->len) {
-     queue->len = queue->len * 2;
-     queue->queue = realloc(queue->queue, sizeof(struct async_watcher *) * queue->len);
-   }
+ void async_watcher_queue_realign(struct async_watcher_queue *queue) {
+   memmove(
+     queue->queue,
+     queue->queue + queue->shift_idx,
+     queue->count * sizeof(struct async_watcher *)
+   );
+   queue->push_idx = queue->push_idx - queue->shift_idx;
+   queue->shift_idx = 0;
+ }
+
+ #define QUEUE_REALIGN_THRESHOLD 32
+
+ void async_watcher_queue_push(struct async_watcher_queue *queue, struct async_watcher *watcher) {
    if (queue->count == 0) {
      queue->push_idx = 0;
-     queue->pop_idx = 0;
+     queue->shift_idx = 0;
+   }
+   if (queue->push_idx == queue->length) {
+     // prevent shift idx moving too much away from zero
+     if (queue->length >= QUEUE_REALIGN_THRESHOLD && queue->shift_idx >= (queue->length / 2))
+       async_watcher_queue_realign(queue);
+     else {
+       queue->length = (queue->length == 1) ? 4 : queue->length * 2;
+       queue->queue = realloc(queue->queue, sizeof(struct async_watcher *) * queue->length);
+     }
    }
    queue->count++;
    queue->queue[queue->push_idx++] = watcher;
  }

- struct async_watcher *async_queue_pop(struct async_queue *queue) {
+ struct async_watcher *async_watcher_queue_shift(struct async_watcher_queue *queue) {
    if (queue->count == 0) return 0;

    queue->count--;

-   return queue->queue[queue->pop_idx++];
+   return queue->queue[queue->shift_idx++];
  }

- void async_queue_remove_at_idx(struct async_queue *queue, unsigned int remove_idx) {
+ void async_watcher_queue_remove_at_idx(struct async_watcher_queue *queue, unsigned int remove_idx) {
    queue->count--;
    queue->push_idx--;
    if (remove_idx < queue->push_idx)
@@ -58,33 +76,33 @@ void async_queue_remove_at_idx(struct async_queue *queue, unsigned int remove_id
    );
  }

- void async_queue_remove_by_fiber(struct async_queue *queue, VALUE fiber) {
+ void async_watcher_queue_remove_by_fiber(struct async_watcher_queue *queue, VALUE fiber) {
    if (queue->count == 0) return;

-   for (unsigned idx = queue->pop_idx; idx < queue->push_idx; idx++) {
+   for (unsigned idx = queue->shift_idx; idx < queue->push_idx; idx++) {
      if (queue->queue[idx]->fiber == fiber) {
-       async_queue_remove_at_idx(queue, idx);
+       async_watcher_queue_remove_at_idx(queue, idx);
        return;
      }
    }
  }

  typedef struct queue {
-   VALUE items;
-   struct async_queue shift_queue;
+   ring_buffer values;
+   struct async_watcher_queue shift_queue;
  } LibevQueue_t;

-
  VALUE cLibevQueue = Qnil;

  static void LibevQueue_mark(void *ptr) {
    LibevQueue_t *queue = ptr;
-   rb_gc_mark(queue->items);
+   ring_buffer_mark(&queue->values);
  }

  static void LibevQueue_free(void *ptr) {
    LibevQueue_t *queue = ptr;
-   async_queue_free(&queue->shift_queue);
+   ring_buffer_free(&queue->values);
+   async_watcher_queue_free(&queue->shift_queue);
    xfree(ptr);
  }

@@ -112,27 +130,41 @@ static VALUE LibevQueue_initialize(VALUE self) {
    LibevQueue_t *queue;
    GetQueue(self, queue);

-   queue->items = rb_ary_new();
-   async_queue_init(&queue->shift_queue);
+   ring_buffer_init(&queue->values);
+   async_watcher_queue_init(&queue->shift_queue);

    return self;
  }

  VALUE LibevQueue_push(VALUE self, VALUE value) {
    LibevQueue_t *queue;
-   struct async_watcher *watcher;
    GetQueue(self, queue);
-   watcher = async_queue_pop(&queue->shift_queue);
-   if (watcher) {
-     ev_async_send(watcher->ev_loop, &watcher->async);
+   if (queue->shift_queue.count > 0) {
+     struct async_watcher *watcher = async_watcher_queue_shift(&queue->shift_queue);
+     if (watcher) {
+       ev_async_send(watcher->ev_loop, &watcher->async);
+     }
    }
-   rb_ary_push(queue->items, value);
+   ring_buffer_push(&queue->values, value);
+   return self;
+ }
+
+ VALUE LibevQueue_unshift(VALUE self, VALUE value) {
+   LibevQueue_t *queue;
+   GetQueue(self, queue);
+   if (queue->shift_queue.count > 0) {
+     struct async_watcher *watcher = async_watcher_queue_shift(&queue->shift_queue);
+     if (watcher) {
+       ev_async_send(watcher->ev_loop, &watcher->async);
+     }
+   }
+   ring_buffer_unshift(&queue->values, value);
    return self;
  }

  struct ev_loop *LibevAgent_ev_loop(VALUE self);

- void async_queue_callback(struct ev_loop *ev_loop, struct ev_async *ev_async, int revents) {
+ void async_watcher_queue_callback(struct ev_loop *ev_loop, struct ev_async *ev_async, int revents) {
    struct async_watcher *watcher = (struct async_watcher *)ev_async;
    Fiber_make_runnable(watcher->fiber, Qnil);
  }
@@ -143,22 +175,22 @@ VALUE LibevQueue_shift(VALUE self) {
    LibevQueue_t *queue;
    GetQueue(self, queue);

-   if (RARRAY_LEN(queue->items) == 0) {
+   if (queue->values.count == 0) {
      struct async_watcher watcher;
      VALUE agent = rb_ivar_get(rb_thread_current(), ID_ivar_agent);
      VALUE switchpoint_result = Qnil;

      watcher.ev_loop = LibevAgent_ev_loop(agent);
      watcher.fiber = rb_fiber_current();
-     async_queue_push(&queue->shift_queue, &watcher);
-     ev_async_init(&watcher.async, async_queue_callback);
+     async_watcher_queue_push(&queue->shift_queue, &watcher);
+     ev_async_init(&watcher.async, async_watcher_queue_callback);
      ev_async_start(watcher.ev_loop, &watcher.async);

      switchpoint_result = libev_agent_await(agent);
      ev_async_stop(watcher.ev_loop, &watcher.async);

      if (RTEST(rb_obj_is_kind_of(switchpoint_result, rb_eException))) {
-       async_queue_remove_by_fiber(&queue->shift_queue, watcher.fiber);
+       async_watcher_queue_remove_by_fiber(&queue->shift_queue, watcher.fiber);
        return rb_funcall(rb_mKernel, ID_raise, 1, switchpoint_result);
      }
      RB_GC_GUARD(watcher.fiber);
@@ -166,36 +198,72 @@ VALUE LibevQueue_shift(VALUE self) {
      RB_GC_GUARD(switchpoint_result);
    }

-   return rb_ary_shift(queue->items);
+   return ring_buffer_shift(&queue->values);
+ }
+
+ VALUE LibevQueue_shift_no_wait(VALUE self) {
+   LibevQueue_t *queue;
+   GetQueue(self, queue);
+
+   return ring_buffer_shift(&queue->values);
+ }
+
+ VALUE LibevQueue_delete(VALUE self, VALUE value) {
+   LibevQueue_t *queue;
+   GetQueue(self, queue);
+
+   ring_buffer_delete(&queue->values, value);
+   return self;
+ }
+
+ VALUE LibevQueue_clear(VALUE self) {
+   LibevQueue_t *queue;
+   GetQueue(self, queue);
+
+   ring_buffer_clear(&queue->values);
+   return self;
+ }
+
+ long LibevQueue_len(VALUE self) {
+   LibevQueue_t *queue;
+   GetQueue(self, queue);
+
+   return queue->values.count;
  }

  VALUE LibevQueue_shift_each(VALUE self) {
    LibevQueue_t *queue;
-   VALUE old_queue;
    GetQueue(self, queue);
-   old_queue = queue->items;
-   queue->items = rb_ary_new();
-
-   if (rb_block_given_p()) {
-     long len = RARRAY_LEN(old_queue);
-     long i;
-     for (i = 0; i < len; i++) {
-       rb_yield(RARRAY_AREF(old_queue, i));
-     }
-     RB_GC_GUARD(old_queue);
-     return self;
-   }
-   else {
-     RB_GC_GUARD(old_queue);
-     return old_queue;
-   }
+
+   ring_buffer_shift_each(&queue->values);
+   return self;
+ }
+
+ VALUE LibevQueue_shift_all(VALUE self) {
+   LibevQueue_t *queue;
+   GetQueue(self, queue);
+
+   return ring_buffer_shift_all(&queue->values);
  }

  VALUE LibevQueue_empty_p(VALUE self) {
    LibevQueue_t *queue;
    GetQueue(self, queue);

-   return (RARRAY_LEN(queue->items) == 0) ? Qtrue : Qfalse;
+   return (queue->values.count == 0) ? Qtrue : Qfalse;
+ }
+
+ void LibevQueue_trace(VALUE self) {
+   LibevQueue_t *queue;
+   GetQueue(self, queue);
+
+   printf(
+     "queue size: %d count: %d head: %d tail: %d\n",
+     queue->values.size,
+     queue->values.count,
+     queue->values.head,
+     queue->values.tail
+   );
  }

  void Init_LibevQueue() {
@@ -205,11 +273,15 @@ void Init_LibevQueue() {
    rb_define_method(cLibevQueue, "initialize", LibevQueue_initialize, 0);
    rb_define_method(cLibevQueue, "push", LibevQueue_push, 1);
    rb_define_method(cLibevQueue, "<<", LibevQueue_push, 1);
+   rb_define_method(cLibevQueue, "unshift", LibevQueue_unshift, 1);

-   rb_define_method(cLibevQueue, "pop", LibevQueue_shift, 0);
    rb_define_method(cLibevQueue, "shift", LibevQueue_shift, 0);
+   rb_define_method(cLibevQueue, "pop", LibevQueue_shift, 0);
+   rb_define_method(cLibevQueue, "shift_no_wait", LibevQueue_shift_no_wait, 0);
+   rb_define_method(cLibevQueue, "delete", LibevQueue_delete, 1);

    rb_define_method(cLibevQueue, "shift_each", LibevQueue_shift_each, 0);
+   rb_define_method(cLibevQueue, "shift_all", LibevQueue_shift_all, 0);
    rb_define_method(cLibevQueue, "empty?", LibevQueue_empty_p, 0);
  }
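
The shift_queue touched above is a plain dynamic array of fibers waiting in Queue#shift: shift_idx advances as waiters are woken, push_idx advances as new waiters register, and once the array holds at least QUEUE_REALIGN_THRESHOLD (32) slots with at least half of them already consumed, the live region is memmove'd back to index zero instead of growing the allocation, per the "prevent shift idx moving too much away from zero" comment. The standalone sketch below reproduces that grow-or-realign policy with plain ints in place of struct async_watcher pointers; the names (waiter_queue and friends) are illustrative and not part of the extension.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REALIGN_THRESHOLD 32   // stands in for QUEUE_REALIGN_THRESHOLD

// Illustrative analog of async_watcher_queue, storing ints instead of
// struct async_watcher pointers.
typedef struct {
  int *items;
  unsigned int length;     // allocated slots
  unsigned int count;      // live entries in [shift_idx, push_idx)
  unsigned int push_idx;
  unsigned int shift_idx;
} waiter_queue;

void waiter_queue_init(waiter_queue *q) {
  q->length = 1;
  q->count = 0;
  q->items = malloc(sizeof(int) * q->length);
  q->push_idx = q->shift_idx = 0;
}

// Slide the live region back to index 0 so already-consumed slots get reused.
void waiter_queue_realign(waiter_queue *q) {
  memmove(q->items, q->items + q->shift_idx, q->count * sizeof(int));
  q->push_idx -= q->shift_idx;
  q->shift_idx = 0;
}

void waiter_queue_push(waiter_queue *q, int value) {
  if (q->count == 0) q->push_idx = q->shift_idx = 0;
  if (q->push_idx == q->length) {
    // Realign when at least half the array sits before shift_idx;
    // otherwise grow 1 -> 4 -> 8 -> ...
    if (q->length >= REALIGN_THRESHOLD && q->shift_idx >= q->length / 2)
      waiter_queue_realign(q);
    else {
      q->length = (q->length == 1) ? 4 : q->length * 2;
      q->items = realloc(q->items, sizeof(int) * q->length);
    }
  }
  q->count++;
  q->items[q->push_idx++] = value;
}

int waiter_queue_shift(waiter_queue *q, int *out) {
  if (q->count == 0) return 0;
  q->count--;
  *out = q->items[q->shift_idx++];
  return 1;
}

int main(void) {
  waiter_queue q;
  int v;
  waiter_queue_init(&q);
  for (int i = 0; i < 6; i++) waiter_queue_push(&q, i); // grows 1 -> 4 -> 8
  while (waiter_queue_shift(&q, &v)) printf("%d ", v);  // prints 0..5 in FIFO order
  printf("\nlength=%u count=%u shift_idx=%u\n", q.length, q.count, q.shift_idx);
  free(q.items);
  return 0;
}

Realigning instead of reallocating keeps the array bounded when the queue repeatedly fills and drains, which is the common pattern for a queue with many short-lived waiters.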
 
data/ext/polyphony/polyphony.h

@@ -1,5 +1,5 @@
- #ifndef RUBY_EV_H
- #define RUBY_EV_H
+ #ifndef POLYPHONY_H
+ #define POLYPHONY_H

  #include "ruby.h"
  #include "ruby/io.h"
@@ -76,9 +76,16 @@ VALUE LibevAgent_unref(VALUE self);
  int LibevAgent_ref_count(VALUE self);
  void LibevAgent_reset_ref_count(VALUE self);

- VALUE Polyphony_snooze(VALUE self);
+ VALUE LibevQueue_push(VALUE self, VALUE value);
+ VALUE LibevQueue_unshift(VALUE self, VALUE value);
+ VALUE LibevQueue_shift(VALUE self);
+ VALUE LibevQueue_shift_no_wait(VALUE self);
+ VALUE LibevQueue_clear(VALUE self);
+ VALUE LibevQueue_delete(VALUE self, VALUE value);
+ long LibevQueue_len(VALUE self);
+ void LibevQueue_trace(VALUE self);

- VALUE Polyphony_Queue_push(VALUE self, VALUE value);
+ VALUE Polyphony_snooze(VALUE self);

  VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
  VALUE Thread_switch_fiber(VALUE thread);
@@ -87,4 +94,4 @@ int io_setstrbuf(VALUE *str, long len);
  void io_set_read_length(VALUE str, long n, int shrinkable);
  VALUE io_enc_str(VALUE str, rb_io_t *fptr);

- #endif /* RUBY_EV_H */
+ #endif /* POLYPHONY_H */

data/ext/polyphony/ring_buffer.c (new file)

@@ -0,0 +1,120 @@
+ #include "polyphony.h"
+ #include "ring_buffer.h"
+
+ void ring_buffer_init(ring_buffer *buffer) {
+   buffer->size = 1;
+   buffer->count = 0;
+   buffer->entries = malloc(buffer->size * sizeof(VALUE));
+   buffer->head = 0;
+   buffer->tail = 0;
+ }
+
+ void ring_buffer_free(ring_buffer *buffer) {
+   free(buffer->entries);
+ }
+
+ int ring_buffer_empty_p(ring_buffer *buffer) {
+   return buffer->count == 0;
+ }
+
+ #define TRACE_RING_BUFFER(func, buffer) printf( \
+   "%s size: %d count: %d head: %d tail: %d\n", \
+   func, \
+   buffer->size, \
+   buffer->count, \
+   buffer->head, \
+   buffer->tail \
+ )
+
+ VALUE ring_buffer_shift(ring_buffer *buffer) {
+   // TRACE_RING_BUFFER("ring_buffer_shift", buffer);
+
+   VALUE value;
+   if (buffer->count == 0) return Qnil;
+
+   value = buffer->entries[buffer->head];
+   buffer->head = (buffer->head + 1) % buffer->size;
+   buffer->count--;
+   // INSPECT(value);
+   return value;
+ }
+
+ void ring_buffer_resize(ring_buffer *buffer) {
+   // TRACE_RING_BUFFER("ring_buffer_resize", buffer);
+
+   unsigned int old_size = buffer->size;
+   buffer->size = old_size == 1 ? 4 : old_size * 2;
+   // printf("new size: %d\n", buffer->size);
+   buffer->entries = realloc(buffer->entries, buffer->size * sizeof(VALUE));
+   for (unsigned int idx = 0; idx < buffer->head && idx < buffer->tail; idx++)
+     buffer->entries[old_size + idx] = buffer->entries[idx];
+   buffer->tail = buffer->head + buffer->count;
+ }
+
+ void ring_buffer_unshift(ring_buffer *buffer, VALUE value) {
+   // TRACE_RING_BUFFER("ring_buffer_unshift", buffer);
+   // INSPECT(value);
+
+   if (buffer->count == buffer->size) ring_buffer_resize(buffer);
+
+   buffer->head = (buffer->head - 1) % buffer->size;
+   buffer->entries[buffer->head] = value;
+   buffer->count++;
+ }
+
+ void ring_buffer_push(ring_buffer *buffer, VALUE value) {
+   // TRACE_RING_BUFFER("ring_buffer_push", buffer);
+   // INSPECT(value);
+   if (buffer->count == buffer->size) ring_buffer_resize(buffer);
+
+   buffer->entries[buffer->tail] = value;
+   buffer->tail = (buffer->tail + 1) % buffer->size;
+   buffer->count++;
+ }
+
+ void ring_buffer_mark(ring_buffer *buffer) {
+   for (unsigned int i = 0; i < buffer->count; i++)
+     rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size]);
+ }
+
+ void ring_buffer_shift_each(ring_buffer *buffer) {
+   // TRACE_RING_BUFFER("ring_buffer_shift_each", buffer);
+
+   for (unsigned int i = 0; i < buffer->count; i++)
+     rb_yield(buffer->entries[(buffer->head + i) % buffer->size]);
+
+   buffer->count = buffer->head = buffer->tail = 0;
+ }
+
+ VALUE ring_buffer_shift_all(ring_buffer *buffer) {
+   // TRACE_RING_BUFFER("ring_buffer_all", buffer);
+   VALUE array = rb_ary_new_capa(buffer->count);
+   for (unsigned int i = 0; i < buffer->count; i++)
+     rb_ary_push(array, buffer->entries[(buffer->head + i) % buffer->size]);
+   buffer->count = buffer->head = buffer->tail = 0;
+   return array;
+ }
+
+ void ring_buffer_delete_at(ring_buffer *buffer, unsigned int idx) {
+   for (unsigned int idx2 = idx; idx2 != buffer->tail; idx2 = (idx2 + 1) % buffer->size) {
+     buffer->entries[idx2] = buffer->entries[(idx2 + 1) % buffer->size];
+   }
+   buffer->count--;
+   buffer->tail = (buffer->tail - 1) % buffer->size;
+ }
+
+ void ring_buffer_delete(ring_buffer *buffer, VALUE value) {
+   // TRACE_RING_BUFFER("ring_buffer_delete", buffer);
+   for (unsigned int i = 0; i < buffer->count; i++) {
+     unsigned int idx = (buffer->head + i) % buffer->size;
+     if (buffer->entries[idx] == value) {
+       ring_buffer_delete_at(buffer, idx);
+       return;
+     }
+   }
+ }
+
+ void ring_buffer_clear(ring_buffer *buffer) {
+   // TRACE_RING_BUFFER("ring_buffer_clear", buffer);
+   buffer->count = buffer->head = buffer->tail = 0;
+ }
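
ring_buffer is the new backing store for queue values: a circular array of VALUEs that starts at a single slot and doubles (1 → 4 → 8 …) when full, with head marking the next entry to shift and tail the next free slot. On resize, the wrapped segment before head is copied past the old end so the live entries remain contiguous modulo the new size. The sketch below exercises the same push/shift/resize behaviour with plain ints standing in for VALUE; it is an illustrative analog under those assumptions, not the extension code.

#include <stdio.h>
#include <stdlib.h>

// Illustrative analog of ring_buffer, storing ints instead of Ruby VALUEs.
typedef struct {
  int *entries;
  unsigned int size;   // allocated slots
  unsigned int count;  // live entries
  unsigned int head;   // index of the next entry to shift
  unsigned int tail;   // index of the next free slot to push into
} int_ring;

void int_ring_init(int_ring *b) {
  b->size = 1;
  b->count = 0;
  b->entries = malloc(b->size * sizeof(int));
  b->head = b->tail = 0;
}

// Double the capacity and copy the wrapped segment ([0, head)) past the old
// end, so the live region occupies [head, head + count) in the larger array.
void int_ring_resize(int_ring *b) {
  unsigned int old_size = b->size;
  b->size = old_size == 1 ? 4 : old_size * 2;
  b->entries = realloc(b->entries, b->size * sizeof(int));
  for (unsigned int idx = 0; idx < b->head && idx < b->tail; idx++)
    b->entries[old_size + idx] = b->entries[idx];
  b->tail = b->head + b->count;
}

void int_ring_push(int_ring *b, int value) {
  if (b->count == b->size) int_ring_resize(b);
  b->entries[b->tail] = value;
  b->tail = (b->tail + 1) % b->size;
  b->count++;
}

int int_ring_shift(int_ring *b, int *out) {
  if (b->count == 0) return 0;   // empty (the real code returns Qnil here)
  *out = b->entries[b->head];
  b->head = (b->head + 1) % b->size;
  b->count--;
  return 1;
}

int main(void) {
  int_ring b;
  int v;
  int_ring_init(&b);
  for (int i = 0; i < 4; i++) int_ring_push(&b, i); // grows 1 -> 4; buffer is now full
  int_ring_shift(&b, &v);                           // consumes 0, head moves to 1
  int_ring_push(&b, 4);                             // written into slot 0, which head has vacated
  int_ring_push(&b, 5);                             // full again: resize copies the wrapped slot past the old end
  while (int_ring_shift(&b, &v)) printf("%d ", v);  // prints 1 2 3 4 5
  printf("\nsize=%u count=%u\n", b.size, b.count);  // size=8 count=0
  free(b.entries);
  return 0;
}

Note that ring_buffer_shift returns Qnil on an empty buffer rather than blocking; the blocking behaviour lives one level up, in LibevQueue_shift, which parks the calling fiber on the shift_queue until a push wakes it.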