polyphony 0.46.1 → 0.47.4
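
This diff covers the C extension changes between the two versions: io_uring completions now carry a per-operation resume value, both the io_uring and libev backends gain a `Backend#timeout` method backed by a new `Polyphony::TimeoutException` class, `Fiber#mailbox` exposes a fiber's lazily created message queue, accepted connections are instantiated from a class obtained via `ConnectionSocketClass` instead of a hard-coded `TCPSocket`, and `Polyphony::Queue` gains an optional capacity with blocking (backpressure) semantics for `push` and `unshift`.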

@@ -27,7 +27,6 @@ static int pidfd_open(pid_t pid, unsigned int flags) {
   return syscall(__NR_pidfd_open, pid, flags);
 }
 
-VALUE cTCPSocket;
 VALUE SYM_io_uring;
 
 typedef struct Backend_t {
@@ -171,7 +170,7 @@ void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *bac
     // otherwise, we mark it as completed, schedule the fiber and let it deal
     // with releasing the context
     ctx->completed = 1;
-    if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, Qnil);
+    if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
   }
 }
 
@@ -669,15 +668,16 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str) {
   return INT2NUM(len);
 }
 
-VALUE io_uring_backend_accept(Backend_t *backend, VALUE sock, int loop) {
+VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, int loop) {
   rb_io_t *fptr;
   struct sockaddr addr;
   socklen_t len = (socklen_t)sizeof addr;
-  VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
   VALUE socket = Qnil;
-  if (underlying_sock != Qnil) sock = underlying_sock;
+  VALUE socket_class = ConnectionSocketClass(server_socket);
+  VALUE underlying_sock = rb_ivar_get(server_socket, ID_ivar_io);
+  if (underlying_sock != Qnil) server_socket = underlying_sock;
 
-  GetOpenFile(sock, fptr);
+  GetOpenFile(server_socket, fptr);
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_ACCEPT);
@@ -695,7 +695,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE sock, int loop) {
     else {
       rb_io_t *fp;
 
-      socket = rb_obj_alloc(cTCPSocket);
+      socket = rb_obj_alloc(socket_class);
       MakeOpenFile(socket, fp);
       rb_update_max_fd(fd);
       fp->fd = fd;
@@ -774,15 +774,23 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
   return self;
 }
 
-// returns true if completed, 0 otherwise
-int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+inline struct __kernel_timespec double_to_timespec(double duration) {
   double duration_integral;
   double duration_fraction = modf(duration, &duration_integral);
   struct __kernel_timespec ts;
-
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
   ts.tv_sec = duration_integral;
   ts.tv_nsec = floor(duration_fraction * 1000000000);
+  return ts;
+}
+
+inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
+  return double_to_timespec(NUM2DBL(duration));
+}
+
+// returns true if completed, 0 otherwise
+int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+  struct __kernel_timespec ts = double_to_timespec(duration);
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
 
   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
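
The extracted `double_to_timespec` helper splits a float duration into whole seconds and nanoseconds using `modf` and `floor`. For illustration only, the same arithmetic expressed in Ruby (a hypothetical helper, not part of the gem):

def to_timespec(duration)
  sec = duration.floor                              # integral part, as modf yields for non-negative values
  nsec = ((duration - sec) * 1_000_000_000).floor   # fractional part, in nanoseconds
  [sec, nsec]
end

to_timespec(1.5)    #=> [1, 500000000]
to_timespec(0.25)   #=> [0, 250000000]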
@@ -830,6 +838,73 @@ VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   }
 }
 
+VALUE Backend_timeout_safe(VALUE arg) {
+  return rb_yield(arg);
+}
+
+VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
+  return exception;
+}
+
+VALUE Backend_timeout_ensure_safe(VALUE arg) {
+  return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
+}
+
+struct Backend_timeout_ctx {
+  Backend_t *backend;
+  op_context_t *ctx;
+};
+
+VALUE Backend_timeout_ensure(VALUE arg) {
+  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+  if (!timeout_ctx->ctx->completed) {
+    timeout_ctx->ctx->result = -ECANCELED;
+
+    // op was not completed, so we need to cancel it
+    struct io_uring_sqe *sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
+    io_uring_prep_cancel(sqe, timeout_ctx->ctx, 0);
+    timeout_ctx->backend->pending_sqes = 0;
+    io_uring_submit(&timeout_ctx->backend->ring);
+  }
+  OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+  return Qnil;
+}
+
+VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
+  VALUE duration;
+  VALUE exception;
+  VALUE move_on_value = Qnil;
+  rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+  struct __kernel_timespec ts = duration_to_timespec(duration);
+  Backend_t *backend;
+  GetBackend(self, backend);
+  VALUE result = Qnil;
+  VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+
+  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+  ctx->resume_value = timeout;
+  io_uring_prep_timeout(sqe, &ts, 0, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+  io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  io_uring_backend_defer_submit(backend);
+
+  struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
+  result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
+
+  if (result == timeout) {
+    if (exception == Qnil) return move_on_value;
+    RAISE_EXCEPTION(backend_timeout_exception(exception));
+  }
+
+  RAISE_IF_EXCEPTION(result);
+  RB_GC_GUARD(result);
+  RB_GC_GUARD(timeout);
+  return result;
+}
+
 VALUE Backend_waitpid(VALUE self, VALUE pid) {
   Backend_t *backend;
   int pid_int = NUM2INT(pid);
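
At the Ruby level, the new `timeout` method runs a block under a deadline: if the io_uring timeout fires first, the fiber is resumed with the pre-built `TimeoutException` instance, and the method either raises the caller-supplied exception or returns a fallback value. A minimal usage sketch, assuming the backend is reachable through `Thread.current.backend` (an assumption about the Ruby-level accessor, not shown in this diff):

require 'polyphony'

# Raises RuntimeError if the block takes longer than 1 second.
Thread.current.backend.timeout(1, RuntimeError) do
  sleep 10
end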
@@ -870,8 +945,7 @@ VALUE Backend_kind(VALUE self) {
 }
 
 void Init_Backend() {
-  rb_require("socket");
-  cTCPSocket = rb_const_get(rb_cObject, rb_intern("TCPSocket"));
+  Init_SocketClasses();
 
   VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cData);
   rb_define_alloc_func(cBackend, Backend_allocate);
@@ -899,6 +973,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
+  rb_define_method(cBackend, "timeout", Backend_timeout, -1);
   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);
 
@@ -42,6 +42,7 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
 
   ctx->type = type;
   ctx->fiber = rb_fiber_current();
+  ctx->resume_value = Qnil;
   ctx->completed = 0;
   ctx->result = 0;
 
@@ -24,6 +24,7 @@ typedef struct op_context {
   int id;
   int result;
   VALUE fiber;
+  VALUE resume_value;
 } op_context_t;
 
 typedef struct op_context_store {
@@ -13,7 +13,6 @@
 #include "../libev/ev.h"
 #include "ruby/io.h"
 
-VALUE cTCPSocket;
 VALUE SYM_libev;
 
 ID ID_ivar_is_nonblocking;
@@ -494,7 +493,7 @@ VALUE Backend_write_m(int argc, VALUE *argv, VALUE self) {
   Backend_writev(self, argv[0], argc - 1, argv + 1);
 }
 
-VALUE Backend_accept(VALUE self, VALUE sock) {
+VALUE Backend_accept(VALUE self, VALUE server_socket) {
   Backend_t *backend;
   struct libev_io watcher;
   rb_io_t *fptr;
@@ -502,12 +501,13 @@ VALUE Backend_accept(VALUE self, VALUE sock) {
   struct sockaddr addr;
   socklen_t len = (socklen_t)sizeof addr;
   VALUE switchpoint_result = Qnil;
-  VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
-  if (underlying_sock != Qnil) sock = underlying_sock;
+  VALUE socket_class = ConnectionSocketClass(server_socket);
+  VALUE underlying_sock = rb_ivar_get(server_socket, ID_ivar_io);
+  if (underlying_sock != Qnil) server_socket = underlying_sock;
 
   GetBackend(self, backend);
-  GetOpenFile(sock, fptr);
-  io_set_nonblock(fptr, sock);
+  GetOpenFile(server_socket, fptr);
+  io_set_nonblock(fptr, server_socket);
   watcher.fiber = Qnil;
   while (1) {
     fd = accept(fptr->fd, &addr, &len);
@@ -529,7 +529,7 @@ VALUE Backend_accept(VALUE self, VALUE sock) {
       goto error;
     }
 
-    socket = rb_obj_alloc(cTCPSocket);
+    socket = rb_obj_alloc(socket_class);
     MakeOpenFile(socket, fp);
     rb_update_max_fd(fd);
     fp->fd = fd;
@@ -550,7 +550,7 @@ error:
   return RAISE_EXCEPTION(switchpoint_result);
 }
 
-VALUE Backend_accept_loop(VALUE self, VALUE sock) {
+VALUE Backend_accept_loop(VALUE self, VALUE server_socket) {
   Backend_t *backend;
   struct libev_io watcher;
   rb_io_t *fptr;
@@ -559,12 +559,13 @@ VALUE Backend_accept_loop(VALUE self, VALUE sock) {
   socklen_t len = (socklen_t)sizeof addr;
   VALUE switchpoint_result = Qnil;
   VALUE socket = Qnil;
-  VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
-  if (underlying_sock != Qnil) sock = underlying_sock;
+  VALUE socket_class = ConnectionSocketClass(server_socket);
+  VALUE underlying_sock = rb_ivar_get(server_socket, ID_ivar_io);
+  if (underlying_sock != Qnil) server_socket = underlying_sock;
 
   GetBackend(self, backend);
-  GetOpenFile(sock, fptr);
-  io_set_nonblock(fptr, sock);
+  GetOpenFile(server_socket, fptr);
+  io_set_nonblock(fptr, server_socket);
   watcher.fiber = Qnil;
 
   while (1) {
@@ -586,7 +587,7 @@ VALUE Backend_accept_loop(VALUE self, VALUE sock) {
       goto error;
     }
 
-    socket = rb_obj_alloc(cTCPSocket);
+    socket = rb_obj_alloc(socket_class);
     MakeOpenFile(socket, fp);
     rb_update_max_fd(fd);
     fp->fd = fd;
@@ -722,6 +723,72 @@ noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   }
 }
 
+VALUE Backend_timeout_safe(VALUE arg) {
+  return rb_yield(arg);
+}
+
+VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
+  return exception;
+}
+
+VALUE Backend_timeout_ensure_safe(VALUE arg) {
+  return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
+}
+
+struct libev_timeout {
+  struct ev_timer timer;
+  VALUE fiber;
+  VALUE resume_value;
+};
+
+struct Backend_timeout_ctx {
+  Backend_t *backend;
+  struct libev_timeout *watcher;
+};
+
+VALUE Backend_timeout_ensure(VALUE arg) {
+  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+  ev_timer_stop(timeout_ctx->backend->ev_loop, &(timeout_ctx->watcher->timer));
+  return Qnil;
+}
+
+void Backend_timeout_callback(EV_P_ ev_timer *w, int revents)
+{
+  struct libev_timeout *watcher = (struct libev_timeout *)w;
+  Fiber_make_runnable(watcher->fiber, watcher->resume_value);
+}
+
+VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
+  VALUE duration;
+  VALUE exception;
+  VALUE move_on_value = Qnil;
+  rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+  Backend_t *backend;
+  struct libev_timeout watcher;
+  VALUE result = Qnil;
+  VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+  GetBackend(self, backend);
+  watcher.fiber = rb_fiber_current();
+  watcher.resume_value = timeout;
+  ev_timer_init(&watcher.timer, Backend_timeout_callback, NUM2DBL(duration), 0.);
+  ev_timer_start(backend->ev_loop, &watcher.timer);
+
+  struct Backend_timeout_ctx timeout_ctx = {backend, &watcher};
+  result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
+
+  if (result == timeout) {
+    if (exception == Qnil) return move_on_value;
+    RAISE_EXCEPTION(backend_timeout_exception(exception));
+  }
+
+  RAISE_IF_EXCEPTION(result);
+  RB_GC_GUARD(result);
+  RB_GC_GUARD(timeout);
+  return result;
+}
+
 struct libev_child {
   struct ev_child child;
   VALUE fiber;
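
The libev variant implements the same contract with a stack-allocated one-shot `ev_timer` that the ensure block always stops. Per `rb_scan_args("21", ...)`, the optional third argument is a fallback: when `exception` is nil, an elapsed timeout returns that value instead of raising. A sketch, under the same `Thread.current.backend` assumption as above:

require 'polyphony'

# Returns :too_slow instead of raising if the block overruns 0.5 seconds.
value = Thread.current.backend.timeout(0.5, nil, :too_slow) do
  sleep 10
end
value #=> :too_slow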
@@ -783,8 +850,7 @@ VALUE Backend_kind(VALUE self) {
 void Init_Backend() {
   ev_set_allocator(xrealloc);
 
-  rb_require("socket");
-  cTCPSocket = rb_const_get(rb_cObject, rb_intern("TCPSocket"));
+  Init_SocketClasses();
 
   VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cData);
   rb_define_alloc_func(cBackend, Backend_allocate);
@@ -812,6 +878,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
+  rb_define_method(cBackend, "timeout", Backend_timeout, -1);
   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);
 
@@ -128,6 +128,15 @@ VALUE Fiber_receive(VALUE self) {
   return Queue_shift(mailbox);
 }
 
+VALUE Fiber_mailbox(VALUE self) {
+  VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
+  if (mailbox == Qnil) {
+    mailbox = rb_funcall(cQueue, ID_new, 0);
+    rb_ivar_set(self, ID_ivar_mailbox, mailbox);
+  }
+  return mailbox;
+}
+
 VALUE Fiber_receive_all_pending(VALUE self) {
   VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
   return (mailbox == Qnil) ? rb_ary_new() : Queue_shift_all(mailbox);
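
`Fiber#mailbox` gives direct access to a fiber's message queue, creating it on first use just as `Fiber#receive` does. A small messaging sketch using Polyphony's `spin` and `receive`:

require 'polyphony'

worker = spin do
  loop { puts "got: #{receive}" }   # receive reads from the current fiber's mailbox
end

worker << 'hello'    # pushes onto the same queue that worker.mailbox exposes
worker.mailbox       #=> the lazily created Polyphony::Queue
snooze               # yield control so the worker can run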
@@ -146,9 +155,9 @@ void Init_Fiber() {
 
   rb_define_method(cFiber, "<<", Fiber_send, 1);
   rb_define_method(cFiber, "send", Fiber_send, 1);
-
   rb_define_method(cFiber, "receive", Fiber_receive, 0);
   rb_define_method(cFiber, "receive_all_pending", Fiber_receive_all_pending, 0);
+  rb_define_method(cFiber, "mailbox", Fiber_mailbox, 0);
 
   SYM_dead = ID2SYM(rb_intern("dead"));
   SYM_running = ID2SYM(rb_intern("running"));
@@ -1,6 +1,7 @@
 #include "polyphony.h"
 
 VALUE mPolyphony;
+VALUE cTimeoutException;
 
 ID ID_call;
 ID ID_caller;
@@ -55,6 +56,8 @@ void Init_Polyphony() {
   rb_define_global_function("snooze", Polyphony_snooze, 0);
   rb_define_global_function("suspend", Polyphony_suspend, 0);
 
+  cTimeoutException = rb_define_class_under(mPolyphony, "TimeoutException", rb_eException);
+
   ID_call = rb_intern("call");
   ID_caller = rb_intern("caller");
   ID_clear = rb_intern("clear");
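
Note that `TimeoutException` is defined under `rb_eException`, i.e. it descends from `Exception` rather than `StandardError`, so a bare `rescue` clause will not accidentally swallow it; it must be named explicitly (sketch reuses the hypothetical `Thread.current.backend` accessor from above):

begin
  Thread.current.backend.timeout(1, Polyphony::TimeoutException) { sleep 10 }
rescue Polyphony::TimeoutException
  puts 'timed out'   # a plain `rescue => e` catches only StandardError descendants
end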
@@ -39,6 +39,7 @@ extern VALUE mPolyphony;
 extern VALUE cQueue;
 extern VALUE cEvent;
 extern VALUE cRunqueue;
+extern VALUE cTimeoutException;
 
 extern ID ID_call;
 extern ID ID_caller;
@@ -82,12 +83,6 @@ VALUE Queue_push(VALUE self, VALUE value);
 VALUE Queue_unshift(VALUE self, VALUE value);
 VALUE Queue_shift(VALUE self);
 VALUE Queue_shift_all(VALUE self);
-VALUE Queue_shift_no_wait(VALUE self);
-VALUE Queue_clear(VALUE self);
-VALUE Queue_delete(VALUE self, VALUE value);
-long Queue_len(VALUE self);
-void Queue_trace(VALUE self);
-
 
 void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule);
 void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule);
@@ -101,4 +96,6 @@ VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_switch_fiber(VALUE thread);
 
+VALUE Polyphony_snooze(VALUE self);
+
 #endif /* POLYPHONY_H */
@@ -4,6 +4,8 @@
 typedef struct queue {
   ring_buffer values;
   ring_buffer shift_queue;
+  ring_buffer push_queue;
+  unsigned int capacity;
 } Queue_t;
 
 VALUE cQueue = Qnil;
@@ -12,12 +14,14 @@ static void Queue_mark(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_mark(&queue->values);
   ring_buffer_mark(&queue->shift_queue);
+  ring_buffer_mark(&queue->push_queue);
 }
 
 static void Queue_free(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_free(&queue->values);
   ring_buffer_free(&queue->shift_queue);
+  ring_buffer_free(&queue->push_queue);
   xfree(ptr);
 }
 
@@ -41,36 +45,77 @@ static VALUE Queue_allocate(VALUE klass) {
 #define GetQueue(obj, queue) \
   TypedData_Get_Struct((obj), Queue_t, &Queue_type, (queue))
 
-static VALUE Queue_initialize(VALUE self) {
+static VALUE Queue_initialize(int argc, VALUE *argv, VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_init(&queue->values);
   ring_buffer_init(&queue->shift_queue);
+  ring_buffer_init(&queue->push_queue);
+  queue->capacity = (argc == 1) ? NUM2UINT(argv[0]) : 0;
 
   return self;
 }
 
+inline void queue_resume_first_blocked_fiber(ring_buffer *queue) {
+  if (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_all_blocked_fibers(ring_buffer *queue) {
+  while (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_blocked_fibers_to_capacity(Queue_t *queue) {
+  for (unsigned int i = queue->values.count; (i < queue->capacity) && queue->push_queue.count; i++) {
+    VALUE fiber = ring_buffer_shift(&queue->push_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void capped_queue_block_push(Queue_t *queue) {
+  VALUE fiber = rb_fiber_current();
+  VALUE backend = rb_ivar_get(rb_thread_current(), ID_ivar_backend);
+  VALUE switchpoint_result;
+  while (1) {
+    if (queue->capacity > queue->values.count) Fiber_make_runnable(fiber, Qnil);
+
+    ring_buffer_push(&queue->push_queue, fiber);
+    switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
+    ring_buffer_delete(&queue->push_queue, fiber);
+
+    RAISE_IF_EXCEPTION(switchpoint_result);
+    RB_GC_GUARD(switchpoint_result);
+    if (queue->capacity > queue->values.count) break;
+  }
+}
+
 VALUE Queue_push(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  if (queue->shift_queue.count > 0) {
-    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
-    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
-  }
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_push(&queue->values, value);
+
   return self;
 }
 
 VALUE Queue_unshift(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
-  if (queue->shift_queue.count > 0) {
-    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
-    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
-  }
+
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_unshift(&queue->values, value);
+
   return self;
 }
 
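With a non-zero capacity, `push` and `unshift` become blocking: a producer fiber parks itself on `push_queue` until a consumer makes room, giving the queue backpressure semantics. A sketch of the new capped constructor (assumes `Fiber#await`, Polyphony's usual join API):

require 'polyphony'

queue = Polyphony::Queue.new(2)   # capped at 2 items

producer = spin do
  5.times { |i| queue << i }      # blocks whenever the queue already holds 2 items
end

5.times { puts queue.shift }      # each shift lets a blocked producer proceed
producer.await
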
@@ -83,42 +128,63 @@ VALUE Queue_shift(VALUE self) {
   VALUE backend = rb_ivar_get(thread, ID_ivar_backend);
 
   while (1) {
-    ring_buffer_push(&queue->shift_queue, fiber);
-    if (queue->values.count > 0) Fiber_make_runnable(fiber, Qnil);
+    if (queue->values.count) Fiber_make_runnable(fiber, Qnil);
 
+    ring_buffer_push(&queue->shift_queue, fiber);
     VALUE switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
     ring_buffer_delete(&queue->shift_queue, fiber);
 
     RAISE_IF_EXCEPTION(switchpoint_result);
     RB_GC_GUARD(switchpoint_result);
-
-    if (queue->values.count > 0)
-      return ring_buffer_shift(&queue->values);
+    if (queue->values.count) break;
   }
-
-  return Qnil;
+  VALUE value = ring_buffer_shift(&queue->values);
+  if ((queue->capacity) && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+  RB_GC_GUARD(value);
+  return value;
 }
 
-VALUE Queue_shift_no_wait(VALUE self) {
-  Queue_t *queue;
+VALUE Queue_delete(VALUE self, VALUE value) {
+  Queue_t *queue;
   GetQueue(self, queue);
 
-  return ring_buffer_shift(&queue->values);
+  ring_buffer_delete(&queue->values, value);
+
+  if (queue->capacity && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+
+  return self;
 }
 
-VALUE Queue_delete(VALUE self, VALUE value) {
+VALUE Queue_cap(VALUE self, VALUE cap) {
+  unsigned int new_capacity = NUM2UINT(cap);
   Queue_t *queue;
   GetQueue(self, queue);
-
-  ring_buffer_delete(&queue->values, value);
+  queue->capacity = new_capacity;
+
+  if (queue->capacity)
+    queue_resume_blocked_fibers_to_capacity(queue);
+  else
+    queue_resume_all_blocked_fibers(&queue->push_queue);
+
   return self;
 }
 
+VALUE Queue_capped_p(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return queue->capacity ? UINT2NUM(queue->capacity) : Qnil;
+}
+
 VALUE Queue_clear(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_clear(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+
   return self;
 }
 
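`#cap` retrofits a capacity onto an existing queue and `#capped?` reports it, returning the capacity or nil when uncapped; capping to zero removes the limit and resumes all blocked producers:

queue = Polyphony::Queue.new
queue.capped?    #=> nil
queue.cap(4)
queue.capped?    #=> 4
queue.cap(0)     # uncap: resumes any fibers blocked on push
queue.capped?    #=> nil
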
@@ -134,6 +200,7 @@ VALUE Queue_shift_each(VALUE self) {
   GetQueue(self, queue);
 
   ring_buffer_shift_each(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
   return self;
 }
 
@@ -141,7 +208,9 @@ VALUE Queue_shift_all(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return ring_buffer_shift_all(&queue->values);
+  VALUE result = ring_buffer_shift_all(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+  return result;
 }
 
 VALUE Queue_flush_waiters(VALUE self, VALUE value) {
@@ -160,14 +229,14 @@ VALUE Queue_empty_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->values.count == 0) ? Qtrue : Qfalse;
+  return (!queue->values.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_pending_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->shift_queue.count > 0) ? Qtrue : Qfalse;
+  return (queue->shift_queue.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_size_m(VALUE self) {
@@ -177,26 +246,22 @@ VALUE Queue_size_m(VALUE self) {
   return INT2NUM(queue->values.count);
 }
 
-void Queue_trace(VALUE self) {
-  Queue_t *queue;
-  GetQueue(self, queue);
-
-  printf("run queue size: %d count: %d\n", queue->values.size, queue->values.count);
-}
-
 void Init_Queue() {
   cQueue = rb_define_class_under(mPolyphony, "Queue", rb_cData);
   rb_define_alloc_func(cQueue, Queue_allocate);
 
-  rb_define_method(cQueue, "initialize", Queue_initialize, 0);
+  rb_define_method(cQueue, "initialize", Queue_initialize, -1);
   rb_define_method(cQueue, "push", Queue_push, 1);
   rb_define_method(cQueue, "<<", Queue_push, 1);
   rb_define_method(cQueue, "unshift", Queue_unshift, 1);
 
   rb_define_method(cQueue, "shift", Queue_shift, 0);
   rb_define_method(cQueue, "pop", Queue_shift, 0);
-  rb_define_method(cQueue, "shift_no_wait", Queue_shift_no_wait, 0);
   rb_define_method(cQueue, "delete", Queue_delete, 1);
+  rb_define_method(cQueue, "clear", Queue_clear, 0);
+
+  rb_define_method(cQueue, "cap", Queue_cap, 1);
+  rb_define_method(cQueue, "capped?", Queue_capped_p, 0);
 
   rb_define_method(cQueue, "shift_each", Queue_shift_each, 0);
   rb_define_method(cQueue, "shift_all", Queue_shift_all, 0);