couchbase 1.2.0.z.beta-x86-mingw32 → 1.2.1-x86-mingw32

Files changed (45)
  1. data/.travis.yml +1 -1
  2. data/Makefile +3 -0
  3. data/README.markdown +15 -4
  4. data/RELEASE_NOTES.markdown +526 -0
  5. data/couchbase.gemspec +0 -1
  6. data/ext/couchbase_ext/arguments.c +161 -244
  7. data/ext/couchbase_ext/arithmetic.c +29 -37
  8. data/ext/couchbase_ext/bucket.c +252 -219
  9. data/ext/couchbase_ext/couchbase_ext.c +540 -417
  10. data/ext/couchbase_ext/couchbase_ext.h +218 -191
  11. data/ext/couchbase_ext/delete.c +30 -27
  12. data/ext/couchbase_ext/extconf.rb +15 -3
  13. data/ext/couchbase_ext/get.c +45 -37
  14. data/ext/couchbase_ext/http.c +95 -74
  15. data/ext/couchbase_ext/multithread_plugin.c +1238 -0 (new file; full diff shown below)
  16. data/ext/couchbase_ext/observe.c +42 -37
  17. data/ext/couchbase_ext/result.c +17 -20
  18. data/ext/couchbase_ext/stats.c +30 -28
  19. data/ext/couchbase_ext/store.c +47 -39
  20. data/ext/couchbase_ext/timer.c +11 -11
  21. data/ext/couchbase_ext/touch.c +30 -27
  22. data/ext/couchbase_ext/unlock.c +30 -27
  23. data/ext/couchbase_ext/utils.c +166 -89
  24. data/ext/couchbase_ext/version.c +29 -26
  25. data/lib/action_dispatch/middleware/session/couchbase_store.rb +2 -2
  26. data/lib/active_support/cache/couchbase_store.rb +6 -6
  27. data/lib/couchbase.rb +1 -0
  28. data/lib/couchbase/bucket.rb +6 -11
  29. data/lib/couchbase/cluster.rb +105 -0
  30. data/lib/couchbase/utils.rb +8 -5
  31. data/lib/couchbase/version.rb +1 -1
  32. data/lib/couchbase/view.rb +51 -5
  33. data/lib/couchbase/view_row.rb +1 -1
  34. data/lib/ext/multi_json_fix.rb +13 -9
  35. data/lib/rack/session/couchbase.rb +11 -7
  36. data/tasks/compile.rake +1 -1
  37. data/tasks/test.rake +40 -34
  38. data/tasks/util.rake +1 -1
  39. data/test/setup.rb +9 -2
  40. data/test/test_arithmetic.rb +37 -0
  41. data/test/test_async.rb +22 -18
  42. data/test/test_unlock.rb +0 -1
  43. data/test/test_utils.rb +32 -0
  44. metadata +13 -23
  45. data/HISTORY.markdown +0 -219
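The bulk of this release is the new Ruby-thread-aware I/O plugin, data/ext/couchbase_ext/multithread_plugin.c (item 15 above), which implements libcouchbase's lcb_io_opt_st interface on top of rb_thread_fd_select/poll so that waiting on sockets yields to other Ruby threads instead of blocking the VM. As a rough sketch of how such a plugin is consumed (this assumes the libcouchbase 2.x C API; the wrapper connect_with_mt_plugin below is invented for illustration and is not part of the gem):

/* Illustrative only: creating the plugin and handing it to lcb_create()
 * (libcouchbase 2.x C API assumed). */
#include <string.h>
#include <libcouchbase/couchbase.h>

lcb_error_t cb_create_ruby_mt_io_opts(int version, lcb_io_opt_t *io, void *arg);

static lcb_error_t
connect_with_mt_plugin(lcb_t *instance)
{
    struct lcb_create_st options;
    lcb_io_opt_t io = NULL;
    lcb_error_t err;

    err = cb_create_ruby_mt_io_opts(0, &io, NULL);  /* plugin interface version 0 */
    if (err != LCB_SUCCESS) {
        return err;
    }
    memset(&options, 0, sizeof(options));
    options.v.v0.host = "localhost:8091";
    options.v.v0.bucket = "default";
    options.v.v0.io = io;       /* route all I/O through the plugin's loop */
    return lcb_create(instance, &options);
}

The full diff of the new file follows.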
data/ext/couchbase_ext/multithread_plugin.c
@@ -0,0 +1,1238 @@
+ /* vim: ft=c et ts=8 sts=4 sw=4 cino=
+  *
+  * Copyright 2012 Couchbase, Inc.
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ #include "couchbase_ext.h"
+
+
+ #ifndef _WIN32
+
+ #ifndef HAVE_RB_THREAD_BLOCKING_REGION
+ #include <rubysig.h>
+ #endif
+ #include <errno.h>
+ #include <sys/types.h>
+ #include <sys/socket.h>
+ #include <unistd.h>
+ #ifdef HAVE_FCNTL_H
+ #include <fcntl.h>
+ #endif
+ #ifdef HAVE_POLL
+ #include <poll.h>
+ #endif
+ #define INVALID_SOCKET (-1)
+
+ /* Copied from libev plugin */
+ static lcb_ssize_t
+ lcb_io_recv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+         void *buffer, lcb_size_t len, int flags)
+ {
+     lcb_ssize_t ret = recv(sock, buffer, len, flags);
+     if (ret < 0) {
+         iops->v.v0.error = errno;
+     }
+     return ret;
+ }
+
+ static lcb_ssize_t
+ lcb_io_recvv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+         struct lcb_iovec_st *iov, lcb_size_t niov)
+ {
+     struct msghdr msg;
+     struct iovec vec[2];
+     lcb_ssize_t ret;
+
+     if (niov != 2) {
+         return -1;
+     }
+     memset(&msg, 0, sizeof(msg));
+     msg.msg_iov = vec;
+     msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
+     msg.msg_iov[0].iov_base = iov[0].iov_base;
+     msg.msg_iov[0].iov_len = iov[0].iov_len;
+     msg.msg_iov[1].iov_base = iov[1].iov_base;
+     msg.msg_iov[1].iov_len = iov[1].iov_len;
+     ret = recvmsg(sock, &msg, 0);
+
+     if (ret < 0) {
+         iops->v.v0.error = errno;
+     }
+
+     return ret;
+ }
+
+ static lcb_ssize_t
+ lcb_io_send(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+         const void *msg, lcb_size_t len, int flags)
+ {
+     lcb_ssize_t ret = send(sock, msg, len, flags);
+     if (ret < 0) {
+         iops->v.v0.error = errno;
+     }
+     return ret;
+ }
+
+ static lcb_ssize_t
+ lcb_io_sendv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+         struct lcb_iovec_st *iov, lcb_size_t niov)
+ {
+     struct msghdr msg;
+     struct iovec vec[2];
+     lcb_ssize_t ret;
+
+     if (niov != 2) {
+         return -1;
+     }
+     memset(&msg, 0, sizeof(msg));
+     msg.msg_iov = vec;
+     msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
+     msg.msg_iov[0].iov_base = iov[0].iov_base;
+     msg.msg_iov[0].iov_len = iov[0].iov_len;
+     msg.msg_iov[1].iov_base = iov[1].iov_base;
+     msg.msg_iov[1].iov_len = iov[1].iov_len;
+     ret = sendmsg(sock, &msg, 0);
+
+     if (ret < 0) {
+         iops->v.v0.error = errno;
+     }
+     return ret;
+ }
+
+ static int
+ make_socket_nonblocking(lcb_socket_t sock)
+ {
+     int flags;
+     if ((flags = fcntl(sock, F_GETFL, NULL)) < 0) {
+         return -1;
+     }
+     if (fcntl(sock, F_SETFL, flags | O_NONBLOCK) == -1) {
+         return -1;
+     }
+
+     return 0;
+ }
+
+ static int
+ close_socket(lcb_socket_t sock)
+ {
+     return close(sock);
+ }
+
+ static lcb_socket_t
+ lcb_io_socket(struct lcb_io_opt_st *iops, int domain, int type,
+         int protocol)
+ {
+     lcb_socket_t sock = socket(domain, type, protocol);
+     if (sock == INVALID_SOCKET) {
+         iops->v.v0.error = errno;
+     } else {
+         if (make_socket_nonblocking(sock) != 0) {
+             int error = errno;
+             iops->v.v0.close(iops, sock);
+             iops->v.v0.error = error;
+             sock = INVALID_SOCKET;
+         }
+     }
+
+     return sock;
+ }
+
+ static void
+ lcb_io_close(struct lcb_io_opt_st *iops, lcb_socket_t sock)
+ {
+     close_socket(sock);
+     (void)iops;
+ }
+
+ static int
+ lcb_io_connect(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+         const struct sockaddr *name, unsigned int namelen)
+ {
+     int ret = connect(sock, name, (socklen_t)namelen);
+     if (ret < 0) {
+         iops->v.v0.error = errno;
+     }
+     return ret;
+ }
+
+ /* events sorted array */
+ typedef struct rb_mt_event rb_mt_event;
+ struct rb_mt_event {
+     void *cb_data;
+     void (*handler)(lcb_socket_t sock, short which, void *cb_data);
+     lcb_socket_t socket;
+     int loop_index;
+     short flags;
+     short actual_flags;
+     short inserted;
+     rb_mt_event *next;
+ };
+
+ typedef struct rb_mt_socket_list rb_mt_socket_list;
+ struct rb_mt_socket_list {
+     lcb_socket_t socket;
+     short flags;
+     rb_mt_event *first;
+ };
+
+ typedef struct rb_mt_events rb_mt_events;
+ struct rb_mt_events {
+     uint32_t capa;
+     uint32_t count;
+     rb_mt_socket_list *sockets;
+ };
+
+ static int
+ events_init(rb_mt_events *events)
+ {
+     rb_mt_socket_list *new_socks = malloc(4 * sizeof(*new_socks));
+     if (new_socks == NULL) {
+         return 0;
+     }
+     events->capa = 4;
+     events->count = 0;
+     events->sockets = new_socks;
+     return 1;
+ }
+
+ static void
+ events_finalize(rb_mt_events *events)
+ {
+     if (events->sockets) {
+         uint32_t i;
+         for(i = 0; i < events->count; i++) {
+             rb_mt_socket_list *list = &events->sockets[i];
+             while(list->first) {
+                 rb_mt_event *next = list->first->next;
+                 free(list->first);
+                 list->first = next;
+             }
+         }
+         free(events->sockets);
+         events->sockets = NULL;
+     }
+     events->capa = 0;
+     events->count = 0;
+ }
+
+ static uint32_t
+ events_index(rb_mt_events *events, lcb_socket_t socket)
+ {
+     uint32_t m, l = 0, r = events->count;
+     while(l < r) {
+         m = l + (r - l) / 2;
+         if (events->sockets[m].socket >= socket) {
+             r = m;
+         } else {
+             l = m + 1;
+         }
+     }
+     return l;
+ }
+
+ static void
+ events_insert(rb_mt_events *events, rb_mt_event *event)
+ {
+     uint32_t i = events_index(events, event->socket);
+     rb_mt_socket_list *list = &events->sockets[i];
+     if (i == events->count || list->socket != event->socket) {
+         if (events->capa == events->count) {
+             uint32_t new_capa = events->capa << 1;
+             rb_mt_socket_list *new_socks = realloc(events->sockets, new_capa * sizeof(*new_socks));
+             if (new_socks == NULL) {
+                 rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for events array");
+             }
+             events->sockets = new_socks;
+             events->capa = new_capa;
+             list = &events->sockets[i];
+         }
+         if (i < events->count) {
+             MEMMOVE(events->sockets+i+1, events->sockets+i, rb_mt_socket_list, events->count - i);
+         }
+         events->count++;
+         list->socket = event->socket;
+         list->flags = event->flags;
+         list->first = event;
+         event->next = NULL;
+     } else {
+         list->flags |= event->flags;
+         event->next = list->first;
+         list->first = event;
+     }
+     event->inserted = 1;
+ }
+
+ static void
+ event_list_fix_flags(rb_mt_socket_list *list)
+ {
+     short flags = 0;
+     rb_mt_event *event = list->first;
+     while (event) {
+         flags |= event->flags;
+         event = event->next;
+     }
+     list->flags = flags;
+ }
+
+ static void
+ events_remove(rb_mt_events *events, rb_mt_event *event)
+ {
+     uint32_t i = events_index(events, event->socket);
+     rb_mt_socket_list *list = &events->sockets[i];
+     rb_mt_event **next;
+     if (list->socket != event->socket) {
+         rb_raise(rb_eIndexError, "There is no socket in event loop");
+     }
+     next = &list->first;
+     for(;;) {
+         if (*next == NULL) {
+             rb_raise(rb_eIndexError, "There is no event in event loop");
+         }
+         if (*next == event) {
+             *next = event->next;
+             event->next = NULL;
+             event->inserted = 0;
+             break;
+         }
+         next = &event->next;
+     }
+     if (list->first == NULL) {
+         MEMMOVE(events->sockets + i, events->sockets + i + 1, rb_mt_socket_list, events->count - i - 1);
+         events->count--;
+     } else {
+         event_list_fix_flags(list);
+     }
+ }
+
+ static void
+ events_fix_flags(rb_mt_events *events, lcb_socket_t socket)
+ {
+     uint32_t i = events_index(events, socket);
+     rb_mt_socket_list *list = &events->sockets[i];
+     if (list->socket != socket) {
+         rb_raise(rb_eIndexError, "There is no socket in event loop");
+     }
+     event_list_fix_flags(list);
+ }
+
+ static inline lcb_socket_t
+ events_max_fd(rb_mt_events *events)
+ {
+     if (events->count) {
+         return events->sockets[events->count - 1].socket;
+     } else {
+         return -1;
+     }
+ }
+
+ /* events sorted array end */
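The rb_mt_events container above keeps one rb_mt_socket_list entry per descriptor, sorted by socket, so events_index() is a lower-bound binary search and events_insert()/events_remove() only have to MEMMOVE the tail. A minimal standalone version of that search (illustrative only, not part of the plugin):

/* Lower-bound binary search, as in events_index(): returns the first
 * position whose value is >= key (count when key is larger than all). */
#include <stdint.h>

static uint32_t
lower_bound(const int *sorted, uint32_t count, int key)
{
    uint32_t l = 0, r = count, m;
    while (l < r) {
        m = l + (r - l) / 2;
        if (sorted[m] >= key) {
            r = m;
        } else {
            l = m + 1;
        }
    }
    return l;   /* also the insertion point that keeps the array sorted */
}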
+
+ /* timers heap */
+ typedef struct rb_mt_timer rb_mt_timer;
+ struct rb_mt_timer {
+     void *cb_data;
+     void (*handler)(lcb_socket_t sock, short which, void *cb_data);
+     int index;
+     hrtime_t ts;
+     hrtime_t period;
+ };
+
+ typedef struct rb_mt_timers rb_mt_timers;
+ struct rb_mt_timers {
+     uint32_t capa;
+     uint32_t count;
+     rb_mt_timer **timers;
+ };
+
+ static int
+ timers_init(rb_mt_timers *timers)
+ {
+     rb_mt_timer **new_timers = malloc(4 * sizeof(*new_timers));
+     if (new_timers == NULL) {
+         return 0;
+     }
+     timers->capa = 4;
+     timers->count = 0;
+     timers->timers = new_timers;
+     return 1;
+ }
+
+ static void
+ timers_finalize(rb_mt_timers *timers)
+ {
+     if (timers->timers) {
+         uint32_t i;
+         for(i = 0; i < timers->count; i++) {
+             free(timers->timers[i]);
+         }
+         free(timers->timers);
+         timers->timers = NULL;
+     }
+     timers->count = 0;
+     timers->capa = 0;
+ }
+
+ #define tms_at(_timers, at) (_timers)->timers[(at)]
+ #define tms_ts_at(timers, at) tms_at((timers), (at))->ts
+
+ static void
+ timers_move_last(rb_mt_timers *timers, uint32_t to)
+ {
+     if (to < timers->count - 1) {
+         rb_mt_timer *last = tms_at(timers, timers->count - 1);
+         tms_at(timers, to) = last;
+         last->index = to;
+     }
+     timers->count--;
+ }
+
+ static inline void
+ timers_swap(rb_mt_timers *timers, uint32_t i, uint32_t j)
+ {
+     rb_mt_timer *itmp = tms_at(timers, j);
+     rb_mt_timer *jtmp = tms_at(timers, i);
+     tms_at(timers, i) = itmp;
+     tms_at(timers, j) = jtmp;
+     itmp->index = i;
+     jtmp->index = j;
+ }
+
+ static void timers_heapify_up(rb_mt_timers *timers, uint32_t pos);
+
+ static void
+ timers_insert(rb_mt_timers *timers, rb_mt_timer *timer)
+ {
+     if (timers->count == timers->capa) {
+         rb_mt_timer **new_timers;
+         size_t new_capa = timers->capa << 1;
+         new_timers = realloc(timers->timers, new_capa * sizeof(rb_mt_timer*));
+         if (new_timers == NULL) {
+             rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for timers heap");
+         }
+         timers->timers = new_timers;
+         timers->capa = new_capa;
+     }
+     tms_at(timers, timers->count) = timer;
+     timer->index = timers->count;
+     timers->count++;
+     timers_heapify_up(timers, timer->index);
+ }
+
+ static void
+ timers_heapify_up(rb_mt_timers *timers, uint32_t pos)
+ {
+     hrtime_t cur_ts = tms_ts_at(timers, pos);
+     uint32_t higher = (pos - 1) / 2;
+     while (pos && tms_ts_at(timers, higher) > cur_ts) {
+         timers_swap(timers, higher, pos);
+         pos = higher;
+         higher = (pos - 1) / 2;
+     }
+ }
+
+ static void
+ timers_heapify_down(rb_mt_timers *timers, uint32_t pos)
+ {
+     uint32_t count = timers->count;
+     uint32_t middle = (timers->count - 2) / 2;
+     hrtime_t cur_ts = tms_ts_at(timers, pos);
+     if (count == 1) return;
+     while (pos <= middle) {
+         uint32_t min_pos = pos;
+         hrtime_t ch_ts, min_ts = cur_ts;
+
+         if ((ch_ts = tms_ts_at(timers, pos * 2 + 1)) < min_ts) {
+             min_pos = pos * 2 + 1;
+             min_ts = ch_ts;
+         }
+
+         if (pos * 2 + 2 < count && tms_ts_at(timers, pos * 2 + 2) < min_ts) {
+             min_pos = pos * 2 + 2;
+         }
+
+         if (min_pos == pos) break;
+         timers_swap(timers, pos, min_pos);
+         pos = min_pos;
+     }
+ }
+
+ static void
+ timers_heapify_item(rb_mt_timers *timers, uint32_t pos)
+ {
+     if (pos && tms_ts_at(timers, pos) < tms_ts_at(timers, (pos - 1) / 2)) {
+         timers_heapify_up(timers, pos);
+     } else {
+         timers_heapify_down(timers, pos);
+     }
+ }
+
+ static inline hrtime_t
+ timers_minimum(rb_mt_timers *timers)
+ {
+     if (timers->count) {
+         return tms_ts_at(timers, 0);
+     } else {
+         return 0;
+     }
+ }
+
+ static inline rb_mt_timer *
+ timers_first(rb_mt_timers *timers)
+ {
+     if (timers->count) {
+         return tms_at(timers, 0);
+     } else {
+         return 0;
+     }
+ }
+
+ static void
+ timers_remove_timer(rb_mt_timers *timers, rb_mt_timer *timer)
+ {
+     uint32_t at = timer->index;
+     timer->index = -1;
+     if (at < timers->count - 1) {
+         timers_move_last(timers, at);
+         timers_heapify_item(timers, at);
+     } else {
+         timers->count--;
+     }
+ }
+
+ static void
+ timers_run(rb_mt_timers *timers, hrtime_t now)
+ {
+     hrtime_t next_time = timers_minimum(timers);
+     while (next_time && next_time < now) {
+         rb_mt_timer *first = timers_first(timers);
+
+         first->ts = now + first->period;
+         timers_heapify_item(timers, 0);
+
+         first->handler(-1, 0, first->cb_data);
+
+         next_time = timers_minimum(timers);
+     }
+ }
+ /* timers heap end */
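Timers live in an array-backed binary min-heap keyed on the absolute ts deadline: the parent of slot i is (i - 1) / 2, its children are 2*i + 1 and 2*i + 2, the earliest deadline is always slot 0, and timers_run() keeps firing and re-heapifying the root while it is due. A small illustrative sift-up over plain 64-bit deadlines (not part of the plugin):

/* Sift-up for an array-backed min-heap of deadlines, mirroring
 * timers_heapify_up(): smaller timestamps bubble toward slot 0. */
#include <stdint.h>

static void
heap_sift_up(uint64_t *heap, uint32_t pos)
{
    uint64_t tmp;
    while (pos > 0) {
        uint32_t parent = (pos - 1) / 2;
        if (heap[parent] <= heap[pos]) {
            break;              /* heap property already holds */
        }
        tmp = heap[parent];     /* swap with the smaller deadline */
        heap[parent] = heap[pos];
        heap[pos] = tmp;
        pos = parent;
    }
}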
+
+ /* callbacks array */
+ typedef struct rb_mt_callbacks rb_mt_callbacks;
+ struct rb_mt_callbacks {
+     uint32_t capa;
+     uint32_t count;
+     rb_mt_event **events;
+ };
+
+ static int
+ callbacks_init(rb_mt_callbacks *callbacks)
+ {
+     rb_mt_event **new_events = calloc(4, sizeof(*new_events));
+     if (new_events == NULL) {
+         return 0;
+     }
+     callbacks->events = new_events;
+     callbacks->capa = 4;
+     callbacks->count = 0;
+     return 1;
+ }
+
+ static void
+ callbacks_finalize(rb_mt_callbacks *callbacks)
+ {
+     if (callbacks->events) {
+         free(callbacks->events);
+         callbacks->events = NULL;
+     }
+     callbacks->capa = 0;
+     callbacks->count = 0;
+ }
+
+ static void
+ callbacks_push(rb_mt_callbacks *callbacks, rb_mt_event *event)
+ {
+     if (callbacks->count == callbacks->capa) {
+         uint32_t new_capa = callbacks->capa << 1;
+         rb_mt_event **new_events = realloc(callbacks->events, new_capa * sizeof(*new_events));
+         if (new_events == NULL) {
+             rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for callbacks array");
+         }
+         callbacks->capa = new_capa;
+         callbacks->events = new_events;
+     }
+     callbacks->events[callbacks->count] = event;
+     callbacks->count++;
+ }
+
+ static void
+ callbacks_remove(rb_mt_callbacks *callbacks, rb_mt_event *event)
+ {
+     int i = event->loop_index;
+     if (i >= 0) {
+         if (callbacks->events[i] != event) {
+             rb_raise(rb_eIndexError, "callback index belongs to different callback");
+         }
+         event->loop_index = -1;
+         callbacks->events[i] = NULL;
+     }
+ }
+
+ static void
+ callbacks_run(rb_mt_callbacks *callbacks)
+ {
+     uint32_t i;
+     for(i = 0; i < callbacks->count; i++) {
+         rb_mt_event *cb = callbacks->events[i];
+         if (cb) {
+             cb->handler(cb->socket, cb->actual_flags, cb->cb_data);
+         }
+     }
+     callbacks->count = 0;
+ }
+
+ static void
+ callbacks_clean(rb_mt_callbacks *callbacks)
+ {
+     uint32_t i;
+     for(i = 0; i < callbacks->count; i++) {
+         if (callbacks->events[i]) {
+             callbacks->events[i]->loop_index = -1;
+             callbacks->events[i] = NULL;
+         }
+     }
+     callbacks->count = 0;
+ }
+ /* callbacks array end */
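Ready descriptors are not dispatched while the event list is being scanned: they are queued into this callbacks array and only run afterwards by callbacks_run(), and a handler that deletes an event merely NULLs its slot through callbacks_remove(), so the queue stays safe to walk even when callbacks mutate the event set. A bare-bones sketch of that deferred-dispatch pattern (illustrative only):

/* Deferred dispatch: collect ready items first, then run them,
 * skipping slots that were cancelled (set to NULL) along the way. */
#include <stddef.h>

typedef struct pending {
    void (*fn)(void *arg);
    void *arg;
} pending;

static void
run_pending(pending **queue, unsigned count)
{
    unsigned i;
    for (i = 0; i < count; i++) {
        if (queue[i] != NULL) {     /* NULL means cancelled mid-run */
            queue[i]->fn(queue[i]->arg);
        }
    }
}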
+
+ typedef struct rb_mt_loop rb_mt_loop;
+ struct rb_mt_loop {
+     rb_mt_events events;
+     rb_mt_timers timers;
+     rb_mt_callbacks callbacks;
+     short run;
+ };
+
+ static rb_mt_loop*
+ loop_create()
+ {
+     rb_mt_loop *loop = calloc(1, sizeof(*loop));
+     if (loop == NULL) return NULL;
+     if (!events_init(&loop->events)) goto free_loop;
+     if (!timers_init(&loop->timers)) goto free_events;
+     if (!callbacks_init(&loop->callbacks)) goto free_timers;
+     return loop;
+
+ free_timers:
+     timers_finalize(&loop->timers);
+ free_events:
+     events_finalize(&loop->events);
+ free_loop:
+     free(loop);
+     return NULL;
+ }
+
+ static void
+ loop_destroy(rb_mt_loop *loop)
+ {
+     events_finalize(&loop->events);
+     timers_finalize(&loop->timers);
+     callbacks_finalize(&loop->callbacks);
+     free(loop);
+ }
+
+ static void
+ loop_remove_event(rb_mt_loop *loop, rb_mt_event *event)
+ {
+     if (event->inserted) {
+         events_remove(&loop->events, event);
+     }
+     callbacks_remove(&loop->callbacks, event);
+ }
+
+ static void
+ loop_enque_events(rb_mt_callbacks *callbacks, rb_mt_event *sock, short flags)
+ {
+     while (sock) {
+         short actual = sock->flags & flags;
+         if (actual) {
+             sock->actual_flags = actual;
+             callbacks_push(callbacks, (rb_mt_event*)sock);
+         }
+         sock = sock->next;
+     }
+ }
+
+ /* loop select implementation */
+ #ifndef HAVE_RB_THREAD_FD_SELECT
+ typedef fd_set rb_fdset_t;
+ #define rb_fd_init FD_ZERO
+ #define rb_fd_set FD_SET
+ #define rb_fd_isset FD_ISSET
+ #define rb_fd_term(set) (void)0
+ #define rb_thread_fd_select rb_thread_select
+ #endif
+
+ typedef struct loop_select_arg {
+     rb_mt_loop *loop;
+     rb_fdset_t in, out;
+ } ls_arg;
+
+ static void
+ ls_arg_free(void *p) {
+     ls_arg *args = p;
+     if (args) {
+         rb_fd_term(&args->in);
+         rb_fd_term(&args->out);
+         xfree(args);
+     }
+ }
+
+ static VALUE
+ ls_arg_alloc(ls_arg **args)
+ {
+     return Data_Make_Struct(rb_cObject, ls_arg, 0, ls_arg_free, *args);
+ }
+
+ static VALUE
+ loop_run_select(VALUE argp)
+ {
+     ls_arg *args = (ls_arg*) argp;
+     rb_mt_loop *loop = args->loop;
+     rb_fdset_t *in = NULL, *out = NULL;
+     struct timeval timeout;
+     struct timeval *timeoutp = NULL;
+     int result, max = 0;
+     hrtime_t now, next_time;
+
+     next_time = timers_minimum(&loop->timers);
+     if (next_time) {
+         now = gethrtime();
+         if (next_time <= now) {
+             timeout.tv_sec = 0;
+             timeout.tv_usec = 0;
+         } else {
+             hrtime_t hrto = (next_time - now) / 1000;
+             timeout.tv_sec = (long)(hrto / 1000000);
+             timeout.tv_usec = (long)(hrto % 1000000);
+         }
+         timeoutp = &timeout;
+     }
+
+     if (loop->events.count) {
+         uint32_t i;
+         rb_fd_init(&args->in);
+         rb_fd_init(&args->out);
+         for(i = 0; i < loop->events.count; i++) {
+             rb_mt_socket_list *list = &loop->events.sockets[i];
+             if (list->flags & LCB_READ_EVENT) {
+                 in = &args->in;
+                 rb_fd_set(list->socket, in);
+             }
+             if (list->flags & LCB_WRITE_EVENT) {
+                 out = &args->out;
+                 rb_fd_set(list->socket, out);
+             }
+         }
+         max = events_max_fd(&loop->events) + 1;
+     }
+
+     result = rb_thread_fd_select(max, in, out, NULL, timeoutp);
+
+     if (result < 0) {
+         rb_sys_fail("rb_thread_fd_select");
+     }
+     /* capture the current time here so that time spent in socket callbacks will not cause timer timeouts */
+     if (next_time) {
+         now = gethrtime();
+     }
+
+     if (result > 0) {
+         uint32_t i;
+         for(i = 0; i < loop->events.count && result; i++) {
+             rb_mt_socket_list *list = loop->events.sockets + i;
+             rb_mt_event *sock = list->first;
+             short flags = 0;
+             if (in && rb_fd_isset(list->socket, in)) {
+                 flags |= LCB_READ_EVENT;
+                 result--;
+             }
+             if (out && rb_fd_isset(list->socket, out)) {
+                 flags |= LCB_WRITE_EVENT;
+                 result--;
+             }
+             if (flags) {
+                 loop_enque_events(&loop->callbacks, sock, flags);
+             }
+         }
+         callbacks_run(&loop->callbacks);
+     }
+
+     if (next_time) {
+         timers_run(&loop->timers, now);
+     }
+     if (loop->events.count == 0 && loop->timers.count == 0) {
+         loop->run = 0;
+     }
+     return Qnil;
+ }
+
+ static VALUE
+ loop_select_cleanup(VALUE argp)
+ {
+     ls_arg *args = DATA_PTR(argp);
+     if (args) {
+         callbacks_clean(&args->loop->callbacks);
+         ls_arg_free(args);
+         DATA_PTR(argp) = 0;
+     }
+     return Qnil;
+ }
+ /* loop select implementation end */
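loop_run_select() turns the gap between the earliest timer deadline and gethrtime() (both nanosecond counts) into the struct timeval that rb_thread_fd_select() expects: divide by 1000 for microseconds, then split into whole seconds and the microsecond remainder. A standalone version of that conversion (illustrative; it assumes the delta is a 64-bit nanosecond count, as hrtime_t is here):

/* Convert a nanosecond delta into a select()-style timeval.
 * Example: 2,500,000,000 ns -> { tv_sec = 2, tv_usec = 500000 }. */
#include <stdint.h>
#include <sys/time.h>

static struct timeval
ns_to_timeval(uint64_t delta_ns)
{
    struct timeval tv;
    uint64_t usec = delta_ns / 1000;        /* ns -> us */
    tv.tv_sec = (long)(usec / 1000000);     /* whole seconds */
    tv.tv_usec = (long)(usec % 1000000);    /* leftover microseconds */
    return tv;
}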
+
+ /* loop poll implementation */
+ #ifdef HAVE_POLL
+ /* code influenced by ruby's source and cool.io */
+ #define POLLIN_SET (POLLIN | POLLHUP | POLLERR)
+ #define POLLOUT_SET (POLLOUT | POLLHUP | POLLERR)
+
+ #ifndef HAVE_PPOLL
+ #if SIZEOF_TIME_T == SIZEOF_LONG
+ typedef unsigned long unsigned_time_t;
+ #elif SIZEOF_TIME_T == SIZEOF_INT
+ typedef unsigned int unsigned_time_t;
+ #elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
+ typedef unsigned LONG_LONG unsigned_time_t;
+ #else
+ # error cannot find an integer type with the same size as time_t.
+ #endif
+ #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
+ static int
+ ppoll(struct pollfd *fds, nfds_t nfds,
+         const struct timespec *ts, const sigset_t *sigmask)
+ {
+     int timeout_ms;
+
+     if (ts) {
+         int tmp, tmp2;
+
+         if (ts->tv_sec > TIMET_MAX/1000) {
+             timeout_ms = -1;
+         } else {
+             tmp = ts->tv_sec * 1000;
+             tmp2 = (ts->tv_nsec + 999999) / (1000 * 1000);
+             if (TIMET_MAX - tmp < tmp2) {
+                 timeout_ms = -1;
+             } else {
+                 timeout_ms = tmp + tmp2;
+             }
+         }
+     } else {
+         timeout_ms = -1;
+     }
+
+     (void)sigmask;
+
+     return poll(fds, nfds, timeout_ms);
+ }
+ #endif
+
+ typedef struct poll_args lp_arg;
+ struct poll_args {
+     rb_mt_loop *loop;
+     struct pollfd *fds;
+     nfds_t nfd;
+     struct timespec *ts;
+     int result;
+     int lerrno;
+ };
+
+ static void
+ lp_arg_free(void *p)
+ {
+     lp_arg *args = p;
+     if (args) {
+         if (args->fds) {
+             free(args->fds);
+         }
+         xfree(args);
+     }
+ }
+
+ static VALUE
+ lp_arg_alloc(lp_arg **args)
+ {
+     return Data_Make_Struct(rb_cObject, lp_arg, 0, lp_arg_free, *args);
+ }
+
+ #ifdef HAVE_RB_THREAD_BLOCKING_REGION
+ static VALUE
+ loop_blocking_poll(void *argp)
+ {
+     lp_arg *args = argp;
+     args->result = ppoll(args->fds, args->nfd, args->ts, NULL);
+     if (args->result < 0) args->lerrno = errno;
+     return Qnil;
+ }
+ #endif
+
+ static VALUE
+ loop_run_poll(VALUE argp)
+ {
+     lp_arg *args = (lp_arg*)argp;
+     rb_mt_loop *loop = args->loop;
+     struct timespec ts;
+     hrtime_t now, next_time;
+
+     if (loop->events.count) {
+         uint32_t i;
+         args->fds = calloc(loop->events.count, sizeof(struct pollfd));
+         if (args->fds == NULL) {
+             rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for pollfd");
+         }
+         for(i = 0; i < loop->events.count; i++) {
+             rb_mt_socket_list *list = &loop->events.sockets[i];
+             args->fds[i].fd = list->socket;
+             args->fds[i].events =
+                 (list->flags & LCB_READ_EVENT ? POLLIN : 0) |
+                 (list->flags & LCB_WRITE_EVENT ? POLLOUT : 0);
+         }
+         args->nfd = loop->events.count;
+     }
+
+ retry:
+     next_time = timers_minimum(&loop->timers);
+     if (next_time) {
+         now = gethrtime();
+         if (next_time <= now) {
+             ts.tv_sec = 0;
+             ts.tv_nsec = 0;
+         } else {
+             hrtime_t hrto = next_time - now;
+             ts.tv_sec = (long)(hrto / 1000000000);
+             ts.tv_nsec = (long)(hrto % 1000000000);
+         }
+         args->ts = &ts;
+     } else {
+         args->ts = NULL;
+     }
+
+ #ifdef HAVE_RB_THREAD_BLOCKING_REGION
+     rb_thread_blocking_region(loop_blocking_poll, args, RUBY_UBF_PROCESS, NULL);
+ #else
+     if (rb_thread_alone()) {
+         TRAP_BEG;
+         args->result = ppoll(args->fds, args->nfd, args->ts, NULL);
+         if (args->result < 0) args->lerrno = errno;
+         TRAP_END;
+     } else {
+         struct timespec mini_pause;
+         int exact = 0;
+         mini_pause.tv_sec = 0;
+         /* 5 millisecond pause */
+         mini_pause.tv_nsec = 5000000;
+         if (args->ts && ts.tv_sec == 0 && ts.tv_nsec < 5000000) {
+             mini_pause.tv_nsec = ts.tv_nsec;
+             exact = 1;
+         }
+         TRAP_BEG;
+         args->result = ppoll(args->fds, args->nfd, &mini_pause, NULL);
+         if (args->result < 0) args->lerrno = errno;
+         TRAP_END;
+         if (args->result == 0 && !exact) {
+             args->result = -1;
+             args->lerrno = EINTR;
+         }
+     }
+ #endif
+
+     if (args->result < 0) {
+         errno = args->lerrno;
+         switch (errno) {
+         case EINTR:
+ #ifdef ERESTART
+         case ERESTART:
+ #endif
+ #ifndef HAVE_RB_THREAD_BLOCKING_REGION
+             rb_thread_schedule();
+ #endif
+             goto retry;
+         }
+         rb_sys_fail("poll");
+         return Qnil;
+     }
+
+     if (next_time) {
+         now = gethrtime();
+     }
+
+     if (args->result > 0) {
+         uint32_t cnt = args->result;
+         uint32_t fd_n = 0, ev_n = 0;
+         while (cnt && fd_n < args->nfd && ev_n < loop->events.count) {
+             struct pollfd *res = args->fds + fd_n;
+             rb_mt_socket_list *list = loop->events.sockets + ev_n;
+             rb_mt_event *sock = list->first;
+
+             /* if the plugin is used correctly, these checks are a no-op */
+             if (res->fd < list->socket) {
+                 fd_n++;
+                 continue;
+             } else if (res->fd > list->socket) {
+                 ev_n++;
+                 continue;
+             }
+
+             if (res->revents) {
+                 short flags =
+                     ((res->revents & POLLIN_SET) ? LCB_READ_EVENT : 0) |
+                     ((res->revents & POLLOUT_SET) ? LCB_WRITE_EVENT : 0);
+                 cnt--;
+                 loop_enque_events(&loop->callbacks, sock, flags);
+             }
+             fd_n++;
+             ev_n++;
+         }
+         callbacks_run(&loop->callbacks);
+     }
+
+     if (next_time) {
+         timers_run(&loop->timers, now);
+     }
+     if (loop->events.count == 0 && loop->timers.count == 0) {
+         loop->run = 0;
+     }
+     return Qnil;
+ }
+
+ static VALUE
+ loop_poll_cleanup(VALUE argp)
+ {
+     lp_arg *args = DATA_PTR(argp);
+     if (args) {
+         callbacks_clean(&args->loop->callbacks);
+         lp_arg_free(args);
+         DATA_PTR(argp) = 0;
+     }
+     return Qnil;
+ }
+ #endif
+ /* loop poll implementation end */
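When the platform has no native ppoll(), the fallback above folds the timespec into the millisecond timeout that poll() takes, rounding the nanoseconds up and clamping values that would overflow to -1 (block indefinitely). A worked version of that rounding, with the overflow clamping left out for brevity (illustrative only):

/* timespec -> poll() milliseconds, rounding nanoseconds up.
 * Example: { tv_sec = 1, tv_nsec = 1 } -> 1000 + 1 = 1001 ms. */
#include <stddef.h>
#include <time.h>

static int
timespec_to_ms(const struct timespec *ts)
{
    if (ts == NULL) {
        return -1;              /* no timeout: block until an event arrives */
    }
    return (int)(ts->tv_sec * 1000 + (ts->tv_nsec + 999999) / 1000000);
}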
+
+ static void
+ loop_run(rb_mt_loop *loop)
+ {
+
+     loop->run = 1;
+
+     while(loop->run) {
+ #ifdef HAVE_POLL
+         /* prefer poll() when it gives some benefit, but use rb_thread_fd_select() when that is sufficient */
+         lcb_socket_t max = events_max_fd(&loop->events);
+         int use_poll = max >= 128;
+         if (use_poll) {
+             lp_arg *args;
+             VALUE argp = lp_arg_alloc(&args);
+             args->loop = loop;
+             rb_ensure(loop_run_poll, (VALUE)args, loop_poll_cleanup, argp);
+         } else
+ #endif
+         {
+             ls_arg *args;
+             VALUE argp = ls_arg_alloc(&args);
+             args->loop = loop;
+             rb_ensure(loop_run_select, (VALUE)args, loop_select_cleanup, argp);
+         }
+     }
+ }
+
+ static void *
+ lcb_io_create_event(struct lcb_io_opt_st *iops)
+ {
+     rb_mt_event *event = calloc(1, sizeof(*event));
+     (void)iops;
+     event->loop_index = -1;
+     return event;
+ }
+
+ static int
+ lcb_io_update_event(struct lcb_io_opt_st *iops,
+         lcb_socket_t sock,
+         void *eventp,
+         short flags,
+         void *cb_data,
+         void (*handler)(lcb_socket_t sock,
+             short which,
+             void *cb_data))
+ {
+     rb_mt_loop *loop = iops->v.v0.cookie;
+     rb_mt_event *event = eventp;
+     short old_flags = event->flags;
+
+     if (event->inserted && old_flags == flags &&
+             cb_data == event->cb_data && handler == event->handler)
+     {
+         return 0;
+     }
+     loop_remove_event(loop, event);
+     event->flags = flags;
+     event->cb_data = cb_data;
+     event->handler = handler;
+     event->socket = sock;
+     if (!event->inserted) {
+         events_insert(&loop->events, event);
+     }
+     if ((old_flags & flags) != old_flags) {
+         events_fix_flags(&loop->events, sock);
+     }
+     return 0;
+ }
+
+ static void
+ lcb_io_delete_event(struct lcb_io_opt_st *iops,
+         lcb_socket_t sock,
+         void *event)
+ {
+     loop_remove_event((rb_mt_loop*)iops->v.v0.cookie, (rb_mt_event*)event);
+     (void)sock;
+ }
+
+ static void
+ lcb_io_destroy_event(struct lcb_io_opt_st *iops,
+         void *event)
+ {
+     lcb_io_delete_event(iops, -1, event);
+     free(event);
+ }
+
+ static void *
+ lcb_io_create_timer(struct lcb_io_opt_st *iops)
+ {
+     rb_mt_timer *timer = calloc(1, sizeof(*timer));
+     timer->index = -1;
+     (void)iops;
+     return timer;
+ }
+
+ static int
+ lcb_io_update_timer(struct lcb_io_opt_st *iops, void *event,
+         lcb_uint32_t usec, void *cb_data,
+         void (*handler)(lcb_socket_t sock, short which, void *cb_data))
+ {
+     rb_mt_loop *loop = iops->v.v0.cookie;
+     rb_mt_timer *timer = event;
+
+     timer->period = usec * (hrtime_t)1000;
+     timer->ts = gethrtime() + timer->period;
+     timer->cb_data = cb_data;
+     timer->handler = handler;
+     if (timer->index != -1) {
+         timers_heapify_item(&loop->timers, timer->index);
+     } else {
+         timers_insert(&loop->timers, timer);
+     }
+     return 0;
+ }
+
+ static void
+ lcb_io_delete_timer(struct lcb_io_opt_st *iops, void *event)
+ {
+     rb_mt_loop *loop = iops->v.v0.cookie;
+     rb_mt_timer *timer = event;
+     if (timer->index != -1) {
+         timers_remove_timer(&loop->timers, timer);
+     }
+ }
+
+ static void
+ lcb_io_destroy_timer(struct lcb_io_opt_st *iops, void *timer)
+ {
+     lcb_io_delete_timer(iops, timer);
+     free(timer);
+ }
+
+ static void
+ lcb_io_stop_event_loop(struct lcb_io_opt_st *iops)
+ {
+     rb_mt_loop *loop = iops->v.v0.cookie;
+     loop->run = 0;
+ }
+
+ static void
+ lcb_io_run_event_loop(struct lcb_io_opt_st *iops)
+ {
+     rb_mt_loop *loop = iops->v.v0.cookie;
+     loop_run(loop);
+ }
+
+ static void
+ lcb_destroy_io_opts(struct lcb_io_opt_st *iops)
+ {
+     rb_mt_loop *loop = iops->v.v0.cookie;
+     loop_destroy(loop);
+     free(iops);
+ }
+
+ LIBCOUCHBASE_API lcb_error_t
+ cb_create_ruby_mt_io_opts(int version, lcb_io_opt_t *io, void *arg)
+ {
+     struct lcb_io_opt_st *ret;
+     rb_mt_loop *loop;
+     (void)arg;
+     if (version != 0) {
+         return LCB_PLUGIN_VERSION_MISMATCH;
+     }
+     ret = calloc(1, sizeof(*ret));
+     if (ret == NULL) {
+         free(ret);
+         return LCB_CLIENT_ENOMEM;
+     }
+
+     ret->version = 0;
+     ret->dlhandle = NULL;
+     ret->destructor = lcb_destroy_io_opts;
+     /* mark the struct as not allocated by the library;
+      * the `need_cleanup' flag might be set in lcb_create() */
+     ret->v.v0.need_cleanup = 0;
+     ret->v.v0.recv = lcb_io_recv;
+     ret->v.v0.send = lcb_io_send;
+     ret->v.v0.recvv = lcb_io_recvv;
+     ret->v.v0.sendv = lcb_io_sendv;
+     ret->v.v0.socket = lcb_io_socket;
+     ret->v.v0.close = lcb_io_close;
+     ret->v.v0.connect = lcb_io_connect;
+     ret->v.v0.delete_event = lcb_io_delete_event;
+     ret->v.v0.destroy_event = lcb_io_destroy_event;
+     ret->v.v0.create_event = lcb_io_create_event;
+     ret->v.v0.update_event = lcb_io_update_event;
+
+     ret->v.v0.delete_timer = lcb_io_delete_timer;
+     ret->v.v0.destroy_timer = lcb_io_destroy_timer;
+     ret->v.v0.create_timer = lcb_io_create_timer;
+     ret->v.v0.update_timer = lcb_io_update_timer;
+
+     ret->v.v0.run_event_loop = lcb_io_run_event_loop;
+     ret->v.v0.stop_event_loop = lcb_io_stop_event_loop;
+
+     loop = loop_create();
+     if (loop == NULL) {
+         free(ret);
+         return LCB_CLIENT_ENOMEM;
+     }
+     ret->v.v0.cookie = loop;
+     *io = ret;
+     return LCB_SUCCESS;
+ }
+ #endif /* _WIN32 */