couchbase 1.2.0.beta-x86-mingw32 → 1.2.0-x86-mingw32

Sign up to get free protection for your applications and to get access to all the features.
Files changed (45) hide show
  1. data/.travis.yml +1 -1
  2. data/Makefile +3 -0
  3. data/README.markdown +15 -4
  4. data/RELEASE_NOTES.markdown +513 -0
  5. data/couchbase.gemspec +0 -1
  6. data/ext/couchbase_ext/arguments.c +161 -244
  7. data/ext/couchbase_ext/arithmetic.c +29 -37
  8. data/ext/couchbase_ext/bucket.c +252 -219
  9. data/ext/couchbase_ext/couchbase_ext.c +540 -417
  10. data/ext/couchbase_ext/couchbase_ext.h +218 -191
  11. data/ext/couchbase_ext/delete.c +30 -27
  12. data/ext/couchbase_ext/extconf.rb +15 -3
  13. data/ext/couchbase_ext/get.c +45 -37
  14. data/ext/couchbase_ext/http.c +95 -74
  15. data/ext/couchbase_ext/multithread_plugin.c +1201 -0
  16. data/ext/couchbase_ext/observe.c +42 -37
  17. data/ext/couchbase_ext/result.c +17 -20
  18. data/ext/couchbase_ext/stats.c +30 -28
  19. data/ext/couchbase_ext/store.c +46 -39
  20. data/ext/couchbase_ext/timer.c +11 -11
  21. data/ext/couchbase_ext/touch.c +30 -27
  22. data/ext/couchbase_ext/unlock.c +30 -27
  23. data/ext/couchbase_ext/utils.c +166 -89
  24. data/ext/couchbase_ext/version.c +29 -26
  25. data/lib/action_dispatch/middleware/session/couchbase_store.rb +2 -2
  26. data/lib/active_support/cache/couchbase_store.rb +6 -6
  27. data/lib/couchbase.rb +1 -0
  28. data/lib/couchbase/bucket.rb +6 -11
  29. data/lib/couchbase/cluster.rb +105 -0
  30. data/lib/couchbase/utils.rb +8 -5
  31. data/lib/couchbase/version.rb +1 -1
  32. data/lib/couchbase/view.rb +51 -5
  33. data/lib/couchbase/view_row.rb +1 -1
  34. data/lib/ext/multi_json_fix.rb +13 -9
  35. data/lib/rack/session/couchbase.rb +11 -7
  36. data/tasks/compile.rake +1 -1
  37. data/tasks/test.rake +40 -34
  38. data/tasks/util.rake +1 -1
  39. data/test/setup.rb +9 -2
  40. data/test/test_arithmetic.rb +37 -0
  41. data/test/test_async.rb +22 -18
  42. data/test/test_unlock.rb +0 -1
  43. data/test/test_utils.rb +32 -0
  44. metadata +13 -23
  45. data/HISTORY.markdown +0 -215
@@ -0,0 +1,1201 @@
1
+ /* vim: ft=c et ts=8 sts=4 sw=4 cino=
2
+ *
3
+ * Copyright 2012 Couchbase, Inc.
4
+ *
5
+ * Licensed under the Apache License, Version 2.0 (the "License");
6
+ * you may not use this file except in compliance with the License.
7
+ * You may obtain a copy of the License at
8
+ *
9
+ * http://www.apache.org/licenses/LICENSE-2.0
10
+ *
11
+ * Unless required by applicable law or agreed to in writing, software
12
+ * distributed under the License is distributed on an "AS IS" BASIS,
13
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ * See the License for the specific language governing permissions and
15
+ * limitations under the License.
16
+ */
17
+
18
+ #include "couchbase_ext.h"
19
+
20
+
21
+ #ifndef _WIN32
22
+
23
+ #ifndef HAVE_RB_THREAD_BLOCKING_REGION
24
+ #include <rubysig.h>
25
+ #endif
26
+ #include <errno.h>
27
+ #include <sys/types.h>
28
+ #include <sys/socket.h>
29
+ #include <unistd.h>
30
+ #ifdef HAVE_FCNTL_H
31
+ #include <fcntl.h>
32
+ #endif
33
+ #ifdef HAVE_POLL
34
+ #include <poll.h>
35
+ #endif
36
+ #define INVALID_SOCKET (-1)
37
+
38
+ /* Copied from libev plugin */
39
+ static lcb_ssize_t
40
+ lcb_io_recv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
41
+ void *buffer, lcb_size_t len, int flags)
42
+ {
43
+ lcb_ssize_t ret = recv(sock, buffer, len, flags);
44
+ if (ret < 0) {
45
+ iops->v.v0.error = errno;
46
+ }
47
+ return ret;
48
+ }
49
+
50
+ static lcb_ssize_t
51
+ lcb_io_recvv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
52
+ struct lcb_iovec_st *iov, lcb_size_t niov)
53
+ {
54
+ struct msghdr msg;
55
+ struct iovec vec[2];
56
+ lcb_ssize_t ret;
57
+
58
+ if (niov != 2) {
59
+ return -1;
60
+ }
61
+ memset(&msg, 0, sizeof(msg));
62
+ msg.msg_iov = vec;
63
+ msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
64
+ msg.msg_iov[0].iov_base = iov[0].iov_base;
65
+ msg.msg_iov[0].iov_len = iov[0].iov_len;
66
+ msg.msg_iov[1].iov_base = iov[1].iov_base;
67
+ msg.msg_iov[1].iov_len = iov[1].iov_len;
68
+ ret = recvmsg(sock, &msg, 0);
69
+
70
+ if (ret < 0) {
71
+ iops->v.v0.error = errno;
72
+ }
73
+
74
+ return ret;
75
+ }
76
+
77
+ static lcb_ssize_t
78
+ lcb_io_send(struct lcb_io_opt_st *iops, lcb_socket_t sock,
79
+ const void *msg, lcb_size_t len, int flags)
80
+ {
81
+ lcb_ssize_t ret = send(sock, msg, len, flags);
82
+ if (ret < 0) {
83
+ iops->v.v0.error = errno;
84
+ }
85
+ return ret;
86
+ }
87
+
88
+ static lcb_ssize_t
89
+ lcb_io_sendv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
90
+ struct lcb_iovec_st *iov, lcb_size_t niov)
91
+ {
92
+ struct msghdr msg;
93
+ struct iovec vec[2];
94
+ lcb_ssize_t ret;
95
+
96
+ if (niov != 2) {
97
+ return -1;
98
+ }
99
+ memset(&msg, 0, sizeof(msg));
100
+ msg.msg_iov = vec;
101
+ msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
102
+ msg.msg_iov[0].iov_base = iov[0].iov_base;
103
+ msg.msg_iov[0].iov_len = iov[0].iov_len;
104
+ msg.msg_iov[1].iov_base = iov[1].iov_base;
105
+ msg.msg_iov[1].iov_len = iov[1].iov_len;
106
+ ret = sendmsg(sock, &msg, 0);
107
+
108
+ if (ret < 0) {
109
+ iops->v.v0.error = errno;
110
+ }
111
+ return ret;
112
+ }
113
+
114
+ static int
115
+ make_socket_nonblocking(lcb_socket_t sock)
116
+ {
117
+ int flags;
118
+ if ((flags = fcntl(sock, F_GETFL, NULL)) < 0) {
119
+ return -1;
120
+ }
121
+ if (fcntl(sock, F_SETFL, flags | O_NONBLOCK) == -1) {
122
+ return -1;
123
+ }
124
+
125
+ return 0;
126
+ }
127
+
128
+ static int
129
+ close_socket(lcb_socket_t sock)
130
+ {
131
+ return close(sock);
132
+ }
133
+
134
+ static lcb_socket_t
135
+ lcb_io_socket(struct lcb_io_opt_st *iops, int domain, int type,
136
+ int protocol)
137
+ {
138
+ lcb_socket_t sock = socket(domain, type, protocol);
139
+ if (sock == INVALID_SOCKET) {
140
+ iops->v.v0.error = errno;
141
+ } else {
142
+ if (make_socket_nonblocking(sock) != 0) {
143
+ int error = errno;
144
+ iops->v.v0.close(iops, sock);
145
+ iops->v.v0.error = error;
146
+ sock = INVALID_SOCKET;
147
+ }
148
+ }
149
+
150
+ return sock;
151
+ }
152
+
153
+ static void
154
+ lcb_io_close(struct lcb_io_opt_st *iops, lcb_socket_t sock)
155
+ {
156
+ close_socket(sock);
157
+ (void)iops;
158
+ }
159
+
160
+ static int
161
+ lcb_io_connect(struct lcb_io_opt_st *iops, lcb_socket_t sock,
162
+ const struct sockaddr *name, unsigned int namelen)
163
+ {
164
+ int ret = connect(sock, name, (socklen_t)namelen);
165
+ if (ret < 0) {
166
+ iops->v.v0.error = errno;
167
+ }
168
+ return ret;
169
+ }
170
+
171
+ /* events sorted array */
172
+ typedef struct rb_mt_event rb_mt_event;
173
+ struct rb_mt_event {
174
+ void *cb_data;
175
+ void (*handler)(lcb_socket_t sock, short which, void *cb_data);
176
+ lcb_socket_t socket;
177
+ int loop_index;
178
+ short flags;
179
+ short actual_flags;
180
+ short inserted;
181
+ rb_mt_event *next;
182
+ };
183
+
184
+ typedef struct rb_mt_socket_list rb_mt_socket_list;
185
+ struct rb_mt_socket_list {
186
+ lcb_socket_t socket;
187
+ short flags;
188
+ rb_mt_event *first;
189
+ };
190
+
191
+ typedef struct rb_mt_events rb_mt_events;
192
+ struct rb_mt_events {
193
+ uint32_t capa;
194
+ uint32_t count;
195
+ rb_mt_socket_list *sockets;
196
+ };
197
+
198
+ static int
199
+ events_init(rb_mt_events *events)
200
+ {
201
+ rb_mt_socket_list *new_socks = malloc(4 * sizeof(*new_socks));
202
+ if (new_socks == NULL) {
203
+ return 0;
204
+ }
205
+ events->capa = 4;
206
+ events->count = 0;
207
+ events->sockets = new_socks;
208
+ return 1;
209
+ }
210
+
211
+ static void
212
+ events_finalize(rb_mt_events *events)
213
+ {
214
+ if (events->sockets) {
215
+ uint32_t i;
216
+ for(i = 0; i < events->count; i++) {
217
+ rb_mt_socket_list *list = &events->sockets[i];
218
+ while(list->first) {
219
+ rb_mt_event *next = list->first->next;
220
+ free(list->first);
221
+ list->first = next;
222
+ }
223
+ }
224
+ free(events->sockets);
225
+ events->sockets = NULL;
226
+ }
227
+ events->capa = 0;
228
+ events->count = 0;
229
+ }
230
+
231
+ static uint32_t
232
+ events_index(rb_mt_events *events, lcb_socket_t socket)
233
+ {
234
+ uint32_t m, l = 0, r = events->count;
235
+ while(l < r) {
236
+ m = l + (r - l) / 2;
237
+ if (events->sockets[m].socket >= socket) {
238
+ r = m;
239
+ } else {
240
+ l = m + 1;
241
+ }
242
+ }
243
+ return l;
244
+ }
245
+
246
+ static void
247
+ events_insert(rb_mt_events *events, rb_mt_event *event)
248
+ {
249
+ uint32_t i = events_index(events, event->socket);
250
+ rb_mt_socket_list *list = &events->sockets[i];
251
+ if (i == events->count || list->socket != event->socket) {
252
+ if (events->capa == events->count) {
253
+ uint32_t new_capa = events->capa << 1;
254
+ rb_mt_socket_list *new_socks = realloc(events->sockets, new_capa * sizeof(*new_socks));
255
+ if (new_socks == NULL) {
256
+ rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for events array");
257
+ }
258
+ events->sockets = new_socks;
259
+ events->capa = new_capa;
260
+ list = &events->sockets[i];
261
+ }
262
+ if (i < events->count) {
263
+ MEMMOVE(events->sockets+i+1, events->sockets+i, rb_mt_socket_list, events->count - i);
264
+ }
265
+ events->count++;
266
+ list->socket = event->socket;
267
+ list->flags = event->flags;
268
+ list->first = event;
269
+ event->next = NULL;
270
+ } else {
271
+ list->flags |= event->flags;
272
+ event->next = list->first;
273
+ list->first = event;
274
+ }
275
+ event->inserted = 1;
276
+ }
277
+
278
+ static void
279
+ event_list_fix_flags(rb_mt_socket_list *list)
280
+ {
281
+ short flags = 0;
282
+ rb_mt_event *event = list->first;
283
+ while (event) {
284
+ flags |= event->flags;
285
+ event = event->next;
286
+ }
287
+ list->flags = flags;
288
+ }
289
+
290
+ static void
291
+ events_remove(rb_mt_events *events, rb_mt_event *event)
292
+ {
293
+ uint32_t i = events_index(events, event->socket);
294
+ rb_mt_socket_list *list = &events->sockets[i];
295
+ rb_mt_event **next;
296
+ if (list->socket != event->socket) {
297
+ rb_raise(rb_eIndexError, "There is no socket in event loop");
298
+ }
299
+ next = &list->first;
300
+ for(;;) {
301
+ if (*next == NULL) {
302
+ rb_raise(rb_eIndexError, "There is no event in event loop");
303
+ }
304
+ if (*next == event) {
305
+ *next = event->next;
306
+ event->next = NULL;
307
+ event->inserted = 0;
308
+ break;
309
+ }
310
+ next = &event->next;
311
+ }
312
+ if (list->first == NULL) {
313
+ MEMMOVE(events->sockets + i, events->sockets + i + 1, rb_mt_socket_list, events->count - i - 1);
314
+ events->count--;
315
+ } else {
316
+ event_list_fix_flags(list);
317
+ }
318
+ }
319
+
320
+ static void
321
+ events_fix_flags(rb_mt_events *events, lcb_socket_t socket)
322
+ {
323
+ uint32_t i = events_index(events, socket);
324
+ rb_mt_socket_list *list = &events->sockets[i];
325
+ if (list->socket != socket) {
326
+ rb_raise(rb_eIndexError, "There is no socket in event loop");
327
+ }
328
+ event_list_fix_flags(list);
329
+ }
330
+
331
+ static inline lcb_socket_t
332
+ events_max_fd(rb_mt_events *events)
333
+ {
334
+ if (events->count) {
335
+ return events->sockets[events->count - 1].socket;
336
+ } else {
337
+ return -1;
338
+ }
339
+ }
340
+
341
+ /* events sorted array end */
342
+
343
+ /* timers heap */
344
+ typedef struct rb_mt_timer rb_mt_timer;
345
+ struct rb_mt_timer {
346
+ void *cb_data;
347
+ void (*handler)(lcb_socket_t sock, short which, void *cb_data);
348
+ int index;
349
+ hrtime_t ts;
350
+ hrtime_t period;
351
+ };
352
+
353
+ typedef struct rb_mt_timers rb_mt_timers;
354
+ struct rb_mt_timers {
355
+ uint32_t capa;
356
+ uint32_t count;
357
+ rb_mt_timer **timers;
358
+ };
359
+
360
+ static int
361
+ timers_init(rb_mt_timers *timers)
362
+ {
363
+ rb_mt_timer **new_timers = malloc(4 * sizeof(*new_timers));
364
+ if (new_timers == NULL) {
365
+ return 0;
366
+ }
367
+ timers->capa = 4;
368
+ timers->count = 0;
369
+ timers->timers = new_timers;
370
+ return 1;
371
+ }
372
+
373
+ static void
374
+ timers_finalize(rb_mt_timers *timers)
375
+ {
376
+ if (timers->timers) {
377
+ uint32_t i;
378
+ for(i = 0; i < timers->count; i++) {
379
+ free(timers->timers[i]);
380
+ }
381
+ free(timers->timers);
382
+ timers->timers = NULL;
383
+ }
384
+ timers->count = 0;
385
+ timers->capa = 0;
386
+ }
387
+
388
+ #define tms_at(_timers, at) (_timers)->timers[(at)]
389
+ #define tms_ts_at(timers, at) tms_at((timers), (at))->ts
390
+
391
+ static void
392
+ timers_move_last(rb_mt_timers *timers, uint32_t to)
393
+ {
394
+ if (to < timers->count - 1) {
395
+ rb_mt_timer *last = tms_at(timers, timers->count - 1);
396
+ tms_at(timers, to) = last;
397
+ last->index = to;
398
+ }
399
+ timers->count--;
400
+ }
401
+
402
+ static inline void
403
+ timers_swap(rb_mt_timers *timers, uint32_t i, uint32_t j)
404
+ {
405
+ rb_mt_timer *itmp = tms_at(timers, j);
406
+ rb_mt_timer *jtmp = tms_at(timers, i);
407
+ tms_at(timers, i) = itmp;
408
+ tms_at(timers, j) = jtmp;
409
+ itmp->index = i;
410
+ jtmp->index = j;
411
+ }
412
+
413
+ static void timers_heapify_up(rb_mt_timers *timers, uint32_t pos);
414
+
415
+ static void
416
+ timers_insert(rb_mt_timers *timers, rb_mt_timer *timer)
417
+ {
418
+ if (timers->count == timers->capa) {
419
+ rb_mt_timer **new_timers;
420
+ size_t new_capa = timers->capa << 1;
421
+ new_timers = realloc(timers->timers, new_capa * sizeof(rb_mt_timer*));
422
+ if (new_timers == NULL) {
423
+ rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for timers heap");
424
+ }
425
+ timers->timers = new_timers;
426
+ timers->capa = new_capa;
427
+ }
428
+ tms_at(timers, timers->count) = timer;
429
+ timer->index = timers->count;
430
+ timers->count++;
431
+ timers_heapify_up(timers, timer->index);
432
+ }
433
+
434
+ static void
435
+ timers_heapify_up(rb_mt_timers *timers, uint32_t pos)
436
+ {
437
+ hrtime_t cur_ts = tms_ts_at(timers, pos);
438
+ uint32_t higher = (pos - 1) / 2;
439
+ while (pos && tms_ts_at(timers, higher) > cur_ts) {
440
+ timers_swap(timers, higher, pos);
441
+ pos = higher;
442
+ higher = (pos - 1) / 2;
443
+ }
444
+ }
445
+
446
+ static void
447
+ timers_heapify_down(rb_mt_timers *timers, uint32_t pos)
448
+ {
449
+ uint32_t count = timers->count;
450
+ uint32_t middle = (timers->count - 2) / 2;
451
+ hrtime_t cur_ts = tms_ts_at(timers, pos);
452
+ if (count == 1) return;
453
+ while (pos <= middle) {
454
+ uint32_t min_pos = pos;
455
+ hrtime_t ch_ts, min_ts = cur_ts;
456
+
457
+ if ((ch_ts = tms_ts_at(timers, pos * 2 + 1)) < min_ts) {
458
+ min_pos = pos * 2 + 1;
459
+ min_ts = ch_ts;
460
+ }
461
+
462
+ if (pos * 2 + 2 < count && tms_ts_at(timers, pos * 2 + 2) < min_ts) {
463
+ min_pos = pos * 2 + 2;
464
+ }
465
+
466
+ if (min_pos == pos) break;
467
+ timers_swap(timers, pos, min_pos);
468
+ pos = min_pos;
469
+ }
470
+ }
471
+
472
+ static void
473
+ timers_heapify_item(rb_mt_timers *timers, uint32_t pos)
474
+ {
475
+ if (pos && tms_ts_at(timers, pos) < tms_ts_at(timers, (pos - 1) / 2)) {
476
+ timers_heapify_up(timers, pos);
477
+ } else {
478
+ timers_heapify_down(timers, pos);
479
+ }
480
+ }
481
+
482
+ static inline hrtime_t
483
+ timers_minimum(rb_mt_timers *timers)
484
+ {
485
+ if (timers->count) {
486
+ return tms_ts_at(timers, 0);
487
+ } else {
488
+ return 0;
489
+ }
490
+ }
491
+
492
+ static inline rb_mt_timer *
493
+ timers_first(rb_mt_timers *timers)
494
+ {
495
+ if (timers->count) {
496
+ return tms_at(timers, 0);
497
+ } else {
498
+ return 0;
499
+ }
500
+ }
501
+
502
+ static void
503
+ timers_remove_timer(rb_mt_timers *timers, rb_mt_timer *timer)
504
+ {
505
+ uint32_t at = timer->index;
506
+ timer->index = -1;
507
+ if (at < timers->count - 1) {
508
+ timers_move_last(timers, at);
509
+ timers_heapify_item(timers, at);
510
+ } else {
511
+ timers->count--;
512
+ }
513
+ }
514
+
515
+ static void
516
+ timers_run(rb_mt_timers *timers, hrtime_t now)
517
+ {
518
+ hrtime_t next_time = timers_minimum(timers);
519
+ while (next_time && next_time < now) {
520
+ rb_mt_timer *first = timers_first(timers);
521
+
522
+ first->ts = now + first->period;
523
+ timers_heapify_item(timers, 0);
524
+
525
+ first->handler(-1, 0, first->cb_data);
526
+
527
+ next_time = timers_minimum(timers);
528
+ }
529
+ }
530
+ /* timers heap end */
531
+
532
+ /* callbacks array */
533
+ typedef struct rb_mt_callbacks rb_mt_callbacks;
534
+ struct rb_mt_callbacks {
535
+ uint32_t capa;
536
+ uint32_t count;
537
+ rb_mt_event **events;
538
+ };
539
+
540
+ static int
541
+ callbacks_init(rb_mt_callbacks *callbacks)
542
+ {
543
+ rb_mt_event **new_events = calloc(4, sizeof(*new_events));
544
+ if (new_events == NULL) {
545
+ return 0;
546
+ }
547
+ callbacks->events = new_events;
548
+ callbacks->capa = 4;
549
+ callbacks->count = 0;
550
+ return 1;
551
+ }
552
+
553
+ static void
554
+ callbacks_finalize(rb_mt_callbacks *callbacks)
555
+ {
556
+ if (callbacks->events) {
557
+ free(callbacks->events);
558
+ callbacks->events = NULL;
559
+ }
560
+ callbacks->capa = 0;
561
+ callbacks->count = 0;
562
+ }
563
+
564
+ static void
565
+ callbacks_push(rb_mt_callbacks *callbacks, rb_mt_event *event)
566
+ {
567
+ if (callbacks->count == callbacks->capa) {
568
+ uint32_t new_capa = callbacks->capa << 1;
569
+ rb_mt_event **new_events = realloc(callbacks->events, new_capa * sizeof(*new_events));
570
+ if (new_events == NULL) {
571
+ rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for callbacks array");
572
+ }
573
+ callbacks->capa = new_capa;
574
+ callbacks->events = new_events;
575
+ }
576
+ callbacks->events[callbacks->count] = event;
577
+ callbacks->count++;
578
+ }
579
+
580
+ static void
581
+ callbacks_remove(rb_mt_callbacks *callbacks, rb_mt_event *event)
582
+ {
583
+ int i = event->loop_index;
584
+ if (i >= 0) {
585
+ if (callbacks->events[i] != event) {
586
+ rb_raise(rb_eIndexError, "callback index belongs to different callback");
587
+ }
588
+ event->loop_index = -1;
589
+ callbacks->events[i] = NULL;
590
+ }
591
+ }
592
+
593
+ static void
594
+ callbacks_run(rb_mt_callbacks *callbacks)
595
+ {
596
+ uint32_t i;
597
+ for(i = 0; i < callbacks->count; i++) {
598
+ rb_mt_event *cb = callbacks->events[i];
599
+ if (cb) {
600
+ cb->handler(cb->socket, cb->actual_flags, cb->cb_data);
601
+ }
602
+ }
603
+ callbacks->count = 0;
604
+ }
605
+
606
+ static void
607
+ callbacks_clean(rb_mt_callbacks *callbacks)
608
+ {
609
+ uint32_t i;
610
+ for(i = 0; i < callbacks->count; i++) {
611
+ if (callbacks->events[i]) {
612
+ callbacks->events[i]->loop_index = -1;
613
+ callbacks->events[i] = NULL;
614
+ }
615
+ }
616
+ callbacks->count = 0;
617
+ }
618
+ /* callbacks array end */
619
+
620
+ typedef struct rb_mt_loop rb_mt_loop;
621
+ struct rb_mt_loop {
622
+ rb_mt_events events;
623
+ rb_mt_timers timers;
624
+ rb_mt_callbacks callbacks;
625
+ short run;
626
+ };
627
+
628
+ static rb_mt_loop*
629
+ loop_create()
630
+ {
631
+ rb_mt_loop *loop = calloc(1, sizeof(*loop));
632
+ if (loop == NULL) return NULL;
633
+ if (!events_init(&loop->events)) goto free_loop;
634
+ if (!timers_init(&loop->timers)) goto free_events;
635
+ if (!callbacks_init(&loop->callbacks)) goto free_timers;
636
+ return loop;
637
+
638
+ free_timers:
639
+ timers_finalize(&loop->timers);
640
+ free_events:
641
+ events_finalize(&loop->events);
642
+ free_loop:
643
+ free(loop);
644
+ return NULL;
645
+ }
646
+
647
+ static void
648
+ loop_destroy(rb_mt_loop *loop)
649
+ {
650
+ events_finalize(&loop->events);
651
+ timers_finalize(&loop->timers);
652
+ callbacks_finalize(&loop->callbacks);
653
+ free(loop);
654
+ }
655
+
656
+ static void
657
+ loop_remove_event(rb_mt_loop *loop, rb_mt_event *event)
658
+ {
659
+ if (event->inserted) {
660
+ events_remove(&loop->events, event);
661
+ }
662
+ callbacks_remove(&loop->callbacks, event);
663
+ }
664
+
665
+ static void
666
+ loop_enque_events(rb_mt_callbacks *callbacks, rb_mt_event *sock, short flags)
667
+ {
668
+ while (sock) {
669
+ short actual = sock->flags & flags;
670
+ if (actual) {
671
+ sock->actual_flags = actual;
672
+ callbacks_push(callbacks, (rb_mt_event*)sock);
673
+ }
674
+ sock = sock->next;
675
+ }
676
+ }
677
+
678
+ /* loop select implementation */
679
+ #ifndef HAVE_RB_THREAD_FD_SELECT
680
+ typedef fd_set rb_fdset_t;
681
+ #define rb_fd_init FD_ZERO
682
+ #define rb_fd_set FD_SET
683
+ #define rb_fd_isset FD_ISSET
684
+ #define rb_fd_term(set) (void)0
685
+ #define rb_thread_fd_select rb_thread_select
686
+ #endif
687
+
688
+ typedef struct loop_select_arg {
689
+ rb_mt_loop *loop;
690
+ rb_fdset_t in, out;
691
+ } ls_arg;
692
+
693
+ static VALUE
694
+ loop_run_select(VALUE argp)
695
+ {
696
+ ls_arg *args = (ls_arg*) argp;
697
+ rb_mt_loop *loop = args->loop;
698
+ rb_fdset_t *in = NULL, *out = NULL;
699
+ struct timeval timeout;
700
+ struct timeval *timeoutp = NULL;
701
+ int result, max = 0;
702
+ hrtime_t now, next_time;
703
+
704
+ next_time = timers_minimum(&loop->timers);
705
+ if (next_time) {
706
+ now = gethrtime();
707
+ if (next_time <= now) {
708
+ timeout.tv_sec = 0;
709
+ timeout.tv_usec = 0;
710
+ } else {
711
+ hrtime_t hrto = (next_time - now) / 1000;
712
+ timeout.tv_sec = (long)(hrto / 1000000);
713
+ timeout.tv_usec = (long)(hrto % 1000000);
714
+ }
715
+ timeoutp = &timeout;
716
+ }
717
+
718
+ if (loop->events.count) {
719
+ uint32_t i;
720
+ rb_fd_init(&args->in);
721
+ rb_fd_init(&args->out);
722
+ for(i = 0; i < loop->events.count; i++) {
723
+ rb_mt_socket_list *list = &loop->events.sockets[i];
724
+ if (list->flags & LCB_READ_EVENT) {
725
+ in = &args->in;
726
+ rb_fd_set(list->socket, in);
727
+ }
728
+ if (list->flags & LCB_WRITE_EVENT) {
729
+ out = &args->out;
730
+ rb_fd_set(list->socket, out);
731
+ }
732
+ }
733
+ max = events_max_fd(&loop->events) + 1;
734
+ }
735
+
736
+ result = rb_thread_fd_select(max, in, out, NULL, timeoutp);
737
+
738
+ if (result < 0) {
739
+ rb_sys_fail("rb_thread_fd_select");
740
+ }
741
+ /* fix current time so that socket callbacks will not cause timers timeouts */
742
+ if (next_time) {
743
+ now = gethrtime();
744
+ }
745
+
746
+ if (result > 0) {
747
+ uint32_t i;
748
+ for(i = 0; i < loop->events.count && result; i++) {
749
+ rb_mt_socket_list *list = loop->events.sockets + i;
750
+ rb_mt_event *sock = list->first;
751
+ short flags = 0;
752
+ if (in && rb_fd_isset(list->socket, in)) {
753
+ flags |= LCB_READ_EVENT;
754
+ result--;
755
+ }
756
+ if (out && rb_fd_isset(list->socket, out)) {
757
+ flags |= LCB_WRITE_EVENT;
758
+ result--;
759
+ }
760
+ if (flags) {
761
+ loop_enque_events(&loop->callbacks, sock, flags);
762
+ }
763
+ }
764
+ callbacks_run(&loop->callbacks);
765
+ }
766
+
767
+ if (next_time) {
768
+ timers_run(&loop->timers, now);
769
+ }
770
+ if (loop->events.count == 0 && loop->timers.count == 0) {
771
+ loop->run = 0;
772
+ }
773
+ return Qnil;
774
+ }
775
+
776
+ static VALUE
777
+ loop_select_cleanup(VALUE argp)
778
+ {
779
+ ls_arg *args = (ls_arg*) argp;
780
+ rb_fd_term(&args->in);
781
+ rb_fd_term(&args->out);
782
+ callbacks_clean(&args->loop->callbacks);
783
+ return Qnil;
784
+ }
785
+ /* loop select implementation end */
786
+
787
+ /* loop poll implementation */
788
+ #ifdef HAVE_POLL
789
+ /* code influenced by ruby's source and cool.io */
790
+ #define POLLIN_SET (POLLIN | POLLHUP | POLLERR)
791
+ #define POLLOUT_SET (POLLOUT | POLLHUP | POLLERR)
792
+
793
+ #ifndef HAVE_PPOLL
794
+ #if SIZEOF_TIME_T == SIZEOF_LONG
795
+ typedef unsigned long unsigned_time_t;
796
+ #elif SIZEOF_TIME_T == SIZEOF_INT
797
+ typedef unsigned int unsigned_time_t;
798
+ #elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
799
+ typedef unsigned LONG_LONG unsigned_time_t;
800
+ #else
801
+ # error cannot find integer type which size is same as time_t.
802
+ #endif
803
+ #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
804
+ static int
805
+ ppoll(struct pollfd *fds, nfds_t nfds,
806
+ const struct timespec *ts, const sigset_t *sigmask)
807
+ {
808
+ int timeout_ms;
809
+
810
+ if (ts) {
811
+ int tmp, tmp2;
812
+
813
+ if (ts->tv_sec > TIMET_MAX/1000) {
814
+ timeout_ms = -1;
815
+ } else {
816
+ tmp = ts->tv_sec * 1000;
817
+ tmp2 = (ts->tv_nsec + 999999) / (1000 * 1000);
818
+ if (TIMET_MAX - tmp < tmp2) {
819
+ timeout_ms = -1;
820
+ } else {
821
+ timeout_ms = tmp + tmp2;
822
+ }
823
+ }
824
+ } else {
825
+ timeout_ms = -1;
826
+ }
827
+
828
+ (void)sigmask;
829
+
830
+ return poll(fds, nfds, timeout_ms);
831
+ }
832
+ #endif
833
+
834
+ typedef struct poll_args lp_arg;
835
+ struct poll_args {
836
+ rb_mt_loop *loop;
837
+ struct pollfd *fds;
838
+ nfds_t nfd;
839
+ struct timespec *ts;
840
+ int result;
841
+ int lerrno;
842
+ };
843
+
844
+ #ifdef HAVE_RB_THREAD_BLOCKING_REGION
845
+ static VALUE
846
+ loop_blocking_poll(void *argp)
847
+ {
848
+ lp_arg *args = argp;
849
+ args->result = ppoll(args->fds, args->nfd, args->ts, NULL);
850
+ if (args->result < 0) args->lerrno = errno;
851
+ return Qnil;
852
+ }
853
+ #endif
854
+
855
+ static VALUE
856
+ loop_run_poll(VALUE argp)
857
+ {
858
+ lp_arg *args = (struct poll_args *)argp;
859
+ rb_mt_loop *loop = args->loop;
860
+ struct timespec ts;
861
+ hrtime_t now, next_time;
862
+
863
+ if (loop->events.count) {
864
+ uint32_t i;
865
+ args->fds = calloc(loop->events.count, sizeof(struct pollfd));
866
+ if (args->fds == NULL) {
867
+ rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for pollfd");
868
+ }
869
+ for(i = 0; i < loop->events.count; i++) {
870
+ rb_mt_socket_list *list = &loop->events.sockets[i];
871
+ args->fds[i].fd = list->socket;
872
+ args->fds[i].events =
873
+ (list->flags & LCB_READ_EVENT ? POLLIN : 0) |
874
+ (list->flags & LCB_WRITE_EVENT ? POLLOUT : 0);
875
+ }
876
+ args->nfd = loop->events.count;
877
+ }
878
+
879
+ retry:
880
+ next_time = timers_minimum(&loop->timers);
881
+ if (next_time) {
882
+ now = gethrtime();
883
+ if (next_time <= now) {
884
+ ts.tv_sec = 0;
885
+ ts.tv_nsec = 0;
886
+ } else {
887
+ hrtime_t hrto = next_time - now;
888
+ ts.tv_sec = (long)(hrto / 1000000000);
889
+ ts.tv_nsec = (long)(hrto % 1000000000);
890
+ }
891
+ args->ts = &ts;
892
+ } else {
893
+ args->ts = NULL;
894
+ }
895
+
896
+ #ifdef HAVE_RB_THREAD_BLOCKING_REGION
897
+ rb_thread_blocking_region(loop_blocking_poll, args, RUBY_UBF_PROCESS, NULL);
898
+ #else
899
+ if (rb_thread_alone()) {
900
+ TRAP_BEG;
901
+ args->result = ppoll(args->fds, args->nfd, args->ts, NULL);
902
+ if (args->result < 0) args->lerrno = errno;
903
+ TRAP_END;
904
+ } else {
905
+ struct timespec mini_pause;
906
+ int exact = 0;
907
+ mini_pause.tv_sec = 0;
908
+ /* 5 millisecond pause */
909
+ mini_pause.tv_nsec = 5000000;
910
+ if (args->ts && ts.tv_sec == 0 && ts.tv_nsec < 5000000) {
911
+ mini_pause.tv_nsec = ts.tv_nsec;
912
+ exact = 1;
913
+ }
914
+ TRAP_BEG;
915
+ args->result = ppoll(args->fds, args->nfd, &mini_pause, NULL);
916
+ if (args->result < 0) args->lerrno = errno;
917
+ TRAP_END;
918
+ if (args->result == 0 && !exact) {
919
+ args->result = -1;
920
+ args->lerrno = EINTR;
921
+ }
922
+ }
923
+ #endif
924
+
925
+ if (args->result < 0) {
926
+ errno = args->lerrno;
927
+ switch (errno) {
928
+ case EINTR:
929
+ #ifdef ERESTART
930
+ case ERESTART:
931
+ #endif
932
+ #ifndef HAVE_RB_THREAD_BLOCKING_REGION
933
+ rb_thread_schedule();
934
+ #endif
935
+ goto retry;
936
+ }
937
+ rb_sys_fail("poll");
938
+ return Qnil;
939
+ }
940
+
941
+ if (next_time) {
942
+ now = gethrtime();
943
+ }
944
+
945
+ if (args->result > 0) {
946
+ uint32_t cnt = args->result;
947
+ uint32_t fd_n = 0, ev_n = 0;
948
+ while (cnt && fd_n < args->nfd && ev_n < loop->events.count) {
949
+ struct pollfd *res = args->fds + fd_n;
950
+ rb_mt_socket_list *list = loop->events.sockets + ev_n;
951
+ rb_mt_event *sock = list->first;
952
+
953
+ /* if plugin used correctly, this checks are noop */
954
+ if (res->fd < list->socket) {
955
+ fd_n++;
956
+ continue;
957
+ } else if (res->fd > list->socket) {
958
+ ev_n++;
959
+ continue;
960
+ }
961
+
962
+ if (res->revents) {
963
+ short flags =
964
+ ((res->revents & POLLIN_SET) ? LCB_READ_EVENT : 0) |
965
+ ((res->revents & POLLOUT_SET) ? LCB_WRITE_EVENT : 0);
966
+ cnt--;
967
+ loop_enque_events(&loop->callbacks, sock, flags);
968
+ }
969
+ fd_n++;
970
+ ev_n++;
971
+ }
972
+ callbacks_run(&loop->callbacks);
973
+ }
974
+
975
+ if (next_time) {
976
+ timers_run(&loop->timers, now);
977
+ }
978
+ if (loop->events.count == 0 && loop->timers.count == 0) {
979
+ loop->run = 0;
980
+ }
981
+ return Qnil;
982
+ }
983
+
984
+ static VALUE
985
+ loop_poll_cleanup(VALUE argp)
986
+ {
987
+ lp_arg *args = (struct poll_args *)argp;
988
+ if (args->fds) {
989
+ free(args->fds);
990
+ }
991
+ callbacks_clean(&args->loop->callbacks);
992
+ return Qnil;
993
+ }
994
+ #endif
995
+ /* loop poll implementation end */
996
+
997
+ static void
998
+ loop_run(rb_mt_loop *loop)
999
+ {
1000
+
1001
+ loop->run = 1;
1002
+
1003
+ while(loop->run) {
1004
+ #ifdef HAVE_POLL
1005
+ /* prefer use of poll when it gives some benefits, but use rb_thread_fd_select when it is sufficient */
1006
+ lcb_socket_t max = events_max_fd(&loop->events);
1007
+ int use_poll = max >= 128;
1008
+ if (use_poll) {
1009
+ lp_arg args;
1010
+ memset(&args, 0, sizeof(args));
1011
+ args.loop = loop;
1012
+ rb_ensure(loop_run_poll, (VALUE)&args, loop_poll_cleanup, (VALUE)&args);
1013
+ } else
1014
+ #endif
1015
+ {
1016
+ ls_arg args;
1017
+ memset(&args, 0, sizeof(args));
1018
+ args.loop = loop;
1019
+ rb_ensure(loop_run_select, (VALUE)&args, loop_select_cleanup, (VALUE)&args);
1020
+ }
1021
+ }
1022
+ }
1023
+
1024
+ static void *
1025
+ lcb_io_create_event(struct lcb_io_opt_st *iops)
1026
+ {
1027
+ rb_mt_event *event = calloc(1, sizeof(*event));
1028
+ (void)iops;
1029
+ event->loop_index = -1;
1030
+ return event;
1031
+ }
1032
+
1033
+ static int
1034
+ lcb_io_update_event(struct lcb_io_opt_st *iops,
1035
+ lcb_socket_t sock,
1036
+ void *eventp,
1037
+ short flags,
1038
+ void *cb_data,
1039
+ void (*handler)(lcb_socket_t sock,
1040
+ short which,
1041
+ void *cb_data))
1042
+ {
1043
+ rb_mt_loop *loop = iops->v.v0.cookie;
1044
+ rb_mt_event *event = eventp;
1045
+ short old_flags = event->flags;
1046
+
1047
+ if (event->inserted && old_flags == flags &&
1048
+ cb_data == event->cb_data && handler == event->handler)
1049
+ {
1050
+ return 0;
1051
+ }
1052
+ loop_remove_event(loop, event);
1053
+ event->flags = flags;
1054
+ event->cb_data = cb_data;
1055
+ event->handler = handler;
1056
+ event->socket = sock;
1057
+ if (!event->inserted) {
1058
+ events_insert(&loop->events, event);
1059
+ }
1060
+ if ((old_flags & flags) != old_flags) {
1061
+ events_fix_flags(&loop->events, sock);
1062
+ }
1063
+ return 0;
1064
+ }
1065
+
1066
+ static void
1067
+ lcb_io_delete_event(struct lcb_io_opt_st *iops,
1068
+ lcb_socket_t sock,
1069
+ void *event)
1070
+ {
1071
+ loop_remove_event((rb_mt_loop*)iops->v.v0.cookie, (rb_mt_event*)event);
1072
+ (void)sock;
1073
+ }
1074
+
1075
static void
lcb_io_destroy_event(struct lcb_io_opt_st *iops,
                     void *event)
{
    /* Detach first so the loop cannot call into freed memory. */
    lcb_io_delete_event(iops, -1, event);
    free(event);
}
1082
+
1083
+ static void *
1084
+ lcb_io_create_timer(struct lcb_io_opt_st *iops)
1085
+ {
1086
+ rb_mt_timer *timer = calloc(1, sizeof(*timer));
1087
+ timer->index = -1;
1088
+ (void)iops;
1089
+ return timer;
1090
+ }
1091
+
1092
+ static int
1093
+ lcb_io_update_timer(struct lcb_io_opt_st *iops, void *event,
1094
+ lcb_uint32_t usec, void *cb_data,
1095
+ void (*handler)(lcb_socket_t sock, short which, void *cb_data))
1096
+ {
1097
+ rb_mt_loop *loop = iops->v.v0.cookie;
1098
+ rb_mt_timer *timer = event;
1099
+
1100
+ timer->period = usec * (hrtime_t)1000;
1101
+ timer->ts = gethrtime() + timer->period;
1102
+ timer->cb_data = cb_data;
1103
+ timer->handler = handler;
1104
+ if (timer->index != -1) {
1105
+ timers_heapify_item(&loop->timers, timer->index);
1106
+ } else {
1107
+ timers_insert(&loop->timers, timer);
1108
+ }
1109
+ return 0;
1110
+ }
1111
+
1112
+ static void
1113
+ lcb_io_delete_timer(struct lcb_io_opt_st *iops, void *event)
1114
+ {
1115
+ rb_mt_loop *loop = iops->v.v0.cookie;
1116
+ rb_mt_timer *timer = event;
1117
+ if (timer->index != -1) {
1118
+ timers_remove_timer(&loop->timers, timer);
1119
+ }
1120
+ }
1121
+
1122
static void
lcb_io_destroy_timer(struct lcb_io_opt_st *iops, void *timer)
{
    /* Unschedule first so the heap never references freed memory. */
    lcb_io_delete_timer(iops, timer);
    free(timer);
}
1128
+
1129
+ static void
1130
+ lcb_io_stop_event_loop(struct lcb_io_opt_st *iops)
1131
+ {
1132
+ rb_mt_loop *loop = iops->v.v0.cookie;
1133
+ loop->run = 0;
1134
+ }
1135
+
1136
+ static void
1137
+ lcb_io_run_event_loop(struct lcb_io_opt_st *iops)
1138
+ {
1139
+ rb_mt_loop *loop = iops->v.v0.cookie;
1140
+ loop_run(loop);
1141
+ }
1142
+
1143
+ static void
1144
+ lcb_destroy_io_opts(struct lcb_io_opt_st *iops)
1145
+ {
1146
+ rb_mt_loop *loop = iops->v.v0.cookie;
1147
+ loop_destroy(loop);
1148
+ free(iops);
1149
+ }
1150
+
1151
+ LIBCOUCHBASE_API lcb_error_t
1152
+ cb_create_ruby_mt_io_opts(int version, lcb_io_opt_t *io, void *arg)
1153
+ {
1154
+ struct lcb_io_opt_st *ret;
1155
+ rb_mt_loop *loop;
1156
+ (void)arg;
1157
+ if (version != 0) {
1158
+ return LCB_PLUGIN_VERSION_MISMATCH;
1159
+ }
1160
+ ret = calloc(1, sizeof(*ret));
1161
+ if (ret == NULL) {
1162
+ free(ret);
1163
+ return LCB_CLIENT_ENOMEM;
1164
+ }
1165
+
1166
+ ret->version = 0;
1167
+ ret->dlhandle = NULL;
1168
+ ret->destructor = lcb_destroy_io_opts;
1169
+ /* consider that struct isn't allocated by the library,
1170
+ * `need_cleanup' flag might be set in lcb_create() */
1171
+ ret->v.v0.need_cleanup = 0;
1172
+ ret->v.v0.recv = lcb_io_recv;
1173
+ ret->v.v0.send = lcb_io_send;
1174
+ ret->v.v0.recvv = lcb_io_recvv;
1175
+ ret->v.v0.sendv = lcb_io_sendv;
1176
+ ret->v.v0.socket = lcb_io_socket;
1177
+ ret->v.v0.close = lcb_io_close;
1178
+ ret->v.v0.connect = lcb_io_connect;
1179
+ ret->v.v0.delete_event = lcb_io_delete_event;
1180
+ ret->v.v0.destroy_event = lcb_io_destroy_event;
1181
+ ret->v.v0.create_event = lcb_io_create_event;
1182
+ ret->v.v0.update_event = lcb_io_update_event;
1183
+
1184
+ ret->v.v0.delete_timer = lcb_io_delete_timer;
1185
+ ret->v.v0.destroy_timer = lcb_io_destroy_timer;
1186
+ ret->v.v0.create_timer = lcb_io_create_timer;
1187
+ ret->v.v0.update_timer = lcb_io_update_timer;
1188
+
1189
+ ret->v.v0.run_event_loop = lcb_io_run_event_loop;
1190
+ ret->v.v0.stop_event_loop = lcb_io_stop_event_loop;
1191
+
1192
+ loop = loop_create();
1193
+ if (loop == NULL) {
1194
+ free(ret);
1195
+ return LCB_CLIENT_ENOMEM;
1196
+ }
1197
+ ret->v.v0.cookie = loop;
1198
+ *io = ret;
1199
+ return LCB_SUCCESS;
1200
+ }
1201
+ #endif /* _WIN32 */