curb 1.1.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +5 -1
- data/ext/curb.c +6 -4
- data/ext/curb.h +3 -3
- data/ext/curb_easy.c +97 -8
- data/ext/curb_multi.c +477 -15
- data/ext/curb_postfield.c +6 -0
- data/ext/curb_upload.c +3 -0
- data/ext/extconf.rb +214 -17
- data/tests/bug_issue_noproxy.rb +56 -0
- data/tests/bug_issue_post_redirect.rb +93 -0
- data/tests/bug_issue_spnego.rb +41 -0
- data/tests/helper.rb +33 -0
- data/tests/mem_check.rb +3 -0
- data/tests/tc_curl_download.rb +2 -1
- data/tests/tc_curl_easy.rb +65 -6
- data/tests/tc_curl_multi.rb +2 -0
- data/tests/tc_fiber_scheduler.rb +190 -0
- data/tests/test_basic.rb +29 -0
- data/tests/test_fiber_debug.rb +69 -0
- data/tests/test_fiber_simple.rb +65 -0
- data/tests/test_real_url.rb +65 -0
- data/tests/test_simple_fiber.rb +34 -0
- metadata +20 -6
- data/tests/bug_issue277.rb +0 -32
- data/tests/bug_resolve.rb +0 -15
data/ext/curb_multi.c
CHANGED
@@ -5,6 +5,9 @@
  */
 #include "curb_config.h"
 #include <ruby.h>
+#ifdef HAVE_RUBY_IO_H
+#include <ruby/io.h>
+#endif
 #ifdef HAVE_RUBY_ST_H
 #include <ruby/st.h>
 #else
@@ -14,6 +17,9 @@
 #ifdef HAVE_RB_THREAD_CALL_WITHOUT_GVL
 #include <ruby/thread.h>
 #endif
+#ifdef HAVE_RUBY_FIBER_SCHEDULER_H
+#include <ruby/fiber/scheduler.h>
+#endif
 
 #include "curb_easy.h"
 #include "curb_errors.h"
@@ -21,13 +27,25 @@
 #include "curb_multi.h"
 
 #include <errno.h>
+#include <stdarg.h>
+
+/*
+ * Optional socket-action debug logging. Enabled by defining CURB_SOCKET_DEBUG=1
+ * at compile time (e.g. via environment variable passed to extconf.rb).
+ */
+#ifndef CURB_SOCKET_DEBUG
+#define CURB_SOCKET_DEBUG 0
+#endif
+#if !CURB_SOCKET_DEBUG
+#define curb_debugf(...) ((void)0)
+#endif
 
 #ifdef _WIN32
 // for O_RDWR and O_BINARY
 #include <fcntl.h>
 #endif
 
-#
+#if 0 /* disabled curl_multi_wait in favor of scheduler-aware fdsets */
 #include <stdint.h> /* for intptr_t */
 
 struct wait_args {
@@ -102,6 +120,9 @@ static void ruby_curl_multi_init(ruby_curl_multi *rbcm) {
  */
 VALUE ruby_curl_multi_new(VALUE klass) {
   ruby_curl_multi *rbcm = ALLOC(ruby_curl_multi);
+  if (!rbcm) {
+    rb_raise(rb_eNoMemError, "Failed to allocate memory for Curl::Multi");
+  }
 
   ruby_curl_multi_init(rbcm);
 
@@ -324,6 +345,23 @@ static VALUE call_status_handler2(VALUE ary) {
   return rb_funcall(rb_ary_entry(ary, 0), idCall, 2, rb_ary_entry(ary, 1), rb_ary_entry(ary, 2));
 }
 
+static void flush_stderr_if_any(ruby_curl_easy *rbce) {
+  VALUE stderr_io = rb_easy_get("stderr_io");
+  if (stderr_io != Qnil) {
+    /* Flush via Ruby IO API */
+    rb_funcall(stderr_io, rb_intern("flush"), 0);
+#ifdef HAVE_RUBY_IO_H
+    /* Additionally flush underlying FILE* to be extra safe. */
+    rb_io_t *open_f_ptr;
+    if (RB_TYPE_P(stderr_io, T_FILE)) {
+      GetOpenFile(stderr_io, open_f_ptr);
+      FILE *fp = rb_io_stdio_file(open_f_ptr);
+      if (fp) fflush(fp);
+    }
+#endif
+  }
+}
+
 static void rb_curl_mutli_handle_complete(VALUE self, CURL *easy_handle, int result) {
   long response_code = -1;
   VALUE easy;
@@ -336,6 +374,10 @@ static void rb_curl_mutli_handle_complete(VALUE self, CURL *easy_handle, int res
 
   rbce->last_result = result; /* save the last easy result code */
 
+  /* Ensure any verbose output redirected via CURLOPT_STDERR is flushed
+   * before we tear down handler state. */
+  flush_stderr_if_any(rbce);
+
   // remove the easy handle from multi on completion so it can be reused again
   rb_funcall(self, rb_intern("remove"), 1, easy);
 
@@ -345,6 +387,9 @@ static void rb_curl_mutli_handle_complete(VALUE self, CURL *easy_handle, int res
     rbce->curl_headers = NULL;
   }
 
+  /* Flush again after removal to cover any last buffered data. */
+  flush_stderr_if_any(rbce);
+
   if (ecode != 0) {
     raise_curl_easy_error_exception(ecode);
   }
@@ -361,7 +406,7 @@ static void rb_curl_mutli_handle_complete(VALUE self, CURL *easy_handle, int res
 
 #ifdef HAVE_CURLINFO_RESPONSE_CODE
   curl_easy_getinfo(rbce->curl, CURLINFO_RESPONSE_CODE, &response_code);
-#else
+#else /* use fdsets path for waiting */
   // old libcurl
   curl_easy_getinfo(rbce->curl, CURLINFO_HTTP_CODE, &response_code);
 #endif
@@ -386,11 +431,16 @@ static void rb_curl_mutli_handle_complete(VALUE self, CURL *easy_handle, int res
     CURB_CHECK_RB_CALLBACK_RAISE(did_raise);
 
   } else if (!rb_easy_nil("redirect_proc") && ((response_code >= 300 && response_code < 400) || redirect_count > 0) ) {
-
-
-
-
-
+    /* Skip on_redirect callback if follow_location is false AND max_redirects is 0 */
+    if (!rbce->follow_location && rbce->max_redirs == 0) {
+      // Do nothing - skip the callback
+    } else {
+      rbce->callback_active = 1;
+      callargs = rb_ary_new3(3, rb_easy_get("redirect_proc"), easy, rb_curl_easy_error(result));
+      rbce->callback_active = 0;
+      rb_rescue(call_status_handler2, callargs, callback_exception, did_raise);
+      CURB_CHECK_RB_CALLBACK_RAISE(did_raise);
+    }
   } else if (!rb_easy_nil("missing_proc") &&
         (response_code >= 400 && response_code < 500)) {
     rbce->callback_active = 1;
@@ -466,6 +516,367 @@ static void rb_curl_multi_run(VALUE self, CURLM *multi_handle, int *still_runnin
    */
 }
 
+#if defined(HAVE_CURL_MULTI_SOCKET_ACTION) && defined(HAVE_CURLMOPT_SOCKETFUNCTION) && defined(HAVE_CURLMOPT_TIMERFUNCTION) && defined(HAVE_RB_THREAD_FD_SELECT) && !defined(_WIN32)
+/* ---- socket-action implementation (scheduler-friendly) ---- */
+typedef struct {
+  st_table *sock_map;   /* key: int fd, value: int 'what' (CURL_POLL_*) */
+  long timeout_ms;      /* last timeout set by libcurl timer callback */
+} multi_socket_ctx;
+
+#if CURB_SOCKET_DEBUG
+static void curb_debugf(const char *fmt, ...) {
+  va_list ap;
+  va_start(ap, fmt);
+  vfprintf(stderr, fmt, ap);
+  fputc('\n', stderr);
+  fflush(stderr);
+  va_end(ap);
+}
+
+static const char *poll_what_str(int what, char *buf, size_t n) {
+  /* what is one of CURL_POLL_*, not a bitmask except INOUT */
+  if (what == CURL_POLL_REMOVE) snprintf(buf, n, "REMOVE");
+  else if (what == CURL_POLL_IN) snprintf(buf, n, "IN");
+  else if (what == CURL_POLL_OUT) snprintf(buf, n, "OUT");
+  else if (what == CURL_POLL_INOUT) snprintf(buf, n, "INOUT");
+  else snprintf(buf, n, "WHAT=%d", what);
+  return buf;
+}
+
+static const char *cselect_flags_str(int flags, char *buf, size_t n) {
+  char tmp[32]; tmp[0] = 0;
+  int off = 0;
+  if (flags & CURL_CSELECT_IN) off += snprintf(tmp+off, (size_t)(sizeof(tmp)-off), "%sIN", off?"|":"");
+  if (flags & CURL_CSELECT_OUT) off += snprintf(tmp+off, (size_t)(sizeof(tmp)-off), "%sOUT", off?"|":"");
+  if (flags & CURL_CSELECT_ERR) off += snprintf(tmp+off, (size_t)(sizeof(tmp)-off), "%sERR", off?"|":"");
+  if (off == 0) snprintf(tmp, sizeof(tmp), "0");
+  snprintf(buf, n, "%s", tmp);
+  return buf;
+}
+#else
+#define poll_what_str(...) ""
+#define cselect_flags_str(...) ""
+#endif
+
+/* Protected call to rb_fiber_scheduler_io_wait to avoid unwinding into C on TypeError. */
+struct fiber_io_wait_args { VALUE scheduler; VALUE io; int events; VALUE timeout; };
+static VALUE fiber_io_wait_protected(VALUE argp) {
+  struct fiber_io_wait_args *a = (struct fiber_io_wait_args *)argp;
+  return rb_fiber_scheduler_io_wait(a->scheduler, a->io, a->events, a->timeout);
+}
+
+static int multi_socket_cb(CURL *easy, curl_socket_t s, int what, void *userp, void *socketp) {
+  multi_socket_ctx *ctx = (multi_socket_ctx *)userp;
+  (void)easy; (void)socketp;
+  int fd = (int)s;
+
+  if (!ctx || !ctx->sock_map) return 0;
+
+  if (what == CURL_POLL_REMOVE) {
+    st_data_t k = (st_data_t)fd;
+    st_data_t rec;
+    st_delete(ctx->sock_map, &k, &rec);
+    {
+      char b[16];
+      curb_debugf("[curb.socket] sock_cb fd=%d what=%s (removed)", fd, poll_what_str(what, b, sizeof(b)));
+    }
+  } else {
+    /* store current interest mask for this fd */
+    st_insert(ctx->sock_map, (st_data_t)fd, (st_data_t)what);
+    {
+      char b[16];
+      curb_debugf("[curb.socket] sock_cb fd=%d what=%s (tracked)", fd, poll_what_str(what, b, sizeof(b)));
+    }
+  }
+  return 0;
+}
+
+static int multi_timer_cb(CURLM *multi, long timeout_ms, void *userp) {
+  (void)multi;
+  multi_socket_ctx *ctx = (multi_socket_ctx *)userp;
+  if (ctx) ctx->timeout_ms = timeout_ms;
+  curb_debugf("[curb.socket] timer_cb timeout_ms=%ld", timeout_ms);
+  return 0;
+}
+
+struct build_fdset_args { rb_fdset_t *r; rb_fdset_t *w; rb_fdset_t *e; int maxfd; };
+static int rb_fdset_from_sockmap_i(st_data_t key, st_data_t val, st_data_t argp) {
+  struct build_fdset_args *a = (struct build_fdset_args *)argp;
+  int fd = (int)key;
+  int what = (int)val;
+  if (what & CURL_POLL_IN) rb_fd_set(fd, a->r);
+  if (what & CURL_POLL_OUT) rb_fd_set(fd, a->w);
+  rb_fd_set(fd, a->e);
+  if (fd > a->maxfd) a->maxfd = fd;
+  return ST_CONTINUE;
+}
+static void rb_fdset_from_sockmap(st_table *map, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, int *maxfd_out) {
+  if (!map) { *maxfd_out = -1; return; }
+  struct build_fdset_args a; a.r = rfds; a.w = wfds; a.e = efds; a.maxfd = -1;
+  st_foreach(map, rb_fdset_from_sockmap_i, (st_data_t)&a);
+  *maxfd_out = a.maxfd;
+}
+
+struct dispatch_args { CURLM *mh; int *running; CURLMcode mrc; rb_fdset_t *r; rb_fdset_t *w; rb_fdset_t *e; };
+static int dispatch_ready_fd_i(st_data_t key, st_data_t val, st_data_t argp) {
+  (void)val;
+  struct dispatch_args *dp = (struct dispatch_args *)argp;
+  int fd = (int)key;
+  int flags = 0;
+  if (rb_fd_isset(fd, dp->r)) flags |= CURL_CSELECT_IN;
+  if (rb_fd_isset(fd, dp->w)) flags |= CURL_CSELECT_OUT;
+  if (rb_fd_isset(fd, dp->e)) flags |= CURL_CSELECT_ERR;
+  if (flags) {
+    dp->mrc = curl_multi_socket_action(dp->mh, (curl_socket_t)fd, flags, dp->running);
+    if (dp->mrc != CURLM_OK) return ST_STOP;
+  }
+  return ST_CONTINUE;
+}
+
+/* Helpers used with st_foreach to avoid compiler-specific nested functions. */
+struct pick_one_state { int fd; int what; int found; };
+static int st_pick_one_i(st_data_t key, st_data_t val, st_data_t argp) {
+  struct pick_one_state *s = (struct pick_one_state *)argp;
+  s->fd = (int)key;
+  s->what = (int)val;
+  s->found = 1;
+  return ST_STOP;
+}
+struct counter_state { int count; };
+static int st_count_i(st_data_t k, st_data_t v, st_data_t argp) {
+  (void)k; (void)v;
+  struct counter_state *c = (struct counter_state *)argp;
+  c->count++;
+  return ST_CONTINUE;
+}
+
+static void rb_curl_multi_socket_drive(VALUE self, ruby_curl_multi *rbcm, multi_socket_ctx *ctx, VALUE block) {
+  /* prime the state: let libcurl act on timeouts to setup sockets */
+  CURLMcode mrc = curl_multi_socket_action(rbcm->handle, CURL_SOCKET_TIMEOUT, 0, &rbcm->running);
+  if (mrc != CURLM_OK) raise_curl_multi_error_exception(mrc);
+  curb_debugf("[curb.socket] drive: initial socket_action timeout -> mrc=%d running=%d", mrc, rbcm->running);
+  rb_curl_multi_read_info(self, rbcm->handle);
+  if (block != Qnil) rb_funcall(block, rb_intern("call"), 1, self);
+
+  while (rbcm->running) {
+    struct timeval tv = {0, 0};
+    if (ctx->timeout_ms < 0) {
+      tv.tv_sec = cCurlMutiDefaulttimeout / 1000;
+      tv.tv_usec = (cCurlMutiDefaulttimeout % 1000) * 1000;
+    } else {
+      long t = ctx->timeout_ms;
+      if (t > cCurlMutiDefaulttimeout) t = cCurlMutiDefaulttimeout;
+      if (t < 0) t = 0;
+      tv.tv_sec = t / 1000;
+      tv.tv_usec = (t % 1000) * 1000;
+    }
+
+    /* Find a representative fd to wait on (if any). */
+    int wait_fd = -1;
+    int wait_what = 0;
+    if (ctx->sock_map) {
+      struct pick_one_state st = { -1, 0, 0 };
+      st_foreach(ctx->sock_map, st_pick_one_i, (st_data_t)&st);
+      if (st.found) { wait_fd = st.fd; wait_what = st.what; }
+    }
+
+    /* Count tracked fds for logging */
+    int count_tracked = 0;
+    if (ctx->sock_map) {
+      struct counter_state cs = { 0 };
+      st_foreach(ctx->sock_map, st_count_i, (st_data_t)&cs);
+      count_tracked = cs.count;
+    }
+
+    curb_debugf("[curb.socket] wait phase: tracked_fds=%d fd=%d what=%d tv=%ld.%06ld", count_tracked, wait_fd, wait_what, (long)tv.tv_sec, (long)tv.tv_usec);
+
+    int did_timeout = 0;
+    int any_ready = 0;
+
+    int handled_wait = 0;
+    if (count_tracked > 1) {
+      /* Multi-fd wait using scheduler-aware rb_thread_fd_select. */
+      rb_fdset_t rfds, wfds, efds;
+      rb_fd_init(&rfds); rb_fd_init(&wfds); rb_fd_init(&efds);
+      int maxfd = -1;
+      struct build_fdset_args a2; a2.r = &rfds; a2.w = &wfds; a2.e = &efds; a2.maxfd = -1;
+      st_foreach(ctx->sock_map, rb_fdset_from_sockmap_i, (st_data_t)&a2);
+      maxfd = a2.maxfd;
+      int rc = rb_thread_fd_select(maxfd + 1, &rfds, &wfds, &efds, &tv);
+      curb_debugf("[curb.socket] rb_thread_fd_select(multi) rc=%d maxfd=%d", rc, maxfd);
+      if (rc < 0) {
+        rb_fd_term(&rfds); rb_fd_term(&wfds); rb_fd_term(&efds);
+        if (errno != EINTR) rb_raise(rb_eRuntimeError, "select(): %s", strerror(errno));
+        continue;
+      }
+      any_ready = (rc > 0);
+      did_timeout = (rc == 0);
+      if (any_ready) {
+        struct dispatch_args d; d.mh = rbcm->handle; d.running = &rbcm->running; d.mrc = CURLM_OK; d.r = &rfds; d.w = &wfds; d.e = &efds;
+        st_foreach(ctx->sock_map, dispatch_ready_fd_i, (st_data_t)&d);
+        if (d.mrc != CURLM_OK) {
+          rb_fd_term(&rfds); rb_fd_term(&wfds); rb_fd_term(&efds);
+          raise_curl_multi_error_exception(d.mrc);
+        }
+      }
+      rb_fd_term(&rfds); rb_fd_term(&wfds); rb_fd_term(&efds);
+      handled_wait = 1;
+    } else if (count_tracked == 1) {
+#if defined(HAVE_RB_WAIT_FOR_SINGLE_FD)
+      if (wait_fd >= 0) {
+        int ev = 0;
+        if (wait_what == CURL_POLL_IN) ev = RB_WAITFD_IN;
+        else if (wait_what == CURL_POLL_OUT) ev = RB_WAITFD_OUT;
+        else if (wait_what == CURL_POLL_INOUT) ev = RB_WAITFD_IN|RB_WAITFD_OUT;
+        int rc = rb_wait_for_single_fd(wait_fd, ev, &tv);
+        curb_debugf("[curb.socket] rb_wait_for_single_fd rc=%d fd=%d ev=%d", rc, wait_fd, ev);
+        if (rc < 0) {
+          if (errno != EINTR) rb_raise(rb_eRuntimeError, "wait_for_single_fd(): %s", strerror(errno));
+          continue;
+        }
+        any_ready = (rc != 0);
+        did_timeout = (rc == 0);
+        handled_wait = 1;
+      }
+#endif
+#if defined(HAVE_RB_FIBER_SCHEDULER_IO_WAIT) && defined(HAVE_RB_FIBER_SCHEDULER_CURRENT)
+      if (!handled_wait) {
+        VALUE scheduler = rb_fiber_scheduler_current();
+        if (scheduler != Qnil) {
+          int events = 0;
+          if (wait_fd >= 0) {
+            if (wait_what == CURL_POLL_IN) events = RB_WAITFD_IN;
+            else if (wait_what == CURL_POLL_OUT) events = RB_WAITFD_OUT;
+            else if (wait_what == CURL_POLL_INOUT) events = RB_WAITFD_IN|RB_WAITFD_OUT;
+            else events = RB_WAITFD_IN|RB_WAITFD_OUT;
+          }
+          double timeout_s = (double)tv.tv_sec + ((double)tv.tv_usec / 1e6);
+          VALUE timeout = rb_float_new(timeout_s);
+          if (wait_fd < 0) {
+            rb_thread_wait_for(tv);
+            did_timeout = 1;
+          } else {
+            const char *mode = (wait_what == CURL_POLL_IN) ? "r" : (wait_what == CURL_POLL_OUT) ? "w" : "r+";
+            VALUE io = rb_funcall(rb_cIO, rb_intern("for_fd"), 2, INT2NUM(wait_fd), rb_str_new_cstr(mode));
+            rb_funcall(io, rb_intern("autoclose="), 1, Qfalse);
+            struct fiber_io_wait_args args = { scheduler, io, events, timeout };
+            int state = 0;
+            VALUE ready = rb_protect(fiber_io_wait_protected, (VALUE)&args, &state);
+            if (state) {
+              did_timeout = 1; any_ready = 0;
+            } else {
+              any_ready = (ready != Qfalse);
+              did_timeout = !any_ready;
+            }
+          }
+          handled_wait = 1;
+        }
+      }
+#endif
+      if (!handled_wait) {
+        /* Fallback: single-fd select. */
+        rb_fdset_t rfds, wfds, efds;
+        rb_fd_init(&rfds); rb_fd_init(&wfds); rb_fd_init(&efds);
+        int maxfd = -1;
+        if (wait_fd >= 0) {
+          if (wait_what == CURL_POLL_IN || wait_what == CURL_POLL_INOUT) rb_fd_set(wait_fd, &rfds);
+          if (wait_what == CURL_POLL_OUT || wait_what == CURL_POLL_INOUT) rb_fd_set(wait_fd, &wfds);
+          rb_fd_set(wait_fd, &efds);
+          maxfd = wait_fd;
+        }
+        int rc = rb_thread_fd_select(maxfd + 1, &rfds, &wfds, &efds, &tv);
+        curb_debugf("[curb.socket] rb_thread_fd_select(single) rc=%d fd=%d", rc, wait_fd);
+        if (rc < 0) {
+          rb_fd_term(&rfds); rb_fd_term(&wfds); rb_fd_term(&efds);
+          if (errno != EINTR) rb_raise(rb_eRuntimeError, "select(): %s", strerror(errno));
+          continue;
+        }
+        any_ready = (rc > 0);
+        did_timeout = (rc == 0);
+        rb_fd_term(&rfds); rb_fd_term(&wfds); rb_fd_term(&efds);
+      }
+    } else { /* count_tracked == 0 */
+      rb_thread_wait_for(tv);
+      did_timeout = 1;
+    }
+
+    if (did_timeout) {
+      mrc = curl_multi_socket_action(rbcm->handle, CURL_SOCKET_TIMEOUT, 0, &rbcm->running);
+      curb_debugf("[curb.socket] socket_action timeout -> mrc=%d running=%d", mrc, rbcm->running);
+      if (mrc != CURLM_OK) raise_curl_multi_error_exception(mrc);
+    } else if (any_ready) {
+      if (count_tracked == 1 && wait_fd >= 0) {
+        int flags = 0;
+        if (wait_what == CURL_POLL_IN || wait_what == CURL_POLL_INOUT) flags |= CURL_CSELECT_IN;
+        if (wait_what == CURL_POLL_OUT || wait_what == CURL_POLL_INOUT) flags |= CURL_CSELECT_OUT;
+        flags |= CURL_CSELECT_ERR;
+        char b[32];
+        curb_debugf("[curb.socket] socket_action fd=%d flags=%s", wait_fd, cselect_flags_str(flags, b, sizeof(b)));
+        mrc = curl_multi_socket_action(rbcm->handle, (curl_socket_t)wait_fd, flags, &rbcm->running);
+        curb_debugf("[curb.socket] socket_action -> mrc=%d running=%d", mrc, rbcm->running);
+        if (mrc != CURLM_OK) raise_curl_multi_error_exception(mrc);
+      }
+    }
+
+    rb_curl_multi_read_info(self, rbcm->handle);
+    curb_debugf("[curb.socket] processed completions; running=%d", rbcm->running);
+    if (block != Qnil) rb_funcall(block, rb_intern("call"), 1, self);
+  }
+}
+
+struct socket_drive_args { VALUE self; ruby_curl_multi *rbcm; multi_socket_ctx *ctx; VALUE block; };
+static VALUE ruby_curl_multi_socket_drive_body(VALUE argp) {
+  struct socket_drive_args *a = (struct socket_drive_args *)argp;
+  rb_curl_multi_socket_drive(a->self, a->rbcm, a->ctx, a->block);
+  return Qtrue;
+}
+struct socket_cleanup_args { ruby_curl_multi *rbcm; multi_socket_ctx *ctx; };
+static VALUE ruby_curl_multi_socket_drive_ensure(VALUE argp) {
+  struct socket_cleanup_args *c = (struct socket_cleanup_args *)argp;
+  if (c->rbcm && c->rbcm->handle) {
+    curl_multi_setopt(c->rbcm->handle, CURLMOPT_SOCKETFUNCTION, NULL);
+    curl_multi_setopt(c->rbcm->handle, CURLMOPT_SOCKETDATA, NULL);
+    curl_multi_setopt(c->rbcm->handle, CURLMOPT_TIMERFUNCTION, NULL);
+    curl_multi_setopt(c->rbcm->handle, CURLMOPT_TIMERDATA, NULL);
+  }
+  if (c->ctx && c->ctx->sock_map) {
+    st_free_table(c->ctx->sock_map);
+    c->ctx->sock_map = NULL;
+  }
+  return Qnil;
+}
+
+VALUE ruby_curl_multi_socket_perform(int argc, VALUE *argv, VALUE self) {
+  ruby_curl_multi *rbcm;
+  VALUE block = Qnil;
+  rb_scan_args(argc, argv, "0&", &block);
+
+  Data_Get_Struct(self, ruby_curl_multi, rbcm);
+
+  multi_socket_ctx ctx;
+  ctx.sock_map = st_init_numtable();
+  ctx.timeout_ms = -1;
+
+  /* install socket/timer callbacks */
+  curl_multi_setopt(rbcm->handle, CURLMOPT_SOCKETFUNCTION, multi_socket_cb);
+  curl_multi_setopt(rbcm->handle, CURLMOPT_SOCKETDATA, &ctx);
+  curl_multi_setopt(rbcm->handle, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
+  curl_multi_setopt(rbcm->handle, CURLMOPT_TIMERDATA, &ctx);
+
+  /* run using socket action loop with ensure-cleanup */
+  struct socket_drive_args body_args = { self, rbcm, &ctx, block };
+  struct socket_cleanup_args ensure_args = { rbcm, &ctx };
+  rb_ensure(ruby_curl_multi_socket_drive_body, (VALUE)&body_args, ruby_curl_multi_socket_drive_ensure, (VALUE)&ensure_args);
+
+  /* finalize */
+  rb_curl_multi_read_info(self, rbcm->handle);
+  if (block != Qnil) rb_funcall(block, rb_intern("call"), 1, self);
+  if (cCurlMutiAutoClose == 1) rb_funcall(self, rb_intern("close"), 0);
+
+  return Qtrue;
+}
+#endif /* socket-action implementation */
+
 #ifdef _WIN32
 void create_crt_fd(fd_set *os_set, fd_set *crt_set)
 {
@@ -588,23 +999,38 @@ VALUE ruby_curl_multi_perform(int argc, VALUE *argv, VALUE self) {
       /* or buggy versions libcurl sometimes reports huge timeouts... let's cap it */
     }
 
-#
+#if defined(HAVE_CURL_MULTI_WAIT) && !defined(HAVE_RB_THREAD_FD_SELECT)
     {
       struct wait_args wait_args;
      wait_args.handle = rbcm->handle;
      wait_args.timeout_ms = timeout_milliseconds;
      wait_args.numfds = 0;
+     /*
+      * When a Fiber scheduler is available (Ruby >= 3.x), rb_thread_fd_select
+      * integrates with it. If we have rb_thread_fd_select available at build
+      * time, we avoid curl_multi_wait entirely (see preprocessor guard above)
+      * and use the fdset branch below. Otherwise, we use curl_multi_wait and
+      * release the GVL so Ruby threads can continue to run.
+      */
+     CURLMcode wait_rc;
 #if defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
-
-
+     wait_rc = (CURLMcode)(intptr_t)rb_thread_call_without_gvl(
+         curl_multi_wait_wrapper, &wait_args, RUBY_UBF_IO, NULL
+     );
 #else
-
+     wait_rc = curl_multi_wait(rbcm->handle, NULL, 0, timeout_milliseconds, &wait_args.numfds);
 #endif
     if (wait_rc != CURLM_OK) {
       raise_curl_multi_error_exception(wait_rc);
     }
     if (wait_args.numfds == 0) {
+#ifdef HAVE_RB_THREAD_FD_SELECT
+      struct timeval tv_sleep = tv_100ms;
+      /* Sleep in a scheduler-aware way. */
+      rb_thread_fd_select(0, NULL, NULL, NULL, &tv_sleep);
+#else
      rb_thread_wait_for(tv_100ms);
+#endif
    }
    /* Process pending transfers after waiting */
    rb_curl_multi_run(self, rbcm->handle, &(rbcm->running));
@@ -628,7 +1054,12 @@ VALUE ruby_curl_multi_perform(int argc, VALUE *argv, VALUE self) {
 
     if (maxfd == -1) {
       /* libcurl recommends sleeping for 100ms */
+#if HAVE_RB_THREAD_FD_SELECT
+      struct timeval tv_sleep = tv_100ms;
+      rb_thread_fd_select(0, NULL, NULL, NULL, &tv_sleep);
+#else
       rb_thread_wait_for(tv_100ms);
+#endif
       rb_curl_multi_run( self, rbcm->handle, &(rbcm->running) );
       rb_curl_multi_read_info( self, rbcm->handle );
       if (block != Qnil) { rb_funcall(block, rb_intern("call"), 1, self); }
@@ -650,12 +1081,37 @@ VALUE ruby_curl_multi_perform(int argc, VALUE *argv, VALUE self) {
     fdset_args.tv = &tv;
 #endif
 
-#
+#if HAVE_RB_THREAD_FD_SELECT
+    /* Prefer scheduler-aware waiting when available. Build rb_fdset_t sets. */
+    {
+      rb_fdset_t rfds, wfds, efds;
+      rb_fd_init(&rfds);
+      rb_fd_init(&wfds);
+      rb_fd_init(&efds);
+#ifdef _WIN32
+      /* On Windows, iterate explicit fd arrays for CRT fds. */
+      int i;
+      for (i = 0; i < crt_fdread.fd_count; i++) rb_fd_set(crt_fdread.fd_array[i], &rfds);
+      for (i = 0; i < crt_fdwrite.fd_count; i++) rb_fd_set(crt_fdwrite.fd_array[i], &wfds);
+      for (i = 0; i < crt_fdexcep.fd_count; i++) rb_fd_set(crt_fdexcep.fd_array[i], &efds);
+      rc = rb_thread_fd_select(0, &rfds, &wfds, &efds, &tv);
+#else
+      int fd;
+      for (fd = 0; fd <= maxfd; fd++) {
+        if (FD_ISSET(fd, &fdread)) rb_fd_set(fd, &rfds);
+        if (FD_ISSET(fd, &fdwrite)) rb_fd_set(fd, &wfds);
+        if (FD_ISSET(fd, &fdexcep)) rb_fd_set(fd, &efds);
+      }
+      rc = rb_thread_fd_select(maxfd+1, &rfds, &wfds, &efds, &tv);
+#endif
+      rb_fd_term(&rfds);
+      rb_fd_term(&wfds);
+      rb_fd_term(&efds);
+    }
+#elif defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
     rc = (int)(VALUE) rb_thread_call_without_gvl((void *(*)(void *))curb_select, &fdset_args, RUBY_UBF_IO, 0);
 #elif HAVE_RB_THREAD_BLOCKING_REGION
     rc = rb_thread_blocking_region(curb_select, &fdset_args, RUBY_UBF_IO, 0);
-#elif HAVE_RB_THREAD_FD_SELECT
-    rc = rb_thread_fd_select(maxfd+1, &fdread, &fdwrite, &fdexcep, &tv);
 #else
     rc = rb_thread_select(maxfd+1, &fdread, &fdwrite, &fdexcep, &tv);
 #endif
@@ -679,7 +1135,7 @@ VALUE ruby_curl_multi_perform(int argc, VALUE *argv, VALUE self) {
       if (block != Qnil) { rb_funcall(block, rb_intern("call"), 1, self); }
       break;
     }
-#endif /*
+#endif /* disabled curl_multi_wait: use fdsets */
   }
 
   } while( rbcm->running );
@@ -727,6 +1183,12 @@ void init_curb_multi() {
   rb_define_method(cCurlMulti, "pipeline=", ruby_curl_multi_pipeline, 1);
   rb_define_method(cCurlMulti, "_add", ruby_curl_multi_add, 1);
   rb_define_method(cCurlMulti, "_remove", ruby_curl_multi_remove, 1);
+  /* Prefer a socket-action based perform when supported and scheduler-aware. */
+#if defined(HAVE_CURL_MULTI_SOCKET_ACTION) && defined(HAVE_CURLMOPT_SOCKETFUNCTION) && defined(HAVE_RB_THREAD_FD_SELECT) && !defined(_WIN32)
+  extern VALUE ruby_curl_multi_socket_perform(int argc, VALUE *argv, VALUE self);
+  rb_define_method(cCurlMulti, "perform", ruby_curl_multi_socket_perform, -1);
+#else
   rb_define_method(cCurlMulti, "perform", ruby_curl_multi_perform, -1);
+#endif
  rb_define_method(cCurlMulti, "_close", ruby_curl_multi_close, 0);
 }
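For readers less familiar with libcurl's multi-socket API, the following standalone sketch (not curb code; the example.com URL, the plain select() wait, and the 1-second cap are illustrative assumptions) shows the same event-loop shape the socket-action path above follows: libcurl announces the fds and timeout it cares about via CURLMOPT_SOCKETFUNCTION/CURLMOPT_TIMERFUNCTION, the caller waits on them, then reports readiness back with curl_multi_socket_action(). In curb the wait step is replaced by scheduler-aware primitives such as rb_thread_fd_select and rb_fiber_scheduler_io_wait.

/* Minimal socket-action sketch against plain libcurl. Error handling and
 * curl_multi_info_read() draining (curb's rb_curl_multi_read_info) omitted. */
#include <curl/curl.h>
#include <sys/select.h>

static fd_set read_set, write_set;
static int max_fd = -1;
static long cur_timeout_ms = -1;

/* libcurl tells us which fds it wants watched, and for what. */
static int sock_cb(CURL *e, curl_socket_t s, int what, void *userp, void *sockp) {
  (void)e; (void)userp; (void)sockp;
  FD_CLR(s, &read_set); FD_CLR(s, &write_set);
  if (what == CURL_POLL_IN || what == CURL_POLL_INOUT) FD_SET(s, &read_set);
  if (what == CURL_POLL_OUT || what == CURL_POLL_INOUT) FD_SET(s, &write_set);
  if (what != CURL_POLL_REMOVE && (int)s > max_fd) max_fd = (int)s;
  return 0;
}

/* libcurl tells us how long we may wait before calling it again. */
static int timer_cb(CURLM *m, long timeout_ms, void *userp) {
  (void)m; (void)userp;
  cur_timeout_ms = timeout_ms;
  return 0;
}

int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURLM *multi = curl_multi_init();
  CURL *easy = curl_easy_init();
  curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
  curl_multi_add_handle(multi, easy);

  FD_ZERO(&read_set); FD_ZERO(&write_set);
  curl_multi_setopt(multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
  curl_multi_setopt(multi, CURLMOPT_TIMERFUNCTION, timer_cb);

  int running = 0;
  /* prime the state, as rb_curl_multi_socket_drive() does */
  curl_multi_socket_action(multi, CURL_SOCKET_TIMEOUT, 0, &running);
  while (running) {
    fd_set r = read_set, w = write_set;
    long ms = (cur_timeout_ms < 0 || cur_timeout_ms > 1000) ? 1000 : cur_timeout_ms;
    struct timeval tv = { ms / 1000, (ms % 1000) * 1000 };
    int rc = select(max_fd + 1, &r, &w, NULL, &tv);
    if (rc == 0) {
      /* nothing ready: report a timeout so libcurl can retry/advance state */
      curl_multi_socket_action(multi, CURL_SOCKET_TIMEOUT, 0, &running);
    } else if (rc > 0) {
      /* feed each ready fd back to libcurl with CURL_CSELECT_* flags */
      for (int fd = 0; fd <= max_fd; fd++) {
        int flags = 0;
        if (FD_ISSET(fd, &r)) flags |= CURL_CSELECT_IN;
        if (FD_ISSET(fd, &w)) flags |= CURL_CSELECT_OUT;
        if (flags) curl_multi_socket_action(multi, fd, flags, &running);
      }
    }
  }
  curl_multi_remove_handle(multi, easy);
  curl_easy_cleanup(easy);
  curl_multi_cleanup(multi);
  curl_global_cleanup();
  return 0;
}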
data/ext/curb_postfield.c
CHANGED
@@ -209,6 +209,9 @@ void curl_postfield_free(ruby_curl_postfield *rbcpf) {
  */
 static VALUE ruby_curl_postfield_new_content(int argc, VALUE *argv, VALUE klass) {
   ruby_curl_postfield *rbcpf = ALLOC(ruby_curl_postfield);
+  if (!rbcpf) {
+    rb_raise(rb_eNoMemError, "Failed to allocate memory for Curl::PostField");
+  }
 
   // wierdness - we actually require two args, unless a block is provided, but
   // we have to work that out below.
@@ -255,6 +258,9 @@ static VALUE ruby_curl_postfield_new_content(int argc, VALUE *argv, VALUE klass)
 static VALUE ruby_curl_postfield_new_file(int argc, VALUE *argv, VALUE klass) {
   // TODO needs to handle content-type too
   ruby_curl_postfield *rbcpf = ALLOC(ruby_curl_postfield);
+  if (!rbcpf) {
+    rb_raise(rb_eNoMemError, "Failed to allocate memory for Curl::PostField");
+  }
 
   rb_scan_args(argc, argv, "21&", &rbcpf->name, &rbcpf->local_file, &rbcpf->remote_file, &rbcpf->content_proc);
 
data/ext/curb_upload.c
CHANGED
@@ -24,6 +24,9 @@ static void curl_upload_free(ruby_curl_upload *rbcu) {
 VALUE ruby_curl_upload_new(VALUE klass) {
   VALUE upload;
   ruby_curl_upload *rbcu = ALLOC(ruby_curl_upload);
+  if (!rbcu) {
+    rb_raise(rb_eNoMemError, "Failed to allocate memory for Curl::Upload");
+  }
   rbcu->stream = Qnil;
   rbcu->offset = 0;
   upload = Data_Wrap_Struct(klass, curl_upload_mark, curl_upload_free, rbcu);
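The guards added in curb_postfield.c and curb_upload.c sit inside the usual ALLOC + Data_Wrap_Struct constructor shape for wrapping a C struct in a Ruby object. Below is a minimal self-contained sketch of that pattern under stated assumptions: the MyUpload class, its fields, and the mark behavior are hypothetical stand-ins, not curb's actual definitions.

/* Generic Ruby C extension constructor sketch (hypothetical names). */
#include <ruby.h>

typedef struct {
  long offset;
  VALUE stream;                   /* a wrapped Ruby object, e.g. an IO */
} my_upload;

static void my_upload_mark(void *p) {
  my_upload *u = (my_upload *)p;
  rb_gc_mark(u->stream);          /* keep the referenced Ruby object alive */
}

static void my_upload_free(void *p) {
  xfree(p);                       /* release the C struct when the object is GC'd */
}

static VALUE my_upload_new(VALUE klass) {
  my_upload *u = ALLOC(my_upload);
  if (!u) {                       /* defensive guard, mirroring the checks added above */
    rb_raise(rb_eNoMemError, "Failed to allocate memory");
  }
  u->stream = Qnil;
  u->offset = 0;
  /* tie the struct's lifetime to a Ruby object with mark/free callbacks */
  return Data_Wrap_Struct(klass, my_upload_mark, my_upload_free, u);
}

void Init_my_upload(void) {
  VALUE cMyUpload = rb_define_class("MyUpload", rb_cObject);
  rb_define_singleton_method(cMyUpload, "new", my_upload_new, 0);
}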