iodine 0.2.9 → 0.2.10


@@ -240,6 +240,8 @@ static int for_each_header_data(VALUE key, VALUE val, VALUE _res) {
 
 // writes the body to the response object
 static VALUE for_each_body_string(VALUE str, VALUE _res, int argc, VALUE argv) {
+  (void)(argv);
+  (void)(argc);
   // fprintf(stderr, "For_each - body\n");
   // write body
   if (TYPE(str) != T_STRING) {
@@ -264,6 +266,7 @@ static VALUE for_each_body_string(VALUE str, VALUE _res, int argc, VALUE argv) {
 
 static inline int ruby2c_response_send(http_response_s *response,
                                        VALUE rbresponse, VALUE env) {
+  (void)(env);
   VALUE body = rb_ary_entry(rbresponse, 2);
   if (response->status < 200 || response->status == 204 ||
       response->status == 304) {
@@ -551,7 +554,7 @@ int iodine_http_review(void) {
   VALUE iodine_version = rb_const_get(Iodine, rb_intern("VERSION"));
   VALUE ruby_version = rb_const_get(Iodine, rb_intern("RUBY_VERSION"));
   if (public_folder)
-    fprintf(stderr, "Starting up Iodine Http Server:\n"
+    fprintf(stderr, "Starting up Iodine HTTP Server:\n"
                     " * Ruby v.%s\n * Iodine v.%s \n"
                     " * %lu max concurrent connections / open files\n"
                     " * Serving static files from:\n"
@@ -559,7 +562,7 @@ int iodine_http_review(void) {
            StringValueCStr(ruby_version), StringValueCStr(iodine_version),
            (size_t)sock_max_capacity(), public_folder);
   else
-    fprintf(stderr, "Starting up Iodine Http Server:\n"
+    fprintf(stderr, "Starting up Iodine HTTP Server:\n"
                     " * Ruby v.%s\n * Iodine v.%s \n"
                     " * %lu max concurrent connections / open files\n"
                     "\n",
@@ -1,12 +1,12 @@
 /*
-copyright: Boaz segev, 2016-2017
+copyright: Boaz Segev, 2016-2017
 license: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
 */
-// clang-format off
-#include "rb-libasync.h"
-// clang-format on
+
+#include "rb-libasync.h" // enable this line for Iodine's Ruby
+
 #ifndef _GNU_SOURCE
 #define _GNU_SOURCE
 #endif
@@ -30,7 +30,7 @@ Performance options.
 */
 
 #ifndef ASYNC_TASK_POOL_SIZE
-#define ASYNC_TASK_POOL_SIZE 170
+#define ASYNC_TASK_POOL_SIZE 1024
 #endif
 
 /* Spinlock vs. Mutex data protection. */
@@ -40,7 +40,7 @@ Performance options.
 
 /* use pipe for wakeup if == 0 else, use nanosleep when no tasks. */
 #ifndef ASYNC_NANO_SLEEP
-#define ASYNC_NANO_SLEEP 16777216 // 8388608 // 1048576 // 524288
+#define ASYNC_NANO_SLEEP 8388608 // 1048576 // 524288 // 16777216
 #endif
 
 /* Sentinal thread to respawn crashed threads - limited crash resistance. */
@@ -253,7 +253,9 @@ static inline void pause_thread() {
     read(async->io.in, &tmp, 1);
   }
 #else
-  struct timespec act, tm = {.tv_sec = 0, .tv_nsec = ASYNC_NANO_SLEEP};
+  struct timespec act,
+      tm = {.tv_sec = 0,
+            .tv_nsec = ASYNC_NANO_SLEEP * (async ? async->thread_count : 1)};
   nanosleep(&tm, &act);
   // sched_yield();
 #endif
@@ -298,7 +300,8 @@ static void on_err_signal(int sig) {
 }
 
 // The worker cycle
-static void *worker_thread_cycle(void *_) {
+static void *worker_thread_cycle(void *unused) {
+  (void)(unused);
   // register error signals when using a sentinal
   if (ASYNC_USE_SENTINEL) {
     signal(SIGSEGV, on_err_signal);
@@ -385,6 +388,11 @@ Use:
 */
 void async_perform() { perform_tasks(); }
 
+/**
+Returns TRUE (not 0) if there are any pending tasks.
+*/
+int async_any(void) { return (async && async->tasks); }
+
 /**
 Schedules a task to be performed by the thread pool.
 
@@ -462,19 +470,22 @@ Test
 static spn_lock_i i_lock = SPN_LOCK_INIT;
 static size_t i_count = 0;
 
-static void sample_task(void *_) {
+static void sample_task(void *unused) {
+  (void)(unused);
   spn_lock(&i_lock);
   i_count++;
   spn_unlock(&i_lock);
 }
 
-static void sched_sample_task(void *_) {
+static void sched_sample_task(void *unused) {
+  (void)(unused);
   for (size_t i = 0; i < 1024; i++) {
     async_run(sample_task, async);
   }
 }
 
-static void text_task_text(void *_) {
+static void text_task_text(void *unused) {
+  (void)(unused);
   spn_lock(&i_lock);
   fprintf(stderr, "this text should print before async_finish returns\n");
   spn_unlock(&i_lock);
@@ -1,5 +1,5 @@
 /*
-Copyright: Boaz segev, 2016-2017
+Copyright: Boaz Segev, 2016-2017
 License: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
@@ -57,6 +57,11 @@ Use:
 */
 void async_perform();
 
+/**
+Returns TRUE (not 0) if there are any pending tasks.
+*/
+int async_any(void);
+
 /**
 Waits for all the present tasks to complete and threads to exist.
 
@@ -1,5 +1,5 @@
 /*
-Copyright: Boaz segev, 2016-2017
+Copyright: Boaz Segev, 2016-2017
 License: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
@@ -34,7 +34,7 @@ Callbacks
 */
 
 #pragma weak reactor_on_close
-void reactor_on_close(intptr_t uuid) {}
+void reactor_on_close(intptr_t uuid) { (void)(uuid); }
 
 #pragma weak reactor_on_data
 void reactor_on_data(intptr_t uuid) {
@@ -44,14 +44,17 @@ void reactor_on_data(intptr_t uuid) {
 }
 
 #pragma weak reactor_on_ready
-void reactor_on_ready(intptr_t uuid) {}
+void reactor_on_ready(intptr_t uuid) { (void)(uuid); }
 
 /* *****************************************************************************
 Integrate the `libsock` library if exists.
 */
 
 #pragma weak sock_flush
-ssize_t sock_flush(intptr_t uuid) { return 0; }
+ssize_t sock_flush(intptr_t uuid) {
+  (void)(uuid);
+  return 0;
+}
 
 #pragma weak sock_close
 void sock_close(intptr_t uuid) {
@@ -118,7 +121,7 @@ thing.
 This method promises that the timer will be repeated when running on epoll. This
 method is redundent on kqueue.
 */
-void reactor_reset_timer(intptr_t uuid) {} /* EPoll only */
+void reactor_reset_timer(intptr_t uuid) { (void)(uuid); } /* EPoll only */
 
 /**
 Creates a timer file descriptor, system dependent.
@@ -1,5 +1,5 @@
 /*
-Copyright: Boaz segev, 2016-2017
+Copyright: Boaz Segev, 2016-2017
 License: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
@@ -1,5 +1,5 @@
 /*
-Copyright: Boaz segev, 2016-2017
+Copyright: Boaz Segev, 2016-2017
 License: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
@@ -101,8 +101,8 @@ static void server_cleanup(void) {
   server_on_shutdown();
   // free any lock objects (no need to change code if changing locking systems)
   for (size_t i = 0; i < server_data.capacity - 1; i++) {
-    server_data.fds[i] = (fd_data_s){0};
     lock_fd_destroy(server_data.fds + i);
+    server_data.fds[i] = (fd_data_s){.protocol = NULL};
   }
   // free memory
   if (server_data.fds) {
@@ -121,7 +121,7 @@ static void init_server(void) {
                              PROT_READ | PROT_WRITE | PROT_EXEC,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   for (size_t i = 0; i < server_data.capacity - 1; i++) {
-    server_data.fds[i] = (fd_data_s){0};
+    server_data.fds[i] = (fd_data_s){.protocol = NULL};
     lock_fd_init(server_data.fds + i);
   }
 }
@@ -219,6 +219,7 @@ http://www.microhowto.info/howto/reap_zombie_processes_using_a_sigchld_handler.h
 */
 
 void reap_child_handler(int sig) {
+  (void)(sig);
   int old_errno = errno;
   while (waitpid(-1, NULL, WNOHANG) > 0)
     ;
@@ -278,11 +279,11 @@ static void listener_on_data(intptr_t uuid, protocol_s *_listener) {
   intptr_t new_client;
   struct ListenerProtocol *listener = (void *)_listener;
   while ((new_client = sock_accept(uuid)) != -1) {
-    // make sure it's a clean slate... although it should be assumed to be.
-    lock_uuid(new_client);
-    clear_uuid(new_client);
-    unlock_uuid(new_client);
-    // assume that sock_accept calls reactor_on_close if needed
+    // assume that sock_accept calls if needed
+    // it's a clean slate in reactor_on_close ...
+    // lock_uuid(new_client);
+    // clear_uuid(new_client);
+    // unlock_uuid(new_client);
     protocol_uuid(new_client) = listener->on_open(new_client, listener->udata);
     if (protocol_uuid(new_client)) {
       uuid_data(new_client).active = server_data.last_tick;
@@ -442,8 +443,17 @@ static inline void timeout_review(void) {
   }
 }
 
-static void server_cycle(void *_) {
+static void server_cycle(void *unused) {
+  (void)(unused);
   static int8_t perform_idle = 1;
+
+#if SERVER_DELAY_IO
+  if (async_any()) {
+    async_run(server_cycle, NULL);
+    return;
+  }
+#endif
+
   time(&server_data.last_tick);
   if (server_data.running) {
     timeout_review();
@@ -458,9 +468,6 @@ static void server_cycle(void *_) {
     } else {
       perform_idle = 1;
     }
-#if SERVER_DELAY_IO
-    async_perform();
-#endif
     async_run(server_cycle, NULL);
   }
 }
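
Note on the two server_cycle hunks above: with SERVER_DELAY_IO set, the IO cycle is now deferred by checking the new async_any() primitive and re-scheduling itself, rather than calling async_perform() inline at the end of the cycle. As a minimal sketch of the same primitive used outside the diff, a caller could drain the task queue before stopping the pool. The helper name and the "libasync.h" include are assumptions, and async_finish() is assumed here to be the blocking shutdown call mentioned in the test output above; none of this is part of the diff:

    #include "libasync.h" /* assumed header name for the libasync API */

    /* Illustrative sketch only, built from the calls this release exposes:
       async_any(), async_perform() and async_finish(). */
    static void drain_then_stop(void) {
      while (async_any())  /* TRUE (non 0) while tasks are still pending */
        async_perform();   /* run pending tasks on the calling thread */
      async_finish();      /* wait for tasks to complete and threads to exit */
    }
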
@@ -531,7 +538,7 @@ ssize_t server_run(struct ServerSettings settings) {
   pid_t *children = NULL;
   if (settings.processes > 1) {
     children = malloc(sizeof(*children) * settings.processes);
-    for (size_t i = 0; i < settings.processes - 1; i++) {
+    for (size_t i = 0; i < (size_t)(settings.processes - 1); i++) {
       if (fork() == 0)
         break;
     }
@@ -647,6 +654,17 @@ void server_set_timeout(intptr_t fd, uint8_t timeout) {
   uuid_data(fd).timeout = timeout;
   unlock_uuid(fd);
 }
+/**
+Gets a connection's timeout, type of uint8_t.
+
+A value of 0 might mean that no timeout was set OR that the connection inquired
+about was invalid.
+*/
+uint8_t server_get_timeout(intptr_t fd) {
+  if (valid_uuid(fd) == 0)
+    return 0;
+  return uuid_data(fd).timeout;
+}
 
 /** Attaches an existing connection (fd) to the server's reactor and protocol
 management system, so that the server can be used also to manage connection
@@ -770,7 +788,7 @@ static void perform_single_task(void *task) {
 static void perform_each_task(void *task) {
   intptr_t uuid;
   protocol_s *protocol;
-  while (p2task(task).target < server_data.capacity) {
+  while (p2task(task).target < (intptr_t)server_data.capacity) {
     uuid = sock_fd2uuid(p2task(task).target);
     if (uuid == -1 || uuid == p2task(task).origin) {
       ++p2task(task).target;
@@ -1,5 +1,5 @@
 /*
-Copyright: Boaz segev, 2016-2017
+Copyright: Boaz Segev, 2016-2017
 License: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
@@ -45,10 +45,9 @@ messages regarding the server state (start / finish / listen messages).
 #warning Lib-Server dependency versions are not in sync. Please review API versions.
 #endif
 
-#ifndef __unused
-#define __unused __attribute__((unused))
+#ifndef UNUSED_FUNC
+#define UNUSED_FUNC __attribute__((unused))
 #endif
-
 /** \file
 ## LibServer - a dynamic protocol network services library
 
@@ -366,6 +365,13 @@ Sets a connection's timeout.
 Returns -1 on error (i.e. connection closed), otherwise returns 0.
 */
 void server_set_timeout(intptr_t uuid, uint8_t timeout);
+/**
+Returns a connection's timeout, as a `uint8_t` value.
+
+A value of 0 might mean that no timeout was set OR that the connection inquired
+about was invalid.
+*/
+uint8_t server_get_timeout(intptr_t uuid);
 
 /** Attaches an existing connection (fd) to the server's reactor and protocol
 management system, so that the server can be used also to manage connection
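
Note on the server_get_timeout() addition above: it mirrors server_set_timeout(), and a zero return is ambiguous by design (no timeout set, or an invalid connection). A minimal usage sketch follows; the wrapper name and the "libserver.h" include are illustrative assumptions, not part of the diff:

    #include <stdint.h>
    #include "libserver.h" /* assumed header name */

    /* Illustrative sketch only: apply a fallback timeout when none is set.
       A 0 return may also mean the uuid is invalid; the return value alone
       cannot tell the two cases apart. */
    static void ensure_timeout(intptr_t uuid, uint8_t fallback) {
      if (server_get_timeout(uuid) == 0)
        server_set_timeout(uuid, fallback);
    }
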
@@ -467,8 +473,8 @@ int server_run_every(size_t milliseconds, size_t repetitions,
 /** Creates a system timer (at the cost of 1 file descriptor) and pushes the
 timer to the reactor. The task will NOT repeat. Returns -1 on error or the
 new file descriptor on succeess. */
-__unused static inline int server_run_after(size_t milliseconds,
-                                            void task(void *), void *arg) {
+UNUSED_FUNC static inline int server_run_after(size_t milliseconds,
+                                               void task(void *), void *arg) {
   return server_run_every(milliseconds, 1, task, arg, NULL);
 }
 
@@ -1,5 +1,5 @@
 /*
-Copyright: Boaz segev, 2016-2017
+Copyright: Boaz Segev, 2016-2017
 License: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
@@ -35,21 +35,28 @@ Support `libreact` on_close callback, if exist.
 */
 
 #pragma weak reactor_on_close
-void reactor_on_close(intptr_t uuid) {}
+void reactor_on_close(intptr_t uuid) { (void)(uuid); }
 #pragma weak reactor_remove
-int reactor_remove(intptr_t uuid) { return -1; }
+int reactor_remove(intptr_t uuid) {
+  (void)(uuid);
+  return -1;
+}
 
 /* *****************************************************************************
 Support timeout setting.
 */
 #pragma weak sock_touch
-void sock_touch(intptr_t uuid) {}
+void sock_touch(intptr_t uuid) { (void)(uuid); }
 
 /* *****************************************************************************
 Support event based `write` scheduling.
 */
 #pragma weak async_run
-int async_run(void (*task)(void *), void *arg) { return -1; }
+int async_run(void (*task)(void *), void *arg) {
+  (void)(task);
+  (void)(arg);
+  return -1;
+}
 
 /* *****************************************************************************
 OS Sendfile settings.
@@ -131,7 +138,7 @@ ssize_t sock_max_capacity(void) {
   flim = OPEN_MAX;
 #endif
   // try to maximize limits - collect max and set to max
-  struct rlimit rlim = {0};
+  struct rlimit rlim = {.rlim_max = 0};
   getrlimit(RLIMIT_NOFILE, &rlim);
   // printf("Meximum open files are %llu out of %llu\n", rlim.rlim_cur,
   //        rlim.rlim_max);
@@ -146,7 +153,7 @@ ssize_t sock_max_capacity(void) {
   // printf("Meximum open files are %llu out of %llu\n", rlim.rlim_cur,
   //        rlim.rlim_max);
   // if the current limit is higher than it was, update
-  if (flim < rlim.rlim_cur)
+  if (flim < ((ssize_t)rlim.rlim_cur))
     flim = rlim.rlim_cur;
   // return what we have
   return flim;
@@ -344,7 +351,7 @@ static inline int sock_flush_fd_failed(int fd) {
 
 #if defined(__linux__) /* linux sendfile API */
 static inline int sock_flush_os_sendfile(int fd) {
-  size_t sent;
+  ssize_t sent;
   sock_packet_s *packet = fd_info[fd].packet;
   sent =
       sendfile64(fd, (int)((ssize_t)packet->buffer), &packet->metadata.offset,
@@ -765,7 +772,7 @@ Returns TRUE (non 0) if there is data waiting to be written to the socket in
 the
 user-land buffer.
 */
-_Bool sock_packets_pending(intptr_t uuid) {
+int sock_packets_pending(intptr_t uuid) {
   return fd_info && uuid2info(uuid).packet != NULL;
 }
 
@@ -1,5 +1,5 @@
 /*
-Copyright: Boaz segev, 2016-2017
+Copyright: Boaz Segev, 2016-2017
 License: MIT
 
 Feel free to copy, use and enjoy according to the license provided.
@@ -25,8 +25,8 @@ The library is designed to be thread safe, but not fork safe.
 #include <sys/types.h>
 #include <unistd.h>
 
-#ifndef __unused
-#define __unused __attribute__((unused))
+#ifndef UNUSED_FUNC
+#define UNUSED_FUNC __attribute__((unused))
 #endif
 
 /* *****************************************************************************
@@ -36,7 +36,7 @@ This information is also useful when implementing read / write hooks.
 */
 #ifndef BUFFER_PACKET_SIZE
 #define BUFFER_PACKET_SIZE \
-  (1024 * 16) /* Use 32 Kb. With sendfile, 16 Kb might be better. */
+  (1024 * 16) /* Use 32 Kb. With sendfile, 16 Kb appears to work better. */
 #endif
 #ifndef BUFFER_FILE_READ_SIZE
 #define BUFFER_FILE_READ_SIZE BUFFER_PACKET_SIZE
@@ -297,8 +297,8 @@ the maximum amount of data to be sent.
 
 Returns -1 and closes the file on error. Returns 0 on success.
 */
-__unused static inline ssize_t sock_sendfile(intptr_t uuid, int source_fd,
-                                             off_t offset, size_t length) {
+UNUSED_FUNC static inline ssize_t sock_sendfile(intptr_t uuid, int source_fd,
+                                                off_t offset, size_t length) {
   return sock_write2(.fduuid = uuid, .buffer = (void *)((intptr_t)source_fd),
                      .length = length, .is_fd = 1, .offset = offset);
 }
@@ -415,7 +415,7 @@ ssize_t sock_send_packet(intptr_t uuid, sock_packet_s *packet);
 Returns TRUE (non 0) if there is data waiting to be written to the socket in the
 user-land buffer.
 */
-_Bool sock_packets_pending(intptr_t uuid);
+int sock_packets_pending(intptr_t uuid);
 
 /**
 Use `sock_free_packet` to free unused packets that were checked-out using
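
Note on the sock_packets_pending() signature change above (from _Bool to int): the documented contract is unchanged, a non-zero return means data is still waiting in the user-land buffer. A minimal flush-loop sketch built on it; the "libsock.h" include and the helper name are assumptions, not part of the diff:

    #include <stdint.h>
    #include "libsock.h" /* assumed header name */

    /* Illustrative sketch only: keep flushing while the user-land buffer
       holds data, treating a negative sock_flush() return as an error or
       closed socket (the weak fallback shown earlier simply returns 0). */
    static void flush_pending(intptr_t uuid) {
      while (sock_packets_pending(uuid)) {
        if (sock_flush(uuid) < 0)
          break;
      }
    }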