iodine 0.7.11 → 0.7.12
Potentially problematic release.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/ext/iodine/fio.c +341 -154
- data/ext/iodine/fio.h +71 -33
- data/ext/iodine/fio_cli.c +25 -24
- data/ext/iodine/fiobj.h +4 -0
- data/ext/iodine/{fiobj4sock.h → fiobj4fio.h} +0 -0
- data/ext/iodine/fiobj_mustache.c +77 -54
- data/ext/iodine/fiobj_mustache.h +21 -3
- data/ext/iodine/http.c +19 -0
- data/ext/iodine/http.h +5 -1
- data/ext/iodine/http_internal.h +0 -2
- data/ext/iodine/iodine_connection.c +1 -1
- data/ext/iodine/iodine_http.c +17 -9
- data/ext/iodine/iodine_mustache.c +174 -91
- data/ext/iodine/mustache_parser.h +772 -695
- data/ext/iodine/redis_engine.c +0 -1
- data/lib/iodine/mustache.rb +4 -4
- data/lib/iodine/version.rb +1 -1
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 754d32e0aca1e86482fbe961e45127e068c9890d3cff09c67e70349872bfa87e
+  data.tar.gz: e7d9c1abacfec11b27eeb8879585f12c010ddb52c64938cffe530d60f325dd69
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f6141afd0ec6d66f0de2cfb0d85d997be21287b0c307c1f370374f5ed2758c77aa206a4e9a09abd7a7ee701423177655e5c67ef3dec69de3a189b3597177c258
+  data.tar.gz: 6bc527391894adeb5431d2fa57e6c8c073036775ad4cd4f4571765f9baa3e01cdb4eb00b39c230d7296d8df707bdffc0f926ef3a8bf66c3d0c21eed28132ea5c
data/CHANGELOG.md
CHANGED
@@ -6,6 +6,10 @@ Please notice that this change log contains changes for upcoming releases as wel
 
 ## Changes:
 
+#### Change log v.0.7.12
+
+**Fix**: (`mustache`) fixed multiple issues with `Iodine::Mustache` and added lambda support for mustache templates.
+
 #### Change log v.0.7.11
 
 **Fix**: (`fio`) Deletes Unix sockets once done listening. Fixes an issue where the files would remain intact.
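An aside on the v.0.7.12 entry above, for readers unfamiliar with the term: mustache "lambdas" are section tags whose value is callable, so `{{#name}}...{{/name}}` hands the section's raw text to the callable instead of rendering the section against the data context. A minimal sketch of that dispatch in C; the names (`mustache_lambda_fn`, `render_section`) are illustrative only and are not part of iodine's actual mustache API:

```c
#include <stdio.h>

/* Hypothetical lambda callback: receives the raw, unrendered section text
 * and returns the text to render in its place. */
typedef const char *(*mustache_lambda_fn)(const char *raw_section_text);

static const char *shout_stub(const char *raw) {
  (void)raw; /* a real lambda would transform `raw` */
  return "HELLO FROM A LAMBDA";
}

/* If a section name resolves to a lambda, call it; otherwise the engine
 * would render the section against the data context as usual. */
static void render_section(const char *name, mustache_lambda_fn lambda,
                           const char *raw) {
  if (lambda)
    printf("%s\n", lambda(raw));
  else
    printf("(render {{#%s}}...{{/%s}} against the data context)\n", name, name);
}

int main(void) {
  render_section("shout", shout_stub, "{{greeting}}, world");
  return 0;
}
```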
data/ext/iodine/fio.c
CHANGED
@@ -21,6 +21,7 @@ Feel free to copy, use and enjoy according to the license provided.
 #include <netinet/in.h>
 #include <netinet/tcp.h>
+#include <poll.h>
 #include <sys/ioctl.h>
 #include <sys/resource.h>
 #include <sys/socket.h>
@@ -52,8 +53,6 @@ Feel free to copy, use and enjoy according to the license provided.
 #elif FIO_ENGINE_KQUEUE
 
 #include <sys/event.h>
-#elif FIO_ENGINE_POLL
-#include <poll.h>
 #endif
 
 /* for kqueue and epoll only */
@@ -327,6 +326,11 @@ static inline int fio_clear_fd(intptr_t fd, uint8_t is_open) {
   return 0;
 }
 
+static inline void fio_force_close_in_poll(intptr_t uuid) {
+  uuid_data(uuid).close = 2;
+  fio_force_close(uuid);
+}
+
 /* *****************************************************************************
 Protocol Locking and UUID validation
 ***************************************************************************** */
@@ -495,12 +499,12 @@ Section Start Marker
 
 
 
+Default Thread / Fork handler
 
+And Concurrency Helpers
 
 
-Task Management
 
-Task / Event schduling and execution
 
 
 
@@ -510,32 +514,253 @@ Section Start Marker
 
 
 
+***************************************************************************** */
 
+/**
+OVERRIDE THIS to replace the default `fork` implementation.
 
+Behaves like the system's `fork`.
+*/
+#pragma weak fio_fork
+int __attribute__((weak)) fio_fork(void) { return fork(); }
 
+/**
+ * OVERRIDE THIS to replace the default pthread implementation.
+ *
+ * Accepts a pointer to a function and a single argument that should be executed
+ * within a new thread.
+ *
+ * The function should allocate memory for the thread object and return a
+ * pointer to the allocated memory that identifies the thread.
+ *
+ * On error NULL should be returned.
+ */
+#pragma weak fio_thread_new
+void *__attribute__((weak))
+fio_thread_new(void *(*thread_func)(void *), void *arg) {
+  pthread_t *thread = malloc(sizeof(*thread));
+  FIO_ASSERT_ALLOC(thread);
+  if (pthread_create(thread, NULL, thread_func, arg))
+    goto error;
+  return thread;
+error:
+  free(thread);
+  return NULL;
+}
 
+/**
+ * OVERRIDE THIS to replace the default pthread implementation.
+ *
+ * Frees the memory associated with a thread identifier (allows the thread to
+ * run it's course, just the identifier is freed).
+ */
+#pragma weak fio_thread_free
+void __attribute__((weak)) fio_thread_free(void *p_thr) {
+  if (*((pthread_t *)p_thr)) {
+    pthread_detach(*((pthread_t *)p_thr));
+  }
+  free(p_thr);
+}
 
+/**
+ * OVERRIDE THIS to replace the default pthread implementation.
+ *
+ * Accepts a pointer returned from `fio_thread_new` (should also free any
+ * allocated memory) and joins the associated thread.
+ *
+ * Return value is ignored.
+ */
+#pragma weak fio_thread_join
+int __attribute__((weak)) fio_thread_join(void *p_thr) {
+  if (!p_thr || !(*((pthread_t *)p_thr)))
+    return -1;
+  pthread_join(*((pthread_t *)p_thr), NULL);
+  *((pthread_t *)p_thr) = (pthread_t)NULL;
+  free(p_thr);
+  return 0;
+}
 
+/* *****************************************************************************
+Suspending and renewing thread execution (signaling events)
 ***************************************************************************** */
+
 #ifndef DEFER_THROTTLE
 #define DEFER_THROTTLE 2097148UL
 #endif
 #ifndef FIO_DEFER_THROTTLE_LIMIT
-#define FIO_DEFER_THROTTLE_LIMIT
+#define FIO_DEFER_THROTTLE_LIMIT 134217472UL
 #endif
 
 /**
- * The
- *
+ * The polling throttling model will use pipes to suspend and resume threads...
+ *
+ * However, it seems the approach is currently broken, at least on macOS.
+ * I don't know why.
 *
- *
- *
- *
+ * If polling is disabled, the progressive throttling model will be used.
+ *
+ * The progressive throttling makes concurrency and parallelism likely, but uses
+ * progressive nano-sleep throttling system that is less exact.
 */
-#ifndef
-#define
+#ifndef FIO_DEFER_THROTTLE_POLL
+#define FIO_DEFER_THROTTLE_POLL 0
 #endif
 
+typedef struct fio_thread_queue_s {
+  fio_ls_embd_s node;
+  int fd_wait;   /* used for weaiting (read signal) */
+  int fd_signal; /* used for signalling (write) */
+} fio_thread_queue_s;
+
+fio_ls_embd_s fio_thread_queue = FIO_LS_INIT(fio_thread_queue);
+fio_lock_i fio_thread_lock = FIO_LOCK_INIT;
+static __thread fio_thread_queue_s fio_thread_data = {.fd_wait = -1,
+                                                      .fd_signal = -1};
+
+FIO_FUNC inline void fio_thread_make_suspendable(void) {
+  if (fio_thread_data.fd_signal >= 0)
+    return;
+  int fd[2] = {0, 0};
+  int ret = pipe(fd);
+  FIO_ASSERT(ret == 0, "`pipe` failed.");
+  FIO_ASSERT(fio_set_non_block(fd[0]) == 0,
+             "(fio) couldn't set internal pipe to non-blocking mode.");
+  FIO_ASSERT(fio_set_non_block(fd[1]) == 0,
+             "(fio) couldn't set internal pipe to non-blocking mode.");
+  fio_thread_data.fd_wait = fd[0];
+  fio_thread_data.fd_signal = fd[1];
+}
+
+FIO_FUNC inline void fio_thread_cleanup(void) {
+  if (fio_thread_data.fd_signal < 0)
+    return;
+  close(fio_thread_data.fd_wait);
+  close(fio_thread_data.fd_signal);
+  fio_thread_data.fd_wait = -1;
+  fio_thread_data.fd_signal = -1;
+}
+
+/* suspend thread execution (might be resumed unexpectedly) */
+FIO_FUNC void fio_thread_suspend(void) {
+  fio_lock(&fio_thread_lock);
+  fio_ls_embd_push(&fio_thread_queue, &fio_thread_data.node);
+  fio_unlock(&fio_thread_lock);
+  struct pollfd list = {
+      .events = (POLLPRI | POLLIN),
+      .fd = fio_thread_data.fd_wait,
+  };
+  if (poll(&list, 1, 5000) > 0) {
+    /* thread was removed from the list through signal */
+    uint64_t data;
+    int r = read(fio_thread_data.fd_wait, &data, sizeof(data));
+    (void)r;
+  } else {
+    /* remove self from list */
+    fio_lock(&fio_thread_lock);
+    fio_ls_embd_remove(&fio_thread_data.node);
+    fio_unlock(&fio_thread_lock);
+  }
+}
+
+/* wake up a single thread */
+FIO_FUNC void fio_thread_signal(void) {
+  fio_thread_queue_s *t;
+  int fd = -2;
+  fio_lock(&fio_thread_lock);
+  t = (fio_thread_queue_s *)fio_ls_embd_shift(&fio_thread_queue);
+  if (t)
+    fd = t->fd_signal;
+  fio_unlock(&fio_thread_lock);
+  if (fd >= 0) {
+    uint64_t data = 1;
+    int r = write(fd, (void *)&data, sizeof(data));
+    (void)r;
+  } else if (fd == -1) {
+    /* hardly the best way, but there's a thread sleeping on air */
+    kill(getpid(), SIGCONT);
+  }
+}
+
+/* wake up all threads */
+FIO_FUNC void fio_thread_broadcast(void) {
+  while (fio_ls_embd_any(&fio_thread_queue)) {
+    fio_thread_signal();
+  }
+}
+
+static size_t fio_poll(void);
+/**
+ * A thread entering this function should wait for new evennts.
+ */
+static void fio_defer_thread_wait(void) {
+#if FIO_ENGINE_POLL
+  fio_poll();
+  return;
+#endif
+  if (FIO_DEFER_THROTTLE_POLL) {
+    fio_thread_suspend();
+  } else {
+    /* keeps threads active (concurrent), but reduces performance */
+    static __thread size_t static_throttle = 262143UL;
+    fio_throttle_thread(static_throttle);
+    if (fio_defer_has_queue())
+      static_throttle = 1;
+    else if (static_throttle < FIO_DEFER_THROTTLE_LIMIT)
+      static_throttle = (static_throttle << 1);
+  }
+}
+
+static inline void fio_defer_on_thread_start(void) {
+  if (FIO_DEFER_THROTTLE_POLL)
+    fio_thread_make_suspendable();
+}
+static inline void fio_defer_thread_signal(void) {
+  if (FIO_DEFER_THROTTLE_POLL)
+    fio_thread_signal();
+}
+static inline void fio_defer_on_thread_end(void) {
+  if (FIO_DEFER_THROTTLE_POLL) {
+    fio_thread_broadcast();
+    fio_thread_cleanup();
+  }
+}
+
+/* *****************************************************************************
+Section Start Marker
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Task Management
+
+Task / Event schduling and execution
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+***************************************************************************** */
+
 #ifndef DEFER_QUEUE_BLOCK_COUNT
 #if UINTPTR_MAX <= 0xFFFFFFFF
 /* Almost a page of memory on most 32 bit machines: ((4096/4)-8)/3 */
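The suspend/signal pair added above parks idle worker threads on a pipe: a sleeping thread `poll()`s the read end of its own pipe (with a 5-second safety timeout), and a waker pops one parked thread off a shared list and writes to that thread's write end. A minimal standalone sketch of the same pattern, using plain pthreads and none of facil.io's types, assuming a single parked thread:

```c
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int wake_pipe[2]; /* [0] = read end (parked thread), [1] = write end */

static void *worker(void *arg) {
  struct pollfd pfd = {.fd = wake_pipe[0], .events = POLLIN};
  /* Park: sleep until a byte arrives or the 5s safety timeout expires. */
  if (poll(&pfd, 1, 5000) > 0) {
    char buf;
    (void)read(wake_pipe[0], &buf, 1); /* drain the wake-up byte */
    puts("worker: woken by signal");
  } else {
    puts("worker: timed out, waking anyway");
  }
  return arg;
}

int main(void) {
  pthread_t thr;
  if (pipe(wake_pipe))
    return 1;
  pthread_create(&thr, NULL, worker, NULL);
  sleep(1);                           /* let the worker park itself */
  (void)write(wake_pipe[1], "x", 1);  /* signal: wake one parked thread */
  pthread_join(thr, NULL);
  return 0;
}
```

The per-thread pipe (rather than a shared condition variable) keeps the wake-up one-to-one: whichever thread is popped off the list is the only one that receives a byte.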
@@ -640,9 +865,12 @@ critical_error:
 }
 
 #define fio_defer_push_task(func_, arg1_, arg2_)                              \
-
-
-
+  do {                                                                        \
+    fio_defer_push_task_fn(                                                   \
+        (fio_defer_task_s){.func = func_, .arg1 = arg1_, .arg2 = arg2_},      \
+        &task_queue_normal);                                                  \
+    fio_defer_thread_signal();                                                \
+  } while (0)
 
 #if FIO_USE_URGENT_QUEUE
 #define fio_defer_push_urgent(func_, arg1_, arg2_)                            \
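The new macro body is wrapped in `do { ... } while (0)` rather than a bare block so that the expansion behaves as a single statement, which keeps a trailing semicolon and an `else` branch working at every call site. A standalone illustration of the pitfall the wrapper avoids:

```c
#include <stdio.h>

/* Safe: expands to one statement, so a trailing `;` and an `else` both work. */
#define PUSH_SAFE(x)                                                           \
  do {                                                                         \
    printf("push %d\n", (x));                                                  \
    printf("signal\n");                                                        \
  } while (0)

int main(void) {
  int ready = 0;
  if (ready)
    PUSH_SAFE(1); /* with a bare `{ ... }` block, this `;` would end the if */
  else
    puts("not ready"); /* ...and this `else` would fail to compile */
  return 0;
}
```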
@@ -716,14 +944,30 @@ static inline void fio_defer_clear_tasks_for_queue(fio_task_queue_s *queue) {
   fio_unlock(&queue->lock);
 }
 
+/**
+ * Performs a single task from the queue, returning -1 if the queue was empty.
+ */
+static inline int
+fio_defer_perform_single_task_for_queue(fio_task_queue_s *queue) {
+  fio_defer_task_s task = fio_defer_pop_task(queue);
+  if (!task.func)
+    return -1;
+  task.func(task.arg1, task.arg2);
+  return 0;
+}
+
 static inline void fio_defer_clear_tasks(void) {
   fio_defer_clear_tasks_for_queue(&task_queue_normal);
+#if FIO_USE_URGENT_QUEUE
   fio_defer_clear_tasks_for_queue(&task_queue_urgent);
+#endif
 }
 
 static void fio_defer_on_fork(void) {
   task_queue_normal.lock = FIO_LOCK_INIT;
+#if FIO_USE_URGENT_QUEUE
   task_queue_urgent.lock = FIO_LOCK_INIT;
+#endif
 }
 
 /* *****************************************************************************
|
|
744
988
|
|
745
989
|
/** Performs all deferred functions until the queue had been depleted. */
|
746
990
|
void fio_defer_perform(void) {
|
747
|
-
for (;;) {
|
748
991
|
#if FIO_USE_URGENT_QUEUE
|
749
|
-
|
750
|
-
|
751
|
-
|
992
|
+
while (fio_defer_perform_single_task_for_queue(&task_queue_urgent) == 0 ||
|
993
|
+
fio_defer_perform_single_task_for_queue(&task_queue_normal) == 0)
|
994
|
+
;
|
752
995
|
#else
|
753
|
-
|
996
|
+
while (fio_defer_perform_single_task_for_queue(&task_queue_normal) == 0)
|
997
|
+
;
|
754
998
|
#endif
|
755
|
-
|
756
|
-
|
757
|
-
|
758
|
-
|
999
|
+
// for (;;) {
|
1000
|
+
// #if FIO_USE_URGENT_QUEUE
|
1001
|
+
// fio_defer_task_s task = fio_defer_pop_task(&task_queue_urgent);
|
1002
|
+
// if (!task.func)
|
1003
|
+
// task = fio_defer_pop_task(&task_queue_normal);
|
1004
|
+
// #else
|
1005
|
+
// fio_defer_task_s task = fio_defer_pop_task(&task_queue_normal);
|
1006
|
+
// #endif
|
1007
|
+
// if (!task.func)
|
1008
|
+
// return;
|
1009
|
+
// task.func(task.arg1, task.arg2);
|
1010
|
+
// }
|
759
1011
|
}
|
760
1012
|
|
761
1013
|
/** Returns true if there are deferred functions waiting for execution. */
|
762
1014
|
int fio_defer_has_queue(void) {
|
1015
|
+
#if FIO_USE_URGENT_QUEUE
|
763
1016
|
return task_queue_urgent.reader != task_queue_urgent.writer ||
|
764
1017
|
task_queue_urgent.reader->write != task_queue_urgent.reader->read ||
|
765
1018
|
task_queue_normal.reader != task_queue_normal.writer ||
|
766
1019
|
task_queue_normal.reader->write != task_queue_normal.reader->read;
|
1020
|
+
#else
|
1021
|
+
return task_queue_normal.reader != task_queue_normal.writer ||
|
1022
|
+
task_queue_normal.reader->write != task_queue_normal.reader->read;
|
1023
|
+
#endif
|
767
1024
|
}
|
768
1025
|
|
769
1026
|
/** Clears the queue. */
|
770
1027
|
void fio_defer_clear_queue(void) { fio_defer_clear_tasks(); }
|
771
1028
|
|
772
|
-
|
1029
|
+
/* Thread pool task */
|
773
1030
|
static void *fio_defer_cycle(void *ignr) {
|
774
|
-
|
1031
|
+
fio_defer_on_thread_start();
|
1032
|
+
for (;;) {
|
775
1033
|
fio_defer_perform();
|
1034
|
+
if (!fio_is_running())
|
1035
|
+
break;
|
776
1036
|
fio_defer_thread_wait();
|
777
|
-
}
|
1037
|
+
}
|
1038
|
+
fio_defer_on_thread_end();
|
778
1039
|
return ignr;
|
779
1040
|
}
|
780
1041
|
|
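The rewritten drain loop gives the urgent queue strict priority: each pass tries one urgent task first and only falls through to a normal task when the urgent queue is empty, exiting once both pops fail. The same control flow in a minimal standalone form, with integer "tasks" standing in for `fio_defer_task_s`:

```c
#include <stdio.h>

static int urgent[] = {1, 2}, normal_q[] = {10, 20, 30};
static int u_at = 0, n_at = 0;

/* Pop-and-run one task; return -1 when the queue is empty (as above). */
static int perform_one(int *q, int *at, int len, const char *name) {
  if (*at >= len)
    return -1;
  printf("%s task %d\n", name, q[(*at)++]);
  return 0;
}

int main(void) {
  /* Urgent-first drain: the same shape as the fio_defer_perform() loop. */
  while (perform_one(urgent, &u_at, 2, "urgent") == 0 ||
         perform_one(normal_q, &n_at, 3, "normal") == 0)
    ;
  return 0;
}
```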
@@ -1013,10 +1274,7 @@ Section Start Marker
 
 
 
-
-
-And Concurrency Helpers
-
+Concurrency Helpers
 
 
 
@@ -1028,101 +1286,7 @@ Section Start Marker
 
 
 
-***************************************************************************** */
-
-/**
-OVERRIDE THIS to replace the default `fork` implementation.
-
-Behaves like the system's `fork`.
-*/
-#pragma weak fio_fork
-int __attribute__((weak)) fio_fork(void) { return fork(); }
-
-/**
- * OVERRIDE THIS to replace the default pthread implementation.
- *
- * Accepts a pointer to a function and a single argument that should be executed
- * within a new thread.
- *
- * The function should allocate memory for the thread object and return a
- * pointer to the allocated memory that identifies the thread.
- *
- * On error NULL should be returned.
- */
-#pragma weak fio_thread_new
-void *__attribute__((weak))
-fio_thread_new(void *(*thread_func)(void *), void *arg) {
-  pthread_t *thread = malloc(sizeof(*thread));
-  FIO_ASSERT_ALLOC(thread);
-  if (pthread_create(thread, NULL, thread_func, arg))
-    goto error;
-  return thread;
-error:
-  free(thread);
-  return NULL;
-}
-
-/**
- * OVERRIDE THIS to replace the default pthread implementation.
- *
- * Frees the memory associated with a thread identifier (allows the thread to
- * run it's course, just the identifier is freed).
- */
-#pragma weak fio_thread_free
-void __attribute__((weak)) fio_thread_free(void *p_thr) {
-  if (*((pthread_t *)p_thr)) {
-    pthread_detach(*((pthread_t *)p_thr));
-  }
-  free(p_thr);
-}
-
-/**
- * OVERRIDE THIS to replace the default pthread implementation.
- *
- * Accepts a pointer returned from `fio_thread_new` (should also free any
- * allocated memory) and joins the associated thread.
- *
- * Return value is ignored.
- */
-#pragma weak fio_thread_join
-int __attribute__((weak)) fio_thread_join(void *p_thr) {
-  if (!p_thr || !(*((pthread_t *)p_thr)))
-    return -1;
-  pthread_join(*((pthread_t *)p_thr), NULL);
-  *((pthread_t *)p_thr) = (pthread_t)NULL;
-  free(p_thr);
-  return 0;
-}
-
-static size_t fio_poll(void);
-/**
- * A thread entering this function should wait for new evennts.
- */
-static void fio_defer_thread_wait(void) {
-#if FIO_ENGINE_POLL
-  fio_poll();
-  return;
-#endif
-  if (FIO_DEFER_THROTTLE_PROGRESSIVE) {
-    /* keeps threads active (concurrent), but reduces performance */
-    static __thread size_t static_throttle = 262143UL;
-    if (static_throttle < FIO_DEFER_THROTTLE_LIMIT)
-      static_throttle = (static_throttle << 1);
-    fio_throttle_thread(static_throttle);
-    if (fio_defer_has_queue())
-      static_throttle = 1;
-  } else {
-    /* Protects against slow user code, but mostly a single active thread */
-    size_t throttle = fio_data->threads ? ((fio_data->threads) * DEFER_THROTTLE)
-                                        : FIO_DEFER_THROTTLE_LIMIT;
-    if (throttle > FIO_DEFER_THROTTLE_LIMIT)
-      throttle = FIO_DEFER_THROTTLE_LIMIT;
-    fio_throttle_thread(throttle);
-  }
-}
 
-/* *****************************************************************************
-Concurrency Helpers
 ***************************************************************************** */
 
 volatile uint8_t fio_signal_children_flag = 0;
@@ -1174,6 +1338,8 @@ static void sig_int_handler(int sig) {
 static void fio_signal_handler_setup(void) {
   /* setup signal handling */
   struct sigaction act, old;
+  memset(&act, 0, sizeof(old));
+  memset(&old, 0, sizeof(old));
 
   act.sa_handler = sig_int_handler;
   sigemptyset(&act.sa_mask);
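The added `memset` calls matter because `struct sigaction` has more members than the two the code assigns: stack garbage in `sa_flags` (or in the `sa_sigaction` half of the handler union) can silently enable flags like `SA_SIGINFO`, so zeroing both structs first makes the setup deterministic. (The `memset(&act, 0, sizeof(old))` spelling is harmless here since both variables share a type, though `sizeof(act)` would be the more conventional form.) A minimal, fully initialized handler installation for reference:

```c
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void on_int(int sig) { (void)sig; /* async-signal-safe work only */ }

int main(void) {
  struct sigaction act;
  memset(&act, 0, sizeof(act)); /* zero sa_flags and the handler union */
  act.sa_handler = on_int;
  sigemptyset(&act.sa_mask);    /* don't block extra signals in the handler */
  if (sigaction(SIGINT, &act, NULL)) {
    perror("sigaction");
    return 1;
  }
  puts("SIGINT handler installed");
  return 0;
}
```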
@@ -1203,7 +1369,8 @@ static void fio_signal_handler_setup(void) {
 }
 static void fio_signal_handler_reset(void) {
   struct sigaction act, old;
-
+  memset(&act, 0, sizeof(old));
+  memset(&old, 0, sizeof(old));
   act.sa_handler = SIG_DFL;
   sigemptyset(&act.sa_mask);
   sigaction(SIGINT, &act, &old);
@@ -1464,7 +1631,7 @@ static size_t fio_poll(void) {
     for (int i = 0; i < active_count; i++) {
       if (events[i].events & (~(EPOLLIN | EPOLLOUT))) {
         // errors are hendled as disconnections (on_close)
-
+        fio_force_close_in_poll(fd2uuid(events[i].data.fd));
       } else {
         // no error, then it's an active event(s)
         if (events[i].events & EPOLLOUT) {
@@ -1600,7 +1767,11 @@ static size_t fio_poll(void) {
   if (active_count > 0) {
     for (int i = 0; i < active_count; i++) {
       // test for event(s) type
-      if (events[i].filter ==
+      if (events[i].filter == EVFILT_WRITE) {
+        // we can only write if there's no error in the socket
+        fio_defer_push_urgent(deferred_on_ready,
+                              ((void *)fd2uuid(events[i].udata)), NULL);
+      } else if (events[i].filter == EVFILT_READ) {
         fio_defer_push_task(deferred_on_data, (void *)fd2uuid(events[i].udata),
                             NULL);
       }
@@ -1613,11 +1784,7 @@ static size_t fio_poll(void) {
         //             ? "EV_EOF"
         //             : (events[i].flags & EV_ERROR) ? "EV_ERROR" : "WTF?");
         // uuid_data(events[i].udata).open = 0;
-
-      } else if (events[i].filter == EVFILT_WRITE) {
-        // we can only write if there's no error in the socket
-        fio_defer_push_urgent(deferred_on_ready,
-                              ((void *)fd2uuid(events[i].udata)), NULL);
+        fio_force_close_in_poll(fd2uuid(events[i].udata));
       }
     }
   } else if (active_count < 0) {
@@ -1748,20 +1915,20 @@ static size_t fio_poll(void) {
     if (list[i].revents) {
       touchfd(i);
       ++count;
-      if (list[i].revents & FIO_POLL_READ_EVENTS) {
-        // FIO_LOG_DEBUG("Poll Read %zu => %p", i, (void *)fd2uuid(i));
-        fio_poll_remove_read(i);
-        fio_defer_push_task(deferred_on_data, (void *)fd2uuid(i), NULL);
-      }
       if (list[i].revents & FIO_POLL_WRITE_EVENTS) {
         // FIO_LOG_DEBUG("Poll Write %zu => %p", i, (void *)fd2uuid(i));
         fio_poll_remove_write(i);
         fio_defer_push_urgent(deferred_on_ready, (void *)fd2uuid(i), NULL);
       }
+      if (list[i].revents & FIO_POLL_READ_EVENTS) {
+        // FIO_LOG_DEBUG("Poll Read %zu => %p", i, (void *)fd2uuid(i));
+        fio_poll_remove_read(i);
+        fio_defer_push_task(deferred_on_data, (void *)fd2uuid(i), NULL);
+      }
       if (list[i].revents & (POLLHUP | POLLERR)) {
         // FIO_LOG_DEBUG("Poll Hangup %zu => %p", i, (void *)fd2uuid(i));
         fio_poll_remove_fd(i);
-
+        fio_force_close_in_poll(fd2uuid(i));
       }
       if (list[i].revents & POLLNVAL) {
         // FIO_LOG_DEBUG("Poll Invalid %zu => %p", i, (void *)fd2uuid(i));
@@ -2053,7 +2220,7 @@ int fio_set_non_block(int fd) {
   if (-1 == (flags = fcntl(fd, F_GETFL, 0)))
     flags = 0;
   // printf("flags initial value was %d\n", flags);
-  return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+  return fcntl(fd, F_SETFL, flags | O_NONBLOCK | O_CLOEXEC);
 #elif defined(FIONBIO)
   /* Otherwise, use the old way of doing it */
   static int flags = 1;
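One caveat worth knowing when reading this hunk: on POSIX systems close-on-exec is a file *descriptor* flag (`FD_CLOEXEC`, set via `F_SETFD`), not a file *status* flag, and passing `O_CLOEXEC` to `fcntl(fd, F_SETFL, ...)` is typically ignored (Linux documents `O_CLOEXEC` as unchangeable through `F_SETFL`). A sketch of setting both properties through their respective fcntl commands:

```c
#include <fcntl.h>
#include <unistd.h>

/* Set non-blocking (a status flag) and close-on-exec (a descriptor flag). */
static int set_nonblock_cloexec(int fd) {
  int fl = fcntl(fd, F_GETFL, 0);
  if (fl == -1)
    fl = 0;
  if (fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) /* status flags */
    return -1;
  int fdfl = fcntl(fd, F_GETFD, 0);
  if (fdfl == -1)
    fdfl = 0;
  return fcntl(fd, F_SETFD, fdfl | FD_CLOEXEC); /* descriptor flags */
}

int main(void) { return set_nonblock_cloexec(STDIN_FILENO) ? 1 : 0; }
```

The `accept4(..., SOCK_NONBLOCK | SOCK_CLOEXEC)` change in the next hunk sets both properties atomically at accept time, which also avoids the fork/exec race window between `accept` and a later `fcntl`.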
@@ -2091,7 +2258,7 @@ intptr_t fio_accept(intptr_t srv_uuid) {
   int client;
 #ifdef SOCK_NONBLOCK
   client = accept4(fio_uuid2fd(srv_uuid), (struct sockaddr *)addrinfo, &addrlen,
-                   SOCK_NONBLOCK);
+                   SOCK_NONBLOCK | SOCK_CLOEXEC);
   if (client <= 0)
     return -1;
 #else
@@ -2142,17 +2309,11 @@ intptr_t fio_accept(intptr_t srv_uuid) {
 /* Creates a Unix socket - returning it's uuid (or -1) */
 static intptr_t fio_unix_socket(const char *address, uint8_t server) {
   /* Unix socket */
-  if (!address) {
-    errno = EINVAL;
-    FIO_LOG_ERROR(
-        "(fio) a Unix socket requires a valid address.\n"
-        "      Specify port for TCP/IP socket or change address.");
-    return -1;
-  }
   struct sockaddr_un addr = {0};
   size_t addr_len = strlen(address);
   if (addr_len >= sizeof(addr.sun_path)) {
-    FIO_LOG_ERROR("(
+    FIO_LOG_ERROR("(fio_unix_socket) address too long (%zu bytes > %zu bytes).",
+                  addr_len, sizeof(addr.sun_path) - 1);
     errno = ENAMETOOLONG;
     return -1;
   }
@@ -2283,7 +2444,29 @@ socket_okay:
 /* PUBLIC API: opens a server or client socket */
 intptr_t fio_socket(const char *address, const char *port, uint8_t server) {
   intptr_t uuid;
-  if (
+  if (port) {
+    char *pos = (char *)port;
+    int64_t n = fio_atol(&pos);
+    /* make sure port is only numerical */
+    if (*pos) {
+      FIO_LOG_ERROR("(fio_socket) port %s is not a number.", port);
+      errno = EINVAL;
+      return -1;
+    }
+    /* a negative port number will revert to a Unix socket. */
+    if (n <= 0) {
+      if (n < -1)
+        FIO_LOG_WARNING("(fio_socket) negative port number %s is ignored.",
+                        port);
+      port = NULL;
+    }
+  }
+  if (!address && !port) {
+    FIO_LOG_ERROR("(fio_socket) both address and port are missing or invalid.");
+    errno = EINVAL;
+    return -1;
+  }
+  if (!port) {
     do {
       errno = 0;
       uuid = fio_unix_socket(address, server);
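With this validation in place, `port` must be purely numeric, and a non-positive or missing port routes the call to the Unix-socket path. Based only on the signature shown in the hunk above, usage would look roughly like this (a sketch, not tested against the gem's bundled headers):

```c
#include "fio.h" /* facil.io's core header, as bundled with the gem */

int main(void) {
  /* TCP/IP listener: a numeric port string. */
  intptr_t tcp = fio_socket("127.0.0.1", "3000", 1);
  /* Unix socket listener: a NULL (or non-positive) port selects AF_UNIX. */
  intptr_t unix_sock = fio_socket("./app.sock", NULL, 1);
  /* Rejected with errno == EINVAL, since "30x0" is not a number. */
  intptr_t bad = fio_socket("127.0.0.1", "30x0", 1);
  return (tcp != -1 && unix_sock != -1 && bad == -1) ? 0 : 1;
}
```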
@@ -2635,7 +2818,7 @@ void fio_force_close(intptr_t uuid) {
   fio_lock(&uuid_data(uuid).protocol_lock);
   fio_clear_fd(fio_uuid2fd(uuid), 0);
   fio_unlock(&uuid_data(uuid).protocol_lock);
-  close(fio_uuid2fd(uuid));
+  close(fio_uuid2fd(uuid));
 #if FIO_ENGINE_POLL
   fio_poll_remove_fd(fio_uuid2fd(uuid));
 #endif
@@ -3327,6 +3510,7 @@ static void fio_cycle_schedule_events(void) {
   fio_timer_schedule();
   fio_max_fd_shrink();
   if (fio_signal_children_flag) {
+    /* hot restart support */
     fio_signal_children_flag = 0;
     fio_cluster_signal_children();
   }
@@ -4055,6 +4239,7 @@ intptr_t fio_listen FIO_IGNORE_MACRO(struct fio_listen_args args) {
     errno = EINVAL;
     goto error;
   }
+
   size_t addr_len = 0;
   size_t port_len = 0;
   if (args.address)
@@ -6230,6 +6415,7 @@ static struct {
   // intptr_t count; /* free list counter */
   size_t cores;    /* the number of detected CPU cores*/
   fio_lock_i lock; /* a global lock */
+  uint8_t forked;  /* a forked collection indicator. */
 } memory = {
     .cores = 1,
     .lock = FIO_LOCK_INIT,
@@ -6304,6 +6490,7 @@ void fio_malloc_after_fork(void) {
     return;
   }
   memory.lock = FIO_LOCK_INIT;
+  memory.forked = 1;
   for (size_t i = 0; i < memory.cores; ++i) {
     arenas[i].lock = FIO_LOCK_INIT;
   }
@@ -6532,7 +6719,7 @@ static void fio_mem_destroy(void) {
     block_free(arenas[i].block);
     arenas[i].block = NULL;
   }
-  if (fio_ls_embd_any(&memory.available)) {
+  if (!memory.forked && fio_ls_embd_any(&memory.available)) {
     FIO_LOG_WARNING("facil.io detected memory traces remaining after cleanup"
                     " - memory leak?");
     FIO_MEMORY_PRINT_BLOCK_STAT();
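The `memory.forked` guard makes sense once you consider what a forked child sees at exit: it inherits a copy of every block the parent's allocator handed out, so "memory traces remaining after cleanup" are expected there and the leak warning would be pure noise. A compact, self-contained illustration of the same guard pattern (the names are stand-ins, not facil.io's internals):

```c
#include <stdio.h>

static int forked = 0;        /* set from the after-fork hook */
static int blocks_in_use = 3; /* stand-in for fio_ls_embd_any(...) */

static void after_fork(void) { forked = 1; }

static void mem_destroy(void) {
  /* Only a never-forked process can treat leftovers as a leak signal. */
  if (!forked && blocks_in_use)
    fprintf(stderr, "memory traces remaining after cleanup - memory leak?\n");
}

int main(void) {
  after_fork();  /* pretend this process is a worker child */
  mem_destroy(); /* prints nothing: leftovers are inherited, not leaked */
  return 0;
}
```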
@@ -8085,11 +8272,11 @@ FIO_FUNC inline void fio_str_test(void) {
   fio_str_capa_assert(&str, sizeof(fio_str_s) - 1);
   FIO_ASSERT(!str.small,
              "Long String reporting as small after capacity update!");
-  FIO_ASSERT(fio_str_capa(&str)
+  FIO_ASSERT(fio_str_capa(&str) >= sizeof(fio_str_s) - 1,
              "Long String capacity update error (%zu != %zu)!",
              fio_str_capa(&str), sizeof(fio_str_s));
   FIO_ASSERT(fio_str_data(&str) == fio_str_info(&str).data,
-             "Long String `fio_str_data`
+             "Long String `fio_str_data` !>= `fio_str_info(s).data` (%p != %p)",
              (void *)fio_str_data(&str), (void *)fio_str_info(&str).data);
 
   FIO_ASSERT(