nio4r 2.4.0 → 2.5.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/workflow.yml +47 -0
- data/.rubocop.yml +30 -11
- data/CHANGES.md +63 -0
- data/Gemfile +1 -1
- data/README.md +57 -30
- data/examples/echo_server.rb +2 -2
- data/ext/libev/Changes +90 -2
- data/ext/libev/README +2 -1
- data/ext/libev/ev.c +708 -247
- data/ext/libev/ev.h +33 -29
- data/ext/libev/ev_epoll.c +41 -28
- data/ext/libev/ev_iouring.c +694 -0
- data/ext/libev/ev_kqueue.c +15 -9
- data/ext/libev/ev_linuxaio.c +620 -0
- data/ext/libev/ev_poll.c +19 -14
- data/ext/libev/ev_port.c +8 -5
- data/ext/libev/ev_select.c +6 -6
- data/ext/libev/ev_vars.h +46 -1
- data/ext/libev/ev_win32.c +2 -2
- data/ext/libev/ev_wrap.h +72 -0
- data/ext/nio4r/.clang-format +16 -0
- data/ext/nio4r/bytebuffer.c +27 -28
- data/ext/nio4r/extconf.rb +9 -0
- data/ext/nio4r/libev.h +1 -3
- data/ext/nio4r/monitor.c +34 -31
- data/ext/nio4r/nio4r.h +7 -12
- data/ext/nio4r/org/nio4r/ByteBuffer.java +2 -0
- data/ext/nio4r/org/nio4r/Monitor.java +1 -0
- data/ext/nio4r/org/nio4r/Selector.java +13 -11
- data/ext/nio4r/selector.c +66 -51
- data/lib/nio.rb +20 -1
- data/lib/nio/bytebuffer.rb +4 -0
- data/lib/nio/monitor.rb +1 -1
- data/lib/nio/selector.rb +12 -10
- data/lib/nio/version.rb +1 -1
- data/nio4r.gemspec +10 -2
- data/spec/nio/bytebuffer_spec.rb +0 -1
- data/spec/nio/selectables/ssl_socket_spec.rb +3 -1
- data/spec/nio/selectables/udp_socket_spec.rb +2 -2
- data/spec/nio/selector_spec.rb +27 -5
- data/spec/spec_helper.rb +2 -0
- metadata +17 -12
- data/.travis.yml +0 -29
- data/Guardfile +0 -10
- data/LICENSE.txt +0 -20
- data/appveyor.yml +0 -40
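The bundled libev update below adds two new Linux backends (ev_linuxaio.c and ev_iouring.c) behind `EVBACKEND_LINUXAIO` / `EVBACKEND_IOURING` flags. As a minimal, hedged sketch of what that means for libev users (this example is illustrative only and not part of the packaged diff; it assumes libev's public `ev_loop_new`/`ev_backend` API):

```c
#include <ev.h>

int main (void)
{
  /* ask for the io_uring backend; fall back to libev's recommended set
   * if it is unavailable or not compiled in */
  struct ev_loop *loop = ev_loop_new (EVBACKEND_IOURING);
  if (!loop)
    loop = ev_loop_new (EVFLAG_AUTO);

  unsigned int chosen = ev_backend (loop); /* which backend was actually selected */
  ev_loop_destroy (loop);
  return chosen ? 0 : 1;
}
```

Note that, as the diff itself records, libev never includes the experimental linuxaio/io_uring backends in `ev_recommended_backends ()`, so they are only used when requested explicitly.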
data/ext/libev/README
CHANGED
@@ -18,7 +18,8 @@ ABOUT
 - extensive and detailed, readable documentation (not doxygen garbage).
 - fully supports fork, can detect fork in various ways and automatically
   re-arms kernel mechanisms that do not support fork.
-- highly optimised select, poll, epoll,
+- highly optimised select, poll, linux epoll, linux aio, bsd kqueue
+  and solaris event ports backends.
 - filesystem object (path) watching (with optional linux inotify support).
 - wallclock-based times (using absolute time, cron-like).
 - relative timers/timeouts (handle time jumps).
data/ext/libev/ev.c
CHANGED
@@ -1,7 +1,7 @@
 /*
  * libev event processing core, watcher management
  *
- * Copyright (c) 2007-
+ * Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -116,7 +116,7 @@
 # undef EV_USE_POLL
 # define EV_USE_POLL 0
 # endif
-
+
 # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
 # ifndef EV_USE_EPOLL
 # define EV_USE_EPOLL EV_FEATURE_BACKENDS
@@ -125,7 +125,25 @@
 # undef EV_USE_EPOLL
 # define EV_USE_EPOLL 0
 # endif
-
+
+# if HAVE_LINUX_AIO_ABI_H
+# ifndef EV_USE_LINUXAIO
+# define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
+# endif
+# else
+# undef EV_USE_LINUXAIO
+# define EV_USE_LINUXAIO 0
+# endif
+
+# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
+# ifndef EV_USE_IOURING
+# define EV_USE_IOURING EV_FEATURE_BACKENDS
+# endif
+# else
+# undef EV_USE_IOURING
+# define EV_USE_IOURING 0
+# endif
+
 # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
 # ifndef EV_USE_KQUEUE
 # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
@@ -134,7 +152,7 @@
 # undef EV_USE_KQUEUE
 # define EV_USE_KQUEUE 0
 # endif
-
+
 # if HAVE_PORT_H && HAVE_PORT_CREATE
 # ifndef EV_USE_PORT
 # define EV_USE_PORT EV_FEATURE_BACKENDS
@@ -170,7 +188,16 @@
 # undef EV_USE_EVENTFD
 # define EV_USE_EVENTFD 0
 # endif
-
+
+# if HAVE_SYS_TIMERFD_H
+# ifndef EV_USE_TIMERFD
+# define EV_USE_TIMERFD EV_FEATURE_OS
+# endif
+# else
+# undef EV_USE_TIMERFD
+# define EV_USE_TIMERFD 0
+# endif
+
 #endif
 
 /* OS X, in its infinite idiocy, actually HARDCODES
@@ -326,6 +353,14 @@
 # define EV_USE_PORT 0
 #endif
 
+#ifndef EV_USE_LINUXAIO
+# define EV_USE_LINUXAIO 0
+#endif
+
+#ifndef EV_USE_IOURING
+# define EV_USE_IOURING 0
+#endif
+
 #ifndef EV_USE_INOTIFY
 # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
 # define EV_USE_INOTIFY EV_FEATURE_OS
@@ -358,6 +393,14 @@
 # endif
 #endif
 
+#ifndef EV_USE_TIMERFD
+# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
+# define EV_USE_TIMERFD EV_FEATURE_OS
+# else
+# define EV_USE_TIMERFD 0
+# endif
+#endif
+
 #if 0 /* debugging */
 # define EV_VERIFY 3
 # define EV_USE_4HEAP 1
@@ -400,6 +443,7 @@
 # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
 # undef EV_USE_MONOTONIC
 # define EV_USE_MONOTONIC 1
+# define EV_NEED_SYSCALL 1
 # else
 # undef EV_USE_CLOCK_SYSCALL
 # define EV_USE_CLOCK_SYSCALL 0
@@ -430,6 +474,31 @@
 # endif
 #endif
 
+#if EV_USE_LINUXAIO
+# include <sys/syscall.h>
+# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
+# define EV_NEED_SYSCALL 1
+# else
+# undef EV_USE_LINUXAIO
+# define EV_USE_LINUXAIO 0
+# endif
+#endif
+
+#if EV_USE_IOURING
+# include <sys/syscall.h>
+# if !SYS_io_uring_setup && __linux && !__alpha
+# define SYS_io_uring_setup 425
+# define SYS_io_uring_enter 426
+# define SYS_io_uring_wregister 427
+# endif
+# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
+# define EV_NEED_SYSCALL 1
+# else
+# undef EV_USE_IOURING
+# define EV_USE_IOURING 0
+# endif
+#endif
+
 #if EV_USE_INOTIFY
 # include <sys/statfs.h>
 # include <sys/inotify.h>
@@ -441,7 +510,7 @@
 #endif
 
 #if EV_USE_EVENTFD
-/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
+/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
 # include <stdint.h>
 # ifndef EFD_NONBLOCK
 # define EFD_NONBLOCK O_NONBLOCK
@@ -457,7 +526,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
 #endif
 
 #if EV_USE_SIGNALFD
-/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
+/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
 # include <stdint.h>
 # ifndef SFD_NONBLOCK
 # define SFD_NONBLOCK O_NONBLOCK
@@ -469,7 +538,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
 # define SFD_CLOEXEC 02000000
 # endif
 # endif
-EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
+EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
 
 struct signalfd_siginfo
 {
@@ -478,7 +547,17 @@ struct signalfd_siginfo
 };
 #endif
 
-
+/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
+#if EV_USE_TIMERFD
+# include <sys/timerfd.h>
+/* timerfd is only used for periodics */
+# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
+# undef EV_USE_TIMERFD
+# define EV_USE_TIMERFD 0
+# endif
+#endif
+
+/*****************************************************************************/
 
 #if EV_VERIFY >= 3
 # define EV_FREQUENT_CHECK ev_verify (EV_A)
@@ -493,18 +572,34 @@ struct signalfd_siginfo
 #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
 /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
 
-#define MIN_TIMEJUMP
-#define MAX_BLOCKTIME
+#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
+#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
+#define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */
 
-
-
+/* find a portable timestamp that is "always" in the future but fits into time_t.
+ * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
+ * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
+#define EV_TSTAMP_HUGE \
+  (sizeof (time_t) >= 8 ? 10000000000000. \
+   : 0 < (time_t)4294967295 ? 4294967295. \
+   : 2147483647.) \
+
+#ifndef EV_TS_CONST
+# define EV_TS_CONST(nv) nv
+# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
+# define EV_TS_FROM_USEC(us) us * 1e-6
+# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
+# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
+# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
+# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
+#endif
 
 /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
 /* ECB.H BEGIN */
 /*
  * libecb - http://software.schmorp.de/pkg/libecb
  *
- * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
+ * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de>
  * Copyright (©) 2011 Emanuele Giaquinta
  * All rights reserved.
  *
@@ -545,15 +640,23 @@ struct signalfd_siginfo
 #define ECB_H
 
 /* 16 bits major, 16 bits minor */
-#define ECB_VERSION
+#define ECB_VERSION 0x00010008
 
-#
+#include <string.h> /* for memcpy */
+
+#if defined (_WIN32) && !defined (__MINGW32__)
 typedef signed char int8_t;
 typedef unsigned char uint8_t;
+typedef signed char int_fast8_t;
+typedef unsigned char uint_fast8_t;
 typedef signed short int16_t;
 typedef unsigned short uint16_t;
+typedef signed int int_fast16_t;
+typedef unsigned int uint_fast16_t;
 typedef signed int int32_t;
 typedef unsigned int uint32_t;
+typedef signed int int_fast32_t;
+typedef unsigned int uint_fast32_t;
 #if __GNUC__
 typedef signed long long int64_t;
 typedef unsigned long long uint64_t;
@@ -561,6 +664,8 @@ struct signalfd_siginfo
 typedef signed __int64 int64_t;
 typedef unsigned __int64 uint64_t;
 #endif
+typedef int64_t int_fast64_t;
+typedef uint64_t uint_fast64_t;
 #ifdef _WIN64
 #define ECB_PTRSIZE 8
 typedef uint64_t uintptr_t;
@@ -582,6 +687,14 @@ struct signalfd_siginfo
 #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
 #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
 
+#ifndef ECB_OPTIMIZE_SIZE
+#if __OPTIMIZE_SIZE__
+#define ECB_OPTIMIZE_SIZE 1
+#else
+#define ECB_OPTIMIZE_SIZE 0
+#endif
+#endif
+
 /* work around x32 idiocy by defining proper macros */
 #if ECB_GCC_AMD64 || ECB_MSVC_AMD64
 #if _ILP32
@@ -669,6 +782,7 @@ struct signalfd_siginfo
 
 #ifndef ECB_MEMORY_FENCE
 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
+#define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
 #if __i386 || __i386__
 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
@@ -728,12 +842,14 @@ struct signalfd_siginfo
 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
+#define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
 
 #elif ECB_CLANG_EXTENSION(c_atomic)
 /* see comment below (stdatomic.h) about the C11 memory model. */
 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
+#define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
 
 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
 #define ECB_MEMORY_FENCE __sync_synchronize ()
@@ -753,9 +869,10 @@ struct signalfd_siginfo
 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
 #include <mbarrier.h>
-#define ECB_MEMORY_FENCE __machine_rw_barrier
-#define ECB_MEMORY_FENCE_ACQUIRE
-#define ECB_MEMORY_FENCE_RELEASE
+#define ECB_MEMORY_FENCE __machine_rw_barrier ()
+#define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
+#define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
+#define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
 #elif __xlC__
 #define ECB_MEMORY_FENCE __sync ()
 #endif
@@ -766,15 +883,9 @@ struct signalfd_siginfo
 /* we assume that these memory fences work on all variables/all memory accesses, */
 /* not just C11 atomics and atomic accesses */
 #include <stdatomic.h>
-/* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
-/* any fence other than seq_cst, which isn't very efficient for us. */
-/* Why that is, we don't know - either the C11 memory model is quite useless */
-/* for most usages, or gcc and clang have a bug */
-/* I *currently* lean towards the latter, and inefficiently implement */
-/* all three of ecb's fences as a seq_cst fence */
-/* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
-/* for all __atomic_thread_fence's except seq_cst */
 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
+#define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
+#define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
 #endif
 #endif
 
@@ -804,6 +915,10 @@ struct signalfd_siginfo
 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
 #endif
 
+#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
+#define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
+#endif
+
 /*****************************************************************************/
 
 #if ECB_CPP
@@ -1095,6 +1210,44 @@ ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { retu
 ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
 ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
 
+#if ECB_CPP
+
+inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
+inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
+inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
+inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
+
+inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
+inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
+inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
+inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
+
+inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
+inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
+inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
+inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
+
+inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
+inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
+inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
+inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
+
+inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
+inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
+inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
+
+inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
+inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
+inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
+inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
+
+inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
+inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
+inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
+inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
+
+#endif
+
 #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
 #define ecb_bswap16(x) __builtin_bswap16 (x)
@@ -1175,6 +1328,78 @@ ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_he
 ecb_inline ecb_const ecb_bool ecb_little_endian (void);
 ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
 
+/*****************************************************************************/
+/* unaligned load/store */
+
+ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
+ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
+ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
+
+ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
+ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
+ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
+
+ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
+ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
+ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
+
+ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
+ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
+ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
+
+ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
+ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
+ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
+
+ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
+ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
+ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
+
+ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
+ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
+ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
+
+ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
+ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
+ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
+
+ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
+ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
+ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
+
+ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
+ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
+ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
+
+#if ECB_CPP
+
+inline uint8_t ecb_bswap (uint8_t v) { return v; }
+inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
+inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
+inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
+
+template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
+template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
+template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
+template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
+template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
+template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
+template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
+template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
+
+template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
+template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
+template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
+template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
+template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
+template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
+template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
+template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
+
+#endif
+
+/*****************************************************************************/
+
 #if ECB_GCC_VERSION(3,0) || ECB_C99
 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
 #else
@@ -1208,6 +1433,8 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
 #endif
 
+/*****************************************************************************/
+
 ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
 ecb_function_ ecb_const uint32_t
 ecb_binary16_to_binary32 (uint32_t x)
@@ -1325,7 +1552,6 @@ ecb_binary32_to_binary16 (uint32_t x)
 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
 || defined __aarch64__
 #define ECB_STDFP 1
-#include <string.h> /* for memcpy */
 #else
 #define ECB_STDFP 0
 #endif
@@ -1520,7 +1746,7 @@ ecb_binary32_to_binary16 (uint32_t x)
 #if ECB_MEMORY_FENCE_NEEDS_PTHREADS
 /* if your architecture doesn't need memory fences, e.g. because it is
  * single-cpu/core, or if you use libev in a project that doesn't use libev
- * from multiple threads, then you can define
+ * from multiple threads, then you can define ECB_NO_THREADS when compiling
  * libev, in which cases the memory fences become nops.
  * alternatively, you can remove this #error and link against libpthread,
  * which will then provide the memory fences.
@@ -1534,18 +1760,80 @@ ecb_binary32_to_binary16 (uint32_t x)
 # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
 #endif
 
-#define expect_false(cond) ecb_expect_false (cond)
-#define expect_true(cond) ecb_expect_true (cond)
-#define noinline ecb_noinline
-
 #define inline_size ecb_inline
 
 #if EV_FEATURE_CODE
 # define inline_speed ecb_inline
 #else
-# define inline_speed
+# define inline_speed ecb_noinline static
 #endif
 
+/*****************************************************************************/
+/* raw syscall wrappers */
+
+#if EV_NEED_SYSCALL
+
+#include <sys/syscall.h>
+
+/*
+ * define some syscall wrappers for common architectures
+ * this is mostly for nice looks during debugging, not performance.
+ * our syscalls return < 0, not == -1, on error. which is good
+ * enough for linux aio.
+ * TODO: arm is also common nowadays, maybe even mips and x86
+ * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
+ */
+#if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
+ /* the costly errno access probably kills this for size optimisation */
+
+#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
+  ({ \
+    long res; \
+    register unsigned long r6 __asm__ ("r9" ); \
+    register unsigned long r5 __asm__ ("r8" ); \
+    register unsigned long r4 __asm__ ("r10"); \
+    register unsigned long r3 __asm__ ("rdx"); \
+    register unsigned long r2 __asm__ ("rsi"); \
+    register unsigned long r1 __asm__ ("rdi"); \
+    if (narg >= 6) r6 = (unsigned long)(arg6); \
+    if (narg >= 5) r5 = (unsigned long)(arg5); \
+    if (narg >= 4) r4 = (unsigned long)(arg4); \
+    if (narg >= 3) r3 = (unsigned long)(arg3); \
+    if (narg >= 2) r2 = (unsigned long)(arg2); \
+    if (narg >= 1) r1 = (unsigned long)(arg1); \
+    __asm__ __volatile__ ( \
+      "syscall\n\t" \
+      : "=a" (res) \
+      : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
+      : "cc", "r11", "cx", "memory"); \
+    errno = -res; \
+    res; \
+  })
+
+#endif
+
+#ifdef ev_syscall
+#define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
+#define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
+#define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
+#define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
+#define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
+#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
+#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
+#else
+#define ev_syscall0(nr) syscall (nr)
+#define ev_syscall1(nr,arg1) syscall (nr, arg1)
+#define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
+#define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
+#define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
+#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
+#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
+#endif
+
+#endif
+
+/*****************************************************************************/
+
 #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
 
 #if EV_MINPRI == EV_MAXPRI
@@ -1554,8 +1842,7 @@ ecb_binary32_to_binary16 (uint32_t x)
 # define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
 #endif
 
-#define EMPTY
-#define EMPTY2(a,b) /* used to suppress some warnings */
+#define EMPTY /* required for microsofts broken pseudo-c compiler */
 
 typedef ev_watcher *W;
 typedef ev_watcher_list *WL;
@@ -1590,6 +1877,10 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
 
 /*****************************************************************************/
 
+#if EV_USE_LINUXAIO
+# include <linux/aio_abi.h> /* probably only needed for aio_context_t */
+#endif
+
 /* define a suitable floor function (only used by periodics atm) */
 
 #if EV_USE_FLOOR
@@ -1600,7 +1891,7 @@
 #include <float.h>
 
 /* a floor() replacement function, should be independent of ev_tstamp type */
-
+ecb_noinline
 static ev_tstamp
 ev_floor (ev_tstamp v)
 {
@@ -1611,26 +1902,26 @@ ev_floor (ev_tstamp v)
 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
 #endif
 
-/*
-if (
+/* special treatment for negative arguments */
+if (ecb_expect_false (v < 0.))
+{
+ev_tstamp f = -ev_floor (-v);
+
+return f - (f == v ? 0 : 1);
+}
+
+/* argument too large for an unsigned long? then reduce it */
+if (ecb_expect_false (v >= shift))
 {
 ev_tstamp f;
 
 if (v == v - 1.)
-return v; /* very large
+return v; /* very large numbers are assumed to be integer */
 
 f = shift * ev_floor (v * (1. / shift));
 return f + ev_floor (v - f);
 }
 
-/* special treatment for negative args? */
-if (expect_false (v < 0.))
-{
-ev_tstamp f = -ev_floor (-v);
-
-return f - (f == v ? 0 : 1);
-}
-
 /* fits into an unsigned long */
 return (unsigned long)v;
 }
@@ -1643,7 +1934,7 @@ ev_floor (ev_tstamp v)
 # include <sys/utsname.h>
 #endif
 
-
+ecb_noinline ecb_cold
 static unsigned int
 ev_linux_version (void)
 {
@@ -1683,7 +1974,7 @@ ev_linux_version (void)
 /*****************************************************************************/
 
 #if EV_AVOID_STDIO
-
+ecb_noinline ecb_cold
 static void
 ev_printerr (const char *msg)
 {
@@ -1700,7 +1991,7 @@ ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
 syserr_cb = cb;
 }
 
-
+ecb_noinline ecb_cold
 static void
 ev_syserr (const char *msg)
 {
@@ -1724,7 +2015,7 @@ ev_syserr (const char *msg)
 }
 
 static void *
-ev_realloc_emul (void *ptr,
+ev_realloc_emul (void *ptr, size_t size) EV_NOEXCEPT
 {
 /* some systems, notably openbsd and darwin, fail to properly
  * implement realloc (x, 0) (as required by both ansi c-89 and
@@ -1740,17 +2031,17 @@ ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT
 return 0;
 }
 
-static void *(*alloc)(void *ptr,
+static void *(*alloc)(void *ptr, size_t size) EV_NOEXCEPT = ev_realloc_emul;
 
 ecb_cold
 void
-ev_set_allocator (void *(*cb)(void *ptr,
+ev_set_allocator (void *(*cb)(void *ptr, size_t size) EV_NOEXCEPT) EV_NOEXCEPT
 {
 alloc = cb;
 }
 
 inline_speed void *
-ev_realloc (void *ptr,
+ev_realloc (void *ptr, size_t size)
 {
 ptr = alloc (ptr, size);
 
@@ -1781,8 +2072,8 @@ typedef struct
 WL head;
 unsigned char events; /* the events watched for */
 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
-unsigned char emask; /*
-unsigned char
+unsigned char emask; /* some backends store the actual kernel mask in here */
+unsigned char eflags; /* flags field for use by backends */
 #if EV_USE_EPOLL
 unsigned int egen; /* generation counter to counter epoll bugs */
 #endif
@@ -1846,7 +2137,7 @@ typedef struct
 
 #else
 
-EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
+EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
 #define VAR(name,decl) static decl;
 #include "ev_vars.h"
 #undef VAR
@@ -1856,8 +2147,8 @@ typedef struct
 #endif
 
 #if EV_FEATURE_API
-# define EV_RELEASE_CB if (
-# define EV_ACQUIRE_CB if (
+# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
+# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
 # define EV_INVOKE_PENDING invoke_cb (EV_A)
 #else
 # define EV_RELEASE_CB (void)0
@@ -1874,17 +2165,19 @@ ev_tstamp
 ev_time (void) EV_NOEXCEPT
 {
 #if EV_USE_REALTIME
-if (
+if (ecb_expect_true (have_realtime))
 {
 struct timespec ts;
 clock_gettime (CLOCK_REALTIME, &ts);
-return
+return EV_TS_GET (ts);
 }
 #endif
 
-
-
-
+{
+struct timeval tv;
+gettimeofday (&tv, 0);
+return EV_TV_GET (tv);
+}
 }
 #endif
 
@@ -1892,11 +2185,11 @@ inline_size ev_tstamp
 get_clock (void)
 {
 #if EV_USE_MONOTONIC
-if (
+if (ecb_expect_true (have_monotonic))
 {
 struct timespec ts;
 clock_gettime (CLOCK_MONOTONIC, &ts);
-return
+return EV_TS_GET (ts);
 }
 #endif
 
@@ -1914,7 +2207,7 @@ ev_now (EV_P) EV_NOEXCEPT
 void
 ev_sleep (ev_tstamp delay) EV_NOEXCEPT
 {
-if (delay > 0.)
+if (delay > EV_TS_CONST (0.))
 {
 #if EV_USE_NANOSLEEP
 struct timespec ts;
@@ -1924,7 +2217,7 @@ ev_sleep (ev_tstamp delay) EV_NOEXCEPT
 #elif defined _WIN32
 /* maybe this should round up, as ms is very low resolution */
 /* compared to select (µs) or nanosleep (ns) */
-Sleep ((unsigned long)(delay
+Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
 #else
 struct timeval tv;
 
@@ -1964,7 +2257,7 @@ array_nextsize (int elem, int cur, int cnt)
 return ncur;
 }
 
-
+ecb_noinline ecb_cold
 static void *
 array_realloc (int elem, void *base, int *cur, int cnt)
 {
@@ -1972,16 +2265,18 @@ array_realloc (int elem, void *base, int *cur, int cnt)
 return ev_realloc (base, elem * *cur);
 }
 
-#define
-
+#define array_needsize_noinit(base,offset,count)
+
+#define array_needsize_zerofill(base,offset,count) \
+memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
 
 #define array_needsize(type,base,cur,cnt,init) \
-if (
+if (ecb_expect_false ((cnt) > (cur))) \
 { \
 ecb_unused int ocur_ = (cur); \
 (base) = (type *)array_realloc \
 (sizeof (type), (base), &(cur), (cnt)); \
-init ((base)
+init ((base), ocur_, ((cur) - ocur_)); \
 }
 
 #if 0
@@ -2000,25 +2295,25 @@ array_realloc (int elem, void *base, int *cur, int cnt)
 /*****************************************************************************/
 
 /* dummy callback for pending events */
-
+ecb_noinline
 static void
 pendingcb (EV_P_ ev_prepare *w, int revents)
 {
 }
 
-
+ecb_noinline
 void
 ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
 {
 W w_ = (W)w;
 int pri = ABSPRI (w_);
 
-if (
+if (ecb_expect_false (w_->pending))
 pendings [pri][w_->pending - 1].events |= revents;
 else
 {
 w_->pending = ++pendingcnt [pri];
-array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending,
+array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit);
 pendings [pri][w_->pending - 1].w = w_;
 pendings [pri][w_->pending - 1].events = revents;
 }
@@ -2029,7 +2324,7 @@ ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
 inline_speed void
 feed_reverse (EV_P_ W w)
 {
-array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1,
+array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, array_needsize_noinit);
 rfeeds [rfeedcnt++] = w;
 }
 
@@ -2074,7 +2369,7 @@ fd_event (EV_P_ int fd, int revents)
 {
 ANFD *anfd = anfds + fd;
 
-if (
+if (ecb_expect_true (!anfd->reify))
 fd_event_nocheck (EV_A_ fd, revents);
 }
 
@@ -2092,8 +2387,20 @@ fd_reify (EV_P)
 {
 int i;
 
+/* most backends do not modify the fdchanges list in backend_modfiy.
+ * except io_uring, which has fixed-size buffers which might force us
+ * to handle events in backend_modify, causing fdchanges to be amended,
+ * which could result in an endless loop.
+ * to avoid this, we do not dynamically handle fds that were added
+ * during fd_reify. that means that for those backends, fdchangecnt
+ * might be non-zero during poll, which must cause them to not block.
+ * to not put too much of a burden on other backends, this detail
+ * needs to be handled in the backend.
+ */
+int changecnt = fdchangecnt;
+
 #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
-for (i = 0; i <
+for (i = 0; i < changecnt; ++i)
 {
 int fd = fdchanges [i];
 ANFD *anfd = anfds + fd;
@@ -2117,7 +2424,7 @@ fd_reify (EV_P)
 }
 #endif
 
-for (i = 0; i <
+for (i = 0; i < changecnt; ++i)
 {
 int fd = fdchanges [i];
 ANFD *anfd = anfds + fd;
@@ -2126,9 +2433,9 @@ fd_reify (EV_P)
 unsigned char o_events = anfd->events;
 unsigned char o_reify = anfd->reify;
 
-anfd->reify
+anfd->reify = 0;
 
-/*if (
+/*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
 {
 anfd->events = 0;
 
@@ -2143,7 +2450,14 @@ fd_reify (EV_P)
 backend_modify (EV_A_ fd, o_events, anfd->events);
 }
 
-fdchangecnt
+/* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
+ * this is a rare case (see beginning comment in this function), so we copy them to the
+ * front and hope the backend handles this case.
+ */
+if (ecb_expect_false (fdchangecnt != changecnt))
+memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
+
+fdchangecnt -= changecnt;
 }
 
 /* something about the given fd changed */
@@ -2152,12 +2466,12 @@ void
 fd_change (EV_P_ int fd, int flags)
 {
 unsigned char reify = anfds [fd].reify;
-anfds [fd].reify
+anfds [fd].reify = reify | flags;
 
-if (
+if (ecb_expect_true (!reify))
 {
 ++fdchangecnt;
-array_needsize (int, fdchanges, fdchangemax, fdchangecnt,
+array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
 fdchanges [fdchangecnt - 1] = fd;
 }
 }
@@ -2187,7 +2501,7 @@ fd_valid (int fd)
 }
 
 /* called on EBADF to verify fds */
-
+ecb_noinline ecb_cold
 static void
 fd_ebadf (EV_P)
 {
@@ -2200,7 +2514,7 @@ fd_ebadf (EV_P)
 }
 
 /* called on ENOMEM in select/poll to kill some fds and retry */
-
+ecb_noinline ecb_cold
 static void
 fd_enomem (EV_P)
 {
@@ -2215,7 +2529,7 @@ fd_enomem (EV_P)
 }
 
 /* usually called after fork if backend needs to re-arm all fds from scratch */
-
+ecb_noinline
 static void
 fd_rearm_all (EV_P)
 {
@@ -2279,19 +2593,19 @@ downheap (ANHE *heap, int N, int k)
 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
 
 /* find minimum child */
-if (
+if (ecb_expect_true (pos + DHEAP - 1 < E))
 {
 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
-if ( ANHE_at (pos [1])
-if ( ANHE_at (pos [2])
-if ( ANHE_at (pos [3])
+if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
 }
 else if (pos < E)
 {
 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
-if (pos + 1 < E && ANHE_at (pos [1])
-if (pos + 2 < E && ANHE_at (pos [2])
-if (pos + 3 < E && ANHE_at (pos [3])
+if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
 }
 else
 break;
@@ -2309,7 +2623,7 @@ downheap (ANHE *heap, int N, int k)
 ev_active (ANHE_w (he)) = k;
 }
 
-#else /* 4HEAP */
+#else /* not 4HEAP */
 
 #define HEAP0 1
 #define HPARENT(k) ((k) >> 1)
@@ -2336,7 +2650,7 @@ downheap (ANHE *heap, int N, int k)
 
 heap [k] = heap [c];
 ev_active (ANHE_w (heap [k])) = k;
-
+
 k = c;
 }
 
@@ -2391,7 +2705,7 @@ reheap (ANHE *heap, int N)
 
 /*****************************************************************************/
 
-/* associate signal watchers to a signal
+/* associate signal watchers to a signal */
 typedef struct
 {
 EV_ATOMIC_T pending;
@@ -2407,7 +2721,7 @@ static ANSIG signals [EV_NSIG - 1];
 
 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
 
-
+ecb_noinline ecb_cold
 static void
 evpipe_init (EV_P)
 {
@@ -2458,7 +2772,7 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
 {
 ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
 
-if (
+if (ecb_expect_true (*flag))
 return;
 
 *flag = 1;
@@ -2545,7 +2859,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
 ECB_MEMORY_FENCE;
 
 for (i = EV_NSIG - 1; i--; )
-if (
+if (ecb_expect_false (signals [i].pending))
 ev_feed_signal_event (EV_A_ i + 1);
 }
 #endif
@@ -2596,13 +2910,13 @@ ev_sighandler (int signum)
 ev_feed_signal (signum);
 }
 
-
+ecb_noinline
 void
 ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
 {
 WL w;
 
-if (
+if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
 return;
 
 --signum;
@@ -2611,7 +2925,7 @@ ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
 /* it is permissible to try to feed a signal to the wrong loop */
 /* or, likely more useful, feeding a signal nobody is waiting for */
 
-if (
+if (ecb_expect_false (signals [signum].loop != EV_A))
 return;
 #endif
 
@@ -2705,6 +3019,57 @@ childcb (EV_P_ ev_signal *sw, int revents)
 
 /*****************************************************************************/
 
+#if EV_USE_TIMERFD
+
+static void periodics_reschedule (EV_P);
+
+static void
+timerfdcb (EV_P_ ev_io *iow, int revents)
+{
+struct itimerspec its = { 0 };
+
+its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2;
+timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
+
+ev_rt_now = ev_time ();
+/* periodics_reschedule only needs ev_rt_now */
+/* but maybe in the future we want the full treatment. */
+/*
+now_floor = EV_TS_CONST (0.);
+time_update (EV_A_ EV_TSTAMP_HUGE);
+*/
+#if EV_PERIODIC_ENABLE
+periodics_reschedule (EV_A);
+#endif
+}
+
+ecb_noinline ecb_cold
+static void
+evtimerfd_init (EV_P)
+{
+if (!ev_is_active (&timerfd_w))
+{
+timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
+
+if (timerfd >= 0)
+{
+fd_intern (timerfd); /* just to be sure */
+
+ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
+ev_set_priority (&timerfd_w, EV_MINPRI);
+ev_io_start (EV_A_ &timerfd_w);
+ev_unref (EV_A); /* watcher should not keep loop alive */
+
+/* (re-) arm timer */
+timerfdcb (EV_A_ 0, 0);
+}
+}
+}
+
+#endif
+
+/*****************************************************************************/
+
 #if EV_USE_IOCP
 # include "ev_iocp.c"
 #endif
@@ -2717,6 +3082,12 @@ childcb (EV_P_ ev_signal *sw, int revents)
 #if EV_USE_EPOLL
 # include "ev_epoll.c"
 #endif
+#if EV_USE_LINUXAIO
+# include "ev_linuxaio.c"
+#endif
+#if EV_USE_IOURING
+# include "ev_iouring.c"
+#endif
 #if EV_USE_POLL
 # include "ev_poll.c"
 #endif
@@ -2754,12 +3125,14 @@ ev_supported_backends (void) EV_NOEXCEPT
 {
 unsigned int flags = 0;
 
-if (EV_USE_PORT
-if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
-if (EV_USE_EPOLL
-if (
-if (
-
+if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
+if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
+if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
+if (EV_USE_LINUXAIO && ev_linux_version () >= 0x041300) flags |= EVBACKEND_LINUXAIO; /* 4.19+ */
+if (EV_USE_IOURING && ev_linux_version () >= 0x050601 ) flags |= EVBACKEND_IOURING; /* 5.6.1+ */
+if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
+if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
+
 return flags;
 }
 
@@ -2769,21 +3142,31 @@ ev_recommended_backends (void) EV_NOEXCEPT
 {
 unsigned int flags = ev_supported_backends ();
 
-
-
-#elif defined(__NetBSD__)
-/* kqueue is borked on everything but netbsd apparently */
-/* it usually doesn't work correctly on anything but sockets and pipes */
-#else
+/* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
+#if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14)
 /* only select works correctly on that "unix-certified" platform */
 flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
 flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
 #endif
 
+#if !defined(__NetBSD__) && !defined(__APPLE__)
+/* kqueue is borked on everything but netbsd and osx >= 10.12.2 apparently */
+/* it usually doesn't work correctly on anything but sockets and pipes */
+flags &= ~EVBACKEND_KQUEUE;
+#endif
+
 #ifdef __FreeBSD__
 flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
 #endif
 
+#ifdef __linux__
+/* NOTE: linuxaio is very experimental, never recommend */
+flags &= ~EVBACKEND_LINUXAIO;
+
+/* NOTE: io_uring is super experimental, never recommend */
+flags &= ~EVBACKEND_IOURING;
+#endif
+
 return flags;
 }
 
@@ -2791,12 +3174,14 @@ ecb_cold
 unsigned int
 ev_embeddable_backends (void) EV_NOEXCEPT
 {
-int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
+int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
 
 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
 flags &= ~EVBACKEND_EPOLL;
 
+/* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
+
 return flags;
 }
 
@@ -2858,7 +3243,7 @@ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)
 /* initialise a loop structure, must be zero-initialised */
-
+ecb_noinline ecb_cold
 static void
 loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 {
@@ -2923,27 +3308,36 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 #if EV_USE_SIGNALFD
   sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
 #endif
+#if EV_USE_TIMERFD
+  timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
+#endif

   if (!(flags & EVBACKEND_MASK))
     flags |= ev_recommended_backends ();

 #if EV_USE_IOCP
-  if (!backend && (flags & EVBACKEND_IOCP
+  if (!backend && (flags & EVBACKEND_IOCP    )) backend = iocp_init     (EV_A_ flags);
 #endif
 #if EV_USE_PORT
-  if (!backend && (flags & EVBACKEND_PORT
+  if (!backend && (flags & EVBACKEND_PORT    )) backend = port_init     (EV_A_ flags);
 #endif
 #if EV_USE_KQUEUE
-  if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init
+  if (!backend && (flags & EVBACKEND_KQUEUE  )) backend = kqueue_init   (EV_A_ flags);
+#endif
+#if EV_USE_IOURING
+  if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init  (EV_A_ flags);
+#endif
+#if EV_USE_LINUXAIO
+  if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
 #endif
 #if EV_USE_EPOLL
-  if (!backend && (flags & EVBACKEND_EPOLL
+  if (!backend && (flags & EVBACKEND_EPOLL   )) backend = epoll_init    (EV_A_ flags);
 #endif
 #if EV_USE_POLL
-  if (!backend && (flags & EVBACKEND_POLL
+  if (!backend && (flags & EVBACKEND_POLL    )) backend = poll_init     (EV_A_ flags);
 #endif
 #if EV_USE_SELECT
-  if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init
+  if (!backend && (flags & EVBACKEND_SELECT  )) backend = select_init   (EV_A_ flags);
 #endif

   ev_prepare_init (&pending_w, pendingcb);
@@ -2970,7 +3364,7 @@ ev_loop_destroy (EV_P)
 #if EV_CLEANUP_ENABLE
   /* queue cleanup watchers (and execute them) */
-  if (
+  if (ecb_expect_false (cleanupcnt))
     {
       queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
       EV_INVOKE_PENDING;
@@ -2999,6 +3393,11 @@ ev_loop_destroy (EV_P)
     close (sigfd);
 #endif

+#if EV_USE_TIMERFD
+  if (ev_is_active (&timerfd_w))
+    close (timerfd);
+#endif
+
 #if EV_USE_INOTIFY
   if (fs_fd >= 0)
     close (fs_fd);
@@ -3008,22 +3407,28 @@ ev_loop_destroy (EV_P)
     close (backend_fd);

 #if EV_USE_IOCP
-  if (backend == EVBACKEND_IOCP
+  if (backend == EVBACKEND_IOCP    ) iocp_destroy     (EV_A);
 #endif
 #if EV_USE_PORT
-  if (backend == EVBACKEND_PORT
+  if (backend == EVBACKEND_PORT    ) port_destroy     (EV_A);
 #endif
 #if EV_USE_KQUEUE
-  if (backend == EVBACKEND_KQUEUE) kqueue_destroy
+  if (backend == EVBACKEND_KQUEUE  ) kqueue_destroy   (EV_A);
+#endif
+#if EV_USE_IOURING
+  if (backend == EVBACKEND_IOURING ) iouring_destroy  (EV_A);
+#endif
+#if EV_USE_LINUXAIO
+  if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
 #endif
 #if EV_USE_EPOLL
-  if (backend == EVBACKEND_EPOLL
+  if (backend == EVBACKEND_EPOLL   ) epoll_destroy    (EV_A);
 #endif
 #if EV_USE_POLL
-  if (backend == EVBACKEND_POLL
+  if (backend == EVBACKEND_POLL    ) poll_destroy     (EV_A);
 #endif
 #if EV_USE_SELECT
-  if (backend == EVBACKEND_SELECT) select_destroy
+  if (backend == EVBACKEND_SELECT  ) select_destroy   (EV_A);
 #endif

   for (i = NUMPRI; i--; )
@@ -3075,34 +3480,62 @@ inline_size void
 loop_fork (EV_P)
 {
 #if EV_USE_PORT
-  if (backend == EVBACKEND_PORT
+  if (backend == EVBACKEND_PORT    ) port_fork     (EV_A);
 #endif
 #if EV_USE_KQUEUE
-  if (backend == EVBACKEND_KQUEUE) kqueue_fork
+  if (backend == EVBACKEND_KQUEUE  ) kqueue_fork   (EV_A);
+#endif
+#if EV_USE_IOURING
+  if (backend == EVBACKEND_IOURING ) iouring_fork  (EV_A);
+#endif
+#if EV_USE_LINUXAIO
+  if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
 #endif
 #if EV_USE_EPOLL
-  if (backend == EVBACKEND_EPOLL
+  if (backend == EVBACKEND_EPOLL   ) epoll_fork    (EV_A);
 #endif
 #if EV_USE_INOTIFY
   infy_fork (EV_A);
 #endif

-  if (ev_is_active (&pipe_w) && postfork != 2)
+  if (postfork != 2)
     {
+#if EV_USE_SIGNALFD
+      /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */
+#endif
+
+#if EV_USE_TIMERFD
+      if (ev_is_active (&timerfd_w))
+        {
+          ev_ref (EV_A);
+          ev_io_stop (EV_A_ &timerfd_w);
+
+          close (timerfd);
+          timerfd = -2;
+
+          evtimerfd_init (EV_A);
+          /* reschedule periodics, in case we missed something */
+          ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
+        }
+#endif
+
+#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
+      if (ev_is_active (&pipe_w))
+        {
+          /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
+
+          ev_ref (EV_A);
+          ev_io_stop (EV_A_ &pipe_w);
+
+          if (evpipe [0] >= 0)
+            EV_WIN32_CLOSE_FD (evpipe [0]);
+
+          evpipe_init (EV_A);
+          /* iterate over everything, in case we missed something before */
+          ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
+        }
+#endif
     }
-#endif

   postfork = 0;
 }
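With this change loop_fork re-arms the timerfd and the signal pipe whenever postfork is set, not only when the pipe watcher happens to be active. From the embedder's side the machinery is still driven by the documented fork API; a minimal sketch (assuming only ev_loop_fork and the default loop, not code taken from the gem) looks like:

#include <stdlib.h>
#include <unistd.h>
#include "ev.h"

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;

  pid_t pid = fork ();

  if (pid == 0)
    {
      /* child: tell libev the loop crossed a fork(); the kernel state
         (backend fds, signal pipe, timerfd) is re-armed lazily on the
         next ev_run iteration via the loop_fork path shown above */
      ev_loop_fork (loop);
      ev_run (loop, 0);
      _exit (0);
    }

  /* parent keeps using the loop unchanged */
  ev_run (loop, 0);
  return 0;
}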
@@ -3128,7 +3561,7 @@ ev_loop_new (unsigned int flags) EV_NOEXCEPT
 #if EV_VERIFY
-
+ecb_noinline ecb_cold
 static void
 verify_watcher (EV_P_ W w)
@@ -3138,7 +3571,7 @@ verify_watcher (EV_P_ W w)
-
+ecb_noinline ecb_cold
 static void
 verify_heap (EV_P_ ANHE *heap, int N)
@@ -3154,7 +3587,7 @@ verify_heap (EV_P_ ANHE *heap, int N)
-
+ecb_noinline ecb_cold
 static void
 array_verify (EV_P_ W *ws, int cnt)
@@ -3313,7 +3746,7 @@ ev_pending_count (EV_P) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_invoke_pending (EV_P)
@@ -3342,7 +3775,7 @@ ev_invoke_pending (EV_P)
 inline_size void
 idle_reify (EV_P)
 {
-  if (
+  if (ecb_expect_false (idleall))
@@ -3382,7 +3815,7 @@ timers_reify (EV_P)
-      assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
+      assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
@@ -3401,7 +3834,7 @@ timers_reify (EV_P)
 #if EV_PERIODIC_ENABLE
-
+ecb_noinline
 static void
 periodic_recalc (EV_P_ ev_periodic *w)
@@ -3414,7 +3847,7 @@ periodic_recalc (EV_P_ ev_periodic *w)
   /* when resolution fails us, we use ev_rt_now */
-  if (
+  if (ecb_expect_false (nat == at))
@@ -3470,7 +3903,7 @@ periodics_reify (EV_P)
 /* TODO: maybe ensure that at least one event happens when jumping forward? */
-
+ecb_noinline ecb_cold
 static void
 periodics_reschedule (EV_P)
@@ -3494,7 +3927,7 @@ periodics_reschedule (EV_P)
 /* adjust all timers by a given offset */
-
+ecb_noinline ecb_cold
 static void
 timers_reschedule (EV_P_ ev_tstamp adjust)
@@ -3514,7 +3947,7 @@ inline_speed void
 time_update (EV_P_ ev_tstamp max_block)
 #if EV_USE_MONOTONIC
-  if (
+  if (ecb_expect_true (have_monotonic))
@@ -3523,7 +3956,7 @@ time_update (EV_P_ ev_tstamp max_block)
      /* interpolate in the meantime */
-      if (
+      if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
@@ -3547,7 +3980,7 @@ time_update (EV_P_ ev_tstamp max_block)
      diff = odiff - rtmn_diff;
-      if (
+      if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
        return; /* all is well */
@@ -3566,7 +3999,7 @@ time_update (EV_P_ ev_tstamp max_block)
      ev_rt_now = ev_time ();
-      if (
+      if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
        {
          /* adjust timers. this is easy, as the offset is the same for all of them */
          timers_reschedule (EV_A_ ev_rt_now - mn_now);
@@ -3586,11 +4019,13 @@ struct ev_poll_args {
 };

 static
-
+void * ev_backend_poll(void *ptr)
 {
   struct ev_poll_args *args = (struct ev_poll_args *)ptr;
   struct ev_loop *loop = args->loop;
   backend_poll (EV_A_ args->waittime);
+
+  return NULL;
 }
 /* ######################################## */
@@ -3598,7 +4033,7 @@ int
 ev_run (EV_P_ int flags)
 {
   /* ########## NIO4R PATCHERY HO! ########## */
-
+  struct ev_poll_args poll_args;
   /* ######################################## */
@@ -3618,8 +4053,8 @@ ev_run (EV_P_ int flags)
 #ifndef _WIN32
-      if (
-      if (
+      if (ecb_expect_false (curpid)) /* penalise the forking check even more */
+        if (ecb_expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
@@ -3628,7 +4063,7 @@ ev_run (EV_P_ int flags)
 #if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
-      if (
+      if (ecb_expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
@@ -3638,18 +4073,18 @@ ev_run (EV_P_ int flags)
 #if EV_PREPARE_ENABLE
      /* queue prepare watchers (and execute them) */
-      if (
+      if (ecb_expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          EV_INVOKE_PENDING;
        }
 #endif

-      if (
+      if (ecb_expect_false (loop_done))
        break;

      /* we might have forked, so reify kernel state if necessary */
-      if (
+      if (ecb_expect_false (postfork))
        loop_fork (EV_A);
@@ -3664,16 +4099,28 @@ ev_run (EV_P_ int flags)
      /* update time to cancel out callback processing overhead */
-      time_update (EV_A_
+      time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));

      /* from now on, we want a pipe-wake-up */
      pipe_write_wanted = 1;

      ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */

-      if (
+      if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
        {
-          waittime = MAX_BLOCKTIME;
+          waittime = EV_TS_CONST (MAX_BLOCKTIME);
+
+#if EV_USE_TIMERFD
+          /* sleep a lot longer when we can reliably detect timejumps */
+          if (ecb_expect_true (timerfd >= 0))
+            waittime = EV_TS_CONST (MAX_BLOCKTIME2);
+#endif
+#if !EV_PERIODIC_ENABLE
+          /* without periodics but with monotonic clock there is no need */
+          /* for any time jump detection, so sleep longer */
+          if (ecb_expect_true (have_monotonic))
+            waittime = EV_TS_CONST (MAX_BLOCKTIME2);
+#endif

          if (timercnt)
            {
@@ -3690,23 +4137,28 @@ ev_run (EV_P_ int flags)
          /* don't let timeouts decrease the waittime below timeout_blocktime */
-          if (
+          if (ecb_expect_false (waittime < timeout_blocktime))
            waittime = timeout_blocktime;

-          /*
+          /* now there are two more special cases left, either we have
+           * already-expired timers, so we should not sleep, or we have timers
+           * that expire very soon, in which case we need to wait for a minimum
+           * amount of time for some event loop backends.
+           */
+          if (ecb_expect_false (waittime < backend_mintime))
+            waittime = waittime <= EV_TS_CONST (0.)
+                       ? EV_TS_CONST (0.)
+                       : backend_mintime;

          /* extra check because io_blocktime is commonly 0 */
-          if (
+          if (ecb_expect_false (io_blocktime))
            {
              sleeptime = io_blocktime - (mn_now - prev_mn_now);

              if (sleeptime > waittime - backend_mintime)
                sleeptime = waittime - backend_mintime;

-              if (
+              if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
                {
                  ev_sleep (sleeptime);
                  waittime -= sleeptime;
@@ -3777,7 +4229,6 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
          ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
        }

-
      /* update ev_rt_now, do magic */
      time_update (EV_A_ waittime + sleeptime);
    }
@@ -3795,13 +4246,13 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
 #if EV_CHECK_ENABLE
      /* queue check watchers, to be executed first */
-      if (
+      if (ecb_expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
 #endif

      EV_INVOKE_PENDING;
    }
-  while (
+  while (ecb_expect_true (
    activecnt
    && !loop_done
    && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
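The rewritten loop condition still honours the EVRUN_ONCE and EVRUN_NOWAIT flags. As a reminder of what those flags mean to a caller, a short sketch against the documented ev_run API (an illustration, not code from the gem):

#include "ev.h"

/* drain whatever is already pending without blocking, e.g. when driven
   from a foreign main loop; EVRUN_NOWAIT makes ev_run poll once and return */
static void
pump_nonblocking (struct ev_loop *loop)
{
  ev_run (loop, EVRUN_NOWAIT);
}

/* block for exactly one iteration: one poll plus callback invocation */
static void
pump_once (struct ev_loop *loop)
{
  ev_run (loop, EVRUN_ONCE);
}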
@@ -3838,7 +4289,7 @@ ev_unref (EV_P) EV_NOEXCEPT
 void
 ev_now_update (EV_P) EV_NOEXCEPT
 {
-  time_update (EV_A_
+  time_update (EV_A_ EV_TSTAMP_HUGE);
 }
@@ -3875,7 +4326,7 @@ wlist_del (WL *head, WL elem)
   while (*head)
     {
-      if (
+      if (ecb_expect_true (*head == elem))
        {
          *head = elem->next;
          break;
@@ -3902,7 +4353,7 @@ ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
   W w_ = (W)w;
   int pending = w_->pending;

-  if (
+  if (ecb_expect_true (pending))
    {
      ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
      p->w = (W)&pending_w;
@@ -3939,22 +4390,25 @@ ev_stop (EV_P_ W w)
 /*****************************************************************************/

-
+ecb_noinline
 void
 ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
 {
   int fd = w->fd;

-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   assert (("libev: ev_io_start called with negative fd", fd >= 0));
   assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));

+#if EV_VERIFY >= 2
+  assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd)));
+#endif
   EV_FREQUENT_CHECK;

   ev_start (EV_A_ (W)w, 1);
-  array_needsize (ANFD, anfds, anfdmax, fd + 1,
+  array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill);
   wlist_add (&anfds[fd].head, (WL)w);
@@ -3966,16 +4420,19 @@ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;

   assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

+#if EV_VERIFY >= 2
+  assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd)));
+#endif
   EV_FREQUENT_CHECK;

   wlist_del (&anfds[w->fd].head, (WL)w);
@@ -3986,11 +4443,11 @@ ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   ev_at (w) += mn_now;
@@ -4001,7 +4458,7 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
   ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
-  array_needsize (ANHE, timers, timermax, ev_active (w) + 1,
+  array_needsize (ANHE, timers, timermax, ev_active (w) + 1, array_needsize_noinit);
@@ -4011,12 +4468,12 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4028,7 +4485,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
   --timercnt;
-  if (
+  if (ecb_expect_true (active < timercnt + HEAP0))
     {
       timers [active] = timers [timercnt + HEAP0];
       adjustheap (timers, timercnt, active);
@@ -4042,7 +4499,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
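These watcher hunks mostly add ecb_noinline attributes, branch-prediction hints, and EV_VERIFY fd checks around ev_io and ev_timer start/stop; the public calling convention is unchanged. For reference, the usual shape of a timer program against this API (assuming the default loop; an illustrative sketch, not code from the gem):

#include <stdio.h>
#include "ev.h"

static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  puts ("timer fired");
  ev_break (EV_A_ EVBREAK_ONE);
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  ev_timer timeout_watcher;

  /* one-shot timer: fires 0.5s after start, no repeat */
  ev_timer_init (&timeout_watcher, timeout_cb, 0.5, 0.);
  ev_timer_start (loop, &timeout_watcher);

  ev_run (loop, 0);
  return 0;
}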
@@ -4073,17 +4530,22 @@ ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
 ev_tstamp
 ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
-  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
+  return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
 }

 #if EV_PERIODIC_ENABLE
-
+ecb_noinline
 void
 ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

+#if EV_USE_TIMERFD
+  if (timerfd == -2)
+    evtimerfd_init (EV_A);
+#endif
+
   if (w->reschedule_cb)
     ev_at (w) = w->reschedule_cb (w, ev_rt_now);
   else if (w->interval)
@@ -4098,7 +4560,7 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
   ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
-  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1,
+  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, array_needsize_noinit);
@@ -4108,12 +4570,12 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4125,7 +4587,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
   --periodiccnt;
-  if (
+  if (ecb_expect_true (active < periodiccnt + HEAP0))
     {
       periodics [active] = periodics [periodiccnt + HEAP0];
       adjustheap (periodics, periodiccnt, active);
@@ -4137,7 +4599,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
@@ -4153,11 +4615,11 @@ ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
 #if EV_SIGNAL_ENABLE

-
+ecb_noinline
 void
 ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
@@ -4236,12 +4698,12 @@ ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
-
+ecb_noinline
 void
 ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4284,7 +4746,7 @@ ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
 #if EV_MULTIPLICITY
   assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
 #endif
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
@@ -4299,7 +4761,7 @@ void
 ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
|
@@ -4323,14 +4785,14 @@ ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
|
|
4323
4785
|
#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
|
4324
4786
|
#define MIN_STAT_INTERVAL 0.1074891
|
4325
4787
|
|
4326
|
-
|
4788
|
+
ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
|
4327
4789
|
|
4328
4790
|
#if EV_USE_INOTIFY
|
4329
4791
|
|
4330
4792
|
/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
|
4331
4793
|
# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
|
4332
4794
|
|
4333
|
-
|
4795
|
+
ecb_noinline
|
4334
4796
|
static void
|
4335
4797
|
infy_add (EV_P_ ev_stat *w)
|
4336
4798
|
{
|
@@ -4405,7 +4867,7 @@ infy_add (EV_P_ ev_stat *w)
|
|
4405
4867
|
if (ev_is_active (&w->timer)) ev_unref (EV_A);
|
4406
4868
|
}
|
4407
4869
|
|
4408
|
-
|
4870
|
+
ecb_noinline
|
4409
4871
|
static void
|
4410
4872
|
infy_del (EV_P_ ev_stat *w)
|
4411
4873
|
{
|
@@ -4423,7 +4885,7 @@ infy_del (EV_P_ ev_stat *w)
|
|
4423
4885
|
inotify_rm_watch (fs_fd, wd);
|
4424
4886
|
}
|
4425
4887
|
|
4426
|
-
|
4888
|
+
ecb_noinline
|
4427
4889
|
static void
|
4428
4890
|
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
|
4429
4891
|
{
|
@@ -4579,7 +5041,7 @@ ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT
|
|
4579
5041
|
w->attr.st_nlink = 1;
|
4580
5042
|
}
|
4581
5043
|
|
4582
|
-
|
5044
|
+
ecb_noinline
|
4583
5045
|
static void
|
4584
5046
|
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
|
4585
5047
|
{
|
@@ -4623,7 +5085,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   ev_stat_stat (EV_A_ w);
@@ -4655,7 +5117,7 @@ void
 ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4680,7 +5142,7 @@ ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
 ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   pri_adjust (EV_A_ (W)w);
@@ -4693,7 +5155,7 @@ ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
     ev_start (EV_A_ (W)w, active);
-    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active,
+    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, array_needsize_noinit);
     idles [ABSPRI (w)][active - 1] = w;
@@ -4704,7 +5166,7 @@ void
 ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4727,13 +5189,13 @@ ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
 ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   ev_start (EV_A_ (W)w, ++preparecnt);
-  array_needsize (ev_prepare *, prepares, preparemax, preparecnt,
+  array_needsize (ev_prepare *, prepares, preparemax, preparecnt, array_needsize_noinit);
   prepares [preparecnt - 1] = w;
@@ -4743,7 +5205,7 @@ void
 ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4765,13 +5227,13 @@ ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
 ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   ev_start (EV_A_ (W)w, ++checkcnt);
-  array_needsize (ev_check *, checks, checkmax, checkcnt,
+  array_needsize (ev_check *, checks, checkmax, checkcnt, array_needsize_noinit);
   checks [checkcnt - 1] = w;
@@ -4781,7 +5243,7 @@ void
 ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4800,7 +5262,7 @@ ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
 #if EV_EMBED_ENABLE
-
+ecb_noinline
 void
 ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
@@ -4834,6 +5296,7 @@ embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
+#if EV_FORK_ENABLE
 static void
 embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
 {
@@ -4850,6 +5313,7 @@ embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
   ev_embed_start (EV_A_ w);
 }
+#endif

 #if 0
 static void
@@ -4862,7 +5326,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
 ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
@@ -4880,8 +5344,10 @@ ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
   ev_set_priority (&w->prepare, EV_MINPRI);
   ev_prepare_start (EV_A_ &w->prepare);

+#if EV_FORK_ENABLE
   ev_fork_init (&w->fork, embed_fork_cb);
   ev_fork_start (EV_A_ &w->fork);
+#endif

   /*ev_idle_init (&w->idle, e,bed_idle_cb);*/
@@ -4894,14 +5360,16 @@ void
 ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;

   ev_io_stop (EV_A_ &w->io);
   ev_prepare_stop (EV_A_ &w->prepare);
+#if EV_FORK_ENABLE
   ev_fork_stop (EV_A_ &w->fork);
+#endif

   ev_stop (EV_A_ (W)w);
@@ -4913,13 +5381,13 @@ ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
 ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   ev_start (EV_A_ (W)w, ++forkcnt);
-  array_needsize (ev_fork *, forks, forkmax, forkcnt,
+  array_needsize (ev_fork *, forks, forkmax, forkcnt, array_needsize_noinit);
   forks [forkcnt - 1] = w;
@@ -4929,7 +5397,7 @@ void
 ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4951,13 +5419,13 @@ ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
 ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   ev_start (EV_A_ (W)w, ++cleanupcnt);
-  array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt,
+  array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, array_needsize_noinit);
   cleanups [cleanupcnt - 1] = w;

   /* cleanup watchers should never keep a refcount on the loop */
@@ -4969,7 +5437,7 @@ void
 ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
@@ -4992,7 +5460,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;

   w->sent = 0;
@@ -5002,7 +5470,7 @@ ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
   ev_start (EV_A_ (W)w, ++asynccnt);
-  array_needsize (ev_async *, asyncs, asyncmax, asynccnt,
+  array_needsize (ev_async *, asyncs, asyncmax, asynccnt, array_needsize_noinit);
   asyncs [asynccnt - 1] = w;
@@ -5012,7 +5480,7 @@ void
 ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
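The async-watcher hunks follow the same pattern of hint and allocation-policy changes. Since ev_async_send is the call libev documents as safe to invoke from other threads, a small cross-thread wake-up sketch (assuming POSIX threads and the default loop; illustrative only, not part of the diff):

#include <pthread.h>
#include <stdio.h>
#include "ev.h"

static ev_async wakeup_watcher;

static void
wakeup_cb (EV_P_ ev_async *w, int revents)
{
  puts ("woken up from another thread");
  ev_break (EV_A_ EVBREAK_ALL);
}

static void *
worker (void *arg)
{
  struct ev_loop *loop = (struct ev_loop *)arg;
  /* ev_async_send may be called from any thread; it wakes the loop */
  ev_async_send (loop, &wakeup_watcher);
  return NULL;
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  pthread_t tid;

  ev_async_init (&wakeup_watcher, wakeup_cb);
  ev_async_start (loop, &wakeup_watcher);

  pthread_create (&tid, NULL, worker, loop);
  ev_run (loop, 0);
  pthread_join (tid, NULL);
  return 0;
}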
@@ -5081,12 +5549,6 @@ ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, vo
 {
   struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));

-  if (expect_false (!once))
-    {
-      cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
-      return;
-    }
-
   once->cb = cb;
   once->arg = arg;
@@ -5225,4 +5687,3 @@ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT
 #if EV_MULTIPLICITY
 #include "ev_wrap.h"
 #endif
-