iodine 0.7.41 → 0.7.45

Files changed (44)
  1. checksums.yaml +4 -4
  2. data/.github/ISSUE_TEMPLATE/bug_report.md +1 -1
  3. data/.gitignore +1 -0
  4. data/CHANGELOG.md +24 -0
  5. data/README.md +2 -2
  6. data/SPEC-PubSub-Draft.md +89 -47
  7. data/SPEC-WebSocket-Draft.md +92 -55
  8. data/examples/async_task.ru +92 -0
  9. data/ext/iodine/extconf.rb +21 -16
  10. data/ext/iodine/fio.c +1108 -162
  11. data/ext/iodine/fio.h +49 -13
  12. data/ext/iodine/fio_cli.c +1 -1
  13. data/ext/iodine/fio_tls_missing.c +8 -0
  14. data/ext/iodine/fio_tls_openssl.c +8 -0
  15. data/ext/iodine/fio_tmpfile.h +13 -1
  16. data/ext/iodine/fiobj_data.c +6 -4
  17. data/ext/iodine/fiobj_data.h +2 -1
  18. data/ext/iodine/fiobj_hash.c +32 -6
  19. data/ext/iodine/fiobj_mustache.c +9 -0
  20. data/ext/iodine/fiobj_numbers.c +86 -8
  21. data/ext/iodine/fiobj_str.c +24 -11
  22. data/ext/iodine/fiobject.c +1 -1
  23. data/ext/iodine/fiobject.h +5 -3
  24. data/ext/iodine/http.c +66 -10
  25. data/ext/iodine/http1.c +2 -1
  26. data/ext/iodine/http1_parser.h +1065 -103
  27. data/ext/iodine/http_internal.c +1 -0
  28. data/ext/iodine/http_internal.h +4 -2
  29. data/ext/iodine/iodine.c +66 -1
  30. data/ext/iodine/iodine.h +3 -0
  31. data/ext/iodine/iodine_caller.c +48 -8
  32. data/ext/iodine/iodine_connection.c +24 -8
  33. data/ext/iodine/iodine_http.c +32 -8
  34. data/ext/iodine/iodine_mustache.c +2 -4
  35. data/ext/iodine/iodine_rack_io.c +21 -0
  36. data/ext/iodine/iodine_tcp.c +14 -0
  37. data/ext/iodine/iodine_tls.c +8 -0
  38. data/ext/iodine/mustache_parser.h +4 -0
  39. data/ext/iodine/redis_engine.c +14 -11
  40. data/ext/iodine/websockets.c +7 -3
  41. data/iodine.gemspec +5 -4
  42. data/lib/iodine/version.rb +1 -1
  43. data/lib/rack/handler/iodine.rb +6 -0
  44. metadata +15 -13
data/ext/iodine/fio.c CHANGED
@@ -4,6 +4,11 @@ License: MIT
4
4
 
5
5
  Feel free to copy, use and enjoy according to the license provided.
6
6
  ***************************************************************************** */
7
+ #ifdef __MINGW32__
8
+ /** iodine/ruby specific, don't use: #define FD_SETSIZE 1024 */
9
+ #define FIO_FORCE_MALLOC 1
10
+ #define FIO_DISABLE_HOT_RESTART 1
11
+ #endif
7
12
 
8
13
  #include <fio.h>
9
14
 
@@ -18,9 +23,12 @@ Feel free to copy, use and enjoy according to the license provided.
18
23
  #include <errno.h>
19
24
  #include <limits.h>
20
25
  #include <pthread.h>
26
+ #ifndef __MINGW32__
21
27
  #include <sys/mman.h>
28
+ #endif
22
29
  #include <unistd.h>
23
30
 
31
+ #ifndef __MINGW32__
24
32
  #include <netdb.h>
25
33
  #include <netinet/in.h>
26
34
  #include <netinet/tcp.h>
@@ -29,12 +37,27 @@ Feel free to copy, use and enjoy according to the license provided.
29
37
  #include <sys/ioctl.h>
30
38
  #include <sys/resource.h>
31
39
  #include <sys/socket.h>
40
+ #endif
32
41
  #include <sys/stat.h>
33
42
  #include <sys/types.h>
43
+ #ifndef __MINGW32__
34
44
  #include <sys/un.h>
35
45
  #include <sys/wait.h>
36
46
 
37
47
  #include <arpa/inet.h>
48
+ #endif
49
+
50
+ #ifdef __MINGW32__
51
+ #include <windef.h>
52
+ #include <winsock2.h>
53
+ #include <ws2tcpip.h>
54
+ #endif
55
+
56
+ #if HAVE_OPENSSL
57
+ #include <openssl/bio.h>
58
+ #include <openssl/err.h>
59
+ #include <openssl/ssl.h>
60
+ #endif
38
61
 
39
62
  #if HAVE_OPENSSL
40
63
  #include <openssl/bio.h>
@@ -47,12 +70,14 @@ Feel free to copy, use and enjoy according to the license provided.
47
70
  #define FIO_ENGINE_POLL 0
48
71
  #endif
49
72
 
50
- #if !FIO_ENGINE_POLL && !FIO_ENGINE_EPOLL && !FIO_ENGINE_KQUEUE
73
+ #if !FIO_ENGINE_POLL && !FIO_ENGINE_EPOLL && !FIO_ENGINE_KQUEUE && !FIO_ENGINE_WSAPOLL
51
74
  #if defined(__linux__)
52
75
  #define FIO_ENGINE_EPOLL 1
53
76
  #elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
54
77
  defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__)
55
78
  #define FIO_ENGINE_KQUEUE 1
79
+ #elif defined(__MINGW32__)
80
+ #define FIO_ENGINE_WSAPOLL 1
56
81
  #else
57
82
  #define FIO_ENGINE_POLL 1
58
83
  #endif
@@ -68,8 +93,10 @@ Feel free to copy, use and enjoy according to the license provided.
68
93
  #endif
69
94
 
70
95
  #ifndef FIO_USE_URGENT_QUEUE
96
+ #ifndef __MINGW32__
71
97
  #define FIO_USE_URGENT_QUEUE 1
72
98
  #endif
99
+ #endif
73
100
 
74
101
  #ifndef DEBUG_SPINLOCK
75
102
  #define DEBUG_SPINLOCK 0
@@ -80,10 +107,6 @@ Feel free to copy, use and enjoy according to the license provided.
80
107
  #define FIO_SLOWLORIS_LIMIT (1 << 10)
81
108
  #endif
82
109
 
83
- #if !defined(__clang__) && !defined(__GNUC__)
84
- #define __thread _Thread_value
85
- #endif
86
-
87
110
  #ifndef FIO_TLS_WEAK
88
111
  #define FIO_TLS_WEAK __attribute__((weak))
89
112
  #endif
@@ -97,6 +120,162 @@ Feel free to copy, use and enjoy according to the license provided.
97
120
  #endif
98
121
  #endif
99
122
 
123
+ /* *****************************************************************************
124
+ Windows support functions
125
+ ***************************************************************************** */
126
+ #ifdef __MINGW32__
127
+
128
+ #define WIFEXITED(s) (!((s)&0xFF))
129
+ #define WEXITSTATUS(s) (((s)>>8)&0xFF)
130
+
131
+ FARPROC accept_ptr;
132
+ FARPROC bind_ptr;
133
+ FARPROC closesocket_ptr;
134
+ FARPROC connect_ptr;
135
+ FARPROC getsockopt_ptr;
136
+ FARPROC ioctlsocket_ptr;
137
+ FARPROC listen_ptr;
138
+ FARPROC recv_ptr;
139
+ FARPROC send_ptr;
140
+ FARPROC setsockopt_ptr;
141
+ FARPROC socket_ptr;
142
+
143
+ int fork() {
144
+ fprintf(stderr, "fork() is not supported on Windows.\n");
145
+ errno = ENOSYS;
146
+ return -1;
147
+ }
148
+
149
+ int ioctl (int fd, u_long request, int* argp) {
150
+ int error;
151
+ u_long flags;
152
+ flags = *argp;
153
+ error = ioctlsocket_ptr(fd, request, &flags);
154
+ if (error > 0) { return -1; }
155
+ else { return 0; }
156
+ }
157
+
158
+ int kill(int pid, int sig) {
159
+ /* Credit to Jan Biedermann (GitHub: @janbiedermann) */
160
+ HANDLE handle;
161
+ DWORD status;
162
+ if (sig < 0 || sig >= NSIG) {
163
+ errno = EINVAL;
164
+ return -1;
165
+ }
166
+ #ifdef SIGCONT
167
+ if (sig == SIGCONT) {
168
+ errno = ENOSYS;
169
+ return -1;
170
+ }
171
+ #endif
172
+
173
+ if (pid == -1)
174
+ pid = 0;
175
+
176
+ if (!pid)
177
+ handle = GetCurrentProcess();
178
+ else
179
+ handle =
180
+ OpenProcess(PROCESS_TERMINATE | PROCESS_QUERY_INFORMATION, FALSE, pid);
181
+ if (!handle)
182
+ goto something_went_wrong;
183
+
184
+ switch (sig) {
185
+ case SIGKILL:
186
+ case SIGTERM:
187
+ case SIGINT: /* terminate */
188
+ if (!TerminateProcess(handle, 1))
189
+ goto something_went_wrong;
190
+ break;
191
+ case 0: /* check status */
192
+ if (!GetExitCodeProcess(handle, &status))
193
+ goto something_went_wrong;
194
+ if (status != STILL_ACTIVE) {
195
+ errno = ESRCH;
196
+ goto cleanup_after_error;
197
+ }
198
+ break;
199
+ default: /* not supported? */
200
+ errno = ENOSYS;
201
+ goto cleanup_after_error;
202
+ }
203
+
204
+ if (pid) {
205
+ CloseHandle(handle);
206
+ }
207
+ return 0;
208
+
209
+ something_went_wrong:
210
+ switch (GetLastError()) {
211
+ case ERROR_INVALID_PARAMETER:
212
+ errno = ESRCH;
213
+ break;
214
+ case ERROR_ACCESS_DENIED:
215
+ errno = EPERM;
216
+ if (handle && GetExitCodeProcess(handle, &status) && status != STILL_ACTIVE)
217
+ errno = ESRCH;
218
+ break;
219
+ default:
220
+ errno = GetLastError();
221
+ }
222
+ cleanup_after_error:
223
+ if (handle && pid)
224
+ CloseHandle(handle);
225
+ return -1;
226
+ }
227
+
228
+ ssize_t pread(int fd, void *buf, size_t count, off_t offset) {
229
+ /* Credit to Jan Biedermann (GitHub: @janbiedermann) */
230
+ ssize_t bytes_read = 0;
231
+ HANDLE handle = (HANDLE)_get_osfhandle(fd);
232
+ if (handle == INVALID_HANDLE_VALUE)
233
+ goto bad_file;
234
+ OVERLAPPED overlapped = {0};
235
+ if (offset > 0)
236
+ overlapped.Offset = offset;
237
+ if (ReadFile(handle, buf, count, (u_long *)&bytes_read, &overlapped))
238
+ return bytes_read;
239
+ if (GetLastError() == ERROR_HANDLE_EOF)
240
+ return bytes_read;
241
+ errno = EIO;
242
+ return -1;
243
+ bad_file:
244
+ errno = EBADF;
245
+ return -1;
246
+ }
247
+
248
+ ssize_t pwrite(int fd, const void *buf, size_t count, off_t offset) {
249
+ /* Credit to Jan Biedermann (GitHub: @janbiedermann) */
250
+ ssize_t bytes_written = 0;
251
+ HANDLE handle = (HANDLE)_get_osfhandle(fd);
252
+ if (handle == INVALID_HANDLE_VALUE)
253
+ goto bad_file;
254
+ OVERLAPPED overlapped = {0};
255
+ if (offset > 0)
256
+ overlapped.Offset = offset;
257
+ if (WriteFile(handle, buf, count, (u_long *)&bytes_written, &overlapped))
258
+ return bytes_written;
259
+ errno = EIO;
260
+ return -1;
261
+ bad_file:
262
+ errno = EBADF;
263
+ return -1;
264
+ }
265
+
266
+ pid_t wait(int *stat_loc) {
267
+ fprintf(stderr, "wait() is not supported on Windows.\n");
268
+ errno = ENOSYS;
269
+ return -1;
270
+ }
271
+
272
+ pid_t waitpid(pid_t pid, int *stat_loc, int options) {
273
+ fprintf(stderr, "waitpid() is not supported on Windows.\n");
274
+ errno = ENOSYS;
275
+ return -1;
276
+ }
277
+ #endif
278
+
100
279
  /* *****************************************************************************
101
280
  Event deferring (declarations)
102
281
  ***************************************************************************** */
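The hunk above adds MinGW shims for fork(), kill(), pread(), pwrite(), wait() and waitpid(). As a reference, here is a minimal, self-contained sketch (not part of the patch, hypothetical pid) of the kill(pid, 0) liveness probe that the shim's `case 0:` branch preserves:

#include <errno.h>
#include <signal.h>   /* on MinGW the shim above supplies kill() */
#include <stdio.h>

int main(void) {
  int pid = 4242;                    /* hypothetical process id */
  if (kill(pid, 0) == 0)
    printf("process %d is alive\n", pid);
  else if (errno == ESRCH)
    printf("process %d has exited\n", pid);
  else
    perror("kill");                  /* e.g. EPERM: alive, owned by another user */
  return 0;
}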
@@ -185,7 +364,7 @@ typedef struct {
185
364
  uint8_t close;
186
365
  /** peer address length */
187
366
  uint8_t addr_len;
188
- /** peer address length */
367
+ /** peer address */
189
368
  uint8_t addr[48];
190
369
  /** RW hooks. */
191
370
  fio_rw_hook_s *rw_hooks;
@@ -193,6 +372,11 @@ typedef struct {
193
372
  void *rw_udata;
194
373
  /* Objects linked to the UUID */
195
374
  fio_uuid_links_s links;
375
+ #ifdef __MINGW32__
376
+ /* Winsock operating system socket handle */
377
+ SOCKET socket_handle;
378
+ int osffd;
379
+ #endif
196
380
  } fio_fd_data_s;
197
381
 
198
382
  typedef struct {
@@ -219,7 +403,7 @@ typedef struct {
219
403
  uint32_t max_protocol_fd;
220
404
  /* timer handler */
221
405
  pid_t parent;
222
- #if FIO_ENGINE_POLL
406
+ #if FIO_ENGINE_POLL || FIO_ENGINE_WSAPOLL
223
407
  struct pollfd *poll;
224
408
  #endif
225
409
  fio_fd_data_s info[];
@@ -296,6 +480,10 @@ static inline int fio_clear_fd(intptr_t fd, uint8_t is_open) {
296
480
  protocol = fd_data(fd).protocol;
297
481
  rw_hooks = fd_data(fd).rw_hooks;
298
482
  rw_udata = fd_data(fd).rw_udata;
483
+ #ifdef __MINGW32__
484
+ SOCKET socket_handle = fd_data(fd).socket_handle;
485
+ int osffd = fd_data(fd).osffd;
486
+ #endif
299
487
  fd_data(fd) = (fio_fd_data_s){
300
488
  .open = is_open,
301
489
  .sock_lock = fd_data(fd).sock_lock,
@@ -303,8 +491,12 @@ static inline int fio_clear_fd(intptr_t fd, uint8_t is_open) {
303
491
  .rw_hooks = (fio_rw_hook_s *)&FIO_DEFAULT_RW_HOOKS,
304
492
  .counter = fd_data(fd).counter + 1,
305
493
  .packet_last = &fd_data(fd).packet,
494
+ #ifdef __MINGW32__
495
+ .socket_handle = socket_handle,
496
+ .osffd = osffd,
497
+ #endif
306
498
  };
307
- if (fio_data->max_protocol_fd < fd) {
499
+ if (is_open && fio_data->max_protocol_fd < fd) {
308
500
  fio_data->max_protocol_fd = fd;
309
501
  } else {
310
502
  while (fio_data->max_protocol_fd &&
@@ -329,8 +521,8 @@ static inline int fio_clear_fd(intptr_t fd, uint8_t is_open) {
329
521
  if (protocol && protocol->on_close) {
330
522
  fio_defer(deferred_on_close, (void *)fd2uuid(fd), protocol);
331
523
  }
332
- FIO_LOG_DEBUG("FD %d re-initialized (state: %p-%s).", (int)fd,
333
- (void *)fd2uuid(fd), (is_open ? "open" : "closed"));
524
+ // FIO_LOG_DEBUG("FD %d re-initialized (state: %p-%s).", (int)fd,
525
+ // (void *)fd2uuid(fd), (is_open ? "open" : "closed"));
334
526
  return 0;
335
527
  }
336
528
 
@@ -652,22 +844,58 @@ Suspending and renewing thread execution (signaling events)
652
844
  * progressive nano-sleep throttling system that is less exact.
653
845
  */
654
846
  #ifndef FIO_DEFER_THROTTLE_POLL
847
+ #ifdef __MINGW32__
848
+ #define FIO_DEFER_THROTTLE_POLL 1
849
+ #else
655
850
  #define FIO_DEFER_THROTTLE_POLL 0
656
851
  #endif
852
+ #endif
657
853
 
658
854
  typedef struct fio_thread_queue_s {
659
855
  fio_ls_embd_s node;
660
- int fd_wait; /* used for weaiting (read signal) */
856
+ #ifdef __MINGW32__
857
+ HANDLE handle;
858
+ int in_list;
859
+ #else
860
+ int fd_wait; /* used for waiting (read signal) */
661
861
  int fd_signal; /* used for signalling (write) */
862
+ #endif
662
863
  } fio_thread_queue_s;
663
864
 
664
865
  fio_ls_embd_s fio_thread_queue = FIO_LS_INIT(fio_thread_queue);
665
866
  fio_lock_i fio_thread_lock = FIO_LOCK_INIT;
666
- static __thread fio_thread_queue_s fio_thread_data = {.fd_wait = -1,
667
- .fd_signal = -1};
867
+
868
+ static pthread_key_t fio_thread_data_key;
869
+ static pthread_once_t fio_thread_data_once = PTHREAD_ONCE_INIT;
870
+ static void init_fio_thread_data_key(void) {
871
+ pthread_key_create(&fio_thread_data_key, free);
872
+ }
873
+ static void init_fio_thread_data_ptr(void) {
874
+ fio_thread_queue_s *fio_thread_data = malloc(sizeof(fio_thread_queue_s));
875
+ FIO_ASSERT_ALLOC(fio_thread_data);
876
+ memset(fio_thread_data, 0, sizeof(fio_thread_queue_s));
877
+ #ifdef __MINGW32__
878
+ fio_thread_data->handle = INVALID_HANDLE_VALUE;
879
+ #else
880
+ fio_thread_data->fd_wait = -1;
881
+ fio_thread_data->fd_signal = -1;
882
+ #endif
883
+ pthread_setspecific(fio_thread_data_key, fio_thread_data);
884
+ }
668
885
 
669
886
  FIO_FUNC inline void fio_thread_make_suspendable(void) {
670
- if (fio_thread_data.fd_signal >= 0)
887
+ pthread_once(&fio_thread_data_once, init_fio_thread_data_key);
888
+ fio_thread_queue_s *fio_thread_data = (fio_thread_queue_s *)pthread_getspecific(fio_thread_data_key);
889
+ if (!fio_thread_data) {
890
+ init_fio_thread_data_ptr();
891
+ fio_thread_data = (fio_thread_queue_s *)pthread_getspecific(fio_thread_data_key);
892
+ }
893
+ #ifdef __MINGW32__
894
+ /** create an automatically resetting event */
895
+ fio_thread_data->handle = CreateEvent(NULL, FALSE, FALSE, TEXT("thread signal"));
896
+ fio_thread_data->in_list = 0;
897
+ #else
898
+ if (fio_thread_data->fd_signal >= 0)
671
899
  return;
672
900
  int fd[2] = {0, 0};
673
901
  int ret = pipe(fd);
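The hunk above replaces the `__thread` fio_thread_data variable with a pthread_key_t slot created through pthread_once (the static_throttle counter gets the same treatment in a later hunk), since per-thread storage via pthread keys is more portable to MinGW builds. A minimal, self-contained sketch of that pattern, with illustrative names only (not iodine's code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t counter_key;                /* one slot per thread */
static pthread_once_t counter_once = PTHREAD_ONCE_INIT;

static void counter_key_init(void) {
  /* free() is registered as destructor, so each thread's value is released on exit */
  pthread_key_create(&counter_key, free);
}

static size_t *thread_counter(void) {
  pthread_once(&counter_once, counter_key_init);
  size_t *c = pthread_getspecific(counter_key);
  if (!c) {                                      /* first use in this thread */
    c = calloc(1, sizeof(*c));
    pthread_setspecific(counter_key, c);
  }
  return c;
}

static void *worker(void *arg) {
  for (int i = 0; i < 3; ++i)
    ++(*thread_counter());                       /* no races: value is per-thread */
  printf("thread %p counted to %zu\n", arg, *thread_counter());
  return NULL;
}

int main(void) {
  pthread_t t[2];
  for (int i = 0; i < 2; ++i)
    pthread_create(&t[i], NULL, worker, &t[i]);
  for (int i = 0; i < 2; ++i)
    pthread_join(t[i], NULL);
  return 0;
}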
@@ -676,43 +904,76 @@ FIO_FUNC inline void fio_thread_make_suspendable(void) {
676
904
  "(fio) couldn't set internal pipe to non-blocking mode.");
677
905
  FIO_ASSERT(fio_set_non_block(fd[1]) == 0,
678
906
  "(fio) couldn't set internal pipe to non-blocking mode.");
679
- fio_thread_data.fd_wait = fd[0];
680
- fio_thread_data.fd_signal = fd[1];
907
+ fio_thread_data->fd_wait = fd[0];
908
+ fio_thread_data->fd_signal = fd[1];
909
+ #endif
681
910
  }
682
911
 
683
912
  FIO_FUNC inline void fio_thread_cleanup(void) {
684
- if (fio_thread_data.fd_signal < 0)
913
+ fio_thread_queue_s *fio_thread_data = (fio_thread_queue_s *)pthread_getspecific(fio_thread_data_key);
914
+ #ifdef __MINGW32__
915
+ HANDLE h = fio_thread_data->handle;
916
+ fio_thread_data->handle = INVALID_HANDLE_VALUE;
917
+ CloseHandle(h);
918
+ #else
919
+ if (fio_thread_data->fd_signal < 0)
685
920
  return;
686
- close(fio_thread_data.fd_wait);
687
- close(fio_thread_data.fd_signal);
688
- fio_thread_data.fd_wait = -1;
689
- fio_thread_data.fd_signal = -1;
921
+ close(fio_thread_data->fd_wait);
922
+ close(fio_thread_data->fd_signal);
923
+ fio_thread_data->fd_wait = -1;
924
+ fio_thread_data->fd_signal = -1;
925
+ #endif
690
926
  }
691
927
 
692
928
  /* suspend thread execution (might be resumed unexpectedly) */
693
929
  FIO_FUNC void fio_thread_suspend(void) {
930
+ fio_thread_queue_s *fio_thread_data = (fio_thread_queue_s *)pthread_getspecific(fio_thread_data_key);
931
+ #ifdef __MINGW32__
932
+ fio_lock(&fio_thread_lock);
933
+ /** don't add thread to queue if its already in there
934
+ * to prevent queue breakage
935
+ * can happen if WaitForSingleObject returns for other reasons */
936
+ if (!fio_thread_data->in_list) {
937
+ fio_ls_embd_push(&fio_thread_queue, &fio_thread_data->node);
938
+ fio_thread_data->in_list = 1;
939
+ }
940
+ fio_unlock(&fio_thread_lock);
941
+ WaitForSingleObject(fio_thread_data->handle, 500);
942
+ #else
694
943
  fio_lock(&fio_thread_lock);
695
- fio_ls_embd_push(&fio_thread_queue, &fio_thread_data.node);
944
+ fio_ls_embd_push(&fio_thread_queue, &fio_thread_data->node);
696
945
  fio_unlock(&fio_thread_lock);
697
946
  struct pollfd list = {
698
947
  .events = (POLLPRI | POLLIN),
699
- .fd = fio_thread_data.fd_wait,
948
+ .fd = fio_thread_data->fd_wait,
700
949
  };
701
950
  if (poll(&list, 1, 5000) > 0) {
702
951
  /* thread was removed from the list through signal */
703
952
  uint64_t data;
704
- int r = read(fio_thread_data.fd_wait, &data, sizeof(data));
953
+ int r = read(fio_thread_data->fd_wait, &data, sizeof(data));
705
954
  (void)r;
706
955
  } else {
707
956
  /* remove self from list */
708
957
  fio_lock(&fio_thread_lock);
709
- fio_ls_embd_remove(&fio_thread_data.node);
958
+ fio_ls_embd_remove(&fio_thread_data->node);
710
959
  fio_unlock(&fio_thread_lock);
711
960
  }
961
+ #endif
712
962
  }
713
963
 
714
964
  /* wake up a single thread */
715
965
  FIO_FUNC void fio_thread_signal(void) {
966
+ #ifdef __MINGW32__
967
+ fio_lock(&fio_thread_lock);
968
+ fio_thread_queue_s *t = (fio_thread_queue_s *)fio_ls_embd_shift(&fio_thread_queue);
969
+ if (t) {
970
+ t->in_list = 0;
971
+ fio_unlock(&fio_thread_lock);
972
+ SetEvent(t->handle);
973
+ } else {
974
+ fio_unlock(&fio_thread_lock);
975
+ }
976
+ #else
716
977
  fio_thread_queue_s *t;
717
978
  int fd = -2;
718
979
  fio_lock(&fio_thread_lock);
@@ -728,6 +989,7 @@ FIO_FUNC void fio_thread_signal(void) {
728
989
  /* hardly the best way, but there's a thread sleeping on air */
729
990
  kill(getpid(), SIGCONT);
730
991
  }
992
+ #endif
731
993
  }
732
994
 
733
995
  /* wake up all threads */
@@ -737,9 +999,16 @@ FIO_FUNC void fio_thread_broadcast(void) {
737
999
  }
738
1000
  }
739
1001
 
1002
+ static pthread_key_t static_throttle_key;
1003
+ static pthread_once_t static_throttle_once = PTHREAD_ONCE_INIT;
1004
+ static void init_static_throttle_key(void) {
1005
+ pthread_key_create(&static_throttle_key, NULL);
1006
+ pthread_setspecific(static_throttle_key, (void *)262143UL);
1007
+ }
1008
+
740
1009
  static size_t fio_poll(void);
741
1010
  /**
742
- * A thread entering this function should wait for new evennts.
1011
+ * A thread entering this function should wait for new events.
743
1012
  */
744
1013
  static void fio_defer_thread_wait(void) {
745
1014
  #if FIO_ENGINE_POLL
@@ -750,12 +1019,13 @@ static void fio_defer_thread_wait(void) {
750
1019
  fio_thread_suspend();
751
1020
  } else {
752
1021
  /* keeps threads active (concurrent), but reduces performance */
753
- static __thread size_t static_throttle = 262143UL;
1022
+ pthread_once(&static_throttle_once, init_static_throttle_key);
1023
+ size_t static_throttle = (size_t)pthread_getspecific(static_throttle_key);
754
1024
  fio_throttle_thread(static_throttle);
755
1025
  if (fio_defer_has_queue())
756
- static_throttle = 1;
1026
+ pthread_setspecific(static_throttle_key, (void *)1);
757
1027
  else if (static_throttle < FIO_DEFER_THROTTLE_LIMIT)
758
- static_throttle = (static_throttle << 1);
1028
+ pthread_setspecific(static_throttle_key, (void *)(static_throttle << 1));
759
1029
  }
760
1030
  }
761
1031
 
@@ -1341,7 +1611,11 @@ Section Start Marker
1341
1611
 
1342
1612
  volatile uint8_t fio_signal_children_flag = 0;
1343
1613
  volatile fio_lock_i fio_signal_set_flag = 0;
1344
- /* store old signal handlers to propegate signal handling */
1614
+ /* store old signal handlers to propagate signal handling */
1615
+ #ifdef __MINGW32__
1616
+ void (*fio_old_sig_int)(int);
1617
+ void (*fio_old_sig_term)(int);
1618
+ #else
1345
1619
  static struct sigaction fio_old_sig_chld;
1346
1620
  static struct sigaction fio_old_sig_pipe;
1347
1621
  static struct sigaction fio_old_sig_term;
@@ -1349,6 +1623,10 @@ static struct sigaction fio_old_sig_int;
1349
1623
  #if !FIO_DISABLE_HOT_RESTART
1350
1624
  static struct sigaction fio_old_sig_usr1;
1351
1625
  #endif
1626
+ #endif
1627
+
1628
+ #ifndef __MINGW32__
1629
+ /* there are no process children on Windows, as there is only one worker */
1352
1630
 
1353
1631
  /*
1354
1632
  * Zombie Reaping
@@ -1380,10 +1658,15 @@ void fio_reap_children(void) {
1380
1658
  exit(errno);
1381
1659
  }
1382
1660
  }
1661
+ #endif
1383
1662
 
1384
1663
  /* handles the SIGUSR1, SIGINT and SIGTERM signals. */
1385
1664
  static void sig_int_handler(int sig) {
1665
+ #ifdef __MINGW32__
1666
+ void (*old)(int) = NULL;
1667
+ #else
1386
1668
  struct sigaction *old = NULL;
1669
+ #endif
1387
1670
  switch (sig) {
1388
1671
  #if !FIO_DISABLE_HOT_RESTART
1389
1672
  case SIGUSR1:
@@ -1394,28 +1677,47 @@ static void sig_int_handler(int sig) {
1394
1677
  /* fallthrough */
1395
1678
  case SIGINT:
1396
1679
  if (!old)
1680
+ #ifdef __MINGW32__
1681
+ old = fio_old_sig_int;
1682
+ #else
1397
1683
  old = &fio_old_sig_int;
1684
+ #endif
1398
1685
  /* fallthrough */
1399
1686
  case SIGTERM:
1400
1687
  if (!old)
1688
+ #ifdef __MINGW32__
1689
+ old = fio_old_sig_term;
1690
+ #else
1401
1691
  old = &fio_old_sig_term;
1692
+ #endif
1402
1693
  fio_stop();
1403
1694
  break;
1695
+ #ifndef __MINGW32__
1404
1696
  case SIGPIPE:
1405
1697
  if (!old)
1406
1698
  old = &fio_old_sig_pipe;
1699
+ #endif
1407
1700
  /* fallthrough */
1408
1701
  default:
1409
1702
  break;
1410
1703
  }
1704
+ #ifdef __MINGW32__
1705
+ if (old)
1706
+ fio_old_sig_int(sig);
1707
+ #else
1411
1708
  /* propagate signale handling to previous existing handler (if any) */
1412
1709
  if (old && old->sa_handler != SIG_IGN && old->sa_handler != SIG_DFL)
1413
1710
  old->sa_handler(sig);
1711
+ #endif
1414
1712
  }
1415
1713
 
1416
1714
  /* setup handling for the SIGUSR1, SIGPIPE, SIGINT and SIGTERM signals. */
1417
1715
  static void fio_signal_handler_setup(void) {
1418
1716
  /* setup signal handling */
1717
+ #ifdef __MINGW32__
1718
+ fio_old_sig_int = signal(SIGINT, sig_int_handler);
1719
+ fio_old_sig_term = signal(SIGTERM, sig_int_handler);
1720
+ #else
1419
1721
  struct sigaction act;
1420
1722
  if (fio_trylock(&fio_signal_set_flag))
1421
1723
  return;
@@ -1447,9 +1749,14 @@ static void fio_signal_handler_setup(void) {
1447
1749
  perror("couldn't set signal handler");
1448
1750
  return;
1449
1751
  };
1752
+ #endif
1450
1753
  }
1451
1754
 
1452
1755
  void fio_signal_handler_reset(void) {
1756
+ #ifdef __MINGW32__
1757
+ signal(SIGINT, fio_old_sig_int);
1758
+ signal(SIGTERM, fio_old_sig_term);
1759
+ #else
1453
1760
  struct sigaction old;
1454
1761
  if (fio_signal_set_flag)
1455
1762
  return;
@@ -1457,17 +1764,21 @@ void fio_signal_handler_reset(void) {
1457
1764
  memset(&old, 0, sizeof(old));
1458
1765
  sigaction(SIGINT, &fio_old_sig_int, &old);
1459
1766
  sigaction(SIGTERM, &fio_old_sig_term, &old);
1767
+
1460
1768
  sigaction(SIGPIPE, &fio_old_sig_pipe, &old);
1461
1769
  if (fio_old_sig_chld.sa_handler)
1462
1770
  sigaction(SIGCHLD, &fio_old_sig_chld, &old);
1463
1771
  #if !FIO_DISABLE_HOT_RESTART
1464
1772
  sigaction(SIGUSR1, &fio_old_sig_usr1, &old);
1465
1773
  memset(&fio_old_sig_usr1, 0, sizeof(fio_old_sig_usr1));
1774
+ #endif
1466
1775
  #endif
1467
1776
  memset(&fio_old_sig_int, 0, sizeof(fio_old_sig_int));
1468
1777
  memset(&fio_old_sig_term, 0, sizeof(fio_old_sig_term));
1778
+ #ifndef __MINGW32__
1469
1779
  memset(&fio_old_sig_pipe, 0, sizeof(fio_old_sig_pipe));
1470
1780
  memset(&fio_old_sig_chld, 0, sizeof(fio_old_sig_chld));
1781
+ #endif
1471
1782
  }
1472
1783
 
1473
1784
  /**
@@ -1495,7 +1806,11 @@ pid_t fio_parent_pid(void) { return fio_data->parent; }
1495
1806
 
1496
1807
  static inline size_t fio_detect_cpu_cores(void) {
1497
1808
  ssize_t cpu_count = 0;
1498
- #ifdef _SC_NPROCESSORS_ONLN
1809
+ #if defined(__MINGW32__)
1810
+ SYSTEM_INFO sys_info;
1811
+ GetSystemInfo(&sys_info);
1812
+ cpu_count = sys_info.dwNumberOfProcessors;
1813
+ #elif defined(_SC_NPROCESSORS_ONLN)
1499
1814
  cpu_count = sysconf(_SC_NPROCESSORS_ONLN);
1500
1815
  if (cpu_count < 0) {
1501
1816
  FIO_LOG_WARNING("CPU core count auto-detection failed.");
@@ -1536,11 +1851,16 @@ void fio_expected_concurrency(int16_t *threads, int16_t *processes) {
1536
1851
  cpu_count = FIO_CPU_CORES_LIMIT;
1537
1852
  }
1538
1853
  #endif
1854
+ #ifdef __MINGW32__
1855
+ *threads = (int16_t)cpu_count;
1856
+ *processes = 1;
1857
+ #else
1539
1858
  *threads = *processes = (int16_t)cpu_count;
1540
1859
  if (cpu_count > 3) {
1541
1860
  /* leave a core available for the kernel */
1542
1861
  --(*processes);
1543
1862
  }
1863
+ #endif
1544
1864
  } else if (*threads < 0 || *processes < 0) {
1545
1865
  /* Set any option that is less than 0 be equal to cores/value */
1546
1866
  /* Set any option equal to 0 be equal to the other option in value */
@@ -1577,9 +1897,14 @@ void fio_expected_concurrency(int16_t *threads, int16_t *processes) {
1577
1897
  }
1578
1898
  }
1579
1899
 
1580
- /* make sure we have at least one process and at least one thread */
1900
+ /* make sure we have at least one (or exactly one on Windows) process and at least one thread */
1901
+ #ifdef __MINGW32__
1902
+ FIO_LOG_WARNING("Using only 1 worker on Windows, because fork() support is missing.");
1903
+ *processes = 1;
1904
+ #else
1581
1905
  if (*processes <= 0)
1582
1906
  *processes = 1;
1907
+ #endif
1583
1908
  if (*threads <= 0)
1584
1909
  *threads = 1;
1585
1910
  }
@@ -2048,6 +2373,188 @@ Section Start Marker
2048
2373
 
2049
2374
 
2050
2375
 
2376
+
2377
+ Polling State Machine - wsapoll
2378
+
2379
+
2380
+
2381
+
2382
+
2383
+
2384
+
2385
+
2386
+
2387
+
2388
+
2389
+
2390
+
2391
+
2392
+ ***************************************************************************** */
2393
+
2394
+ #if FIO_ENGINE_WSAPOLL
2395
+
2396
+ void fio_clear_handle(int fd);
2397
+ int fio_fd4handle(SOCKET handle);
2398
+
2399
+ /**
2400
+ * Returns a C string detailing the IO engine selected during compilation.
2401
+ *
2402
+ * Valid values are "kqueue", "epoll", "poll" and "wsapoll".
2403
+ */
2404
+ char const *fio_engine(void) { return "wsapoll"; }
2405
+
2406
+ #define FIO_POLL_READ_EVENTS (POLLIN)
2407
+ #define FIO_POLL_WRITE_EVENTS (POLLOUT)
2408
+
2409
+ static void fio_poll_close(void) {
2410
+ WSACleanup();
2411
+ }
2412
+
2413
+ static void fio_poll_init(void) {
2414
+ int result;
2415
+ WSADATA wsa_data;
2416
+ result = WSAStartup(MAKEWORD(2,2), &wsa_data);
2417
+ if (result != 0) {
2418
+ FIO_LOG_FATAL("WSA startup failed.\n");
2419
+ exit(result);
2420
+ }
2421
+ }
2422
+
2423
+ static inline void fio_poll_remove_fd(int fd) {
2424
+ fio_data->poll[fd].fd = -1;
2425
+ fio_data->poll[fd].events = 0;
2426
+ }
2427
+
2428
+ static inline void fio_poll_add_read(int fd) {
2429
+ fio_data->poll[fd].fd = fd;
2430
+ fio_data->poll[fd].events |= FIO_POLL_READ_EVENTS;
2431
+ }
2432
+
2433
+ static inline void fio_poll_add_write(int fd) {
2434
+ fio_data->poll[fd].fd = fd;
2435
+ fio_data->poll[fd].events |= FIO_POLL_WRITE_EVENTS;
2436
+ }
2437
+
2438
+ static inline void fio_poll_add(int fd) {
2439
+ fio_data->poll[fd].fd = fd;
2440
+ fio_data->poll[fd].events = FIO_POLL_READ_EVENTS | FIO_POLL_WRITE_EVENTS;
2441
+ }
2442
+
2443
+ static inline void fio_poll_remove_read(int fd) {
2444
+ fio_lock(&fio_data->lock);
2445
+ if (fio_data->poll[fd].events & FIO_POLL_WRITE_EVENTS)
2446
+ fio_data->poll[fd].events = FIO_POLL_WRITE_EVENTS;
2447
+ else {
2448
+ fio_poll_remove_fd(fd);
2449
+ }
2450
+ fio_unlock(&fio_data->lock);
2451
+ }
2452
+
2453
+ static inline void fio_poll_remove_write(int fd) {
2454
+ fio_lock(&fio_data->lock);
2455
+ if (fio_data->poll[fd].events & FIO_POLL_READ_EVENTS)
2456
+ fio_data->poll[fd].events = FIO_POLL_READ_EVENTS;
2457
+ else {
2458
+ fio_poll_remove_fd(fd);
2459
+ }
2460
+ fio_unlock(&fio_data->lock);
2461
+ }
2462
+
2463
+ /** returns non-zero if events were scheduled, 0 if idle */
2464
+ static size_t fio_poll(void) {
2465
+ /* shrink fd poll range */
2466
+ size_t end = fio_data->capa; // max_protocol_fd might break TLS
2467
+ size_t start = 0;
2468
+ struct pollfd *list = NULL;
2469
+ fio_lock(&fio_data->lock);
2470
+ while (start < end && fio_data->poll[start].fd == (SOCKET)-1)
2471
+ ++start;
2472
+ while (start < end && fio_data->poll[end-1].fd == (SOCKET)-1)
2473
+ --end;
2474
+ fio_unlock(&fio_data->lock);
2475
+
2476
+ /* copy poll list for multi-threaded poll */
2477
+ list = fio_malloc(sizeof(*list) * (end - start + 1));
2478
+ FIO_ASSERT_ALLOC(list);
2479
+
2480
+ // replace facil fds with actual Windows socket handles in list
2481
+ size_t i = 0;
2482
+ size_t j = 0;
2483
+
2484
+ for(i = start; i <= end; i++) {
2485
+ if (fd_data(i).socket_handle != INVALID_SOCKET && fd_data(i).socket_handle > 0) {
2486
+ list[j].fd = fd_data(i).socket_handle;
2487
+ list[j].events = fio_data->poll[i].events;
2488
+ list[j].revents = fio_data->poll[i].revents;
2489
+ j++;
2490
+ }
2491
+ }
2492
+
2493
+ if (WSAPoll(list, j, 1) == SOCKET_ERROR) {
2494
+ int error = WSAGetLastError();
2495
+ FIO_LOG_DEBUG("fio_poll WSAPoll error %i", error);
2496
+ goto finish;
2497
+ }
2498
+
2499
+ size_t count = 0;
2500
+ int fd;
2501
+
2502
+ for (i = 0; i < j; i++) {
2503
+ if (list[i].fd != INVALID_SOCKET && list[i].revents) {
2504
+ fd = fio_fd4handle(list[i].fd);
2505
+ if (fd == -1)
2506
+ continue;
2507
+ touchfd(fd);
2508
+ ++count;
2509
+
2510
+ if (list[i].revents & FIO_POLL_WRITE_EVENTS) {
2511
+ // FIO_LOG_DEBUG("Poll Write %zu => %p", fd, (void *)fd2uuid(fd));
2512
+ fio_poll_remove_write(fd);
2513
+ fio_defer_push_urgent(deferred_on_ready, (void *)fd2uuid(fd), NULL);
2514
+ }
2515
+ if (list[i].revents & FIO_POLL_READ_EVENTS) {
2516
+ // FIO_LOG_DEBUG("Poll Read %zu => %p", fd, (void *)fd2uuid(fd));
2517
+ fio_poll_remove_read(fd);
2518
+ fio_defer_push_task(deferred_on_data, (void *)fd2uuid(fd), NULL);
2519
+ continue;
2520
+ }
2521
+ if (list[i].revents & (POLLHUP | POLLERR)) {
2522
+ // FIO_LOG_DEBUG("Poll Hangup %zu => %p", fd, (void *)fd2uuid(fd));
2523
+ fio_poll_remove_fd(fd);
2524
+ fio_force_close_in_poll(fd2uuid(fd));
2525
+ continue;
2526
+ }
2527
+ if (list[i].revents & POLLNVAL) {
2528
+ // FIO_LOG_DEBUG("Poll Invalid %zu => %p", fd, (void *)fd2uuid(fd));
2529
+ fio_poll_remove_fd(fd);
2530
+ fio_lock(&fd_data(fd).protocol_lock);
2531
+ fio_clear_handle(fd);
2532
+ fio_clear_fd(fd, 0);
2533
+ fio_unlock(&fd_data(fd).protocol_lock);
2534
+ }
2535
+ }
2536
+ }
2537
+ finish:
2538
+ fio_free(list);
2539
+ return count;
2540
+ }
2541
+
2542
+ #endif /* FIO_ENGINE_WSAPOLL */
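For readers unfamiliar with the Winsock side of the engine above: WSAPoll() takes an array of WSAPOLLFD entries keyed by SOCKET handles rather than small integer fds, which is why fio_poll() copies its list and maps facil.io fds to handles before polling. A minimal, standalone usage sketch (hypothetical port, link with -lws2_32, not iodine code):

#define _WIN32_WINNT 0x0600   /* WSAPoll needs Vista+ declarations */
#include <winsock2.h>
#include <ws2tcpip.h>
#include <stdio.h>

int main(void) {
  WSADATA wsa;
  if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
    return 1;
  SOCKET srv = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  struct sockaddr_in addr = {0};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(3000);              /* hypothetical port */
  addr.sin_addr.s_addr = htonl(INADDR_ANY);
  bind(srv, (struct sockaddr *)&addr, sizeof(addr));
  listen(srv, SOMAXCONN);
  WSAPOLLFD fds[1] = {{.fd = srv, .events = POLLIN}};
  for (;;) {
    int n = WSAPoll(fds, 1, 1000);          /* 1s timeout; 0 means idle */
    if (n == SOCKET_ERROR) {
      fprintf(stderr, "WSAPoll failed: %d\n", WSAGetLastError());
      break;
    }
    if (n > 0 && (fds[0].revents & POLLIN)) {
      SOCKET client = accept(srv, NULL, NULL);
      closesocket(client);                  /* accept and drop: event flow only */
    }
  }
  WSACleanup();
  return 0;
}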
2543
+
2544
+ /* *****************************************************************************
2545
+ Section Start Marker
2546
+
2547
+
2548
+
2549
+
2550
+
2551
+
2552
+
2553
+
2554
+
2555
+
2556
+
2557
+
2051
2558
  IO Callbacks / Event Handling
2052
2559
 
2053
2560
 
@@ -2348,6 +2855,62 @@ static void fio_tcp_addr_cpy(int fd, int family, struct sockaddr *addrinfo) {
2348
2855
  }
2349
2856
  }
2350
2857
 
2858
+ #ifdef __MINGW32__
2859
+ int fio_handle2fd(SOCKET handle) {
2860
+ int found = 0;
2861
+ int fd = -1;
2862
+ int it = 0;
2863
+ while(!found) {
2864
+ fd++;
2865
+ if (fd >= (int)fio_data->capa) {
2866
+ fd = 0;
2867
+ it++;
2868
+ if (it > 2)
2869
+ return -1;
2870
+ fio_reschedule_thread();
2871
+ }
2872
+ if (fd_data(fd).socket_handle != INVALID_SOCKET && fd_data(fd).socket_handle > 0)
2873
+ continue;
2874
+ fio_lock(&(fd_data(fd).sock_lock));
2875
+ if (fd_data(fd).socket_handle != INVALID_SOCKET && fd_data(fd).socket_handle > 0) {
2876
+ fio_unlock(&(fd_data(fd).sock_lock));
2877
+ continue;
2878
+ } else {
2879
+ fd_data(fd).socket_handle = handle;
2880
+ found = 1;
2881
+ fio_unlock(&(fd_data(fd).sock_lock));
2882
+ }
2883
+ }
2884
+ return fd;
2885
+ }
2886
+
2887
+ int fio_fd4handle(SOCKET handle) {
2888
+ unsigned int fd = 0;
2889
+ while(fd < fio_data->capa) {
2890
+ if (fd_data(fd).socket_handle == handle) {
2891
+ return fd;
2892
+ }
2893
+ fd++;
2894
+ }
2895
+ return -1;
2896
+ }
2897
+
2898
+ int fio_osffd4fd(unsigned int fd) {
2899
+ if (fd_data(fd).osffd != -1)
2900
+ return fd_data(fd).osffd;
2901
+ int osffd = _open_osfhandle(fd_data(fd).socket_handle, _O_RDWR);
2902
+ fd_data(fd).osffd = osffd;
2903
+ return osffd;
2904
+ }
2905
+
2906
+ void fio_clear_handle(int fd) {
2907
+ fio_lock(&(fd_data(fd).sock_lock));
2908
+ fd_data(fd).socket_handle = INVALID_SOCKET;
2909
+ fd_data(fd).osffd = -1;
2910
+ fio_unlock(&(fd_data(fd).sock_lock));
2911
+ }
2912
+ #endif
2913
+
2351
2914
  /**
2352
2915
  * `fio_accept` accepts a new socket connection from a server socket - see the
2353
2916
  * server flag on `fio_socket`.
@@ -2358,30 +2921,58 @@ static void fio_tcp_addr_cpy(int fd, int family, struct sockaddr *addrinfo) {
2358
2921
  intptr_t fio_accept(intptr_t srv_uuid) {
2359
2922
  struct sockaddr_in6 addrinfo[2]; /* grab a slice of stack (aligned) */
2360
2923
  socklen_t addrlen = sizeof(addrinfo);
2924
+ #ifdef __MINGW32__
2925
+ SOCKET client;
2926
+ #else
2361
2927
  int client;
2928
+ #endif
2362
2929
  #ifdef SOCK_NONBLOCK
2363
2930
  client = accept4(fio_uuid2fd(srv_uuid), (struct sockaddr *)addrinfo, &addrlen,
2364
2931
  SOCK_NONBLOCK | SOCK_CLOEXEC);
2365
2932
  if (client <= 0)
2366
2933
  return -1;
2934
+ #else
2935
+ #ifdef __MINGW32__
2936
+ client = accept_ptr(fd_data(fio_uuid2fd(srv_uuid)).socket_handle, (struct sockaddr *)addrinfo, &addrlen);
2937
+ if (client == INVALID_SOCKET)
2938
+ return -1;
2367
2939
  #else
2368
2940
  client = accept(fio_uuid2fd(srv_uuid), (struct sockaddr *)addrinfo, &addrlen);
2369
2941
  if (client <= 0)
2370
2942
  return -1;
2943
+ #endif
2371
2944
  if (fio_set_non_block(client) == -1) {
2372
- close(client);
2945
+ #ifdef __MINGW32__
2946
+ closesocket_ptr(client);
2947
+ #else
2948
+ close(client);
2949
+ #endif
2373
2950
  return -1;
2374
2951
  }
2375
2952
  #endif
2376
2953
  // avoid the TCP delay algorithm.
2377
2954
  {
2955
+ #ifdef __MINGW32__
2956
+ char optval = 1;
2957
+ setsockopt_ptr(client, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval));
2958
+ #else
2378
2959
  int optval = 1;
2379
2960
  setsockopt(client, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval));
2961
+ #endif
2380
2962
  }
2381
2963
  // handle socket buffers.
2382
2964
  {
2383
2965
  int optval = 0;
2384
2966
  socklen_t size = (socklen_t)sizeof(optval);
2967
+ #ifdef __MINGW32__
2968
+ if (!getsockopt_ptr(client, SOL_SOCKET, SO_SNDBUF, (char *)&optval, &size) &&
2969
+ optval <= 131072) {
2970
+ optval = 131072;
2971
+ setsockopt_ptr(client, SOL_SOCKET, SO_SNDBUF, (char *)&optval, sizeof(optval));
2972
+ optval = 131072;
2973
+ setsockopt_ptr(client, SOL_SOCKET, SO_RCVBUF, (char *)&optval, sizeof(optval));
2974
+ }
2975
+ #else
2385
2976
  if (!getsockopt(client, SOL_SOCKET, SO_SNDBUF, &optval, &size) &&
2386
2977
  optval <= 131072) {
2387
2978
  optval = 131072;
@@ -2389,8 +2980,13 @@ intptr_t fio_accept(intptr_t srv_uuid) {
2389
2980
  optval = 131072;
2390
2981
  setsockopt(client, SOL_SOCKET, SO_RCVBUF, &optval, sizeof(optval));
2391
2982
  }
2983
+ #endif
2392
2984
  }
2393
-
2985
+ #ifdef __MINGW32__
2986
+ client = fio_handle2fd(client);
2987
+ if (client == (SOCKET)-1)
2988
+ return -1;
2989
+ #endif
2394
2990
  fio_lock(&fd_data(client).protocol_lock);
2395
2991
  fio_clear_fd(client, 1);
2396
2992
  fio_unlock(&fd_data(client).protocol_lock);
@@ -2409,6 +3005,172 @@ intptr_t fio_accept(intptr_t srv_uuid) {
2409
3005
  return fd2uuid(client);
2410
3006
  }
2411
3007
 
3008
+ /* Creates a TCP/IP socket - returning it's uuid (or -1) */
3009
+ static intptr_t fio_tcp_socket(const char *address, const char *port,
3010
+ uint8_t server) {
3011
+ /* TCP/IP socket */
3012
+ // setup the address
3013
+ struct addrinfo hints = {0};
3014
+ struct addrinfo *addrinfo; // will point to the results
3015
+ memset(&hints, 0, sizeof hints); // make sure the struct is empty
3016
+ hints.ai_family = AF_UNSPEC; // don't care IPv4 or IPv6
3017
+ hints.ai_socktype = SOCK_STREAM; // TCP stream sockets
3018
+ hints.ai_flags = AI_PASSIVE; // fill in my IP for me
3019
+ if (getaddrinfo(address, port, &hints, &addrinfo)) {
3020
+ // perror("addr err");
3021
+ return -1;
3022
+ }
3023
+ // get the file descriptor
3024
+ #ifdef __MINGW32__
3025
+ SOCKET fd =
3026
+ socket_ptr(addrinfo->ai_family, addrinfo->ai_socktype, addrinfo->ai_protocol);
3027
+ if (fd == INVALID_SOCKET) {
3028
+ freeaddrinfo(addrinfo);
3029
+ return -1;
3030
+ }
3031
+ // ensure dual-mode socket, enable IPV4
3032
+ DWORD v6val = 0;
3033
+ setsockopt_ptr(fd, IPPROTO_IPV6, IPV6_V6ONLY, &v6val, sizeof(v6val));
3034
+ #else
3035
+ int fd =
3036
+ socket(addrinfo->ai_family, addrinfo->ai_socktype, addrinfo->ai_protocol);
3037
+ if (fd <= 0) {
3038
+ freeaddrinfo(addrinfo);
3039
+ return -1;
3040
+ }
3041
+ #endif
3042
+ // make sure the socket is non-blocking
3043
+ if (fio_set_non_block(fd) < 0) {
3044
+ freeaddrinfo(addrinfo);
3045
+ #ifdef __MINGW32__
3046
+ closesocket_ptr(fd);
3047
+ #else
3048
+ close(fd); // socket
3049
+ #endif
3050
+ return -1;
3051
+ }
3052
+ if (server) {
3053
+ {
3054
+ // avoid the "address taken"
3055
+ #ifdef __MINGW32__
3056
+ char optval = 1;
3057
+ setsockopt_ptr(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
3058
+ #else
3059
+ int optval = 1;
3060
+ setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
3061
+ #endif
3062
+ }
3063
+ // bind the address to the socket
3064
+ int bound = 0;
3065
+ #ifdef __MINGW32__
3066
+ for (struct addrinfo *i = addrinfo; i != NULL; i = i->ai_next) {
3067
+ if (!bind_ptr(fd, i->ai_addr, i->ai_addrlen))
3068
+ bound = 1;
3069
+ }
3070
+ #else
3071
+ for (struct addrinfo *i = addrinfo; i != NULL; i = i->ai_next) {
3072
+ if (!bind(fd, i->ai_addr, i->ai_addrlen))
3073
+ bound = 1;
3074
+ }
3075
+ #endif
3076
+ if (!bound) {
3077
+ // perror("bind err");
3078
+ freeaddrinfo(addrinfo);
3079
+ #ifdef __MINGW32__
3080
+ closesocket_ptr(fd);
3081
+ #else
3082
+ close(fd);
3083
+ #endif
3084
+ return -1;
3085
+ }
3086
+ #ifdef TCP_FASTOPEN
3087
+ {
3088
+ // support TCP Fast Open when available
3089
+ int optval = 128;
3090
+ #ifdef __MINGW32__
3091
+ setsockopt_ptr(fd, addrinfo->ai_protocol, TCP_FASTOPEN, &optval,
3092
+ sizeof(optval));
3093
+ #else
3094
+ setsockopt(fd, addrinfo->ai_protocol, TCP_FASTOPEN, &optval,
3095
+ sizeof(optval));
3096
+ #endif
3097
+ }
3098
+ #endif
3099
+ #ifdef __MINGW32__
3100
+ if (listen_ptr(fd, SOMAXCONN) < 0) {
3101
+ freeaddrinfo(addrinfo);
3102
+ closesocket_ptr(fd);
3103
+ return -1;
3104
+ }
3105
+ #else
3106
+ if (listen(fd, SOMAXCONN) < 0) {
3107
+ freeaddrinfo(addrinfo);
3108
+ close(fd);
3109
+ return -1;
3110
+ }
3111
+ #endif
3112
+ } else {
3113
+ #ifdef __MINGW32__
3114
+ char optval = 1;
3115
+ setsockopt_ptr(fd, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval));
3116
+ #else
3117
+ int optval = 1;
3118
+ setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval));
3119
+ #endif
3120
+ errno = 0;
3121
+ for (struct addrinfo *i = addrinfo; i; i = i->ai_next) {
3122
+ #ifdef __MINGW32__
3123
+ int connres = connect_ptr(fd, i->ai_addr, i->ai_addrlen);
3124
+ if (connres == SOCKET_ERROR) {
3125
+ int error = WSAGetLastError();
3126
+ if (error == WSAEISCONN || error == WSAEWOULDBLOCK || error == WSAEINPROGRESS)
3127
+ goto socket_okay;
3128
+ } else if (connres == 0) { goto socket_okay; }
3129
+ #else
3130
+ if (connect(fd, i->ai_addr, i->ai_addrlen) == 0 || errno == EINPROGRESS)
3131
+ goto socket_okay;
3132
+ #endif
3133
+ }
3134
+ freeaddrinfo(addrinfo);
3135
+ #ifdef __MINGW32__
3136
+ closesocket_ptr(fd);
3137
+ #else
3138
+ close(fd); // socket
3139
+ #endif
3140
+ return -1;
3141
+ }
3142
+ socket_okay:
3143
+ #ifdef __MINGW32__
3144
+ fd = fio_handle2fd(fd);
3145
+ if (fd == (SOCKET)-1)
3146
+ return -1;
3147
+ #endif
3148
+ fio_lock(&fd_data(fd).protocol_lock);
3149
+ fio_clear_fd(fd, 1);
3150
+ fio_unlock(&fd_data(fd).protocol_lock);
3151
+ fio_tcp_addr_cpy(fd, addrinfo->ai_family, (void *)addrinfo);
3152
+ freeaddrinfo(addrinfo);
3153
+ intptr_t ufd = fd2uuid(fd);
3154
+ return ufd;
3155
+ }
3156
+
3157
+ #ifdef __MINGW32__
3158
+ /* Creates a tcp socket in the 10000 to 19999 port range */
3159
+ static intptr_t fio_unix_socket(const char *address, uint8_t server) {
3160
+ static char *localhost = "localhost";
3161
+ char localport[6];
3162
+ int vary = _getpid();
3163
+ int iterations = sizeof(int) * 8;
3164
+ int i = iterations;
3165
+ while (vary > 9999) {
3166
+ i--;
3167
+ vary &= ~(1u << i);
3168
+ }
3169
+ sprintf_s(localport, 6, "1%04u", vary);
3170
+ FIO_LOG_WARNING("Using tcp socket on localhost port %s for IPC instead of unix socket on Windows.", localport);
3171
+ return fio_tcp_socket(localhost, localport, server);
3172
+ }
3173
+ #else
2412
3174
  /* Creates a Unix socket - returning it's uuid (or -1) */
2413
3175
  static intptr_t fio_unix_socket(const char *address, uint8_t server) {
2414
3176
  /* Unix socket */
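Since MinGW offers no Unix domain sockets, the replacement fio_unix_socket above routes IPC over a localhost TCP port in the 10000-19999 range, derived from the process id by clearing high bits until the value fits in four digits. For a hypothetical pid of 48852, the loop clears bit 15 (leaving 16084) and then bit 13 (leaving 7892), so sprintf_s formats the port as "17892" and IPC runs over localhost:17892.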
@@ -2431,7 +3193,7 @@ static intptr_t fio_unix_socket(const char *address, uint8_t server) {
2431
3193
  return -1;
2432
3194
  }
2433
3195
  if (fio_set_non_block(fd) == -1) {
2434
- close(fd);
3196
+ close(fd);
2435
3197
  return -1;
2436
3198
  }
2437
3199
  if (server) {
@@ -2446,7 +3208,7 @@ static intptr_t fio_unix_socket(const char *address, uint8_t server) {
2446
3208
  close(fd);
2447
3209
  return -1;
2448
3210
  }
2449
- /* chmod for foriegn connections */
3211
+ /* chmod for foreign connections */
2450
3212
  fchmod(fd, 0777);
2451
3213
  } else {
2452
3214
  if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1 &&
@@ -2464,86 +3226,7 @@ static intptr_t fio_unix_socket(const char *address, uint8_t server) {
2464
3226
  }
2465
3227
  return fd2uuid(fd);
2466
3228
  }
2467
-
2468
- /* Creates a TCP/IP socket - returning it's uuid (or -1) */
2469
- static intptr_t fio_tcp_socket(const char *address, const char *port,
2470
- uint8_t server) {
2471
- /* TCP/IP socket */
2472
- // setup the address
2473
- struct addrinfo hints = {0};
2474
- struct addrinfo *addrinfo; // will point to the results
2475
- memset(&hints, 0, sizeof hints); // make sure the struct is empty
2476
- hints.ai_family = AF_UNSPEC; // don't care IPv4 or IPv6
2477
- hints.ai_socktype = SOCK_STREAM; // TCP stream sockets
2478
- hints.ai_flags = AI_PASSIVE; // fill in my IP for me
2479
- if (getaddrinfo(address, port, &hints, &addrinfo)) {
2480
- // perror("addr err");
2481
- return -1;
2482
- }
2483
- // get the file descriptor
2484
- int fd =
2485
- socket(addrinfo->ai_family, addrinfo->ai_socktype, addrinfo->ai_protocol);
2486
- if (fd <= 0) {
2487
- freeaddrinfo(addrinfo);
2488
- return -1;
2489
- }
2490
- // make sure the socket is non-blocking
2491
- if (fio_set_non_block(fd) < 0) {
2492
- freeaddrinfo(addrinfo);
2493
- close(fd);
2494
- return -1;
2495
- }
2496
- if (server) {
2497
- {
2498
- // avoid the "address taken"
2499
- int optval = 1;
2500
- setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
2501
- }
2502
- // bind the address to the socket
2503
- int bound = 0;
2504
- for (struct addrinfo *i = addrinfo; i != NULL; i = i->ai_next) {
2505
- if (!bind(fd, i->ai_addr, i->ai_addrlen))
2506
- bound = 1;
2507
- }
2508
- if (!bound) {
2509
- // perror("bind err");
2510
- freeaddrinfo(addrinfo);
2511
- close(fd);
2512
- return -1;
2513
- }
2514
- #ifdef TCP_FASTOPEN
2515
- {
2516
- // support TCP Fast Open when available
2517
- int optval = 128;
2518
- setsockopt(fd, addrinfo->ai_protocol, TCP_FASTOPEN, &optval,
2519
- sizeof(optval));
2520
- }
2521
3229
  #endif
2522
- if (listen(fd, SOMAXCONN) < 0) {
2523
- freeaddrinfo(addrinfo);
2524
- close(fd);
2525
- return -1;
2526
- }
2527
- } else {
2528
- int one = 1;
2529
- setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
2530
- errno = 0;
2531
- for (struct addrinfo *i = addrinfo; i; i = i->ai_next) {
2532
- if (connect(fd, i->ai_addr, i->ai_addrlen) == 0 || errno == EINPROGRESS)
2533
- goto socket_okay;
2534
- }
2535
- freeaddrinfo(addrinfo);
2536
- close(fd);
2537
- return -1;
2538
- }
2539
- socket_okay:
2540
- fio_lock(&fd_data(fd).protocol_lock);
2541
- fio_clear_fd(fd, 1);
2542
- fio_unlock(&fd_data(fd).protocol_lock);
2543
- fio_tcp_addr_cpy(fd, addrinfo->ai_family, (void *)addrinfo);
2544
- freeaddrinfo(addrinfo);
2545
- return fd2uuid(fd);
2546
- }
2547
3230
 
2548
3231
  /* PUBLIC API: opens a server or client socket */
2549
3232
  intptr_t fio_socket(const char *address, const char *port, uint8_t server) {
@@ -2606,7 +3289,20 @@ Internal socket flushing related functions
2606
3289
 
2607
3290
  #endif
2608
3291
 
3292
+ #ifdef __MINGW32__
3293
+ static void fio_sock_perform_close_fd(intptr_t fd) {
3294
+ SOCKET s = fd_data(fd).socket_handle;
3295
+ int osffd = fd_data(fd).osffd;
3296
+ fio_clear_handle(fd);
3297
+ if (osffd != -1) {
3298
+ _close(osffd);
3299
+ } else if (s != -1) {
3300
+ closesocket_ptr(s);
3301
+ }
3302
+ }
3303
+ #else
2609
3304
  static void fio_sock_perform_close_fd(intptr_t fd) { close(fd); }
3305
+ #endif
2610
3306
 
2611
3307
  static inline void fio_sock_packet_rotate_unsafe(uintptr_t fd) {
2612
3308
  fio_packet_s *packet = fd_data(fd).packet;
@@ -2844,8 +3540,7 @@ ssize_t fio_write2_fn(intptr_t uuid, fio_write_args_s options) {
2844
3540
  locked_error:
2845
3541
  fio_unlock(&uuid_data(uuid).sock_lock);
2846
3542
  fio_packet_free(packet);
2847
- errno = EBADF;
2848
- return -1;
3543
+ /** fallthrough and free buffer */
2849
3544
  error:
2850
3545
  if (options.after.dealloc) {
2851
3546
  options.after.dealloc((void *)options.data.buffer);
@@ -2924,8 +3619,12 @@ void fio_force_close(intptr_t uuid) {
2924
3619
  fio_lock(&uuid_data(uuid).protocol_lock);
2925
3620
  fio_clear_fd(fio_uuid2fd(uuid), 0);
2926
3621
  fio_unlock(&uuid_data(uuid).protocol_lock);
3622
+ #ifdef __MINGW32__
3623
+ fio_sock_perform_close_fd(fio_uuid2fd(uuid));
3624
+ #else
2927
3625
  close(fio_uuid2fd(uuid));
2928
- #if FIO_ENGINE_POLL
3626
+ #endif
3627
+ #if FIO_ENGINE_POLL || FIO_ENGINE_WSAPOLL
2929
3628
  fio_poll_remove_fd(fio_uuid2fd(uuid));
2930
3629
  #endif
2931
3630
  if (fio_data->connection_count)
@@ -3062,12 +3761,37 @@ Connection Read / Write Hooks, for overriding the system calls
3062
3761
 
3063
3762
  static ssize_t fio_hooks_default_read(intptr_t uuid, void *udata, void *buf,
3064
3763
  size_t count) {
3764
+ #ifdef __MINGW32__
3765
+ int len = recv_ptr(fd_data(fio_uuid2fd(uuid)).socket_handle, buf, count, 0);
3766
+ if (len != SOCKET_ERROR)
3767
+ return len;
3768
+ int error = WSAGetLastError();
3769
+ switch (error) {
3770
+ case WSAEWOULDBLOCK:
3771
+ errno = EWOULDBLOCK;
3772
+ break;
3773
+ case WSAENOTCONN:
3774
+ errno = ENOTCONN;
3775
+ break;
3776
+ case WSAEINTR:
3777
+ errno = EINTR;
3778
+ break;
3779
+ default:
3780
+ errno = error;
3781
+ }
3782
+ return -1;
3783
+ #else
3065
3784
  return read(fio_uuid2fd(uuid), buf, count);
3785
+ #endif
3066
3786
  (void)(udata);
3067
3787
  }
3068
3788
  static ssize_t fio_hooks_default_write(intptr_t uuid, void *udata,
3069
3789
  const void *buf, size_t count) {
3790
+ #ifdef __MINGW32__
3791
+ return send_ptr(fd_data(fio_uuid2fd(uuid)).socket_handle, buf, count, 0);
3792
+ #else
3070
3793
  return write(fio_uuid2fd(uuid), buf, count);
3794
+ #endif
3071
3795
  (void)(udata);
3072
3796
  }
3073
3797
 
@@ -3540,7 +4264,9 @@ static void __attribute__((destructor)) fio_lib_destroy(void) {
3540
4264
  }
3541
4265
 
3542
4266
  static void fio_mem_init(void);
4267
+ #ifndef __MINGW32__
3543
4268
  static void fio_cluster_init(void);
4269
+ #endif
3544
4270
  static void fio_pubsub_initialize(void);
3545
4271
  static void __attribute__((constructor)) fio_lib_init(void) {
3546
4272
  /* detect socket capacity - MUST be first...*/
@@ -3548,9 +4274,25 @@ static void __attribute__((constructor)) fio_lib_init(void) {
3548
4274
  {
3549
4275
  #ifdef _SC_OPEN_MAX
3550
4276
  capa = sysconf(_SC_OPEN_MAX);
4277
+ #elif defined(__MINGW32__)
4278
+ /** iodine/ruby specific */
4279
+ capa = 1024;
4280
+ HMODULE mh = GetModuleHandleA("ws2_32.dll");
4281
+ accept_ptr = GetProcAddress(mh, "accept");
4282
+ bind_ptr = GetProcAddress(mh, "bind");
4283
+ closesocket_ptr = GetProcAddress(mh, "closesocket");
4284
+ connect_ptr = GetProcAddress(mh, "connect");
4285
+ getsockopt_ptr = GetProcAddress(mh, "getsockopt");
4286
+ ioctlsocket_ptr = GetProcAddress(mh, "ioctlsocket");
4287
+ listen_ptr = GetProcAddress(mh, "listen");
4288
+ recv_ptr = GetProcAddress(mh, "recv");
4289
+ send_ptr = GetProcAddress(mh, "send");
4290
+ setsockopt_ptr = GetProcAddress(mh, "setsockopt");
4291
+ socket_ptr = GetProcAddress(mh, "socket");
3551
4292
  #elif defined(FOPEN_MAX)
3552
4293
  capa = FOPEN_MAX;
3553
4294
  #endif
4295
+ #ifndef __MINGW32__
3554
4296
  // try to maximize limits - collect max and set to max
3555
4297
  struct rlimit rlim = {.rlim_max = 0};
3556
4298
  if (getrlimit(RLIMIT_NOFILE, &rlim) == -1) {
@@ -3569,6 +4311,8 @@ static void __attribute__((constructor)) fio_lib_init(void) {
3569
4311
  if (capa > 1024) /* leave a slice of room */
3570
4312
  capa -= 16;
3571
4313
  }
4314
+ #endif
4315
+
3572
4316
  /* initialize memory allocator */
3573
4317
  fio_mem_init();
3574
4318
  /* initialize polling engine */
@@ -3586,6 +4330,16 @@ static void __attribute__((constructor)) fio_lib_init(void) {
3586
4330
  (capa * (sizeof(*fio_data->info)))),
3587
4331
  (sizeof(*fio_data->poll) + sizeof(*fio_data->info)),
3588
4332
  sizeof(*fio_data));
4333
+ #elif FIO_ENGINE_WSAPOLL
4334
+ FIO_LOG_INFO("facil.io " FIO_VERSION_STRING " capacity initialization:\n"
4335
+ "* Meximum open files %zu out of %zu\n"
4336
+ "* Allocating %zu bytes for state handling.\n"
4337
+ "* %zu bytes per connection + %zu for state handling.",
4338
+ capa, FOPEN_MAX,
4339
+ (sizeof(*fio_data) + (capa * (sizeof(*fio_data->poll))) +
4340
+ (capa * (sizeof(*fio_data->info)))),
4341
+ (sizeof(*fio_data->poll) + sizeof(*fio_data->info)),
4342
+ sizeof(*fio_data));
3589
4343
  #else
3590
4344
  FIO_LOG_INFO("facil.io " FIO_VERSION_STRING " capacity initialization:\n"
3591
4345
  "* Meximum open files %zu out of %zu\n"
@@ -3598,7 +4352,7 @@ static void __attribute__((constructor)) fio_lib_init(void) {
3598
4352
  #endif
3599
4353
  }
3600
4354
 
3601
- #if FIO_ENGINE_POLL
4355
+ #if FIO_ENGINE_POLL || FIO_ENGINE_WSAPOLL
3602
4356
  /* allocate and initialize main data structures by detected capacity */
3603
4357
  fio_data = fio_mmap(sizeof(*fio_data) + (capa * (sizeof(*fio_data->poll))) +
3604
4358
  (capa * (sizeof(*fio_data->info))));
@@ -3618,7 +4372,10 @@ static void __attribute__((constructor)) fio_lib_init(void) {
3618
4372
 
3619
4373
  for (ssize_t i = 0; i < capa; ++i) {
3620
4374
  fio_clear_fd(i, 0);
3621
- #if FIO_ENGINE_POLL
4375
+ #ifdef __MINGW32__
4376
+ fio_clear_handle(i);
4377
+ #endif
4378
+ #if FIO_ENGINE_POLL || FIO_ENGINE_WSAPOLL
3622
4379
  fio_data->poll[i].fd = -1;
3623
4380
  #endif
3624
4381
  }
@@ -3803,11 +4560,21 @@ static void fio_worker_cleanup(void) {
3803
4560
  }
3804
4561
  fio_defer_push_task(fio_cycle_unwind, NULL, NULL);
3805
4562
  fio_defer_perform();
4563
+ #ifdef __MINGW32__
4564
+ size_t end = fio_data->capa;
4565
+ while (0 < end && fio_data->poll[end-1].fd == (SOCKET)-1)
4566
+ --end;
4567
+ for (size_t i = 0; i <= fio_data->capa; ++i) {
4568
+ // ensure _all_ socket handles and fds are closed for good
4569
+ fio_force_close(fd2uuid(i));
4570
+ }
4571
+ #else
3806
4572
  for (size_t i = 0; i <= fio_data->max_protocol_fd; ++i) {
3807
4573
  if (fd_data(i).protocol || fd_data(i).open) {
3808
4574
  fio_force_close(fd2uuid(i));
3809
4575
  }
3810
4576
  }
4577
+ #endif
3811
4578
  fio_timer_clear_all();
3812
4579
  fio_defer_perform();
3813
4580
  if (!fio_data->is_worker) {
@@ -3917,23 +4684,31 @@ void fio_start FIO_IGNORE_MACRO(struct fio_start_args args) {
3917
4684
  fio_data->is_worker = 0;
3918
4685
 
3919
4686
  fio_state_callback_force(FIO_CALL_PRE_START);
4687
+ #if HAVE_OPENSSL
3920
4688
  FIO_LOG_INFO(
3921
4689
  "Server is running %u %s X %u %s with facil.io " FIO_VERSION_STRING
3922
4690
  " (%s)\n"
3923
- #if HAVE_OPENSSL
3924
4691
  "* Linked to %s\n"
3925
- #endif
3926
4692
  "* Detected capacity: %d open file limit\n"
3927
4693
  "* Root pid: %d\n"
3928
4694
  "* Press ^C to stop\n",
3929
4695
  fio_data->workers, fio_data->workers > 1 ? "workers" : "worker",
3930
4696
  fio_data->threads, fio_data->threads > 1 ? "threads" : "thread",
3931
4697
  fio_engine(),
3932
- #if HAVE_OPENSSL
3933
4698
  OpenSSL_version(0),
3934
- #endif
3935
4699
  fio_data->capa, (int)fio_data->parent);
3936
-
4700
+ #else
4701
+ FIO_LOG_INFO(
4702
+ "Server is running %u %s X %u %s with facil.io " FIO_VERSION_STRING
4703
+ " (%s)\n"
4704
+ "* Detected capacity: %d open file limit\n"
4705
+ "* Root pid: %d\n"
4706
+ "* Press ^C to stop\n",
4707
+ fio_data->workers, fio_data->workers > 1 ? "workers" : "worker",
4708
+ fio_data->threads, fio_data->threads > 1 ? "threads" : "thread",
4709
+ fio_engine(),
4710
+ fio_data->capa, (int)fio_data->parent);
4711
+ #endif
3937
4712
  if (args.workers > 1) {
3938
4713
  for (int i = 0; i < args.workers && fio_data->active; ++i) {
3939
4714
  fio_sentinel_task(NULL, NULL);
@@ -4359,7 +5134,7 @@ Section Start Marker
4359
5134
 
4360
5135
 
4361
5136
  ***************************************************************************** */
4362
-
5137
+ #ifndef __MINGW32__
4363
5138
  /**
4364
5139
  * Returns the number of registered ALPN protocol names.
4365
5140
  *
@@ -4433,6 +5208,7 @@ void FIO_TLS_WEAK fio_tls_destroy(void *tls) {
4433
5208
  return;
4434
5209
  (void)tls;
4435
5210
  }
5211
+ #endif
4436
5212
 
4437
5213
  /* *****************************************************************************
4438
5214
  Section Start Marker
@@ -4490,8 +5266,10 @@ typedef struct {
4490
5266
 
4491
5267
  static void fio_listen_cleanup_task(void *pr_) {
4492
5268
  fio_listen_protocol_s *pr = pr_;
5269
+ #ifndef __MINGW32__
4493
5270
  if (pr->tls)
4494
5271
  fio_tls_destroy(pr->tls);
5272
+ #endif
4495
5273
  if (pr->on_finish) {
4496
5274
  pr->on_finish(pr->uuid, pr->udata);
4497
5275
  }
@@ -4532,6 +5310,7 @@ static void fio_listen_on_data(intptr_t uuid, fio_protocol_s *pr_) {
4532
5310
  }
4533
5311
  }
4534
5312
 
5313
+ #ifndef __MINGW32__
4535
5314
  static void fio_listen_on_data_tls(intptr_t uuid, fio_protocol_s *pr_) {
4536
5315
  fio_listen_protocol_s *pr = (fio_listen_protocol_s *)pr_;
4537
5316
  for (int i = 0; i < 4; ++i) {
@@ -4552,6 +5331,7 @@ static void fio_listen_on_data_tls_alpn(intptr_t uuid, fio_protocol_s *pr_) {
4552
5331
  fio_tls_accept(client, pr->tls, pr->udata);
4553
5332
  }
4554
5333
  }
5334
+ #endif
4555
5335
 
4556
5336
  /* stub for editor - unused */
4557
5337
  void fio_listen____(void);
@@ -4562,11 +5342,19 @@ void fio_listen____(void);
4562
5342
  */
4563
5343
  intptr_t fio_listen FIO_IGNORE_MACRO(struct fio_listen_args args) {
4564
5344
  // ...
5345
+ #ifdef __MINGW32__
5346
+ if ((!args.on_open) ||
5347
+ (!args.address && !args.port)) {
5348
+ errno = EINVAL;
5349
+ goto error;
5350
+ }
5351
+ #else
4565
5352
  if ((!args.on_open && (!args.tls || !fio_tls_alpn_count(args.tls))) ||
4566
5353
  (!args.address && !args.port)) {
4567
5354
  errno = EINVAL;
4568
5355
  goto error;
4569
5356
  }
5357
+ #endif
4570
5358
 
4571
5359
  size_t addr_len = 0;
4572
5360
  size_t port_len = 0;
@@ -4592,19 +5380,23 @@ intptr_t fio_listen FIO_IGNORE_MACRO(struct fio_listen_args args) {
4592
5380
  fio_listen_protocol_s *pr = malloc(sizeof(*pr) + addr_len + port_len +
4593
5381
  ((addr_len + port_len) ? 2 : 0));
4594
5382
  FIO_ASSERT_ALLOC(pr);
4595
-
5383
+ #ifndef __MINGW32__
4596
5384
  if (args.tls)
4597
5385
  fio_tls_dup(args.tls);
4598
-
5386
+ #endif
4599
5387
  *pr = (fio_listen_protocol_s){
4600
5388
  .pr =
4601
5389
  {
4602
5390
  .on_close = fio_listen_on_close,
4603
5391
  .ping = mock_ping_eternal,
5392
+ #ifdef __MINGW32__
5393
+ .on_data = fio_listen_on_data,
5394
+ #else
4604
5395
  .on_data = (args.tls ? (fio_tls_alpn_count(args.tls)
4605
5396
  ? fio_listen_on_data_tls_alpn
4606
5397
  : fio_listen_on_data_tls)
4607
5398
  : fio_listen_on_data),
5399
+ #endif
4608
5400
  },
4609
5401
  .uuid = uuid,
4610
5402
  .udata = args.udata,
@@ -4696,8 +5488,10 @@ static void fio_connect_on_close(intptr_t uuid, fio_protocol_s *pr_) {
   fio_connect_protocol_s *pr = (fio_connect_protocol_s *)pr_;
   if (pr->on_fail)
     pr->on_fail(uuid, pr->udata);
+ #ifndef __MINGW32__
   if (pr->tls)
     fio_tls_destroy(pr->tls);
+ #endif
   fio_free(pr);
   (void)uuid;
 }
@@ -4713,6 +5507,7 @@ static void fio_connect_on_ready(intptr_t uuid, fio_protocol_s *pr_) {
   (void)uuid;
 }
 
+ #ifndef __MINGW32__
 static void fio_connect_on_ready_tls(intptr_t uuid, fio_protocol_s *pr_) {
   fio_connect_protocol_s *pr = (fio_connect_protocol_s *)pr_;
   if (pr->pr.on_ready == mock_on_ev)
@@ -4735,16 +5530,25 @@ static void fio_connect_on_ready_tls_alpn(intptr_t uuid, fio_protocol_s *pr_) {
   fio_poll_add(fio_uuid2fd(uuid));
   (void)uuid;
 }
+ #endif
 
 /* stub for sublime text function navigation */
 intptr_t fio_connect___(struct fio_connect_args args);
 
 intptr_t fio_connect FIO_IGNORE_MACRO(struct fio_connect_args args) {
+ #ifdef __MINGW32__
+  if ((!args.on_connect) ||
+      (!args.address && !args.port)) {
+    errno = EINVAL;
+    goto error;
+  }
+ #else
   if ((!args.on_connect && (!args.tls || !fio_tls_alpn_count(args.tls))) ||
       (!args.address && !args.port)) {
     errno = EINVAL;
     goto error;
   }
+ #endif
   const intptr_t uuid = fio_socket(args.address, args.port, 0);
   if (uuid == -1)
     goto error;
@@ -4752,17 +5556,21 @@ intptr_t fio_connect FIO_IGNORE_MACRO(struct fio_connect_args args) {
 
   fio_connect_protocol_s *pr = fio_malloc(sizeof(*pr));
   FIO_ASSERT_ALLOC(pr);
-
+ #ifndef __MINGW32__
   if (args.tls)
     fio_tls_dup(args.tls);
-
+ #endif
   *pr = (fio_connect_protocol_s){
       .pr =
           {
+ #ifdef __MINGW32__
+              .on_ready = fio_connect_on_ready,
+ #else
               .on_ready = (args.tls ? (fio_tls_alpn_count(args.tls)
                                            ? fio_connect_on_ready_tls_alpn
                                            : fio_connect_on_ready_tls)
                                     : fio_connect_on_ready),
+ #endif
               .on_close = fio_connect_on_close,
           },
       .uuid = uuid,
@@ -5136,7 +5944,7 @@ struct subscription_s {
   fio_lock_i unsubscribed;
 };
 
- /* Use `malloc` / `free`, because channles might have a long life. */
+ /* Use `malloc` / `free`, because channels might have a long life. */
 
 /** Used internally by the Set object to create a new channel. */
 static channel_s *fio_channel_copy(channel_s *src) {
@@ -5378,10 +6186,27 @@ static inline channel_s *fio_filter_dup_lock_internal(channel_s *ch,
                                                       fio_collection_s *c) {
   fio_lock(&c->lock);
   ch = fio_ch_set_insert(&c->channels, hashed, ch);
-  fio_channel_dup(ch);
-  fio_lock(&ch->lock);
   fio_unlock(&c->lock);
-  return ch;
+  /* respect locking order to prevent deadlock with fio_unsubscribe */
+  fio_lock(&ch->lock);
+  fio_lock(&c->lock);
+  /* check again if channels is still in collection */
+  channel_s *found_ch = fio_ch_set_find(&c->channels, hashed, ch);
+  if (found_ch == ch) {
+    /* channel is still in collection:
+     * unlock the collection
+     * increase reference counter
+     * leave the channel locked and return it */
+    fio_unlock(&c->lock);
+    fio_channel_dup(ch);
+    return ch;
+  } else {
+    /* channel could not be found, it has been removed from the collection */
+    /* insert it again */
+    fio_unlock(&c->lock);
+    fio_unlock(&ch->lock);
+    return fio_filter_dup_lock_internal(ch, hashed, c);
+  }
 }
 
 /** Creates / finds a filter channel, adds a reference count and locks it. */
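The rewritten `fio_filter_dup_lock_internal` above now takes the channel lock before re-taking the collection lock and re-validates that the channel is still in the set, retrying if it lost a race with `fio_unsubscribe`. A self-contained sketch of that "fixed lock order plus re-check" pattern, using illustrative pthread names rather than the gem's `fio_lock` helpers:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins for channel_s / fio_collection_s. */
typedef struct { pthread_mutex_t lock; bool in_collection; } item_s;
typedef struct { pthread_mutex_t lock; } collection_s;

/* Hypothetical re-insert helper, standing in for fio_ch_set_insert(). */
static void collection_insert(collection_s *c, item_s *it) {
  it->in_collection = true;
  (void)c;
}

/* Lock in the same order the remover uses (item first, then collection),
 * and re-check membership that may have changed while nothing was locked. */
static item_s *dup_lock(collection_s *c, item_s *it) {
  for (;;) {
    pthread_mutex_lock(&it->lock); /* 1st: item lock (matches the remover) */
    pthread_mutex_lock(&c->lock);  /* 2nd: collection lock */
    if (it->in_collection) {
      pthread_mutex_unlock(&c->lock);
      return it; /* still a member - return it locked, as in the gem */
    }
    /* lost the race with removal: re-insert, drop both locks, try again */
    collection_insert(c, it);
    pthread_mutex_unlock(&c->lock);
    pthread_mutex_unlock(&it->lock);
  }
}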
@@ -5391,6 +6216,7 @@ static channel_s *fio_filter_dup_lock(uint32_t filter) {
       .name_len = (sizeof(filter)),
       .parent = &fio_postoffice.filters,
       .ref = 8, /* avoid freeing stack memory */
+      .lock = FIO_LOCK_INIT,
   };
   return fio_filter_dup_lock_internal(&ch, filter, &fio_postoffice.filters);
 }
@@ -5402,6 +6228,7 @@ static channel_s *fio_channel_dup_lock(fio_str_info_s name) {
       .name_len = name.len,
       .parent = &fio_postoffice.pubsub,
       .ref = 8, /* avoid freeing stack memory */
+      .lock = FIO_LOCK_INIT,
   };
   uint64_t hashed_name = FIO_HASH_FN(
       name.data, name.len, &fio_postoffice.pubsub, &fio_postoffice.pubsub);
@@ -5422,6 +6249,7 @@ static channel_s *fio_channel_match_dup_lock(fio_str_info_s name,
       .parent = &fio_postoffice.patterns,
       .match = match,
       .ref = 8, /* avoid freeing stack memory */
+      .lock = FIO_LOCK_INIT,
   };
   uint64_t hashed_name = FIO_HASH_FN(
       name.data, name.len, &fio_postoffice.pubsub, &fio_postoffice.pubsub);
@@ -5448,7 +6276,7 @@ static inline void fio_subscription_free(subscription_s *s) {
 /** SublimeText 3 marker */
 subscription_s *fio_subscribe___(subscribe_args_s args);
 
- /** Subscribes to a filter, pub/sub channle or patten */
+ /** Subscribes to a filter, pub/sub channel or pattern */
 subscription_s *fio_subscribe FIO_IGNORE_MACRO(subscribe_args_s args) {
   if (!args.on_message)
     goto error;
@@ -5480,7 +6308,7 @@ error:
   return NULL;
 }
 
- /** Unsubscribes from a filter, pub/sub channle or patten */
+ /** Unsubscribes from a filter, pub/sub channel or pattern */
 void fio_unsubscribe(subscription_s *s) {
   if (!s)
     return;
@@ -5531,11 +6359,11 @@ fio_str_info_s fio_subscription_channel(subscription_s *subscription) {
 /* *****************************************************************************
 Engine handling and Management
 ***************************************************************************** */
-
+ #ifndef __MINGW32__
 /* implemented later, informs root process about pub/sub subscriptions */
 static inline void fio_cluster_inform_root_about_channel(channel_s *ch,
                                                          int add);
-
+ #endif
 /* runs in lock(!) let'm all know */
 static void fio_pubsub_on_channel_create(channel_s *ch) {
   fio_lock(&fio_postoffice.engines.lock);
@@ -5547,7 +6375,9 @@ static void fio_pubsub_on_channel_create(channel_s *ch) {
                      ch->match);
   }
   fio_unlock(&fio_postoffice.engines.lock);
+ #ifndef __MINGW32__
   fio_cluster_inform_root_about_channel(ch, 1);
+ #endif
 }
 
 /* runs in lock(!) let'm all know */
@@ -5561,7 +6391,9 @@ static void fio_pubsub_on_channel_destroy(channel_s *ch) {
                      ch->match);
   }
   fio_unlock(&fio_postoffice.engines.lock);
+ #ifndef __MINGW32__
   fio_cluster_inform_root_about_channel(ch, 0);
+ #endif
 }
 
 /**
@@ -5853,6 +6685,7 @@ static struct cluster_data_s {
 } cluster_data = {.clients = FIO_LS_INIT(cluster_data.clients),
                   .lock = FIO_LOCK_INIT};
 
+ #ifndef __MINGW32__
 static void fio_cluster_data_cleanup(int delete_file) {
   if (delete_file && cluster_data.name[0]) {
 #if DEBUG
@@ -5913,7 +6746,7 @@ static void fio_cluster_init(void) {
   /* add cleanup callback */
   fio_state_callback_add(FIO_CALL_AT_EXIT, fio_cluster_cleanup, NULL);
 }
-
+ #endif
 /* *****************************************************************************
 * Cluster Protocol callbacks
 **************************************************************************** */
@@ -6043,7 +6876,9 @@ static void fio_cluster_on_close(intptr_t uuid, fio_protocol_s *pr_) {
     FIO_LOG_FATAL("(%d) Parent Process crash detected!", (int)getpid());
     fio_state_callback_force(FIO_CALL_ON_PARENT_CRUSH);
     fio_state_callback_clear(FIO_CALL_ON_PARENT_CRUSH);
+ #ifndef __MINGW32__
     fio_cluster_data_cleanup(1);
+ #endif
     kill(getpid(), SIGINT);
   }
 }
@@ -6082,7 +6917,7 @@ fio_cluster_protocol_alloc(intptr_t uuid,
 /* *****************************************************************************
 * Master (server) IPC Connections
 **************************************************************************** */
-
+ #ifndef __MINGW32__
 static void fio_cluster_server_sender(void *m_, intptr_t avoid_uuid) {
   fio_msg_internal_s *m = m_;
   fio_lock(&cluster_data.lock);
@@ -6230,16 +7065,20 @@ static void fio_listen2cluster(void *ignore) {
       .ping = mock_ping_eternal,
       .on_close = fio_cluster_listen_on_close,
   };
+ #ifdef __MINGW32__
+  FIO_LOG_DEBUG("(%d) Listening to cluster on above tcp socket.", (int)getpid());
+ #else
   FIO_LOG_DEBUG("(%d) Listening to cluster: %s", (int)getpid(),
                 cluster_data.name);
+ #endif
   fio_attach(cluster_data.uuid, p);
   (void)ignore;
 }
-
+ #endif
 /* *****************************************************************************
 * Worker (client) IPC connections
 **************************************************************************** */
-
+ #ifndef __MINGW32__
 static void fio_cluster_client_handler(struct cluster_pr_s *pr) {
   /* what to do? */
   switch ((fio_cluster_message_type_e)pr->type) {
@@ -6349,11 +7188,11 @@ static void fio_send2cluster(fio_msg_internal_s *m) {
     fio_cluster_client_sender(fio_msg_internal_dup(m), -1);
   }
 }
-
+ #endif
 /* *****************************************************************************
- * Propegation
+ * Propagation
 **************************************************************************** */
-
+ #ifndef __MINGW32__
 static inline void fio_cluster_inform_root_about_channel(channel_s *ch,
                                                          int add) {
   if (!fio_data->is_worker || fio_data->workers == 1 || !cluster_data.uuid ||
@@ -6371,7 +7210,7 @@ static inline void fio_cluster_inform_root_about_channel(channel_s *ch,
 #endif
   char buf[8] = {0};
   if (ch->match) {
-    fio_u2str64(buf, (uint64_t)ch->match);
+    fio_u2str64(buf, (uintptr_t)ch->match);
     msg.data = buf;
     msg.len = sizeof(ch->match);
   }
@@ -6386,14 +7225,16 @@ static inline void fio_cluster_inform_root_about_channel(channel_s *ch,
                                 ch_name, msg, 0, 1),
       -1);
 }
-
+ #endif
 /* *****************************************************************************
 * Initialization
 **************************************************************************** */
-
+ #ifndef __MINGW32__
 static void fio_accept_after_fork(void *ignore) {
   /* prevent `accept` backlog in parent */
+ #ifndef __MINGW32__
   fio_cluster_listen_accept(cluster_data.uuid, NULL);
+ #endif
   (void)ignore;
 }
 
@@ -6448,14 +7289,17 @@ static void fio_cluster_at_exit(void *ignore) {
   fio_defer_perform();
   (void)ignore;
 }
+ #endif
 
 static void fio_pubsub_initialize(void) {
+ #ifndef __MINGW32__
   fio_cluster_init();
   fio_state_callback_add(FIO_CALL_PRE_START, fio_listen2cluster, NULL);
   fio_state_callback_add(FIO_CALL_IN_MASTER, fio_accept_after_fork, NULL);
   fio_state_callback_add(FIO_CALL_IN_CHILD, fio_connect2cluster, NULL);
   fio_state_callback_add(FIO_CALL_ON_FINISH, fio_cluster_cleanup, NULL);
   fio_state_callback_add(FIO_CALL_AT_EXIT, fio_cluster_at_exit, NULL);
+ #endif
 }
 
 /* *****************************************************************************
@@ -6506,11 +7350,13 @@ static void fio_cluster_signal_children(void) {
     fio_stop();
     return;
   }
+ #ifndef __MINGW32__
   fio_cluster_server_sender(fio_msg_internal_create(0, FIO_CLUSTER_MSG_SHUTDOWN,
                                                     (fio_str_info_s){.len = 0},
                                                     (fio_str_info_s){.len = 0},
                                                     0, 1),
                             -1);
+ #endif
 }
 
 /* Sublime Text marker */
@@ -6546,7 +7392,9 @@ void fio_publish FIO_IGNORE_MACRO(fio_publish_args_s args) {
         args.filter,
         (args.is_json ? FIO_CLUSTER_MSG_JSON : FIO_CLUSTER_MSG_FORWARD),
         args.channel, args.message, args.is_json, 1);
+ #ifndef __MINGW32__
     fio_send2cluster(m);
+ #endif
     fio_publish2process(m);
     break;
   case 2UL: // ((uintptr_t)FIO_PUBSUB_PROCESS):
@@ -6559,7 +7407,9 @@ void fio_publish FIO_IGNORE_MACRO(fio_publish_args_s args) {
         args.filter,
         (args.is_json ? FIO_CLUSTER_MSG_JSON : FIO_CLUSTER_MSG_FORWARD),
         args.channel, args.message, args.is_json, 1);
+ #ifndef __MINGW32__
     fio_send2cluster(m);
+ #endif
     fio_msg_internal_free(m);
     m = NULL;
     break;
@@ -6568,11 +7418,15 @@ void fio_publish FIO_IGNORE_MACRO(fio_publish_args_s args) {
         args.filter,
         (args.is_json ? FIO_CLUSTER_MSG_ROOT_JSON : FIO_CLUSTER_MSG_ROOT),
         args.channel, args.message, args.is_json, 1);
+ #ifdef __MINGW32__
+    fio_publish2process(m);
+ #else
     if (fio_data->is_worker == 0 || fio_data->workers == 1) {
       fio_publish2process(m);
     } else {
       fio_cluster_client_sender(m, -1);
     }
+ #endif
     break;
   default:
     if (args.filter != 0) {
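Because the cluster IPC layer is compiled out under `__MINGW32__`, every branch of `fio_publish` above ends up delivering only to subscribers inside the current process on Windows. Publishing itself looks the same for callers; a minimal sketch using the pub/sub API as it appears in this file (the channel and message literals are illustrative):

#include <fio.h>

/* Publish "hello" to the "chat" channel; on the MinGW build this reaches
 * local subscribers only, since fio_send2cluster() is compiled out. */
static void say_hello(void) {
  fio_publish(.channel = {.data = "chat", .len = 4},
              .message = {.data = "hello", .len = 5});
}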
@@ -7072,15 +7926,29 @@ static inline arena_s *arena_lock(arena_s *preffered) {
   } while (1);
 }
 
- static __thread arena_s *arena_last_used;
+ static pthread_key_t arena_last_used_key;
+ static pthread_once_t arena_last_used_once;
+ static void init_arena_last_used_key(void) {
+   pthread_key_create(&arena_last_used_key, NULL);
+ }
 
- static void arena_enter(void) { arena_last_used = arena_lock(arena_last_used); }
+ static void arena_enter(void) {
+   pthread_once(&arena_last_used_once, init_arena_last_used_key);
+   arena_s *arena_last_used = pthread_getspecific(arena_last_used_key);
+   arena_last_used = arena_lock(arena_last_used);
+   pthread_setspecific(arena_last_used_key, arena_last_used);
+ }
 
- static inline void arena_exit(void) { fio_unlock(&arena_last_used->lock); }
+ static inline void arena_exit(void) {
+   pthread_once(&arena_last_used_once, init_arena_last_used_key);
+   arena_s *arena_last_used = pthread_getspecific(arena_last_used_key);
+   fio_unlock(&arena_last_used->lock);
+ }
 
 /** Clears any memory locks, in case of a system call to `fork`. */
 void fio_malloc_after_fork(void) {
-  arena_last_used = NULL;
+  pthread_once(&arena_last_used_once, init_arena_last_used_key);
+  pthread_setspecific(arena_last_used_key, NULL);
   if (!arenas) {
     return;
   }
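The allocator above switches from a `__thread` pointer to POSIX TLS keys so the same code builds under MinGW. The generic shape of that replacement, with illustrative names and `PTHREAD_ONCE_INIT` as the portable initializer for the once-control:

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t tls_key;
static pthread_once_t tls_once = PTHREAD_ONCE_INIT;
static void tls_key_init(void) { pthread_key_create(&tls_key, NULL); }

/* Read the per-thread pointer (NULL until the calling thread stores one). */
static void *tls_get(void) {
  pthread_once(&tls_once, tls_key_init);
  return pthread_getspecific(tls_key);
}

/* Store the per-thread pointer, creating the key on first use. */
static void tls_set(void *ptr) {
  pthread_once(&tls_once, tls_key_init);
  pthread_setspecific(tls_key, ptr);
}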
@@ -7184,6 +8052,8 @@ static inline block_s *block_new(void) {
 
 /* allocates memory from within a block - called within an arena's lock */
 static inline void *block_slice(uint16_t units) {
+  pthread_once(&arena_last_used_once, init_arena_last_used_key);
+  arena_s *arena_last_used = pthread_getspecific(arena_last_used_key);
   block_s *blk = arena_last_used->block;
   if (!blk) {
     /* arena is empty */
@@ -7295,6 +8165,10 @@ static void fio_mem_init(void) {
   ssize_t cpu_count = 0;
 #ifdef _SC_NPROCESSORS_ONLN
   cpu_count = sysconf(_SC_NPROCESSORS_ONLN);
+ #elif defined(__MINGW32__)
+  SYSTEM_INFO sys_info;
+  GetSystemInfo(&sys_info);
+  cpu_count = sys_info.dwNumberOfProcessors;
 #else
 #warning Dynamic CPU core count is unavailable - assuming 8 cores for memory allocation pools.
 #endif
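The `GetSystemInfo` branch above is what stands in for `sysconf(_SC_NPROCESSORS_ONLN)` on MinGW. A self-contained helper mirroring the same three branches (the fallback of 8 cores comes from the surrounding code; the function name is illustrative):

#include <stddef.h>
#ifdef __MINGW32__
#include <windows.h>
#else
#include <unistd.h>
#endif

/* Return the number of online cores, or 8 when detection is unavailable. */
static size_t cpu_core_count(void) {
#ifdef _SC_NPROCESSORS_ONLN
  long n = sysconf(_SC_NPROCESSORS_ONLN);
  return n > 0 ? (size_t)n : 8;
#elif defined(__MINGW32__)
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwNumberOfProcessors ? (size_t)si.dwNumberOfProcessors : 8;
#else
  return 8; /* same assumption the #warning above documents */
#endif
}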
@@ -7304,7 +8178,9 @@ static void fio_mem_init(void) {
   arenas = big_alloc(sizeof(*arenas) * cpu_count);
   FIO_ASSERT_ALLOC(arenas);
   block_free(block_new());
+ #ifndef __MINGW32__
   pthread_atfork(NULL, NULL, fio_malloc_after_fork);
+ #endif
 }
 
 static void fio_mem_destroy(void) {
@@ -7457,13 +8333,35 @@ void *realloc(void *ptr, size_t new_size) { return fio_realloc(ptr, new_size); }
 
 ***************************************************************************** */
 
+ static pthread_key_t s_key;
+ static pthread_key_t c_key;
+ static pthread_once_t s_c_once = PTHREAD_ONCE_INIT;
+ static void init_s_c_key(void) {
+   pthread_key_create(&s_key, free);
+   pthread_key_create(&c_key, free);
+ }
+ static void init_s_c_ptr(void) {
+   uint64_t *s = malloc(sizeof(uint64_t) * 2);
+   FIO_ASSERT_ALLOC(s);
+   memset(s, 0, sizeof(uint64_t) * 2);
+   uint16_t *c = malloc(sizeof(uint16_t));
+   FIO_ASSERT_ALLOC(c);
+   memset(c, 0, sizeof(uint16_t));
+   pthread_setspecific(s_key, s);
+   pthread_setspecific(c_key, c);
+ }
 /* tested for randomness using code from: http://xoshiro.di.unimi.it/hwd.php */
 uint64_t fio_rand64(void) {
   /* modeled after xoroshiro128+, by David Blackman and Sebastiano Vigna */
-  static __thread uint64_t s[2]; /* random state */
-  static __thread uint16_t c;    /* seed counter */
+  pthread_once(&s_c_once, init_s_c_key);
+  uint64_t *s = (uint64_t *)pthread_getspecific(s_key); /* random state */
+  if (!s) {
+    init_s_c_ptr();
+    s = (uint64_t *)pthread_getspecific(s_key);
+  }
+  uint16_t *c = (uint16_t *)pthread_getspecific(c_key); /* seed counter */
   const uint64_t P[] = {0x37701261ED6C16C7ULL, 0x764DBBB75F3B3E0DULL};
-  if (c++ == 0) {
+  if (*c++ == 0) {
     /* re-seed state every 65,536 requests */
 #ifdef RUSAGE_SELF
   struct rusage rusage;
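The PRNG above keeps its xoroshiro128+ state behind pthread keys instead of `__thread` storage. A compact sketch of the same idea with a single heap-allocated record per thread (the names are illustrative, not the gem's); note that when the counter lives behind a pointer it must be incremented as `(*c)++`, since `*c++` advances the pointer itself:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { uint64_t s[2]; uint16_t c; } rng_state_s; /* state + counter */

static pthread_key_t rng_key;
static pthread_once_t rng_once = PTHREAD_ONCE_INIT;
static void rng_key_init(void) { pthread_key_create(&rng_key, free); }

/* Fetch (or lazily create) the calling thread's zeroed PRNG state. */
static rng_state_s *rng_state(void) {
  pthread_once(&rng_once, rng_key_init);
  rng_state_s *st = pthread_getspecific(rng_key);
  if (!st) {
    st = calloc(1, sizeof(*st)); /* zeroed, so the first call triggers a re-seed */
    pthread_setspecific(rng_key, st);
  }
  return st;
}

/* Returns non-zero once every 65,536 calls per thread, when a re-seed is due. */
static int rng_should_reseed(void) {
  uint16_t *c = &rng_state()->c;
  return ((*c)++ == 0); /* `*c++` would step the pointer, not the counter */
}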
@@ -9204,6 +10102,8 @@ FIO_FUNC void fio_malloc_test(void) {
   mem = fio_realloc(mem, 1);
   FIO_ASSERT(mem, "fio_realloc failed!\n");
   FIO_ASSERT(mem[0] == 'a', "fio_realloc memory wasn't copied!\n");
+  pthread_once(&arena_last_used_once, init_arena_last_used_key);
+  arena_s *arena_last_used = pthread_getspecific(arena_last_used_key);
   FIO_ASSERT(arena_last_used, "arena_last_used wasn't initialized!\n");
   fio_free(mem);
   block_s *b = arena_last_used->block;
@@ -9452,6 +10352,12 @@ FIO_FUNC void fio_socket_test(void) {
 
   fprintf(stderr, "=== Testing facil.io listening socket creation (partial "
                   "testing only).\n");
+ #ifdef __MINGW32__
+  fprintf(stderr, "* testing on TCP/IP port 8765\n");
+  intptr_t uuid;
+  intptr_t client1;
+  intptr_t client2;
+ #else
   fprintf(stderr, "* testing on TCP/IP port 8765 and Unix socket: %s\n",
           fio_str_data(&sock_name));
   intptr_t uuid = fio_socket(fio_str_data(&sock_name), NULL, 1);
@@ -9508,6 +10414,7 @@ FIO_FUNC void fio_socket_test(void) {
   unlink(fio_str_data(&sock_name));
   /* free unix socket name */
   fio_str_free(&sock_name);
+ #endif
 
   uuid = fio_socket(NULL, "8765", 1);
   FIO_ASSERT(uuid != -1, "Failed to open TCP/IP socket on port 8765");
@@ -10496,7 +11403,7 @@ FIO_FUNC void fio_base64_test(void) {
       fprintf(stderr,
               ":\n--- fio Base64 Test FAILED!\nstring: %s\nlength: %lu\n "
               "expected: %s\ngot: %s\n\n",
-              sets[i].str, strlen(sets[i].str), sets[i].base64, buffer);
+              sets[i].str, (u_long)strlen(sets[i].str), sets[i].base64, buffer);
       FIO_ASSERT(0, "Base64 failure.");
   }
   i++;
@@ -10569,7 +11476,45 @@ FIO_FUNC void fio_test_random(void) {
 /* *****************************************************************************
 Poll (not kqueue or epoll) tests
 ***************************************************************************** */
- #if FIO_ENGINE_POLL
+ #if FIO_ENGINE_POLL || FIO_ENGINE_WSAPOLL
+ #ifdef __MINGW32__
+ FIO_FUNC void fio_poll_test(void) {
+   fprintf(stderr, "=== Testing poll add / remove fd\n");
+   fio_poll_add(5);
+   FIO_ASSERT(fio_data->poll[5].fd == 5, "fio_poll_add didn't set used fd data");
+   FIO_ASSERT(fio_data->poll[5].events ==
+                  (FIO_POLL_READ_EVENTS | FIO_POLL_WRITE_EVENTS),
+              "fio_poll_add didn't set used fd flags");
+   fio_poll_add(7);
+   FIO_ASSERT(fio_data->poll[6].fd == INVALID_SOCKET,
+              "fio_poll_add didn't reset unused fd data %d",
+              fio_data->poll[6].fd);
+   fio_poll_add(6);
+   fio_poll_remove_fd(6);
+   FIO_ASSERT(fio_data->poll[6].fd == INVALID_SOCKET,
+              "fio_poll_remove_fd didn't reset unused fd data");
+   FIO_ASSERT(fio_data->poll[6].events == 0,
+              "fio_poll_remove_fd didn't reset unused fd flags");
+   fio_poll_remove_read(7);
+   FIO_ASSERT(fio_data->poll[7].events == (FIO_POLL_WRITE_EVENTS),
+              "fio_poll_remove_read didn't remove read flags");
+   fio_poll_add_read(7);
+   fio_poll_remove_write(7);
+   FIO_ASSERT(fio_data->poll[7].events == (FIO_POLL_READ_EVENTS),
+              "fio_poll_remove_write didn't remove read flags");
+   fio_poll_add_write(7);
+   fio_poll_remove_read(7);
+   FIO_ASSERT(fio_data->poll[7].events == (FIO_POLL_WRITE_EVENTS),
+              "fio_poll_add_write didn't add the write flag?");
+   fio_poll_remove_write(7);
+   FIO_ASSERT(fio_data->poll[7].fd == INVALID_SOCKET,
+              "fio_poll_remove (both) didn't reset unused fd data");
+   FIO_ASSERT(fio_data->poll[7].events == 0,
+              "fio_poll_remove (both) didn't reset unused fd flags");
+   fio_poll_remove_fd(5);
+   fprintf(stderr, "\n* passed.\n");
+ }
+ #else
 FIO_FUNC void fio_poll_test(void) {
   fprintf(stderr, "=== Testing poll add / remove fd\n");
   fio_poll_add(5);
@@ -10606,6 +11551,7 @@ FIO_FUNC void fio_poll_test(void) {
   fio_poll_remove_fd(5);
   fprintf(stderr, "\n* passed.\n");
 }
+ #endif
 #else
 #define fio_poll_test()
 #endif
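The Windows variant of the poll test above compares empty slots against `INVALID_SOCKET` rather than `-1`, since Windows socket handles are unsigned, and the guard now also covers a `FIO_ENGINE_WSAPOLL` build. Presumably that engine wraps `WSAPoll()`; a minimal, self-contained sketch of such a wrapper (not the gem's code, and it assumes `WSAStartup()` has already run):

#ifdef __MINGW32__
#include <winsock2.h>

/* Wait up to timeout_ms for a socket to become readable via WSAPoll(). */
static int wait_readable(SOCKET s, int timeout_ms) {
  WSAPOLLFD pfd = {.fd = s, .events = POLLRDNORM, .revents = 0};
  int r = WSAPoll(&pfd, 1, timeout_ms);
  return (r > 0) && (pfd.revents & POLLRDNORM);
}
#endif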
@@ -10831,7 +11777,7 @@ FIO_FUNC void fio_atol_test(void) {
     __asm__ volatile("" ::: "memory");
   }
   end = clock();
-  fprintf(stderr, "fio_atol base 10 (%ld): %zd CPU cycles\n", result,
+  fprintf(stderr, "fio_atol base 10 (%ld): %zd CPU cycles\n", (long int)result,
           end - start);
 
   result = 0;
@@ -10842,7 +11788,7 @@ FIO_FUNC void fio_atol_test(void) {
     __asm__ volatile("" ::: "memory");
   }
   end = clock();
-  fprintf(stderr, "native strtol base 10 (%ld): %zd CPU cycles\n", result,
+  fprintf(stderr, "native strtol base 10 (%ld): %zd CPU cycles\n", (long int)result,
           end - start);
 
   result = 0;
@@ -10854,7 +11800,7 @@ FIO_FUNC void fio_atol_test(void) {
     __asm__ volatile("" ::: "memory");
   }
   end = clock();
-  fprintf(stderr, "fio_atol base 16 (%ld): %zd CPU cycles\n", result,
+  fprintf(stderr, "fio_atol base 16 (%ld): %zd CPU cycles\n", (long int)result,
           end - start);
 
   result = 0;
@@ -10865,7 +11811,7 @@ FIO_FUNC void fio_atol_test(void) {
     __asm__ volatile("" ::: "memory");
   }
   end = clock();
-  fprintf(stderr, "native strtol base 16 (%ld): %zd CPU cycles%s\n", result,
+  fprintf(stderr, "native strtol base 16 (%ld): %zd CPU cycles%s\n", (long int)result,
           end - start, (result != expect ? " (!?stdlib overflow?!)" : ""));
 
   result = 0;
@@ -10889,7 +11835,7 @@ FIO_FUNC void fio_atol_test(void) {
   start = clock();
   for (size_t i = 0; i < FIO_ATOL_TEST_MAX_CYCLES; ++i) {
     __asm__ volatile("" ::: "memory");
-    sprintf(number, "%ld", expect);
+    sprintf(number, "%ld", (long int)expect);
     __asm__ volatile("" ::: "memory");
   }
   end = clock();