iodine 0.1.21 → 0.2.0
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of iodine might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/.gitignore +3 -2
- data/.travis.yml +23 -2
- data/CHANGELOG.md +9 -2
- data/README.md +232 -179
- data/Rakefile +13 -1
- data/bin/config.ru +63 -0
- data/bin/console +6 -0
- data/bin/echo +42 -32
- data/bin/http-hello +62 -0
- data/bin/http-playground +124 -0
- data/bin/playground +62 -0
- data/bin/poc/Gemfile.lock +23 -0
- data/bin/poc/README.md +37 -0
- data/bin/poc/config.ru +66 -0
- data/bin/poc/gemfile +1 -0
- data/bin/poc/www/index.html +57 -0
- data/bin/raw-rbhttp +35 -0
- data/bin/raw_broadcast +66 -0
- data/bin/test_with_faye +40 -0
- data/bin/ws-broadcast +108 -0
- data/bin/ws-echo +108 -0
- data/exe/iodine +59 -0
- data/ext/iodine/base64.c +264 -0
- data/ext/iodine/base64.h +72 -0
- data/ext/iodine/bscrypt-common.h +109 -0
- data/ext/iodine/bscrypt.h +49 -0
- data/ext/iodine/extconf.rb +41 -0
- data/ext/iodine/hex.c +123 -0
- data/ext/iodine/hex.h +70 -0
- data/ext/iodine/http.c +200 -0
- data/ext/iodine/http.h +128 -0
- data/ext/iodine/http1.c +402 -0
- data/ext/iodine/http1.h +56 -0
- data/ext/iodine/http1_simple_parser.c +473 -0
- data/ext/iodine/http1_simple_parser.h +59 -0
- data/ext/iodine/http_request.h +128 -0
- data/ext/iodine/http_response.c +1606 -0
- data/ext/iodine/http_response.h +393 -0
- data/ext/iodine/http_response_http1.h +374 -0
- data/ext/iodine/iodine_core.c +641 -0
- data/ext/iodine/iodine_core.h +70 -0
- data/ext/iodine/iodine_http.c +615 -0
- data/ext/iodine/iodine_http.h +19 -0
- data/ext/iodine/iodine_websocket.c +430 -0
- data/ext/iodine/iodine_websocket.h +21 -0
- data/ext/iodine/libasync.c +552 -0
- data/ext/iodine/libasync.h +117 -0
- data/ext/iodine/libreact.c +347 -0
- data/ext/iodine/libreact.h +244 -0
- data/ext/iodine/libserver.c +912 -0
- data/ext/iodine/libserver.h +435 -0
- data/ext/iodine/libsock.c +950 -0
- data/ext/iodine/libsock.h +478 -0
- data/ext/iodine/misc.c +181 -0
- data/ext/iodine/misc.h +76 -0
- data/ext/iodine/random.c +193 -0
- data/ext/iodine/random.h +48 -0
- data/ext/iodine/rb-call.c +127 -0
- data/ext/iodine/rb-call.h +60 -0
- data/ext/iodine/rb-libasync.h +79 -0
- data/ext/iodine/rb-rack-io.c +389 -0
- data/ext/iodine/rb-rack-io.h +17 -0
- data/ext/iodine/rb-registry.c +213 -0
- data/ext/iodine/rb-registry.h +33 -0
- data/ext/iodine/sha1.c +359 -0
- data/ext/iodine/sha1.h +85 -0
- data/ext/iodine/sha2.c +825 -0
- data/ext/iodine/sha2.h +138 -0
- data/ext/iodine/siphash.c +136 -0
- data/ext/iodine/siphash.h +15 -0
- data/ext/iodine/spnlock.h +235 -0
- data/ext/iodine/websockets.c +696 -0
- data/ext/iodine/websockets.h +120 -0
- data/ext/iodine/xor-crypt.c +189 -0
- data/ext/iodine/xor-crypt.h +107 -0
- data/iodine.gemspec +25 -18
- data/lib/iodine.rb +57 -58
- data/lib/iodine/http.rb +0 -189
- data/lib/iodine/protocol.rb +36 -245
- data/lib/iodine/version.rb +1 -1
- data/lib/rack/handler/iodine.rb +145 -2
- metadata +115 -37
- data/bin/core_http_test +0 -51
- data/bin/em playground +0 -56
- data/bin/hello_world +0 -75
- data/bin/setup +0 -7
- data/lib/iodine/client.rb +0 -5
- data/lib/iodine/core.rb +0 -102
- data/lib/iodine/core_init.rb +0 -143
- data/lib/iodine/http/hpack.rb +0 -553
- data/lib/iodine/http/http1.rb +0 -251
- data/lib/iodine/http/http2.rb +0 -507
- data/lib/iodine/http/rack_support.rb +0 -108
- data/lib/iodine/http/request.rb +0 -462
- data/lib/iodine/http/response.rb +0 -474
- data/lib/iodine/http/session.rb +0 -143
- data/lib/iodine/http/websocket_client.rb +0 -335
- data/lib/iodine/http/websocket_handler.rb +0 -101
- data/lib/iodine/http/websockets.rb +0 -336
- data/lib/iodine/io.rb +0 -56
- data/lib/iodine/logging.rb +0 -46
- data/lib/iodine/settings.rb +0 -158
- data/lib/iodine/ssl_connector.rb +0 -48
- data/lib/iodine/timers.rb +0 -95
@@ -0,0 +1,552 @@
|
|
1
|
+
/*
|
2
|
+
copyright: Boaz segev, 2016
|
3
|
+
license: MIT
|
4
|
+
|
5
|
+
Feel free to copy, use and enjoy according to the license provided.
|
6
|
+
*/
|
7
|
+
#include "rb-libasync.h"
|
8
|
+
#ifndef _GNU_SOURCE
|
9
|
+
#define _GNU_SOURCE
|
10
|
+
#endif
|
11
|
+
|
12
|
+
#include "libasync.h"
|
13
|
+
|
14
|
+
#include <stdlib.h>
|
15
|
+
#include <stdio.h>
|
16
|
+
#include <errno.h>
|
17
|
+
#include <signal.h>
|
18
|
+
#include <unistd.h>
|
19
|
+
#include <execinfo.h>
|
20
|
+
#include <pthread.h>
|
21
|
+
#include <fcntl.h>
|
22
|
+
#include <sched.h>
|
23
|
+
#include <sys/mman.h>
|
24
|
+
#include <string.h>
|
25
|
+
|
26
|
+
/* *****************************************************************************
|
27
|
+
Performance options.
|
28
|
+
*/
|
29
|
+
|
30
|
+
#ifndef ASYNC_TASK_POOL_SIZE
|
31
|
+
#define ASYNC_TASK_POOL_SIZE 170
|
32
|
+
#endif
|
33
|
+
|
34
|
+
/* Spinlock vs. Mutex data protection. */
|
35
|
+
#ifndef ASYNC_USE_SPINLOCK
|
36
|
+
#define ASYNC_USE_SPINLOCK 1
|
37
|
+
#endif
|
38
|
+
|
39
|
+
/* use pipe for wakeup if == 0 else, use nanosleep when no tasks. */
|
40
|
+
#ifndef ASYNC_NANO_SLEEP
|
41
|
+
#define ASYNC_NANO_SLEEP 16777216 // 8388608 // 1048576 // 524288
|
42
|
+
#endif
|
43
|
+
|
44
|
+
/* Sentinal thread to respawn crashed threads - limited crash resistance. */
|
45
|
+
#ifndef ASYNC_USE_SENTINEL
|
46
|
+
#define ASYNC_USE_SENTINEL 0
|
47
|
+
#endif
|
48
|
+
|
49
|
+
/* *****************************************************************************
|
50
|
+
Forward declarations - used for functions that might be needed before they are
|
51
|
+
defined.
|
52
|
+
*/
|
53
|
+
|
54
|
+
// the actual working thread
|
55
|
+
static void *worker_thread_cycle(void *);
|
56
|
+
|
57
|
+
// A thread sentinal (optional - edit the ASYNC_USE_SENTINEL macro to use or
|
58
|
+
// disable)
|
59
|
+
static void *sentinal_thread(void *);
|
60
|
+
|
61
|
+
/******************************************************************************
|
62
|
+
Portability - used to help port this to different frameworks (i.e. Ruby).
|
63
|
+
*/
|
64
|
+
|
65
|
+
#ifndef THREAD_TYPE
|
66
|
+
#define THREAD_TYPE pthread_t
|
67
|
+
|
68
|
+
/* Blocks until the thread `thr` terminates and returns its exit value.
 * FIX: `ret` was previously uninitialized — if pthread_join failed (e.g.
 * EINVAL/ESRCH on a bad handle), the function returned an indeterminate
 * value, which is undefined behavior to read. Initialize to NULL so the
 * failure path is well defined. */
static void *join_thread(THREAD_TYPE thr) {
  void *ret = NULL;
  pthread_join(thr, &ret);
  return ret;
}
|
73
|
+
|
74
|
+
/* Spawns a new thread running `thread_func(arg)` with default attributes.
 * Returns 0 on success, or pthread_create's error number on failure.
 * FIX: the argument parameter was named `async`, shadowing the file-global
 * `async` state pointer — renamed to `arg`. The function is static and
 * called positionally, so no caller is affected. */
static int create_thread(THREAD_TYPE *thr, void *(*thread_func)(void *),
                         void *arg) {
  return pthread_create(thr, NULL, thread_func, arg);
}
|
78
|
+
|
79
|
+
#endif
|
80
|
+
/******************************************************************************
|
81
|
+
Data Types
|
82
|
+
*/
|
83
|
+
|
84
|
+
/**
A single unit of work: the function to call and the opaque argument
that will be passed to it.
*/
typedef struct {
  void (*task)(void *); /* the task's function */
  void *arg;            /* passed verbatim to `task` */
} task_s;

/**
A singly-linked queue node wrapping a task_s. Nodes are normally drawn
from the static pool in async_data_s.memory and fall back to malloc when
the pool is exhausted (see async_run / perform_tasks).
*/
typedef struct async_task_ns {
  task_s task;
  struct async_task_ns *next; /* next queued task, or NULL at the tail */
} async_task_ns;
|
99
|
+
|
100
|
+
/* *****************************************************************************
|
101
|
+
Use spinlocks "spnlock.h".
|
102
|
+
|
103
|
+
For portability, it's possible copy "spnlock.h" directly after this line.
|
104
|
+
*/
|
105
|
+
#include "spnlock.h"
|
106
|
+
|
107
|
+
/******************************************************************************
|
108
|
+
Core Data
|
109
|
+
*/
|
110
|
+
|
111
|
+
/* The global thread-pool state. Allocated as a single anonymous mmap
 * (see async_alloc) sized to hold the struct plus `thread_count` thread
 * handles in the trailing flexible array member. */
typedef struct {
#if !defined(ASYNC_USE_SPINLOCK) || ASYNC_USE_SPINLOCK != 1
  /* if using mutex */
  pthread_mutex_t lock;
#endif

  /* task management*/
  /* static node pool — avoids malloc for up to ASYNC_TASK_POOL_SIZE
   * simultaneously queued tasks */
  async_task_ns memory[ASYNC_TASK_POOL_SIZE];
  async_task_ns *pool;  /* free-list of available pool nodes */
  async_task_ns *tasks; /* head of the pending-task queue */
  async_task_ns **pos;  /* tail pointer: where the next task is appended */

  /* thread management*/
  size_t thread_count; /* number of successfully started worker threads */

#if ASYNC_NANO_SLEEP == 0
  /* when using pipes */
  /* wakeup pipe: workers block on `in`; schedulers write a byte to `out` */
  struct {
    int in;
    int out;
  } io;
#endif

#if defined(ASYNC_USE_SPINLOCK) && ASYNC_USE_SPINLOCK == 1 // use spinlock
  /* if using spinlock */
  spn_lock_i lock;
#endif

  /* state management*/
  struct {
    unsigned run : 1; /* cleared by async_signal to stop the workers */
  } flags;

  /** the threads array, must be last */
  /* C99 flexible array member — storage allocated past the struct */
  THREAD_TYPE threads[];
} async_data_s;

/* The single, file-global pool instance; NULL when not running. */
static async_data_s *async;
|
149
|
+
|
150
|
+
/******************************************************************************
|
151
|
+
Core Data initialization and lock/unlock
|
152
|
+
*/
|
153
|
+
|
154
|
+
#if defined(ASYNC_USE_SPINLOCK) && ASYNC_USE_SPINLOCK == 1 // use spinlock
/* Spinlock flavor: init unlocks the (zeroed) lock and yields 0 = success;
 * destroy is a no-op. */
#define lock_async_init() (spn_unlock(&(async->lock)), 0)
#define lock_async_destroy() ;
#define lock_async() spn_lock(&(async->lock))
#define unlock_async() (spn_unlock(&(async->lock)))

#else // Using Mutex
/* Mutex flavor: init returns non-zero on failure, matching the check in
 * async_alloc. */
#define lock_async_init() (pthread_mutex_init(&((async)->lock), NULL))
#define lock_async_destroy() (pthread_mutex_destroy(&((async)->lock)))
#define lock_async() (pthread_mutex_lock(&((async)->lock)))
#define unlock_async() (pthread_mutex_unlock(&((async)->lock)))
#endif
|
166
|
+
|
167
|
+
/* Releases all pool resources: closes the wakeup pipe (if in use),
 * destroys the lock, unmaps the shared state and NULLs the global.
 * NOTE: `async->thread_count` is read inside the munmap argument list —
 * arguments are fully evaluated before the call, so this is safe.
 * NOTE(review): the `if (async->io.in)` guard treats fd 0 as "no pipe";
 * fine in practice since fd 0 is stdin, but worth confirming. */
static inline void async_free(void) {
#if ASYNC_NANO_SLEEP == 0
  if (async->io.in) {
    close(async->io.in);
    close(async->io.out);
  }
#endif
  lock_async_destroy();
  munmap(async, (sizeof(async_data_s) +
                 (sizeof(THREAD_TYPE) * (async->thread_count))));
  async = NULL;
}
|
179
|
+
|
180
|
+
static inline void async_alloc(size_t threads) {
|
181
|
+
async = mmap(NULL, (sizeof(async_data_s) + (sizeof(THREAD_TYPE) * (threads))),
|
182
|
+
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS,
|
183
|
+
-1, 0);
|
184
|
+
if (async == MAP_FAILED) {
|
185
|
+
async = NULL;
|
186
|
+
}
|
187
|
+
*async = (async_data_s){.flags.run = 1};
|
188
|
+
async->pos = &async->tasks;
|
189
|
+
|
190
|
+
if (lock_async_init()) {
|
191
|
+
async_free();
|
192
|
+
return;
|
193
|
+
}
|
194
|
+
|
195
|
+
#if ASYNC_NANO_SLEEP == 0 // using pipes?
|
196
|
+
if (pipe(&async->io.in)) {
|
197
|
+
async_free();
|
198
|
+
return;
|
199
|
+
}
|
200
|
+
fcntl(async->io.out, F_SETFL, O_NONBLOCK);
|
201
|
+
#endif
|
202
|
+
|
203
|
+
// initialize pool
|
204
|
+
for (size_t i = 0; i < ASYNC_TASK_POOL_SIZE - 1; i++) {
|
205
|
+
async->memory[i].next = async->memory + i + 1;
|
206
|
+
}
|
207
|
+
async->memory[ASYNC_TASK_POOL_SIZE - 1].next = NULL;
|
208
|
+
async->pool = async->memory;
|
209
|
+
}
|
210
|
+
|
211
|
+
/******************************************************************************
|
212
|
+
Perfoming tasks
|
213
|
+
*/
|
214
|
+
|
215
|
+
/* Drains the task queue: repeatedly pops the head task under the lock,
 * recycles its node, then runs the task OUTSIDE the lock (so tasks may
 * schedule more tasks without deadlocking). Returns when the queue is
 * observed empty. The copy into `tsk` is what allows the node to be
 * recycled/freed before the task runs. */
static inline void perform_tasks(void) {
  task_s tsk;
  async_task_ns *t;
  while (async) {
    lock_async();
    t = async->tasks;
    if (t) {
      async->tasks = t->next;
      /* queue emptied — reset the tail pointer back to the head */
      if (async->tasks == NULL)
        async->pos = &(async->tasks);
      tsk = t->task;
      /* node came from the static pool? push it back; else it was
       * malloc'ed by async_run when the pool ran dry */
      if (t >= async->memory &&
          (t <= (async->memory + ASYNC_TASK_POOL_SIZE - 1))) {
        t->next = async->pool;
        async->pool = t;
      } else {
        free(t);
      }
      unlock_async();
      tsk.task(tsk.arg); /* run unlocked — task may call async_run */
      continue;
    }
    async->pos = &(async->tasks);
    unlock_async();
    return;
  }
}
|
242
|
+
|
243
|
+
/******************************************************************************
|
244
|
+
Pasuing and resuming threads
|
245
|
+
*/
|
246
|
+
|
247
|
+
/* Parks an idle worker. Pipe mode: blocks on a 1-byte read of the wakeup
 * pipe (wake_thread writes bytes to release workers). Nanosleep mode:
 * naps for ASYNC_NANO_SLEEP nanoseconds and re-polls. */
static inline void pause_thread() {
#if ASYNC_NANO_SLEEP == 0
  /* only block while the pool exists and is running — otherwise a worker
   * could sleep through shutdown */
  if (async && async->flags.run) {
    uint8_t tmp;
    read(async->io.in, &tmp, 1);
  }
#else
  struct timespec act, tm = {.tv_sec = 0, .tv_nsec = ASYNC_NANO_SLEEP};
  nanosleep(&tm, &act);
  // sched_yield();
#endif
}
|
259
|
+
|
260
|
+
/* Releases (at most) one parked worker by writing a single byte to the
 * wakeup pipe. The byte's value is irrelevant — `async` is just a handy
 * readable address. A no-op in nanosleep mode (workers poll). The write
 * result is deliberately ignored: the pipe is O_NONBLOCK and a full pipe
 * already means workers have plenty of wakeups pending. */
static inline void wake_thread() {
#if ASYNC_NANO_SLEEP == 0
  write(async->io.out, async, 1);
#endif
}
|
265
|
+
|
266
|
+
/* Releases every parked worker by writing more bytes than there are
 * threads (thread_count + 16 for margin) to the wakeup pipe. Used at
 * shutdown so no worker stays blocked in pause_thread. No-op in
 * nanosleep mode. */
static inline void wake_all_threads() {
#if ASYNC_NANO_SLEEP == 0
  write(async->io.out, async, async->thread_count + 16);
#endif
}
|
271
|
+
|
272
|
+
/******************************************************************************
|
273
|
+
Worker threads
|
274
|
+
*/
|
275
|
+
|
276
|
+
// Fatal-signal handler installed per worker when ASYNC_USE_SENTINEL is on:
// prints a backtrace to stderr, then exits ONLY the crashing thread so the
// sentinel can respawn it (limited crash resistance).
// NOTE(review): backtrace/fprintf/free are not async-signal-safe; this is a
// deliberate best-effort diagnostic on an already-crashing thread, not
// something to rely on in a correct program.
static void on_err_signal(int sig) {
  void *array[22];
  size_t size;
  char **strings;
  size_t i;
  size = backtrace(array, 22);
  strings = backtrace_symbols(array, size);
  perror("\nERROR");
  fprintf(stderr, "Async: Error signal received"
                  " - %s (errno %d).\nBacktrace (%zd):\n",
          strsignal(sig), errno, size);
  /* skip frames 0-1 (the handler and signal trampoline) */
  for (i = 2; i < size; i++)
    fprintf(stderr, "%s\n", strings[i]);
  free(strings);
  fprintf(stderr, "\n");
  // pthread_exit(0); // for testing
  /* exit value doubles as a "crashed" marker (any non-NULL address) */
  pthread_exit((void *)on_err_signal);
}
|
297
|
+
|
298
|
+
// The worker thread body: alternate between draining the task queue and
// parking until woken. Exits once flags.run is cleared, after one final
// drain so queued tasks are not stranded.
static void *worker_thread_cycle(void *_) {
  // register error signals when using a sentinel thread, so a crashing
  // task kills only this worker (see on_err_signal / sentinal_thread)
  if (ASYNC_USE_SENTINEL) {
    signal(SIGSEGV, on_err_signal);
    signal(SIGFPE, on_err_signal);
    signal(SIGILL, on_err_signal);
#ifdef SIGBUS
    signal(SIGBUS, on_err_signal);
#endif
#ifdef SIGSYS
    signal(SIGSYS, on_err_signal);
#endif
#ifdef SIGXFSZ
    signal(SIGXFSZ, on_err_signal);
#endif
  }

  // ignore pipe issues (writes to the wakeup pipe / sockets must not
  // kill the worker)
  signal(SIGPIPE, SIG_IGN);

  // work / park for as long as the pool is active
  while (async && async->flags.run) {
    perform_tasks();
    pause_thread();
  }
  // final drain after shutdown was signalled
  perform_tasks();
  return 0;
}
|
327
|
+
|
328
|
+
// Optional sentinel ("sentinal" sic): instead of running tasks directly,
// spawns a worker and joins it; if the worker dies (e.g. pthread_exit from
// on_err_signal after a crash), a replacement is spawned — for as long as
// the pool is still running.
static void *sentinal_thread(void *_) {
  THREAD_TYPE thr;
  while (async != NULL && async->flags.run == 1 &&
         create_thread(&thr, worker_thread_cycle, _) == 0)
    join_thread(thr);
  return 0;
}
|
336
|
+
|
337
|
+
/******************************************************************************
|
338
|
+
API
|
339
|
+
*/
|
340
|
+
|
341
|
+
/**
|
342
|
+
Starts running the global thread pool. Use:
|
343
|
+
|
344
|
+
async_start(8);
|
345
|
+
|
346
|
+
*/
|
347
|
+
ssize_t async_start(size_t threads) {
|
348
|
+
async_alloc(threads);
|
349
|
+
if (async == NULL)
|
350
|
+
return -1;
|
351
|
+
// initialize threads
|
352
|
+
for (size_t i = 0; i < threads; i++) {
|
353
|
+
if (create_thread(
|
354
|
+
async->threads + i,
|
355
|
+
(ASYNC_USE_SENTINEL ? sentinal_thread : worker_thread_cycle),
|
356
|
+
NULL) < 0) {
|
357
|
+
async->flags.run = 0;
|
358
|
+
wake_all_threads();
|
359
|
+
async_free();
|
360
|
+
return -1;
|
361
|
+
}
|
362
|
+
++async->thread_count;
|
363
|
+
}
|
364
|
+
signal(SIGPIPE, SIG_IGN);
|
365
|
+
return 0;
|
366
|
+
}
|
367
|
+
|
368
|
+
/**
Performs any pending tasks on the CALLING thread, returning once the
queue is observed empty.

The thread pool remains active, waiting for new tasks.

Unlike async_join (which uses `join` and blocks until the workers exit),
this is an **active** wait: the calling thread acts as an extra worker
and runs pending tasks itself.

Use:

    async_perform();

*/
void async_perform() { perform_tasks(); }
|
385
|
+
|
386
|
+
/**
|
387
|
+
Schedules a task to be performed by the thread pool.
|
388
|
+
|
389
|
+
The Task should be a function such as `void task(void
|
390
|
+
*arg)`.
|
391
|
+
|
392
|
+
Use:
|
393
|
+
|
394
|
+
void task(void * arg) { printf("%s", arg); }
|
395
|
+
|
396
|
+
char arg[] = "Demo Task";
|
397
|
+
|
398
|
+
async_run(task, arg);
|
399
|
+
|
400
|
+
*/
|
401
|
+
int async_run(void (*task)(void *), void *arg) {
|
402
|
+
if (async == NULL)
|
403
|
+
return -1;
|
404
|
+
async_task_ns *tsk;
|
405
|
+
lock_async();
|
406
|
+
tsk = async->pool;
|
407
|
+
if (tsk) {
|
408
|
+
async->pool = tsk->next;
|
409
|
+
} else {
|
410
|
+
tsk = malloc(sizeof(*tsk));
|
411
|
+
if (!tsk)
|
412
|
+
goto error;
|
413
|
+
}
|
414
|
+
*tsk = (async_task_ns){.task.task = task, .task.arg = arg};
|
415
|
+
*(async->pos) = tsk;
|
416
|
+
async->pos = &(tsk->next);
|
417
|
+
unlock_async();
|
418
|
+
wake_thread();
|
419
|
+
return 0;
|
420
|
+
error:
|
421
|
+
unlock_async();
|
422
|
+
return -1;
|
423
|
+
}
|
424
|
+
|
425
|
+
/**
|
426
|
+
Waits for existing tasks to complete and releases the thread
|
427
|
+
pool and it's
|
428
|
+
resources.
|
429
|
+
*/
|
430
|
+
void async_join() {
|
431
|
+
if (async == NULL)
|
432
|
+
return;
|
433
|
+
for (size_t i = 0; i < async->thread_count; i++) {
|
434
|
+
join_thread(async->threads[i]);
|
435
|
+
}
|
436
|
+
perform_tasks();
|
437
|
+
async_free();
|
438
|
+
};
|
439
|
+
|
440
|
+
/**
|
441
|
+
Waits for existing tasks to complete and releases the thread
|
442
|
+
pool and it's
|
443
|
+
resources.
|
444
|
+
*/
|
445
|
+
void async_signal() {
|
446
|
+
if (async == NULL)
|
447
|
+
return;
|
448
|
+
async->flags.run = 0;
|
449
|
+
wake_all_threads();
|
450
|
+
};
|
451
|
+
|
452
|
+
/******************************************************************************
|
453
|
+
Test
|
454
|
+
*/
|
455
|
+
|
456
|
+
#ifdef DEBUG
|
457
|
+
|
458
|
+
#define ASYNC_SPEED_TEST_THREAD_COUNT 120
|
459
|
+
|
460
|
+
static size_t _Atomic i_count = 0;
|
461
|
+
|
462
|
+
/* Benchmark task: atomically bumps the global counter. The empty asm is a
 * compiler barrier preventing the call from being optimized away/merged. */
static void sample_task(void *_) {
  __asm__ volatile("" ::: "memory");
  atomic_fetch_add(&i_count, 1);
}
|
466
|
+
|
467
|
+
static void sched_sample_task(void *_) {
|
468
|
+
for (size_t i = 0; i < 1024; i++) {
|
469
|
+
async_run(sample_task, async);
|
470
|
+
}
|
471
|
+
}
|
472
|
+
|
473
|
+
/* Prints a marker line; scheduled by text_task to verify that tasks queued
 * from within tasks still complete before async_finish returns. */
static void text_task_text(void *_) {
  __asm__ volatile("" ::: "memory");
  fprintf(stderr, "this text should print before async_finish returns\n");
}
|
477
|
+
|
478
|
+
/* Slow task: sleeps 2s then schedules text_task_text — used to check that
 * async_perform/async_finish wait out in-flight and follow-up tasks. */
static void text_task(void *_) {
  sleep(2);
  async_run(text_task_text, _);
}
|
482
|
+
|
483
|
+
#if ASYNC_USE_SENTINEL == 1
|
484
|
+
/* Deliberately-crashing task (sentinel builds only): writes through NULL to
 * trigger SIGSEGV, verifying that on_err_signal + sentinal_thread respawn
 * the dead worker instead of taking down the process. */
static void evil_task(void *_) {
  __asm__ volatile("" ::: "memory");
  fprintf(stderr, "EVIL CODE RUNNING!\n");
  sprintf(NULL,
          "Never write text to a NULL pointer, this is a terrible idea that "
          "should segfault.\n");
}
|
491
|
+
#endif
|
492
|
+
|
493
|
+
/* DEBUG-only smoke/perf test. Phases:
 * 1. start a large pool, fan out ~1M sample_task increments, report CPU
 *    cycles (clock()) and the final counter;
 * 2. verify async_perform and async_finish both flush slow tasks;
 * 3. (sentinel builds) crash a worker with evil_task and finish cleanly.
 * NOTE(review): async_finish is not defined in this file — presumably a
 * signal+join wrapper declared in libasync.h; confirm there. */
void async_test_library_speed(void) {
  atomic_store(&i_count, 0);
  time_t start, end;
  fprintf(stderr, "Starting Async testing\n");
  if (async_start(ASYNC_SPEED_TEST_THREAD_COUNT) == 0) {
    fprintf(stderr, "Thread count test %s %lu/%d\n",
            (async->thread_count == ASYNC_SPEED_TEST_THREAD_COUNT ? "PASSED"
                                                                  : "FAILED"),
            async->thread_count, ASYNC_SPEED_TEST_THREAD_COUNT);
    start = clock();
    /* 1024 schedulers x 1024 increments each */
    for (size_t i = 0; i < 1024; i++) {
      async_run(sched_sample_task, async);
    }
    async_finish();
    end = clock();
    fprintf(stderr, "Async performance test %lu cycles with i_count = %lu\n",
            end - start, atomic_load(&i_count));
  } else {
    fprintf(stderr, "Async test couldn't be initialized\n");
    exit(-1);
  }
  if (async_start(8)) {
    fprintf(stderr, "Couldn't start thread pool!\n");
    exit(-1);
  }
  fprintf(stderr, "calling async_perform.\n");
  async_run(text_task, NULL);
  sleep(1);
  async_perform();
  fprintf(stderr, "async_perform returned.\n");
  fprintf(stderr, "calling finish.\n");
  async_run(text_task, NULL);
  sleep(1);
  async_finish();
  fprintf(stderr, "finish returned.\n");

#if ASYNC_USE_SENTINEL == 1
  if (async_start(8)) {
    fprintf(stderr, "Couldn't start thread pool!\n");
    exit(-1);
  }
  sleep(1);
  fprintf(stderr, "calling evil task.\n");
  async_run(evil_task, NULL);
  sleep(1);
  fprintf(stderr, "calling finish.\n");
  async_finish();
#endif

  // async_start(8);
  // fprintf(stderr,
  //         "calling a few tasks and sleeping 12 seconds before finishing
  //         up...\n"
  //         "check the processor CPU cycles - are we busy?\n");
  // async_run(sched_sample_task, NULL);
  // sleep(12);
  // async_finish();
}
|
551
|
+
|
552
|
+
#endif
|