iodine 0.6.5 → 0.7.0

Potentially problematic release.

Files changed (98)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +11 -0
  3. data/README.md +4 -4
  4. data/SPEC-Websocket-Draft.md +3 -6
  5. data/bin/mustache.rb +128 -0
  6. data/examples/test_template.mustache +16 -0
  7. data/ext/iodine/fio.c +9397 -0
  8. data/ext/iodine/fio.h +4723 -0
  9. data/ext/iodine/fio_ary.h +353 -54
  10. data/ext/iodine/fio_cli.c +351 -361
  11. data/ext/iodine/fio_cli.h +84 -105
  12. data/ext/iodine/fio_hashmap.h +70 -16
  13. data/ext/iodine/fio_json_parser.h +35 -24
  14. data/ext/iodine/fio_siphash.c +104 -4
  15. data/ext/iodine/fio_siphash.h +18 -2
  16. data/ext/iodine/fio_str.h +1218 -0
  17. data/ext/iodine/fio_tmpfile.h +1 -1
  18. data/ext/iodine/fiobj.h +13 -8
  19. data/ext/iodine/fiobj4sock.h +6 -8
  20. data/ext/iodine/fiobj_ary.c +107 -17
  21. data/ext/iodine/fiobj_ary.h +36 -4
  22. data/ext/iodine/fiobj_data.c +146 -127
  23. data/ext/iodine/fiobj_data.h +25 -23
  24. data/ext/iodine/fiobj_hash.c +7 -7
  25. data/ext/iodine/fiobj_hash.h +6 -5
  26. data/ext/iodine/fiobj_json.c +20 -17
  27. data/ext/iodine/fiobj_json.h +5 -5
  28. data/ext/iodine/fiobj_mem.h +71 -0
  29. data/ext/iodine/fiobj_mustache.c +310 -0
  30. data/ext/iodine/fiobj_mustache.h +40 -0
  31. data/ext/iodine/fiobj_numbers.c +199 -94
  32. data/ext/iodine/fiobj_numbers.h +7 -7
  33. data/ext/iodine/fiobj_str.c +142 -333
  34. data/ext/iodine/fiobj_str.h +65 -55
  35. data/ext/iodine/fiobject.c +49 -11
  36. data/ext/iodine/fiobject.h +40 -39
  37. data/ext/iodine/http.c +382 -190
  38. data/ext/iodine/http.h +124 -80
  39. data/ext/iodine/http1.c +99 -127
  40. data/ext/iodine/http1.h +5 -5
  41. data/ext/iodine/http1_parser.c +3 -2
  42. data/ext/iodine/http1_parser.h +2 -2
  43. data/ext/iodine/http_internal.c +14 -12
  44. data/ext/iodine/http_internal.h +25 -19
  45. data/ext/iodine/iodine.c +37 -18
  46. data/ext/iodine/iodine.h +4 -0
  47. data/ext/iodine/iodine_caller.c +9 -2
  48. data/ext/iodine/iodine_caller.h +2 -0
  49. data/ext/iodine/iodine_connection.c +82 -117
  50. data/ext/iodine/iodine_defer.c +57 -50
  51. data/ext/iodine/iodine_defer.h +0 -1
  52. data/ext/iodine/iodine_fiobj2rb.h +4 -2
  53. data/ext/iodine/iodine_helpers.c +4 -4
  54. data/ext/iodine/iodine_http.c +25 -32
  55. data/ext/iodine/iodine_json.c +2 -1
  56. data/ext/iodine/iodine_mustache.c +423 -0
  57. data/ext/iodine/iodine_mustache.h +6 -0
  58. data/ext/iodine/iodine_pubsub.c +48 -153
  59. data/ext/iodine/iodine_pubsub.h +5 -4
  60. data/ext/iodine/iodine_rack_io.c +7 -5
  61. data/ext/iodine/iodine_store.c +16 -13
  62. data/ext/iodine/iodine_tcp.c +26 -34
  63. data/ext/iodine/mustache_parser.h +1085 -0
  64. data/ext/iodine/redis_engine.c +740 -646
  65. data/ext/iodine/redis_engine.h +13 -15
  66. data/ext/iodine/resp_parser.h +11 -5
  67. data/ext/iodine/websocket_parser.h +13 -13
  68. data/ext/iodine/websockets.c +240 -393
  69. data/ext/iodine/websockets.h +52 -113
  70. data/lib/iodine.rb +1 -1
  71. data/lib/iodine/mustache.rb +140 -0
  72. data/lib/iodine/version.rb +1 -1
  73. metadata +15 -28
  74. data/ext/iodine/defer.c +0 -566
  75. data/ext/iodine/defer.h +0 -148
  76. data/ext/iodine/evio.c +0 -26
  77. data/ext/iodine/evio.h +0 -161
  78. data/ext/iodine/evio_callbacks.c +0 -26
  79. data/ext/iodine/evio_epoll.c +0 -251
  80. data/ext/iodine/evio_kqueue.c +0 -194
  81. data/ext/iodine/facil.c +0 -2325
  82. data/ext/iodine/facil.h +0 -616
  83. data/ext/iodine/fio_base64.c +0 -277
  84. data/ext/iodine/fio_base64.h +0 -71
  85. data/ext/iodine/fio_llist.h +0 -257
  86. data/ext/iodine/fio_mem.c +0 -675
  87. data/ext/iodine/fio_mem.h +0 -143
  88. data/ext/iodine/fio_random.c +0 -248
  89. data/ext/iodine/fio_random.h +0 -45
  90. data/ext/iodine/fio_sha1.c +0 -362
  91. data/ext/iodine/fio_sha1.h +0 -107
  92. data/ext/iodine/fio_sha2.c +0 -842
  93. data/ext/iodine/fio_sha2.h +0 -169
  94. data/ext/iodine/pubsub.c +0 -867
  95. data/ext/iodine/pubsub.h +0 -221
  96. data/ext/iodine/sock.c +0 -1366
  97. data/ext/iodine/sock.h +0 -566
  98. data/ext/iodine/spnlock.inc +0 -111
data/ext/iodine/defer.c
@@ -1,566 +0,0 @@
- /*
- Copyright: Boaz Segev, 2016-2017
- License: MIT
-
- Feel free to copy, use and enjoy according to the license provided.
- */
- #include "spnlock.inc"
-
- #include "defer.h"
-
- #include <errno.h>
- #include <signal.h>
- #include <stdint.h>
- #include <stdio.h>
- #include <sys/types.h>
- #include <sys/wait.h>
- #include <unistd.h>
-
- /* *****************************************************************************
- Compile time settings
- ***************************************************************************** */
-
- #ifndef DEFER_THROTTLE
- #define DEFER_THROTTLE 1048574UL
- #endif
- #ifndef DEFER_THROTTLE_LIMIT
- #define DEFER_THROTTLE_LIMIT 2097148UL
- #endif
-
- /**
- * The progressive throttling model makes concurrency and parallelism more
- * likely.
- *
- * Otherwise threads are assumed to be intended for "fallback" in case of slow
- * user code, where a single thread should be active most of the time and other
- * threads are activated only when that single thread is slow to perform.
- */
- #ifndef DEFER_THROTTLE_PROGRESSIVE
- #define DEFER_THROTTLE_PROGRESSIVE 1
- #endif
-
- #ifndef DEFER_QUEUE_BLOCK_COUNT
- #if UINTPTR_MAX <= 0xFFFFFFFF
- /* Almost a page of memory on most 32 bit machines: ((4096/4)-5)/3 */
- #define DEFER_QUEUE_BLOCK_COUNT 339
- #else
- /* Almost a page of memory on most 64 bit machines: ((4096/8)-5)/3 */
- #define DEFER_QUEUE_BLOCK_COUNT 168
- #endif
- #endif
-
- /* *****************************************************************************
- Data Structures
- ***************************************************************************** */
-
- /* task node data */
- typedef struct {
- void (*func)(void *, void *);
- void *arg1;
- void *arg2;
- } task_s;
-
- /* task queue block */
- typedef struct queue_block_s {
- task_s tasks[DEFER_QUEUE_BLOCK_COUNT];
- struct queue_block_s *next;
- size_t write;
- size_t read;
- unsigned char state;
- } queue_block_s;
-
- static queue_block_s static_queue;
-
- /* the state machine - this holds all the data about the task queue and pool */
- static struct {
- /* a lock for the state machine, used for multi-threading support */
- spn_lock_i lock;
- /* current active block to pop tasks */
- queue_block_s *reader;
- /* current active block to push tasks */
- queue_block_s *writer;
- } deferred = {.reader = &static_queue, .writer = &static_queue};
-
- /* *****************************************************************************
- Internal Data API
- ***************************************************************************** */
-
- #if DEBUG
- static size_t count_alloc, count_dealloc;
- #define COUNT_ALLOC spn_add(&count_alloc, 1)
- #define COUNT_DEALLOC spn_add(&count_dealloc, 1)
- #define COUNT_RESET \
- do { \
- count_alloc = count_dealloc = 0; \
- } while (0)
- #else
- #define COUNT_ALLOC
- #define COUNT_DEALLOC
- #define COUNT_RESET
- #endif
-
- static inline void push_task(task_s task) {
- spn_lock(&deferred.lock);
-
- /* test if full */
- if (deferred.writer->state &&
- deferred.writer->write == deferred.writer->read) {
- /* return to static buffer or allocate new buffer */
- if (static_queue.state == 2) {
- deferred.writer->next = &static_queue;
- } else {
- deferred.writer->next = malloc(sizeof(*deferred.writer->next));
- COUNT_ALLOC;
- if (!deferred.writer->next)
- goto critical_error;
- }
- deferred.writer = deferred.writer->next;
- deferred.writer->write = 0;
- deferred.writer->read = 0;
- deferred.writer->state = 0;
- deferred.writer->next = NULL;
- }
-
- /* place task and finish */
- deferred.writer->tasks[deferred.writer->write++] = task;
- /* cycle buffer */
- if (deferred.writer->write == DEFER_QUEUE_BLOCK_COUNT) {
- deferred.writer->write = 0;
- deferred.writer->state = 1;
- }
- spn_unlock(&deferred.lock);
- return;
-
- critical_error:
- spn_unlock(&deferred.lock);
- perror("ERROR CRITICAL: defer can't allocate task");
- kill(0, SIGINT);
- exit(errno);
- }
-
- static inline task_s pop_task(void) {
- task_s ret = (task_s){.func = NULL};
- queue_block_s *to_free = NULL;
- /* lock the state machine, grab/create a task and place it at the tail */
- spn_lock(&deferred.lock);
-
- /* empty? */
- if (deferred.reader->write == deferred.reader->read &&
- !deferred.reader->state)
- goto finish;
- /* collect task */
- ret = deferred.reader->tasks[deferred.reader->read++];
- /* cycle */
- if (deferred.reader->read == DEFER_QUEUE_BLOCK_COUNT) {
- deferred.reader->read = 0;
- deferred.reader->state = 0;
- }
- /* did we finish the queue in the buffer? */
- if (deferred.reader->write == deferred.reader->read) {
- if (deferred.reader->next) {
- to_free = deferred.reader;
- deferred.reader = deferred.reader->next;
- } else {
- if (deferred.reader != &static_queue && static_queue.state == 2) {
- to_free = deferred.reader;
- deferred.writer = &static_queue;
- deferred.reader = &static_queue;
- }
- deferred.reader->write = deferred.reader->read = deferred.reader->state =
- 0;
- }
- goto finish;
- }
-
- finish:
- if (to_free == &static_queue) {
- static_queue.state = 2;
- static_queue.next = NULL;
- }
- spn_unlock(&deferred.lock);
-
- if (to_free && to_free != &static_queue) {
- free(to_free);
- COUNT_DEALLOC;
- }
- return ret;
- }
-
- static inline void clear_tasks(void) {
- spn_lock(&deferred.lock);
- while (deferred.reader) {
- queue_block_s *tmp = deferred.reader;
- deferred.reader = deferred.reader->next;
- if (tmp != &static_queue) {
- COUNT_DEALLOC;
- free(tmp);
- }
- }
- static_queue = (queue_block_s){.next = NULL};
- deferred.reader = deferred.writer = &static_queue;
- spn_unlock(&deferred.lock);
- }
-
- void defer_on_fork(void) { deferred.lock = SPN_LOCK_INIT; }
-
- #define push_task(...) push_task((task_s){__VA_ARGS__})
-
- /* *****************************************************************************
- API
- ***************************************************************************** */
-
- /** Defer an execution of a function for later. */
- int defer(void (*func)(void *, void *), void *arg1, void *arg2) {
- /* must have a task to defer */
- if (!func)
- goto call_error;
- push_task(.func = func, .arg1 = arg1, .arg2 = arg2);
- defer_thread_signal();
- return 0;
-
- call_error:
- return -1;
- }
-
- /** Performs all deferred functions until the queue had been depleted. */
- void defer_perform(void) {
- task_s task = pop_task();
- while (task.func) {
- task.func(task.arg1, task.arg2);
- task = pop_task();
- }
- }
-
- /** Returns true if there are deferred functions waiting for execution. */
- int defer_has_queue(void) {
- return deferred.reader->read != deferred.reader->write;
- }
-
- /** Clears the queue. */
- void defer_clear_queue(void) { clear_tasks(); }
-
- /* *****************************************************************************
- Thread Pool Support
- ***************************************************************************** */
-
- /* thread pool data container */
- struct defer_pool {
- volatile unsigned int flag;
- unsigned int count;
- struct thread_msg_s {
- pool_pt pool;
- void *thrd;
- } threads[];
- };
-
- #if defined(__unix__) || defined(__APPLE__) || defined(__linux__) || \
- defined(DEBUG)
- #include <pthread.h>
-
- /* `weak` functions can be overloaded to change the thread implementation. */
-
- #pragma weak defer_new_thread
- void *defer_new_thread(void *(*thread_func)(void *), void *arg) {
- pthread_t *thread = malloc(sizeof(*thread));
- if (thread == NULL || pthread_create(thread, NULL, thread_func, arg))
- goto error;
- return thread;
- error:
- free(thread);
- return NULL;
- }
-
- /**
- * OVERRIDE THIS to replace the default pthread implementation.
- *
- * Frees the memory asociated with a thread indentifier (allows the thread to
- * run it's course, just the identifier is freed).
- */
- #pragma weak defer_free_thread
- void defer_free_thread(void *p_thr) {
- if (*((pthread_t *)p_thr)) {
- pthread_detach(*((pthread_t *)p_thr));
- }
- free(p_thr);
- }
-
- #pragma weak defer_join_thread
- int defer_join_thread(void *p_thr) {
- if (!p_thr || !(*((pthread_t *)p_thr)))
- return -1;
- pthread_join(*((pthread_t *)p_thr), NULL);
- *((pthread_t *)p_thr) = (pthread_t)NULL;
- defer_free_thread(p_thr);
- return 0;
- }
-
- #pragma weak defer_thread_throttle
- void defer_thread_throttle(unsigned long microsec) {
- throttle_thread(microsec);
- }
-
- #else /* No pthreads... BYO thread implementation. This one simply fails. */
-
- #pragma weak defer_new_thread
- void *defer_new_thread(void *(*thread_func)(void *), void *arg) {
- (void)thread_func;
- (void)arg;
- return NULL;
- }
-
- #pragma weak defer_free_thread
- void defer_free_thread(void *p_thr) { void(p_thr); }
-
- #pragma weak defer_join_thread
- int defer_join_thread(void *p_thr) {
- (void)p_thr;
- return -1;
- }
-
- #pragma weak defer_thread_throttle
- void defer_thread_throttle(unsigned long microsec) { return; }
-
- #endif /* DEBUG || pthread default */
-
- /**
- * A thread entering this function should wait for new evennts.
- */
- #pragma weak defer_thread_wait
- void defer_thread_wait(pool_pt pool, void *p_thr) {
- if (DEFER_THROTTLE_PROGRESSIVE) {
- /* keeps threads active (concurrent), but reduces performance */
- static __thread size_t static_throttle = 1;
- if (static_throttle < DEFER_THROTTLE_LIMIT)
- static_throttle = (static_throttle << 1);
- throttle_thread(static_throttle);
- if (defer_has_queue())
- static_throttle = 1;
- (void)p_thr;
- (void)pool;
- } else {
- /* Protects against slow user code, but mostly a single active thread */
- size_t throttle =
- pool ? ((pool->count) * DEFER_THROTTLE) : DEFER_THROTTLE_LIMIT;
- if (!throttle || throttle > DEFER_THROTTLE_LIMIT)
- throttle = DEFER_THROTTLE_LIMIT;
- if (throttle == DEFER_THROTTLE)
- throttle <<= 1;
- throttle_thread(throttle);
- (void)p_thr;
- }
- }
-
- /**
- * This should signal a single waiting thread to wake up (a new task entered the
- * queue).
- */
- #pragma weak defer_thread_signal
- void defer_thread_signal(void) { (void)0; }
-
- /* a thread's cycle. This is what a worker thread does... repeatedly. */
- static void *defer_worker_thread(void *pool_) {
- struct thread_msg_s volatile *data = pool_;
- signal(SIGPIPE, SIG_IGN);
- /* perform any available tasks */
- defer_perform();
- /* as long as the flag is true, wait for and perform tasks. */
- do {
- defer_thread_wait(data->pool, data->thrd);
- defer_perform();
- } while (data->pool->flag);
- return NULL;
- }
-
- /** Signals a running thread pool to stop. Returns immediately. */
- void defer_pool_stop(pool_pt pool) {
- if (!pool)
- return;
- pool->flag = 0;
- for (size_t i = 0; i < pool->count; ++i) {
- defer_thread_signal();
- }
- }
-
- /** Returns TRUE (1) if the pool is hadn't been signaled to finish up. */
- int defer_pool_is_active(pool_pt pool) { return (int)pool->flag; }
-
- /**
- * Waits for a running thread pool, joining threads and finishing all tasks.
- *
- * This function MUST be called in order to free the pool's data (the
- * `pool_pt`).
- */
- void defer_pool_wait(pool_pt pool) {
- while (pool->count) {
- pool->count--;
- defer_join_thread(pool->threads[pool->count].thrd);
- }
- free(pool);
- }
-
- /** The logic behind `defer_pool_start`. */
- static inline pool_pt defer_pool_initialize(unsigned int thread_count,
- pool_pt pool) {
- pool->flag = 1;
- pool->count = 0;
- while (pool->count < thread_count &&
- (pool->threads[pool->count].pool = pool) &&
- (pool->threads[pool->count].thrd = defer_new_thread(
- defer_worker_thread, (void *)(pool->threads + pool->count))))
-
- pool->count++;
- if (pool->count == thread_count) {
- return pool;
- }
- defer_pool_stop(pool);
- return NULL;
- }
-
- /** Starts a thread pool that will run deferred tasks in the background. */
- pool_pt defer_pool_start(unsigned int thread_count) {
- if (thread_count == 0)
- return NULL;
- pool_pt pool =
- malloc(sizeof(*pool) + (thread_count * sizeof(*pool->threads)));
- if (!pool)
- return NULL;
-
- return defer_pool_initialize(thread_count, pool);
- }
-
- /* *****************************************************************************
- Test
- ***************************************************************************** */
- #ifdef DEBUG
-
- #include <pthread.h>
- #include <stdio.h>
- #include <sys/stat.h>
-
- static size_t i_count = 0;
-
- #define TOTAL_COUNT (512 * 1024)
-
- static void sample_task(void *unused, void *unused2) {
- (void)(unused);
- (void)(unused2);
- spn_add(&i_count, 1);
- }
-
- static void sched_sample_task(void *count, void *unused2) {
- (void)(unused2);
- for (size_t i = 0; i < (uintptr_t)count; i++) {
- defer(sample_task, NULL, NULL);
- }
- }
-
- static void text_task_text(void *unused, void *unused2) {
- (void)(unused);
- (void)(unused2);
- fprintf(stderr, "this text should print before defer_perform returns\n");
- }
-
- static void text_task(void *a1, void *a2) {
- static const struct timespec tm = {.tv_sec = 2};
- nanosleep(&tm, NULL);
- defer(text_task_text, a1, a2);
- }
-
- void defer_test(void) {
- #define TEST_ASSERT(cond, ...) \
- if (!(cond)) { \
- fprintf(stderr, "* " __VA_ARGS__); \
- fprintf(stderr, "Testing failed.\n"); \
- exit(-1); \
- }
-
- clock_t start, end;
- fprintf(stderr, "Starting defer testing\n");
- i_count = 0;
- start = clock();
- for (size_t i = 0; i < TOTAL_COUNT; i++) {
- sample_task(NULL, NULL);
- }
- end = clock();
- fprintf(stderr,
- "Deferless (direct call) counter: %lu cycles with i_count = %lu, "
- "%lu/%lu free/malloc\n",
- (unsigned long)(end - start), (unsigned long)i_count,
- (unsigned long)count_dealloc, (unsigned long)count_alloc);
- size_t i_count_should_be = i_count;
-
- fprintf(stderr, "\n");
-
- for (int i = 1; TOTAL_COUNT >> i; ++i) {
- COUNT_RESET;
- i_count = 0;
- const size_t per_task = TOTAL_COUNT >> i;
- const size_t tasks = 1 << i;
- start = clock();
- for (size_t j = 0; j < tasks; ++j) {
- defer(sched_sample_task, (void *)per_task, NULL);
- }
- defer_perform();
- end = clock();
- fprintf(stderr,
- "- Defer single thread, %zu scheduling loops (%zu each):\n"
- " %lu cycles with i_count = %lu, %lu/%lu "
- "free/malloc\n",
- tasks, per_task, (unsigned long)(end - start),
- (unsigned long)i_count, (unsigned long)count_dealloc,
- (unsigned long)count_alloc);
- TEST_ASSERT(i_count == i_count_should_be, "ERROR: defer count invalid\n");
- }
-
- ssize_t cpu_count = 8;
- #ifdef _SC_NPROCESSORS_ONLN
- cpu_count = (sysconf(_SC_NPROCESSORS_ONLN) >> 1) | 1;
- #endif
-
- fprintf(stderr, "\n");
-
- for (int i = 1; TOTAL_COUNT >> i; ++i) {
- COUNT_RESET;
- i_count = 0;
- const size_t per_task = TOTAL_COUNT >> i;
- const size_t tasks = 1 << i;
- pool_pt pool = defer_pool_start(cpu_count);
- start = clock();
- for (size_t j = 0; j < tasks; ++j) {
- defer(sched_sample_task, (void *)per_task, NULL);
- }
- defer_pool_stop(pool);
- defer_pool_wait(pool);
- end = clock();
- fprintf(stderr,
- "- Defer %zu threads, %zu scheduling loops (%zu each):\n"
- " %lu cycles with i_count = %lu, %lu/%lu "
- "free/malloc\n",
- (size_t)cpu_count, tasks, per_task, (unsigned long)(end - start),
- (unsigned long)i_count, (unsigned long)count_dealloc,
- (unsigned long)count_alloc);
- TEST_ASSERT(i_count == i_count_should_be, "ERROR: defer count invalid\n");
- }
-
- COUNT_RESET;
- i_count = 0;
- for (size_t i = 0; i < 1024; i++) {
- defer(sched_sample_task, NULL, NULL);
- }
- defer_perform();
- defer(text_task, NULL, NULL);
- fprintf(stderr, "calling defer_perform.\n");
- defer_perform();
- fprintf(stderr,
- "defer_perform returned. i_count = %lu, %lu/%lu free/malloc\n",
- (unsigned long)i_count, (unsigned long)count_dealloc,
- (unsigned long)count_alloc);
-
- COUNT_RESET;
- i_count = 0;
- defer_clear_queue();
- fprintf(stderr, "* Defer cleared queue: %lu/%lu free/malloc\n\n",
- (unsigned long)count_dealloc, (unsigned long)count_alloc);
- }
-
- #endif
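
For context, the removed defer.c (part of the older facil.io core that this release appears to supersede with fio.c/fio.h) exposed a small task-queue API: defer, defer_perform, defer_pool_start, defer_pool_stop, and defer_pool_wait, whose signatures appear in the deleted source above. The sketch below is a hypothetical consumer of that old API, not part of the gem; it assumes the removed defer.h declarations are still available to compile against.

/* usage_sketch.c — hypothetical example, not shipped with iodine.
 * Assumes the declarations from the removed defer.h are available. */
#include "defer.h"
#include <stdio.h>

static void greet(void *name, void *ignored) {
  (void)ignored;
  fprintf(stderr, "deferred task says hello: %s\n", (const char *)name);
}

int main(void) {
  /* schedule a task, then drain the queue on the calling thread */
  defer(greet, (void *)"single thread", NULL);
  defer_perform();

  /* or hand the queue to a small background thread pool */
  pool_pt pool = defer_pool_start(2);
  if (pool) {
    defer(greet, (void *)"thread pool", NULL);
    defer_pool_stop(pool); /* signal the workers to finish up */
    defer_pool_wait(pool); /* join the threads and free the pool */
  }
  return 0;
}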