iodine 0.7.10 → 0.7.11

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of iodine might be problematic. Click here for more details.

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: bd4ea42a7f008886bcadee4c829499686a1fbceeb2738daf56e4155063d9054c
4
- data.tar.gz: aeb66d448ad9cd09e55f7f15c79bcb1b3e8f0a41f3e2adfc9e2c894c20d7c80f
3
+ metadata.gz: d1140536cb23442b4b484d0c193cfd8cd207775e557dda8e132dbe52607a82f3
4
+ data.tar.gz: 3a7d54ecdb80550a348984f52c553afb8e98c514a027bd61d26736332a4e9fce
5
5
  SHA512:
6
- metadata.gz: ecdde8977f2b48089f370356f3bbd272054df193f9be220ac28ad8491c59f749a1c21e1506bb45c9f2532e9bf072b9e7bf365c92ccb4b3a651b52bacbdfb21ad
7
- data.tar.gz: 5268a0318e3484a0a703fa68e52f057c6cab513066243ff37e052c58669d8babe5c53650082349bb9b2610f80afd0f5e551f530a3e6d662ad49dfa4609f3d6b1
6
+ metadata.gz: ef3f67bfb2bd6c3ceed9772b54644861718bcb8a1af7d1660c747020dfb3ced1cf7121ea03bc980d531f691904e8619f9bb1f790b0065d905760f27dfc800bd1
7
+ data.tar.gz: 31325c6c16d8d9fe19043fb223d66ade09774a49a466dfe2f80b64f00fbc068169bffb39f37a3f336415dcf8acb5de222a97e46d2f53064b24c373ba8504ed1d
@@ -6,6 +6,12 @@ Please notice that this change log contains changes for upcoming releases as wel
6
6
 
7
7
  ## Changes:
8
8
 
9
+ #### Change log v.0.7.11
10
+
11
+ **Fix**: (`fio`) Deletes Unix sockets once done listening. Fixes an issue where the files would remain intact.
12
+
13
+ **Optimization**: (`fio`) significant memory allocation optimizations. The facil.io allocator (included with iodine) helps to protect against heap fragmentation and improves speed for concurrent memory allocations when forking / multi-threading.
14
+
9
15
  #### Change log v.0.7.10
10
16
 
11
17
  **Fix**: (pub/sub) fixed connection lock for pub/sub tasks. Now pub/sub Ruby tasks will lock the connection, protecting the user's code against concurrent access to the connection's data.
data/README.md CHANGED
@@ -18,6 +18,7 @@ Iodine is a fast concurrent web server for real-time Ruby applications, with nat
18
18
  * Client connectivity (attach client sockets to make them evented);
19
19
  * Custom protocol authoring;
20
20
  * Optimized Logging to `stderr`.
21
+ * [Sequel](https://github.com/jeremyevans/sequel) and ActiveRecord forking protection.
21
22
  * and more!
22
23
 
23
24
  Iodine is an **evented** framework with a simple API that ports much of the [C facil.io framework](https://github.com/boazsegev/facil.io) to Ruby. This means that:
@@ -265,7 +266,7 @@ To hot-restart iodine, send the `SIGUSR1` signal to the root process.
265
266
  The following code will hot-restart iodine every 4 hours when iodine is running in cluster mode:
266
267
 
267
268
  ```ruby
268
- Iodine.run_every(2 * 60 * 60 * 1000) do
269
+ Iodine.run_every(4 * 60 * 60 * 1000) do
269
270
  Process.kill("SIGUSR1", Process.pid) unless Iodine.worker?
270
271
  end
271
272
  ```
@@ -306,7 +307,7 @@ However, it's also true that these issues go unnoticed by many developers, since
306
307
 
307
308
  With iodine, there's no need to worry.
308
309
 
309
- Iodine provides built-in `fork` handling for both ActiveRecord and Sequel, in order to protect against these possible errors.
310
+ Iodine provides built-in `fork` handling for both ActiveRecord and [Sequel](https://github.com/jeremyevans/sequel), in order to protect against these possible errors.
310
311
 
311
312
  ### TCP/IP (raw) sockets
312
313
 
@@ -77,27 +77,6 @@ Feel free to copy, use and enjoy according to the license provided.
77
77
  #define __thread _Thread_value
78
78
  #endif
79
79
 
80
- /* *****************************************************************************
81
- Patch for OSX version < 10.12 from https://stackoverflow.com/a/9781275/4025095
82
- ***************************************************************************** */
83
- #if defined(__MACH__) && !defined(CLOCK_REALTIME)
84
- #include <sys/time.h>
85
- #define CLOCK_REALTIME 0
86
- #define clock_gettime patch_clock_gettime
87
- // clock_gettime is not implemented on older versions of OS X (< 10.12).
88
- // If implemented, CLOCK_REALTIME will have already been defined.
89
- static inline int patch_clock_gettime(int clk_id, struct timespec *t) {
90
- struct timeval now;
91
- int rv = gettimeofday(&now, NULL);
92
- if (rv)
93
- return rv;
94
- t->tv_sec = now.tv_sec;
95
- t->tv_nsec = now.tv_usec * 1000;
96
- return 0;
97
- (void)clk_id;
98
- }
99
- #endif
100
-
101
80
  /* *****************************************************************************
102
81
  Event deferring (declarations)
103
82
  ***************************************************************************** */
@@ -4027,6 +4006,13 @@ static void fio_listen_cleanup_task(void *pr_) {
4027
4006
  pr->on_finish(pr->uuid, pr->udata);
4028
4007
  }
4029
4008
  fio_force_close(pr->uuid);
4009
+ if (pr->addr &&
4010
+ (!pr->port || *pr->port == 0 ||
4011
+ (pr->port[0] == '0' && pr->port[1] == 0)) &&
4012
+ fio_is_master()) {
4013
+ /* delete Unix sockets */
4014
+ unlink(pr->addr);
4015
+ }
4030
4016
  free(pr_);
4031
4017
  }
4032
4018
 
@@ -5981,6 +5967,39 @@ Section Start Marker
5981
5967
 
5982
5968
 
5983
5969
 
5970
+ ***************************************************************************** */
5971
+
5972
+ /* *****************************************************************************
5973
+ Allocator default settings
5974
+ ***************************************************************************** */
5975
+
5976
+ /* don't change these */
5977
+ #undef FIO_MEMORY_BLOCK_SLICES
5978
+ #undef FIO_MEMORY_BLOCK_HEADER_SIZE
5979
+ #undef FIO_MEMORY_BLOCK_START_POS
5980
+ #undef FIO_MEMORY_MAX_SLICES_PER_BLOCK
5981
+ #undef FIO_MEMORY_BLOCK_MASK
5982
+
5983
+ /* The number of blocks pre-allocated each system call, 256 ==8Mb */
5984
+ #ifndef FIO_MEMORY_BLOCKS_PER_ALLOCATION
5985
+ #define FIO_MEMORY_BLOCKS_PER_ALLOCATION 256
5986
+ #endif
5987
+
5988
+ #define FIO_MEMORY_BLOCK_MASK (FIO_MEMORY_BLOCK_SIZE - 1) /* 0b0...1... */
5989
+
5990
+ #define FIO_MEMORY_BLOCK_SLICES (FIO_MEMORY_BLOCK_SIZE >> 4) /* 16B slices */
5991
+
5992
+ /* must be divisible by 16 bytes, bigger than min(sizeof(block_s), 16) */
5993
+ #define FIO_MEMORY_BLOCK_HEADER_SIZE 32
5994
+
5995
+ /* allocation counter position (start) */
5996
+ #define FIO_MEMORY_BLOCK_START_POS (FIO_MEMORY_BLOCK_HEADER_SIZE >> 4)
5997
+
5998
+ #define FIO_MEMORY_MAX_SLICES_PER_BLOCK \
5999
+ (FIO_MEMORY_BLOCK_SLICES - FIO_MEMORY_BLOCK_START_POS)
6000
+
6001
+ /* *****************************************************************************
6002
+ FIO_FORCE_MALLOC handler
5984
6003
  ***************************************************************************** */
5985
6004
 
5986
6005
  #if FIO_FORCE_MALLOC
@@ -6129,9 +6148,13 @@ static inline void *sys_alloc(size_t len, uint8_t is_indi) {
6129
6148
  }
6130
6149
  munmap((void *)((uintptr_t)result + len), FIO_MEMORY_BLOCK_SIZE - offset);
6131
6150
  }
6132
- next_alloc =
6133
- (void *)((uintptr_t)result + FIO_MEMORY_BLOCK_SIZE +
6134
- (is_indi * ((uintptr_t)1 << 30))); /* add 1TB for realloc */
6151
+ if (is_indi ==
6152
+ 0) /* advance by a block's allocation size for next allocation */
6153
+ next_alloc =
6154
+ (void *)((uintptr_t)result +
6155
+ (FIO_MEMORY_BLOCK_SIZE * (FIO_MEMORY_BLOCKS_PER_ALLOCATION)));
6156
+ else /* add 1TB for realloc */
6157
+ next_alloc = (void *)((uintptr_t)result + (is_indi * ((uintptr_t)1 << 30)));
6135
6158
  return result;
6136
6159
  }
6137
6160
 
@@ -6140,14 +6163,14 @@ static inline void sys_free(void *mem, size_t len) { munmap(mem, len); }
6140
6163
 
6141
6164
  static void *sys_realloc(void *mem, size_t prev_len, size_t new_len) {
6142
6165
  if (new_len > prev_len) {
6143
- #if defined(__linux__) && defined(MREMAP_MAYMOVE)
6144
- void *result = mremap(mem, prev_len, new_len, MREMAP_MAYMOVE);
6145
- if (result == MAP_FAILED)
6146
- return NULL;
6147
- #else
6148
- void *result =
6149
- mmap((void *)((uintptr_t)mem + prev_len), new_len - prev_len,
6150
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
6166
+ void *result;
6167
+ #if defined(__linux__)
6168
+ result = mremap(mem, prev_len, new_len, 0);
6169
+ if (result != MAP_FAILED)
6170
+ return result;
6171
+ #endif
6172
+ result = mmap((void *)((uintptr_t)mem + prev_len), new_len - prev_len,
6173
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
6151
6174
  if (result == (void *)((uintptr_t)mem + prev_len)) {
6152
6175
  result = mem;
6153
6176
  } else {
@@ -6161,7 +6184,6 @@ static void *sys_realloc(void *mem, size_t prev_len, size_t new_len) {
6161
6184
  // memcpy(result, mem, prev_len);
6162
6185
  munmap(mem, prev_len); /* free original memory */
6163
6186
  }
6164
- #endif
6165
6187
  return result;
6166
6188
  }
6167
6189
  if (new_len + 4096 < prev_len) /* more than a single dangling page */
@@ -6180,12 +6202,21 @@ Data Types
6180
6202
  ***************************************************************************** */
6181
6203
 
6182
6204
  /* The basic block header. Starts a 32Kib memory block */
6183
- typedef struct block_s {
6184
- uint16_t ref; /* reference count (per memory page) */
6185
- uint16_t pos; /* position into the block */
6186
- uint16_t max; /* available memory count */
6187
- uint16_t pad; /* memory padding */
6188
- } block_s;
6205
+ typedef struct block_s block_s;
6206
+
6207
+ struct block_s {
6208
+ block_s *parent; /* REQUIRED, root == point to self */
6209
+ uint16_t ref; /* reference count (per memory page) */
6210
+ uint16_t pos; /* position into the block */
6211
+ uint16_t max; /* available memory count */
6212
+ uint16_t root_ref; /* root reference memory padding */
6213
+ };
6214
+
6215
+ typedef struct block_node_s block_node_s;
6216
+ struct block_node_s {
6217
+ block_s dont_touch; /* prevent block internal data from being corrupted */
6218
+ fio_ls_embd_s node; /* next block */
6219
+ };
6189
6220
 
6190
6221
  /* a per-CPU core "arena" for memory allocations */
6191
6222
  typedef struct {
@@ -6195,14 +6226,14 @@ typedef struct {
6195
6226
 
6196
6227
  /* The memory allocators persistent state */
6197
6228
  static struct {
6198
- size_t active_size; /* active array size */
6199
- block_s *available; /* free list for memory blocks */
6200
- intptr_t count; /* free list counter */
6201
- size_t cores; /* the number of detected CPU cores*/
6202
- fio_lock_i lock; /* a global lock */
6229
+ fio_ls_embd_s available; /* free list for memory blocks */
6230
+ // intptr_t count; /* free list counter */
6231
+ size_t cores; /* the number of detected CPU cores*/
6232
+ fio_lock_i lock; /* a global lock */
6203
6233
  } memory = {
6204
6234
  .cores = 1,
6205
6235
  .lock = FIO_LOCK_INIT,
6236
+ .available = FIO_LS_INIT(memory.available),
6206
6237
  };
6207
6238
 
6208
6239
  /* The per-CPU arena array. */
@@ -6211,6 +6242,31 @@ static arena_s *arenas;
6211
6242
  /* The per-CPU arena array. */
6212
6243
  static long double on_malloc_zero;
6213
6244
 
6245
+ #if DEBUG
6246
+ /* DEBUG stat: peak number of memory blocks allocated at any single time. */
6247
+ static size_t fio_mem_block_count_max;
6248
+ /* DEBUG stat: current number of memory blocks allocated. */
6249
+ static size_t fio_mem_block_count;
6250
+ #define FIO_MEMORY_ON_BLOCK_ALLOC() \
6251
+ do { \
6252
+ fio_atomic_add(&fio_mem_block_count, 1); \
6253
+ if (fio_mem_block_count > fio_mem_block_count_max) \
6254
+ fio_mem_block_count_max = fio_mem_block_count; \
6255
+ } while (0)
6256
+ #define FIO_MEMORY_ON_BLOCK_FREE() \
6257
+ do { \
6258
+ fio_atomic_sub(&fio_mem_block_count, 1); \
6259
+ } while (0)
6260
+ #define FIO_MEMORY_PRINT_BLOCK_STAT() \
6261
+ FIO_LOG_INFO( \
6262
+ "(fio) Total memory blocks allocated before cleanup %zu\n" \
6263
+ " Maximum memory blocks allocated at a single time %zu\n", \
6264
+ fio_mem_block_count, fio_mem_block_count_max)
6265
+ #else
6266
+ #define FIO_MEMORY_ON_BLOCK_ALLOC()
6267
+ #define FIO_MEMORY_ON_BLOCK_FREE()
6268
+ #define FIO_MEMORY_PRINT_BLOCK_STAT()
6269
+ #endif
6214
6270
  /* *****************************************************************************
6215
6271
  Per-CPU Arena management
6216
6272
  ***************************************************************************** */
@@ -6254,24 +6310,28 @@ void fio_malloc_after_fork(void) {
6254
6310
  }
6255
6311
 
6256
6312
  /* *****************************************************************************
6257
- Block management
6313
+ Block management / allocation
6258
6314
  ***************************************************************************** */
6259
6315
 
6260
- // static inline block_s **block_find(void *mem_) {
6261
- // const uintptr_t mem = (uintptr_t)mem_;
6262
- // block_s *blk = memory.active;
6263
- // }
6264
-
6265
- /* intializes the block header for an available block of memory. */
6266
- static inline block_s *block_init(void *blk_) {
6267
- block_s *blk = blk_;
6316
+ static inline void block_init_root(block_s *blk, block_s *parent) {
6268
6317
  *blk = (block_s){
6318
+ .parent = parent,
6269
6319
  .ref = 1,
6270
- .pos = (2 + (sizeof(block_s) >> 4)),
6271
- .max = (FIO_MEMORY_BLOCK_SLICES - 1) -
6272
- (sizeof(block_s) >> 4), /* count available units of 16 bytes */
6320
+ .pos = FIO_MEMORY_BLOCK_START_POS,
6321
+ .root_ref = 1,
6273
6322
  };
6274
- return blk;
6323
+ }
6324
+
6325
+ /* intializes the block header for an available block of memory. */
6326
+ static inline void block_init(block_s *blk) {
6327
+ /* initialization shouldn't affect `parent` or `root_ref` */
6328
+ blk->ref = 1;
6329
+ blk->pos = FIO_MEMORY_BLOCK_START_POS;
6330
+ /* zero out linked list memory (everything else is already zero) */
6331
+ ((block_node_s *)blk)->node.next = NULL;
6332
+ ((block_node_s *)blk)->node.prev = NULL;
6333
+ /* bump parent reference count */
6334
+ fio_atomic_add(&blk->parent->root_ref, 1);
6275
6335
  }
6276
6336
 
6277
6337
  /* intializes the block header for an available block of memory. */
@@ -6279,46 +6339,63 @@ static inline void block_free(block_s *blk) {
6279
6339
  if (fio_atomic_sub(&blk->ref, 1))
6280
6340
  return;
6281
6341
 
6282
- if (fio_atomic_add(&memory.count, 1) >
6283
- (intptr_t)(FIO_MEM_MAX_BLOCKS_PER_CORE * memory.cores)) {
6284
- /* return memory to the system */
6285
- fio_atomic_sub(&memory.count, 1);
6286
- sys_free(blk, FIO_MEMORY_BLOCK_SIZE);
6342
+ memset(blk + 1, 0, (FIO_MEMORY_BLOCK_SIZE - sizeof(*blk)));
6343
+ fio_lock(&memory.lock);
6344
+ fio_ls_embd_push(&memory.available, &((block_node_s *)blk)->node);
6345
+
6346
+ blk = blk->parent;
6347
+
6348
+ if (fio_atomic_sub(&blk->root_ref, 1)) {
6349
+ fio_unlock(&memory.lock);
6287
6350
  return;
6288
6351
  }
6289
- memset(blk, 0, FIO_MEMORY_BLOCK_SIZE);
6290
- fio_lock(&memory.lock);
6291
- ((block_s **)blk)[0] = memory.available;
6292
- memory.available = (block_s *)blk;
6352
+ // fio_unlock(&memory.lock);
6353
+ // return;
6354
+
6355
+ /* remove all of the root block's children (slices) from the memory pool */
6356
+ for (size_t i = 0; i < FIO_MEMORY_BLOCKS_PER_ALLOCATION; ++i) {
6357
+ block_node_s *pos =
6358
+ (block_node_s *)((uintptr_t)blk + (i * FIO_MEMORY_BLOCK_SIZE));
6359
+ fio_ls_embd_remove(&pos->node);
6360
+ }
6361
+
6293
6362
  fio_unlock(&memory.lock);
6363
+ sys_free(blk, FIO_MEMORY_BLOCK_SIZE * FIO_MEMORY_BLOCKS_PER_ALLOCATION);
6364
+ FIO_LOG_DEBUG("memory allocator returned %p to the system", (void *)blk);
6365
+ FIO_MEMORY_ON_BLOCK_FREE();
6294
6366
  }
6295
6367
 
6296
6368
  /* intializes the block header for an available block of memory. */
6297
6369
  static inline block_s *block_new(void) {
6298
6370
  block_s *blk = NULL;
6299
6371
 
6300
- if (memory.available) {
6301
- fio_lock(&memory.lock);
6302
- blk = memory.available;
6303
- if (blk) {
6304
- memory.available = ((block_s **)blk)[0];
6305
- }
6306
- fio_unlock(&memory.lock);
6307
- }
6372
+ fio_lock(&memory.lock);
6373
+ blk = (block_s *)fio_ls_embd_pop(&memory.available);
6308
6374
  if (blk) {
6375
+ blk = (block_s *)FIO_LS_EMBD_OBJ(block_node_s, node, blk);
6309
6376
  FIO_ASSERT(((uintptr_t)blk & FIO_MEMORY_BLOCK_MASK) == 0,
6310
6377
  "Memory allocator error! double `fio_free`?\n");
6311
- fio_atomic_sub(&memory.count, 1);
6312
- ((block_s **)blk)[0] = NULL;
6313
- ((block_s **)blk)[1] = NULL;
6314
- return block_init(blk);
6378
+ block_init(blk); /* must be performed within lock */
6379
+ fio_unlock(&memory.lock);
6380
+ return blk;
6315
6381
  }
6316
6382
  /* collect memory from the system */
6317
- blk = sys_alloc(FIO_MEMORY_BLOCK_SIZE, 0);
6383
+ blk = sys_alloc(FIO_MEMORY_BLOCK_SIZE * FIO_MEMORY_BLOCKS_PER_ALLOCATION, 0);
6318
6384
  if (!blk)
6319
6385
  return NULL;
6320
- return block_init(blk);
6321
- ;
6386
+ FIO_LOG_DEBUG("memory allocator allocated %p from the system", (void *)blk);
6387
+ FIO_MEMORY_ON_BLOCK_ALLOC();
6388
+ block_init_root(blk, blk);
6389
+ /* the extra memory goes into the memory pool. initialize + linked-list. */
6390
+ block_node_s *tmp = (block_node_s *)blk;
6391
+ for (int i = 1; i < FIO_MEMORY_BLOCKS_PER_ALLOCATION; ++i) {
6392
+ tmp = (block_node_s *)((uintptr_t)tmp + FIO_MEMORY_BLOCK_SIZE);
6393
+ block_init_root((block_s *)tmp, blk);
6394
+ fio_ls_embd_push(&memory.available, &tmp->node);
6395
+ }
6396
+ fio_unlock(&memory.lock);
6397
+ /* return the root block (which isn't in the memory pool). */
6398
+ return blk;
6322
6399
  }
6323
6400
 
6324
6401
  /* allocates memory from within a block - called within an arena's lock */
@@ -6328,7 +6405,7 @@ static inline void *block_slice(uint16_t units) {
6328
6405
  /* arena is empty */
6329
6406
  blk = block_new();
6330
6407
  arena_last_used->block = blk;
6331
- } else if (blk->pos + units > blk->max) {
6408
+ } else if (blk->pos + units > FIO_MEMORY_MAX_SLICES_PER_BLOCK) {
6332
6409
  /* not enough memory in the block - rotate */
6333
6410
  block_free(blk);
6334
6411
  blk = block_new();
@@ -6343,7 +6420,7 @@ static inline void *block_slice(uint16_t units) {
6343
6420
  const void *mem = (void *)((uintptr_t)blk + ((uintptr_t)blk->pos << 4));
6344
6421
  fio_atomic_add(&blk->ref, 1);
6345
6422
  blk->pos += units;
6346
- if (blk->pos >= blk->max) {
6423
+ if (blk->pos >= FIO_MEMORY_MAX_SLICES_PER_BLOCK) {
6347
6424
  /* ... the block was fully utilized, clear arena */
6348
6425
  block_free(blk);
6349
6426
  arena_last_used->block = NULL;
@@ -6397,6 +6474,34 @@ error:
6397
6474
  Allocator Initialization (initialize arenas and allocate a block for each CPU)
6398
6475
  ***************************************************************************** */
6399
6476
 
6477
+ #if DEBUG
6478
+ void fio_memory_dump_missing(void) {
6479
+ fprintf(stderr, "\n ==== Attempting Memory Dump (will crash) ====\n");
6480
+ if (fio_ls_embd_any(&memory.available))
6481
+ return;
6482
+ block_node_s *smallest =
6483
+ FIO_LS_EMBD_OBJ(block_node_s, node, memory.available.next);
6484
+ FIO_LS_EMBD_FOR(&memory.available, node) {
6485
+ block_node_s *tmp = FIO_LS_EMBD_OBJ(block_node_s, node, node);
6486
+ if (smallest > tmp)
6487
+ smallest = tmp;
6488
+ }
6489
+
6490
+ for (size_t i = 0;
6491
+ i < FIO_MEMORY_BLOCK_SIZE * FIO_MEMORY_BLOCKS_PER_ALLOCATION; ++i) {
6492
+ if ((((uintptr_t)smallest + i) & FIO_MEMORY_BLOCK_MASK) == 0) {
6493
+ i += 32;
6494
+ fprintf(stderr, "---block jump---\n");
6495
+ continue;
6496
+ }
6497
+ if (((char *)smallest)[i])
6498
+ fprintf(stderr, "%c", ((char *)smallest)[i]);
6499
+ }
6500
+ }
6501
+ #else
6502
+ #define fio_memory_dump_missing()
6503
+ #endif
6504
+
6400
6505
  static void fio_mem_init(void) {
6401
6506
  if (arenas)
6402
6507
  return;
@@ -6410,17 +6515,9 @@ static void fio_mem_init(void) {
6410
6515
  if (cpu_count <= 0)
6411
6516
  cpu_count = 8;
6412
6517
  memory.cores = cpu_count;
6413
- memory.count = 0 - cpu_count;
6414
6518
  arenas = big_alloc(sizeof(*arenas) * cpu_count);
6415
6519
  FIO_ASSERT_ALLOC(arenas);
6416
- size_t pre_pool = cpu_count > 32 ? 32 : cpu_count;
6417
- for (size_t i = 0; i < pre_pool; ++i) {
6418
- void *block = sys_alloc(FIO_MEMORY_BLOCK_SIZE, 0);
6419
- if (block) {
6420
- block_init(block);
6421
- block_free(block);
6422
- }
6423
- }
6520
+ block_free(block_new());
6424
6521
  pthread_atfork(NULL, NULL, fio_malloc_after_fork);
6425
6522
  }
6426
6523
 
@@ -6428,20 +6525,25 @@ static void fio_mem_destroy(void) {
6428
6525
  if (!arenas)
6429
6526
  return;
6430
6527
 
6528
+ FIO_MEMORY_PRINT_BLOCK_STAT();
6529
+
6431
6530
  for (size_t i = 0; i < memory.cores; ++i) {
6432
6531
  if (arenas[i].block)
6433
6532
  block_free(arenas[i].block);
6434
6533
  arenas[i].block = NULL;
6435
6534
  }
6436
- while (memory.available) {
6437
- block_s *b = memory.available;
6438
- memory.available = ((block_s **)b)[0];
6439
- sys_free(b, FIO_MEMORY_BLOCK_SIZE);
6535
+ if (fio_ls_embd_any(&memory.available)) {
6536
+ FIO_LOG_WARNING("facil.io detected memory traces remaining after cleanup"
6537
+ " - memory leak?");
6538
+ FIO_MEMORY_PRINT_BLOCK_STAT();
6539
+ size_t count = 0;
6540
+ FIO_LS_EMBD_FOR(&memory.available, node) { ++count; }
6541
+ FIO_LOG_DEBUG("Memory pool size: %zu (%zu blocks per allocation).", count,
6542
+ (size_t)FIO_MEMORY_BLOCKS_PER_ALLOCATION);
6440
6543
  }
6441
6544
  big_free(arenas);
6442
6545
  arenas = NULL;
6443
6546
  }
6444
-
6445
6547
  /* *****************************************************************************
6446
6548
  Memory allocation / deacclocation API
6447
6549
  ***************************************************************************** */
@@ -6491,8 +6593,9 @@ void fio_free(void *ptr) {
6491
6593
  * This variation is slightly faster as it might copy less data
6492
6594
  */
6493
6595
  void *fio_realloc2(void *ptr, size_t new_size, size_t copy_length) {
6494
- if (!ptr || ptr == (void *)&on_malloc_zero)
6596
+ if (!ptr || ptr == (void *)&on_malloc_zero) {
6495
6597
  return fio_malloc(new_size);
6598
+ }
6496
6599
  if (!new_size) {
6497
6600
  goto zero_size;
6498
6601
  }
@@ -6514,7 +6617,7 @@ void *fio_realloc2(void *ptr, size_t new_size, size_t copy_length) {
6514
6617
  return new_mem;
6515
6618
  zero_size:
6516
6619
  fio_free(ptr);
6517
- return malloc(0);
6620
+ return fio_malloc(0);
6518
6621
  }
6519
6622
 
6520
6623
  void *fio_realloc(void *ptr, size_t new_size) {
@@ -8314,13 +8417,13 @@ void fio_malloc_test(void) {
8314
8417
  "block.\n",
8315
8418
  count,
8316
8419
  (size_t)((FIO_MEMORY_BLOCK_SLICES - 2) - (sizeof(block_s) >> 4) - 1));
8317
- intptr_t old_memory_pool_count = memory.count;
8420
+ fio_ls_embd_s old_memory_list = memory.available;
8318
8421
  fio_free(mem);
8319
- FIO_ASSERT(memory.available,
8422
+ FIO_ASSERT(fio_ls_embd_any(&memory.available),
8320
8423
  "memory pool empty (memory block wasn't freed)!\n");
8321
- FIO_ASSERT(old_memory_pool_count + 1 == memory.count,
8322
- "memory.count == %ld , was %ld (memory block counting error)!\n",
8323
- (long)memory.count, (long)old_memory_pool_count);
8424
+ FIO_ASSERT(old_memory_list.next != memory.available.next ||
8425
+ memory.available.prev != old_memory_list.prev,
8426
+ "memory pool not updated after block being freed!\n");
8324
8427
  }
8325
8428
  /* rotate block again */
8326
8429
  b = arena_last_used->block;
@@ -9990,7 +10093,4 @@ void fio_test(void) {
9990
10093
  (void)fio_poll;
9991
10094
  }
9992
10095
 
9993
- #undef FIO_ASSERT
9994
- #else
9995
- #define fio_test()
9996
- #endif
10096
+ #endif /* DEBUG */
@@ -227,6 +227,27 @@ Version and helper macros
227
227
  #include <sys/socket.h>
228
228
  #endif
229
229
 
230
+ /* *****************************************************************************
231
+ Patch for OSX version < 10.12 from https://stackoverflow.com/a/9781275/4025095
232
+ ***************************************************************************** */
233
+ #if defined(__MACH__) && !defined(CLOCK_REALTIME)
234
+ #include <sys/time.h>
235
+ #define CLOCK_REALTIME 0
236
+ #define clock_gettime patch_clock_gettime
237
+ // clock_gettime is not implemented on older versions of OS X (< 10.12).
238
+ // If implemented, CLOCK_REALTIME will have already been defined.
239
+ static inline int patch_clock_gettime(int clk_id, struct timespec *t) {
240
+ struct timeval now;
241
+ int rv = gettimeofday(&now, NULL);
242
+ if (rv)
243
+ return rv;
244
+ t->tv_sec = now.tv_sec;
245
+ t->tv_nsec = now.tv_usec * 1000;
246
+ return 0;
247
+ (void)clk_id;
248
+ }
249
+ #endif
250
+
230
251
  /* *****************************************************************************
231
252
  C++ extern start
232
253
  ***************************************************************************** */
@@ -392,9 +413,9 @@ extern int FIO_LOG_LEVEL;
392
413
  #define FIO_LOG_PRINT(level, ...) \
393
414
  do { \
394
415
  if (level <= FIO_LOG_LEVEL) { \
395
- char tmp___log[512]; \
396
- int len___log = snprintf(tmp___log, 500, __VA_ARGS__); \
397
- if (len___log <= 0 || len___log > 500) { \
416
+ char tmp___log[1024]; \
417
+ int len___log = snprintf(tmp___log, 1000, __VA_ARGS__); \
418
+ if (len___log <= 0 || len___log > 1000) { \
398
419
  fwrite("ERROR: log line output too long (can't write).", 46, 1, \
399
420
  stderr); \
400
421
  break; \
@@ -2558,36 +2579,30 @@ C++ extern end
2558
2579
  * When using tcmalloc or jemalloc, it's possible to define `FIO_FORCE_MALLOC`
2559
2580
  * to prevent the facil.io allocator from compiling (`-DFIO_FORCE_MALLOC`).
2560
2581
  */
2561
- #define H_FIO_MEM_H /* prevent fiobj conflicts */
2562
2582
 
2563
- /** Allocator default settings. */
2564
-
2565
- /** The logarithmic value for a memory block, 15 == 32Kb, 16 == 64Kb, etc' */
2566
2583
  #ifndef FIO_MEMORY_BLOCK_SIZE_LOG
2584
+ /**
2585
+ * The logarithmic value for a memory block, 15 == 32Kb, 16 == 64Kb, etc'
2586
+ *
2587
+ * By default, a block of memory is a 32Kb slice from an 8Mb allocation.
2588
+ *
2589
+ * A value of 16 will make this a 64Kb slice from a 16Mb allocation.
2590
+ */
2567
2591
  #define FIO_MEMORY_BLOCK_SIZE_LOG (15)
2568
2592
  #endif
2569
2593
 
2570
- /* dounb't change these - they are derived from FIO_MEMORY_BLOCK_SIZE_LOG */
2571
2594
  #undef FIO_MEMORY_BLOCK_SIZE
2572
- #undef FIO_MEMORY_BLOCK_MASK
2573
- #undef FIO_MEMORY_BLOCK_SLICES
2595
+ /** The resulting memory block size, depends on `FIO_MEMORY_BLOCK_SIZE_LOG` */
2574
2596
  #define FIO_MEMORY_BLOCK_SIZE ((uintptr_t)1 << FIO_MEMORY_BLOCK_SIZE_LOG)
2575
- #define FIO_MEMORY_BLOCK_MASK (FIO_MEMORY_BLOCK_SIZE - 1) /* 0b0...1... */
2576
- #define FIO_MEMORY_BLOCK_SLICES (FIO_MEMORY_BLOCK_SIZE >> 4) /* 16B slices */
2577
-
2578
- #ifndef FIO_MEMORY_BLOCK_ALLOC_LIMIT
2579
- /* defaults to 37.5% of the block, after which `mmap` is used instead */
2580
- #define FIO_MEMORY_BLOCK_ALLOC_LIMIT \
2581
- ((FIO_MEMORY_BLOCK_SIZE >> 2) + (FIO_MEMORY_BLOCK_SIZE >> 3))
2582
- #endif
2583
2597
 
2584
- #ifndef FIO_MEM_MAX_BLOCKS_PER_CORE
2585
2598
  /**
2586
- * The maximum number of available memory blocks that will be pooled before
2587
- * memory is returned to the system.
2599
+ * The maximum allocation size, after which `mmap` will be called instead of the
2600
+ * facil.io allocator.
2601
+ *
2602
+ * Defaults to 50% of the block (16Kb), after which `mmap` is used instead
2588
2603
  */
2589
- #define FIO_MEM_MAX_BLOCKS_PER_CORE \
2590
- (1 << (22 - FIO_MEMORY_BLOCK_SIZE_LOG)) /* 22 == 4Mb per CPU core (1<<22) */
2604
+ #ifndef FIO_MEMORY_BLOCK_ALLOC_LIMIT
2605
+ #define FIO_MEMORY_BLOCK_ALLOC_LIMIT (FIO_MEMORY_BLOCK_SIZE >> 1)
2591
2606
  #endif
2592
2607
 
2593
2608
  /* *****************************************************************************
@@ -3565,48 +3580,51 @@ String Implementation - Memory management
3565
3580
  * since it uses 16 byte alignment right up until allocations are routed
3566
3581
  * directly to `mmap` (due to their size, usually over 12KB).
3567
3582
  */
3568
- #define ROUND_UP_CAPA2WORDS(num) \
3569
- ((((num) + 1) & (sizeof(long double) - 1)) \
3570
- ? (((num) + 1) | (sizeof(long double) - 1)) \
3571
- : (num))
3583
+ #define ROUND_UP_CAPA2WORDS(num) (((num) + 1) | (sizeof(long double) - 1))
3584
+ // Smaller might be:
3585
+ // ((((num) + 1) & (sizeof(long double) - 1))
3586
+ // ? (((num) + 1) | (sizeof(long double) - 1))
3587
+ // : (num))
3588
+
3572
3589
  /**
3573
3590
  * Requires the String to have at least `needed` capacity. Returns the current
3574
3591
  * state of the String.
3575
3592
  */
3576
3593
  FIO_FUNC fio_str_info_s fio_str_capa_assert(fio_str_s *s, size_t needed) {
3577
- if (!s)
3578
- return (fio_str_info_s){.capa = 0};
3594
+ if (!s || s->frozen) {
3595
+ return fio_str_info(s);
3596
+ }
3579
3597
  char *tmp;
3580
3598
  if (s->small || !s->data) {
3599
+ if (needed < FIO_STR_SMALL_CAPA) {
3600
+ return (fio_str_info_s){.capa = (FIO_STR_SMALL_CAPA - 1),
3601
+ .len = (size_t)(s->small >> 1),
3602
+ .data = FIO_STR_SMALL_DATA(s)};
3603
+ }
3581
3604
  goto is_small;
3582
3605
  }
3583
- if (needed > s->capa) {
3584
- needed = ROUND_UP_CAPA2WORDS(needed);
3585
- if (s->dealloc == FIO_FREE) {
3586
- tmp = (char *)FIO_REALLOC(s->data, needed + 1, s->len);
3587
- FIO_ASSERT_ALLOC(tmp);
3588
- } else {
3589
- tmp = (char *)FIO_MALLOC(needed + 1);
3590
- FIO_ASSERT_ALLOC(tmp);
3591
- memcpy(tmp, s->data, s->len);
3592
- if (s->dealloc)
3593
- s->dealloc(s->data);
3594
- s->dealloc = FIO_FREE;
3595
- }
3596
- s->capa = needed;
3597
- s->data = tmp;
3598
- s->data[needed] = 0;
3606
+ if (needed < s->capa) {
3607
+ return (fio_str_info_s){.capa = s->capa, .len = s->len, .data = s->data};
3608
+ }
3609
+ needed = ROUND_UP_CAPA2WORDS(needed);
3610
+ if (s->dealloc == FIO_FREE) {
3611
+ tmp = (char *)FIO_REALLOC(s->data, needed + 1, s->len + 1);
3612
+ FIO_ASSERT_ALLOC(tmp);
3613
+ } else {
3614
+ tmp = (char *)FIO_MALLOC(needed + 1);
3615
+ FIO_ASSERT_ALLOC(tmp);
3616
+ memcpy(tmp, s->data, s->len + 1);
3617
+ if (s->dealloc)
3618
+ s->dealloc(s->data);
3619
+ s->dealloc = FIO_FREE;
3599
3620
  }
3600
- return (fio_str_info_s){
3601
- .capa = (s->frozen ? 0 : s->capa), .len = s->len, .data = s->data};
3621
+ s->capa = needed;
3622
+ s->data = tmp;
3623
+ s->data[needed] = 0;
3624
+ return (fio_str_info_s){.capa = s->capa, .len = s->len, .data = s->data};
3602
3625
 
3603
3626
  is_small:
3604
3627
  /* small string (string data is within the container) */
3605
- if (needed < FIO_STR_SMALL_CAPA) {
3606
- return (fio_str_info_s){.capa = (s->frozen ? 0 : (FIO_STR_SMALL_CAPA - 1)),
3607
- .len = (size_t)(s->small >> 1),
3608
- .data = FIO_STR_SMALL_DATA(s)};
3609
- }
3610
3628
  needed = ROUND_UP_CAPA2WORDS(needed);
3611
3629
  tmp = (char *)FIO_MALLOC(needed + 1);
3612
3630
  FIO_ASSERT_ALLOC(tmp);
@@ -3634,8 +3652,7 @@ is_small:
3634
3652
  .data = tmp,
3635
3653
  };
3636
3654
  #endif
3637
- return (fio_str_info_s){
3638
- .capa = (s->frozen ? 0 : needed), .len = existing_len, .data = s->data};
3655
+ return (fio_str_info_s){.capa = needed, .len = existing_len, .data = s->data};
3639
3656
  }
3640
3657
 
3641
3658
  /** Performs a best attempt at minimizing memory consumption. */
@@ -287,7 +287,7 @@ static inline int swallow_ch(uint8_t **buffer, register uint8_t *const limit,
287
287
  break;
288
288
  }
289
289
  }
290
- #if !defined(__x86_64__)
290
+ #if !ALLOW_UNALIGNED_MEMORY_ACCESS || !defined(__x86_64__)
291
291
  finish:
292
292
  #endif
293
293
  while (*buffer < limit) {
@@ -18,27 +18,6 @@ Feel free to copy, use and enjoy according to the license provided.
18
18
  #include <sys/types.h>
19
19
  #include <unistd.h>
20
20
 
21
- /* *****************************************************************************
22
- Patch for OSX version < 10.12 from https://stackoverflow.com/a/9781275/4025095
23
- ***************************************************************************** */
24
- #if defined(__MACH__) && !defined(CLOCK_REALTIME)
25
- #include <sys/time.h>
26
- #define CLOCK_REALTIME 0
27
- #define clock_gettime patch_clock_gettime
28
- // clock_gettime is not implemented on older versions of OS X (< 10.12).
29
- // If implemented, CLOCK_REALTIME will have already been defined.
30
- static inline int patch_clock_gettime(int clk_id, struct timespec *t) {
31
- struct timeval now;
32
- int rv = gettimeofday(&now, NULL);
33
- if (rv)
34
- return rv;
35
- t->tv_sec = now.tv_sec;
36
- t->tv_nsec = now.tv_usec * 1000;
37
- return 0;
38
- (void)clk_id;
39
- }
40
- #endif
41
-
42
21
  /* *****************************************************************************
43
22
  SSL/TLS patch
44
23
  ***************************************************************************** */
@@ -2065,7 +2044,7 @@ void http_write_log(http_s *h) {
2065
2044
 
2066
2045
  struct timespec start, end;
2067
2046
  clock_gettime(CLOCK_REALTIME, &end);
2068
- start = fio_last_tick();
2047
+ start = h->received_at;
2069
2048
 
2070
2049
  {
2071
2050
  // TODO Guess IP address from headers (forwarded) where possible
@@ -2097,9 +2076,9 @@ void http_write_log(http_s *h) {
2097
2076
  fiobj_str_join(l, h->version);
2098
2077
  fiobj_str_write(l, "\" ", 2);
2099
2078
  if (bytes_sent > 0) {
2100
- fiobj_str_join(l, fiobj_num_tmp(h->status));
2079
+ fiobj_str_write_i(l, h->status);
2101
2080
  fiobj_str_write(l, " ", 1);
2102
- fiobj_str_join(l, fiobj_num_tmp(bytes_sent));
2081
+ fiobj_str_write_i(l, bytes_sent);
2103
2082
  fiobj_str_write(l, "b ", 2);
2104
2083
  } else {
2105
2084
  fiobj_str_join(l, fiobj_num_tmp(h->status));
@@ -2108,7 +2087,7 @@ void http_write_log(http_s *h) {
2108
2087
 
2109
2088
  bytes_sent = ((end.tv_sec - start.tv_sec) * 1000) +
2110
2089
  ((end.tv_nsec - start.tv_nsec) / 1000000);
2111
- fiobj_str_join(l, fiobj_num_tmp(bytes_sent));
2090
+ fiobj_str_write_i(l, bytes_sent);
2112
2091
  fiobj_str_write(l, "ms\r\n", 4);
2113
2092
 
2114
2093
  buff = fiobj_obj2cstr(l);
@@ -2693,12 +2672,9 @@ parse_path:
2693
2672
  Lookup Tables / functions
2694
2673
  ***************************************************************************** */
2695
2674
 
2696
- static FIOBJ tmp_cpy_obj(FIOBJ o) { return fiobj_dup(o); }
2697
-
2698
2675
  #define FIO_SET_NAME fio_mime_set
2699
2676
  #define FIO_SET_OBJ_TYPE FIOBJ
2700
2677
  #define FIO_SET_OBJ_COMPARE(o1, o2) (1)
2701
- #define FIO_SET_OBJ_COPY(dest, o) (dest) = tmp_cpy_obj((o))
2702
2678
  #define FIO_SET_OBJ_DESTROY(o) fiobj_free((o))
2703
2679
 
2704
2680
  #include <fio.h>
@@ -42,6 +42,22 @@ Compile Time Settings
42
42
  #define HTTP_MAX_HEADER_LENGTH 8192
43
43
  #endif
44
44
 
45
+ #ifndef FIO_HTTP_EXACT_LOGGING
46
+ /**
47
+ * By default, facil.io logs the HTTP request cycle using a fuzzy starting point
48
+ * (a close enough timestamp).
49
+ *
50
+ * The fuzzy timestamp includes delays that aren't related to the HTTP request,
51
+ * sometimes including time that was spent waiting on the client. On the other
52
+ * hand, `FIO_HTTP_EXACT_LOGGING` excludes time that the client might have been
53
+ * waiting for facil.io to read data from the network.
54
+ *
55
+ * Due to the preference to err on the side of causion, fuzzy time-stamping is
56
+ * the default.
57
+ */
58
+ #define FIO_HTTP_EXACT_LOGGING 0
59
+ #endif
60
+
45
61
  /** the `http_listen settings, see details in the struct definition. */
46
62
  typedef struct http_settings_s http_settings_s;
47
63
 
@@ -85,8 +85,9 @@ static int write_header(FIOBJ o, void *w_) {
85
85
  fio_str_info_s str = fiobj_obj2cstr(o);
86
86
  if (!str.data)
87
87
  return 0;
88
- fiobj_str_capa_assert(w->dest,
89
- fiobj_obj2cstr(w->dest).len + name.len + str.len + 5);
88
+ // fiobj_str_capa_assert(w->dest,
89
+ // fiobj_obj2cstr(w->dest).len + name.len + str.len +
90
+ // 5);
90
91
  fiobj_str_write(w->dest, name.data, name.len);
91
92
  fiobj_str_write(w->dest, ":", 1);
92
93
  fiobj_str_write(w->dest, str.data, str.len);
@@ -105,7 +106,7 @@ static FIOBJ headers2str(http_s *h, uintptr_t padding) {
105
106
  struct header_writer_s w;
106
107
  {
107
108
  const uintptr_t header_length_guess =
108
- fiobj_hash_count(h->private_data.out_headers) * 48;
109
+ fiobj_hash_count(h->private_data.out_headers) * 64;
109
110
  w.dest = fiobj_str_buf(header_length_guess + padding);
110
111
  }
111
112
  http1pr_s *p = handle2pr(h);
@@ -602,6 +603,13 @@ static int http1_on_http_version(http1_parser_s *parser, char *version,
602
603
  size_t len) {
603
604
  http1_pr2handle(parser2http(parser)).version = fiobj_str_new(version, len);
604
605
  parser2http(parser)->header_size += len;
606
+ /* start counting - occurs on the first line of both requests and responses */
607
+ #if FIO_HTTP_EXACT_LOGGING
608
+ clock_gettime(CLOCK_REALTIME,
609
+ &http1_pr2handle(parser2http(parser)).received_at);
610
+ #else
611
+ http1_pr2handle(parser2http(parser)).received_at = fio_last_tick();
612
+ #endif
605
613
  return 0;
606
614
  }
607
615
  /** called when a header is parsed. */
@@ -29,8 +29,8 @@ Seeking for characters in a string
29
29
  *
30
30
  * On newer systems, `memchr` should be faster.
31
31
  */
32
- static inline int seek2ch(uint8_t **buffer, register uint8_t *const limit,
33
- const uint8_t c) {
32
+ static int seek2ch(uint8_t **buffer, register uint8_t *const limit,
33
+ const uint8_t c) {
34
34
  if (**buffer == c) {
35
35
  #if HTTP1_PARSER_CONVERT_EOL2NUL
36
36
  **buffer = 0;
@@ -72,7 +72,7 @@ static inline int seek2ch(uint8_t **buffer, register uint8_t *const limit,
72
72
  break;
73
73
  }
74
74
  }
75
- #if !defined(__x86_64__)
75
+ #if !ALLOW_UNALIGNED_MEMORY_ACCESS || !defined(__x86_64__)
76
76
  finish:
77
77
  #endif
78
78
  while (*buffer < limit) {
@@ -34,6 +34,11 @@ to maintain and that could be used for an HTTP/1.x client as well.
34
34
  #define HTTP1_PARSER_CONVERT_EOL2NUL 0
35
35
  #endif
36
36
 
37
+ #ifndef FIO_MEMCHAR
38
+ /** Prefer a custom memchr implementation. Usualy memchr is better. */
39
+ #define FIO_MEMCHAR 0
40
+ #endif
41
+
37
42
  #if HTTP_HEADERS_LOWERCASE
38
43
 
39
44
  #define HEADER_NAME_IS_EQ(var_name, const_name, len) \
@@ -132,11 +132,11 @@ FIOBJ HTTP_HVALUE_GZIP;
132
132
  FIOBJ HTTP_HVALUE_KEEP_ALIVE;
133
133
  FIOBJ HTTP_HVALUE_MAX_AGE;
134
134
  FIOBJ HTTP_HVALUE_NO_CACHE;
135
+ FIOBJ HTTP_HVALUE_SSE_MIME;
135
136
  FIOBJ HTTP_HVALUE_WEBSOCKET;
136
137
  FIOBJ HTTP_HVALUE_WS_SEC_VERSION;
137
138
  FIOBJ HTTP_HVALUE_WS_UPGRADE;
138
139
  FIOBJ HTTP_HVALUE_WS_VERSION;
139
- FIOBJ HTTP_HVALUE_SSE_MIME;
140
140
 
141
141
  static void http_lib_init(void *ignr_);
142
142
  static void http_lib_cleanup(void *ignr_);
@@ -163,7 +163,6 @@ static void http_lib_cleanup(void *ignr_) {
163
163
  HTTPLIB_RESET(HTTP_HEADER_DATE);
164
164
  HTTPLIB_RESET(HTTP_HEADER_ETAG);
165
165
  HTTPLIB_RESET(HTTP_HEADER_HOST);
166
- HTTPLIB_RESET(HTTP_HVALUE_SSE_MIME);
167
166
  HTTPLIB_RESET(HTTP_HEADER_LAST_MODIFIED);
168
167
  HTTPLIB_RESET(HTTP_HEADER_ORIGIN);
169
168
  HTTPLIB_RESET(HTTP_HEADER_SET_COOKIE);
@@ -177,6 +176,7 @@ static void http_lib_cleanup(void *ignr_) {
177
176
  HTTPLIB_RESET(HTTP_HVALUE_KEEP_ALIVE);
178
177
  HTTPLIB_RESET(HTTP_HVALUE_MAX_AGE);
179
178
  HTTPLIB_RESET(HTTP_HVALUE_NO_CACHE);
179
+ HTTPLIB_RESET(HTTP_HVALUE_SSE_MIME);
180
180
  HTTPLIB_RESET(HTTP_HVALUE_WEBSOCKET);
181
181
  HTTPLIB_RESET(HTTP_HVALUE_WS_SEC_VERSION);
182
182
  HTTPLIB_RESET(HTTP_HVALUE_WS_UPGRADE);
@@ -350,7 +350,7 @@ static VALUE iodine_cli_parse(VALUE self, VALUE desc) {
350
350
  "-redis -r an optional Redis URL server address. Default: none.",
351
351
  "-redis-ping -rp websocket ping interval (0..255). Default: 5 minutes",
352
352
  FIO_CLI_TYPE_INT, "\n\x1B[4mMisc:\x1B[0m", FIO_CLI_TYPE_PRINT,
353
- "-warmup warm up the application. CAREFUL! iodine might fork.",
353
+ "-warmup --preload warm up the application. CAREFUL! with workers.",
354
354
  FIO_CLI_TYPE_BOOL,
355
355
  "-verbosity -V 0..5 server verbosity level. Default: 4",
356
356
  FIO_CLI_TYPE_INT);
@@ -360,6 +360,7 @@ static VALUE iodine_cli_parse(VALUE self, VALUE desc) {
360
360
  if (level > 0 && level < 100)
361
361
  FIO_LOG_LEVEL = level;
362
362
  }
363
+
363
364
  if (fio_cli_get("-w")) {
364
365
  iodine_workers_set(IodineModule, INT2NUM(fio_cli_get_i("-w")));
365
366
  }
@@ -372,14 +373,25 @@ static VALUE iodine_cli_parse(VALUE self, VALUE desc) {
372
373
  if (fio_cli_get_bool("-warmup")) {
373
374
  rb_hash_aset(defaults, ID2SYM(rb_intern("warmup_")), Qtrue);
374
375
  }
375
- if (fio_cli_get("-p")) {
376
- rb_hash_aset(defaults, ID2SYM(rb_intern("port")),
377
- rb_str_new_cstr(fio_cli_get("-p")));
378
- }
379
376
  if (fio_cli_get("-b")) {
377
+ if (fio_cli_get("-b")[0] == '/' ||
378
+ (fio_cli_get("-b")[0] == '.' && fio_cli_get("-b")[1] == '/')) {
379
+ if (fio_cli_get("-p") &&
380
+ (fio_cli_get("-p")[0] != '0' || fio_cli_get("-p")[1])) {
381
+ FIO_LOG_WARNING(
382
+ "Detected a Unix socket binding (-b) conflicting with port.\n"
383
+ " Port settings (-p %s) are ignored",
384
+ fio_cli_get("-p"));
385
+ }
386
+ fio_cli_set("-p", "0");
387
+ }
380
388
  rb_hash_aset(defaults, ID2SYM(rb_intern("address")),
381
389
  rb_str_new_cstr(fio_cli_get("-b")));
382
390
  }
391
+ if (fio_cli_get("-p")) {
392
+ rb_hash_aset(defaults, ID2SYM(rb_intern("port")),
393
+ rb_str_new_cstr(fio_cli_get("-p")));
394
+ }
383
395
  if (fio_cli_get("-www")) {
384
396
  rb_hash_aset(defaults, ID2SYM(rb_intern("public")),
385
397
  rb_str_new_cstr(fio_cli_get("-www")));
@@ -102,7 +102,7 @@ module Iodine
102
102
  # x.report("Iodine::Mustache - no chaching - render list of 1000") do |times|
103
103
  # Iodine::Mustache.render(nil, data_1000, template)
104
104
  # end
105
- #
105
+ # nil
106
106
  # end
107
107
  # end
108
108
  #
@@ -1,3 +1,3 @@
1
1
  module Iodine
2
- VERSION = '0.7.10'.freeze
2
+ VERSION = '0.7.11'.freeze
3
3
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: iodine
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.7.10
4
+ version: 0.7.11
5
5
  platform: ruby
6
6
  authors:
7
7
  - Boaz Segev
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2018-11-19 00:00:00.000000000 Z
11
+ date: 2018-11-22 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: bundler
@@ -210,7 +210,7 @@ licenses:
210
210
  - MIT
211
211
  metadata:
212
212
  allowed_push_host: https://rubygems.org
213
- post_install_message: 'Thank you for installing Iodine 0.7.10.
213
+ post_install_message: 'Thank you for installing Iodine 0.7.11.
214
214
 
215
215
  '
216
216
  rdoc_options: []