iodine 0.2.11 → 0.2.12

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 1b66fc52e683d2179b50db49e0c3912b11465cfc
4
- data.tar.gz: c52e3bcb4f88dc8134a6fd0cd8c933569a7feccf
3
+ metadata.gz: f452e7b373f6e571cfde2d15c813a2b56b68b7ec
4
+ data.tar.gz: ae82d8b0e5a8e89a341e94ad8fa64f4bdcbe2bab
5
5
  SHA512:
6
- metadata.gz: ce2c2217e792f1cd6b4178e0a12cfc80bf5eb7fef89113ce232799be5fb2797da2c09e399c324c189019434ee566d4083ff68cdf288eacc846c4042e76709cc7
7
- data.tar.gz: c7aae4d6787b9d1442a978f66d83610f2391819e1c643099ed2fad2c78ae36d484d66e122295044b90d6d8039ab9a7fb013ef7ef9e95fa037fad3843213c3565
6
+ metadata.gz: e10f6de46108d5282babbbbde57f24889def07db0807cdc5674257881c6082ec219107d44f7c25c1523bb9365386ae5b188129ef4cf410d49c9ab62dacb255bf
7
+ data.tar.gz: 5f9dddd11aaad923cedb1151902fc92398e863a281360873642594dd77dd6fbb844f9c1498ff0a6d622cc69de6b857b370b7eab35c2de672f51e6acd47098fc5
@@ -8,7 +8,9 @@ Please notice that this change log contains changes for upcoming releases as wel
8
8
 
9
9
  ***
10
10
 
11
- Change log v.0.2.12 (next release)
11
+ Change log v.0.2.12
12
+
13
+ **Fix** removed `mempool` after it failed some stress and concurrency tests.
12
14
 
13
15
  ***
14
16
 
@@ -32,7 +32,7 @@ class ShootoutApp
32
32
  else
33
33
  msg = {type: 'broadcast', payload: payload}.to_json
34
34
  # Iodine::Websocket.each {|ws| ws.write msg}
35
- Iodine::Websocket.multiwrite(msg) # {|ws| true }
35
+ Iodine::Websocket.each_write(msg) # {|ws| true }
36
36
  write({type: "broadcastResult", payload: payload}.to_json)
37
37
  end
38
38
  end
@@ -445,7 +445,7 @@ void async_join() {
445
445
  }
446
446
  perform_tasks();
447
447
  async_free();
448
- };
448
+ }
449
449
 
450
450
  /**
451
451
  Waits for existing tasks to complete and releases the thread
@@ -457,7 +457,7 @@ void async_signal() {
457
457
  return;
458
458
  async->flags.run = 0;
459
459
  wake_all_threads();
460
- };
460
+ }
461
461
 
462
462
  /******************************************************************************
463
463
  Test
@@ -195,7 +195,7 @@ static size_t fd_capacity = 0;
195
195
  #define uuid2info(uuid) fd_info[sock_uuid2fd(uuid)]
196
196
  #define is_valid(uuid) \
197
197
  (fd_info[sock_uuid2fd(uuid)].fduuid.data.counter == \
198
- ((fduuid_u)(uuid)).data.counter && \
198
+ ((fduuid_u *)(&uuid))->data.counter && \
199
199
  uuid2info(uuid).open)
200
200
 
201
201
  static struct {
@@ -254,8 +254,9 @@ static void destroy_lib_data(void) {
254
254
  #if USE_MALLOC == 1
255
255
  free(fd_info);
256
256
  #else
257
- munmap(fd_info, (BUFFER_PACKET_REAL_SIZE * BUFFER_PACKET_POOL) +
258
- (sizeof(fd_info_s) * fd_capacity));
257
+ munmap(fd_info,
258
+ (BUFFER_PACKET_REAL_SIZE * BUFFER_PACKET_POOL) +
259
+ (sizeof(fd_info_s) * fd_capacity));
259
260
  #endif
260
261
  }
261
262
  fd_info = NULL;
@@ -300,7 +301,7 @@ static void sock_lib_init(void) {
300
301
  spn_unlock(&fd_info[i].lock);
301
302
  }
302
303
  /* initialize pool */
303
- buffer_pool.allocated = buff_mem + fd_map_mem_size;
304
+ buffer_pool.allocated = (void *)((uintptr_t)buff_mem + fd_map_mem_size);
304
305
  buffer_pool.pool = buffer_pool.allocated;
305
306
  sock_packet_s *pos = buffer_pool.pool;
306
307
  for (size_t i = 0; i < BUFFER_PACKET_POOL - 1; i++) {
@@ -436,10 +437,11 @@ static inline int sock_flush_fd(int fd) {
436
437
  // send the data
437
438
  if (fd_info[fd].rw_hooks && fd_info[fd].rw_hooks->write)
438
439
  sent = fd_info[fd].rw_hooks->write(
439
- fd_info[fd].fduuid.uuid, (((void *)(packet + 1)) + fd_info[fd].sent),
440
+ fd_info[fd].fduuid.uuid,
441
+ (void *)(((uintptr_t)(packet + 1)) + fd_info[fd].sent),
440
442
  i_exp - fd_info[fd].sent);
441
443
  else
442
- sent = write(fd, (((void *)(packet + 1)) + fd_info[fd].sent),
444
+ sent = write(fd, (void *)(((uintptr_t)(packet + 1)) + fd_info[fd].sent),
443
445
  i_exp - fd_info[fd].sent);
444
446
  // review result and update packet data
445
447
  if (sent < 0) {
@@ -463,11 +465,13 @@ static inline int sock_flush_data(int fd) {
463
465
  ssize_t sent;
464
466
  if (fd_info[fd].rw_hooks && fd_info[fd].rw_hooks->write)
465
467
  sent = fd_info[fd].rw_hooks->write(
466
- fd_info[fd].fduuid.uuid, fd_info[fd].packet->buffer + fd_info[fd].sent,
468
+ fd_info[fd].fduuid.uuid,
469
+ (void *)((uintptr_t)fd_info[fd].packet->buffer + fd_info[fd].sent),
467
470
  fd_info[fd].packet->length - fd_info[fd].sent);
468
471
  else
469
- sent = write(fd, fd_info[fd].packet->buffer + fd_info[fd].sent,
470
- fd_info[fd].packet->length - fd_info[fd].sent);
472
+ sent = write(
473
+ fd, (void *)((uintptr_t)fd_info[fd].packet->buffer + fd_info[fd].sent),
474
+ fd_info[fd].packet->length - fd_info[fd].sent);
471
475
  if (sent < 0) {
472
476
  if (ERR_OK)
473
477
  return -1;
@@ -502,7 +506,7 @@ static void sock_flush_unsafe(int fd) {
502
506
  #if SOCK_DELAY_WRITE == 1
503
507
 
504
508
  static inline void sock_flush_schd(intptr_t uuid) {
505
- if (async_run((void *)sock_flush, (void *)uuid) == -1)
509
+ if (async_run((void (*)(void *))sock_flush, (void *)uuid) == -1)
506
510
  goto fallback;
507
511
  return;
508
512
  fallback:
@@ -744,7 +748,9 @@ sock_packet_s *sock_checkout_packet(void) {
744
748
  if (packet) {
745
749
  buffer_pool.pool = packet->metadata.next;
746
750
  spn_unlock(&buffer_pool.lock);
747
- *packet = (sock_packet_s){.buffer = packet + 1, .metadata.next = NULL};
751
+ *packet = (sock_packet_s){
752
+ .buffer = packet + 1, .metadata.next = NULL, .metadata.dealloc = free,
753
+ };
748
754
  return packet;
749
755
  }
750
756
  spn_unlock(&buffer_pool.lock);
@@ -789,7 +795,7 @@ void sock_free_packet(sock_packet_s *packet) {
789
795
  if (next->metadata.keep_open == 0)
790
796
  close((int)((ssize_t)next->buffer));
791
797
  } else if (next->metadata.external)
792
- free(next->buffer);
798
+ next->metadata.dealloc(next->buffer);
793
799
  if (next->metadata.next == NULL)
794
800
  break; /* next will hold the last packet in the chain. */
795
801
  next = next->metadata.next;
@@ -920,7 +926,7 @@ ssize_t sock_write2_fn(sock_write_info_s options) {
920
926
  memcpy(packet->buffer, options.buffer, to_cpy);
921
927
  packet->length = to_cpy;
922
928
  options.length -= to_cpy;
923
- options.buffer += to_cpy;
929
+ options.buffer = (void *)((uintptr_t)options.buffer + to_cpy);
924
930
  sock_send_packet_unsafe(sock_uuid2fd(options.fduuid), packet);
925
931
  if (!is_valid(options.fduuid) || uuid2info(options.fduuid).err == 1 ||
926
932
  options.length == 0)
@@ -69,7 +69,7 @@ typedef union {
69
69
  } fduuid_u;
70
70
 
71
71
  #define FDUUID_FAIL(uuid) (uuid == -1)
72
- #define sock_uuid2fd(uuid) ((fduuid_u)(uuid)).data.fd
72
+ #define sock_uuid2fd(uuid) ((fduuid_u *)(&uuid))->data.fd
73
73
  #endif
74
74
 
75
75
  /* *****************************************************************************
@@ -362,6 +362,14 @@ typedef struct sock_packet_s {
362
362
  /** Starting point offset, when the buffer is a file (see
363
363
  * `sock_packet_s.metadata.is_fd`). */
364
364
  off_t offset;
365
+ /** This deallocation callback will be called when the packet is finished
366
+ * with the buffer.
367
+ * The deallocation callback will only be called for buffers marked as
368
+ * `external`.
369
+ * If no deallocation callback is specified, `free` will be called as a
370
+ * default deallocation method.
371
+ */
372
+ void (*dealloc)(void *buffer);
365
373
  /** sets whether a packet can be inserted before this packet without
366
374
  * interrupting the communication flow. */
367
375
  unsigned can_interrupt : 1;
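The new `dealloc` field is what lets the broadcast code hand one buffer to many packets (see the websockets.c hunks below): `sock_checkout_packet` now fills in `free` as the default, and `sock_free_packet` calls `metadata.dealloc` instead of a hard-coded `free` for `external` buffers. A minimal sketch of queuing an externally owned buffer with a custom deallocator, using only the libsock API visible in this diff (the `log_and_free` callback and `send_owned_buffer` helper are hypothetical):

```c
#include "libsock.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical deallocator: log the release, then free the heap buffer */
static void log_and_free(void *buffer) {
  fprintf(stderr, "releasing %p\n", buffer);
  free(buffer);
}

/* hypothetical helper: queue `len` bytes on `uuid` and let libsock release
 * the buffer through `log_and_free` once the packet is done with it. */
static void send_owned_buffer(intptr_t uuid, void *buffer, size_t len) {
  sock_packet_s *packet = sock_checkout_packet();
  if (!packet)
    return;
  *packet = (sock_packet_s){
      .buffer = buffer,
      .length = len,
      .metadata.external = 1,           /* buffer is not the packet's own */
      .metadata.dealloc = log_and_free, /* overrides the `free` default */
  };
  sock_send_packet(uuid, packet);
}
```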
@@ -32,7 +32,10 @@ Portability - used to help port this to different frameworks (i.e. Ruby).
32
32
  void async_signal();
33
33
 
34
34
  /* used here but declared elsewhere */
35
- void call_async_signal(void *_) { async_signal(); }
35
+ void call_async_signal(void *_) {
36
+ (void)(_);
37
+ async_signal();
38
+ }
36
39
 
37
40
  /* protect the call to join from any exceptions */
38
41
  static void *_inner_join_with_rbthread(void *rbt) {
@@ -69,7 +72,7 @@ static void *create_ruby_thread_gvl(void *_args) {
69
72
 
70
73
  /* create a ruby thread */
71
74
  UNUSED_FUNC static int create_thread(THREAD_TYPE *thr,
72
- void *(*thread_func)(void *), void *arg) {
75
+ void *(*thread_func)(void *), void *arg) {
73
76
  struct CreateThreadArgs *data = malloc(sizeof(*data));
74
77
  if (!data)
75
78
  return -1;
@@ -7,7 +7,6 @@ Feel free to copy, use and enjoy according to the license provided.
7
7
  #include "websockets.h"
8
8
  #include "bscrypt.h"
9
9
  #include "libserver.h"
10
- #include "mempool.h"
11
10
  #include <arpa/inet.h>
12
11
  #include <stdio.h>
13
12
  #include <stdlib.h>
@@ -57,13 +56,13 @@ struct buffer_s create_ws_buffer(ws_s *owner) {
57
56
  (void)(owner);
58
57
  struct buffer_s buff;
59
58
  buff.size = round_up_buffer_size(WS_INITIAL_BUFFER_SIZE);
60
- buff.data = mempool_malloc(buff.size);
59
+ buff.data = malloc(buff.size);
61
60
  return buff;
62
61
  }
63
62
 
64
63
  struct buffer_s resize_ws_buffer(ws_s *owner, struct buffer_s buff) {
65
64
  buff.size = round_up_buffer_size(buff.size);
66
- void *tmp = mempool_realloc(buff.data, buff.size);
65
+ void *tmp = realloc(buff.data, buff.size);
67
66
  if (!tmp) {
68
67
  free_ws_buffer(owner, buff);
69
68
  buff.size = 0;
@@ -74,7 +73,7 @@ struct buffer_s resize_ws_buffer(ws_s *owner, struct buffer_s buff) {
74
73
  void free_ws_buffer(ws_s *owner, struct buffer_s buff) {
75
74
  (void)(owner);
76
75
  if (buff.data)
77
- mempool_free(buff.data);
76
+ free(buff.data);
78
77
  }
79
78
 
80
79
  #undef round_up_buffer_size
@@ -425,7 +424,7 @@ Create/Destroy the websocket object
425
424
 
426
425
  static ws_s *new_websocket() {
427
426
  // allocate the protocol object
428
- ws_s *ws = mempool_malloc(sizeof(*ws));
427
+ ws_s *ws = malloc(sizeof(*ws));
429
428
  memset(ws, 0, sizeof(*ws));
430
429
 
431
430
  // setup the protocol & protocol callbacks
@@ -442,7 +441,7 @@ static void destroy_ws(ws_s *ws) {
442
441
  if (ws->on_close)
443
442
  ws->on_close(ws);
444
443
  free_ws_buffer(ws, ws->buffer);
445
- mempool_free(ws);
444
+ free(ws);
446
445
  }
447
446
 
448
447
  /*******************************************************************************
@@ -807,25 +806,54 @@ Multi-Write (direct broadcast) Implementation
807
806
  struct websocket_multi_write {
808
807
  uint8_t (*if_callback)(ws_s *ws_to, void *arg);
809
808
  void *arg;
809
+ spn_lock_i lock;
810
+ size_t count;
810
811
  size_t length;
811
812
  uint8_t buffer[];
812
813
  };
813
814
 
815
+ static void ws_reduce_or_free_multi_write(void *buff) {
816
+ struct websocket_multi_write *mw =
817
+ (void *)((uintptr_t)buff - sizeof(struct websocket_multi_write));
818
+ spn_lock(&mw->lock);
819
+ mw->count -= 1;
820
+ spn_unlock(&mw->lock);
821
+ if (!mw->count) {
822
+ free(mw);
823
+ }
824
+ }
825
+
814
826
  static void ws_finish_multi_write(intptr_t fd, protocol_s *_ws, void *arg) {
827
+ struct websocket_multi_write *multi = arg;
815
828
  (void)(fd);
816
829
  (void)(_ws);
817
- mempool_free(arg);
818
- }
819
- static void ws_check_multi_write(intptr_t fd, protocol_s *_ws, void *arg) {
820
- struct websocket_multi_write *multi = arg;
821
- if (multi->if_callback((void *)_ws, multi->arg))
822
- sock_write(fd, multi->buffer, multi->length);
830
+ ws_reduce_or_free_multi_write(multi->buffer);
823
831
  }
824
832
 
825
833
  static void ws_direct_multi_write(intptr_t fd, protocol_s *_ws, void *arg) {
826
834
  struct websocket_multi_write *multi = arg;
827
835
  (void)(_ws);
828
- sock_write(fd, multi->buffer, multi->length);
836
+
837
+ sock_packet_s *packet = sock_checkout_packet();
838
+ *packet = (sock_packet_s){
839
+ .buffer = multi->buffer,
840
+ .length = multi->length,
841
+ .metadata.can_interrupt = 1,
842
+ .metadata.dealloc = ws_reduce_or_free_multi_write,
843
+ .metadata.external = 1,
844
+ };
845
+
846
+ spn_lock(&multi->lock);
847
+ multi->count += 1;
848
+ spn_unlock(&multi->lock);
849
+
850
+ sock_send_packet(fd, packet);
851
+ }
852
+
853
+ static void ws_check_multi_write(intptr_t fd, protocol_s *_ws, void *arg) {
854
+ struct websocket_multi_write *multi = arg;
855
+ if (multi->if_callback((void *)_ws, multi->arg))
856
+ ws_direct_multi_write(fd, _ws, arg);
829
857
  }
830
858
 
831
859
  void websocket_write_each(ws_s *ws_originator, void *data, size_t len,
@@ -833,11 +861,13 @@ void websocket_write_each(ws_s *ws_originator, void *data, size_t len,
833
861
  uint8_t (*if_callback)(ws_s *ws_to, void *arg),
834
862
  void *arg) {
835
863
  struct websocket_multi_write *multi =
836
- mempool_malloc(len + 14 /* max head size */ + sizeof(*multi));
864
+ malloc(len + 14 /* max head size */ + sizeof(*multi));
837
865
  multi->length =
838
866
  websocket_encode(multi->buffer, data, len, is_text, 1, 1, as_client);
839
867
  multi->if_callback = if_callback;
840
868
  multi->arg = arg;
869
+ multi->lock = SPN_LOCK_INIT;
870
+ multi->count = 1;
841
871
  server_each((ws_originator ? ws_originator->fd : -1), WEBSOCKET_ID_STR,
842
872
  (if_callback ? ws_check_multi_write : ws_direct_multi_write),
843
873
  multi, ws_finish_multi_write);
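Instead of copying the encoded frame into every socket's outgoing buffer, each target socket now gets a packet that points at the single shared `multi` allocation; `count` starts at 1 for the `server_each` traversal, each queued packet adds a reference, and `ws_reduce_or_free_multi_write` frees the allocation only when the last reference is dropped. A minimal sketch of that reference-counting idea in isolation, using C11 atomics rather than the project's `spn_lock` (the names here are illustrative, not the library's API):

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* one shared, reference-counted broadcast buffer */
typedef struct {
  atomic_size_t count; /* starts at 1 for the creator */
  size_t length;
  uint8_t data[];
} shared_buffer_s;

static shared_buffer_s *shared_buffer_new(const void *src, size_t len) {
  shared_buffer_s *b = malloc(sizeof(*b) + len);
  if (!b)
    return NULL;
  atomic_init(&b->count, 1);
  b->length = len;
  memcpy(b->data, src, len);
  return b;
}

/* each consumer (e.g. each queued packet) takes a reference... */
static void shared_buffer_ref(shared_buffer_s *b) {
  atomic_fetch_add(&b->count, 1);
}

/* ...and the buffer is freed only by whoever drops the last reference */
static void shared_buffer_unref(shared_buffer_s *b) {
  if (atomic_fetch_sub(&b->count, 1) == 1)
    free(b);
}
```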
@@ -1,3 +1,3 @@
1
1
  module Iodine
2
- VERSION = '0.2.11'.freeze
2
+ VERSION = '0.2.12'.freeze
3
3
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: iodine
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.2.11
4
+ version: 0.2.12
5
5
  platform: ruby
6
6
  authors:
7
7
  - Boaz Segev
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2017-02-20 00:00:00.000000000 Z
11
+ date: 2017-02-21 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: rack
@@ -147,7 +147,6 @@ files:
147
147
  - ext/iodine/libserver.h
148
148
  - ext/iodine/libsock.c
149
149
  - ext/iodine/libsock.h
150
- - ext/iodine/mempool.h
151
150
  - ext/iodine/misc.c
152
151
  - ext/iodine/misc.h
153
152
  - ext/iodine/random.c
@@ -1,836 +0,0 @@
1
- /*
2
- Copyright: Boaz Segev, 2016-2017
3
- License: MIT
4
-
5
- Feel free to copy, use and enjoy according to the license provided.
6
- */
7
- #ifndef MEMPOOL_H
8
-
9
- /* *****************************************************************************
10
- A simple `mmap` based localized memory pool (localized `malloc` alternative).
11
-
12
- The memory pool is localized to the same object file (C file). See NOTICE.
13
-
14
- The issue: objects that have a long life, such as Websocket / HTTP2 protocol
15
- objects or server wide strings with reference counts, cause memory fragmentation
16
- when allocated on the heap alongside objects that have a short life.
17
-
18
- This is a common issue when using `malloc` in long running processes for all
19
- allocations.
20
-
21
- This issue affects long running processes (such as servers) while its effect on
22
- short lived processes is less acute and could often be ignored.
23
-
24
- To circumvent this issue, a separate memory allocation method is used for
25
- long-lived objects.
26
-
27
- This memory pool allocates large blocks of memory (~2Mb at a time), minimizing
28
- small memory fragmentation by both reserving large memory blocks and separating
29
- memory locality between long lived objects and short lived objects.
30
-
31
- The memory pool isn't expected to be faster than the system's `malloc`
32
- (although, sometimes it might perform better). However, selective use of this
33
- memory pool could improve concurrency (each pool has a separate lock, unlike
34
- `malloc`'s system lock) as well as help with memory fragmentation.
35
-
36
- ================================================================================
37
- NOTICE:
38
-
39
- The memory pool is attached to the specific file in which `mempool.h` is
40
- included.
41
-
42
- The memory should NEVER be freed from a different file.
43
-
44
- However, it's easy to work around this limitation by wrapping the `mempool_`
45
- functions using proper `create` / `destroy` functions for any objects.
46
-
47
- ================================================================================
48
-
49
- This file requires the "spnlock.h" library file as well. Together these files
50
- can also be used separately from the `facil.io` library.
51
-
52
- */
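The NOTICE above is why the pool was awkward to use across translation units: every C file that includes `mempool.h` gets its own pool, so allocation and release must stay in one file. The workaround the comment describes, sketched with hypothetical wrapper names (and now moot, since this release drops `mempool.h` in favor of plain `malloc`/`free`):

```c
/* my_string.c -- the only file that includes mempool.h for this object type */
#include "mempool.h"
#include <string.h>

typedef struct {
  size_t len;
  char data[];
} my_string_s;

/* other files call these wrappers, never mempool_malloc / mempool_free */
my_string_s *my_string_create(const char *src) {
  size_t len = strlen(src);
  my_string_s *s = mempool_malloc(sizeof(*s) + len + 1);
  if (!s)
    return NULL;
  s->len = len;
  memcpy(s->data, src, len + 1);
  return s;
}

void my_string_destroy(my_string_s *s) { mempool_free(s); }
```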
53
- #define MEMPOOL_H
54
- MEMPOOL_H
55
- #ifndef _GNU_SOURCE
56
- #define _GNU_SOURCE
57
- #endif
58
-
59
- #include <errno.h>
60
- #include <signal.h>
61
- #include <stdint.h>
62
- #include <stdio.h>
63
- #include <stdlib.h>
64
- #include <string.h>
65
-
66
- #ifndef UNUSED_FUNC
67
- #define UNUSED_FUNC __attribute__((unused))
68
- #endif
69
-
70
- /* *****************************************************************************
71
- ********************************************************************************
72
- API Declarations
73
- ********************************************************************************
74
- ***************************************************************************** */
75
-
76
- /** Allocates memory from the pool. */
77
- static UNUSED_FUNC void *mempool_malloc(size_t size);
78
- /**
79
- * Frees the memory, releasing it back to the pool (or, sometimes, the system).
80
- */
81
- static UNUSED_FUNC void mempool_free(void *ptr);
82
- /**
83
- * Behaves the same as the system's `realloc`, attempting to resize the memory
84
- * when possible.
85
- *
86
- * On error returns NULL (the old pointer data remains allocated and valid)
87
- * otherwise returns a new pointer (either equal to the old or after
88
- * deallocating the old one).
89
- */
90
- static UNUSED_FUNC void *mempool_realloc(void *ptr, size_t new_size);
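As with the system `realloc`, the contract above means a caller must not overwrite its only copy of the old pointer with the return value. A short sketch of the safe pattern (the `grow_buffer` helper is hypothetical):

```c
#include "mempool.h"
#include <stdlib.h>

/* hypothetical helper: grow `*buf` to `new_size` bytes without leaking it;
 * mempool_realloc returns NULL on error and leaves the old allocation valid. */
static int grow_buffer(void **buf, size_t new_size) {
  void *tmp = mempool_realloc(*buf, new_size);
  if (!tmp)
    return -1; /* *buf is untouched and still usable */
  *buf = tmp;
  return 0;
}
```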
91
-
92
- #if defined(DEBUG) && DEBUG == 1
93
- /** Tests the memory pool, both testing against issues / corruption and testing
94
- * its performance against the system's `malloc`.
95
- */
96
- static UNUSED_FUNC void mempool_test(void);
97
- #endif
98
-
99
- /* *****************************************************************************
100
- ********************************************************************************
101
- Implementation
102
- ********************************************************************************
103
- ***************************************************************************** */
104
-
105
- /* *****************************************************************************
106
- Memory block allocation
107
- */
108
-
109
- #define MEMPOOL_BLOCK_SIZE (1UL << 21)
110
- #define MEMPOOL_ORDERING_LIMIT 32
111
- #define MEMPOOL_RETURN_MEM_TO_SYSTEM 1
112
-
113
- /* Will we use mmap or malloc? */
114
- // clang-format off
115
- #ifdef __has_include
116
- /* check for unix support */
117
- # if __has_include(<unistd.h>) && __has_include(<sys/mman.h>)
118
- # define HAS_UNIX_FEATURES
119
- # include <unistd.h>
120
- # endif
121
- #endif
122
- // clang-format on
123
-
124
- /* *****************************************************************************
125
- spnlock.h (can also be embedded instead of included)
126
- */
127
- #include "spnlock.h"
128
-
129
- /* *****************************************************************************
130
- Memory slices, tree and helpers
131
- */
132
-
133
- struct mempool_reserved_slice_s_offset { /** offset from this slice */
134
- uint32_t reserved1; /* used to make the offset 16 bytes long */
135
- uint32_t ahead;
136
- uint32_t behind;
137
- uint32_t reserved2; /* used to make the offset 16 bytes long */
138
- };
139
-
140
- typedef struct mempool_reserved_slice_s {
141
- /** offset from this slice */
142
- struct mempool_reserved_slice_s_offset offset;
143
- /** Used for the free slices linked list. */
144
- struct mempool_reserved_slice_s *next;
145
- struct mempool_reserved_slice_s *prev;
146
- } mempool_reserved_slice_s;
147
-
148
- static struct {
149
- mempool_reserved_slice_s *available;
150
- spn_lock_i lock;
151
- } mempool_reserved_pool = {.available = NULL, .lock = SPN_LOCK_INIT};
152
-
153
- #define MEMPOOL_LOCK() spn_lock(&mempool_reserved_pool.lock)
154
- #define MEMPOOL_UNLOCK() spn_unlock(&mempool_reserved_pool.lock)
155
-
156
- #define MEMPOOL_USED_MARKER ((uint32_t)(~0UL << 21))
157
- #define MEMPOOL_INDI_MARKER ((uint32_t)0xF7F7F7F7UL)
158
- #define MEMPOOL_SIZE_MASK (MEMPOOL_BLOCK_SIZE - 1)
159
-
160
- #define MEMPOOL_SLICE2PTR(slice) \
161
- ((void *)(((uintptr_t)(slice)) + \
162
- (sizeof(struct mempool_reserved_slice_s_offset))))
163
- #define MEMPOOL_PTR2SLICE(ptr) \
164
- ((mempool_reserved_slice_s *)(((uintptr_t)(ptr)) - \
165
- (sizeof( \
166
- struct mempool_reserved_slice_s_offset))))
167
-
168
- /* *****************************************************************************
169
- Memory Block Allocation / Deallocation
170
- */
171
- #ifdef HAS_UNIX_FEATURES
172
- #include <sys/mman.h>
173
-
174
- #define MEMPOOL_ALLOC_SPECIAL(target, size) \
175
- do { \
176
- target = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, \
177
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
178
- if (target == MAP_FAILED) \
179
- target = NULL; \
180
- } while (0);
181
-
182
- #define MEMPOOL_DEALLOC_SPECIAL(target, size) munmap((target), (size))
183
-
184
- #else
185
-
186
- #define MEMPOOL_ALLOC_SPECIAL(target, size) \
187
- do { \
188
- target = malloc(size); \
189
- } while (0);
190
-
191
- #define MEMPOOL_DEALLOC_SPECIAL(target, size) free((target));
192
-
193
- #endif
194
-
195
- /* *****************************************************************************
196
- Helpers: Memory block slicing and memory pool list maintenance.
197
- */
198
-
199
- /* *****************************************************************************
200
- API implementation
201
- */
202
-
203
- static UNUSED_FUNC void *mempool_malloc(size_t size) {
204
- if (!size)
205
- return NULL;
206
- if (size & 15) {
207
- size = (size & (~15)) + 16;
208
- }
209
-
210
- size += sizeof(struct mempool_reserved_slice_s_offset);
211
-
212
- mempool_reserved_slice_s *slice = NULL;
213
-
214
- if (size > (MEMPOOL_BLOCK_SIZE - (sizeof(mempool_reserved_slice_s) << 1)))
215
- goto alloc_indi;
216
-
217
- MEMPOOL_LOCK();
218
- slice = mempool_reserved_pool.available;
219
- while (slice && slice->offset.ahead < size)
220
- slice = slice->next;
221
- if (slice) {
222
- /* remove slice from available memory list */
223
- if (slice->next)
224
- slice->next->prev = slice->prev;
225
- if (slice->prev)
226
- slice->prev->next = slice->next;
227
- else
228
- mempool_reserved_pool.available = slice->next;
229
- slice->next = NULL;
230
- slice->prev = NULL;
231
- } else {
232
- MEMPOOL_ALLOC_SPECIAL(slice, MEMPOOL_BLOCK_SIZE);
233
- // fprintf(stderr, "Allocated Block at %p\n", slice);
234
- slice->offset.behind = 0;
235
- slice->offset.ahead =
236
- MEMPOOL_BLOCK_SIZE - (sizeof(struct mempool_reserved_slice_s_offset));
237
-
238
- mempool_reserved_slice_s *tmp =
239
- (mempool_reserved_slice_s
240
- *)(((uintptr_t)(slice)) + MEMPOOL_BLOCK_SIZE -
241
- (sizeof(struct mempool_reserved_slice_s_offset)));
242
- tmp->offset.ahead = 0;
243
- tmp->offset.behind = slice->offset.ahead;
244
- }
245
-
246
- if (!slice) {
247
- MEMPOOL_UNLOCK();
248
- fprintf(stderr, "mempool: no memory\n");
249
- return NULL;
250
- }
251
-
252
- if (slice->offset.ahead > (size + sizeof(mempool_reserved_slice_s))) {
253
- if (slice->offset.ahead & MEMPOOL_USED_MARKER) {
254
- fprintf(stderr, "mempool ERROR: allocating an allocated slice!\n");
255
- }
256
- /* cut the slice in two */
257
- mempool_reserved_slice_s *tmp =
258
- (mempool_reserved_slice_s *)(((uintptr_t)slice) + size);
259
- tmp->offset.behind = size;
260
- tmp->offset.ahead = slice->offset.ahead - size;
261
- slice->offset.ahead = size;
262
- /* inform higher neighbor about any updates */
263
- ((mempool_reserved_slice_s *)(((uintptr_t)tmp) + tmp->offset.ahead))
264
- ->offset.behind = tmp->offset.ahead;
265
- /* place the new slice in the available memory list */
266
- uint16_t limit = MEMPOOL_ORDERING_LIMIT;
267
- tmp->next = NULL;
268
- tmp->prev = NULL;
269
- mempool_reserved_slice_s **pos = &mempool_reserved_pool.available;
270
- while (limit && *pos && ((*pos)->offset.ahead < tmp->offset.ahead)) {
271
- tmp->prev = *pos;
272
- pos = &(*pos)->next;
273
- --limit;
274
- }
275
- if (*pos) {
276
- tmp->next = *pos;
277
- tmp->next->prev = tmp;
278
- *pos = tmp;
279
- } else {
280
- *pos = tmp;
281
- }
282
- }
283
-
284
- slice->offset.ahead |= MEMPOOL_USED_MARKER;
285
- MEMPOOL_UNLOCK();
286
- slice->next = NULL;
287
- slice->prev = NULL;
288
- // mempool_reserved_slice_s *tmp =
289
- // (void *)((uintptr_t)slice + (slice->offset.ahead &
290
- // MEMPOOL_SIZE_MASK));
291
- // fprintf(stderr, "Allocated %lu bytes at: %u <- %p -> %u."
292
- // "next: %u <- %p -> %u\n ",
293
- // size, slice->offset.behind, slice,
294
- // (uint32_t)(slice->offset.ahead & MEMPOOL_SIZE_MASK),
295
- // tmp->offset.behind, tmp,
296
- // (uint32_t)(tmp->offset.ahead & MEMPOOL_SIZE_MASK));
297
- return MEMPOOL_SLICE2PTR(slice);
298
- alloc_indi:
299
- MEMPOOL_ALLOC_SPECIAL(slice, size);
300
- if (slice) {
301
- slice->offset.ahead = size;
302
- slice->offset.behind = MEMPOOL_INDI_MARKER;
303
- }
304
- return MEMPOOL_SLICE2PTR(slice);
305
- }
306
-
307
- /**
308
- * Frees the memory, releasing it back to the pool (or, sometimes, the
309
- * system).
310
- */
311
- static UNUSED_FUNC void mempool_free(void *ptr) {
312
- if (!ptr)
313
- return;
314
- mempool_reserved_slice_s **pos, *slice = MEMPOOL_PTR2SLICE(ptr), *tmp;
315
-
316
- if (slice->offset.behind == MEMPOOL_INDI_MARKER)
317
- goto alloc_indi;
318
- if ((slice->offset.ahead & MEMPOOL_USED_MARKER) != MEMPOOL_USED_MARKER)
319
- goto error;
320
-
321
- MEMPOOL_LOCK();
322
- slice->offset.ahead &= MEMPOOL_SIZE_MASK;
323
- /* merge slice with upper boundary */
324
- while ((tmp = (mempool_reserved_slice_s *)(((uintptr_t)slice) +
325
- slice->offset.ahead))
326
- ->offset.ahead &&
327
- (tmp->offset.ahead & MEMPOOL_USED_MARKER) == 0) {
328
- /* extract merged slice from list */
329
- if (tmp->next)
330
- tmp->next->prev = tmp->prev;
331
- if (tmp->prev)
332
- tmp->prev->next = tmp->next;
333
- else
334
- mempool_reserved_pool.available = tmp->next;
335
-
336
- tmp->next = NULL;
337
- tmp->prev = NULL;
338
- slice->offset.ahead += tmp->offset.ahead;
339
- }
340
- /* merge slice with lower boundary */
341
- while (slice->offset.behind &&
342
- ((tmp = (mempool_reserved_slice_s *)(((uintptr_t)slice) -
343
- slice->offset.behind))
344
- ->offset.ahead &
345
- MEMPOOL_USED_MARKER) == 0) {
346
- /* extract merged slice from list */
347
- if (tmp->next)
348
- tmp->next->prev = tmp->prev;
349
- if (tmp->prev)
350
- tmp->prev->next = tmp->next;
351
- else
352
- mempool_reserved_pool.available = tmp->next;
353
-
354
- tmp->next = NULL;
355
- tmp->prev = NULL;
356
- tmp->offset.ahead += slice->offset.ahead;
357
-
358
- slice = tmp;
359
- }
360
-
361
- /* return memory to system, if the block is no longer required. */
362
- if (MEMPOOL_RETURN_MEM_TO_SYSTEM && mempool_reserved_pool.available &&
363
- slice->offset.behind == 0 &&
364
- ((mempool_reserved_slice_s *)(((uintptr_t)slice) + slice->offset.ahead))
365
- ->offset.ahead == 0) {
366
- MEMPOOL_UNLOCK();
367
- // fprintf(
368
- // stderr, "DEALLOCATED BLOCK %p, size review %u == %lu %s\n", slice,
369
- // slice->offset.ahead,
370
- // MEMPOOL_BLOCK_SIZE - sizeof(struct mempool_reserved_slice_s_offset),
371
- // (slice->offset.ahead ==
372
- // MEMPOOL_BLOCK_SIZE - sizeof(struct mempool_reserved_slice_s_offset))
373
- // ? "passed."
374
- // : "FAILED.");
375
- MEMPOOL_DEALLOC_SPECIAL(slice, MEMPOOL_BLOCK_SIZE);
376
- return;
377
- }
378
-
379
- /* inform higher neighbor about any updates */
380
- // fprintf(stderr, "slice: %p -> %u\n", slice, slice->offset.ahead);
381
- ((mempool_reserved_slice_s *)(((uintptr_t)slice) + slice->offset.ahead))
382
- ->offset.behind = slice->offset.ahead;
383
-
384
- /* place slice in list */
385
- uint8_t limit = MEMPOOL_ORDERING_LIMIT;
386
- slice->next = NULL;
387
- slice->prev = NULL;
388
- pos = &mempool_reserved_pool.available;
389
- while (limit && *pos && ((*pos)->offset.ahead < slice->offset.ahead)) {
390
- slice->prev = *pos;
391
- pos = &(*pos)->next;
392
- --limit;
393
- }
394
- if (*pos) {
395
- slice->next = *pos;
396
- slice->next->prev = slice;
397
- *pos = slice;
398
- } else {
399
- *pos = slice;
400
- }
401
-
402
- MEMPOOL_UNLOCK();
403
- return;
404
- alloc_indi:
405
- MEMPOOL_DEALLOC_SPECIAL(slice, slice->offset.ahead);
406
- return;
407
- error:
408
- MEMPOOL_UNLOCK();
409
- if ((slice->offset.ahead & MEMPOOL_USED_MARKER) == 0)
410
- fprintf(stderr, "mempool: memory being freed is already free.\n");
411
- else
412
- fprintf(stderr, "mempool: memory allocation data corrupted. possible "
413
- "buffer overflow?\n");
414
- errno = EFAULT;
415
- raise(SIGSEGV); /* support longjmp rescue */
416
- exit(EFAULT);
417
- }
418
- /**
419
- * Behaves the same as the system's `realloc`, attempting to resize the memory
420
- * when possible. On error returns NULL (the old pointer data remains allocated
421
- * and valid) otherwise returns a new pointer (either equal to the old or after
422
- * deallocating the old one).
423
- */
424
- static UNUSED_FUNC void *mempool_realloc(void *ptr, size_t size) {
425
- if (!size)
426
- return NULL;
427
- if (size & 15) {
428
- size = (size & (~16)) + 16;
429
- }
430
- size += sizeof(struct mempool_reserved_slice_s_offset);
431
-
432
- mempool_reserved_slice_s *tmp = NULL, *slice = MEMPOOL_PTR2SLICE(ptr);
433
-
434
- if (slice->offset.behind == MEMPOOL_INDI_MARKER)
435
- goto realloc_indi;
436
- if ((slice->offset.ahead & MEMPOOL_USED_MARKER) != MEMPOOL_USED_MARKER)
437
- goto error;
438
-
439
- MEMPOOL_LOCK();
440
-
441
- slice->offset.ahead &= MEMPOOL_SIZE_MASK;
442
- /* merge slice with upper boundary */
443
- while ((tmp = (mempool_reserved_slice_s *)(((uintptr_t)slice) +
444
- slice->offset.ahead))
445
- ->offset.ahead &&
446
- (tmp->offset.ahead & MEMPOOL_USED_MARKER) == 0) {
447
- /* extract merged slice from list */
448
- if (tmp->next)
449
- tmp->next->prev = tmp->prev;
450
- if (tmp->prev)
451
- tmp->prev->next = tmp->next;
452
- else
453
- mempool_reserved_pool.available = tmp->next;
454
-
455
- tmp->next = NULL;
456
- tmp->prev = NULL;
457
- slice->offset.ahead += tmp->offset.ahead;
458
- }
459
-
460
- /* inform higher neighbor about any updates */
461
- ((mempool_reserved_slice_s *)(((uintptr_t)slice) + slice->offset.ahead))
462
- ->offset.behind = slice->offset.ahead;
463
-
464
- if ((slice->offset.ahead) > size + sizeof(mempool_reserved_slice_s)) {
465
- /* cut the slice in two */
466
- tmp = (mempool_reserved_slice_s *)(((uintptr_t)slice) + size);
467
- tmp->offset.behind = size;
468
- tmp->offset.ahead = slice->offset.ahead - size;
469
- slice->offset.ahead = size;
470
- /* inform higher neighbor about any updates */
471
- ((mempool_reserved_slice_s *)(((uintptr_t)tmp) + tmp->offset.ahead))
472
- ->offset.behind = tmp->offset.ahead;
473
- /* place the new slice in the available memory list */
474
- tmp->next = NULL;
475
- tmp->prev = NULL;
476
- mempool_reserved_slice_s **pos = &mempool_reserved_pool.available;
477
- uint8_t limit = MEMPOOL_ORDERING_LIMIT;
478
- while (limit && *pos && ((*pos)->offset.ahead < tmp->offset.ahead)) {
479
- tmp->prev = *pos;
480
- pos = &(*pos)->next;
481
- --limit;
482
- }
483
- if (*pos) {
484
- tmp->next = *pos;
485
- tmp->next->prev = tmp;
486
- *pos = tmp;
487
- } else {
488
- *pos = tmp;
489
- }
490
-
491
- slice->offset.ahead |= MEMPOOL_USED_MARKER;
492
- MEMPOOL_UNLOCK();
493
- return ptr;
494
- }
495
- slice->offset.ahead |= MEMPOOL_USED_MARKER;
496
- MEMPOOL_UNLOCK();
497
-
498
- if ((slice->offset.ahead & MEMPOOL_SIZE_MASK) < size) {
499
- void *new_mem =
500
- mempool_malloc(size - sizeof(struct mempool_reserved_slice_s_offset));
501
- if (!new_mem)
502
- return NULL;
503
- memcpy(new_mem, ptr, slice->offset.ahead & MEMPOOL_SIZE_MASK);
504
- mempool_free(ptr);
505
- ptr = new_mem;
506
- }
507
- return ptr;
508
-
509
- realloc_indi:
510
- /* indi doesn't shrink */
511
- if (slice->offset.ahead > size)
512
- return ptr;
513
- /* reallocate indi */
514
- void *new_mem =
515
- mempool_malloc(size - sizeof(struct mempool_reserved_slice_s_offset));
516
- if (!new_mem)
517
- return NULL;
518
- memcpy(new_mem, ptr, slice->offset.ahead & MEMPOOL_SIZE_MASK);
519
- mempool_free(ptr);
520
- return new_mem;
521
- error:
522
- errno = EFAULT;
523
- raise(SIGSEGV); /* support longjmp rescue */
524
- exit(EFAULT);
525
- }
526
-
527
- /* *****************************************************************************
528
- ********************************************************************************
529
- TESTING
530
- ********************************************************************************
531
- ***************************************************************************** */
532
-
533
- #if defined(DEBUG) && DEBUG == 1
534
-
535
- #define MEMTEST_SLICE 32
536
-
537
- #include <time.h>
538
- #include <inttypes.h>
539
- static void mempool_stats(void) {
540
- fprintf(stderr, "* Pool object: %lu bytes\n"
541
- "* Alignment: %lu \n"
542
- "* Minimal Allocation Size (including header): %lu\n"
543
- "* Minimal Allocation Space (no header): %lu\n"
544
- "* Header size: %lu\n",
545
- sizeof(mempool_reserved_pool),
546
- sizeof(struct mempool_reserved_slice_s_offset),
547
- sizeof(mempool_reserved_slice_s),
548
- sizeof(mempool_reserved_slice_s) -
549
- sizeof(struct mempool_reserved_slice_s_offset),
550
- sizeof(struct mempool_reserved_slice_s_offset));
551
- }
552
-
553
- static void mempool_speedtest(size_t memtest_repeats, void *(*mlk)(size_t),
554
- void (*fr)(void *),
555
- void *(*ralc)(void *, size_t)) {
556
- void **pntrs = mlk(memtest_repeats * sizeof(*pntrs));
557
- clock_t start, end, mlk_time, fr_time, zr_time;
558
- mlk_time = 0;
559
- fr_time = 0;
560
- zr_time = 0;
561
- struct timespec start_test, end_test;
562
- clock_gettime(CLOCK_MONOTONIC, &start_test);
563
-
564
- start = clock();
565
- for (size_t i = 0; i < memtest_repeats; i++) {
566
- __asm__ volatile("" ::: "memory");
567
- }
568
- end = clock();
569
- mlk_time = end - start;
570
- fprintf(stderr, "* Doing nothing: %lu CPU cycles.\n", mlk_time);
571
-
572
- start = clock();
573
- for (size_t i = 0; i < memtest_repeats; i++) {
574
- // fprintf(stderr, "malloc %lu\n", i);
575
- pntrs[i] = mlk(MEMTEST_SLICE);
576
- *((uint8_t *)pntrs[i]) = 1;
577
- }
578
- end = clock();
579
- mlk_time = end - start;
580
- fprintf(stderr,
581
- "* Allocating %lu consecutive blocks %d each: %lu CPU cycles.\n",
582
- memtest_repeats, MEMTEST_SLICE, mlk_time);
583
-
584
- start = clock();
585
- for (size_t i = 0; i < memtest_repeats; i += 2) {
586
- fr(pntrs[i]);
587
- }
588
- end = clock();
589
- fr_time = end - start;
590
-
591
- start = clock();
592
- for (size_t i = 0; i < memtest_repeats; i += 2) {
593
- pntrs[i] = mlk(MEMTEST_SLICE);
594
- }
595
- end = clock();
596
- mlk_time = end - start;
597
-
598
- fprintf(stderr,
599
- "* Freeing %lu Fragmented (single space) blocks %d each: %lu CPU "
600
- "cycles.\n",
601
- memtest_repeats / 2, MEMTEST_SLICE, fr_time);
602
-
603
- fprintf(stderr, "* Allocating %lu Fragmented (single space) blocks %d "
604
- "bytes each: %lu CPU "
605
- "cycles.\n",
606
- memtest_repeats / 2, MEMTEST_SLICE, mlk_time);
607
-
608
- mlk_time = 0;
609
- fr_time = 0;
610
-
611
- for (size_t xtimes = 0; xtimes < 100; xtimes++) {
612
- start = clock();
613
- for (size_t i = 0; i < memtest_repeats; i += 7) {
614
- fr(pntrs[i]);
615
- }
616
- end = clock();
617
- fr_time += end - start;
618
-
619
- start = clock();
620
- for (size_t i = 0; i < memtest_repeats; i += 7) {
621
- pntrs[i] = mlk(MEMTEST_SLICE);
622
- }
623
- end = clock();
624
- mlk_time += end - start;
625
- }
626
-
627
- fprintf(stderr,
628
- "* 100X Freeing %lu Fragmented (7 spaces) blocks %d each: %lu CPU "
629
- "cycles.\n",
630
- memtest_repeats / 7, MEMTEST_SLICE, fr_time);
631
-
632
- fprintf(stderr, "* 100X Allocating %lu Fragmented (7 spaces) blocks %d "
633
- "bytes each: %lu CPU "
634
- "cycles.\n",
635
- memtest_repeats / 7, MEMTEST_SLICE, mlk_time);
636
-
637
- start = clock();
638
- for (size_t i = 0; i < memtest_repeats; i++) {
639
- memset(pntrs[i], 170, MEMTEST_SLICE);
640
- }
641
- end = clock();
642
- zr_time = end - start;
643
- fprintf(stderr, "* Set bits (0b10) for %lu consecutive blocks %dB "
644
- "each: %lu CPU cycles.\n",
645
- memtest_repeats, MEMTEST_SLICE, zr_time);
646
-
647
- start = clock();
648
- for (size_t i = 0; i < memtest_repeats; i++) {
649
- fr(pntrs[i]);
650
- }
651
- end = clock();
652
- fr_time = end - start;
653
- fprintf(stderr, "* Freeing %lu consecutive blocks %d each: %lu CPU cycles.\n",
654
- memtest_repeats, MEMTEST_SLICE, fr_time);
655
-
656
- start = clock();
657
- for (size_t i = 0; i < memtest_repeats; i++) {
658
- pntrs[i] = mlk(MEMTEST_SLICE);
659
- }
660
- end = clock();
661
- start = clock();
662
- for (size_t i = 0; i < memtest_repeats; i += 2) {
663
- fr(pntrs[i]);
664
- }
665
- end = clock();
666
- mlk_time = end - start;
667
- fprintf(stderr,
668
- "* Freeing every other block %dB X %lu blocks: %lu CPU cycles.\n",
669
- MEMTEST_SLICE, memtest_repeats >> 1, mlk_time);
670
-
671
- start = clock();
672
- for (size_t i = 1; i < memtest_repeats; i += 2) {
673
- pntrs[i] = ralc(pntrs[i], MEMTEST_SLICE << 1);
674
- if (pntrs[i] == NULL)
675
- fprintf(stderr, "REALLOC RETURNED NULL - Memory leaked during test\n");
676
- }
677
- end = clock();
678
- mlk_time = end - start;
679
- fprintf(
680
- stderr,
681
- "* Reallocating every other block %dB X %lu blocks: %lu CPU cycles.\n",
682
- MEMTEST_SLICE, memtest_repeats >> 1, mlk_time);
683
-
684
- start = clock();
685
- for (size_t i = 1; i < memtest_repeats; i += 2) {
686
- fr(pntrs[i]);
687
- }
688
- end = clock();
689
- mlk_time = end - start;
690
- fprintf(stderr,
691
- "* Freeing every other block %dB X %lu blocks: %lu CPU cycles.\n",
692
- MEMTEST_SLICE, memtest_repeats >> 1, mlk_time);
693
-
694
- start = clock();
695
- for (size_t i = 0; i < memtest_repeats; i++) {
696
- pntrs[i] = mlk(MEMTEST_SLICE);
697
- }
698
- end = clock();
699
- mlk_time = end - start;
700
- fprintf(stderr,
701
- "* Allocating %lu consecutive blocks %d each: %lu CPU cycles.\n",
702
- memtest_repeats, MEMTEST_SLICE, mlk_time);
703
- start = clock();
704
- for (size_t i = 0; i < memtest_repeats; i++) {
705
- fr(pntrs[i]);
706
- }
707
- end = clock();
708
- fr_time = end - start;
709
- fprintf(stderr, "* Freeing %lu consecutive blocks %d each: %lu CPU cycles.\n",
710
- memtest_repeats, MEMTEST_SLICE, fr_time);
711
- fprintf(stderr, "* Freeing pointer array %p.\n", (void *)pntrs);
712
- fr(pntrs);
713
-
714
- clock_gettime(CLOCK_MONOTONIC, &end_test);
715
- uint64_t msec_for_test =
716
- (end_test.tv_nsec < start_test.tv_nsec)
717
- ? ((end_test.tv_sec -= 1), (start_test.tv_nsec - end_test.tv_nsec))
718
- : (end_test.tv_nsec - start_test.tv_nsec);
719
- uint64_t sec_for_test = end_test.tv_sec - start_test.tv_sec;
720
-
721
- fprintf(stderr,
722
- "Finished test in %" PRIu64 "m, %" PRIu64 "s %" PRIu64 " mili.sec.\n",
723
- sec_for_test / 60, sec_for_test - (((sec_for_test) / 60) * 60),
724
- msec_for_test / 1000000);
725
- }
726
-
727
- static UNUSED_FUNC void mempool_test(void) {
728
- fprintf(stderr, "*****************************\n");
729
- fprintf(stderr, "mempool implementation details:\n");
730
- mempool_stats();
731
- fprintf(stderr, "*****************************\n");
732
- fprintf(stderr, "System memory test for ~2Mb\n");
733
- mempool_speedtest((2 << 20) / MEMTEST_SLICE, malloc, free, realloc);
734
- fprintf(stderr, "*****************************\n");
735
- fprintf(stderr, " mempool memory test for ~2Mb\n");
736
- mempool_speedtest((2 << 20) / MEMTEST_SLICE, mempool_malloc, mempool_free,
737
- mempool_realloc);
738
- fprintf(stderr, "*****************************\n");
739
- fprintf(stderr, "System memory test for ~4Mb\n");
740
- mempool_speedtest((2 << 21) / MEMTEST_SLICE, malloc, free, realloc);
741
- fprintf(stderr, "*****************************\n");
742
- fprintf(stderr, " mempool memory test for ~4Mb\n");
743
- mempool_speedtest((2 << 21) / MEMTEST_SLICE, mempool_malloc, mempool_free,
744
- mempool_realloc);
745
- fprintf(stderr, "*****************************\n");
746
- fprintf(stderr, "System memory test for ~8Mb\n");
747
- mempool_speedtest((2 << 22) / MEMTEST_SLICE, malloc, free, realloc);
748
- fprintf(stderr, "*****************************\n");
749
- fprintf(stderr, " mempool memory test for ~8Mb\n");
750
- mempool_speedtest((2 << 22) / MEMTEST_SLICE, mempool_malloc, mempool_free,
751
- mempool_realloc);
752
- fprintf(stderr, "*****************************\n");
753
- fprintf(stderr, "System memory test for ~16Mb\n");
754
- mempool_speedtest((2 << 23) / MEMTEST_SLICE, malloc, free, realloc);
755
- fprintf(stderr, "*****************************\n");
756
- fprintf(stderr, " mempool memory test for ~16Mb\n");
757
- mempool_speedtest((2 << 23) / MEMTEST_SLICE, mempool_malloc, mempool_free,
758
- mempool_realloc);
759
- fprintf(stderr, "*****************************\n");
760
-
761
- fprintf(stderr, "*****************************\n");
762
- fprintf(stderr, "Stressing the system\n");
763
- fprintf(stderr, "*****************************\n");
764
- size_t repeat = 1024 * 1024 * 16;
765
- size_t unit = 16;
766
- struct timespec start_test, end_test;
767
- clock_t start, end;
768
- fprintf(stderr, "Stress allocation/deallocation using "
769
- "1:5 fragmentation of ~134Mb:\n");
770
- while (repeat >= 1024) {
771
- fprintf(stderr, " * %lu X %lu bytes", repeat, unit);
772
- clock_gettime(CLOCK_MONOTONIC, &start_test);
773
- start = clock();
774
- void **ptrs = mempool_malloc(repeat * sizeof(void *));
775
- for (size_t i = 0; i < repeat; i++) {
776
- ptrs[i] = mempool_malloc(unit);
777
- }
778
- for (size_t i = 0; i < repeat; i += 5) {
779
- mempool_free(ptrs[i]);
780
- }
781
- for (size_t i = 1; i < repeat; i += 5) {
782
- mempool_free(ptrs[i]);
783
- }
784
- for (size_t i = 2; i < repeat; i += 5) {
785
- mempool_free(ptrs[i]);
786
- }
787
- for (size_t i = 3; i < repeat; i += 5) {
788
- mempool_free(ptrs[i]);
789
- }
790
- for (size_t i = 4; i < repeat; i += 5) {
791
- mempool_free(ptrs[i]);
792
- }
793
- for (size_t i = 0; i < repeat; i++) {
794
- ptrs[i] = mempool_malloc(unit);
795
- }
796
- for (size_t i = 0; i < repeat; i++) {
797
- mempool_free(ptrs[i]);
798
- }
799
- mempool_free(ptrs);
800
- end = clock();
801
- clock_gettime(CLOCK_MONOTONIC, &end_test);
802
- uint64_t msec_for_test =
803
- (end_test.tv_nsec < start_test.tv_nsec)
804
- ? ((end_test.tv_sec -= 1), (start_test.tv_nsec - end_test.tv_nsec))
805
- : (end_test.tv_nsec - start_test.tv_nsec);
806
- uint64_t sec_for_test = end_test.tv_sec - start_test.tv_sec;
807
-
808
- fprintf(stderr,
809
- " %" PRIu64 "m, %" PRIu64 "s %" PRIu64 " mili.sec. ( %lu CPU)\n",
810
- sec_for_test / 60, sec_for_test - (((sec_for_test) / 60) * 60),
811
- msec_for_test / 1000000, end - start);
812
-
813
- unit <<= 1;
814
- repeat >>= 1;
815
- }
816
- }
817
-
818
- #undef MEMTEST_SLICE
819
-
820
- #endif
821
-
822
- /* *****************************************************************************
823
- Cleanup
824
- */
825
- #undef MEMPOOL_BLOCK_SIZE
826
- #undef MEMPOOL_ALLOC_SPECIAL
827
- #undef MEMPOOL_DEALLOC_SPECIAL
828
- #undef MEMPOOL_SIZE_MASK
829
- #undef MEMPOOL_USED_MARKER
830
- #undef MEMPOOL_INDI_MARKER
831
- #undef MEMPOOL_SLICE2PTR
832
- #undef MEMPOOL_PTR2SLICE
833
- #undef MEMPOOL_LOCK
834
- #undef MEMPOOL_UNLOCK
835
- #undef MEMPOOL_ORDERING_LIMIT
836
- #endif