iodine 0.3.6 → 0.4.0

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of iodine might be problematic. Click here for more details.

Files changed (74) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +46 -0
  3. data/LIMITS.md +25 -0
  4. data/README.md +39 -80
  5. data/SPEC-Websocket-Draft.md +129 -4
  6. data/bin/echo +2 -2
  7. data/bin/http-hello +1 -0
  8. data/bin/updated api +113 -0
  9. data/bin/ws-echo +0 -1
  10. data/examples/broadcast.ru +56 -0
  11. data/examples/echo.ru +57 -0
  12. data/examples/hello.ru +30 -0
  13. data/examples/redis.ru +69 -0
  14. data/examples/shootout.ru +53 -0
  15. data/exe/iodine +2 -80
  16. data/ext/iodine/defer.c +11 -5
  17. data/ext/iodine/empty.h +26 -0
  18. data/ext/iodine/evio.h +1 -1
  19. data/ext/iodine/facil.c +103 -61
  20. data/ext/iodine/facil.h +20 -12
  21. data/ext/iodine/fio_dict.c +446 -0
  22. data/ext/iodine/fio_dict.h +90 -0
  23. data/ext/iodine/fio_hash_table.h +370 -0
  24. data/ext/iodine/fio_list.h +30 -3
  25. data/ext/iodine/http.c +169 -37
  26. data/ext/iodine/http.h +33 -10
  27. data/ext/iodine/http1.c +78 -42
  28. data/ext/iodine/http_request.c +6 -0
  29. data/ext/iodine/http_request.h +3 -0
  30. data/ext/iodine/http_response.c +43 -11
  31. data/ext/iodine/iodine.c +380 -0
  32. data/ext/iodine/iodine.h +62 -0
  33. data/ext/iodine/iodine_helpers.c +235 -0
  34. data/ext/iodine/iodine_helpers.h +13 -0
  35. data/ext/iodine/iodine_http.c +409 -241
  36. data/ext/iodine/iodine_http.h +7 -14
  37. data/ext/iodine/iodine_protocol.c +626 -0
  38. data/ext/iodine/iodine_protocol.h +13 -0
  39. data/ext/iodine/iodine_pubsub.c +646 -0
  40. data/ext/iodine/iodine_pubsub.h +27 -0
  41. data/ext/iodine/iodine_websockets.c +796 -0
  42. data/ext/iodine/iodine_websockets.h +19 -0
  43. data/ext/iodine/pubsub.c +544 -0
  44. data/ext/iodine/pubsub.h +215 -0
  45. data/ext/iodine/random.c +4 -4
  46. data/ext/iodine/rb-call.c +1 -5
  47. data/ext/iodine/rb-defer.c +3 -20
  48. data/ext/iodine/rb-rack-io.c +22 -22
  49. data/ext/iodine/rb-rack-io.h +3 -4
  50. data/ext/iodine/rb-registry.c +111 -118
  51. data/ext/iodine/redis_connection.c +277 -0
  52. data/ext/iodine/redis_connection.h +77 -0
  53. data/ext/iodine/redis_engine.c +398 -0
  54. data/ext/iodine/redis_engine.h +68 -0
  55. data/ext/iodine/resp.c +842 -0
  56. data/ext/iodine/resp.h +253 -0
  57. data/ext/iodine/sock.c +26 -12
  58. data/ext/iodine/sock.h +14 -3
  59. data/ext/iodine/spnlock.inc +19 -2
  60. data/ext/iodine/websockets.c +299 -11
  61. data/ext/iodine/websockets.h +159 -6
  62. data/lib/iodine.rb +104 -1
  63. data/lib/iodine/cli.rb +106 -0
  64. data/lib/iodine/monkeypatch.rb +40 -0
  65. data/lib/iodine/pubsub.rb +70 -0
  66. data/lib/iodine/version.rb +1 -1
  67. data/lib/iodine/websocket.rb +12 -0
  68. data/lib/rack/handler/iodine.rb +33 -7
  69. metadata +35 -7
  70. data/ext/iodine/iodine_core.c +0 -760
  71. data/ext/iodine/iodine_core.h +0 -79
  72. data/ext/iodine/iodine_websocket.c +0 -551
  73. data/ext/iodine/iodine_websocket.h +0 -22
  74. data/lib/iodine/http.rb +0 -4
@@ -0,0 +1,215 @@
1
+ /*
2
+ Copyright: Boaz Segev, 2016-2017
3
+ License: MIT
4
+
5
+ Feel free to copy, use and enjoy according to the license provided.
6
+ */
7
+ #ifndef H_FACIL_PUBSUB_H
8
+ /**
9
+ This pub/sub API is designed to unload pub/sub stress from external messaging
10
+ systems onto the local process.
11
+
12
+ For example, the NULL pub/sub engine, which is routed to the facil_cluster
13
+ engine, will only publish a single message per process instead of a message per
14
+ client, allowing the cluster communication channel to be less crowded when
15
+ possible.
16
+
17
+ This should allow pub/sub engines, such as Redis, to spread their workload
18
+ between all of an application's processes, enhancing overall performance.
19
+ */
20
+ #define H_FACIL_PUBSUB_H
21
+ #include "facil.h"
22
+
23
+ #ifndef FIO_PUBBSUB_MAX_CHANNEL_LEN
24
+ #define FIO_PUBBSUB_MAX_CHANNEL_LEN 1024
25
+ #endif
26
+
27
+ /** An opaque pointer used to identify a subscription. */
28
+ typedef struct pubsub_sub_s *pubsub_sub_pt;
29
+
30
+ /** A pub/sub engine data structure. See details later on. */
31
+ typedef struct pubsub_engine_s pubsub_engine_s;
32
+
33
+ /** The information a "client" (callback) receives. */
34
+ typedef struct pubsub_message_s {
35
+ /** The pub/sub engine forwarding this message. */
36
+ pubsub_engine_s const *engine;
37
+ /** The pub/sub target channel. */
38
+ struct {
39
+ char *name;
40
+ uint32_t len;
41
+ } channel;
42
+ /** The pub/sub message. */
43
+ struct {
44
+ char *data;
45
+ uint32_t len;
46
+ } msg;
47
+ /** indicates that pattern matching was used. */
48
+ unsigned use_pattern : 1;
49
+ /** The subscription that prompted the message to be routed to the client. */
50
+ pubsub_sub_pt subscription;
51
+ /** Client opaque data pointer (from the `subscribe`) function call. */
52
+ void *udata1;
53
+ /** Client opaque data pointer (from the `subscribe`) function call. */
54
+ void *udata2;
55
+ } pubsub_message_s;
56
+
57
+ /** The arguments used for `pubsub_subscribe` or `pubsub_find_sub`. */
58
+ struct pubsub_subscribe_args {
59
+ /** The pub/sub engine to use. NULL defaults to the local cluster engine. */
60
+ pubsub_engine_s const *engine;
61
+ /** The channel to subscribe to. */
62
+ struct {
63
+ char *name;
64
+ uint32_t len;
65
+ } channel;
66
+ /** The on message callback. the `*msg` pointer is to a temporary object. */
67
+ void (*on_message)(pubsub_message_s *msg);
68
+ /** An optional callback for when a subscription is fully canceled. */
69
+ void (*on_unsubscribe)(void *udata1, void *udata2);
70
+ /** Opaque user data#1 */
71
+ void *udata1;
72
+ /** Opaque user data#2 .. using two allows allocation to be avoided. */
73
+ void *udata2;
74
+ /** Use pattern matching for channel subscription. */
75
+ unsigned use_pattern : 1;
76
+ };
77
+
78
+ /** The arguments used for `pubsub_publish`. */
79
+ struct pubsub_publish_args {
80
+ /** The pub/sub engine to use. NULL defaults to the local cluster engine. */
81
+ pubsub_engine_s const *engine;
82
+ /** The channel to publish to. */
83
+ struct {
84
+ char *name;
85
+ uint32_t len;
86
+ } channel;
87
+ /** The data being pushed. */
88
+ struct {
89
+ char *data;
90
+ uint32_t len;
91
+ } msg;
92
+ /** Use pattern matching for channel publication. */
93
+ unsigned use_pattern : 1;
94
+ /**
95
+ * Push the message to the whole cluster, using the cluster engine.
96
+ * Always TRUE unless an engine was specified.
97
+ */
98
+ unsigned push2cluster : 1;
99
+ };
100
+
101
+ /**
102
+ * Subscribes to a specific channel.
103
+ *
104
+ * Returns a subscription pointer or NULL (failure).
105
+ */ pubsub_sub_pt pubsub_subscribe(struct pubsub_subscribe_args);
106
+ #define pubsub_subscribe(...) \
107
+ pubsub_subscribe((struct pubsub_subscribe_args){__VA_ARGS__})
108
+
109
+ /**
110
+ * This helper searches for an existing subscription.
111
+ *
112
+ * Use with care, NEVER call `pubsub_unsubscribe` more times than you have
113
+ * called `pubsub_subscribe`, since the subscription handle memory is released
114
+ * once the reference count reaches 0.
115
+ *
116
+ * Returns a subscription pointer or NULL (none found).
117
+ */
118
+ pubsub_sub_pt pubsub_find_sub(struct pubsub_subscribe_args);
119
+ #define pubsub_find_sub(...) \
120
+ pubsub_find_sub((struct pubsub_subscribe_args){__VA_ARGS__})
121
+
122
+ /**
123
+ * Unsubscribes from a specific channel.
124
+ *
125
+ * Returns 0 on success and -1 on failure.
126
+ */
127
+ void pubsub_unsubscribe(pubsub_sub_pt subscription);
128
+
129
+ /**
130
+ * Publishes a message to a channel belonging to a pub/sub service (engine).
131
+ *
132
+ * Returns 0 on success and -1 on failure.
133
+ */
134
+ int pubsub_publish(struct pubsub_publish_args);
135
+ #define pubsub_publish(...) \
136
+ pubsub_publish((struct pubsub_publish_args){__VA_ARGS__})
137
+
138
+ /**
139
+ * defers message handling if it can't be performed (i.e., resource is busy) or
140
+ * should be fragmented (allowing large tasks to be broken down).
141
+ *
142
+ * This should only be called from within the `on_message` callback.
143
+ *
144
+ * It's recommended that the `on_message` callback return immediately following
145
+ * this function call, as code might run concurrently.
146
+ *
147
+ * Uses reference counting for zero copy.
148
+ *
149
+ * It's impossible to use a different `on_message` callback without resorting to
150
+ * memory allocations... so when in need, manage routing within the
151
+ * `on_message` callback.
152
+ */
153
+ void pubsub_defer(pubsub_message_s *msg);
154
+
155
+ /**
156
+ * Pub/Sub services (engines) MUST provide the listed function pointers.
157
+ *
158
+ * When an engine receives a message to publish, it should call the
159
+ * `pubsub_eng_distribute` function. i.e.:
160
+ *
161
+ * pubsub_engine_distribute(
162
+ * .engine = self,
163
+ * .channel.name = "channel 1",
164
+ * .channel.len = 9,
165
+ * .msg.data = "hello",
166
+ * .msg.len = 5,
167
+ * .push2cluster = self->push2cluster,
168
+ * .use_pattern = 0 );
169
+ *
170
+ * Engines MUST survive until the pub/sub service is finished using them and
171
+ * there are no more subscriptions.
172
+ */
173
+ struct pubsub_engine_s {
174
+ /** Should return 0 on success and -1 on failure. */
175
+ int (*subscribe)(const pubsub_engine_s *eng, const char *ch, size_t ch_len,
176
+ uint8_t use_pattern);
177
+ /** Return value is ignored. */
178
+ void (*unsubscribe)(const pubsub_engine_s *eng, const char *ch, size_t ch_len,
179
+ uint8_t use_pattern);
180
+ /** Should return 0 on success and -1 on failure. */
181
+ int (*publish)(const pubsub_engine_s *eng, const char *ch, size_t ch_len,
182
+ const char *msg, size_t msg_len, uint8_t use_pattern);
183
+ /** Set to TRUE (1) if published messages should propagate to the cluster. */
184
+ unsigned push2cluster : 1;
185
+ };
186
+
187
+ /** The default pub/sub engine. */
188
+ extern const pubsub_engine_s *PUBSUB_CLUSTER_ENGINE;
189
+
190
+ /** An engine that performs pub/sub only within a single process. */
191
+ extern const pubsub_engine_s *PUBSUB_PROCESS_ENGINE;
192
+
193
+ /** Allows process wide changes to the default Pub/Sub Engine. */
194
+ extern pubsub_engine_s *PUBSUB_DEFAULT_ENGINE;
195
+
196
+ /**
197
+ * The function used by engines to distribute received messages.
198
+ * The `udata*` and `subscription` fields are ignored.
199
+ */
200
+ void pubsub_engine_distribute(pubsub_message_s msg);
201
+ #define pubsub_engine_distribute(...) \
202
+ pubsub_engine_distribute((pubsub_message_s){__VA_ARGS__})
203
+
204
+ /**
205
+ * Engines can ask facil.io to resubscribe to all active channels.
206
+ *
207
+ * This allows engines that lost their connection to their Pub/Sub service to
208
+ * resubscribe all the currently active channels with the new connection.
209
+ *
210
+ * CAUTION: This is an evented task... try not to free the engine's memory while
211
+ * resubscriptions are under way...
212
+ */
213
+ void pubsub_engine_resubscribe(pubsub_engine_s *eng);
214
+
215
+ #endif /* H_FACIL_PUBSUB_H */
@@ -173,10 +173,10 @@ static void init_rand_fd(void) {
173
173
  return ret; \
174
174
  }
175
175
  /* rand functions */
176
- MAKE_RAND_FUNC(uint32_t, bscrypt_rand32);
177
- MAKE_RAND_FUNC(uint64_t, bscrypt_rand64);
178
- MAKE_RAND_FUNC(bits128_u, bscrypt_rand128);
179
- MAKE_RAND_FUNC(bits256_u, bscrypt_rand256);
176
+ MAKE_RAND_FUNC(uint32_t, bscrypt_rand32)
177
+ MAKE_RAND_FUNC(uint64_t, bscrypt_rand64)
178
+ MAKE_RAND_FUNC(bits128_u, bscrypt_rand128)
179
+ MAKE_RAND_FUNC(bits256_u, bscrypt_rand256)
180
180
  /* clear template */
181
181
  #undef MAKE_RAND_FUNC
182
182
 
@@ -111,9 +111,5 @@ static VALUE call_arg(VALUE obj, ID method, int argc, VALUE *argv) {
111
111
  ////////////////////////////////////////////////////////////////////////////
112
112
  // the API interface
113
113
  struct _Ruby_Method_Caller_Class_ RubyCaller = {
114
- .call = call,
115
- .call2 = call_arg,
116
- .call_c = call_c,
117
- // .leave_gvl = leave_gvl,
118
- .in_gvl = check_in_gvl,
114
+ .call = call, .call2 = call_arg, .call_c = call_c, .in_gvl = check_in_gvl,
119
115
  };
@@ -58,7 +58,7 @@ void *defer_new_thread(void *(*thread_func)(void *), pool_pt pool) {
58
58
  return NULL;
59
59
  *data = (struct CreateThreadArgs){.thread_func = thread_func, .arg = pool};
60
60
  void *thr = rb_thread_call_with_gvl(create_ruby_thread_gvl, data);
61
- if (thr == (void *)Qnil)
61
+ if (!thr || thr == (void *)Qnil || thr == (void *)Qfalse)
62
62
  thr = NULL;
63
63
  return thr;
64
64
  }
@@ -67,26 +67,9 @@ void *defer_new_thread(void *(*thread_func)(void *), pool_pt pool) {
67
67
  OVERRIDE THIS to replace the default pthread implementation.
68
68
  */
69
69
  int defer_join_thread(void *thr) {
70
+ if (!thr || (VALUE)thr == Qfalse || (VALUE)thr == Qnil)
71
+ return -1;
70
72
  rb_thread_call_with_gvl(_inner_join_with_rbthread, (void *)thr);
71
73
  Registry.remove((VALUE)thr);
72
74
  return 0;
73
75
  }
74
-
75
- /******************************************************************************
76
- Portability - used to help port this to different frameworks (i.e. Ruby).
77
- */
78
-
79
- #define THREAD_TYPE VALUE
80
-
81
- /* Don't use sentinals with Ruby */
82
- #ifndef ASYNC_USE_SENTINEL
83
- #define ASYNC_USE_SENTINEL 0
84
- #endif
85
-
86
- /* The unused directive */
87
- #ifndef UNUSED_FUNC
88
- #define UNUSED_FUNC __attribute__((unused))
89
- #endif
90
-
91
- /* used here but declared elsewhere */
92
- void async_signal();
@@ -4,20 +4,20 @@ License: MIT
4
4
 
5
5
  Feel free to copy, use and enjoy according to the license provided.
6
6
  */
7
- // clang-format off
8
7
  #include "rb-rack-io.h"
9
- #include "iodine_core.h"
10
- #include <ruby/io.h>
8
+
9
+ #include "iodine.h"
10
+
11
11
  #include <ruby/encoding.h>
12
+ #include <ruby/io.h>
12
13
  #include <unistd.h>
13
- // clang-format on
14
14
 
15
15
  #ifndef _GNU_SOURCE
16
16
  #define _GNU_SOURCE
17
17
  #endif
18
18
  #include "rb-call.h"
19
19
 
20
- /* RackIO manages a minimal interface to act as an IO wrapper according to
20
+ /* IodineRackIO manages a minimal interface to act as an IO wrapper according to
21
21
  these Rack specifications:
22
22
 
23
23
  The input stream is an IO-like object which contains the raw HTTP POST data.
@@ -63,10 +63,10 @@ static VALUE TCPSOCKET_CLASS;
63
63
  static ID for_fd_id;
64
64
 
65
65
  #define set_uuid(object, request) \
66
- rb_ivar_set((object), fd_var_id, ULONG2NUM((request)->fd))
66
+ rb_ivar_set((object), iodine_fd_var_id, ULONG2NUM((request)->fd))
67
67
 
68
68
  inline static intptr_t get_uuid(VALUE obj) {
69
- VALUE i = rb_ivar_get(obj, fd_var_id);
69
+ VALUE i = rb_ivar_get(obj, iodine_fd_var_id);
70
70
  return (intptr_t)FIX2ULONG(i);
71
71
  }
72
72
 
@@ -109,7 +109,7 @@ static VALUE strio_gets(VALUE self) {
109
109
  while ((pos_e < end) && str[pos_e] != '\n')
110
110
  pos_e++;
111
111
  set_pos(self, pos_e + 1);
112
- return rb_enc_str_new(str + pos, pos_e - pos, BinaryEncoding);
112
+ return rb_enc_str_new(str + pos, pos_e - pos, IodineBinaryEncoding);
113
113
  }
114
114
 
115
115
  // Reads data from the IO, according to the Rack specifications for `#read`.
@@ -154,10 +154,10 @@ static VALUE strio_read(int argc, VALUE *argv, VALUE self) {
154
154
  if (buffer == Qnil) {
155
155
  buffer = rb_str_buf_new(len);
156
156
  // make sure the buffer is binary encoded.
157
- rb_enc_associate(buffer, BinaryEncoding);
157
+ rb_enc_associate(buffer, IodineBinaryEncoding);
158
158
  } else {
159
159
  // make sure the buffer is binary encoded.
160
- rb_enc_associate(buffer, BinaryEncoding);
160
+ rb_enc_associate(buffer, IodineBinaryEncoding);
161
161
  if (rb_str_capacity(buffer) < (size_t)len)
162
162
  rb_str_resize(buffer, len);
163
163
  }
@@ -229,7 +229,7 @@ static VALUE tfio_gets(VALUE self) {
229
229
  if (pos > pos_e) {
230
230
  buffer = rb_str_buf_new(pos_e - pos);
231
231
  // make sure the buffer is binary encoded.
232
- rb_enc_associate(buffer, BinaryEncoding);
232
+ rb_enc_associate(buffer, IodineBinaryEncoding);
233
233
  if (pread(fd, RSTRING_PTR(buffer), pos_e - pos, pos) < 0)
234
234
  return Qnil;
235
235
  rb_str_set_len(buffer, pos_e - pos);
@@ -281,10 +281,10 @@ static VALUE tfio_read(int argc, VALUE *argv, VALUE self) {
281
281
  if (buffer == Qnil) {
282
282
  buffer = rb_str_buf_new(len);
283
283
  // make sure the buffer is binary encoded.
284
- rb_enc_associate(buffer, BinaryEncoding);
284
+ rb_enc_associate(buffer, IodineBinaryEncoding);
285
285
  } else {
286
286
  // make sure the buffer is binary encoded.
287
- rb_enc_associate(buffer, BinaryEncoding);
287
+ rb_enc_associate(buffer, IodineBinaryEncoding);
288
288
  if (rb_str_capacity(buffer) < (size_t)len)
289
289
  rb_str_resize(buffer, len);
290
290
  }
@@ -322,9 +322,9 @@ Hijacking
322
322
  */
323
323
 
324
324
  // defined by iodine_http
325
- extern VALUE R_HIJACK; // for Rack: rack.hijack
326
- extern VALUE R_HIJACK_CB; // for Rack: rack.hijack
327
- extern VALUE R_HIJACK_IO; // for Rack: rack.hijack_io
325
+ extern VALUE IODINE_R_HIJACK; // for Rack: rack.hijack
326
+ extern VALUE IODINE_R_HIJACK_CB; // for Rack: rack.hijack
327
+ extern VALUE IODINE_R_HIJACK_IO; // for Rack: rack.hijack_io
328
328
 
329
329
  static VALUE rio_get_io(int argc, VALUE *argv, VALUE self) {
330
330
  if (TCPSOCKET_CLASS == Qnil)
@@ -334,15 +334,15 @@ static VALUE rio_get_io(int argc, VALUE *argv, VALUE self) {
334
334
  VALUE fd = INT2FIX(sock_uuid2fd(fduuid));
335
335
  VALUE env = rb_ivar_get(self, env_id);
336
336
  // make sure we're not repeating ourselves
337
- VALUE new_io = rb_hash_aref(env, R_HIJACK_IO);
337
+ VALUE new_io = rb_hash_aref(env, IODINE_R_HIJACK_IO);
338
338
  if (new_io != Qnil)
339
339
  return new_io;
340
340
  // VALUE new_io = how the fuck do we create a new IO from the fd?
341
341
  new_io = RubyCaller.call2(TCPSOCKET_CLASS, for_fd_id, 1,
342
342
  &fd); // TCPSocket.for_fd(fd) ... cool...
343
- rb_hash_aset(env, R_HIJACK_IO, new_io);
343
+ rb_hash_aset(env, IODINE_R_HIJACK_IO, new_io);
344
344
  if (argc)
345
- rb_hash_aset(env, R_HIJACK_CB, *argv);
345
+ rb_hash_aset(env, IODINE_R_HIJACK_CB, *argv);
346
346
  return new_io;
347
347
  }
348
348
 
@@ -354,11 +354,11 @@ C land API
354
354
  static VALUE new_rack_io(http_request_s *request, VALUE env) {
355
355
  VALUE rack_io;
356
356
  if (request->body_file > 0) {
357
- rack_io = rb_funcall2(rRackFileIO, new_func_id, 0, NULL);
357
+ rack_io = rb_funcall2(rRackFileIO, iodine_new_func_id, 0, NULL);
358
358
  rb_ivar_set(rack_io, io_id, ULONG2NUM(request->body_file));
359
359
  lseek(request->body_file, 0, SEEK_SET);
360
360
  } else {
361
- rack_io = rb_funcall2(rRackStrIO, new_func_id, 0, NULL);
361
+ rack_io = rb_funcall2(rRackStrIO, iodine_new_func_id, 0, NULL);
362
362
  rb_ivar_set(rack_io, io_id, ULONG2NUM(((intptr_t)request->body_str)));
363
363
  // fprintf(stderr, "rack body IO (%lu, %p):%.*s\n", request->content_length,
364
364
  // request->body_str, (int)request->content_length,
@@ -402,6 +402,6 @@ static void init_rack_io(void) {
402
402
 
403
403
  ////////////////////////////////////////////////////////////////////////////
404
404
  // the API interface
405
- struct _RackIO_ RackIO = {
405
+ struct IodineRackIO IodineRackIO = {
406
406
  .create = new_rack_io, .init = init_rack_io,
407
407
  };
@@ -6,15 +6,14 @@ Feel free to copy, use and enjoy according to the license provided.
6
6
  */
7
7
  #ifndef RUBY_RACK_IO_H
8
8
  #define RUBY_RACK_IO_H
9
- // clang-format off
9
+
10
10
  #include <ruby.h>
11
- // clang-format on
12
11
 
13
12
  #include "http_request.h"
14
13
 
15
- extern struct _RackIO_ {
14
+ extern struct IodineRackIO {
16
15
  VALUE (*create)(http_request_s *request, VALUE env);
17
16
  void (*init)(void);
18
- } RackIO;
17
+ } IodineRackIO;
19
18
 
20
19
  #endif /* RUBY_RACK_IO_H */
@@ -9,164 +9,156 @@ Feel free to copy, use and enjoy according to the license provided.
9
9
 
10
10
  #include "spnlock.inc"
11
11
 
12
- // #define RUBY_REG_DBG
12
+ #include "fio_hash_table.h"
13
+ #include <signal.h>
13
14
 
15
+ // #define RUBY_REG_DBG
16
+ #ifndef REGISTRY_POOL_SIZE
14
17
  #define REGISTRY_POOL_SIZE 1024
15
- // the references struct (bin-tree)
16
- struct Object {
17
- struct Object *next;
18
- VALUE obj;
19
- int count;
20
- };
18
+ #endif
19
+
20
+ #ifndef RUBY_REG_DBG
21
+ #define RUBY_REG_DBG 0
22
+ #endif
21
23
 
22
- // the registry global
23
- static struct Registry {
24
- struct Object pool_mem[REGISTRY_POOL_SIZE];
25
- struct Object *obj_pool;
26
- struct Object *first;
24
+ typedef struct {
25
+ union {
26
+ fio_list_s pool;
27
+ fio_ht_node_s node;
28
+ };
29
+ VALUE obj;
30
+ volatile uint64_t ref;
31
+ } obj_s;
32
+
33
+ // the registry state keeper
34
+ static struct {
35
+ obj_s pool_mem[REGISTRY_POOL_SIZE];
36
+ fio_list_s pool;
37
+ fio_ht_s store;
27
38
  VALUE owner;
28
39
  spn_lock_i lock;
29
- } registry = {
30
- .obj_pool = NULL, .first = NULL, .owner = 0, .lock = SPN_LOCK_INIT};
40
+ } registry = {.pool = FIO_LIST_INIT_STATIC(registry.pool),
41
+ .store = FIO_HASH_TABLE_STATIC(registry.store),
42
+ .owner = 0,
43
+ .lock = SPN_LOCK_INIT};
31
44
 
32
45
  #define try_lock_registry() spn_trylock(&registry.lock)
33
46
  #define unlock_registry() spn_unlock(&registry.lock)
34
47
  #define lock_registry() spn_lock(&registry.lock)
35
48
 
36
- inline static void free_node(struct Object *to_free) {
49
+ inline static void free_node(obj_s *to_free) {
37
50
  if (to_free >= registry.pool_mem &&
38
- (intptr_t)to_free <= (intptr_t)(&registry.obj_pool)) {
39
- to_free->next = registry.obj_pool;
40
- registry.obj_pool = to_free;
41
- } else {
51
+ (intptr_t)to_free <= (intptr_t)(&registry.pool))
52
+ fio_list_push(obj_s, pool, registry.pool, to_free);
53
+ else
42
54
  free(to_free);
43
- }
44
55
  }
45
56
 
46
- // add an object to the registry
47
- //
48
- // allow multiple registrartions (bag)
49
- static VALUE register_object(VALUE obj) {
50
- if (!obj || obj == Qnil)
57
+ /** adds an object to the registry or increases it's reference count. */
58
+ static VALUE register_object(VALUE ruby_obj) {
59
+ if (!ruby_obj || ruby_obj == Qnil || ruby_obj == Qfalse)
51
60
  return 0;
52
- struct Object *line = registry.first;
53
61
  lock_registry();
54
- while (line) {
55
- if (line->obj == obj) {
56
- line->count++;
57
- goto finish;
58
- }
59
- line = line->next;
60
- }
61
- if (registry.obj_pool) {
62
- line = registry.obj_pool;
63
- registry.obj_pool = registry.obj_pool->next;
64
- } else {
65
- line = malloc(sizeof(struct Object));
62
+ obj_s *obj = (void *)fio_ht_find(&registry.store, (uint64_t)ruby_obj);
63
+ if (obj) {
64
+ obj = fio_node2obj(obj_s, node, obj);
65
+ #if RUBY_REG_DBG == 1
66
+ fprintf(stderr, "Ruby Registry: register %p ref: %" PRIu64 " + 1\n",
67
+ (void *)ruby_obj, obj->ref);
68
+ #endif
69
+ goto exists;
66
70
  }
67
- if (line == NULL) {
71
+ #if RUBY_REG_DBG == 1
72
+ fprintf(stderr, "Ruby Registry: register %p\n", (void *)ruby_obj);
73
+ #endif
74
+ obj = fio_list_pop(obj_s, pool, registry.pool);
75
+ if (!obj)
76
+ obj = malloc(sizeof(obj_s));
77
+ if (!obj) {
68
78
  perror("No Memory!");
79
+ kill(0, SIGINT);
69
80
  exit(1);
70
81
  }
71
- line->obj = obj;
72
- line->next = registry.first;
73
- line->count = 1;
74
- registry.first = line;
75
- finish:
82
+ *obj = (obj_s){.obj = ruby_obj};
83
+ fio_ht_add(&registry.store, &obj->node, (uint64_t)ruby_obj);
84
+ exists:
85
+ spn_add(&obj->ref, 1);
86
+
76
87
  unlock_registry();
77
- return obj;
88
+ return ruby_obj;
78
89
  }
79
90
 
80
- // free a single registry
81
- //
82
- // free only one.
83
- static void unregister_object(VALUE obj) {
84
- if (!obj || obj == Qnil)
91
+ /** decreases an object's reference count or removes if from the registry. */
92
+ static void unregister_object(VALUE ruby_obj) {
93
+ if (!ruby_obj || ruby_obj == Qnil)
85
94
  return;
86
95
  lock_registry();
87
- struct Object **line = &registry.first;
88
- while (*line) {
89
- if ((*line)->obj == obj) {
90
- (*line)->count -= 1;
91
- if ((*line)->count <= 0) {
92
- struct Object *to_free = *line;
93
- *line = (*line)->next;
94
- free_node(to_free);
95
- goto finish;
96
- }
97
- }
98
- line = &((*line)->next);
96
+ obj_s *obj = (void *)fio_ht_find(&registry.store, (uint64_t)ruby_obj);
97
+ if (!obj) {
98
+ #if RUBY_REG_DBG == 1
99
+ fprintf(stderr, "Ruby Registry: unregister - NOT FOUND %p\n",
100
+ (void *)ruby_obj);
101
+ #endif
102
+ goto finish;
103
+ }
104
+ obj = fio_node2obj(obj_s, node, obj);
105
+ if (spn_sub(&obj->ref, 1)) {
106
+ unlock_registry();
107
+ #if RUBY_REG_DBG == 1
108
+ fprintf(stderr, "Ruby Registry: unregistered %p ref: %" PRIu64 " \n",
109
+ (void *)ruby_obj, obj->ref);
110
+ #endif
111
+ return;
99
112
  }
113
+ fio_ht_remove(&obj->node);
114
+ free_node(obj);
100
115
  finish:
101
116
  unlock_registry();
117
+ #if RUBY_REG_DBG == 1
118
+ fprintf(stderr, "Ruby Registry: unregistered %p\n", (void *)ruby_obj);
119
+ #endif
102
120
  }
103
121
 
104
- // // Replaces one registry object with another,
105
- // // allowing updates to the Registry with no memory allocations.
106
- // //
107
- // // returns 0 if all OK, returns -1 if it couldn't replace the object.
108
- // static int replace_object(VALUE obj, VALUE new_obj) {
109
- // int ret = -1;
110
- // if (obj == new_obj)
111
- // return 0;
112
- // pthread_mutex_lock(&registry_lock);
113
- // struct Object* line = registry.first;
114
- // while (line) {
115
- // if (line->obj == obj) {
116
- // line->obj = new_obj;
117
- // ret = 0;
118
- // goto finish;
119
- // }
120
- // line = line->next;
121
- // }
122
- // finish:
123
- // pthread_mutex_unlock(&registry_lock);
124
- // return ret;
125
- // }
126
-
127
- // a callback for the GC (marking active objects)
122
+ /* a callback for the GC (marking active objects) */
128
123
  static void registry_mark(void *ignore) {
129
124
  (void)ignore;
130
- #ifdef RUBY_REG_DBG
125
+ #if RUBY_REG_DBG == 1
131
126
  Registry.print();
132
127
  #endif
133
128
  lock_registry();
134
- struct Object *line = registry.first;
135
- while (line) {
136
- if (line->obj)
137
- rb_gc_mark(line->obj);
138
- line = line->next;
139
- }
129
+ obj_s *obj;
130
+ fio_ht_for_each(obj_s, node, obj, registry.store) rb_gc_mark(obj->obj);
140
131
  unlock_registry();
141
132
  }
142
133
 
143
- // clear the registry (end of lifetime)
134
+ /* clear the registry (end of lifetime) */
144
135
  static void registry_clear(void *ignore) {
145
136
  (void)ignore;
137
+ #if RUBY_REG_DBG == 1
138
+ fprintf(stderr, "Ruby Registry: Clear!!!\n");
139
+ #endif
146
140
  lock_registry();
147
- struct Object *line;
148
- struct Object *to_free;
149
- // free active object references
150
- line = registry.first;
151
- while (line) {
152
- to_free = line;
153
- line = line->next;
154
- free_node(to_free);
141
+ obj_s *obj;
142
+ fio_ht_for_each(obj_s, node, obj, registry.store) {
143
+ fio_ht_remove(&obj->node);
144
+ rb_gc_mark(obj->obj);
155
145
  }
156
- registry.first = NULL;
157
146
  registry.owner = 0;
147
+ fio_ht_free(&registry.store);
158
148
  unlock_registry();
159
149
  }
160
150
 
161
- // the data-type used to identify the registry
162
- // this sets the callbacks.
151
+ /*
152
+ the data-type used to identify the registry
153
+ this sets the callbacks.
154
+ */
163
155
  static struct rb_data_type_struct my_registry_type_struct = {
164
156
  .wrap_struct_name = "RubyReferencesIn_C_Land",
165
157
  .function.dfree = (void (*)(void *))registry_clear,
166
158
  .function.dmark = (void (*)(void *))registry_mark,
167
159
  };
168
160
 
169
- // initialize the registry
161
+ /* initialize the registry */
170
162
  static void init(VALUE owner) {
171
163
  lock_registry();
172
164
  // only one registry
@@ -181,27 +173,28 @@ static void init(VALUE owner) {
181
173
  TypedData_Wrap_Struct(rReferences, &my_registry_type_struct, &registry);
182
174
  rb_ivar_set(owner, rb_intern("registry"), r_registry);
183
175
  // initialize memory pool
184
- for (size_t i = 0; i < REGISTRY_POOL_SIZE - 1; i++) {
185
- registry.pool_mem[i].next = registry.pool_mem + i + 1;
176
+ for (size_t i = 0; i < REGISTRY_POOL_SIZE; i++) {
177
+ fio_list_push(obj_s, pool, registry.pool, &registry.pool_mem[i]);
186
178
  }
187
- registry.pool_mem[REGISTRY_POOL_SIZE - 1].next = NULL;
188
- registry.obj_pool = registry.pool_mem;
189
179
  finish:
190
180
  unlock_registry();
191
181
  }
192
182
 
193
- // print data, for testing
183
+ /* print data, for testing */
194
184
  static void print(void) {
195
185
  lock_registry();
196
- struct Object *line = registry.first;
197
186
  fprintf(stderr, "Registry owner is %lu\n", registry.owner);
198
- long index = 0;
199
- while (line) {
200
- fprintf(stderr, "[%lu] => %d X obj %lu type %d at %p\n", index++,
201
- line->count, line->obj, TYPE(line->obj), (void *)line);
202
- line = line->next;
187
+ obj_s *obj;
188
+ uint64_t index = 0;
189
+ fio_ht_for_each(obj_s, node, obj, registry.store) {
190
+ fprintf(stderr, "[%" PRIu64 " ] => %" PRIu64 " X obj %p type %d at %p\n",
191
+ index++, obj->ref, (void *)obj->obj, TYPE(obj->obj), (void *)obj);
203
192
  }
204
- fprintf(stderr, "Total of %lu registered objects being marked\n", index);
193
+ fprintf(stderr, "Total of %" PRIu64 " registered objects being marked\n",
194
+ index);
195
+ fprintf(stderr,
196
+ "Registry uses %" PRIu64 " Hash bins for %" PRIu64 " objects\n",
197
+ registry.store.bin_count, registry.store.count);
205
198
  unlock_registry();
206
199
  }
207
200