iodine 0.2.17 → 0.3.0
Potentially problematic release: this version of iodine has been flagged as possibly problematic.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +36 -3
- data/bin/config.ru +23 -2
- data/bin/http-hello +1 -1
- data/bin/ws-shootout +5 -0
- data/ext/iodine/defer.c +468 -0
- data/ext/iodine/defer.h +105 -0
- data/ext/iodine/evio.c +263 -0
- data/ext/iodine/evio.h +133 -0
- data/ext/iodine/extconf.rb +2 -1
- data/ext/iodine/facil.c +958 -0
- data/ext/iodine/facil.h +423 -0
- data/ext/iodine/http.c +90 -0
- data/ext/iodine/http.h +50 -12
- data/ext/iodine/http1.c +200 -267
- data/ext/iodine/http1.h +17 -26
- data/ext/iodine/http1_request.c +81 -0
- data/ext/iodine/http1_request.h +58 -0
- data/ext/iodine/http1_response.c +403 -0
- data/ext/iodine/http1_response.h +90 -0
- data/ext/iodine/http1_simple_parser.c +124 -108
- data/ext/iodine/http1_simple_parser.h +8 -3
- data/ext/iodine/http_request.c +104 -0
- data/ext/iodine/http_request.h +58 -102
- data/ext/iodine/http_response.c +212 -208
- data/ext/iodine/http_response.h +89 -252
- data/ext/iodine/iodine_core.c +57 -46
- data/ext/iodine/iodine_core.h +3 -1
- data/ext/iodine/iodine_http.c +105 -81
- data/ext/iodine/iodine_websocket.c +17 -13
- data/ext/iodine/iodine_websocket.h +1 -0
- data/ext/iodine/rb-call.c +9 -7
- data/ext/iodine/{rb-libasync.h → rb-defer.c} +57 -49
- data/ext/iodine/rb-rack-io.c +12 -6
- data/ext/iodine/rb-rack-io.h +1 -1
- data/ext/iodine/rb-registry.c +5 -2
- data/ext/iodine/sock.c +1159 -0
- data/ext/iodine/{libsock.h → sock.h} +138 -142
- data/ext/iodine/spnlock.inc +77 -0
- data/ext/iodine/websockets.c +101 -112
- data/ext/iodine/websockets.h +38 -19
- data/iodine.gemspec +3 -3
- data/lib/iodine/version.rb +1 -1
- data/lib/rack/handler/iodine.rb +6 -6
- metadata +23 -19
- data/ext/iodine/http_response_http1.h +0 -382
- data/ext/iodine/libasync.c +0 -570
- data/ext/iodine/libasync.h +0 -122
- data/ext/iodine/libreact.c +0 -350
- data/ext/iodine/libreact.h +0 -244
- data/ext/iodine/libserver.c +0 -957
- data/ext/iodine/libserver.h +0 -481
- data/ext/iodine/libsock.c +0 -1025
- data/ext/iodine/spnlock.h +0 -243
data/ext/iodine/spnlock.h
DELETED
@@ -1,243 +0,0 @@
```c
/*
Copyright: Boaz Segev, 2016-2017
License: MIT

Feel free to copy, use and enjoy according to the license provided.
*/
#ifndef SIMPLE_SPN_LOCK_H
/* *****************************************************************************
A Simple busy lock implementation ... (spnlock.h)

Based on a lot of internet reading as well as comparative work (i.e. the Linux
kernel's code and the more readable Apple kernel code)

Written by Boaz Segev at 2016. Donated to the public domain for all to enjoy.
*/
#define SIMPLE_SPN_LOCK_H

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <stdint.h>
#include <stdlib.h>

/*********
 * manage the way threads "wait" for the lock to release
 */
#if defined(__unix__) || defined(__APPLE__) || defined(__linux__)
/* nanosleep seems to be the most effective and efficient reschedule */
#include <time.h>
#define reschedule_thread()                                                    \
  {                                                                            \
    static const struct timespec tm = {.tv_nsec = 1};                          \
    nanosleep(&tm, NULL);                                                      \
  }

#else /* no effective rescheduling, just spin... */
#define reschedule_thread()

/* these are SUPER slow when compared with nanosleep or CPU cycling */
// #if defined(__SSE2__) || defined(__SSE2)
// #define reschedule_thread() __asm__("pause" :::)
//
// #elif defined(__has_include) && __has_include(<pthread.h>)
// #include "pthread.h"
// #define reschedule_thread() sched_yield()
// #endif

#endif
/* end `reschedule_thread` block */

/*********
 * The spin lock core functions (spn_trylock, spn_unlock, spn_is_locked)
 */

/* prefer the C11 standard implementation where available (trust the system) */
#if defined(__has_include)
#if __has_include(<stdatomic.h>)
#define SPN_TMP_HAS_ATOMICS 1
#include <stdatomic.h>
typedef atomic_bool spn_lock_i;
#define SPN_LOCK_INIT ATOMIC_VAR_INIT(0)
/** returns 1 if the lock was busy (TRUE == FAIL). */
static inline int spn_trylock(spn_lock_i *lock) {
  __sync_synchronize();
  return atomic_exchange(lock, 1);
}
/** Releases a lock. */
static inline void spn_unlock(spn_lock_i *lock) {
  atomic_store(lock, 0);
  __sync_synchronize();
}
/** returns a lock's state (non 0 == Busy). */
static inline int spn_is_locked(spn_lock_i *lock) { return atomic_load(lock); }
#endif
#endif

/* Check if stdatomic was available */
#ifdef SPN_TMP_HAS_ATOMICS
#undef SPN_TMP_HAS_ATOMICS

#else
/* Test for compiler builtins */

/* use clang builtins if available - trust the compiler */
#if defined(__clang__)
#if defined(__has_builtin) && __has_builtin(__sync_swap)
/* define the type */
typedef volatile uint8_t spn_lock_i;
/** returns 1 if the lock was busy (TRUE == FAIL). */
static inline int spn_trylock(spn_lock_i *lock) { return __sync_swap(lock, 1); }
#define SPN_TMP_HAS_BUILTIN 1
#endif
/* use gcc builtins if available - trust the compiler */
#elif defined(__GNUC__) &&                                                     \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
/* define the type */
typedef volatile uint8_t spn_lock_i;
/** returns 1 if the lock was busy (TRUE == FAIL). */
static inline int spn_trylock(spn_lock_i *lock) {
  __sync_synchronize();
  return __sync_fetch_and_or(lock, 1);
}
#define SPN_TMP_HAS_BUILTIN 1
#endif

/* Check if compiler builtins were available; if not, try assembly */
#if SPN_TMP_HAS_BUILTIN
#undef SPN_TMP_HAS_BUILTIN

/* use Intel's asm if on Intel - trust Intel's documentation */
#elif defined(__amd64__) || defined(__x86_64__) || defined(__x86__) ||         \
    defined(__i386__) || defined(__ia64__) || defined(_M_IA64) ||              \
    defined(__itanium__)
/* define the type */
typedef volatile uint8_t spn_lock_i;
/** returns 1 if the lock was busy (TRUE == FAIL). */
static inline int spn_trylock(spn_lock_i *lock) {
  spn_lock_i tmp;
  __asm__ volatile("mfence" ::: "memory");
  __asm__ volatile("xchgb %0,%1" : "=r"(tmp), "=m"(*lock) : "0"(1) : "memory");
  return tmp;
}

/* use SPARC's asm if on SPARC - trust the design */
#elif defined(__sparc__) || defined(__sparc)
/* define the type */
typedef volatile uint8_t spn_lock_i;
/** returns TRUE (non-zero) if the lock was busy (TRUE == FAIL). */
static inline int spn_trylock(spn_lock_i *lock) {
  spn_lock_i tmp;
  __asm__ volatile("ldstub [%1], %0" : "=r"(tmp) : "r"(lock) : "memory");
  return tmp; /* returns 0xFF if the lock was busy, 0 if free */
}

#else
/* I don't know how to provide green thread safety on PowerPC or ARM */
#error "Couldn't implement a spinlock for this system / compiler"
#endif /* types and atomic exchange */
/** Initialization value in `free` state. */
#define SPN_LOCK_INIT 0

/** Releases a lock. */
static inline void spn_unlock(spn_lock_i *lock) {
  __asm__ volatile("" ::: "memory");
  *lock = 0;
}
/** returns a lock's state (non 0 == Busy). */
static inline int spn_is_locked(spn_lock_i *lock) {
  __asm__ volatile("" ::: "memory");
  return *lock;
}

#endif /* has atomics */
#include <stdio.h>
/** Busy waits for the lock. */
static inline void spn_lock(spn_lock_i *lock) {
  while (spn_trylock(lock)) {
    reschedule_thread();
  }
}

/* *****************************************************************************
spnlock.h finished
*/
#endif

#if DEBUG == 1 && !defined(SPN_LOCK_TEST_REPEAT_COUNT)

/* allow the unused-function attribute flag */
#ifndef UNUSED_FUNC
#define UNUSED_FUNC __attribute__((unused))
#endif

#define SPN_LOCK_TEST_REPEAT_COUNT 10000UL
#define SPN_LOCK_TEST_THREAD_COUNT 10000UL
#include <pthread.h>
#include <stdio.h>

UNUSED_FUNC static void *test_spn_lock_work(void *arg) {
  static spn_lock_i lck = SPN_LOCK_INIT;
  uint64_t *ip = arg;
  for (size_t i = 0; i < SPN_LOCK_TEST_REPEAT_COUNT; i++) {
    spn_lock(&lck);
    uint64_t j = *ip;
    j++;
    __asm__ volatile("" ::: "memory", "cc");
    *ip = j;
    spn_unlock(&lck);
  }
  return NULL;
}

UNUSED_FUNC static void *test_spn_lock_lockless_work(void *arg) {
  uint64_t *ip = arg;
  for (size_t i = 0; i < SPN_LOCK_TEST_REPEAT_COUNT; i++) {
    uint64_t j = *ip;
    j++;
    __asm__ volatile("" ::: "memory", "cc");
    *ip = j;
  }
  return NULL;
}

UNUSED_FUNC static void spn_lock_test(void) {
  size_t start, end;
  unsigned long num = 0;
  pthread_t *threads = malloc(SPN_LOCK_TEST_THREAD_COUNT * sizeof(*threads));
  void *tmp;
  start = clock();
  for (size_t i = 0; i < SPN_LOCK_TEST_THREAD_COUNT; i++) {
    pthread_create(threads + i, NULL, test_spn_lock_lockless_work, &num);
  }
  for (size_t i = 0; i < SPN_LOCK_TEST_THREAD_COUNT; i++) {
    pthread_join(threads[i], &tmp);
  }
  end = clock();
  fprintf(stderr, "Lockless Num = %lu with %lu CPU cycles.\n", num,
          end - start);

  num = 0;

  start = clock();
  for (size_t i = 0; i < SPN_LOCK_TEST_THREAD_COUNT; i++) {
    if (pthread_create(threads + i, NULL, test_spn_lock_work, &num))
      fprintf(stderr,
              "Failed to create thread number %lu... test will fail to run as "
              "expected.\n",
              i);
  }
  for (size_t i = 0; i < SPN_LOCK_TEST_THREAD_COUNT; i++) {
    pthread_join(threads[i], &tmp);
  }
  end = clock();
  free(threads);
  fprintf(stderr, "Locked Num = %lu with %lu CPU cycles.\n", num, end - start);
  fprintf(stderr, "spn_lock test %s\n",
          num == SPN_LOCK_TEST_THREAD_COUNT * SPN_LOCK_TEST_REPEAT_COUNT
              ? "passed."
              : "FAILED!");
}
#endif /* Test */
```
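For context, the deleted header implements a test-and-set spinlock: `spn_trylock` returns non-zero when the lock was already held, `spn_lock` busy-waits (rescheduling via `nanosleep` between attempts), and `spn_unlock` releases the lock behind a memory barrier. Judging by the file list above, `spnlock.inc` presumably takes over this role in 0.3.0. Below is a minimal usage sketch of the API; it is not taken from iodine's sources, and the `worker`/`counter` names and thread/iteration counts are illustrative only:

```c
/* Illustrative sketch only -- not part of this diff. Guards a shared
 * counter with the spnlock API defined in the header above. */
#include <pthread.h>
#include <stdio.h>
#include "spnlock.h" /* the deleted header shown above */

#define THREADS 4
#define REPEATS 100000

static spn_lock_i counter_lock = SPN_LOCK_INIT; /* starts in the `free` state */
static unsigned long counter = 0;

static void *worker(void *arg) {
  (void)arg;
  for (int i = 0; i < REPEATS; i++) {
    spn_lock(&counter_lock); /* busy-waits until the lock is acquired */
    counter++;               /* critical section: read-modify-write */
    spn_unlock(&counter_lock);
  }
  return NULL;
}

int main(void) {
  pthread_t threads[THREADS];
  for (int i = 0; i < THREADS; i++)
    pthread_create(&threads[i], NULL, worker, NULL);
  for (int i = 0; i < THREADS; i++)
    pthread_join(threads[i], NULL);
  /* With every increment locked, no updates are lost. */
  printf("counter = %lu (expected %d)\n", counter, THREADS * REPEATS);
  return 0;
}
```

This mirrors what the header's own `spn_lock_test` measures: the "lockless" variant loses updates under contention, while the locked variant always sums to threads × repeats.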