iodine 0.7.9 → 0.7.10
- checksums.yaml +4 -4
- data/.travis.yml +8 -2
- data/CHANGELOG.md +14 -0
- data/README.md +85 -29
- data/{examples → bin}/info.md +0 -0
- data/bin/{mustache.rb → mustache_bench.rb} +0 -0
- data/examples/shootout.ru +4 -4
- data/ext/iodine/extconf.rb +1 -1
- data/ext/iodine/fio.c +738 -549
- data/ext/iodine/fio.h +4 -5
- data/ext/iodine/fiobj_mustache.c +1 -1
- data/ext/iodine/fiobj_numbers.c +3 -3
- data/ext/iodine/http.c +15 -14
- data/ext/iodine/http1.c +6 -6
- data/ext/iodine/http1_parser.c +3 -3
- data/ext/iodine/http_internal.c +3 -3
- data/ext/iodine/http_internal.h +1 -1
- data/ext/iodine/iodine.c +7 -8
- data/ext/iodine/iodine_connection.c +48 -8
- data/ext/iodine/iodine_defer.c +7 -7
- data/ext/iodine/iodine_mustache.c +1 -1
- data/ext/iodine/redis_engine.c +4 -4
- data/ext/iodine/websockets.c +41 -22
- data/ext/iodine/websockets.h +13 -12
- data/iodine.gemspec +0 -2
- data/lib/iodine.rb +4 -0
- data/lib/iodine/version.rb +1 -1
- data/lib/rack/handler/iodine.rb +2 -4
- metadata +5 -39
- data/bin/config.ru +0 -97
- data/bin/echo +0 -46
- data/bin/http-big +0 -63
- data/bin/http-hello +0 -62
- data/bin/http-playground +0 -124
- data/bin/playground +0 -62
- data/bin/raw-rbhttp +0 -38
- data/bin/raw-rbhttp-em +0 -63
- data/bin/raw_broadcast +0 -64
- data/bin/test_with_faye +0 -44
- data/bin/updated api +0 -113
- data/bin/ws-broadcast +0 -106
- data/bin/ws-echo +0 -117
- data/examples/test_template.mustache +0 -16
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bd4ea42a7f008886bcadee4c829499686a1fbceeb2738daf56e4155063d9054c
+  data.tar.gz: aeb66d448ad9cd09e55f7f15c79bcb1b3e8f0a41f3e2adfc9e2c894c20d7c80f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ecdde8977f2b48089f370356f3bbd272054df193f9be220ac28ad8491c59f749a1c21e1506bb45c9f2532e9bf072b9e7bf365c92ccb4b3a651b52bacbdfb21ad
+  data.tar.gz: 5268a0318e3484a0a703fa68e52f057c6cab513066243ff37e052c58669d8babe5c53650082349bb9b2610f80afd0f5e551f530a3e6d662ad49dfa4609f3d6b1
data/.travis.yml
CHANGED
@@ -22,10 +22,16 @@ addons:
   - gcc-4.9
   - gcc-5
 script:
+  - echo CFLAGS = $CFLAGS
+  - echo cflags = $cflags
   - gem uninstall -x iodine
   - rake build
   - find pkg/iodine-*.gem -exec gem install -V {} +
   - gem uninstall -x iodine
-  -
+  - export CFLAGS="-std=c99"
+  - export CC=gcc
+  - find pkg/iodine-*.gem -exec gem install -V {} +
   - gem uninstall -x iodine
-  -
+  - export CFLAGS="-Wall"
+  - export CC=gcc-5
+  - find pkg/iodine-*.gem -exec gem install -V {} +
data/CHANGELOG.md
CHANGED
@@ -6,6 +6,20 @@ Please notice that this change log contains changes for upcoming releases as wel
 
 ## Changes:
 
+#### Change log v.0.7.10
+
+**Fix**: (pub/sub) fixed connection lock for pub/sub tasks. Now pub/sub Ruby tasks will lock the connection, protecting the user's code against concurrent access to the connection's data.
+
+**Fix**: (installation) fixed `CFLAGS` compilation value to allow for pre-existing values set by Ruby.
+
+**Fix**: (installation) fixed possible issues that could occur when installing iodine with `FIO_FORCE_MALLOC`.
+
+**Optimization**: (pub/sub) leverages facil.io broadcasting optimizations, minimizing memory allocations when broadcasting pub/sub messages directly to multiple WebSocket clients.
+
+**Update**: (fio) updated the facil.io code to leverage its urgent task queue for outbound IO, which minimizes reliance on the IO backup thread.
+
+**Update**: (IO) minor tweaks to the IO backup thread and CLI output format.
+
 #### Change log v.0.7.9
 
 **Fix**: fixed the background IO backup thread initialization and sleep interval. This thread isn't critical. It's only used to (slowly) flush sockets when all the actual threads are blocked by long running Ruby application code.
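The pub/sub connection-lock fix above is visible from Ruby code. A minimal sketch (the `ChatClient` module and `:chat` channel are hypothetical; the `rack.upgrade` callback style follows iodine's own examples): as of 0.7.10 the subscription block runs under the same connection lock as the callbacks, so it cannot race with `on_message` or `on_close` for the same client.

```ruby
# config.ru - minimal WebSocket pub/sub sketch (hypothetical names).
module ChatClient
  def self.on_open(client)
    # This block is a "pub/sub Ruby task"; it now locks the connection
    # while running, protecting per-connection data from concurrent access.
    client.subscribe(:chat) { |channel, msg| client.write msg }
  end

  def self.on_message(client, data)
    client.publish :chat, data
  end
end

app = proc do |env|
  if env['rack.upgrade?'.freeze] == :websocket
    env['rack.upgrade'.freeze] = ChatClient
    [0, {}, []]
  else
    [200, { 'Content-Type' => 'text/plain' }, ['WebSocket only.']]
  end
end
run app
```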
data/README.md
CHANGED
@@ -60,6 +60,16 @@ During development, it's more common to use a single process and a few threads:
 bundler exec iodine -p $PORT -t 16 -w 1
 ```
 
+### Heap Fragmentation Protection
+
+Iodine includes a network oriented custom memory allocator, with very high performance.
+
+This allows the heap to be divided, naturally, into long-living objects (allocated normally) and short living objects (allocated using the iodine allocator).
+
+This approach helps to minimize heap fragmentation for long running processes.
+
+It's still recommended to consider [jemalloc](http://jemalloc.net) or other allocators to mitigate the heap fragmentation that would be caused by Ruby's internal memory management.
+
 ### Static file serving support
 
 Iodine supports an internal static file service that bypasses the Ruby layer and serves static files directly from "C-land".
@@ -95,11 +105,9 @@ bundler exec iodine -p $PORT -t 16 -w 4 -www /my/public/folder -v
 
 #### X-Sendfile
 
-
-
-To enable iodine's native X-Sendfile support, a static file service (a public folder) needs to be assigned (this informs iodine that static files aren't sent using a different layer, such as nginx).
+When a public folder is assigned (the static file server is active), iodine automatically adds support for the `X-Sendfile` header in any Ruby application response.
 
-This allows Ruby to send very large files using a very small memory footprint
+This allows Ruby to send very large files using a very small memory footprint and usually leverages the `sendfile` system call.
 
 i.e. (example `config.ru` for iodine):
 
@@ -121,19 +129,21 @@ end
 run app
 ```
 
-
+Benchmark [localhost:3000/source](http://localhost:3000/source) to experience the `X-Sendfile` extension at work.
 
 #### Pre-Compressed assets / files
 
-Rails does this automatically when compiling assets
+Rails does this automatically when compiling assets; that is, it will `gzip` your static files.
 
 Iodine will automatically recognize and send the `gz` version if the client (browser) supports the `gzip` transfer-encoding.
 
 For example, to offer a compressed version of `style.css`, run (in the terminal):
 
-
+```bash
+$ gzip -k -9 style.css
+```
 
-
+This results in both files, `style.css` (the original) and `style.css.gz` (the compressed).
 
 When a browser that supports compressed encoding (which is most browsers) requests the file, iodine will recognize that a pre-compressed option exists and will prefer the `gzip` compressed version.
 
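To make the `X-Sendfile` flow above concrete, a minimal `config.ru` sketch (the file path is hypothetical; it assumes iodine was started with a `-www` public folder, which activates the extension). Iodine intercepts the header and streams the file from C-land instead of loading it into Ruby memory:

```ruby
# config.ru - X-Sendfile sketch (hypothetical path).
app = proc do |env|
  # The response body stays empty; iodine sends the named file itself.
  [200, { 'X-Sendfile' => File.expand_path('./public/big_file.mp4') }, []]
end
run app
```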
@@ -203,11 +213,19 @@ Iodine.start
 run APP
 ```
 
-
+### Native Pub/Sub with *optional* Redis scaling
+
+Iodine's core, `facil.io`, offers a native Pub/Sub implementation that can be scaled across machine boundaries using Redis.
 
-
+The default implementation covers the whole process cluster, so a single cluster doesn't need Redis.
+
+Once a single iodine process cluster isn't enough, horizontal scaling for the Pub/Sub layer is as simple as connecting iodine to Redis using the `-r <url>` option from the command line. i.e.:
+
+```bash
+$ iodine -w -1 -t 8 -r redis://localhost
+```
 
-
+It's also possible to initialize the iodine<=>Redis link using Ruby, directly from the application's code:
 
 ```ruby
 # initialize the Redis engine for each iodine process.
@@ -219,7 +237,7 @@ end
 # ... the rest of the application remains unchanged.
 ```
 
-
+Iodine's Redis client can also be used for asynchronous Redis command execution. i.e.:
 
 ```ruby
 if(Iodine::PubSub.default.is_a? Iodine::PubSub::Redis)
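A hedged sketch of the asynchronous command execution mentioned above (this assumes the `Iodine::PubSub::Redis#cmd` call with an optional reply block, as used in iodine's documentation; treat the exact signature as an assumption):

```ruby
if Iodine::PubSub.default.is_a?(Iodine::PubSub::Redis)
  # The reply is delivered asynchronously to the block.
  Iodine::PubSub.default.cmd('PING') { |reply| puts "Redis replied: #{reply}" }
end
```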
@@ -230,11 +248,13 @@ end
 
 **Pub/Sub Details and Limitations:**
 
-* Iodine's Redis client does *not* support multiple databases. This is both because [database scoping is ignored by Redis during pub/sub](https://redis.io/topics/pubsub#database-amp-scoping) and because [Redis Cluster doesn't support multiple databases](https://redis.io/topics/cluster-spec). This indicated that multiple database support just isn't worth the extra effort.
+* Iodine's Redis client does *not* support multiple databases. This is both because [database scoping is ignored by Redis during pub/sub](https://redis.io/topics/pubsub#database-amp-scoping) and because [Redis Cluster doesn't support multiple databases](https://redis.io/topics/cluster-spec). This indicates that multiple database support just isn't worth the extra effort and performance hit.
 
-* The iodine Redis client will use
+* The iodine Redis client will use two Redis connections for the whole process cluster (a single publishing connection and a single subscription connection), minimizing the Redis load and network bandwidth.
+
+* Connections will be automatically re-established if timeouts or errors occur.
 
 ### Hot Restart
 
 Iodine will "hot-restart" the application by shutting down and re-spawning the worker processes.
@@ -254,7 +274,7 @@ Since the master / root process doesn't handle any requests (it only handles pub
 
 **Note**: This will **not** re-load the application (any changes to the Ruby code require an actual restart).
 
-
+
 ### Optimized HTTP logging
 
 By default, iodine is pretty quiet. Some messages are logged to `stderr`, but not many.
@@ -278,7 +298,7 @@ The log output can also be redirected to a `stdout`:
 bundler exec iodine -p $PORT -v 2>&1
 ```
 
-
+
 ### Built-in support for Sequel and ActiveRecord
 
 It's a well known fact that Database connections require special attention when using `fork`-ing servers (multi-process servers) such as Puma, Passenger and iodine.
@@ -288,7 +308,7 @@ With iodine, there's no need to worry.
 
 Iodine provides built-in `fork` handling for both ActiveRecord and Sequel, in order to protect against these possible errors.
 
-
+
 ### TCP/IP (raw) sockets
 
 Upgrading to a custom protocol (i.e., in order to implement your own WebSocket protocol with special extensions) is available when neither WebSockets nor SSE connection upgrades were requested. In the following (terminal) example, we'll use an echo server without direct socket echo:
@@ -317,14 +337,6 @@ Iodine.start
 run APP
 ```
 
-#### A few notes
-
-This design has a number of benefits, some of them related to better IO handling, resource optimization (no need for two IO polling systems), etc. This also allows us to use middleware without interfering with connection upgrades and provides backwards compatibility.
-
-Iodine's HTTP server imposes a few restrictions for performance and security reasons, such as limiting each header line to 8Kb. These restrictions shouldn't be an issue and are similar to limitations imposed by Apache or Nginx.
-
-If you still want to use Rack's `hijack` API, iodine will support you - but be aware that you will need to implement your own reactor and thread pool for any sockets you hijack, as well as a socket buffer for non-blocking `write` operations (why do that when you can write a protocol object and have the main reactor manage the socket?).
-
 ### How does it compare to other servers?
 
 The honest answer is "I don't know". I recommend that you perform your own tests.
@@ -384,7 +396,46 @@ $ RACK_ENV=production puma -p 3000 -t 16 -w 4
 
 It's recommended that the servers (Iodine/Puma) and the client (`wrk`/`ab`) run on separate machines.
 
-###
+### A few notes
+
+Iodine's upgrade / callback design has a number of benefits, some of them related to better IO handling, resource optimization (no need for two IO polling systems), etc. This also allows us to use middleware without interfering with connection upgrades and provides backwards compatibility.
+
+Iodine's HTTP server imposes a few restrictions for performance and security reasons, such as limiting each header line to 8Kb. These restrictions shouldn't be an issue and are similar to limitations imposed by Apache or Nginx.
+
+If you still want to use Rack's `hijack` API, iodine will support you - but be aware that you will need to implement your own reactor and thread pool for any sockets you hijack, as well as a socket buffer for non-blocking `write` operations (why do that when you can write a protocol object and have the main reactor manage the socket?).
+
+## Installation
+
+To install iodine, simply install the `iodine` gem:
+
+```bash
+$ gem install iodine
+```
+
+Iodine is written in C and allows some compile-time customizations, such as:
+
+* `FIO_FORCE_MALLOC` - avoids iodine's custom memory allocator and uses `malloc` instead (mostly used when debugging iodine or when using a different memory allocator).
+
+* `FIO_MAX_SOCK_CAPACITY` - limits iodine's maximum client capacity. Defaults to 131,072 clients.
+
+* `HTTP_MAX_HEADER_COUNT` - limits the number of headers the HTTP server will accept before disconnecting a client (security). Defaults to 128 headers (permissive).
+
+* `HTTP_MAX_HEADER_LENGTH` - limits the number of bytes allowed for a single header (pre-allocated memory per connection + security). Defaults to 8Kb per header line (normal).
+
+* `HTTP_BUSY_UNLESS_HAS_FDS` - requires at least X number of free file descriptors (for new database connections, etc') before accepting a new HTTP client.
+
+* `FIO_ENGINE_POLL` - prefer the `poll` system call over `epoll` or `kqueue` (not recommended).
+
+These options can be used, for example, like so:
+
+```bash
+$ CFLAGS="-DFIO_FORCE_MALLOC=1 -DHTTP_MAX_HEADER_COUNT=64" \
+  gem install iodine
+```
+
+More possible compile time options can be found in the [facil.io documentation](http://facil.io).
+
 ## Evented oriented design with extra safety
 
 Iodine is an evented server, similar in its architecture to `nginx` and `puma`. It's different from the simple "thread-per-client" design that is often taught when we begin to learn about network programming.
@@ -417,12 +468,17 @@ The server events are fairly fast and fragmented (longer code is fragmented acro
 
 ...but single threaded mode should probably be avoided.
 
-The thread pool is there to help slow user code.
 
 It's very common that the application's code will run slower and require external resources (i.e., databases, a custom pub/sub service, etc'). This slow code could "starve" the server, which is patiently waiting to run its tasks on the same thread.
 
+The thread pool is there to help slow user code.
+
 The slower your application code, the more threads you will need to keep the server running in a responsive manner (note that responsiveness and speed aren't always the same).
 
+To make a thread pool easier and safer to use, iodine makes sure that no connection task / callback is called concurrently for the same connection.
+
+For example, if a WebSocket connection is already busy in its `on_message` callback, no other messages will be forwarded to the callback until the current callback returns.
+
 ## Free, as in freedom (BYO beer)
 
 Iodine is **free** and **open source**, so why not take it out for a spin?
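Because callbacks for a single connection never overlap, per-connection state can be mutated without extra locking. A small sketch (the `Counter` class is hypothetical; assigning a fresh handler instance per connection follows the `rack.upgrade` pattern shown earlier):

```ruby
class Counter
  def initialize
    @count = 0 # plain instance state; no Mutex needed
  end

  def on_message(client, data)
    # Safe: iodine never runs two callbacks concurrently for the
    # same connection, so this increment can't race with itself.
    @count += 1
    client.write "message #{@count}: #{data}"
  end
end

# Hypothetical wiring inside the Rack app, one instance per connection:
#   env['rack.upgrade'.freeze] = Counter.new
```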
@@ -475,7 +531,7 @@ Iodine.start
 
 ```
 
-
+
 ### Why not EventMachine?
 
 You can go ahead and use EventMachine if you like. They're doing amazing work on that one and it's been used a lot in Ruby-land... really, tons of good developers and people on that project.
data/{examples → bin}/info.md
RENAMED
File without changes
data/examples/shootout.ru
CHANGED
@@ -15,10 +15,10 @@ module ShootoutApp
     env.each { |k, v| out << "#{k}: #{v}\n" ; len += out[-1].length }
     request = Rack::Request.new(env)
     out << "\nRequest Path: #{request.path_info}\n"
-    len += out[-1].length
+    len += out[-1].length
     unless request.params.empty?
       out << "Params:\n"
-      len += out[-1].length
+      len += out[-1].length
       request.params.each { |k,v| out << "#{k}: #{v}\n" ; len += out[-1].length }
     end
     [200, { 'Content-Length' => len.to_s, 'Content-Type' => 'text/plain; charset=UTF-8;' }, out]
@@ -27,8 +27,8 @@ module ShootoutApp
   # It's slower than writing to every socket a pre-parsed message, but it's closer
   # to real-life implementations.
   def self.on_open client
-    client.subscribe
-    client.subscribe
+    client.subscribe(:shootout_b, as: :binary) # { |ch, msg| client.write(msg)}
+    client.subscribe(:shootout) # { |ch, msg| client.write(msg)}
   end
   def self.on_message client, data
     if data[0] == 'b' # binary
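For completeness, the subscriptions above pair with a publish call on the sending side. A hedged one-liner matching the shootout channel (assuming the `Iodine.publish(channel, message)` API; connections can also call `client.publish`):

```ruby
# Broadcast to every subscriber of the :shootout channel.
Iodine.publish :shootout, 'hello everyone'
```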
data/ext/iodine/extconf.rb
CHANGED
@@ -29,7 +29,7 @@ else
   puts 'using an unknown (old?) compiler... who knows if this will work out... we hope.'
 end
 
-$CFLAGS = "-std=c11 -
+RbConfig::MAKEFILE_CONFIG['CFLAGS'] = $CFLAGS = "-std=c11 -DFIO_PRINT_STATE=0 #{$CFLAGS} #{$CFLAGS == ENV['CFLAGS'] ? "" : ENV['CFLAGS']}"
 RbConfig::MAKEFILE_CONFIG['CC'] = $CC = ENV['CC'] if ENV['CC']
 RbConfig::MAKEFILE_CONFIG['CPP'] = $CPP = ENV['CPP'] if ENV['CPP']
 
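The fixed `extconf.rb` line above merges three flag sources: iodine's own flags, whatever mkmf already placed in `$CFLAGS`, and the user's `ENV['CFLAGS']` (appended only when mkmf didn't already absorb it). A small Ruby illustration with made-up values:

```ruby
# Illustration only (made-up values), mirroring the merge expression above.
mkmf_cflags = '-O3 -fno-fast-math'    # what Ruby/mkmf picked up
env_cflags  = '-DFIO_FORCE_MALLOC=1'  # what the user exported
merged = "-std=c11 -DFIO_PRINT_STATE=0 #{mkmf_cflags} " \
         "#{mkmf_cflags == env_cflags ? '' : env_cflags}"
# => "-std=c11 -DFIO_PRINT_STATE=0 -O3 -fno-fast-math -DFIO_FORCE_MALLOC=1"
```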
data/ext/iodine/fio.c
CHANGED
@@ -65,6 +65,10 @@ Feel free to copy, use and enjoy according to the license provided.
 #define FIO_POLL_TICK 1000
 #endif
 
+#ifndef FIO_USE_URGENT_QUEUE
+#define FIO_USE_URGENT_QUEUE 1
+#endif
+
 #ifndef DEBUG_SPINLOCK
 #define DEBUG_SPINLOCK 0
 #endif
@@ -510,191 +514,6 @@ Section Start Marker
 
 
 
-Timers
-
-
-
-
-
-
-
-
-
-***************************************************************************** */
-
-typedef struct {
-  fio_ls_embd_s node;
-  struct timespec due;
-  size_t interval; /*in ms */
-  size_t repetitions;
-  void (*task)(void *);
-  void *arg;
-  void (*on_finish)(void *);
-} fio_timer_s;
-
-static fio_ls_embd_s fio_timers = FIO_LS_INIT(fio_timers);
-
-static fio_lock_i fio_timer_lock = FIO_LOCK_INIT;
-
-/** Marks the current time as facil.io's cycle time */
-static inline void fio_mark_time(void) {
-  clock_gettime(CLOCK_REALTIME, &fio_data->last_cycle);
-}
-
-/** Calculates the due time for a task, given it's interval */
-static struct timespec fio_timer_calc_due(size_t interval) {
-  struct timespec now = fio_last_tick();
-  if (interval > 1000) {
-    now.tv_sec += interval / 1000;
-    interval -= interval / 1000;
-  }
-  now.tv_nsec += (interval * 1000000UL);
-  if (now.tv_nsec > 1000000000L) {
-    now.tv_nsec -= 1000000000L;
-    now.tv_sec += 1;
-  }
-  return now;
-}
-
-/** Returns the number of miliseconds until the next event, up to FIO_POLL_TICK
- */
-static size_t fio_timer_calc_first_interval(void) {
-  if (fio_defer_has_queue())
-    return 0;
-  if (fio_ls_embd_is_empty(&fio_timers)) {
-    return FIO_POLL_TICK;
-  }
-  struct timespec now = fio_last_tick();
-  struct timespec due =
-      FIO_LS_EMBD_OBJ(fio_timer_s, node, fio_timers.next)->due;
-  if (due.tv_sec < now.tv_sec ||
-      (due.tv_sec == now.tv_sec && due.tv_nsec <= now.tv_nsec))
-    return 0;
-  size_t interval = 1000L * (due.tv_sec - now.tv_sec);
-  if (due.tv_nsec >= now.tv_nsec) {
-    interval += (due.tv_nsec - now.tv_nsec) / 1000000L;
-  } else {
-    interval -= (now.tv_nsec - due.tv_nsec) / 1000000L;
-  }
-  if (interval > FIO_POLL_TICK)
-    interval = FIO_POLL_TICK;
-  return interval;
-}
-
-/* simple a<=>b if "a" is bigger a negative result is returned, eq == 0. */
-static int fio_timer_compare(struct timespec a, struct timespec b) {
-  if (a.tv_sec == b.tv_sec) {
-    if (a.tv_nsec < b.tv_nsec)
-      return 1;
-    if (a.tv_nsec > b.tv_nsec)
-      return -1;
-    return 0;
-  }
-  if (a.tv_sec < b.tv_sec)
-    return 1;
-  return -1;
-}
-
-/** Places a timer in an ordered linked list. */
-static void fio_timer_add_order(fio_timer_s *timer) {
-  timer->due = fio_timer_calc_due(timer->interval);
-  // fio_ls_embd_s *pos = &fio_timers;
-  fio_lock(&fio_timer_lock);
-  FIO_LS_EMBD_FOR(&fio_timers, node) {
-    fio_timer_s *t2 = FIO_LS_EMBD_OBJ(fio_timer_s, node, node);
-    if (fio_timer_compare(timer->due, t2->due) >= 0) {
-      fio_ls_embd_push(node, &timer->node);
-      goto finish;
-    }
-  }
-  fio_ls_embd_push(&fio_timers, &timer->node);
-finish:
-  fio_unlock(&fio_timer_lock);
-}
-
-/** Performs a timer task and re-adds it to the queue (or cleans it up) */
-static void fio_timer_perform_single(void *timer_, void *ignr) {
-  fio_timer_s *timer = timer_;
-  timer->task(timer->arg);
-  if (!timer->repetitions || fio_atomic_sub(&timer->repetitions, 1))
-    goto reschedule;
-  if (timer->on_finish)
-    timer->on_finish(timer->arg);
-  free(timer);
-  return;
-  (void)ignr;
-reschedule:
-  fio_timer_add_order(timer);
-}
-
-/** schedules all timers that are due to be performed. */
-static void fio_timer_schedule(void) {
-  struct timespec now = fio_last_tick();
-  fio_lock(&fio_timer_lock);
-  while (fio_ls_embd_any(&fio_timers) &&
-         fio_timer_compare(
-             FIO_LS_EMBD_OBJ(fio_timer_s, node, fio_timers.next)->due, now) >=
-             0) {
-    fio_ls_embd_s *tmp = fio_ls_embd_remove(fio_timers.next);
-    fio_defer(fio_timer_perform_single, FIO_LS_EMBD_OBJ(fio_timer_s, node, tmp),
-              NULL);
-  }
-  fio_unlock(&fio_timer_lock);
-}
-
-static void fio_timer_clear_all(void) {
-  fio_lock(&fio_timer_lock);
-  while (fio_ls_embd_any(&fio_timers)) {
-    fio_timer_s *timer =
-        FIO_LS_EMBD_OBJ(fio_timer_s, node, fio_ls_embd_pop(&fio_timers));
-    if (timer->on_finish)
-      timer->on_finish(timer->arg);
-    free(timer);
-  }
-  fio_unlock(&fio_timer_lock);
-}
-
-/**
- * Creates a timer to run a task at the specified interval.
- *
- * The task will repeat `repetitions` times. If `repetitions` is set to 0, task
- * will repeat forever.
- *
- * Returns -1 on error.
- *
- * The `on_finish` handler is always called (even on error).
- */
-int fio_run_every(size_t milliseconds, size_t repetitions, void (*task)(void *),
-                  void *arg, void (*on_finish)(void *)) {
-  if (!task || (milliseconds == 0 && !repetitions))
-    return -1;
-  fio_timer_s *timer = malloc(sizeof(*timer));
-  FIO_ASSERT_ALLOC(timer);
-  fio_mark_time();
-  *timer = (fio_timer_s){
-      .due = fio_timer_calc_due(milliseconds),
-      .interval = milliseconds,
-      .repetitions = repetitions,
-      .task = task,
-      .arg = arg,
-      .on_finish = on_finish,
-  };
-  fio_timer_add_order(timer);
-  return 0;
-}
-
-/* *****************************************************************************
-Section Start Marker
-
-
-
-
-
-
-
-
 
 
 
@@ -757,7 +576,6 @@ typedef struct {
 
 /* task queue block */
 typedef struct fio_defer_queue_block_s fio_defer_queue_block_s;
-
 struct fio_defer_queue_block_s {
   fio_defer_task_s tasks[DEFER_QUEUE_BLOCK_COUNT];
   fio_defer_queue_block_s *next;
@@ -766,18 +584,26 @@ struct fio_defer_queue_block_s {
   unsigned char state;
 };
 
-
-
-
-static struct {
-  /* a lock for the state machine, used for multi-threading support */
+/* task queue object */
+typedef struct { /* a lock for the state machine, used for multi-threading
+                    support */
   fio_lock_i lock;
   /* current active block to pop tasks */
   fio_defer_queue_block_s *reader;
   /* current active block to push tasks */
   fio_defer_queue_block_s *writer;
-
-
+  /* static, built-in, queue */
+  fio_defer_queue_block_s static_queue;
+} fio_task_queue_s;
+
+/* the state machine - this holds all the data about the task queue and pool */
+static fio_task_queue_s task_queue_normal = {
+    .reader = &task_queue_normal.static_queue,
+    .writer = &task_queue_normal.static_queue};
+
+static fio_task_queue_s task_queue_urgent = {
+    .reader = &task_queue_urgent.static_queue,
+    .writer = &task_queue_urgent.static_queue};
 
 /* *****************************************************************************
 Internal Task API
@@ -797,85 +623,98 @@ static size_t fio_defer_count_alloc, fio_defer_count_dealloc;
 #define COUNT_RESET
 #endif
 
-static inline void
-
+static inline void fio_defer_push_task_fn(fio_defer_task_s task,
+                                          fio_task_queue_s *queue) {
+  fio_lock(&queue->lock);
 
   /* test if full */
-  if (
-      deferred.writer->write == deferred.writer->read) {
+  if (queue->writer->state && queue->writer->write == queue->writer->read) {
     /* return to static buffer or allocate new buffer */
-    if (
-
+    if (queue->static_queue.state == 2) {
+      queue->writer->next = &queue->static_queue;
     } else {
-
+      queue->writer->next = fio_malloc(sizeof(*queue->writer->next));
       COUNT_ALLOC;
-      if (!
+      if (!queue->writer->next)
         goto critical_error;
     }
-
-
-
-
-
+    queue->writer = queue->writer->next;
+    queue->writer->write = 0;
+    queue->writer->read = 0;
+    queue->writer->state = 0;
+    queue->writer->next = NULL;
   }
 
   /* place task and finish */
-
+  queue->writer->tasks[queue->writer->write++] = task;
   /* cycle buffer */
-  if (
-
-
+  if (queue->writer->write == DEFER_QUEUE_BLOCK_COUNT) {
+    queue->writer->write = 0;
+    queue->writer->state = 1;
   }
-  fio_unlock(&
+  fio_unlock(&queue->lock);
   return;
 
 critical_error:
-  fio_unlock(&
+  fio_unlock(&queue->lock);
   FIO_ASSERT_ALLOC(NULL)
 }
 
-
+#define fio_defer_push_task(func_, arg1_, arg2_)                               \
+  fio_defer_push_task_fn(                                                      \
+      (fio_defer_task_s){.func = func_, .arg1 = arg1_, .arg2 = arg2_},         \
+      &task_queue_normal)
+
+#if FIO_USE_URGENT_QUEUE
+#define fio_defer_push_urgent(func_, arg1_, arg2_)                             \
+  fio_defer_push_task_fn(                                                      \
+      (fio_defer_task_s){.func = func_, .arg1 = arg1_, .arg2 = arg2_},         \
+      &task_queue_urgent)
+#else
+#define fio_defer_push_urgent(func_, arg1_, arg2_)                             \
+  fio_defer_push_task(func_, arg1_, arg2_)
+#endif
+
+static inline fio_defer_task_s fio_defer_pop_task(fio_task_queue_s *queue) {
   fio_defer_task_s ret = (fio_defer_task_s){.func = NULL};
   fio_defer_queue_block_s *to_free = NULL;
   /* lock the state machine, grab/create a task and place it at the tail */
-  fio_lock(&
+  fio_lock(&queue->lock);
 
   /* empty? */
-  if (
-      !deferred.reader->state)
+  if (queue->reader->write == queue->reader->read && !queue->reader->state)
     goto finish;
   /* collect task */
-  ret =
+  ret = queue->reader->tasks[queue->reader->read++];
   /* cycle */
-  if (
-
-
+  if (queue->reader->read == DEFER_QUEUE_BLOCK_COUNT) {
+    queue->reader->read = 0;
+    queue->reader->state = 0;
   }
   /* did we finish the queue in the buffer? */
-  if (
-    if (
-      to_free =
-
+  if (queue->reader->write == queue->reader->read) {
+    if (queue->reader->next) {
+      to_free = queue->reader;
+      queue->reader = queue->reader->next;
     } else {
-      if (
-
-        to_free =
-
-
+      if (queue->reader != &queue->static_queue &&
+          queue->static_queue.state == 2) {
+        to_free = queue->reader;
+        queue->writer = &queue->static_queue;
+        queue->reader = &queue->static_queue;
       }
-
-      0;
+      queue->reader->write = queue->reader->read = queue->reader->state = 0;
    }
  }
 
 finish:
-  if (to_free == &
-
-
+  if (to_free == &queue->static_queue) {
+    queue->static_queue.state = 2;
+    queue->static_queue.next = NULL;
   }
-  fio_unlock(&
+  fio_unlock(&queue->lock);
 
-  if (to_free && to_free != &
+  if (to_free && to_free != &queue->static_queue) {
     fio_free(to_free);
     COUNT_DEALLOC;
   }
@@ -883,25 +722,30 @@ finish:
 }
 
 /* same as fio_defer_clear_queue , just inlined */
-static inline void
-  fio_lock(&
-  while (
-    fio_defer_queue_block_s *tmp =
-
-    if (tmp != &
+static inline void fio_defer_clear_tasks_for_queue(fio_task_queue_s *queue) {
+  fio_lock(&queue->lock);
+  while (queue->reader) {
+    fio_defer_queue_block_s *tmp = queue->reader;
+    queue->reader = queue->reader->next;
+    if (tmp != &queue->static_queue) {
      COUNT_DEALLOC;
      free(tmp);
    }
  }
-
-
-  fio_unlock(&
+  queue->static_queue = (fio_defer_queue_block_s){.next = NULL};
+  queue->reader = queue->writer = &queue->static_queue;
+  fio_unlock(&queue->lock);
 }
 
-static void
+static inline void fio_defer_clear_tasks(void) {
+  fio_defer_clear_tasks_for_queue(&task_queue_normal);
+  fio_defer_clear_tasks_for_queue(&task_queue_urgent);
+}
 
-
-
+static void fio_defer_on_fork(void) {
+  task_queue_normal.lock = FIO_LOCK_INIT;
+  task_queue_urgent.lock = FIO_LOCK_INIT;
+}
 
 /* *****************************************************************************
 External Task API
@@ -912,7 +756,7 @@ int fio_defer(void (*func)(void *, void *), void *arg1, void *arg2) {
   /* must have a task to defer */
   if (!func)
     goto call_error;
-  fio_defer_push_task(
+  fio_defer_push_task(func, arg1, arg2);
   return 0;
 
 call_error:
@@ -922,67 +766,261 @@ call_error:
 /** Performs all deferred functions until the queue had been depleted. */
 void fio_defer_perform(void) {
   for (;;) {
-
+#if FIO_USE_URGENT_QUEUE
+    fio_defer_task_s task = fio_defer_pop_task(&task_queue_urgent);
+    if (!task.func)
+      task = fio_defer_pop_task(&task_queue_normal);
+#else
+    fio_defer_task_s task = fio_defer_pop_task(&task_queue_normal);
+#endif
     if (!task.func)
       return;
     task.func(task.arg1, task.arg2);
   }
 }
 
-/** Returns true if there are deferred functions waiting for execution. */
-int fio_defer_has_queue(void) {
-  return
-
+/** Returns true if there are deferred functions waiting for execution. */
+int fio_defer_has_queue(void) {
+  return task_queue_urgent.reader != task_queue_urgent.writer ||
+         task_queue_urgent.reader->write != task_queue_urgent.reader->read ||
+         task_queue_normal.reader != task_queue_normal.writer ||
+         task_queue_normal.reader->write != task_queue_normal.reader->read;
+}
+
+/** Clears the queue. */
+void fio_defer_clear_queue(void) { fio_defer_clear_tasks(); }
+
+static void fio_defer_thread_wait(void);
+static void *fio_defer_cycle(void *ignr) {
+  do {
+    fio_defer_perform();
+    fio_defer_thread_wait();
+  } while (fio_is_running());
+  return ignr;
+}
+
+/* thread pool type */
+typedef struct {
+  size_t thread_count;
+  void *threads[];
+} fio_defer_thread_pool_s;
+
+/* joins a thread pool */
+static void fio_defer_thread_pool_join(fio_defer_thread_pool_s *pool) {
+  for (size_t i = 0; i < pool->thread_count; ++i) {
+    fio_thread_join(pool->threads[i]);
+  }
+  free(pool);
+}
+
+/* creates a thread pool */
+static fio_defer_thread_pool_s *fio_defer_thread_pool_new(size_t count) {
+  if (!count)
+    count = 1;
+  fio_defer_thread_pool_s *pool =
+      malloc(sizeof(*pool) + (count * sizeof(void *)));
+  FIO_ASSERT_ALLOC(pool);
+  pool->thread_count = count;
+  for (size_t i = 0; i < count; ++i) {
+    pool->threads[i] = fio_thread_new(fio_defer_cycle, NULL);
+    if (!pool->threads[i]) {
+      pool->thread_count = i;
+      goto error;
+    }
+  }
+  return pool;
+error:
+  FIO_LOG_FATAL("couldn't spawn threads for thread pool, attempting shutdown.");
+  fio_stop();
+  fio_defer_thread_pool_join(pool);
+  return NULL;
+}
+
+/* *****************************************************************************
+Section Start Marker
+
+
+
+
+
+
+
+
+Timers
+
+
+
+
+
+
+
+
+
+***************************************************************************** */
+
+typedef struct {
+  fio_ls_embd_s node;
+  struct timespec due;
+  size_t interval; /*in ms */
+  size_t repetitions;
+  void (*task)(void *);
+  void *arg;
+  void (*on_finish)(void *);
+} fio_timer_s;
+
+static fio_ls_embd_s fio_timers = FIO_LS_INIT(fio_timers);
+
+static fio_lock_i fio_timer_lock = FIO_LOCK_INIT;
+
+/** Marks the current time as facil.io's cycle time */
+static inline void fio_mark_time(void) {
+  clock_gettime(CLOCK_REALTIME, &fio_data->last_cycle);
+}
+
+/** Calculates the due time for a task, given it's interval */
+static struct timespec fio_timer_calc_due(size_t interval) {
+  struct timespec now = fio_last_tick();
+  if (interval > 1000) {
+    now.tv_sec += interval / 1000;
+    interval -= interval / 1000;
+  }
+  now.tv_nsec += (interval * 1000000UL);
+  if (now.tv_nsec > 1000000000L) {
+    now.tv_nsec -= 1000000000L;
+    now.tv_sec += 1;
+  }
+  return now;
+}
+
+/** Returns the number of miliseconds until the next event, up to FIO_POLL_TICK
+ */
+static size_t fio_timer_calc_first_interval(void) {
+  if (fio_defer_has_queue())
+    return 0;
+  if (fio_ls_embd_is_empty(&fio_timers)) {
+    return FIO_POLL_TICK;
+  }
+  struct timespec now = fio_last_tick();
+  struct timespec due =
+      FIO_LS_EMBD_OBJ(fio_timer_s, node, fio_timers.next)->due;
+  if (due.tv_sec < now.tv_sec ||
+      (due.tv_sec == now.tv_sec && due.tv_nsec <= now.tv_nsec))
+    return 0;
+  size_t interval = 1000L * (due.tv_sec - now.tv_sec);
+  if (due.tv_nsec >= now.tv_nsec) {
+    interval += (due.tv_nsec - now.tv_nsec) / 1000000L;
+  } else {
+    interval -= (now.tv_nsec - due.tv_nsec) / 1000000L;
+  }
+  if (interval > FIO_POLL_TICK)
+    interval = FIO_POLL_TICK;
+  return interval;
+}
+
+/* simple a<=>b if "a" is bigger a negative result is returned, eq == 0. */
+static int fio_timer_compare(struct timespec a, struct timespec b) {
+  if (a.tv_sec == b.tv_sec) {
+    if (a.tv_nsec < b.tv_nsec)
+      return 1;
+    if (a.tv_nsec > b.tv_nsec)
+      return -1;
+    return 0;
+  }
+  if (a.tv_sec < b.tv_sec)
+    return 1;
+  return -1;
+}
+
+/** Places a timer in an ordered linked list. */
+static void fio_timer_add_order(fio_timer_s *timer) {
+  timer->due = fio_timer_calc_due(timer->interval);
+  // fio_ls_embd_s *pos = &fio_timers;
+  fio_lock(&fio_timer_lock);
+  FIO_LS_EMBD_FOR(&fio_timers, node) {
+    fio_timer_s *t2 = FIO_LS_EMBD_OBJ(fio_timer_s, node, node);
+    if (fio_timer_compare(timer->due, t2->due) >= 0) {
+      fio_ls_embd_push(node, &timer->node);
+      goto finish;
+    }
+  }
+  fio_ls_embd_push(&fio_timers, &timer->node);
+finish:
+  fio_unlock(&fio_timer_lock);
+}
+
+/** Performs a timer task and re-adds it to the queue (or cleans it up) */
+static void fio_timer_perform_single(void *timer_, void *ignr) {
+  fio_timer_s *timer = timer_;
+  timer->task(timer->arg);
+  if (!timer->repetitions || fio_atomic_sub(&timer->repetitions, 1))
+    goto reschedule;
+  if (timer->on_finish)
+    timer->on_finish(timer->arg);
+  free(timer);
+  return;
+  (void)ignr;
+reschedule:
+  fio_timer_add_order(timer);
 }
 
-/**
-void
-
-
-
-
-
-
+/** schedules all timers that are due to be performed. */
+static void fio_timer_schedule(void) {
+  struct timespec now = fio_last_tick();
+  fio_lock(&fio_timer_lock);
+  while (fio_ls_embd_any(&fio_timers) &&
+         fio_timer_compare(
+             FIO_LS_EMBD_OBJ(fio_timer_s, node, fio_timers.next)->due, now) >=
+             0) {
+    fio_ls_embd_s *tmp = fio_ls_embd_remove(fio_timers.next);
+    fio_defer(fio_timer_perform_single, FIO_LS_EMBD_OBJ(fio_timer_s, node, tmp),
+              NULL);
+  }
+  fio_unlock(&fio_timer_lock);
 }
 
-
-
-
-
-
-
-
-
-  for (size_t i = 0; i < pool->thread_count; ++i) {
-    fio_thread_join(pool->threads[i]);
+static void fio_timer_clear_all(void) {
+  fio_lock(&fio_timer_lock);
+  while (fio_ls_embd_any(&fio_timers)) {
+    fio_timer_s *timer =
+        FIO_LS_EMBD_OBJ(fio_timer_s, node, fio_ls_embd_pop(&fio_timers));
+    if (timer->on_finish)
+      timer->on_finish(timer->arg);
+    free(timer);
   }
-
+  fio_unlock(&fio_timer_lock);
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+/**
+ * Creates a timer to run a task at the specified interval.
+ *
+ * The task will repeat `repetitions` times. If `repetitions` is set to 0, task
+ * will repeat forever.
+ *
+ * Returns -1 on error.
+ *
+ * The `on_finish` handler is always called (even on error).
+ */
+int fio_run_every(size_t milliseconds, size_t repetitions, void (*task)(void *),
+                  void *arg, void (*on_finish)(void *)) {
+  if (!task || (milliseconds == 0 && !repetitions))
+    return -1;
+  fio_timer_s *timer = malloc(sizeof(*timer));
+  FIO_ASSERT_ALLOC(timer);
+  fio_mark_time();
+  *timer = (fio_timer_s){
+      .due = fio_timer_calc_due(milliseconds),
+      .interval = milliseconds,
+      .repetitions = repetitions,
+      .task = task,
+      .arg = arg,
+      .on_finish = on_finish,
+  };
+  fio_timer_add_order(timer);
+  return 0;
 }
+
 /* *****************************************************************************
 Section Start Marker
 
@@ -1451,12 +1489,12 @@ static size_t fio_poll(void) {
     } else {
       // no error, then it's an active event(s)
       if (events[i].events & EPOLLOUT) {
-
-
+        fio_defer_push_urgent(deferred_on_ready,
+                              (void *)fd2uuid(events[i].data.fd), NULL);
       }
       if (events[i].events & EPOLLIN)
-
-
+        fio_defer_push_task(deferred_on_data,
+                            (void *)fd2uuid(events[i].data.fd), NULL);
     }
   } // end for loop
   total += active_count;
@@ -1584,7 +1622,8 @@ static size_t fio_poll(void) {
   for (int i = 0; i < active_count; i++) {
     // test for event(s) type
     if (events[i].filter == EVFILT_READ) {
-
+      fio_defer_push_task(deferred_on_data, (void *)fd2uuid(events[i].udata),
+                          NULL);
     }
     // connection errors should be reported after `read` in case there's data
     // left in the buffer... not that the edge case matters.
@@ -1598,7 +1637,8 @@ static size_t fio_poll(void) {
       fio_force_close(fd2uuid(events[i].udata));
     } else if (events[i].filter == EVFILT_WRITE) {
       // we can only write if there's no error in the socket
-
+      fio_defer_push_urgent(deferred_on_ready,
+                            ((void *)fd2uuid(events[i].udata)), NULL);
     }
   }
 } else if (active_count < 0) {
@@ -1732,12 +1772,12 @@ static size_t fio_poll(void) {
     if (list[i].revents & FIO_POLL_READ_EVENTS) {
       // FIO_LOG_DEBUG("Poll Read %zu => %p", i, (void *)fd2uuid(i));
       fio_poll_remove_read(i);
-
+      fio_defer_push_task(deferred_on_data, (void *)fd2uuid(i), NULL);
     }
     if (list[i].revents & FIO_POLL_WRITE_EVENTS) {
       // FIO_LOG_DEBUG("Poll Write %zu => %p", i, (void *)fd2uuid(i));
       fio_poll_remove_write(i);
-
+      fio_defer_push_urgent(deferred_on_ready, (void *)fd2uuid(i), NULL);
     }
     if (list[i].revents & (POLLHUP | POLLERR)) {
       // FIO_LOG_DEBUG("Poll Hangup %zu => %p", i, (void *)fd2uuid(i));
@@ -1847,7 +1887,7 @@ static void deferred_on_close(void *uuid_, void *pr_) {
   pr->on_close((intptr_t)uuid_, pr);
   return;
 postpone:
-
+  fio_defer_push_task(deferred_on_close, uuid_, pr_);
 }
 
 static void deferred_on_shutdown(void *arg, void *arg2) {
@@ -1880,19 +1920,12 @@ static void deferred_on_shutdown(void *arg, void *arg2) {
   }
   return;
 postpone:
-
+  fio_defer_push_task(deferred_on_shutdown, arg, NULL);
   (void)arg2;
 }
 
-static void
+static void deferred_on_ready_usr(void *arg, void *arg2) {
   errno = 0;
-  if (fio_flush((intptr_t)arg) > 0 || errno == EWOULDBLOCK) {
-    fio_poll_add_write(fio_uuid2fd(arg));
-    return;
-  }
-  if (!uuid_data(arg).protocol) {
-    return;
-  }
   fio_protocol_s *pr = protocol_try_lock(fio_uuid2fd(arg), FIO_PR_LOCK_WRITE);
   if (!pr) {
     if (errno == EBADF)
@@ -1903,7 +1936,20 @@ static void deferred_on_ready(void *arg, void *arg2) {
   protocol_unlock(pr, FIO_PR_LOCK_WRITE);
   return;
 postpone:
-
+  fio_defer_push_task(deferred_on_ready, arg, NULL);
+  (void)arg2;
+}
+
+static void deferred_on_ready(void *arg, void *arg2) {
+  errno = 0;
+  if (fio_flush((intptr_t)arg) > 0 || errno == EWOULDBLOCK) {
+    fio_poll_add_write(fio_uuid2fd(arg));
+    return;
+  }
+  if (!uuid_data(arg).protocol) {
+    return;
+  }
+  fio_defer_push_task(deferred_on_ready_usr, arg, NULL);
   (void)arg2;
 }
 
@@ -1928,7 +1974,7 @@ static void deferred_on_data(void *uuid, void *arg2) {
 postpone:
   if (arg2) {
     /* the event is being forced, so force rescheduling */
-
+    fio_defer_push_task(deferred_on_data, (void *)uuid, (void *)1);
   } else {
     /* the protocol was locked, so there might not be any need for the event */
     fio_poll_add_read(fio_uuid2fd((intptr_t)uuid));
@@ -1950,7 +1996,7 @@ static void deferred_ping(void *arg, void *arg2) {
   protocol_unlock(pr, FIO_PR_LOCK_WRITE);
   return;
 postpone:
-
+  fio_defer_push_task(deferred_ping, arg, NULL);
   (void)arg2;
 }
 
@@ -1962,13 +2008,13 @@ void fio_force_event(intptr_t uuid, enum fio_io_event ev) {
   switch (ev) {
   case FIO_EVENT_ON_DATA:
     fio_trylock(&uuid_data(uuid).scheduled);
-
+    fio_defer_push_task(deferred_on_data, (void *)uuid, (void *)1);
     break;
   case FIO_EVENT_ON_TIMEOUT:
-
+    fio_defer_push_task(deferred_ping, (void *)uuid, NULL);
     break;
   case FIO_EVENT_ON_READY:
-
+    fio_defer_push_urgent(deferred_on_ready, (void *)uuid, NULL);
     break;
   }
 }
@@ -2501,10 +2547,13 @@ ssize_t fio_write2_fn(intptr_t uuid, fio_write_args_s options) {
     packet->dealloc = (options.after.dealloc ? options.after.dealloc : free);
   }
   /* add packet to outgoing list */
+  uint8_t was_empty = 1;
   fio_lock(&uuid_data(uuid).sock_lock);
   if (!uuid_is_valid(uuid)) {
     goto locked_error;
   }
+  if (uuid_data(uuid).packet)
+    was_empty = 0;
   if (options.urgent == 0) {
     *uuid_data(uuid).packet_last = packet;
     uuid_data(uuid).packet_last = &packet->next;
@@ -2518,10 +2567,12 @@ ssize_t fio_write2_fn(intptr_t uuid, fio_write_args_s options) {
       uuid_data(uuid).packet_last = &packet->next;
     }
   }
+  fio_atomic_add(&uuid_data(uuid).packet_count, 1);
   fio_unlock(&uuid_data(uuid).sock_lock);
 
-
-
+  if (was_empty) {
+    fio_defer_push_urgent(deferred_on_ready, (void *)uuid, NULL);
+  }
   return 0;
 locked_error:
   fio_unlock(&uuid_data(uuid).sock_lock);
@@ -2835,7 +2886,7 @@ static int fio_attach__internal(void *uuid_, void *protocol_) {
   uuid_data(uuid).active = fio_data->last_cycle.tv_sec;
   fio_unlock(&uuid_data(uuid).protocol_lock);
   if (old_pr) {
-
+    fio_defer_push_task(deferred_on_close, (void *)uuid, old_pr);
   } else if (protocol) {
     fio_poll_add(fio_uuid2fd(uuid));
   }
@@ -2844,7 +2895,7 @@ static int fio_attach__internal(void *uuid_, void *protocol_) {
 invalid_uuid:
   fio_unlock(&uuid_data(uuid).protocol_lock);
   if (protocol)
-
+    fio_defer_push_task(deferred_on_close, (void *)uuid, protocol);
   if (uuid == -1)
     errno = EBADF;
   else
@@ -2852,7 +2903,8 @@ invalid_uuid:
   return -1;
 }
 
-/**
+/**
+ * Attaches (or updates) a protocol object to a socket UUID.
  * Returns -1 on error and 0 on success.
  */
 void fio_attach(intptr_t uuid, fio_protocol_s *protocol) {
@@ -2971,8 +3023,8 @@ void fio_state_callback_force(callback_type_e c_type) {
   case FIO_CALL_ON_IDLE: /* idle callbacks are orderless and evented */
     FIO_LS_EMBD_FOR(&callback_collection[c_type].callbacks, pos) {
       callback_data_s *tmp = FIO_LS_EMBD_OBJ(callback_data_s, node, pos);
-
-
+      fio_defer_push_task(fio_state_on_idle_perform,
+                          (void *)(uintptr_t)tmp->func, tmp->arg);
     }
     break;
 
@@ -3058,7 +3110,7 @@ postpone:
     fio_free(args);
     return;
   }
-
+  fio_defer_push_task(fio_io_task_perform, uuid_, args_);
 }
 /**
  * Schedules a protected connection task. The task will run within the
@@ -3071,14 +3123,14 @@ void fio_defer_io_task FIO_IGNORE_MACRO(intptr_t uuid,
                                         fio_defer_iotask_args_s args) {
   if (!args.task) {
     if (args.fallback)
-
-
+      fio_defer_push_task((void (*)(void *, void *))args.fallback, (void *)uuid,
+                          args.udata);
     return;
   }
   fio_defer_iotask_args_s *cpy = fio_malloc(sizeof(*cpy));
   FIO_ASSERT_ALLOC(cpy);
   *cpy = args;
-
+  fio_defer_push_task(fio_io_task_perform, (void *)uuid, cpy);
 }
 
 /* *****************************************************************************
@@ -3264,12 +3316,15 @@ static void fio_review_timeout(void *arg, void *ignr) {
   if (!fd_data(fd).protocol || (fd_data(fd).active + timeout >= review))
     goto finish;
   tmp = protocol_try_lock(fd, FIO_PR_LOCK_STATE);
-  if (!tmp)
+  if (!tmp) {
+    if (errno == EBADF)
+      goto finish;
     goto reschedule;
+  }
   if (prt_meta(tmp).locks[FIO_PR_LOCK_TASK] ||
       prt_meta(tmp).locks[FIO_PR_LOCK_WRITE])
     goto unlock;
-
+  fio_defer_push_task(deferred_ping, (void *)fio_fd2uuid((int)fd), NULL);
 unlock:
   protocol_unlock(tmp, FIO_PR_LOCK_STATE);
 finish:
@@ -3282,7 +3337,7 @@ finish:
   return;
 }
 reschedule:
-
+  fio_defer_push_task(fio_review_timeout, (void *)fd, NULL);
 }
 
 /* reactor pattern cycling - common actions */
@@ -3312,7 +3367,7 @@ static void fio_cycle_schedule_events(void) {
   if (fio_data->need_review && fio_data->last_cycle.tv_sec != last_to_review) {
     last_to_review = fio_data->last_cycle.tv_sec;
     fio_data->need_review = 0;
-
+    fio_defer_push_task(fio_review_timeout, (void *)0, NULL);
   }
 }
 
@@ -3320,7 +3375,7 @@ static void fio_cycle_schedule_events(void) {
 static void fio_cycle_unwind(void *ignr, void *ignr2) {
   if (fio_data->connection_count) {
     fio_cycle_schedule_events();
-
+    fio_defer_push_task(fio_cycle_unwind, ignr, ignr2);
     return;
   }
   fio_stop();
@@ -3331,7 +3386,7 @@ static void fio_cycle_unwind(void *ignr, void *ignr2) {
 static void fio_cycle(void *ignr, void *ignr2) {
   fio_cycle_schedule_events();
   if (fio_data->active) {
-
+    fio_defer_push_task(fio_cycle, ignr, ignr2);
     return;
   }
   return;
@@ -3360,7 +3415,7 @@ static void fio_worker_startup(void) {
   fio_data->need_review = 1;
 
   /* the cycle task will loop by re-scheduling until it's time to finish */
-
+  fio_defer_push_task(fio_cycle, NULL, NULL);
 
   /* A single thread doesn't need a pool. */
   if (fio_data->threads > 1) {
@@ -3380,10 +3435,10 @@ static void fio_worker_cleanup(void) {
   fio_state_callback_force(FIO_CALL_ON_SHUTDOWN);
   for (size_t i = 0; i <= fio_data->max_protocol_fd; ++i) {
     if (fd_data(i).protocol) {
-
+      fio_defer_push_task(deferred_on_shutdown, (void *)fd2uuid(i), NULL);
     }
   }
-
+  fio_defer_push_task(fio_cycle_unwind, NULL, NULL);
   fio_defer_perform();
   for (size_t i = 0; i <= fio_data->max_protocol_fd; ++i) {
     if (fd_data(i).protocol || fd_data(i).open) {
@@ -3443,7 +3498,7 @@ static void *fio_sentinel_worker_thread(void *arg) {
       FIO_LOG_WARNING("Child worker (%d) shutdown. Respawning worker.",
                       child);
     }
-
+    fio_defer_push_task(fio_sentinel_task, NULL, NULL);
     fio_unlock(&fio_fork_lock);
   }
 #endif
@@ -4246,17 +4301,17 @@ typedef enum fio_cluster_message_type_e {
 
 typedef struct fio_collection_s fio_collection_s;
 
+#pragma pack(1)
 typedef struct {
-
+  size_t name_len;
+  char *name;
+  size_t ref;
   fio_ls_embd_s subscriptions;
   fio_collection_s *parent;
+  fio_match_fn match;
   fio_lock_i lock;
 } channel_s;
-
-typedef struct {
-  channel_s ch; /* MUST be on top, so *channel == *pattern */
-  fio_match_fn match;
-} pattern_s;
+#pragma pack()
 
 struct subscription_s {
   fio_ls_embd_s node;
@@ -4272,15 +4327,58 @@ struct subscription_s {
|
|
4272
4327
|
fio_lock_i unsubscribed;
|
4273
4328
|
};
|
4274
4329
|
|
4330
|
+
/* Use `malloc` / `free`, because channles might have a long life. */
|
4331
|
+
|
4332
|
+
/** Used internally by the Set object to create a new channel. */
|
4333
|
+
static channel_s *fio_channel_copy(channel_s *src) {
|
4334
|
+
channel_s *dest = malloc(sizeof(*dest) + src->name_len + 1);
|
4335
|
+
FIO_ASSERT_ALLOC(dest);
|
4336
|
+
dest->name_len = src->name_len;
|
4337
|
+
dest->match = src->match;
|
4338
|
+
dest->parent = src->parent;
|
4339
|
+
dest->name = (char *)(dest + 1);
|
4340
|
+
if (src->name_len)
|
4341
|
+
memcpy(dest->name, src->name, src->name_len);
|
4342
|
+
dest->name[src->name_len] = 0;
|
4343
|
+
dest->subscriptions = (fio_ls_embd_s)FIO_LS_INIT(dest->subscriptions);
|
4344
|
+
dest->ref = 1;
|
4345
|
+
dest->lock = FIO_LOCK_INIT;
|
4346
|
+
return dest;
|
4347
|
+
}
|
4348
|
+
/** Frees a channel (reference counting). */
|
4349
|
+
static void fio_channel_free(channel_s *ch) {
|
4350
|
+
if (!ch)
|
4351
|
+
return;
|
4352
|
+
if (fio_atomic_sub(&ch->ref, 1))
|
4353
|
+
return;
|
4354
|
+
free(ch);
|
4355
|
+
}
|
4356
|
+
/** Increases a channel's reference count. */
|
4357
|
+
static void fio_channel_dup(channel_s *ch) {
|
4358
|
+
if (!ch)
|
4359
|
+
return;
|
4360
|
+
fio_atomic_add(&ch->ref, 1);
|
4361
|
+
}
|
4362
|
+
/** Tests if two channels are equal. */
|
4363
|
+
static int fio_channel_cmp(channel_s *ch1, channel_s *ch2) {
|
4364
|
+
return ch1->name_len == ch2->name_len && ch1->match == ch2->match &&
|
4365
|
+
!memcmp(ch1->name, ch2->name, ch1->name_len);
|
4366
|
+
}
|
4367
|
+
/* pub/sub channels and core data sets have a long life, so avoid fio_malloc */
|
4368
|
+
#if !FIO_FORCE_MALLOC
|
4369
|
+
#define FIO_FORCE_MALLOC 1
|
4370
|
+
#define FIO_FORCE_MALLOC_IS_TMP 1
|
4371
|
+
#endif
|
4372
|
+
|
4275
4373
|
#define FIO_SET_NAME fio_ch_set
|
4276
4374
|
#define FIO_SET_OBJ_TYPE channel_s *
|
4277
|
-
#define FIO_SET_OBJ_COMPARE(o1, o2)
|
4278
|
-
#define FIO_SET_OBJ_DESTROY(obj)
|
4375
|
+
#define FIO_SET_OBJ_COMPARE(o1, o2) fio_channel_cmp((o1), (o2))
|
4376
|
+
#define FIO_SET_OBJ_DESTROY(obj) fio_channel_free((obj))
|
4377
|
+
#define FIO_SET_OBJ_COPY(dest, src) ((dest) = fio_channel_copy((src)))
|
4279
4378
|
#include <fio.h>
|
4280
4379
|
|
4281
|
-
#define
|
4282
|
-
#define
|
4283
|
-
#define FIO_SET_OBJ_COMPARE(k1, k2) ((k1) == (k2))
|
4380
|
+
#define FIO_ARY_NAME fio_meta_ary
|
4381
|
+
#define FIO_ARY_TYPE fio_msg_metadata_fn
|
4284
4382
|
#include <fio.h>
|
4285
4383
|
|
4286
4384
|
#define FIO_SET_NAME fio_engine_set
|
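`fio_channel_copy` above packs the struct and its name into one allocation, pointing `name` at the bytes that follow the struct, so a single `free` releases both. The same layout in isolation, using only the standard library and a trimmed-down field set:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      size_t name_len;
      char *name; /* points into the same allocation, right after the struct */
      size_t ref;
    } chan_s;

    static chan_s *chan_new(const char *name, size_t len) {
      chan_s *ch = malloc(sizeof(*ch) + len + 1); /* struct + name + NUL */
      if (!ch)
        return NULL;
      ch->name_len = len;
      ch->name = (char *)(ch + 1); /* trailing bytes are the name buffer */
      memcpy(ch->name, name, len);
      ch->name[len] = 0;
      ch->ref = 1;
      return ch;
    }

    int main(void) {
      chan_s *ch = chan_new("news.local", 10);
      if (!ch)
        return 1;
      printf("channel \"%s\" (%zu name bytes, one allocation)\n", ch->name,
             ch->name_len);
      free(ch); /* one free releases both the struct and the name */
    }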
@@ -4288,6 +4386,10 @@ struct subscription_s {
 #define FIO_SET_OBJ_COMPARE(k1, k2) ((k1) == (k2))
 #include <fio.h>

+#if FIO_FORCE_MALLOC_IS_TMP
+#undef FIO_FORCE_MALLOC
+#endif
+
 struct fio_collection_s {
   fio_ch_set_s channels;
   fio_lock_i lock;
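The `FIO_FORCE_MALLOC` lines straddling these hunks implement a temporary macro override: force the flag on while including `fio.h` for the long-lived pub/sub containers, then restore whatever the build had set. The shape of that trick, shown with a hypothetical `OPTION` macro:

    /* force OPTION on, but remember whether we were the ones who set it */
    #if !OPTION /* undefined macros evaluate to 0 inside #if */
    #define OPTION 1
    #define OPTION_IS_TMP 1
    #endif

    /* ... include or expand the code that honors OPTION ... */

    /* undo the override, leaving a user-supplied OPTION untouched */
    #if OPTION_IS_TMP
    #undef OPTION
    #undef OPTION_IS_TMP
    #endif

    int main(void) { return 0; }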
@@ -4305,7 +4407,7 @@ static struct {
     fio_lock_i lock;
   } engines;
   struct {
-
+    fio_meta_ary_s ary;
     fio_lock_i lock;
   } meta;
 } fio_postoffice = {

@@ -4319,23 +4421,169 @@ static struct {
 /** used to contain the message before it's passed to the handler */
 typedef struct {
   fio_msg_s msg;
-  fio_msg_metadata_s *meta;
   size_t marker;
+  size_t meta_len;
+  fio_msg_metadata_s *meta;
 } fio_msg_client_s;

 /** used to contain the message internally while publishing */
 typedef struct {
-  fio_msg_metadata_s *meta;
   fio_str_info_s channel;
   fio_str_info_s data;
   uintptr_t ref; /* internal reference counter */
   int32_t filter;
   int8_t is_json;
+  size_t meta_len;
+  fio_msg_metadata_s meta[];
 } fio_msg_internal_s;

 /** The default engine (settable). */
 fio_pubsub_engine_s *FIO_PUBSUB_DEFAULT = FIO_PUBSUB_CLUSTER;

+/* *****************************************************************************
+Internal message object creation
+***************************************************************************** */
+
+#if 1 /* Copy Meta-Data Array vs. lock and unlock per callback */
+
+/** returns a temporary fio_meta_ary_s with a copy of the metadata array */
+static fio_meta_ary_s fio_postoffice_meta_copy_new(void) {
+  fio_meta_ary_s t = FIO_ARY_INIT;
+  if (!fio_meta_ary_count(&fio_postoffice.meta.ary)) {
+    return t;
+  }
+  fio_lock(&fio_postoffice.meta.lock);
+  size_t len = fio_meta_ary_count(&fio_postoffice.meta.ary);
+  if (len) {
+    t.end = t.capa = len;
+    t.arry = fio_malloc(sizeof(*t.arry) * len);
+    FIO_ASSERT_ALLOC(t.arry);
+    memcpy(t.arry, fio_meta_ary_to_a(&fio_postoffice.meta.ary),
+           sizeof(*t.arry) * len);
+  }
+  fio_unlock(&fio_postoffice.meta.lock);
+  return t;
+}
+
+/** frees a temporary copy created by postoffice_meta_copy_new */
+static void fio_postoffice_meta_copy_free(fio_meta_ary_s cpy) {
+  fio_free(cpy.arry);
+}
+
+static fio_msg_internal_s *
+fio_pubsub_create_message(int32_t filter, fio_str_info_s ch,
+                          fio_str_info_s data, int8_t is_json, int8_t cpy) {
+  fio_meta_ary_s t = FIO_ARY_INIT;
+  if (!filter)
+    t = fio_postoffice_meta_copy_new();
+  fio_msg_internal_s *m = fio_malloc(sizeof(*m) + (sizeof(*m->meta) * t.end) +
+                                     (ch.len + 1) + (data.len + 1));
+  *m = (fio_msg_internal_s){
+      .filter = filter,
+      .channel =
+          (fio_str_info_s){.data = (char *)(m->meta + t.end), .len = ch.len},
+      .data = (fio_str_info_s){.data = ((char *)(m->meta + t.end) + ch.len + 1),
+                               .len = data.len},
+      .is_json = is_json,
+      .ref = 1,
+      .meta_len = t.end,
+  };
+  // m->channel.data[ch.len] = 0; /* redundant, fio_malloc is all zero */
+  // m->data.data[data.len] = 0; /* redundant, fio_malloc is all zero */
+  if (cpy) {
+    memcpy(m->channel.data, ch.data, ch.len);
+    memcpy(m->data.data, data.data, data.len);
+    while (t.end) {
+      --t.end;
+      m->meta[t.end] = t.arry[t.end](m->channel, m->data, is_json);
+    }
+  }
+  fio_postoffice_meta_copy_free(t);
+  return m;
+}
+
+static void fio_pubsub_create_message_update_meta(fio_msg_internal_s *m) {
+  if (m->filter || !m->meta_len)
+    return;
+  fio_meta_ary_s t = fio_postoffice_meta_copy_new();
+  if (t.end > m->meta_len)
+    t.end = m->meta_len;
+  m->meta_len = t.end;
+  while (t.end) {
+    --t.end;
+    m->meta[t.end] = t.arry[t.end](m->channel, m->data, m->is_json);
+  }
+  fio_postoffice_meta_copy_free(t);
+}
+
+#else
+
+/** returns the pub/sub metadata count safely (locks) */
+static size_t fio_postoffice_meta_count(void) {
+  size_t count;
+  fio_lock(&fio_postoffice.meta.lock);
+  count = fio_meta_ary_count(&fio_postoffice.meta.ary);
+  fio_unlock(&fio_postoffice.meta.lock);
+  return count;
+}
+/** collects a callback by index, from within a loop */
+static fio_msg_metadata_fn fio_postoffice_meta_index(intptr_t index) {
+  fio_msg_metadata_fn cb;
+  fio_lock(&fio_postoffice.meta.lock);
+  cb = fio_meta_ary_get(&fio_postoffice.meta.ary, index);
+  fio_unlock(&fio_postoffice.meta.lock);
+  return cb;
+}
+
+static fio_msg_internal_s *
+fio_pubsub_create_message(int32_t filter, fio_str_info_s ch,
+                          fio_str_info_s data, int8_t is_json, int8_t cpy) {
+  size_t meta_len = 0;
+  if (!filter)
+    meta_len = fio_postoffice_meta_count();
+  fio_msg_internal_s *m =
+      fio_malloc(sizeof(*m) + (sizeof(*m->meta) * meta_len) + (ch.len + 1) +
+                 (data.len + 1));
+  *m = (fio_msg_internal_s){
+      .filter = filter,
+      .channel =
+          (fio_str_info_s){.data = (char *)(m->meta + meta_len), .len = ch.len},
+      .data =
+          (fio_str_info_s){.data = ((char *)(m->meta + meta_len) + ch.len + 1),
+                           .len = data.len},
+      .is_json = is_json,
+      .ref = 1,
+      .meta_len = meta_len,
+  };
+  // m->channel.data[ch.len] = 0; /* redundant, fio_malloc is all zero */
+  // m->data.data[data.len] = 0; /* redundant, fio_malloc is all zero */
+  if (cpy) {
+    memcpy(m->channel.data, ch.data, ch.len);
+    memcpy(m->data.data, data.data, data.len);
+    while (meta_len) {
+      --meta_len;
+      fio_msg_metadata_fn cb = fio_postoffice_meta_index(meta_len);
+      if (cb)
+        m->meta[meta_len] = cb(m->channel, m->data, is_json);
+    }
+  }
+  return m;
+}
+
+static void fio_pubsub_create_message_update_meta(fio_msg_internal_s *m) {
+  if (m->filter || !m->meta_len)
+    return;
+  size_t meta_len = m->meta_len;
+  while (meta_len) {
+    --meta_len;
+    fio_msg_metadata_fn cb = fio_postoffice_meta_index(meta_len);
+    if (cb)
+      m->meta[meta_len] = cb(m->channel, m->data, m->is_json);
+  }
+}
+
+#endif
+
 /* *****************************************************************************
 Cluster forking handler
 ***************************************************************************** */
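Two details in the block above are worth isolating. First, `fio_msg_internal_s` now ends in a C99 flexible array member, so the metadata slots, the channel name and the payload all share one `fio_malloc` block. Second, the `#if 1` picks between copying the metadata-callback array once per message and taking the lock once per callback; copying keeps user callbacks from ever running under the lock. A reduced model of the single-buffer layout (plain `calloc`, with `int` slots standing in for `fio_msg_metadata_s`):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      size_t ref;      /* reference counter */
      size_t meta_len; /* number of metadata slots */
      char *channel, *data;
      int meta[]; /* flexible array member - sized at allocation time */
    } msg_s;

    static msg_s *msg_new(const char *ch, const char *data, size_t meta_len) {
      size_t ch_len = strlen(ch), data_len = strlen(data);
      /* one zeroed block: struct + meta slots + channel + NUL + data + NUL */
      msg_s *m = calloc(1, sizeof(*m) + sizeof(int) * meta_len + ch_len + 1 +
                               data_len + 1);
      if (!m)
        return NULL;
      m->ref = 1;
      m->meta_len = meta_len;
      m->channel = (char *)(m->meta + meta_len); /* strings follow the slots */
      m->data = m->channel + ch_len + 1;
      memcpy(m->channel, ch, ch_len);
      memcpy(m->data, data, data_len);
      return m;
    }

    int main(void) {
      msg_s *m = msg_new("chat", "hello", 2);
      if (!m)
        return 1;
      printf("%s -> %s (%zu meta slots, one allocation)\n", m->channel,
             m->data, m->meta_len);
      free(m); /* everything goes away at once */
    }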
@@ -4376,8 +4624,8 @@ static void fio_pubsub_on_fork(void) {
 Channel Subscription Management
 ***************************************************************************** */

-static void pubsub_on_channel_create(channel_s *ch
-static void pubsub_on_channel_destroy(channel_s *ch
+static void pubsub_on_channel_create(channel_s *ch);
+static void pubsub_on_channel_destroy(channel_s *ch);

 /* some common tasks extracted */
 static inline channel_s *fio_filter_dup_lock_internal(channel_s *ch,

@@ -4385,7 +4633,7 @@ static inline channel_s *fio_filter_dup_lock_internal(channel_s *ch,
                                                       fio_collection_s *c) {
   fio_lock(&c->lock);
   ch = fio_ch_set_insert(&c->channels, hashed, ch);
-
+  fio_channel_dup(ch);
   fio_lock(&ch->lock);
   fio_unlock(&c->lock);
   return ch;

@@ -4393,63 +4641,49 @@ static inline channel_s *fio_filter_dup_lock_internal(channel_s *ch,

 /** Creates / finds a filter channel, adds a reference count and locks it. */
 static channel_s *fio_filter_dup_lock(uint32_t filter) {
-  channel_s
-
-
-      .id = FIO_STR_INIT,
-      .subscriptions = FIO_LS_INIT(ch->subscriptions),
+  channel_s ch = (channel_s){
+      .name = (char *)&filter,
+      .name_len = (sizeof(filter)),
       .parent = &fio_postoffice.filters,
-      .
+      .ref = 8, /* avoid freeing stack memory */
   };
-
-  fio_str_freeze(&ch->id);
-  return fio_filter_dup_lock_internal(ch, filter, &fio_postoffice.filters);
+  return fio_filter_dup_lock_internal(&ch, filter, &fio_postoffice.filters);
 }

 /** Creates / finds a pubsub channel, adds a reference count and locks it. */
 static channel_s *fio_channel_dup_lock(fio_str_info_s name) {
-  channel_s
-
-
-      .id = FIO_STR_INIT,
-      .subscriptions = FIO_LS_INIT(ch->subscriptions),
+  channel_s ch = (channel_s){
+      .name = name.data,
+      .name_len = name.len,
       .parent = &fio_postoffice.pubsub,
-      .
+      .ref = 8, /* avoid freeing stack memory */
   };
-
-
-
-
-
-    pubsub_on_channel_create(ch, NULL);
+  uint64_t hashed_name = fio_siphash(name.data, name.len);
+  channel_s *ch_p =
+      fio_filter_dup_lock_internal(&ch, hashed_name, &fio_postoffice.pubsub);
+  if (fio_ls_embd_is_empty(&ch_p->subscriptions)) {
+    pubsub_on_channel_create(ch_p);
   }
-  return
+  return ch_p;
 }

 /** Creates / finds a pattern channel, adds a reference count and locks it. */
 static channel_s *fio_channel_match_dup_lock(fio_str_info_s name,
                                              fio_match_fn match) {
-
-
-
-      .
-      {
-          .id = FIO_STR_INIT,
-          .subscriptions = FIO_LS_INIT(ch->ch.subscriptions),
-          .parent = &fio_postoffice.patterns,
-          .lock = FIO_LOCK_INIT,
-      },
+  channel_s ch = (channel_s){
+      .name = name.data,
+      .name_len = name.len,
+      .parent = &fio_postoffice.patterns,
       .match = match,
+      .ref = 8, /* avoid freeing stack memory */
   };
-
-
-
-
-
-  if (fio_ls_embd_is_empty(&ch->ch.subscriptions)) {
-    pubsub_on_channel_create(&ch->ch, match);
+  uint64_t hashed_name = fio_siphash(name.data, name.len);
+  channel_s *ch_p =
+      fio_filter_dup_lock_internal(&ch, hashed_name, &fio_postoffice.patterns);
+  if (fio_ls_embd_is_empty(&ch_p->subscriptions)) {
+    pubsub_on_channel_create(ch_p);
   }
-  return
+  return ch_p;
 }

 /* to be used for reference counting (subtracting) */
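The `.ref = 8` on the stack-allocated templates above is a guard, not a real count: the set stores a heap copy (via `FIO_SET_OBJ_COPY` / `fio_channel_copy`), so if `fio_channel_free` is ever handed the template itself, the inflated counter keeps `free` from running on stack memory. A toy demonstration of the idea:

    #include <assert.h>
    #include <stdlib.h>

    typedef struct { size_t ref; } obj_s;

    static void obj_free(obj_s *o) {
      if (--o->ref) /* non-zero -> someone still holds a reference */
        return;
      free(o); /* would crash on a stack address - never reached here */
    }

    int main(void) {
      obj_s tmp = {.ref = 8}; /* stack temporary with a padded counter */
      obj_free(&tmp);         /* 8 -> 7, so free() is skipped */
      assert(tmp.ref == 7);
      return 0;
    }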
@@ -4460,7 +4694,7 @@ static inline void fio_subscription_free(subscription_s *s) {
   if (s->on_unsubscribe) {
     s->on_unsubscribe(s->udata1, s->udata2);
   }
-
+  fio_channel_free(s->parent);
   fio_free(s);
 }

@@ -4504,19 +4738,13 @@ void fio_unsubscribe(subscription_s *s) {
     goto finish;
   fio_lock(&s->lock);
   channel_s *ch = s->parent;
-  fio_match_fn match = NULL;
   uint8_t removed = 0;
   fio_lock(&ch->lock);
   fio_ls_embd_remove(&s->node);
   /* check if channel is done for */
   if (fio_ls_embd_is_empty(&ch->subscriptions)) {
     fio_collection_s *c = ch->parent;
-    uint64_t hashed =
-    if (c == &fio_postoffice.patterns) {
-      pattern_s *pat = (pattern_s *)ch;
-      hashed ^= ((uintptr_t)pat->match);
-      match = ((pattern_s *)(ch))->match;
-    }
+    uint64_t hashed = fio_siphash(ch->name, ch->name_len);
     /* lock collection */
     fio_lock(&c->lock);
     /* test again within lock */

@@ -4528,7 +4756,7 @@ void fio_unsubscribe(subscription_s *s) {
   }
   fio_unlock(&ch->lock);
   if (removed) {
-    pubsub_on_channel_destroy(ch
+    pubsub_on_channel_destroy(ch);
   }

   /* promise the subscription will be inactive */

@@ -4545,7 +4773,8 @@ finish:
 * To keep the string beyond the lifetime of the subscription, copy the string.
 */
 fio_str_info_s fio_subscription_channel(subscription_s *subscription) {
-  return
+  return (fio_str_info_s){.data = subscription->parent->name,
+                          .len = subscription->parent->name_len};
 }

 /* *****************************************************************************

@@ -4554,31 +4783,34 @@ Engine handling and Management

 /* implemented later, informs root process about pub/sub subscriptions */
 static inline void fio_cluster_inform_root_about_channel(channel_s *ch,
-                                                         fio_match_fn match,
                                                          int add);

 /* runs in lock(!) let'm all know */
-static void pubsub_on_channel_create(channel_s *ch
+static void pubsub_on_channel_create(channel_s *ch) {
   fio_lock(&fio_postoffice.engines.lock);
   FIO_SET_FOR_LOOP(&fio_postoffice.engines.set, pos) {
     if (!pos->hash)
       continue;
-    pos->obj->subscribe(pos->obj,
+    pos->obj->subscribe(pos->obj,
+                        (fio_str_info_s){.data = ch->name, .len = ch->name_len},
+                        ch->match);
   }
   fio_unlock(&fio_postoffice.engines.lock);
-  fio_cluster_inform_root_about_channel(ch,
+  fio_cluster_inform_root_about_channel(ch, 1);
 }

 /* runs in lock(!) let'm all know */
-static void pubsub_on_channel_destroy(channel_s *ch
+static void pubsub_on_channel_destroy(channel_s *ch) {
   fio_lock(&fio_postoffice.engines.lock);
   FIO_SET_FOR_LOOP(&fio_postoffice.engines.set, pos) {
     if (!pos->hash)
       continue;
-    pos->obj->unsubscribe(
+    pos->obj->unsubscribe(
+        pos->obj, (fio_str_info_s){.data = ch->name, .len = ch->name_len},
+        ch->match);
   }
   fio_unlock(&fio_postoffice.engines.lock);
-  fio_cluster_inform_root_about_channel(ch,
+  fio_cluster_inform_root_about_channel(ch, 0);
 }

 /**

@@ -4636,15 +4868,20 @@ void fio_pubsub_reattach(fio_pubsub_engine_s *eng) {
   FIO_SET_FOR_LOOP(&fio_postoffice.pubsub.channels, pos) {
     if (!pos->hash)
       continue;
-    eng->subscribe(
+    eng->subscribe(
+        eng,
+        (fio_str_info_s){.data = pos->obj->name, .len = pos->obj->name_len},
+        NULL);
   }
   fio_unlock(&fio_postoffice.pubsub.lock);
   fio_lock(&fio_postoffice.patterns.lock);
   FIO_SET_FOR_LOOP(&fio_postoffice.patterns.channels, pos) {
     if (!pos->hash)
       continue;
-    eng->subscribe(
-
+    eng->subscribe(
+        eng,
+        (fio_str_info_s){.data = pos->obj->name, .len = pos->obj->name_len},
+        pos->obj->match);
   }
   fio_unlock(&fio_postoffice.patterns.lock);
 }

@@ -4653,60 +4890,25 @@ void fio_pubsub_reattach(fio_pubsub_engine_s *eng) {
 * Message Metadata handling
 **************************************************************************** */

-static inline void fio_call_meta_callbacks(fio_msg_internal_s *m,
-                                           fio_str_info_s ch,
-                                           fio_str_info_s msg,
-                                           uint8_t is_json) {
-  uintptr_t len;
-  fio_meta_set__ordered_s_ *cpy = NULL;
-  fio_lock(&fio_postoffice.meta.lock);
-  /* don't call user code within a lock - copy the array :-( */
-  len = fio_postoffice.meta.set.pos;
-  if (len && fio_meta_set_count(&fio_postoffice.meta.set)) {
-    cpy = fio_malloc(sizeof(*cpy) * len);
-    FIO_ASSERT_ALLOC(cpy);
-    memcpy(cpy, fio_postoffice.meta.set.ordered, sizeof(*cpy) * len);
-  }
-  fio_unlock(&fio_postoffice.meta.lock);
-  if (!cpy) {
-    return;
-  }
-  for (uintptr_t i = 0; i < len; ++i) {
-    if (!cpy[i].hash)
-      continue;
-    fio_msg_metadata_s *meta = fio_malloc(sizeof(*meta));
-    FIO_ASSERT_ALLOC(meta);
-    *meta = cpy[i].obj(ch, msg, is_json);
-    if (meta->metadata) {
-      meta->next = m->meta;
-      m->meta = meta;
-    } else {
-      fio_free(meta);
-    }
-  }
-  fio_free(cpy);
-}
-
 void fio_message_metadata_callback_set(fio_msg_metadata_fn callback,
                                        int enable) {
+  if (!callback)
+    return;
   fio_lock(&fio_postoffice.meta.lock);
+  fio_meta_ary_remove2(&fio_postoffice.meta.ary, callback, NULL);
   if (enable)
-
-        callback);
-  else
-    fio_meta_set_remove(&fio_postoffice.meta.set, (uintptr_t)callback, callback,
-                        NULL);
+    fio_meta_ary_push(&fio_postoffice.meta.ary, callback);
   fio_unlock(&fio_postoffice.meta.lock);
 }

 /** Finds the message's metadata by its type ID. */
 void *fio_message_metadata(fio_msg_s *msg, intptr_t type_id) {
-  fio_msg_metadata_s *
-
-
-
-
-
+  fio_msg_metadata_s *meta = ((fio_msg_client_s *)msg)->meta;
+  size_t len = ((fio_msg_client_s *)msg)->meta_len;
+  while (len) {
+    --len;
+    if (meta[len].type_id == type_id)
+      return meta[len].metadata;
   }
   return NULL;
 }
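With the linked list gone, metadata lives in a fixed array inside the message and `fio_message_metadata` is a linear scan over `type_id`. The sketch below shows how a metadata hook could plug into this API; the callback signatures are inferred from the fields this diff exposes (`type_id`, `metadata`, `on_finish`), so treat them as assumptions rather than the documented contract:

    #include <stdlib.h>
    #include <string.h>
    #include <fio.h>

    #define MY_META_TYPE_ID 42 /* arbitrary, application-chosen ID */

    /* assumed cleanup signature: the message plus the cached pointer */
    static void my_meta_cleanup(fio_msg_s *msg, void *metadata) {
      (void)msg;
      free(metadata);
    }

    /* runs once per published message; the result is cached in msg->meta[]
     * and shared by every subscriber, instead of recomputed per callback */
    static fio_msg_metadata_s my_meta(fio_str_info_s ch, fio_str_info_s msg,
                                      uint8_t is_json) {
      (void)ch;
      (void)is_json;
      char *copy = malloc(msg.len + 1);
      if (copy) {
        memcpy(copy, msg.data, msg.len);
        copy[msg.len] = 0;
      }
      return (fio_msg_metadata_s){
          .type_id = MY_META_TYPE_ID,
          .metadata = copy,
          .on_finish = my_meta_cleanup, /* runs when the message is freed */
      };
    }

    /* register:  fio_message_metadata_callback_set(my_meta, 1);
     * read back (inside an on_message handler):
     *   char *cached = fio_message_metadata(msg, MY_META_TYPE_ID);        */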
@@ -4716,23 +4918,23 @@ void *fio_message_metadata(fio_msg_s *msg, intptr_t type_id) {
 **************************************************************************** */

 /* common internal tasks */
-static channel_s *fio_channel_find_dup_internal(
+static channel_s *fio_channel_find_dup_internal(channel_s *ch_tmp,
+                                                uint64_t hashed,
                                                 fio_collection_s *c) {
   fio_lock(&c->lock);
-  channel_s *ch = fio_ch_set_find(&c->channels, hashed,
+  channel_s *ch = fio_ch_set_find(&c->channels, hashed, ch_tmp);
   if (!ch) {
     fio_unlock(&c->lock);
     return NULL;
   }
-
+  fio_channel_dup(ch);
   fio_unlock(&c->lock);
   return ch;
 }

 /** Finds a filter channel, increasing its reference count if it exists. */
 static channel_s *fio_filter_find_dup(uint32_t filter) {
-
-                         0); /* don't free */
+  channel_s tmp = {.name = (char *)(&filter), .name_len = sizeof(filter)};
   channel_s *ch =
       fio_channel_find_dup_internal(&tmp, filter, &fio_postoffice.filters);
   return ch;

@@ -4740,9 +4942,8 @@ static channel_s *fio_filter_find_dup(uint32_t filter) {

 /** Finds a pubsub channel, increasing its reference count if it exists. */
 static channel_s *fio_channel_find_dup(fio_str_info_s name) {
-
-
-  uint64_t hashed_name = fio_str_hash(&tmp);
+  channel_s tmp = {.name = name.data, .name_len = name.len};
+  uint64_t hashed_name = fio_siphash(name.data, name.len);
   channel_s *ch =
       fio_channel_find_dup_internal(&tmp, hashed_name, &fio_postoffice.pubsub);
   return ch;

@@ -4752,20 +4953,16 @@ static channel_s *fio_channel_find_dup(fio_str_info_s name) {
 static inline void fio_msg_internal_free(fio_msg_internal_s *msg) {
   if (fio_atomic_sub(&msg->ref, 1))
     return;
-
-
-
-
-
-
-
-
-
-
-      tmp->on_finish(&tmp_msg, tmp->metadata);
-    }
-    fio_free(tmp);
-  } while (meta);
+  while (msg->meta_len) {
+    --msg->meta_len;
+    if (msg->meta[msg->meta_len].on_finish) {
+      fio_msg_s tmp_msg = {
+          .channel = msg->channel,
+          .msg = msg->data,
+      };
+      msg->meta[msg->meta_len].on_finish(&tmp_msg,
+                                         msg->meta[msg->meta_len].metadata);
+    }
   }
   fio_free(msg);
 }

@@ -4785,7 +4982,7 @@ void fio_message_defer(fio_msg_s *msg_) {
 static void fio_perform_subscription_callback(void *s_, void *msg_) {
   subscription_s *s = s_;
   if (fio_trylock(&s->lock)) {
-
+    fio_defer_push_task(fio_perform_subscription_callback, s_, msg_);
     return;
   }
   fio_msg_internal_s *msg = (fio_msg_internal_s *)msg_;

@@ -4798,6 +4995,7 @@ static void fio_perform_subscription_callback(void *s_, void *msg_) {
           .udata1 = s->udata1,
           .udata2 = s->udata2,
       },
+      .meta_len = msg->meta_len,
       .meta = msg->meta,
      .marker = 0,
   };

@@ -4807,7 +5005,7 @@ static void fio_perform_subscription_callback(void *s_, void *msg_) {
   }
   fio_unlock(&s->lock);
   if (m.marker) {
-
+    fio_defer_push_task(fio_perform_subscription_callback, s_, msg_);
     return;
   }
   fio_msg_internal_free(msg);

@@ -4823,7 +5021,7 @@ static void fio_publish2channel(channel_s *ch, fio_msg_internal_s *msg) {
     }
     fio_atomic_add(&s->ref, 1);
     fio_atomic_add(&msg->ref, 1);
-
+    fio_defer_push_task(fio_perform_subscription_callback, s, msg);
   }
   fio_msg_internal_free(msg);
 }
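Each deferred hop above is bracketed by reference counting: `fio_publish2channel` increments both the subscription's and the message's counters before pushing the callback, and the callback's exit path decrements them, so neither object can disappear while a task still points at it. The counter discipline in miniature, with C11 atomics standing in for `fio_atomic_add` / `fio_atomic_sub`:

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct {
      atomic_size_t ref;
    } msg_s;

    static void msg_dup(msg_s *m) { atomic_fetch_add(&m->ref, 1); }

    static void msg_free(msg_s *m) {
      if (atomic_fetch_sub(&m->ref, 1) > 1)
        return; /* another task still holds a reference */
      puts("last reference gone - safe to free");
    }

    int main(void) {
      msg_s m = {.ref = 1}; /* the publisher's reference */
      msg_dup(&m);          /* scheduling a subscription callback */
      msg_free(&m);         /* publisher is done - message survives */
      msg_free(&m);         /* callback is done - now it is released */
    }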
@@ -4834,13 +5032,13 @@ static void fio_publish2channel_task(void *ch_, void *msg) {
   if (!msg)
     goto finish;
   if (fio_trylock(&ch->lock)) {
-
+    fio_defer_push_urgent(fio_publish2channel_task, ch, msg);
     return;
   }
   fio_publish2channel(ch, msg);
   fio_unlock(&ch->lock);
 finish:
-
+  fio_channel_free(ch);
 }

 /** Publishes the message to the current process and frees the strings. */

@@ -4854,12 +5052,10 @@ static void fio_publish2process(fio_msg_internal_s *m) {
   } else {
     ch = fio_channel_find_dup(m->channel);
   }
-  if (m->filter == 0) {
-    fio_call_meta_callbacks(m, m->channel, m->data, m->is_json);
-  }
   /* exact match */
   if (ch) {
-
+    fio_defer_push_urgent(fio_publish2channel_task, ch,
+                          fio_msg_internal_dup(m));
   }
   if (m->filter == 0) {
     /* pattern matching match */

@@ -4868,10 +5064,13 @@ static void fio_publish2process(fio_msg_internal_s *m) {
       if (!p->hash) {
         continue;
       }
-
-      if (
-
-
+
+      if (p->obj->match(
+              (fio_str_info_s){.data = p->obj->name, .len = p->obj->name_len},
+              m->channel)) {
+        fio_channel_dup(p->obj);
+        fio_defer_push_urgent(fio_publish2channel_task, p->obj,
+                              fio_msg_internal_dup(m));
       }
     }
     fio_unlock(&fio_postoffice.patterns.lock);

@@ -4953,7 +5152,7 @@ static void fio_cluster_init(void) {
   uint32_t tmp_folder_len = 0;
   if (!tmp_folder || ((tmp_folder_len = (uint32_t)strlen(tmp_folder)) > 100)) {
 #ifdef P_tmpdir
-    tmp_folder = P_tmpdir;
+    tmp_folder = (char *)P_tmpdir;
     if (tmp_folder)
       tmp_folder_len = (uint32_t)strlen(tmp_folder);
 #else

@@ -5052,18 +5251,14 @@ static void fio_cluster_on_data(intptr_t uuid, fio_protocol_s *pr_) {
         return;
       }
     }
-    c->msg =
-
-
-
-
-
-
-
-        .is_json = c->type == FIO_CLUSTER_MSG_JSON ||
-                   c->type == FIO_CLUSTER_MSG_ROOT_JSON,
-        .ref = 1,
-    };
+    c->msg = fio_pubsub_create_message(
+        c->filter,
+        (fio_str_info_s){.data = (char *)(c->msg + 1), .len = c->exp_channel},
+        (fio_str_info_s){.data = ((char *)(c->msg + 1) + c->exp_channel + 1),
+                         .len = c->exp_msg},
+        (int8_t)(c->type == FIO_CLUSTER_MSG_JSON ||
+                 c->type == FIO_CLUSTER_MSG_ROOT_JSON),
+        0);
     i += 16;
   }
   if (c->exp_channel) {

@@ -5094,6 +5289,7 @@ static void fio_cluster_on_data(intptr_t uuid, fio_protocol_s *pr_) {
       c->exp_msg = 0;
     }
   }
+  fio_pubsub_create_message_update_meta(c->msg);
   c->handler(c);
   fio_msg_internal_free(c->msg);
   c->msg = NULL;
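The `fio_pubsub_create_message_update_meta` call added here completes a two-phase construction: the cluster message is allocated from the header's declared lengths with `cpy == 0` (its zeroed buffers are then filled straight from the socket), and the metadata callbacks run only once the full payload has arrived. A schematic of that deferred-fill idea, with illustrative names rather than fio's:

    #include <stddef.h>
    #include <string.h>

    typedef struct {
      char *buf;       /* allocated up-front from the header's length field */
      size_t expected; /* total payload length, known before the data */
      size_t filled;   /* how much has arrived so far */
    } inbox_s;

    /* phase 2: append chunks as the socket delivers them; returns 1 when the
     * payload is complete and phase 3 (metadata callbacks) becomes safe */
    static int inbox_fill(inbox_s *in, const char *chunk, size_t len) {
      if (len > in->expected - in->filled)
        len = in->expected - in->filled;
      memcpy(in->buf + in->filled, chunk, len);
      in->filled += len;
      return in->filled == in->expected;
    }

    int main(void) {
      char buf[6] = {0};
      inbox_s in = {.buf = buf, .expected = 5};
      inbox_fill(&in, "hel", 3);
      return inbox_fill(&in, "lo", 2) ? 0 : 1; /* complete -> run metadata */
    }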
@@ -5343,8 +5539,8 @@ static void fio_cluster_client_handler(struct cluster_pr_s *pr) {
 static void fio_cluster_client_sender(fio_str_s *data, intptr_t ignr_) {
   if (!uuid_is_valid(cluster_data.uuid) && fio_data->active) {
     /* delay message delivery until we have a valid uuid */
-
-
+    fio_defer_push_task((void (*)(void *, void *))fio_cluster_client_sender,
+                        data, (void *)ignr_);
     return;
   }
   fio_str_send_free2(cluster_data.uuid, data);

@@ -5370,7 +5566,7 @@ static void fio_cluster_on_connect(intptr_t uuid, void *udata) {
     if (!pos->hash) {
       continue;
     }
-    fio_cluster_inform_root_about_channel(pos->obj,
+    fio_cluster_inform_root_about_channel(pos->obj, 1);
   }
   fio_unlock(&fio_postoffice.pubsub.lock);
   fio_lock(&fio_postoffice.patterns.lock);

@@ -5378,8 +5574,7 @@ static void fio_cluster_on_connect(intptr_t uuid, void *udata) {
     if (!pos->hash) {
       continue;
     }
-    fio_cluster_inform_root_about_channel(pos->obj,
-                                          ((pattern_s *)pos->obj)->match, 1);
+    fio_cluster_inform_root_about_channel(pos->obj, 1);
   }
   fio_unlock(&fio_postoffice.patterns.lock);

@@ -5441,34 +5636,34 @@ static void fio_send2cluster(int32_t filter, fio_str_info_s ch,
 **************************************************************************** */

 static inline void fio_cluster_inform_root_about_channel(channel_s *ch,
-                                                         fio_match_fn match,
                                                          int add) {
   if (!fio_data->is_worker || fio_data->workers == 1 || !cluster_data.uuid ||
       !ch)
     return;
-  fio_str_info_s ch_name =
+  fio_str_info_s ch_name = {.data = ch->name, .len = ch->name_len};
   fio_str_info_s msg = {.data = NULL, .len = 0};
 #if DEBUG
   FIO_LOG_DEBUG("(%d) informing root about: %s (%zu) msg type %d", getpid(),
                 ch_name.data, ch_name.len,
-                (match ? (add ? FIO_CLUSTER_MSG_PATTERN_SUB
-
-
-
+                (ch->match ? (add ? FIO_CLUSTER_MSG_PATTERN_SUB
+                                  : FIO_CLUSTER_MSG_PATTERN_UNSUB)
+                           : (add ? FIO_CLUSTER_MSG_PUBSUB_SUB
+                                  : FIO_CLUSTER_MSG_PUBSUB_UNSUB)));
 #endif
   char buf[8] = {0};
-  if (match) {
-    fio_u2str64(buf, (uint64_t)match);
+  if (ch->match) {
+    fio_u2str64(buf, (uint64_t)ch->match);
     msg.data = buf;
-    msg.len = sizeof(match);
+    msg.len = sizeof(ch->match);
   }

   fio_cluster_client_sender(
       fio_cluster_wrap_message(ch_name.len, msg.len,
-                               (match
-
-
-
+                               (ch->match
+                                    ? (add ? FIO_CLUSTER_MSG_PATTERN_SUB
+                                           : FIO_CLUSTER_MSG_PATTERN_UNSUB)
+                                    : (add ? FIO_CLUSTER_MSG_PUBSUB_SUB
+                                           : FIO_CLUSTER_MSG_PUBSUB_UNSUB)),
                                0, ch_name.data, msg.data),
       -1);
 }

@@ -5533,7 +5728,7 @@ static void fio_cluster_at_exit(void *ignore) {
   fio_engine_set_free(&fio_postoffice.engines.set);

   /* clear meta hooks */
-
+  fio_meta_ary_free(&fio_postoffice.meta.ary);
   /* perform newly created tasks */
   fio_defer_perform();
   (void)ignore;

@@ -5563,21 +5758,10 @@ static void fio_cluster_signal_children(void) {
       -1);
 }

-static void fio_publish2process2(int32_t filter, fio_str_info_s ch_name,
-
-
-
-  FIO_ASSERT_ALLOC(m);
-  *m = (fio_msg_internal_s){
-      .filter = filter,
-      .channel = {.data = (char *)(m + 1), .len = ch_name.len},
-      .data = {.data = ((char *)(m + 1) + ch_name.len + 1), .len = msg.len},
-      .is_json = is_json,
-      .ref = 1,
-  };
-  memcpy(m->channel.data, ch_name.data, ch_name.len);
-  memcpy(m->data.data, msg.data, msg.len);
-  fio_publish2process(m);
+static inline void fio_publish2process2(int32_t filter, fio_str_info_s ch_name,
+                                        fio_str_info_s msg, uint8_t is_json) {
+  fio_publish2process(
+      fio_pubsub_create_message(filter, ch_name, msg, is_json, 1));
 }

 /**

@@ -6217,14 +6401,16 @@ static void fio_mem_init(void) {
   if (arenas)
     return;

+  ssize_t cpu_count = 0;
 #ifdef _SC_NPROCESSORS_ONLN
-
+  cpu_count = sysconf(_SC_NPROCESSORS_ONLN);
 #else
 #warning Dynamic CPU core count is unavailable - assuming 8 cores for memory allocation pools.
-  ssize_t cpu_count = 8; /* fallback */
 #endif
+  if (cpu_count <= 0)
+    cpu_count = 8;
   memory.cores = cpu_count;
-  memory.count = 0 -
+  memory.count = 0 - cpu_count;
   arenas = big_alloc(sizeof(*arenas) * cpu_count);
   FIO_ASSERT_ALLOC(arenas);
   size_t pre_pool = cpu_count > 32 ? 32 : cpu_count;
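The arena-count fix above is worth isolating: `sysconf(_SC_NPROCESSORS_ONLN)` may legitimately return -1 (or 0 on odd systems), and the old code only had a fallback when the macro was missing altogether. The corrected logic on its own:

    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
      ssize_t cpu_count = 0;
    #ifdef _SC_NPROCESSORS_ONLN
      cpu_count = sysconf(_SC_NPROCESSORS_ONLN); /* may fail with -1 */
    #endif
      if (cpu_count <= 0)
        cpu_count = 8; /* conservative default, applied on every platform */
      printf("using %zd memory arenas\n", cpu_count);
    }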
@@ -6271,6 +6457,7 @@ void *fio_malloc(size_t size) {
   }
   if (size >= FIO_MEMORY_BLOCK_ALLOC_LIMIT) {
     /* system allocation - must be block aligned */
+    // FIO_LOG_WARNING("fio_malloc re-routed to mmap - big allocation");
     return big_alloc(size);
   }
   /* ceiling for 16 byte alignment, translated to 16 byte units */

@@ -8443,7 +8630,9 @@ Testing fio_defer task system

 #define FIO_DEFER_TOTAL_COUNT (512 * 1024)

+#ifndef FIO_DEFER_TEST_PRINT
 #define FIO_DEFER_TEST_PRINT 0
+#endif

 static void sample_task(void *i_count, void *unused2) {
   (void)(unused2);

@@ -8511,7 +8700,7 @@ static void fio_defer_test(void) {
                "defer deallocation vs. allocation error, %zu != %zu",
                fio_defer_count_dealloc, fio_defer_count_alloc);
   }
-  FIO_ASSERT(
+  FIO_ASSERT(task_queue_normal.writer == &task_queue_normal.static_queue,
              "defer library didn't release dynamic queue (should be static)");
   fprintf(stderr, "\n* passed.\n");
 }