puma 5.6.5 → 6.4.2
- checksums.yaml +4 -4
- data/History.md +338 -14
- data/LICENSE +0 -0
- data/README.md +79 -29
- data/bin/puma-wild +1 -1
- data/docs/architecture.md +0 -0
- data/docs/compile_options.md +34 -0
- data/docs/deployment.md +0 -0
- data/docs/fork_worker.md +1 -3
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +0 -0
- data/docs/jungle/rc.d/README.md +0 -0
- data/docs/jungle/rc.d/puma.conf +0 -0
- data/docs/kubernetes.md +12 -0
- data/docs/nginx.md +1 -1
- data/docs/plugins.md +0 -0
- data/docs/rails_dev_mode.md +0 -0
- data/docs/restart.md +1 -0
- data/docs/signals.md +0 -0
- data/docs/stats.md +0 -0
- data/docs/systemd.md +3 -6
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +0 -0
- data/ext/puma_http11/ext_help.h +0 -0
- data/ext/puma_http11/extconf.rb +16 -9
- data/ext/puma_http11/http11_parser.c +1 -1
- data/ext/puma_http11/http11_parser.h +1 -1
- data/ext/puma_http11/http11_parser.java.rl +2 -2
- data/ext/puma_http11/http11_parser.rl +2 -2
- data/ext/puma_http11/http11_parser_common.rl +2 -2
- data/ext/puma_http11/mini_ssl.c +127 -19
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +0 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +3 -3
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +1 -1
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +157 -53
- data/ext/puma_http11/puma_http11.c +17 -9
- data/lib/puma/app/status.rb +4 -4
- data/lib/puma/binder.rb +50 -53
- data/lib/puma/cli.rb +16 -18
- data/lib/puma/client.rb +100 -26
- data/lib/puma/cluster/worker.rb +18 -11
- data/lib/puma/cluster/worker_handle.rb +4 -1
- data/lib/puma/cluster.rb +102 -40
- data/lib/puma/commonlogger.rb +21 -14
- data/lib/puma/configuration.rb +77 -59
- data/lib/puma/const.rb +129 -92
- data/lib/puma/control_cli.rb +15 -11
- data/lib/puma/detect.rb +7 -4
- data/lib/puma/dsl.rb +250 -56
- data/lib/puma/error_logger.rb +18 -9
- data/lib/puma/events.rb +6 -126
- data/lib/puma/io_buffer.rb +39 -4
- data/lib/puma/jruby_restart.rb +2 -1
- data/lib/puma/json_serialization.rb +0 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +102 -175
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +26 -12
- data/lib/puma/minissl.rb +104 -11
- data/lib/puma/null_io.rb +16 -2
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +1 -1
- data/lib/puma/plugin.rb +0 -0
- data/lib/puma/rack/builder.rb +6 -6
- data/lib/puma/rack/urlmap.rb +1 -1
- data/lib/puma/rack_default.rb +19 -4
- data/lib/puma/reactor.rb +19 -10
- data/lib/puma/request.rb +365 -170
- data/lib/puma/runner.rb +56 -20
- data/lib/puma/sd_notify.rb +149 -0
- data/lib/puma/server.rb +137 -89
- data/lib/puma/single.rb +13 -11
- data/lib/puma/state_file.rb +3 -6
- data/lib/puma/thread_pool.rb +57 -19
- data/lib/puma/util.rb +0 -11
- data/lib/puma.rb +12 -11
- data/lib/rack/handler/puma.rb +113 -86
- data/tools/Dockerfile +2 -2
- data/tools/trickletest.rb +0 -0
- metadata +11 -6
- data/lib/puma/queue_close.rb +0 -26
- data/lib/puma/systemd.rb +0 -46
data/bin/puma-wild
CHANGED
data/docs/architecture.md
CHANGED
File without changes
data/docs/compile_options.md
CHANGED
@@ -19,3 +19,37 @@ For Bundler, use its configuration system:
 ```
 bundle config build.puma "--with-cflags='-D PUMA_QUERY_STRING_MAX_LENGTH=64000'"
 ```
+
+## Request Path, `PUMA_REQUEST_PATH_MAX_LENGTH`
+
+By default, the max length of `REQUEST_PATH` is `8192`. But you may want to
+adjust it to accept longer paths in requests.
+
+For manual install, pass the `PUMA_REQUEST_PATH_MAX_LENGTH` option like this:
+
+```
+gem install puma -- --with-cflags="-D PUMA_REQUEST_PATH_MAX_LENGTH=64000"
+```
+
+For Bundler, use its configuration system:
+
+```
+bundle config build.puma "--with-cflags='-D PUMA_REQUEST_PATH_MAX_LENGTH=64000'"
+```
+
+## Request URI, `PUMA_REQUEST_URI_MAX_LENGTH`
+
+By default, the max length of `REQUEST_URI` is `1024 * 12`. But you may want to
+adjust it to accept longer URIs in requests.
+
+For manual install, pass the `PUMA_REQUEST_URI_MAX_LENGTH` option like this:
+
+```
+gem install puma -- --with-cflags="-D PUMA_REQUEST_URI_MAX_LENGTH=64000"
+```
+
+For Bundler, use its configuration system:
+
+```
+bundle config build.puma "--with-cflags='-D PUMA_REQUEST_URI_MAX_LENGTH=64000'"
+```
data/docs/deployment.md
CHANGED
File without changes
data/docs/fork_worker.md
CHANGED
@@ -10,7 +10,7 @@ Puma 5 introduces an experimental new cluster-mode configuration option, `fork_worker`
 10004   \_ puma: cluster worker 3: 10000 [puma]
 ```
 
-
+The `fork_worker` option allows your application to be initialized only once for copy-on-write memory savings, and it has two additional advantages:
 
 1. **Compatible with phased restart.** Because the master process itself doesn't preload the application, this mode works with phased restart (`SIGUSR1` or `pumactl phased-restart`). When worker 0 reloads as part of a phased restart, it initializes a new copy of your application first, then the other workers reload by forking from this new worker already containing the new preloaded application.
 
@@ -24,8 +24,6 @@ Similar to the `preload_app!` option, the `fork_worker` option allows your appli
 
 ### Limitations
 
-- Not compatible with the `preload_app!` option
-
 - This mode is still very experimental so there may be bugs or edge-cases, particularly around expected behavior of existing hooks. Please open a [bug report](https://github.com/puma/puma/issues/new?template=bug_report.md) if you encounter any issues.
 
 - In order to fork new workers cleanly, worker 0 shuts down its server and stops serving requests so there are no open file descriptors or other kinds of shared global state between processes, and to maximize copy-on-write efficiency across the newly-forked workers. This may temporarily reduce total capacity of the cluster during a phased restart / refork.
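For reference, enabling this mode is a one-line change in the Puma config; a minimal sketch, with the worker count purely illustrative:

```ruby
# config/puma.rb
workers 4     # fork_worker requires cluster mode
fork_worker   # worker 0 becomes the template that new workers are forked from
```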
data/docs/images/puma-connection-flow-no-reactor.png
CHANGED
File without changes
data/docs/images/puma-connection-flow.png
CHANGED
File without changes
data/docs/images/puma-general-arch.png
CHANGED
File without changes
data/docs/jungle/README.md
CHANGED
File without changes
data/docs/jungle/rc.d/README.md
CHANGED
File without changes
data/docs/jungle/rc.d/puma.conf
CHANGED
File without changes
data/docs/kubernetes.md
CHANGED
@@ -64,3 +64,15 @@ There is a subtle race condition between step 2 and 3: The replication controller
 The way Kubernetes works this way, rather than handling step 2 synchronously, is due to the CAP theorem: in a distributed system there is no way to guarantee that any message will arrive promptly. In particular, waiting for all Service controllers to report back might get stuck for an indefinite time if one of them has already been terminated or if there has been a net split. A way to work around this is to add a sleep to the pre-stop hook of the same time as the `terminationGracePeriodSeconds` time. This will allow the Puma process to keep serving new requests during the entire grace period, although it will no longer receive new requests after all Service controllers have propagated the removal of the pod from their endpoint lists. Then, after `terminationGracePeriodSeconds`, the pod receives `SIGKILL` and closes down. If your process can't handle SIGKILL properly, for example because it needs to release locks in different services, you can also sleep for a shorter period (and/or increase `terminationGracePeriodSeconds`) as long as the time slept is longer than the time that your Service controllers take to propagate the pod removal. The downside of this workaround is that all pods will take at minimum the amount of time slept to shut down and this will increase the time required for your rolling deploy.
 
 More discussions and links to relevant articles can be found in https://github.com/puma/puma/issues/2343.
+
+## Workers Per Pod, and Other Config Issues
+
+With containerization, you will have to make a decision about how "big" to make each pod. Should you run 2 pods with 50 workers each? 25 pods, each with 4 workers? 100 pods, with each Puma running in single mode? Each scenario represents the same total amount of capacity (100 Puma processes that can respond to requests), but there are tradeoffs to make.
+
+* Worker counts should be somewhere between 4 and 32 in most cases. You want more than 4 in order to minimize time spent in request queueing for a free Puma worker, but probably less than ~32 because otherwise autoscaling is working in too large of an increment or they probably won't fit very well into your nodes. In any queueing system, queue time is proportional to 1/n, where n is the number of things pulling from the queue. Each pod will have its own request queue (i.e., the socket backlog). If you have 4 pods with 1 worker each (4 request queues), wait times are, proportionally, about 4 times higher than if you had 1 pod with 4 workers (1 request queue).
+* Unless you have a very I/O-heavy application (50%+ time spent waiting on IO), use the default thread count (5 for MRI). Using higher numbers of threads with low I/O wait (<50%) will lead to additional request queueing time (latency!) and additional memory usage.
+* More processes per pod reduces memory usage per process, because of copy-on-write memory and because the cost of the single master process is "amortized" over more child processes.
+* Don't run less than 4 processes per pod if you can. Low numbers of processes per pod will lead to high request queueing, which means you will have to run more pods.
+* If multithreaded, allocate 1 CPU per worker. If single threaded, allocate 0.75 cpus per worker. Most web applications spend about 25% of their time in I/O - but when you're running multi-threaded, your Puma process will have higher CPU usage and should be able to fully saturate a CPU core.
+* Most Puma processes will use about ~512MB-1GB per worker, and about 1GB for the master process. However, you probably shouldn't bother with setting memory limits lower than around 2GB per process, because most places you are deploying will have 2GB of RAM per CPU. A sensible memory limit for a Puma configuration of 4 child workers might be something like 8 GB (1 GB for the master, 7GB for the 4 children).
+
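A per-pod Puma config following the guidance above might look like the sketch below; the worker and thread counts are illustrative starting points, not prescriptions:

```ruby
# config/puma.rb for a single pod
workers 4      # at least 4 processes per pod keeps request queueing reasonable
threads 5, 5   # default MRI thread count; only raise this for very I/O-heavy apps
```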
data/docs/nginx.md
CHANGED
data/docs/plugins.md
CHANGED
File without changes
data/docs/rails_dev_mode.md
CHANGED
File without changes
data/docs/restart.md
CHANGED
@@ -27,6 +27,7 @@ Any of the following will cause a Puma server to perform a hot restart:
 
 ### Additional notes
 
+* The newly started Puma process changes its current working directory to the directory specified by the `directory` option. If `directory` is set to symlink, this is automatically re-evaluated, so this mechanism can be used to upgrade the application.
 * Only one version of the application is running at a time.
 * `on_restart` is invoked just before the server shuts down. This can be used to clean up resources (like long-lived database connections) gracefully. Since Ruby 2.0, it is not typically necessary to explicitly close file descriptors on restart. This is because any file descriptor opened by Ruby will have the `FD_CLOEXEC` flag set, meaning that file descriptors are closed on `exec`. `on_restart` is useful, though, if your application needs to perform any more graceful protocol-specific shutdown procedures before closing connections.
 
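As a reminder of how the `on_restart` hook mentioned above reads in a config file, here is a minimal sketch; the global connection object is purely illustrative:

```ruby
# config/puma.rb
on_restart do
  # perform any protocol-specific graceful shutdown before the restart,
  # e.g. say goodbye on a long-lived connection held in a global
  $legacy_connection&.close
end
```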
data/docs/signals.md
CHANGED
File without changes
data/docs/stats.md
CHANGED
File without changes
data/docs/systemd.md
CHANGED
@@ -24,8 +24,7 @@ After=network.target
 
 [Service]
 # Puma supports systemd's `Type=notify` and watchdog service
-# monitoring,
-# as of Puma 5.1 or later.
+# monitoring, as of Puma 5.1 or later.
 # On earlier versions of Puma or JRuby, change this to `Type=simple` and remove
 # the `WatchdogSec` line.
 Type=notify
@@ -52,7 +51,7 @@ ExecStart=/<FULLPATH>/bin/puma -C <YOUR_APP_PATH>/puma.rb
 # Variant: Rails start.
 # ExecStart=/<FULLPATH>/bin/puma -C <YOUR_APP_PATH>/config/puma.rb ../config.ru
 
-# Variant: Use `bundle exec
+# Variant: Use `bundle exec puma` instead of binstub
 # Variant: Specify directives inline.
 # ExecStart=/<FULLPATH>/puma -b tcp://0.0.0.0:9292 -b ssl://0.0.0.0:9293?key=key.pem&cert=cert.pem
 
@@ -77,9 +76,7 @@ compatible with both clustered mode and application preload.
 
 **Note:** Any wrapper scripts which `exec`, or other indirections in `ExecStart`
 may result in activated socket file descriptors being closed before reaching the
-puma master process.
-`--keep-file-descriptors` flag. `bundle exec` can be avoided by using a `puma`
-executable generated by `bundle binstubs puma`. This is tracked in [#1499].
+puma master process.
 
 **Note:** Socket activation doesn't currently work on JRuby. This is tracked in
 [#1367].
data/docs/testing_benchmarks_local_files.md
ADDED
@@ -0,0 +1,150 @@
+# Testing - benchmark/local files
+
+These files generate data that shows request-per-second (RPS), etc. Typically, files are in
+pairs, a shell script and a Ruby script. The shell script starts the server, then runs the
+Ruby file, which starts client request stream(s), then collects and logs metrics.
+
+## response_time_wrk.sh
+
+This uses [wrk] for generating data. One or more wrk runs are performed. Summarizes RPS and
+wrk latency times. The default for the `-b` argument runs 28 different client request streams,
+and takes a bit over 5 minutes. See 'Request Stream Configuration' below for `-b` argument
+description.
+
+<details>
+<summary>Summary output for<br/><code>benchmarks/local/response_time_wrk.sh -w2 -t5:5 -s tcp6</code>:</summary>
+
+```
+Type     req/sec    50%     75%     90%     99%    100%  Resp Size
+─────────────────────────────────────────────────────────────────    1kB
+array      13710   0.74    2.52    5.23    7.76   37.45      1024
+chunk      13502   0.76    2.55    5.28    7.84   11.23      1042
+string     13794   0.74    2.51    5.20    7.75   14.07      1024
+io          9615   1.16    3.45    7.13   10.57   15.75      1024
+─────────────────────────────────────────────────────────────────   10kB
+array      13458   0.76    2.57    5.31    7.93   13.94     10239
+chunk      13066   0.78    2.64    5.46    8.18   38.48     10320
+string     13500   0.76    2.55    5.29    7.88   11.42     10240
+io          9293   1.18    3.59    7.39   10.94   16.99     10240
+─────────────────────────────────────────────────────────────────  100kB
+array      11315   0.96    3.06    6.33    9.49   17.69    102424
+chunk       9916   1.10    3.48    7.20   10.73   15.14    103075
+string     10948   1.00    3.17    6.57    9.83   17.88    102378
+io          8901   1.21    3.72    7.48   11.27   59.98    102407
+─────────────────────────────────────────────────────────────────  256kB
+array       9217   1.15    3.82    7.88   11.74   17.12    262212
+chunk       7339   1.45    4.76    9.81   14.63   22.70    264007
+string      8574   1.19    3.81    7.73   11.21   15.80    262147
+io          8911   1.19    3.80    7.55   15.25   60.01    262183
+─────────────────────────────────────────────────────────────────  512kB
+array       6951   1.49    5.03   10.28   15.90   25.08    524378
+chunk       5234   2.03    6.56   13.57   20.46   32.15    527862
+string      6438   1.55    5.04   10.12   16.28   72.87    524275
+io          8533   1.15    4.62    8.79   48.15   70.51    524327
+───────────────────────────────────────────────────────────────── 1024kB
+array       4122   1.80   15.59   41.87   67.79  121.00   1048565
+chunk       3158   2.82   15.22   31.00   71.39   99.90   1055654
+string      4710   2.24    6.66   13.65   20.38   70.44   1048575
+io          8355   1.23    3.95    7.94   14.08   68.54   1048498
+───────────────────────────────────────────────────────────────── 2048kB
+array       2454   4.12   14.02   27.70   43.48   88.89   2097415
+chunk       1743   6.26   17.65   36.98   55.78   92.10   2111358
+string      2479   4.38   12.52   25.65   38.44   95.62   2097502
+io          8264   1.25    3.83    7.76   11.73   65.69   2097090
+
+Body    ────────── req/sec ──────────   ─────── req 50% times ───────
+  KB    array   chunk  string      io   array   chunk  string      io
+   1    13710   13502   13794    9615   0.745   0.757   0.741   1.160
+  10    13458   13066   13500    9293   0.760   0.784   0.759   1.180
+ 100    11315    9916   10948    8901   0.960   1.100   1.000   1.210
+ 256     9217    7339    8574    8911   1.150   1.450   1.190   1.190
+ 512     6951    5234    6438    8533   1.490   2.030   1.550   1.150
+1024     4122    3158    4710    8355   1.800   2.820   2.240   1.230
+2048     2454    1743    2479    8264   4.120   6.260   4.380   1.250
+─────────────────────────────────────────────────────────────────────
+wrk -t8 -c16 -d10s
+benchmarks/local/response_time_wrk.sh -w2 -t5:5 -s tcp6 -Y
+Server cluster mode -w2 -t5:5, bind: tcp6
+Puma repo branch 00-response-refactor
+ruby 3.2.0dev (2022-06-14T01:21:55Z master 048f14221c) +YJIT [x86_64-linux]
+
+[2136] - Gracefully shutting down workers...
+[2136] === puma shutdown: 2022-06-13 21:16:13 -0500 ===
+[2136] - Goodbye!
+
+5:15 Total Time
+```
+</details><br/>
+
+## bench_base.sh, bench_base.rb
+
+These two files set up parameters for the Puma server, which is normally started in a shell
+script. It then starts a Ruby file (a subclass of BenchBase), passing arguments to it. The
+Ruby file is normally used to generate a client request stream(s).
+
+### Puma Configuration
+
+The following arguments are used for the Puma server:
+
+* **`-C`** - configuration file
+* **`-d`** - app delay
+* **`-r`** - rackup file, often defaults to test/rackup/ci_select.ru
+* **`-s`** - bind socket type, default is tcp/tcp4, also tcp6, ssl/ssl4, ssl6, unix, or aunix
+  (unix & abstract unix are not available with wrk).
+* **`-t`** - threads, expressed as '5:5', same as Puma --thread
+* **`-w`** - workers, same as Puma --worker
+* **`-Y`** - enable Ruby YJIT
+
+### Request Stream Configuration
+
+The following arguments are used for request streams:
+
+* **`-b`** - response body configuration. Body type options are a array, c chunked, s string,
+  and i for File/IO. None or any combination can be specified, they should start the option.
+  Then, any combination of comma separated integers can be used for the response body size
+  in kB. The string 'ac50,100' would create four runs, 50kB array, 50kB chunked, 100kB array,
+  and 100kB chunked. See 'Testing - test/rackup/ci-*.ru files' for more info.
+* **`-c`** - connections per client request stream thread, defaults to 2 for wrk.
+* **`-D`** - duration of client request stream in seconds.
+* **`-T`** - number of threads in the client request stream. For wrk, this defaults to
+  80% of Puma workers * max_threads.
+
+### Notes - Configuration
+
+The above lists script arguments.
+
+`bench_base.sh` contains most server defaults. Many can be set via ENV variables.
+
+`bench_base.rb` contains the client request stream defaults. The default value for
+`-b` is `acsi1,10,100,256,512,1024,2048`, which is a 4 x 7 matrix, and hence, runs
+28 jobs. Also, the i body type (File/IO) generates files, they are placed in the
+`"#{Dir.tmpdir}/.puma_response_body_io"` directory, which is created.
+
+### Notes - wrk
+
+The shell scripts use `-T` for wrk's thread count, since `-t` is used for Puma
+server threads. Regarding the `-c` argument, wrk has an interesting behavior.
+The total number of connections is set by `(connections/threads).to_i`. The scripts
+here use `-c` as connections per thread. Hence, using `-T4 -c2` will yield a total
+of eight wrk connections, two per thread. The equivalent wrk arguments would be `-t4 -c8`.
+
+Puma can only process so many requests, and requests will queue in the backlog
+until Puma can respond to them. With wrk, if the number of total connections is
+too high, one will see the upper latency times increase, pushing into the lower
+latency times as the connections are increased. The default values for wrk's
+threads and connections were chosen to minimize requests' time in the backlog.
+
+An example with four wrk runs using `-b s10`. Notice that `req/sec` varies by
+less than 1%, but the `75%` times increase by an order of magnitude:
+```
+req/sec     50%      75%      90%      99%     100%  Resp Size  wrk cmd line
+─────────────────────────────────────────────────────────────────────────────
+  13597   0.755    2.550    5.260    7.800   13.310      12040  wrk -t8 -c16 -d10
+  13549   0.793    4.430    8.140   11.220   16.600      12002  wrk -t10 -c20 -d10
+  13570   1.040   25.790   40.010   49.070   58.300      11982  wrk -t8 -c64 -d10
+  13684   1.050   25.820   40.080   49.160   66.190      12033  wrk -t16 -c64 -d10
+```
+Finally, wrk's output may cause rounding errors, so the response body size calculation is
+imprecise.
+
+[wrk]: <https://github.com/ioquatix/wrk>
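To make the `-b` matrix behavior concrete, here is a hypothetical Ruby sketch of how a string such as `ac50,100` expands into individual runs; it is not the gem's actual parsing code:

```ruby
# Expand a '-b' argument into [body_type, size_kB] pairs.
def expand_body_conf(conf)
  types = conf[/\A[acsi]+/] || 's'       # leading letters select body types
  sizes = conf.scan(/\d+/).map(&:to_i)   # comma-separated integers are sizes in kB
  sizes = [1] if sizes.empty?
  types.chars.product(sizes)
end

p expand_body_conf('ac50,100')
# => [["a", 50], ["a", 100], ["c", 50], ["c", 100]]  (four runs, as described above)
```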
data/docs/testing_test_rackup_ci_files.md
ADDED
@@ -0,0 +1,36 @@
+# Testing - test/rackup/ci-*.ru files
+
+## Overview
+
+Puma should efficiently handle a variety of response bodies, varying both by size
+and by the type of object used for the body.
+
+Five rackup files are located in 'test/rackup' that can be used. All have their
+request body size (in kB) set via `Body-Conf` header or with `ENV['CI_BODY_CONF']`.
+Additionally, the ci_select.ru file can have its body type set via a starting
+character.
+
+* **ci_array.ru** - body is an `Array` of 1kB strings. `Content-Length` is not set.
+* **ci_chunked.ru** - body is an `Enumerator` of 1kB strings. `Content-Length` is not set.
+* **ci_io.ru** - body is a File/IO object. `Content-Length` is set.
+* **ci_string.ru** - body is a single string. `Content-Length` is set.
+* **ci_select.ru** - can be any of the above.
+
+All responses have 25 headers, total length approx 1kB. ci_array.ru and ci_chunked.ru
+contain 1kB items.
+
+All can be delayed by a float value (seconds) specified by the `Dly` header.
+
+Note that the `Body-Conf` header takes precedence, and `ENV['CI_BODY_CONF']` is
+only read on load.
+
+## ci_select.ru
+
+The ci_select.ru file allows a starting character to specify the body type in the
+`Body-Conf` header or with `ENV['CI_BODY_CONF']`.
+* **a** - array of strings
+* **c** - chunked (enum)
+* **s** - single string
+* **i** - File/IO
+
+A value of `a100` would return a body as an array of 100 1kB strings.
data/ext/puma_http11/PumaHttp11Service.java
CHANGED
File without changes
data/ext/puma_http11/ext_help.h
CHANGED
File without changes
data/ext/puma_http11/extconf.rb
CHANGED
@@ -2,20 +2,26 @@ require 'mkmf'
 
 dir_config("puma_http11")
 
-if $mingw
+if $mingw
   append_cflags '-fstack-protector-strong -D_FORTIFY_SOURCE=2'
   append_ldflags '-fstack-protector-strong -l:libssp.a'
   have_library 'ssp'
 end
 
-unless ENV["
+unless ENV["PUMA_DISABLE_SSL"]
   # don't use pkg_config('openssl') if '--with-openssl-dir' is used
-
+  # also looks within the Ruby build for directory info
+  has_openssl_dir = dir_config('openssl').any? ||
+    RbConfig::CONFIG['configure_args']&.include?('openssl') ||
+    Dir.exist?("#{RbConfig::TOPDIR}/src/main/c/openssl") # TruffleRuby
+
   found_pkg_config = !has_openssl_dir && pkg_config('openssl')
 
-  found_ssl = if
+  found_ssl = if !$mingw && found_pkg_config
     puts 'using OpenSSL pkgconfig (openssl.pc)'
     true
+  elsif have_library('libcrypto', 'BIO_read') && have_library('libssl', 'SSL_CTX_new')
+    true
   elsif %w'crypto libeay32'.find {|crypto| have_library(crypto, 'BIO_read')} &&
       %w'ssl ssleay32'.find {|ssl| have_library(ssl, 'SSL_CTX_new')}
     true
@@ -28,13 +34,14 @@ unless ENV["DISABLE_SSL"]
   have_header "openssl/bio.h"
 
   # below is yes for 1.0.2 & later
-  have_func
+  have_func "DTLS_method" , "openssl/ssl.h"
+  have_func "SSL_CTX_set_session_cache_mode(NULL, 0)", "openssl/ssl.h"
 
   # below are yes for 1.1.0 & later
-  have_func
-  have_func
+  have_func "TLS_server_method" , "openssl/ssl.h"
+  have_func "SSL_CTX_set_min_proto_version(NULL, 0)" , "openssl/ssl.h"
 
-  have_func
+  have_func "X509_STORE_up_ref"
   have_func "SSL_CTX_set_ecdh_auto(NULL, 0)" , "openssl/ssl.h"
 
   # below exists in 1.1.0 and later, but isn't documented until 3.0.0
@@ -53,7 +60,7 @@ unless ENV["DISABLE_SSL"]
   end
 end
 
-if ENV["
+if ENV["PUMA_MAKE_WARNINGS_INTO_ERRORS"]
   # Make all warnings into errors
   # Except `implicit-fallthrough` since most failures comes from ragel state machine generated code
   if respond_to?(:append_cflags, true) # Ruby 2.5 and later
data/ext/puma_http11/http11_parser.java.rl
CHANGED
@@ -39,8 +39,8 @@ public class Http11Parser {
     Http11.query_string(runtime, parser.data, parser.buffer, parser.query_start, fpc-parser.query_start);
   }
 
-  action
-    Http11.
+  action server_protocol {
+    Http11.server_protocol(runtime, parser.data, parser.buffer, parser.mark, fpc-parser.mark);
   }
 
   action request_path {
data/ext/puma_http11/http11_parser.rl
CHANGED
@@ -62,8 +62,8 @@ static void snake_upcase_char(char *c)
     parser->query_string(parser, PTR_TO(query_start), LEN(query_start, fpc));
   }
 
-  action
-    parser->
+  action server_protocol {
+    parser->server_protocol(parser, PTR_TO(mark), LEN(mark, fpc));
   }
 
   action request_path {
data/ext/puma_http11/http11_parser_common.rl
CHANGED
@@ -38,8 +38,8 @@
   Method = ( upper | digit | safe ){1,20} >mark %request_method;
 
   http_number = ( digit+ "." digit+ ) ;
-
-  Request_Line = ( Method " " Request_URI ("#" Fragment){0,1} " "
+  Server_Protocol = ( "HTTP/" http_number ) >mark %server_protocol ;
+  Request_Line = ( Method " " Request_URI ("#" Fragment){0,1} " " Server_Protocol CRLF ) ;
 
   field_name = ( token -- ":" )+ >start_field $snake_upcase_field %write_field;
 