puma 7.1.0-java → 8.0.0-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/History.md +116 -0
- data/README.md +18 -11
- data/docs/5.0-Upgrade.md +98 -0
- data/docs/6.0-Upgrade.md +56 -0
- data/docs/7.0-Upgrade.md +52 -0
- data/docs/8.0-Upgrade.md +100 -0
- data/docs/deployment.md +58 -23
- data/docs/grpc.md +62 -0
- data/docs/images/favicon.svg +1 -0
- data/docs/images/running-puma.svg +1 -0
- data/docs/images/standard-logo.svg +1 -0
- data/docs/jungle/README.md +1 -1
- data/docs/kubernetes.md +3 -10
- data/docs/plugins.md +2 -2
- data/docs/signals.md +10 -10
- data/docs/stats.md +1 -1
- data/docs/systemd.md +3 -3
- data/ext/puma_http11/http11_parser.java.rl +51 -65
- data/ext/puma_http11/org/jruby/puma/EnvKey.java +241 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +168 -104
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +71 -85
- data/ext/puma_http11/puma_http11.c +101 -109
- data/lib/puma/app/status.rb +10 -2
- data/lib/puma/cli.rb +1 -1
- data/lib/puma/client.rb +90 -66
- data/lib/puma/client_env.rb +171 -0
- data/lib/puma/cluster/worker.rb +10 -9
- data/lib/puma/cluster.rb +3 -4
- data/lib/puma/configuration.rb +85 -16
- data/lib/puma/const.rb +2 -2
- data/lib/puma/control_cli.rb +1 -1
- data/lib/puma/detect.rb +11 -0
- data/lib/puma/dsl.rb +90 -14
- data/lib/puma/launcher.rb +7 -7
- data/lib/puma/puma_http11.jar +0 -0
- data/lib/puma/reactor.rb +3 -12
- data/lib/puma/{request.rb → response.rb} +25 -194
- data/lib/puma/runner.rb +1 -1
- data/lib/puma/server.rb +72 -37
- data/lib/puma/server_plugin_control.rb +32 -0
- data/lib/puma/single.rb +2 -2
- data/lib/puma/thread_pool.rb +129 -23
- data/lib/rack/handler/puma.rb +1 -1
- data/tools/Dockerfile +13 -5
- metadata +17 -7
- data/ext/puma_http11/ext_help.h +0 -15
data/docs/deployment.md
CHANGED
|
@@ -16,32 +16,34 @@ assume this is how you're using Puma.
|
|
|
16
16
|
Initially, Puma was conceived as a thread-only web server, but support for
|
|
17
17
|
processes was added in version 2.
|
|
18
18
|
|
|
19
|
+
In general, use single mode only if:
|
|
20
|
+
|
|
21
|
+
* You are using JRuby, TruffleRuby or another fully-multithreaded implementation of Ruby
|
|
22
|
+
* You are using MRI but in an environment where only 1 CPU core is available.
|
|
23
|
+
|
|
24
|
+
Otherwise, you'll want to use cluster mode to utilize all available CPU resources.
|
|
25
|
+
|
|
19
26
|
To run `puma` in single mode (i.e., as a development environment), set the
|
|
20
27
|
number of workers to 0; anything higher will run in cluster mode.
|
|
21
28
|
|
|
22
|
-
|
|
29
|
+
## Cluster Mode Tips
|
|
23
30
|
|
|
24
|
-
|
|
31
|
+
For the purposes of Puma provisioning, "CPU cores" means:
|
|
25
32
|
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
* Set the number of threads to desired concurrent requests/number of workers.
|
|
29
|
-
Puma defaults to 5, and that's a decent number.
|
|
33
|
+
1. On ARM, the number of physical cores.
|
|
34
|
+
2. On x86, the number of logical cores, hyperthreads, or vCPUs (these words all mean the same thing).
|
|
30
35
|
|
|
31
|
-
|
|
36
|
+
Set your config with the following process:
|
|
32
37
|
|
|
33
|
-
* If you'
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
* Enjoy 50% memory savings
|
|
37
|
-
* As you grow more confident in the thread-safety of your app, you can tune the
|
|
38
|
-
workers down and the threads up.
|
|
38
|
+
* Use cluster mode and set `workers :auto` (requires the `concurrent-ruby` gem) to match the number of CPU cores on the machine (minimum 2, otherwise use single mode!). If you can't add the gem, set the worker count manually to the available CPU cores.
|
|
39
|
+
* Set the number of threads to desired concurrent requests/number of workers.
|
|
40
|
+
Puma defaults to 5, and that's a decent number.
|
|
39
41
|
|
|
40
|
-
|
|
42
|
+
For most deployments, adding `concurrent-ruby` and using `workers :auto` is the right starting point.
|
|
41
43
|
|
|
42
|
-
See [
|
|
44
|
+
See [`workers :auto` gotchas](../lib/puma/dsl.rb).
|
|
43
45
|
|
|
44
|
-
|
|
46
|
+
## Worker utilization
|
|
45
47
|
|
|
46
48
|
**How do you know if you've got enough (or too many workers)?**
|
|
47
49
|
|
|
@@ -50,14 +52,34 @@ a time. But since so many apps are waiting on IO from DBs, etc., they can
|
|
|
50
52
|
utilize threads to use the process more efficiently.
|
|
51
53
|
|
|
52
54
|
Generally, you never want processes that are pegged all the time. That can mean
|
|
53
|
-
there is more work to do than the process can get through. On the other hand, if
|
|
54
|
-
you have processes that sit around doing nothing, then
|
|
55
|
-
|
|
55
|
+
there is more work to do than the process can get through, and requests will end up with additional latency. On the other hand, if
|
|
56
|
+
you have processes that sit around doing nothing, then you're wasting resources and money.
|
|
57
|
+
|
|
58
|
+
In general, you are making a tradeoff between:
|
|
59
|
+
|
|
60
|
+
1. CPU and memory utilization.
|
|
61
|
+
2. Time spent queueing for a Puma worker to `accept` requests and additional latency caused by CPU contention.
|
|
62
|
+
|
|
63
|
+
If latency is important to you, you will have to accept lower utilization, and vice versa.
|
|
56
64
|
|
|
57
|
-
|
|
58
|
-
utilization means you've got capacity still but aren't starving threads.
|
|
65
|
+
## Container/VPS sizing
|
|
59
66
|
|
|
60
|
-
|
|
67
|
+
You will have to make a decision about how "big" to make each pod/VPS/server/dyno.
|
|
68
|
+
|
|
69
|
+
**TL;DR:** 80% of Puma apps will end up deploying "pods" of 4 workers, 5 threads each, 4 vCPU and 8GB of RAM.
|
|
70
|
+
|
|
71
|
+
For the rest of this discussion, we'll adopt the Kubernetes term of "pods".
|
|
72
|
+
|
|
73
|
+
Should you run 2 pods with 50 workers each? 25 pods, each with 4 workers? 100 pods, with each Puma running in single mode? Each scenario represents the same total amount of capacity (100 Puma processes that can respond to requests), but there are tradeoffs to make:
|
|
74
|
+
|
|
75
|
+
* **Increasing worker counts decreases latency, but means you scale in bigger "chunks"**. Worker counts should be somewhere between 4 and 32 in most cases. You want more than 4 in order to minimize time spent in request queueing for a free Puma worker, but probably less than ~32 because otherwise autoscaling is working in too large of an increment or they probably won't fit very well into your nodes. In any queueing system, queue time is proportional to 1/n, where n is the number of things pulling from the queue. Each pod will have its own request queue (i.e., the socket backlog). If you have 4 pods with 1 worker each (4 request queues), wait times are, proportionally, about 4 times higher than if you had 1 pod with 4 workers (1 request queue).
|
|
76
|
+
* **Increasing thread counts will increase throughput, but also latency and memory use**. Unless you have a very I/O-heavy application (50%+ time spent waiting on IO), use the default thread count (5 for MRI). Using higher numbers of threads with low I/O wait (<50% of wall clock time) will lead to additional request latency and additional memory usage.
|
|
77
|
+
* **Increasing worker counts decreases memory per worker on average**. More processes per pod reduces memory usage per process, because of copy-on-write memory and because the cost of the single master process is "amortized" over more child processes.
|
|
78
|
+
* **Low worker counts (<4) have exceptionally poor throughput**. Don't run fewer than 4 processes per pod if you can avoid it. Low numbers of processes per pod will lead to high request queueing (see discussion above), which means you will have to run more pods and consume more resources.
|
|
79
|
+
* **CPU-core-to-worker ratios should be around 1**. If running Puma with `threads > 1`, allocate 1 CPU core (see definition above!) per worker. If single threaded, allocate ~0.75 cpus per worker. Most web applications spend about 25% of their time in I/O - but when you're running multi-threaded, your Puma process will have higher CPU usage and should be able to fully saturate a CPU core. Using `workers :auto` will size workers to this guidance on most platforms.
|
|
80
|
+
* **Don't set memory limits unless necessary**. Most Puma processes will use about ~512MB-1GB per worker, and about 1GB for the master process. However, you probably shouldn't bother with setting memory limits lower than around 2GB per process, because most places you are deploying will have 2GB of RAM per CPU. A sensible memory limit for a Puma configuration of 4 child workers might be something like 8 GB (1 GB for the master, 7GB for the 4 children).
|
|
81
|
+
|
|
82
|
+
**Measuring utilization and queue time**
|
|
61
83
|
|
|
62
84
|
Using a timestamp header from an upstream proxy server (e.g., `nginx` or
|
|
63
85
|
`haproxy`) makes it possible to indicate how long requests have been waiting for
|
|
@@ -75,7 +97,7 @@ a Puma thread to become available.
|
|
|
75
97
|
* `env['puma.request_body_wait']` contains the number of milliseconds Puma
|
|
76
98
|
spent waiting for the client to send the request body.
|
|
77
99
|
* haproxy: `%Th` (TLS handshake time) and `%Ti` (idle time before request)
|
|
78
|
-
can
|
|
100
|
+
can also be added as headers.
|
|
79
101
|
|
|
80
102
|
## Should I daemonize?
|
|
81
103
|
|
|
@@ -100,3 +122,16 @@ or hell, even `monit`.
|
|
|
100
122
|
You probably will want to deploy some new code at some point, and you'd like
|
|
101
123
|
Puma to start running that new code. There are a few options for restarting
|
|
102
124
|
Puma, described separately in our [restart documentation](restart.md).
|
|
125
|
+
|
|
126
|
+
## Migrating from Unicorn
|
|
127
|
+
|
|
128
|
+
* If you're migrating from Unicorn, here are some settings to start with:
|
|
129
|
+
* Set workers to half the number of Unicorn workers you're using
|
|
130
|
+
* Set threads to 2
|
|
131
|
+
* Enjoy 50% memory savings
|
|
132
|
+
* As you grow more confident in the thread-safety of your app, you can tune the
|
|
133
|
+
workers down and the threads up.
|
|
134
|
+
|
|
135
|
+
## Ubuntu / Systemd (Systemctl) Installation
|
|
136
|
+
|
|
137
|
+
See [systemd.md](systemd.md)
|
data/docs/grpc.md
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
# Using gRPC with Puma in Clustered Mode
|
|
2
|
+
|
|
3
|
+
This guide shows how to set up gRPC with Puma in a clustered environment using Puma's hooks to manage gRPC's lifecycle methods during forking.
|
|
4
|
+
|
|
5
|
+
## The Problem
|
|
6
|
+
|
|
7
|
+
In a clustered Puma setup, you might encounter the following error when using gRPC:
|
|
8
|
+
|
|
9
|
+
```
|
|
10
|
+
grpc cannot be used between calls to GRPC.prefork and GRPC.postfork_child or GRPC.postfork_parent
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
To work correctly, gRPC needs these methods called at specific points in the process lifecycle:
|
|
14
|
+
- `GRPC.prefork`: Called before forking.
|
|
15
|
+
- `GRPC.postfork_child`: Called in the child process after forking.
|
|
16
|
+
- `GRPC.postfork_parent`: Called in the parent process after forking.
|
|
17
|
+
|
|
18
|
+
Puma provides hooks such as `on_worker_fork`, `after_worker_fork`, and `on_worker_boot` to execute code during these lifecycle events. Understanding the behavior of these hooks is key to ensuring gRPC operates correctly in a clustered setup.
|
|
19
|
+
|
|
20
|
+
## The Solution
|
|
21
|
+
|
|
22
|
+
### Example Configuration
|
|
23
|
+
|
|
24
|
+
This configuration integrates gRPC's lifecycle methods in a clustered Puma setup and works whether preloading is enabled or not.
|
|
25
|
+
|
|
26
|
+
```ruby
|
|
27
|
+
# config/puma.rb
|
|
28
|
+
|
|
29
|
+
is_mac = RUBY_PLATFORM.include?("darwin")
|
|
30
|
+
|
|
31
|
+
before_worker_fork do |index|
|
|
32
|
+
GRPC.prefork unless is_mac
|
|
33
|
+
end
|
|
34
|
+
|
|
35
|
+
after_worker_fork do |index|
|
|
36
|
+
GRPC.postfork_parent unless is_mac
|
|
37
|
+
end
|
|
38
|
+
|
|
39
|
+
before_worker_boot do
|
|
40
|
+
GRPC.postfork_child unless is_mac
|
|
41
|
+
end
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
### Understanding the Lifecycle and Hooks
|
|
45
|
+
|
|
46
|
+
Puma's hooks determine when to call gRPC's lifecycle methods. Each hook plays a specific role in managing the lifecycle during forking:
|
|
47
|
+
|
|
48
|
+
- **`on_worker_fork`**:
|
|
49
|
+
- This hook runs before forking workers and is where you call `GRPC.prefork`.
|
|
50
|
+
- In preloading setups (default in Puma v7), it runs in the **master process** before workers are forked, as the application is preloaded in the master process.
|
|
51
|
+
- Without preloading, it still runs in the **master process** before forking workers, but the application is not preloaded.
|
|
52
|
+
- `GRPC.prefork` is called here to prepare GRPC for the forking process.
|
|
53
|
+
|
|
54
|
+
- **`after_worker_fork`**:
|
|
55
|
+
- This hook always runs in the **master process** after a worker is forked, regardless of whether preloading is enabled.
|
|
56
|
+
- Call `GRPC.postfork_parent` here to finalize the master process's state after forking.
|
|
57
|
+
|
|
58
|
+
- **`on_worker_boot`**:
|
|
59
|
+
- This hook always runs in the **worker process** after it is forked, regardless of whether preloading is enabled.
|
|
60
|
+
- Call `GRPC.postfork_child` here to finalize the worker's state.
|
|
61
|
+
|
|
62
|
+
**Note**: On macOS, these methods are skipped because gRPC does not require them due to differences in how forking works.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="152.027" height="150.013" viewBox="0 0 114.02 112.51"><defs><clipPath id="a"><path d="M0 .012h63V62H0Zm0 0"/></clipPath><clipPath id="b"><path d="M31.156.004c17.11 0 30.98 13.871 30.98 30.98s-13.87 30.977-30.98 30.977C14.051 61.96.18 48.094.18 30.984S14.05.004 31.156.004m0 0" clip-rule="evenodd"/></clipPath><clipPath id="d"><path d="M52 .012h62V62H52Zm0 0"/></clipPath><clipPath id="e"><path d="M83.043.008c17.105 0 30.977 13.867 30.977 30.976 0 17.11-13.872 30.977-30.977 30.977-17.11 0-30.98-13.867-30.98-30.977S65.933.008 83.043.008m0 0" clip-rule="evenodd"/></clipPath><clipPath id="g"><path d="M52 .012h11V62H52Zm0 0"/></clipPath><clipPath id="h"><path d="M31.156.004c17.11 0 30.98 13.871 30.98 30.98s-13.87 30.977-30.98 30.977C14.051 61.96.18 48.094.18 30.984S14.05.004 31.156.004m0 0" clip-rule="evenodd"/></clipPath><clipPath id="i"><path d="M83.043.008c17.105 0 30.977 13.867 30.977 30.976 0 17.11-13.872 30.977-30.977 30.977-17.11 0-30.98-13.867-30.98-30.977S65.933.008 83.043.008m0 0" clip-rule="evenodd"/></clipPath><clipPath id="k"><path d="M0 50h62v62.512H0Zm0 0"/></clipPath><clipPath id="l"><path d="M30.977 50.555c17.109 0 30.976 13.87 30.976 30.98s-13.867 30.973-30.976 30.973C13.867 112.508 0 98.645 0 81.535s13.867-30.98 30.977-30.98m0 0" clip-rule="evenodd"/></clipPath><clipPath id="n"><path d="M0 50h62v12H0Zm0 0"/></clipPath><clipPath id="o"><path d="M31.156.004c17.11 0 30.98 13.871 30.98 30.98s-13.87 30.977-30.98 30.977C14.051 61.96.18 48.094.18 30.984S14.05.004 31.156.004m0 0" clip-rule="evenodd"/></clipPath><clipPath id="p"><path d="M30.977 50.555c17.109 0 30.976 13.87 30.976 30.98s-13.867 30.973-30.976 30.973C13.867 112.508 0 98.645 0 81.535s13.867-30.98 30.977-30.98m0 0" clip-rule="evenodd"/></clipPath><clipPath id="r"><path d="M51 50h63v62.512H51Zm0 0"/></clipPath><clipPath id="s"><path d="M82.86 50.555c17.109 0 30.98 13.87 30.98 30.98s-13.871 30.977-30.98 
30.977c-17.106 0-30.977-13.867-30.977-30.977s13.87-30.98 30.976-30.98m0 0" clip-rule="evenodd"/></clipPath><clipPath id="u"><path d="M52 50h62v12H52Zm0 0"/></clipPath><clipPath id="v"><path d="M83.043.008c17.105 0 30.977 13.867 30.977 30.976 0 17.11-13.872 30.977-30.977 30.977-17.11 0-30.98-13.867-30.98-30.977S65.933.008 83.043.008m0 0" clip-rule="evenodd"/></clipPath><clipPath id="w"><path d="M82.86 50.555c17.109 0 30.98 13.87 30.98 30.98s-13.871 30.977-30.98 30.977c-17.106 0-30.977-13.867-30.977-30.977s13.87-30.98 30.976-30.98m0 0" clip-rule="evenodd"/></clipPath><clipPath id="y"><path d="M51 50h11v62.512H51Zm0 0"/></clipPath><clipPath id="z"><path d="M30.977 50.555c17.109 0 30.976 13.87 30.976 30.98s-13.867 30.973-30.976 30.973C13.867 112.508 0 98.645 0 81.535s13.867-30.98 30.977-30.98m0 0" clip-rule="evenodd"/></clipPath><clipPath id="A"><path d="M82.86 50.555c17.109 0 30.98 13.87 30.98 30.98s-13.871 30.977-30.98 30.977c-17.106 0-30.977-13.867-30.977-30.977s13.87-30.98 30.976-30.98m0 0" clip-rule="evenodd"/></clipPath><image xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABK0lEQVR4nO3SwQmEQAAEQfUhPg4xFDM0AnM0lIuiWFi6IhiaWb/9WWZ0H+/oCcs2esDMigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaH1On+jN0yr50LFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKC/0BiakDO9qS1OUAAAAASUVORK5CYII=" id="c" width="116" height="116" x="0" y="0"/><image 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABKElEQVR4nO3dwQmAQBAEQc+3iLmYrCmZlUnYnEhVBEOz/x3Hvi1fcl737AmvWWcP+DNxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuaHztT8SfuNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUMPkcYCz6dHDDoAAAAASUVORK5CYII=" id="f" width="116" height="116" x="0" y="0"/><image xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABMElEQVR4nO3csQmEUBBAwfNiETO7sli7MrMEWzAZPuibApblsfFO6zL/Rli3nc6/zoPOf+I/eoE3Ky5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFplH/c7+gy4WKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXugG+KwQ7PgO2kwAAAABJRU5ErkJggg==" id="j" width="116" height="116" x="0" y="0"/><image xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABJ0lEQVR4nO3SsQmAQAAEQTUWEXPB/oszMTOyiuFBdio4lpv3bZ1iLKMH/FlxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouND/3MXoDcV7v6Ak9VyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRc6AM3AgQ7OdMblwAAAABJRU5ErkJggg==" id="m" width="116" height="116" x="0" y="0"/><image 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABM0lEQVR4nO3dsQnDQBBFQZ1waGSFasP9F+E2lKoDV+BIPA7MTAWfx+Y79u250FhnD/hn4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNzQ4/M6Zm+45X2dsyf85HJD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjc0PAnouNyQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNfQEBTgQ9BPCEIQAAAABJRU5ErkJggg==" id="q" width="116" height="116" x="0" y="0"/><image xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABJUlEQVR4nO3SsQmAQAAEQRVDEUvQzP4rNDd2eZCZCo7l5mPfJhrL6AF/Jm5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2towe8ndc9esJnPDckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3JC4IXFD4obEDYkbEjckbkjckLghcUPihsQNiRsSNyRuSNyQuCFxQ+KGxA2JGxI3JG5I3NADw1YBfSH2ahsAAAAASUVORK5CYII=" id="t" width="116" height="116" x="0" y="0"/><image xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABKElEQVR4nO3dMQqAQBAEQZULRQRz//9Mc2OLA+l6wdBsvut57EuMbfaAPysuVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhcbsAW/jumdP+EyXCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kJrfyKcLhcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXOgB/tcBcv0+qHgAAAAASUVORK5CYII=" id="x" width="116" height="116" x="0" y="0"/><image 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHQAAAB0CAIAAADb+IFwAAAABmJLR0QA/wD/AP+gvaeTAAABMklEQVR4nO3SwQmEUAxAwXXxpogdaP9VaRHet4W9DB/kTQFJeGTat/UT4zv6gDcrLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFheZRi89jofOv+6Hz/9HnQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi4UHGh4kLFhYoLFRcqLlRcqLhQcaHiQsWFigsVFyouVFyouFBxoeJCxYWKCxUXKi5UXKi40LRv6+gbXqvPhYoLFRf6AZBxBD0Rzoe7AAAAAElFTkSuQmCC" id="B" width="116" height="116" x="0" y="0"/></defs><g clip-path="url(#a)"><g clip-path="url(#b)"><use xlink:href="#c" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g><g clip-path="url(#d)"><g clip-path="url(#e)"><use xlink:href="#f" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g><g clip-path="url(#g)"><g clip-path="url(#h)"><g clip-path="url(#i)"><use xlink:href="#j" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g></g><g clip-path="url(#k)"><g clip-path="url(#l)"><use xlink:href="#m" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g><g clip-path="url(#n)"><g clip-path="url(#o)"><g clip-path="url(#p)"><use xlink:href="#q" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g></g><g clip-path="url(#r)"><g clip-path="url(#s)"><use xlink:href="#t" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g><g clip-path="url(#u)"><g clip-path="url(#v)"><g clip-path="url(#w)"><use xlink:href="#x" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g></g><g clip-path="url(#y)"><g clip-path="url(#z)"><g clip-path="url(#A)"><use xlink:href="#B" transform="translate(-.61 -1.1)scale(1.00103)"/></g></g></g></svg>
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
<svg xmlns="http://www.w3.org/2000/svg" width="596.587" height="219.92" viewBox="0 0 447.44 164.94"><defs><clipPath id="a"><path d="M0 0h437v164.941H0Zm0 0"/></clipPath><clipPath id="b"><path d="M10 0h437.441v164.941H10Zm0 0"/></clipPath><clipPath id="c"><path d="M5 0h438v164.941H5Zm0 0"/></clipPath></defs><g clip-path="url(#a)"><path fill="#e0067f" d="M238.277 102.027c-1.859-1.246-7.484-1.246-13.086-1.906-5.601-.598-8.093-1.844-3.734-4.328 4.363-2.496 12.465-16.813 12.465-16.813 0 3.727 0 9.317 2.496 11.22 2.477 1.835 6.848 1.835 6.848 4.956s-3.121 8.094-4.989 6.871m189.965-55.46c-3.742-3.735-9.965-11.82-13.707-15.555-3.742-3.723-8.71-4.375-18.07-6.23-4.98-7.485-13.703-4.989-16.82 0-3.735 0-18.692 5.609-24.922 10.593-6.22 4.98-32.371 4.98-42.336 0-9.969-4.984-19.93-22.422-36.121-31.145-16.2-8.73-39.88-1.25-39.88-1.25-21.8-4.37-59.144 32.395-74.71 45.454-15.578 13.086-29.899 18.687-43.606 23.062-13.687 4.379-62.902 12.45-81.593 14.293-18.668 1.906-33.63 16.852-36.118 20.582-2.5 3.758 8.715 13.7 13.715 10.606 4.965-3.114 21.164-11.836 29.875-15.602 8.742-3.727 42.367-8.078 55.446-9.34 13.066-1.25 52.32-13.055 54.808-14.922 2.484-1.883 7.473-6.87 8.73 0 1.231 6.832 9.344 16.184 17.415 14.332 8.109-1.89 6.226 1.852 3.742 7.453-2.473 5.598 1.254 15.555 9.976 15.555s28.649 3.121 32.38 3.121c3.745 0 2.48 2.496-4.36 4.989-6.863 2.5-9.332 5.624-1.246 8.714 8.086 3.121 21.187 3.121 26.144-.617 4.996-3.75 3.75-10.59 6.262-14.332 2.461-3.73 4.969 4.988 1.848 10.582-3.114 5.637-6.227 14.973-11.828 14.973-5.61 0-18.7 5.594-23.676 8.094-4.988 2.496-11.215 11.191-3.73 13.699 7.464 2.476 12.452 1.254 16.816-3.113 4.37-4.364 15.562-1.875 24.277-5.61 8.738-3.758 11.871-9.957 12.465-13.07.629-3.145 4.348-3.145 4.348-3.742.648 6.254 3.75 3.742 11.214 8.113 7.477 4.348 17.457 3.723 22.418-1.258 4.989-4.988 0-7.465-6.203-9.348-6.234-1.886-11.855-3.73-16.84-6.238-4.964-2.5 3.13-4.984 9.364-6.23 6.207-1.243 26.148-3.723 30.492-8.727 4.402-4.973 19.344-16.805 25.566-19.902 13.051-4.352 
21.16-9.356 30.504-15.57 9.336-6.243 17.43-5.004 20.559-4.368 3.105.617 13.07 6.219 17.437 6.219 4.348 0 11.22-2.473 11.82-8.707.63-6.227 3.762-9.352 6.231-13.086 2.492-3.742-4.36-8.723-8.086-12.473"/></g><g clip-path="url(#b)"><path fill="#f4f014" d="M248.86 102.027c-1.86-1.246-7.485-1.246-13.087-1.906-5.605-.598-8.093-1.844-3.734-4.328C236.402 93.297 244.5 78.98 244.5 78.98c0 3.727 0 9.317 2.5 11.22 2.473 1.835 6.848 1.835 6.848 4.956s-3.121 8.094-4.989 6.871m189.964-55.46c-3.742-3.735-9.969-11.82-13.71-15.555-3.743-3.723-8.708-4.375-18.067-6.23-4.98-7.485-13.703-4.989-16.82 0-3.739 0-18.692 5.609-24.922 10.593-6.22 4.98-32.371 4.98-42.34 0C313 30.391 303.039 12.953 286.848 4.23c-16.2-8.73-39.88-1.25-39.88-1.25-21.8-4.37-59.144 32.395-74.714 45.454-15.574 13.086-29.895 18.687-43.602 23.062-13.687 4.379-62.902 12.45-81.593 14.293-18.668 1.906-33.63 16.852-36.118 20.582-2.5 3.758 8.715 13.7 13.715 10.606 4.965-3.114 21.164-11.836 29.875-15.602 8.742-3.727 42.364-8.078 55.446-9.34 13.062-1.25 52.32-13.055 54.808-14.922 2.485-1.883 7.469-6.87 8.73 0 1.231 6.832 9.344 16.184 17.415 14.332 8.11-1.89 6.226 1.852 3.742 7.453-2.477 5.598 1.254 15.555 9.976 15.555s28.649 3.121 32.375 3.121c3.75 0 2.485 2.496-4.355 4.989-6.863 2.5-9.332 5.624-1.246 8.714 8.086 3.121 21.187 3.121 26.144-.617 4.997-3.75 3.75-10.59 6.262-14.332 2.461-3.73 4.965 4.988 1.848 10.582-3.113 5.637-6.227 14.973-11.828 14.973-5.61 0-18.7 5.594-23.68 8.094-4.984 2.496-11.211 11.191-3.727 13.699 7.465 2.476 12.454 1.254 16.817-3.113 4.37-4.364 15.562-1.875 24.277-5.61 8.738-3.758 11.871-9.957 12.465-13.07.629-3.145 4.348-3.145 4.348-3.742.648 6.254 3.75 3.742 11.214 8.113 7.477 4.348 17.458 3.723 22.418-1.258 4.985-4.988 0-7.465-6.203-9.348-6.234-1.886-11.859-3.73-16.84-6.238-4.964-2.5 3.13-4.984 9.36-6.23 6.21-1.243 26.152-3.723 30.496-8.727 4.402-4.973 19.344-16.805 25.566-19.902 13.051-4.352 21.157-9.356 30.504-15.57 9.336-6.243 17.43-5.004 20.559-4.368 3.105.617 13.07 6.219 17.433 6.219 4.352 0 
11.223-2.473 11.825-8.707.629-6.227 3.761-9.352 6.23-13.086 2.492-3.742-4.36-8.723-8.086-12.473"/></g><g clip-path="url(#c)"><path fill="#312f34" d="M244.07 102.027c-1.86-1.246-7.484-1.246-13.09-1.906-5.601-.598-8.09-1.844-3.734-4.328 4.367-2.496 12.465-16.813 12.465-16.813 0 3.727 0 9.317 2.5 11.22 2.473 1.835 6.844 1.835 6.844 4.956s-3.118 8.094-4.985 6.871m189.965-55.46c-3.742-3.735-9.969-11.82-13.71-15.555-3.743-3.723-8.708-4.375-18.067-6.23-4.98-7.485-13.703-4.989-16.824 0-3.735 0-18.688 5.609-24.922 10.593-6.215 4.98-32.367 4.98-42.336 0-9.969-4.984-19.926-22.422-36.121-31.145-16.2-8.73-39.875-1.25-39.875-1.25-21.801-4.37-59.145 32.395-74.715 45.454-15.578 13.086-29.895 18.687-43.606 23.062-13.687 4.379-62.902 12.45-81.59 14.293-18.667 1.906-33.628 16.852-36.12 20.582-2.497 3.758 8.714 13.7 13.718 10.606 4.965-3.114 21.164-11.836 29.871-15.602 8.746-3.727 42.367-8.078 55.45-9.34 13.062-1.25 52.316-13.055 54.808-14.922 2.48-1.883 7.469-6.87 8.727 0 1.234 6.832 9.347 16.184 17.418 14.332 8.105-1.89 6.222 1.852 3.742 7.453-2.477 5.598 1.254 15.555 9.972 15.555 8.723 0 28.653 3.121 32.38 3.121 3.75 0 2.484 2.496-4.36 4.989-6.86 2.5-9.328 5.624-1.242 8.714 8.086 3.121 21.183 3.121 26.144-.617 4.996-3.75 3.75-10.59 6.262-14.332 2.461-3.73 4.965 4.988 1.844 10.582-3.11 5.637-6.223 14.973-11.828 14.973-5.61 0-18.696 5.594-23.676 8.094-4.988 2.496-11.211 11.191-3.727 13.699 7.461 2.476 12.45 1.254 16.813-3.113 4.375-4.364 15.562-1.875 24.277-5.61 8.738-3.758 11.875-9.957 12.465-13.07.633-3.145 4.352-3.145 4.352-3.742.644 6.254 3.75 3.742 11.21 8.113 7.481 4.348 17.461 3.723 22.418-1.258 4.989-4.988 0-7.465-6.203-9.348-6.23-1.886-11.855-3.73-16.836-6.238-4.964-2.5 3.13-4.984 9.36-6.23 6.21-1.243 26.152-3.723 30.496-8.727 4.402-4.973 19.34-16.805 25.566-19.902 13.051-4.352 21.157-9.356 30.5-15.57 9.34-6.243 17.434-5.004 20.559-4.368 3.105.617 13.074 6.219 17.437 6.219 4.352 0 11.22-2.473 11.82-8.707.63-6.227 3.766-9.352 6.235-13.086 
2.488-3.742-4.36-8.723-8.086-12.473"/></g></svg>
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
<svg xmlns="http://www.w3.org/2000/svg" width="557.027" height="158.88" viewBox="0 0 417.77 119.16"><defs><clipPath id="a"><path d="M0 0h110v119.16H0Zm0 0"/></clipPath><clipPath id="b"><path d="M109 0h100v119.16H109Zm0 0"/></clipPath><clipPath id="c"><path d="M208 0h101v119.16H208Zm0 0"/></clipPath><clipPath id="d"><path d="M308 0h109.77v119.16H308Zm0 0"/></clipPath></defs><g clip-path="url(#a)"><path fill="#e0067f" d="M59.582 119.16C26.676 119.16 0 92.484 0 59.578S26.676 0 59.582 0c20.86 0 39.215 10.719 49.86 26.953-6.145 9.371-9.72 20.582-9.72 32.625 0 12.047 3.575 23.258 9.72 32.629-10.645 16.234-29 26.953-49.86 26.953"/></g><path fill="#fff" d="M67.008 55.258c0-2.574-2.07-4.031-4.649-4.031h-7.613v8.007h7.613c2.578 0 4.649-1.453 4.649-3.976M45.113 80.18V42.828h18.703c8.399 0 12.993 5.656 12.993 12.43 0 6.722-4.594 12.379-12.993 12.379h-9.07V80.18z"/><g clip-path="url(#b)"><path fill="#43a6d6" d="M159.3 119.16c-20.859 0-39.214-10.719-49.859-26.953 6.145-9.371 9.72-20.582 9.72-32.629 0-12.043-3.575-23.254-9.72-32.625C120.086 10.72 138.441 0 159.301 0c20.742 0 39.008 10.594 49.676 26.672-6.258 9.426-9.907 20.742-9.907 32.906 0 12.168 3.649 23.48 9.907 32.91-10.668 16.075-28.934 26.672-49.676 26.672"/></g><path fill="#21255a" d="M109.441 92.207c-6.144-9.371-9.718-20.582-9.718-32.629 0-12.043 3.574-23.254 9.718-32.625 6.145 9.371 9.72 20.582 9.72 32.625 0 12.047-3.575 23.258-9.72 32.629"/><path fill="#fff" d="M141.477 65.059v-22.23h9.8v21.894c0 4.425 2.688 7.617 7.895 7.617 5.152 0 7.84-3.192 7.84-7.617V42.828h9.746v22.176c0 9.297-5.602 15.848-17.586 15.848s-17.695-6.61-17.695-15.793"/><g clip-path="url(#c)"><path fill="#f4f014" d="M258.652 119.16c-20.742 0-39.004-10.597-49.675-26.672 6.257-9.43 9.906-20.742 9.906-32.91 0-12.164-3.649-23.48-9.906-32.906C219.648 10.594 237.91 0 258.652 0c20.801 0 39.114 10.656 49.77 26.809-6.203 9.402-9.813 20.664-9.813 32.77 0 12.105 3.61 23.37 9.813 32.769-10.656 16.156-28.969 26.812-49.77 26.812"/></g><path fill="#2da140" 
d="M208.977 92.488c-6.258-9.43-9.907-20.742-9.907-32.91 0-12.164 3.649-23.48 9.907-32.906 6.257 9.426 9.906 20.742 9.906 32.906 0 12.168-3.649 23.48-9.906 32.91"/><path fill="#fff" d="M270.215 80.18V55.484l-9.406 24.696h-4.254l-9.465-24.696V80.18h-9.633V42.828h13.383l7.84 20.606 7.785-20.606h13.383V80.18z"/><g clip-path="url(#d)"><path fill="#312f34" d="M358.188 119.16c-20.801 0-39.11-10.656-49.766-26.812 6.2-9.399 9.812-20.664 9.812-32.77 0-12.105-3.613-23.367-9.812-32.77C319.078 10.657 337.387 0 358.187 0c32.907 0 59.583 26.672 59.583 59.578s-26.676 59.582-59.582 59.582"/></g><path fill="#302d17" d="M308.422 92.348c-6.203-9.399-9.813-20.664-9.813-32.77 0-12.105 3.61-23.367 9.813-32.77 6.2 9.403 9.812 20.665 9.812 32.77s-3.613 23.371-9.812 32.77"/><path fill="#fff" d="m358.215 52.348-4.758 14.054h9.465Zm9.187 27.832-1.851-5.375h-14.727l-1.847 5.375h-10.922l14.058-37.352h12.153L378.32 80.18z"/></svg>
|
data/docs/jungle/README.md
CHANGED
data/docs/kubernetes.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
## Running Puma in Kubernetes
|
|
4
4
|
|
|
5
|
-
In general running Puma in Kubernetes works as-is, no special configuration is needed beyond what you would write anyway to get a new Kubernetes Deployment going. There is one known interaction between the way Kubernetes handles pod termination and how Puma handles `SIGINT`, where some
|
|
5
|
+
In general running Puma in Kubernetes works as-is, no special configuration is needed beyond what you would write anyway to get a new Kubernetes Deployment going. There is one known interaction between the way Kubernetes handles pod termination and how Puma handles `SIGINT`, where some requests might be sent to Puma after it has already entered graceful shutdown mode and is no longer accepting requests. This can lead to dropped requests during rolling deploys. A workaround for this is listed at the end of this article.
|
|
6
6
|
|
|
7
7
|
## Basic setup
|
|
8
8
|
|
|
@@ -61,7 +61,7 @@ For some high-throughput systems, it is possible that some HTTP requests will re
|
|
|
61
61
|
4. The pod has up to `terminationGracePeriodSeconds` (default: 30 seconds) to gracefully shut down. Puma will do this (after it receives SIGTERM) by closing down the socket that accepts new requests and finishing any requests already running before exiting the Puma process.
|
|
62
62
|
5. If the pod is still running after `terminationGracePeriodSeconds` has elapsed, the pod receives `SIGKILL` to make sure the process inside it stops. After that, the container exits and all other Kubernetes objects associated with it are cleaned up.
|
|
63
63
|
|
|
64
|
-
There is a subtle race condition between step 2 and 3: The replication controller does not synchronously remove the pod from the Services AND THEN call the pre-stop hook of the pod, but rather it asynchronously sends "remove this pod from your endpoints" requests to the Services and then immediately proceeds to invoke the pods' pre-stop hook. If the Service controller (typically something like nginx or haproxy) receives
|
|
64
|
+
There is a subtle race condition between step 2 and 3: The replication controller does not synchronously remove the pod from the Services AND THEN call the pre-stop hook of the pod, but rather it asynchronously sends "remove this pod from your endpoints" requests to the Services and then immediately proceeds to invoke the pods' pre-stop hook. If the Service controller (typically something like nginx or haproxy) receives and handles this request "too" late (due to internal lag or network latency between the replication and Service controllers) then it is possible that the Service controller will send one or more requests to a Puma process which has already shut down its listening socket. These requests will then fail with 5XX error codes.
|
|
65
65
|
|
|
66
66
|
The way Kubernetes works this way, rather than handling step 2 synchronously, is due to the CAP theorem: in a distributed system there is no way to guarantee that any message will arrive promptly. In particular, waiting for all Service controllers to report back might get stuck for an indefinite time if one of them has already been terminated or if there has been a net split. A way to work around this is to add a sleep to the pre-stop hook of the same time as the `terminationGracePeriodSeconds` time. This will allow the Puma process to keep serving new requests during the entire grace period, although it will no longer receive new requests after all Service controllers have propagated the removal of the pod from their endpoint lists. Then, after `terminationGracePeriodSeconds`, the pod receives `SIGKILL` and closes down. If your process can't handle SIGKILL properly, for example because it needs to release locks in different services, you can also sleep for a shorter period (and/or increase `terminationGracePeriodSeconds`) as long as the time slept is longer than the time that your Service controllers take to propagate the pod removal. The downside of this workaround is that all pods will take at minimum the amount of time slept to shut down and this will increase the time required for your rolling deploy.
|
|
67
67
|
|
|
@@ -69,12 +69,5 @@ More discussions and links to relevant articles can be found in https://github.c
|
|
|
69
69
|
|
|
70
70
|
## Workers Per Pod, and Other Config Issues
|
|
71
71
|
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
* Worker counts should be somewhere between 4 and 32 in most cases. You want more than 4 in order to minimize time spent in request queueing for a free Puma worker, but probably less than ~32 because otherwise autoscaling is working in too large of an increment or they probably won't fit very well into your nodes. In any queueing system, queue time is proportional to 1/n, where n is the number of things pulling from the queue. Each pod will have its own request queue (i.e., the socket backlog). If you have 4 pods with 1 worker each (4 request queues), wait times are, proportionally, about 4 times higher than if you had 1 pod with 4 workers (1 request queue).
|
|
75
|
-
* Unless you have a very I/O-heavy application (50%+ time spent waiting on IO), use the default thread count (5 for MRI). Using higher numbers of threads with low I/O wait (<50%) will lead to additional request queueing time (latency!) and additional memory usage.
|
|
76
|
-
* More processes per pod reduces memory usage per process, because of copy-on-write memory and because the cost of the single master process is "amortized" over more child processes.
|
|
77
|
-
* Don't run less than 4 processes per pod if you can. Low numbers of processes per pod will lead to high request queueing, which means you will have to run more pods.
|
|
78
|
-
* If multithreaded, allocate 1 CPU per worker. If single threaded, allocate 0.75 cpus per worker. Most web applications spend about 25% of their time in I/O - but when you're running multi-threaded, your Puma process will have higher CPU usage and should be able to fully saturate a CPU core.
|
|
79
|
-
* Most Puma processes will use about ~512MB-1GB per worker, and about 1GB for the master process. However, you probably shouldn't bother with setting memory limits lower than around 2GB per process, because most places you are deploying will have 2GB of RAM per CPU. A sensible memory limit for a Puma configuration of 4 child workers might be something like 8 GB (1 GB for the master, 7GB for the 4 children).
|
|
72
|
+
See our [deployment docs](./deployment.md) for more information about how to correctly size your pods and choose the right number of workers and threads.
|
|
80
73
|
|
data/docs/plugins.md
CHANGED
|
@@ -5,13 +5,13 @@ operations.
|
|
|
5
5
|
|
|
6
6
|
There are two canonical plugins to aid in the development of new plugins:
|
|
7
7
|
|
|
8
|
-
* [tmp\_restart](https://github.com/puma/puma/blob/
|
|
8
|
+
* [tmp\_restart](https://github.com/puma/puma/blob/main/lib/puma/plugin/tmp_restart.rb):
|
|
9
9
|
Restarts the server if the file `tmp/restart.txt` is touched
|
|
10
10
|
* [heroku](https://github.com/puma/puma-heroku/blob/master/lib/puma/plugin/heroku.rb):
|
|
11
11
|
Packages up the default configuration used by Puma on Heroku (being sunset
|
|
12
12
|
with the release of Puma 5.0)
|
|
13
13
|
|
|
14
|
-
Plugins are activated in a Puma configuration file (such as `config/puma.rb
|
|
14
|
+
Plugins are activated in a Puma configuration file (such as `config/puma.rb`)
|
|
15
15
|
by adding `plugin "name"`, such as `plugin "heroku"`.
|
|
16
16
|
|
|
17
17
|
Plugins are activated based on path requirements so, activating the `heroku`
|
data/docs/signals.md
CHANGED
|
@@ -33,16 +33,16 @@ Now you will see via `ps` that there is no more `tail` process. Sometimes when r
|
|
|
33
33
|
|
|
34
34
|
Puma cluster responds to these signals:
|
|
35
35
|
|
|
36
|
-
- `TTIN
|
|
37
|
-
- `TTOU
|
|
38
|
-
- `TERM
|
|
39
|
-
- `USR2
|
|
40
|
-
- `USR1
|
|
41
|
-
- `HUP
|
|
42
|
-
- `INT
|
|
43
|
-
- `CHLD`
|
|
44
|
-
- `URG
|
|
45
|
-
- `INFO` print backtraces of all puma threads
|
|
36
|
+
- `TTIN`: Increment the worker count by 1.
|
|
37
|
+
- `TTOU`: Decrement the worker count by 1.
|
|
38
|
+
- `TERM`: Send `TERM` to worker. The worker will attempt to finish then exit.
|
|
39
|
+
- `USR2`: Restart workers. This also reloads the Puma configuration file, if there is one.
|
|
40
|
+
- `USR1`: Restart workers in phases, a rolling restart. This will not reload the configuration file.
|
|
41
|
+
- `HUP`: Reopen log files defined in `stdout_redirect` configuration parameter. If there is no `stdout_redirect` option provided, it will behave like `INT`.
|
|
42
|
+
- `INT`: Equivalent of sending Ctrl-C to cluster. Puma will attempt to finish then exit.
|
|
43
|
+
- `CHLD`: Reap zombie child processes and wake event loop in `fork_worker` mode.
|
|
44
|
+
- `URG`: Refork workers in phases from worker 0 if `fork_worker` option is enabled.
|
|
45
|
+
- `INFO` (or `PWR` for systems without `INFO`) print backtraces of all puma threads (if supported on your platform).
|
|
46
46
|
|
|
47
47
|
## Callbacks order in case of different signals
|
|
48
48
|
|
data/docs/stats.md
CHANGED
|
@@ -70,7 +70,7 @@ When Puma runs in single mode, these stats are available at the top level. When
|
|
|
70
70
|
|
|
71
71
|
### cluster mode
|
|
72
72
|
|
|
73
|
-
* phase: which phase of restart the process is in, during [phased restart](https://github.com/puma/puma/blob/
|
|
73
|
+
* phase: which phase of restart the process is in, during [phased restart](https://github.com/puma/puma/blob/main/docs/restart.md)
|
|
74
74
|
* workers: ??
|
|
75
75
|
* booted_workers: how many workers currently running?
|
|
76
76
|
* old_workers: ??
|
data/docs/systemd.md
CHANGED
|
@@ -119,8 +119,8 @@ or cluster mode.
|
|
|
119
119
|
### Sockets and symlinks
|
|
120
120
|
|
|
121
121
|
When using releases folders, you should set the socket path using the shared
|
|
122
|
-
folder path (ex. `/srv/
|
|
123
|
-
path (`/srv/
|
|
122
|
+
folder path (ex. `/srv/project/shared/tmp/puma.sock`), not the release folder
|
|
123
|
+
path (`/srv/project/releases/1234/tmp/puma.sock`).
|
|
124
124
|
|
|
125
125
|
Puma will detect the release path socket as different than the one provided by
|
|
126
126
|
systemd and attempt to bind it again, resulting in the exception `There is
|
|
@@ -139,7 +139,7 @@ automatically for any activated socket. When systemd socket activation is not
|
|
|
139
139
|
enabled, this option does nothing.
|
|
140
140
|
|
|
141
141
|
This also accepts an optional argument `only` (DSL: `'only'`) to discard any
|
|
142
|
-
binds that
|
|
142
|
+
binds that are not socket activated.
|
|
143
143
|
|
|
144
144
|
## Usage
|
|
145
145
|
|
|
@@ -2,6 +2,7 @@ package org.jruby.puma;
|
|
|
2
2
|
|
|
3
3
|
import org.jruby.Ruby;
|
|
4
4
|
import org.jruby.RubyHash;
|
|
5
|
+
import org.jruby.RubyString;
|
|
5
6
|
import org.jruby.util.ByteList;
|
|
6
7
|
|
|
7
8
|
public class Http11Parser {
|
|
@@ -12,44 +13,44 @@ public class Http11Parser {
|
|
|
12
13
|
|
|
13
14
|
machine puma_parser;
|
|
14
15
|
|
|
15
|
-
action mark {
|
|
16
|
+
action mark {this.mark = fpc; }
|
|
16
17
|
|
|
17
|
-
action start_field {
|
|
18
|
-
action snake_upcase_field { /*
|
|
18
|
+
action start_field { this.field_start = fpc; }
|
|
19
|
+
action snake_upcase_field { /* done lazily as needed */ }
|
|
19
20
|
action write_field {
|
|
20
|
-
|
|
21
|
+
this.field_len = fpc-this.field_start;
|
|
21
22
|
}
|
|
22
23
|
|
|
23
|
-
action start_value {
|
|
24
|
+
action start_value { this.mark = fpc; }
|
|
24
25
|
action write_value {
|
|
25
|
-
Http11.http_field(runtime,
|
|
26
|
+
Http11.http_field(runtime, envStrings, this, fpc-this.mark);
|
|
26
27
|
}
|
|
27
28
|
action request_method {
|
|
28
|
-
Http11.request_method(runtime,
|
|
29
|
+
Http11.request_method(runtime, envStrings, this, fpc-this.mark);
|
|
29
30
|
}
|
|
30
31
|
action request_uri {
|
|
31
|
-
Http11.request_uri(runtime,
|
|
32
|
+
Http11.request_uri(runtime, envStrings, this, fpc-this.mark);
|
|
32
33
|
}
|
|
33
34
|
action fragment {
|
|
34
|
-
Http11.fragment(runtime,
|
|
35
|
+
Http11.fragment(runtime, envStrings, this, fpc-this.mark);
|
|
35
36
|
}
|
|
36
37
|
|
|
37
|
-
action start_query {
|
|
38
|
+
action start_query {this.query_start = fpc; }
|
|
38
39
|
action query_string {
|
|
39
|
-
Http11.query_string(runtime,
|
|
40
|
+
Http11.query_string(runtime, envStrings, this, fpc-this.query_start);
|
|
40
41
|
}
|
|
41
42
|
|
|
42
43
|
action server_protocol {
|
|
43
|
-
Http11.server_protocol(runtime,
|
|
44
|
+
Http11.server_protocol(runtime, envStrings, this, fpc-this.mark);
|
|
44
45
|
}
|
|
45
46
|
|
|
46
47
|
action request_path {
|
|
47
|
-
Http11.request_path(runtime,
|
|
48
|
+
Http11.request_path(runtime, envStrings, this, fpc-this.mark);
|
|
48
49
|
}
|
|
49
50
|
|
|
50
51
|
action done {
|
|
51
|
-
|
|
52
|
-
http.header_done(runtime,
|
|
52
|
+
this.body_start = fpc + 1;
|
|
53
|
+
http.header_done(runtime, this, fpc + 1, pe - fpc - 1);
|
|
53
54
|
fbreak;
|
|
54
55
|
}
|
|
55
56
|
|
|
@@ -60,69 +61,54 @@ public class Http11Parser {
|
|
|
60
61
|
/** Data **/
|
|
61
62
|
%% write data noentry;
|
|
62
63
|
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
64
|
+
int cs;
|
|
65
|
+
int body_start;
|
|
66
|
+
int nread;
|
|
67
|
+
int mark;
|
|
68
|
+
int field_start;
|
|
69
|
+
int field_len;
|
|
70
|
+
int query_start;
|
|
66
71
|
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
}
|
|
72
|
+
RubyHash data;
|
|
73
|
+
byte[] buffer;
|
|
70
74
|
|
|
71
|
-
public
|
|
72
|
-
int cs;
|
|
73
|
-
int body_start;
|
|
74
|
-
int content_len;
|
|
75
|
-
int nread;
|
|
76
|
-
int mark;
|
|
77
|
-
int field_start;
|
|
78
|
-
int field_len;
|
|
79
|
-
int query_start;
|
|
80
|
-
|
|
81
|
-
RubyHash data;
|
|
82
|
-
ByteList buffer;
|
|
83
|
-
|
|
84
|
-
public void init() {
|
|
85
|
-
cs = 0;
|
|
86
|
-
|
|
87
|
-
%% write init;
|
|
88
|
-
|
|
89
|
-
body_start = 0;
|
|
90
|
-
content_len = 0;
|
|
91
|
-
mark = 0;
|
|
92
|
-
nread = 0;
|
|
93
|
-
field_len = 0;
|
|
94
|
-
field_start = 0;
|
|
95
|
-
}
|
|
96
|
-
}
|
|
75
|
+
public void init() {
|
|
97
76
|
|
|
98
|
-
|
|
77
|
+
%% write init;
|
|
78
|
+
|
|
79
|
+
body_start = 0;
|
|
80
|
+
mark = 0;
|
|
81
|
+
nread = 0;
|
|
82
|
+
field_len = 0;
|
|
83
|
+
field_start = 0;
|
|
84
|
+
}
|
|
99
85
|
|
|
100
86
|
public int execute(Ruby runtime, Http11 http, ByteList buffer, int off) {
|
|
101
87
|
int p, pe;
|
|
102
|
-
int cs =
|
|
88
|
+
int cs = this.cs;
|
|
103
89
|
int len = buffer.length();
|
|
90
|
+
int beg = buffer.begin();
|
|
91
|
+
RubyString[] envStrings = http.envStrings;
|
|
104
92
|
assert off<=len : "offset past end of buffer";
|
|
105
93
|
|
|
106
|
-
p = off;
|
|
107
|
-
pe = len;
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
byte[] data = buffer.bytes();
|
|
111
|
-
parser.buffer = buffer;
|
|
94
|
+
p = beg + off;
|
|
95
|
+
pe = beg + len;
|
|
96
|
+
byte[] data = buffer.unsafeBytes();
|
|
97
|
+
this.buffer = data;
|
|
112
98
|
|
|
113
99
|
%% write exec;
|
|
114
100
|
|
|
115
|
-
|
|
116
|
-
|
|
101
|
+
this.cs = cs;
|
|
102
|
+
this.nread += (p - off);
|
|
117
103
|
|
|
118
104
|
assert p <= pe : "buffer overflow after parsing execute";
|
|
119
|
-
assert
|
|
120
|
-
assert
|
|
121
|
-
assert
|
|
122
|
-
assert
|
|
123
|
-
assert
|
|
105
|
+
assert this.nread <= len : "nread longer than length";
|
|
106
|
+
assert this.body_start <= len : "body starts after buffer end";
|
|
107
|
+
assert this.mark < len : "mark is after buffer end";
|
|
108
|
+
assert this.field_len <= len : "field has length longer than whole buffer";
|
|
109
|
+
assert this.field_start < len : "field starts after buffer end";
|
|
124
110
|
|
|
125
|
-
return
|
|
111
|
+
return this.nread;
|
|
126
112
|
}
|
|
127
113
|
|
|
128
114
|
public int finish() {
|
|
@@ -136,10 +122,10 @@ public class Http11Parser {
|
|
|
136
122
|
}
|
|
137
123
|
|
|
138
124
|
public boolean has_error() {
|
|
139
|
-
return
|
|
125
|
+
return this.cs == puma_parser_error;
|
|
140
126
|
}
|
|
141
127
|
|
|
142
128
|
public boolean is_finished() {
|
|
143
|
-
return
|
|
129
|
+
return this.cs == puma_parser_first_final;
|
|
144
130
|
}
|
|
145
131
|
}
|