jun-puma 1.0.1-java → 1.0.2-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. checksums.yaml +4 -4
  2. data/lib/puma/puma_http11.jar +0 -0
  3. metadata +3 -81
  4. data/bin/puma-wild +0 -25
  5. data/docs/architecture.md +0 -74
  6. data/docs/compile_options.md +0 -55
  7. data/docs/deployment.md +0 -102
  8. data/docs/fork_worker.md +0 -31
  9. data/docs/images/puma-connection-flow-no-reactor.png +0 -0
  10. data/docs/images/puma-connection-flow.png +0 -0
  11. data/docs/images/puma-general-arch.png +0 -0
  12. data/docs/jungle/README.md +0 -9
  13. data/docs/jungle/rc.d/README.md +0 -74
  14. data/docs/jungle/rc.d/puma +0 -61
  15. data/docs/jungle/rc.d/puma.conf +0 -10
  16. data/docs/kubernetes.md +0 -78
  17. data/docs/nginx.md +0 -80
  18. data/docs/plugins.md +0 -38
  19. data/docs/rails_dev_mode.md +0 -28
  20. data/docs/restart.md +0 -64
  21. data/docs/signals.md +0 -98
  22. data/docs/stats.md +0 -142
  23. data/docs/systemd.md +0 -244
  24. data/docs/testing_benchmarks_local_files.md +0 -150
  25. data/docs/testing_test_rackup_ci_files.md +0 -36
  26. data/ext/puma_http11/PumaHttp11Service.java +0 -17
  27. data/ext/puma_http11/ext_help.h +0 -15
  28. data/ext/puma_http11/http11_parser.c +0 -1057
  29. data/ext/puma_http11/http11_parser.h +0 -65
  30. data/ext/puma_http11/http11_parser.java.rl +0 -145
  31. data/ext/puma_http11/http11_parser.rl +0 -149
  32. data/ext/puma_http11/http11_parser_common.rl +0 -54
  33. data/ext/puma_http11/mini_ssl.c +0 -832
  34. data/ext/puma_http11/no_ssl/PumaHttp11Service.java +0 -15
  35. data/ext/puma_http11/org/jruby/puma/Http11.java +0 -226
  36. data/ext/puma_http11/org/jruby/puma/Http11Parser.java +0 -455
  37. data/ext/puma_http11/org/jruby/puma/MiniSSL.java +0 -508
  38. data/ext/puma_http11/puma_http11.c +0 -492
  39. data/lib/puma/app/status.rb +0 -96
  40. data/lib/puma/binder.rb +0 -501
  41. data/lib/puma/cli.rb +0 -243
  42. data/lib/puma/client.rb +0 -632
  43. data/lib/puma/cluster/worker.rb +0 -182
  44. data/lib/puma/cluster/worker_handle.rb +0 -97
  45. data/lib/puma/cluster.rb +0 -562
  46. data/lib/puma/commonlogger.rb +0 -115
  47. data/lib/puma/configuration.rb +0 -391
  48. data/lib/puma/const.rb +0 -289
  49. data/lib/puma/control_cli.rb +0 -316
  50. data/lib/puma/detect.rb +0 -45
  51. data/lib/puma/dsl.rb +0 -1204
  52. data/lib/puma/error_logger.rb +0 -113
  53. data/lib/puma/events.rb +0 -57
  54. data/lib/puma/io_buffer.rb +0 -46
  55. data/lib/puma/jruby_restart.rb +0 -27
  56. data/lib/puma/json_serialization.rb +0 -96
  57. data/lib/puma/launcher/bundle_pruner.rb +0 -104
  58. data/lib/puma/launcher.rb +0 -484
  59. data/lib/puma/log_writer.rb +0 -147
  60. data/lib/puma/minissl/context_builder.rb +0 -95
  61. data/lib/puma/minissl.rb +0 -458
  62. data/lib/puma/null_io.rb +0 -61
  63. data/lib/puma/plugin/systemd.rb +0 -90
  64. data/lib/puma/plugin/tmp_restart.rb +0 -36
  65. data/lib/puma/plugin.rb +0 -111
  66. data/lib/puma/rack/builder.rb +0 -297
  67. data/lib/puma/rack/urlmap.rb +0 -93
  68. data/lib/puma/rack_default.rb +0 -24
  69. data/lib/puma/reactor.rb +0 -125
  70. data/lib/puma/request.rb +0 -671
  71. data/lib/puma/runner.rb +0 -213
  72. data/lib/puma/sd_notify.rb +0 -149
  73. data/lib/puma/server.rb +0 -664
  74. data/lib/puma/single.rb +0 -69
  75. data/lib/puma/state_file.rb +0 -68
  76. data/lib/puma/thread_pool.rb +0 -434
  77. data/lib/puma/util.rb +0 -141
  78. data/lib/puma.rb +0 -78
  79. data/lib/rack/handler/puma.rb +0 -141
  80. data/tools/Dockerfile +0 -16
  81. data/tools/trickletest.rb +0 -44
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 0f07d6ca63cbdbe816c25b8c6abd651bbbbef00f7306f13301f1975b2ec726e3
- data.tar.gz: 39b63d4115cb66dc290a43593f37e4a2efa1901c1359edad4e7ffd2650771395
+ metadata.gz: 93729a2531af093dfe3a707a797c6937fc035fb41f48c23b66486e01f2c66b1c
+ data.tar.gz: 1d07eebd87f8f70834e7a208bbcd1ea9255b501e1405e7fd903e4c9055da2bca
  SHA512:
- metadata.gz: 5fe0f30ad119052885c8dea934f836631f056e5d41d6b4790d15f991e6ed86c37fbc757abf88194d619065b55a633287afa2dd686aac0d3cdab803545580d08d
- data.tar.gz: 6ef6f27d740b44717ed185013faccfc7d75f963dec165b2e7befc6c41e993fc54ef681691d4f1f969ff35e6852d4831fcf6cec6ce30fffdf2032101e91af2bd0
+ metadata.gz: 5b8f718c1105ba6d8586ab8f54c34810471580f11f3b7f6124dff04dbca5903025127e43a9df79fa3c68a507b30000cc31468c0428201a87156c4e758b81a32b
+ data.tar.gz: 123566fa15a0af5bbad37502698d04822d9a25ce2a91229dd4e5e6890fb50206fbb511ed6953578fe73edc54809e31cb79d8f581168460cb18a8c681010f1164
data/lib/puma/puma_http11.jar CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: jun-puma
  version: !ruby/object:Gem::Version
- version: 1.0.1
+ version: 1.0.2
  platform: java
  authors:
  - Evan Phoenix
@@ -17,8 +17,8 @@ dependencies:
  - !ruby/object:Gem::Version
  version: '2.0'
  name: nio4r
- prerelease: false
  type: :runtime
+ prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
@@ -40,87 +40,9 @@ files:
  - LICENSE
  - README.md
  - bin/puma
- - bin/puma-wild
  - bin/pumactl
- - docs/architecture.md
- - docs/compile_options.md
- - docs/deployment.md
- - docs/fork_worker.md
- - docs/images/puma-connection-flow-no-reactor.png
- - docs/images/puma-connection-flow.png
- - docs/images/puma-general-arch.png
- - docs/jungle/README.md
- - docs/jungle/rc.d/README.md
- - docs/jungle/rc.d/puma
- - docs/jungle/rc.d/puma.conf
- - docs/kubernetes.md
- - docs/nginx.md
- - docs/plugins.md
- - docs/rails_dev_mode.md
- - docs/restart.md
- - docs/signals.md
- - docs/stats.md
- - docs/systemd.md
- - docs/testing_benchmarks_local_files.md
- - docs/testing_test_rackup_ci_files.md
- - ext/puma_http11/PumaHttp11Service.java
- - ext/puma_http11/ext_help.h
  - ext/puma_http11/extconf.rb
- - ext/puma_http11/http11_parser.c
- - ext/puma_http11/http11_parser.h
- - ext/puma_http11/http11_parser.java.rl
- - ext/puma_http11/http11_parser.rl
- - ext/puma_http11/http11_parser_common.rl
- - ext/puma_http11/mini_ssl.c
- - ext/puma_http11/no_ssl/PumaHttp11Service.java
- - ext/puma_http11/org/jruby/puma/Http11.java
- - ext/puma_http11/org/jruby/puma/Http11Parser.java
- - ext/puma_http11/org/jruby/puma/MiniSSL.java
- - ext/puma_http11/puma_http11.c
- - lib/puma.rb
- - lib/puma/app/status.rb
- - lib/puma/binder.rb
- - lib/puma/cli.rb
- - lib/puma/client.rb
- - lib/puma/cluster.rb
- - lib/puma/cluster/worker.rb
- - lib/puma/cluster/worker_handle.rb
- - lib/puma/commonlogger.rb
- - lib/puma/configuration.rb
- - lib/puma/const.rb
- - lib/puma/control_cli.rb
- - lib/puma/detect.rb
- - lib/puma/dsl.rb
- - lib/puma/error_logger.rb
- - lib/puma/events.rb
- - lib/puma/io_buffer.rb
- - lib/puma/jruby_restart.rb
- - lib/puma/json_serialization.rb
- - lib/puma/launcher.rb
- - lib/puma/launcher/bundle_pruner.rb
- - lib/puma/log_writer.rb
- - lib/puma/minissl.rb
- - lib/puma/minissl/context_builder.rb
- - lib/puma/null_io.rb
- - lib/puma/plugin.rb
- - lib/puma/plugin/systemd.rb
- - lib/puma/plugin/tmp_restart.rb
  - lib/puma/puma_http11.jar
- - lib/puma/rack/builder.rb
- - lib/puma/rack/urlmap.rb
- - lib/puma/rack_default.rb
- - lib/puma/reactor.rb
- - lib/puma/request.rb
- - lib/puma/runner.rb
- - lib/puma/sd_notify.rb
- - lib/puma/server.rb
- - lib/puma/single.rb
- - lib/puma/state_file.rb
- - lib/puma/thread_pool.rb
- - lib/puma/util.rb
- - lib/rack/handler/puma.rb
- - tools/Dockerfile
- - tools/trickletest.rb
  homepage: https://puma.io
  licenses:
  - BSD-3-Clause
@@ -145,7 +67,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.2.33
+ rubygems_version: 3.3.26
  signing_key:
  specification_version: 4
  summary: Puma is a simple, fast, threaded, and highly parallel HTTP 1.1 server for
data/bin/puma-wild DELETED
@@ -1,25 +0,0 @@
- #!/usr/bin/env ruby
- #
- # Copyright (c) 2014 Evan Phoenix
- #
-
- require 'rubygems'
-
- cli_arg = ARGV.shift
-
- inc = ""
-
- if cli_arg == "-I"
-   inc = ARGV.shift
-   $LOAD_PATH.concat inc.split(":")
- end
-
- module Puma; end
-
- Puma.const_set(:WILD_ARGS, ["-I", inc])
-
- require 'puma/cli'
-
- cli = Puma::CLI.new ARGV
-
- cli.run
data/docs/architecture.md DELETED
@@ -1,74 +0,0 @@
- # Architecture
-
- ## Overview
-
- ![https://bit.ly/2iJuFky](images/puma-general-arch.png)
-
- Puma is a threaded Ruby HTTP application server processing requests across a TCP
- and/or UNIX socket.
-
- Puma processes (there can be one or many) accept connections from the socket via
- a thread (in the [`Reactor`](../lib/puma/reactor.rb) class). The connection,
- once fully buffered and read, moves into the `todo` list, where an available
- thread will pick it up (in the [`ThreadPool`](../lib/puma/thread_pool.rb)
- class).
-
- Puma works in two main modes: cluster and single. In single mode, only one Puma
- process boots. In cluster mode, a `master` process is booted, which prepares
- (and may boot) the application and then uses the `fork()` system call to create
- one or more `child` processes. These `child` processes all listen to the same
- socket. The `master` process does not listen to the socket or process requests -
- its purpose is primarily to manage and listen for UNIX signals and possibly kill
- or boot `child` processes.
-
- We sometimes call `child` processes (or Puma processes in `single` mode)
- _workers_, and we sometimes call the threads created by Puma's
- [`ThreadPool`](../lib/puma/thread_pool.rb) _worker threads_.
-
- ## How Requests Work
-
- ![https://bit.ly/2zwzhEK](images/puma-connection-flow.png)
-
- * Upon startup, Puma listens on a TCP or UNIX socket.
-   * The backlog of this socket is configured with a default of 1024, but the
-     actual backlog value is capped by the `net.core.somaxconn` sysctl value.
-     The backlog determines the size of the queue for unaccepted connections. If
-     the backlog is full, the operating system is not accepting new connections.
-   * This socket backlog is distinct from the `backlog` of work as reported by
-     `Puma.stats` or the control server. The backlog that `Puma.stats` refers to
-     represents the number of connections in the process' `todo` set waiting for
-     a thread from the [`ThreadPool`](../lib/puma/thread_pool.rb).
- * By default, a single, separate thread (created by the
-   [`Reactor`](../lib/puma/reactor.rb) class) reads and buffers requests from the
-   socket.
- * When at least one worker thread is available for work, the reactor thread
-   listens to the socket and accepts a request (if one is waiting).
- * The reactor thread waits for the entire HTTP request to be received.
-   * Puma exposes the time spent waiting for the HTTP request body to be
-     received to the Rack app as `env['puma.request_body_wait']`
-     (milliseconds); see the middleware sketch after this file.
- * Once fully buffered and received, the connection is pushed into the "todo"
-   set.
- * Worker threads pop work off the "todo" set for processing.
- * The worker thread processes the request via `call`ing the configured Rack
-   application. The Rack application generates the HTTP response.
- * The worker thread writes the response to the connection. While Puma buffers
-   requests via a separate thread, it does not use a separate thread for
-   responses.
- * Once done, the thread becomes available to process another connection in the
-   "todo" set.
-
- ### `queue_requests`
-
- ![https://bit.ly/2zxCJ1Z](images/puma-connection-flow-no-reactor.png)
-
- The `queue_requests` option is `true` by default, enabling the separate reactor
- thread used to buffer requests as described above.
-
- If set to `false`, this buffer will not be used for connections while waiting
- for the request to arrive.
-
- In this mode, when a connection is accepted, it is added to the "todo" queue
- immediately, and a worker will synchronously do any waiting necessary to read
- the HTTP request from the socket.
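The `env['puma.request_body_wait']` value mentioned above is straightforward to consume from Rack. Below is a minimal middleware sketch: the env key and its millisecond unit are as documented, while the class name and logging destination are illustrative assumptions.

```
# Minimal sketch: log how long Puma spent buffering each request body.
# `puma.request_body_wait` is set by Puma (in milliseconds) once the request
# is fully buffered; the class name and log target here are illustrative.
class BodyWaitLogger
  def initialize(app)
    @app = app
  end

  def call(env)
    wait_ms = env['puma.request_body_wait']
    $stderr.puts "request_body_wait=#{wait_ms}ms" if wait_ms
    @app.call(env)
  end
end

# config.ru usage:
#   use BodyWaitLogger
#   run MyApp
```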
data/docs/compile_options.md DELETED
@@ -1,55 +0,0 @@
- # Compile Options
-
- There are some `cflags` provided to change Puma's default configuration for its
- C extension.
-
- ## Query String, `PUMA_QUERY_STRING_MAX_LENGTH`
-
- By default, the max length of `QUERY_STRING` is `1024 * 10`. But you may want to
- adjust it to accept longer queries in GET requests.
-
- For manual install, pass the `PUMA_QUERY_STRING_MAX_LENGTH` option like this:
-
- ```
- gem install puma -- --with-cflags="-D PUMA_QUERY_STRING_MAX_LENGTH=64000"
- ```
-
- For Bundler, use its configuration system:
-
- ```
- bundle config build.puma "--with-cflags='-D PUMA_QUERY_STRING_MAX_LENGTH=64000'"
- ```
-
- ## Request Path, `PUMA_REQUEST_PATH_MAX_LENGTH`
-
- By default, the max length of `REQUEST_PATH` is `8192`. But you may want to
- adjust it to accept longer paths in requests.
-
- For manual install, pass the `PUMA_REQUEST_PATH_MAX_LENGTH` option like this:
-
- ```
- gem install puma -- --with-cflags="-D PUMA_REQUEST_PATH_MAX_LENGTH=64000"
- ```
-
- For Bundler, use its configuration system:
-
- ```
- bundle config build.puma "--with-cflags='-D PUMA_REQUEST_PATH_MAX_LENGTH=64000'"
- ```
-
- ## Request URI, `PUMA_REQUEST_URI_MAX_LENGTH`
-
- By default, the max length of `REQUEST_URI` is `1024 * 12`. But you may want to
- adjust it to accept longer URIs in requests.
-
- For manual install, pass the `PUMA_REQUEST_URI_MAX_LENGTH` option like this:
-
- ```
- gem install puma -- --with-cflags="-D PUMA_REQUEST_URI_MAX_LENGTH=64000"
- ```
-
- For Bundler, use its configuration system:
-
- ```
- bundle config build.puma "--with-cflags='-D PUMA_REQUEST_URI_MAX_LENGTH=64000'"
- ```
data/docs/deployment.md DELETED
@@ -1,102 +0,0 @@
- # Deployment engineering for Puma
-
- Puma expects to be run in a deployed environment eventually. You can use it as
- your development server, but most people use it in their production deployments.
-
- To that end, this document serves as a foundation of wisdom regarding deploying
- Puma to production while increasing happiness and decreasing downtime.
-
- ## Specifying Puma
-
- Most people will specify Puma by including `gem "puma"` in a Gemfile, so we'll
- assume this is how you're using Puma.
-
- ## Single vs. Cluster mode
-
- Initially, Puma was conceived as a thread-only web server, but support for
- processes was added in version 2.
-
- To run `puma` in single mode (i.e., as a development environment), set the
- number of workers to 0; anything higher will run in cluster mode.
-
- Here are some tips for cluster mode:
-
- ### MRI
-
- * Use cluster mode and set the number of workers to 1.5x the number of CPU cores
-   in the machine, starting from a minimum of 2.
- * Set the number of threads to your desired concurrent requests divided by the
-   number of workers. Puma defaults to 5, and that's a decent number.
-
- #### Migrating from Unicorn
-
- * If you're migrating from unicorn, here are some settings to start with:
-   * Set workers to half the number of unicorn workers you're using
-   * Set threads to 2
-   * Enjoy 50% memory savings
- * As you grow more confident in the thread-safety of your app, you can tune the
-   workers down and the threads up.
-
- #### Ubuntu / Systemd (Systemctl) Installation
-
- See [systemd.md](systemd.md)
-
- #### Worker utilization
-
- **How do you know if you've got enough (or too many) workers?**
-
- A good question. Due to MRI's GIL, only one thread can be executing Ruby code at
- a time. But since so many apps are waiting on IO from DBs, etc., they can
- utilize threads to use the process more efficiently.
-
- Generally, you never want processes that are pegged all the time. That can mean
- there is more work to do than the process can get through. On the other hand, if
- you have processes that sit around doing nothing, then they're just eating up
- resources.
-
- Watch your CPU utilization over time and aim for about 70% on average. 70%
- utilization means you've still got capacity but aren't starving threads.
-
- **Measuring utilization**
-
- Using a timestamp header from an upstream proxy server (e.g., `nginx` or
- `haproxy`) makes it possible to indicate how long requests have been waiting for
- a Puma thread to become available.
-
- * Have your upstream proxy set a header with the time it received the request:
-   * nginx: `proxy_set_header X-Request-Start "${msec}";`
-   * haproxy >= 1.9: `http-request set-header X-Request-Start t=%[date()]%[date_us()]`
-   * haproxy < 1.9: `http-request set-header X-Request-Start t=%[date()]`
- * In your Rack middleware, determine the amount of time elapsed since
-   `X-Request-Start` (see the middleware sketch after this file).
- * To improve accuracy, you will want to subtract time spent waiting for slow
-   clients:
-   * `env['puma.request_body_wait']` contains the number of milliseconds Puma
-     spent waiting for the client to send the request body.
-   * haproxy: `%Th` (TLS handshake time) and `%Ti` (idle time before request)
-     can also be added as headers.
-
- ## Should I daemonize?
-
- The Puma 5.0 release removed daemonization. For older versions and alternatives,
- continue reading.
-
- I prefer not to daemonize my servers and use something like `runit` or `systemd`
- to monitor them as child processes. This gives them fast response to crashes and
- makes it easy to figure out what is going on. Additionally, unlike `unicorn`,
- Puma does not require daemonization to do zero-downtime restarts.
-
- I see people using daemonization because they start puma directly via a
- Capistrano task and thus want it to live on past the `cap deploy`. To these
- people, I say: you need to be using a process monitor. Nothing is making sure
- Puma stays up in this scenario! You're just waiting for something weird to
- happen, Puma to die, and to get paged at 3 AM. Do yourself a favor and at least
- use the process monitoring your OS comes with, be it `sysvinit` or `systemd`.
- Or branch out and use `runit` or, hell, even `monit`.
-
- ## Restarting
-
- You probably will want to deploy some new code at some point, and you'd like
- Puma to start running that new code. There are a few options for restarting
- Puma, described separately in our [restart documentation](restart.md).
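To make the "Measuring utilization" recipe above concrete, here is a rough Rack middleware sketch. It assumes the nginx `${msec}` header format shown above (epoch seconds with millisecond precision); the haproxy formats concatenate seconds and microseconds and would need different parsing. The class name and the `x.queue_time_ms` env key are invented for the example.

```
# Rough sketch: estimate request queue time from an upstream
# X-Request-Start header, minus time Puma spent waiting on a slow client.
class QueueTimeEstimator
  def initialize(app)
    @app = app
  end

  def call(env)
    if (start = env['HTTP_X_REQUEST_START'])
      # nginx ${msec} looks like "1700000000.123" (epoch seconds); strip a
      # leading "t=" in case the proxy prefixes one as in the examples above.
      started_ms = start.sub(/\At=/, '').to_f * 1000.0
      queue_ms = (Time.now.to_f * 1000.0) - started_ms
      # Subtract time Puma spent waiting for the request body (slow client).
      queue_ms -= env['puma.request_body_wait'].to_f
      env['x.queue_time_ms'] = queue_ms # hypothetical key for the app to read
    end
    @app.call(env)
  end
end
```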
data/docs/fork_worker.md DELETED
@@ -1,31 +0,0 @@
- # Fork-Worker Cluster Mode [Experimental]
-
- Puma 5 introduces an experimental new cluster-mode configuration option, `fork_worker` (`--fork-worker` from the CLI). This mode causes Puma to fork additional workers from worker 0, instead of directly from the master process:
-
- ```
- 10000 \_ puma 4.3.3 (tcp://0.0.0.0:9292) [puma]
- 10001     \_ puma: cluster worker 0: 10000 [puma]
- 10002     \_ puma: cluster worker 1: 10000 [puma]
- 10003     \_ puma: cluster worker 2: 10000 [puma]
- 10004     \_ puma: cluster worker 3: 10000 [puma]
- ```
-
- The `fork_worker` option allows your application to be initialized only once for copy-on-write memory savings, and it has two additional advantages:
-
- 1. **Compatible with phased restart.** Because the master process itself doesn't preload the application, this mode works with phased restart (`SIGUSR1` or `pumactl phased-restart`). When worker 0 reloads as part of a phased restart, it initializes a new copy of your application first, then the other workers reload by forking from this new worker already containing the new preloaded application.
-
-    This allows a phased restart to complete as quickly as a hot restart (`SIGUSR2` or `pumactl restart`), while still minimizing downtime by staggering the restart across cluster workers.
-
- 2. **'Refork' for additional copy-on-write improvements in running applications.** Fork-worker mode introduces a new `refork` command that re-loads all nonzero workers by re-forking them from worker 0.
-
-    This command can potentially improve memory utilization in large or complex applications that don't fully pre-initialize on startup, because the re-forked workers can share copy-on-write memory with a worker that has been running for a while and serving requests.
-
-    You can trigger a refork by sending the cluster the `SIGURG` signal or running the `pumactl refork` command at any time. A refork will also automatically trigger once, after a certain number of requests have been processed by worker 0 (default 1000). To configure the number of requests before the auto-refork, pass a positive integer argument to `fork_worker` (e.g., `fork_worker 1000`), or `0` to disable (see the config sketch after this file).
-
- ### Limitations
-
- - This mode is still very experimental, so there may be bugs or edge-cases, particularly around expected behavior of existing hooks. Please open a [bug report](https://github.com/puma/puma/issues/new?template=bug_report.md) if you encounter any issues.
-
- - In order to fork new workers cleanly, worker 0 shuts down its server and stops serving requests so there are no open file descriptors or other kinds of shared global state between processes, and to maximize copy-on-write efficiency across the newly-forked workers. This may temporarily reduce total capacity of the cluster during a phased restart / refork.
-
-   In a cluster with `n` workers, a normal phased restart stops and restarts workers one by one while the application is loaded in each process, so `n-1` workers are available serving requests during the restart. In a phased restart in fork-worker mode, the application is first loaded in worker 0 while `n-1` workers are available, then worker 0 remains stopped while the rest of the workers are reloaded one by one, leaving only `n-2` workers to be available for a brief period of time. Reloading the rest of the workers should be quick because the application is preloaded at that point, but there may be situations where it can take longer (slow clients, long-running application code, slow worker-fork hooks, etc.).
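For reference, the options described above map onto a `config/puma.rb` like the following sketch; the worker count and refork threshold are example values only.

```
# Cluster mode: worker 0 boots from the master, workers 1..3 fork from worker 0.
workers 4

# Enable fork_worker; auto-refork once worker 0 has processed 1000 requests
# (the default). Pass 0 to disable the automatic refork.
fork_worker 1000

# A refork can also be triggered manually at any time:
#   pumactl refork        # or send SIGURG to the master process
```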
data/docs/images/puma-connection-flow-no-reactor.png DELETED
Binary file
data/docs/images/puma-connection-flow.png DELETED
Binary file
data/docs/images/puma-general-arch.png DELETED
Binary file
data/docs/jungle/README.md DELETED
@@ -1,9 +0,0 @@
- # Puma as a service
-
- ## Systemd
-
- See [/docs/systemd](https://github.com/puma/puma/blob/master/docs/systemd.md).
-
- ## rc.d
-
- See `/docs/jungle/rc.d` for FreeBSD's rc.d scripts.
data/docs/jungle/rc.d/README.md DELETED
@@ -1,74 +0,0 @@
- # Puma as a service using rc.d
-
- Manage multiple Puma servers as services on one box using FreeBSD's rc.d service.
-
- ## Dependencies
-
- * `jq` - a command-line JSON parser, needed to parse the JSON in the config file
-
- ## Installation
-
-     # Copy the puma script to the rc.d directory (make sure everyone has read/execute perms)
-     sudo cp puma /usr/local/etc/rc.d/
-
-     # Create an empty configuration file
-     sudo touch /usr/local/etc/puma.conf
-
-     # Enable the puma service
-     echo 'puma_enable="YES"' | sudo tee -a /etc/rc.conf
-
- ## Managing the jungle
-
- Puma apps are referenced in /usr/local/etc/puma.conf by default.
-
- Start the jungle running:
-
- `service puma start`
-
- This script will run at boot time.
-
- You can also stop the jungle (stops ALL puma instances) by running:
-
- `service puma stop`
-
- To restart the jungle:
-
- `service puma restart`
-
- ## Conventions
-
- * The script expects:
-   * a config file to exist under `config/puma.rb` in your app. E.g.: `/home/apps/my-app/config/puma.rb`.
-
- You can always change those defaults by editing the scripts.
-
- ## Here's what a minimal app's config file should have
-
- ```
- {
-   "servers" : [
-     {
-       "dir": "/path/to/rails/project",
-       "user": "deploy-user",
-       "ruby_version": "ruby.version",
-       "ruby_env": "rbenv"
-     }
-   ]
- }
- ```
-
- ## Before starting...
-
- You need to customise `puma.conf` to:
-
- * Set the user your app should run as, unless you want root to execute it!
- * Set the directory of the app
- * Set the ruby version to execute
- * Set the ruby environment (currently set to rbenv, since that is the only ruby environment currently supported)
- * Add additional server instances following the scheme in the example
-
- ## Notes:
-
- Only rbenv is currently supported.
data/docs/jungle/rc.d/puma DELETED
@@ -1,61 +0,0 @@
- #!/bin/sh
- #
-
- # PROVIDE: puma
-
- . /etc/rc.subr
-
- name="puma"
- start_cmd="puma_start"
- stop_cmd="puma_stop"
- restart_cmd="puma_restart"
- rcvar=puma_enable
- required_files=/usr/local/etc/puma.conf
-
- puma_start()
- {
-   server_count=$(/usr/local/bin/jq ".servers[] .ruby_env" /usr/local/etc/puma.conf | wc -l)
-   i=0
-   while [ "$i" -lt "$server_count" ]; do
-     rb_env=$(/usr/local/bin/jq -r ".servers[$i].ruby_env" /usr/local/etc/puma.conf)
-     dir=$(/usr/local/bin/jq -r ".servers[$i].dir" /usr/local/etc/puma.conf)
-     user=$(/usr/local/bin/jq -r ".servers[$i].user" /usr/local/etc/puma.conf)
-     rb_ver=$(/usr/local/bin/jq -r ".servers[$i].ruby_version" /usr/local/etc/puma.conf)
-     case $rb_env in
-       "rbenv")
-         cd $dir && rbenv shell $rb_ver && /usr/sbin/daemon -u $user bundle exec puma -C $dir/config/puma.rb
-         ;;
-       *)
-         ;;
-     esac
-     i=$(( i + 1 ))
-   done
- }
-
- puma_stop()
- {
-   pkill ruby
- }
-
- puma_restart()
- {
-   server_count=$(/usr/local/bin/jq ".servers[] .ruby_env" /usr/local/etc/puma.conf | wc -l)
-   i=0
-   while [ "$i" -lt "$server_count" ]; do
-     rb_env=$(/usr/local/bin/jq -r ".servers[$i].ruby_env" /usr/local/etc/puma.conf)
-     dir=$(/usr/local/bin/jq -r ".servers[$i].dir" /usr/local/etc/puma.conf)
-     user=$(/usr/local/bin/jq -r ".servers[$i].user" /usr/local/etc/puma.conf)
-     rb_ver=$(/usr/local/bin/jq -r ".servers[$i].ruby_version" /usr/local/etc/puma.conf)
-     case $rb_env in
-       "rbenv")
-         cd $dir && rbenv shell $rb_ver && /usr/sbin/daemon -u $user bundle exec puma -C $dir/config/puma.rb
-         ;;
-       *)
-         ;;
-     esac
-     i=$(( i + 1 ))
-   done
- }
-
- load_rc_config $name
- run_rc_command "$1"
data/docs/jungle/rc.d/puma.conf DELETED
@@ -1,10 +0,0 @@
- {
-   "servers" : [
-     {
-       "dir": "/path/to/rails/project",
-       "user": "deploy-user",
-       "ruby_version": "ruby.version",
-       "ruby_env": "rbenv"
-     }
-   ]
- }
data/docs/kubernetes.md DELETED
@@ -1,78 +0,0 @@
- # Kubernetes
-
- ## Running Puma in Kubernetes
-
- In general, running Puma in Kubernetes works as-is; no special configuration is needed beyond what you would write anyway to get a new Kubernetes Deployment going. There is one known interaction between the way Kubernetes handles pod termination and how Puma handles `SIGINT`, where some requests might be sent to Puma after it has already entered graceful shutdown mode and is no longer accepting requests. This can lead to dropped requests during rolling deploys. A workaround is described at the end of this article.
-
- ## Basic setup
-
- Assuming you already have a running cluster and Docker image repository, you can run a simple Puma app with the following example Dockerfile and Deployment specification. These are meant as examples only and are deliberately very minimal, to the point of skipping many options that are recommended for running in production, like healthchecks and envvar configuration with ConfigMaps. In general, you should check the [Kubernetes documentation](https://kubernetes.io/docs/home/) and [Docker documentation](https://docs.docker.com/) for a more comprehensive overview of the available options.
-
- A basic Dockerfile example:
- ```
- # Can be updated to newer ruby versions
- FROM ruby:2.5.1-alpine
- RUN apk update && apk add build-base # and any other packages you need
-
- # Only rebuild gem bundle if Gemfile changes
- COPY Gemfile Gemfile.lock ./
- RUN bundle install
-
- # Copy over the rest of the files
- COPY . .
-
- # Open up port and start the service
- EXPOSE 9292
- CMD bundle exec rackup -o 0.0.0.0
- ```
-
- A sample `deployment.yaml`:
- ```
- ---
- apiVersion: apps/v1
- kind: Deployment
- metadata:
-   name: my-awesome-puma-app
- spec:
-   selector:
-     matchLabels:
-       app: my-awesome-puma-app
-   template:
-     metadata:
-       labels:
-         app: my-awesome-puma-app
-         service: my-awesome-puma-app
-     spec:
-       containers:
-       - name: my-awesome-puma-app
-         image: <your image here>
-         ports:
-         - containerPort: 9292
- ```
-
- ## Graceful shutdown and pod termination
-
- For some high-throughput systems, it is possible that some HTTP requests will return responses with response codes in the 5XX range during a rolling deploy to a new version. This is caused by [the way that Kubernetes terminates a pod during rolling deploys](https://cloud.google.com/blog/products/gcp/kubernetes-best-practices-terminating-with-grace):
-
- 1. The replication controller determines a pod should be shut down.
- 2. The Pod is set to the “Terminating” State and removed from the endpoints list of all Services, so that it receives no more requests.
- 3. The pod's pre-stop hook gets called. The default for this is to send `SIGTERM` to the process inside the pod.
- 4. The pod has up to `terminationGracePeriodSeconds` (default: 30 seconds) to gracefully shut down. Puma will do this (after it receives `SIGTERM`) by closing down the socket that accepts new requests and finishing any requests already running before exiting the Puma process.
- 5. If the pod is still running after `terminationGracePeriodSeconds` has elapsed, the pod receives `SIGKILL` to make sure the process inside it stops. After that, the container exits and all other Kubernetes objects associated with it are cleaned up.
-
- There is a subtle race condition between steps 2 and 3: the replication controller does not synchronously remove the pod from the Services AND THEN call the pre-stop hook of the pod; rather, it asynchronously sends "remove this pod from your endpoints" requests to the Services and then immediately proceeds to invoke the pod's pre-stop hook. If the Service controller (typically something like nginx or haproxy) handles this request "too" late (due to internal lag or network latency between the replication and Service controllers), then it is possible that the Service controller will send one or more requests to a Puma process which has already shut down its listening socket. These requests will then fail with 5XX error codes.
-
- Kubernetes works this way, rather than handling step 2 synchronously, because of the CAP theorem: in a distributed system, there is no way to guarantee that any message will arrive promptly. In particular, waiting for all Service controllers to report back might get stuck for an indefinite time if one of them has already been terminated or if there has been a net split. A way to work around this is to add a sleep to the pre-stop hook of the same duration as the `terminationGracePeriodSeconds` time (see the sketch after this file). This will allow the Puma process to keep serving new requests during the entire grace period, although it will no longer receive new requests after all Service controllers have propagated the removal of the pod from their endpoint lists. Then, after `terminationGracePeriodSeconds`, the pod receives `SIGKILL` and closes down. If your process can't handle `SIGKILL` properly, for example because it needs to release locks in different services, you can also sleep for a shorter period (and/or increase `terminationGracePeriodSeconds`) as long as the time slept is longer than the time that your Service controllers take to propagate the pod removal. The downside of this workaround is that all pods will take at minimum the amount of time slept to shut down, which will increase the time required for your rolling deploy.
-
- More discussion and links to relevant articles can be found in https://github.com/puma/puma/issues/2343.
-
- ## Workers Per Pod, and Other Config Issues
-
- With containerization, you will have to make a decision about how "big" to make each pod. Should you run 2 pods with 50 workers each? 25 pods, each with 4 workers? 100 pods, with each Puma running in single mode? Each scenario represents the same total amount of capacity (100 Puma processes that can respond to requests), but there are tradeoffs to make.
-
- * Worker counts should be somewhere between 4 and 32 in most cases. You want more than 4 in order to minimize time spent in request queueing for a free Puma worker, but probably fewer than ~32 because otherwise autoscaling works in too large an increment, and that many processes may not fit well into your nodes. In any queueing system, queue time is proportional to 1/n, where n is the number of things pulling from the queue. Each pod will have its own request queue (i.e., the socket backlog). If you have 4 pods with 1 worker each (4 request queues), wait times are, proportionally, about 4 times higher than if you had 1 pod with 4 workers (1 request queue).
- * Unless you have a very I/O-heavy application (50%+ time spent waiting on IO), use the default thread count (5 for MRI). Using higher numbers of threads with low I/O wait (<50%) will lead to additional request queueing time (latency!) and additional memory usage.
- * More processes per pod reduces memory usage per process, because of copy-on-write memory and because the cost of the single master process is "amortized" over more child processes.
- * Don't run fewer than 4 processes per pod if you can. Low numbers of processes per pod will lead to high request queueing, which means you will have to run more pods.
- * If multithreaded, allocate 1 CPU per worker. If single-threaded, allocate 0.75 CPUs per worker. Most web applications spend about 25% of their time in I/O - but when you're running multi-threaded, your Puma process will have higher CPU usage and should be able to fully saturate a CPU core.
- * Most Puma processes will use about ~512MB-1GB per worker, and about 1GB for the master process. However, you probably shouldn't bother with setting memory limits lower than around 2GB per process, because most places you are deploying will have 2GB of RAM per CPU. A sensible memory limit for a Puma configuration of 4 child workers might be something like 8 GB (1 GB for the master, 7 GB for the 4 children).
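Finally, a sketch of the pre-stop sleep workaround described in "Graceful shutdown and pod termination" above, written as additions to the earlier `deployment.yaml`. The 30-second values are examples only and should be tuned to how long your Service controllers take to propagate endpoint removal.

```
spec:
  template:
    spec:
      terminationGracePeriodSeconds: 30
      containers:
      - name: my-awesome-puma-app
        lifecycle:
          preStop:
            exec:
              # Keep serving requests while the endpoint removal propagates;
              # Kubernetes then delivers SIGTERM/SIGKILL as described above.
              command: ["sh", "-c", "sleep 30"]
```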