puma 6.6.1-java → 7.0.0.pre1-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 77572e7c8734be26c763ff4e53aa5cd9e37ed091fdc6cfeeb81ca1421ac6da02
- data.tar.gz: 0b77cde4be14a0541131e8ef17e2980ab014e94d76c4e83e2a84bf7557d95929
+ metadata.gz: 13a568477e4c57c776b48c53fcdca298b5b136a1d2792e65057e0776576fb833
+ data.tar.gz: a48e423ea9bdccb6e842d6feb6afe1c5e940f537062133655b20c2fd530321f6
  SHA512:
- metadata.gz: a6d3710d65cfb68d777bc0388b42bcdfe420cfa6b20c9b64ab0419c248f2919480d9aa07ba65cc4abe3e0dae40722e3aa544fadbe2706965c21d601329319b6c
- data.tar.gz: 174671b99f41f99ee43e0567d3ef3599fe2064a08f6ea647b00c054498163876709a92867868116a1a53588ed08a7a61553c0848116341e0d46df7ad648da43f
+ metadata.gz: 829ac0cfef8951cf30aa2c99a8c1867d4dbf600f1016f811e6810f07f7da377e0c1ffefecee9de2efd6eb1366ecccf36eb36bbc114e1c58eeac6dd729541a6d4
+ data.tar.gz: 34966e63623b1c5457822226c2445645332287ace88e9646cb80a12a8cb82305d9025cecb74317dd561df05e4435fc8d1c12b6e3a23c1a24e16fad6fdfcd9d76
data/History.md CHANGED
@@ -1,3 +1,8 @@
+ ## 7.0.0.pre1 / 2025-07-31
+
+ * Changed
+ * Fix long tail response problem with keepalive connections ([#3678])
+
  ## 6.6.1 / 2025-07-30

  * Bugfixes
@@ -2160,6 +2165,7 @@ be added back in a future date when a java Puma::MiniSSL is added.
  * Bugfixes
  * Your bugfix goes here <Most recent on the top, like GitHub> (#Github Number)

+ [#3678]:https://github.com/puma/puma/pull/3678 "PR by @MSP-Greg, merged 2025-07-31"
  [#3680]:https://github.com/puma/puma/pull/3680 "PR by @byroot, merged 2025-07-31"
  [#3572]:https://github.com/puma/puma/pull/3572 "PR by @barthez, merged 2025-02-06"
  [#3586]:https://github.com/puma/puma/pull/3586 "PR by @MSP-Greg, merged 2025-02-03"
data/docs/stats.md CHANGED
@@ -65,7 +65,8 @@ When Puma runs in single mode, these stats are available at the top level. When
  and is not used for any internal decisions, unlike `busy_theads`, which is usually a more useful stat.
  * max_threads: the maximum number of threads Puma is configured to spool per worker
  * requests_count: the number of requests this worker has served since starting
-
+ * reactor_max: the maximum observed number of requests held in Puma's "reactor" which is used for asyncronously buffering request bodies. This stat is reset on every call, so it's the maximum value observed since the last stat call.
+ * backlog_max: the maximum number of requests that have been fully buffered by the reactor and placed in a ready queue, but have not yet been picked up by a server thread. This stat is reset on every call, so it's the maximum value observed since the last stat call.

  ### cluster mode

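For orientation, these two counters surface through Puma's regular stats API. A minimal sketch of reading them in single mode, assuming a process launched by Puma (`Puma.stats_hash` has been available since Puma 5; the two keys below are new in 7.0.0.pre1):

```ruby
# Minimal sketch: reading the new high-water marks in single mode.
# Only meaningful inside a process launched by Puma; Puma.stats_hash
# returns a Symbol-keyed Hash.
stats = Puma.stats_hash

puts "peak clients buffering in the reactor: #{stats[:reactor_max]}"
puts "peak fully-buffered requests awaiting a thread: #{stats[:backlog_max]}"
```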
data/lib/puma/client.rb CHANGED
@@ -111,7 +111,8 @@ module Puma
  end

  attr_reader :env, :to_io, :body, :io, :timeout_at, :ready, :hijacked,
- :tempfile, :io_buffer, :http_content_length_limit_exceeded
+ :tempfile, :io_buffer, :http_content_length_limit_exceeded,
+ :requests_served

  attr_writer :peerip, :http_content_length_limit

@@ -150,11 +151,12 @@ module Puma
  end

  # Number of seconds until the timeout elapses.
+ # @!attribute [r] timeout
  def timeout
  [@timeout_at - Process.clock_gettime(Process::CLOCK_MONOTONIC), 0].max
  end

- def reset(fast_check=true)
+ def reset
  @parser.reset
  @io_buffer.reset
  @read_header = true
@@ -166,7 +168,10 @@ module Puma
  @peerip = nil if @remote_addr_header
  @in_last_chunk = false
  @http_content_length_limit_exceeded = false
+ end

+ # only used with back-to-back requests contained in the buffer
+ def process_back_to_back_requests
  if @buffer
  return false unless try_to_parse_proxy_protocol

@@ -178,19 +183,15 @@ module Puma
  raise HttpParserError,
  "HEADER is longer than allowed, aborting client early."
  end
-
- return false
- else
- begin
- if fast_check && @to_io.wait_readable(FAST_TRACK_KA_TIMEOUT)
- return try_to_finish
- end
- rescue IOError
- # swallow it
- end
  end
  end

+ # if a client sends back-to-back requests, the buffer may contain one or more
+ # of them.
+ def has_back_to_back_requests?
+ !(@buffer.nil? || @buffer.empty?)
+ end
+
  def close
  tempfile_close
  begin
@@ -291,8 +292,10 @@ module Puma

  def eagerly_finish
  return true if @ready
- return false unless @to_io.wait_readable(0)
- try_to_finish
+ while @to_io.wait_readable(0) # rubocop: disable Style/WhileUntilModifier
+ return true if try_to_finish
+ end
+ false
  end

  def finish(timeout)
@@ -128,14 +128,15 @@ module Puma

  while true
  begin
- b = server.backlog || 0
- r = server.running || 0
- t = server.pool_capacity || 0
- m = server.max_threads || 0
- rc = server.requests_count || 0
- bt = server.busy_threads || 0
- payload = %Q!#{base_payload}{ "backlog":#{b}, "running":#{r}, "pool_capacity":#{t}, "max_threads":#{m}, "requests_count":#{rc}, "busy_threads":#{bt} }\n!
- io << payload
+ payload = base_payload.dup
+
+ hsh = server.stats
+ hsh.each do |k, v|
+ payload << %Q! "#{k}":#{v || 0},!
+ end
+ # sub call properly adds 'closing' string
+ io << payload.sub(/,\z/, " }\n")
+ server.reset_max
  rescue IOError
  Puma::Util.purge_interrupt_queue
  break
@@ -4,13 +4,15 @@ module Puma
  class Cluster < Runner
  #—————————————————————— DO NOT USE — this class is for internal use only ———

-
  # This class represents a worker process from the perspective of the puma
  # master process. It contains information about the process and its health
  # and it exposes methods to control the process via IPC. It does not
  # include the actual logic executed by the worker process itself. For that,
  # see Puma::Cluster::Worker.
  class WorkerHandle # :nodoc:
+ # array of stat 'max' keys
+ WORKER_MAX_KEYS = [:backlog_max, :reactor_max]
+
  def initialize(idx, pid, phase, options)
  @index = idx
  @pid = pid
@@ -23,6 +25,7 @@ module Puma
  @last_checkin = Time.now
  @last_status = {}
  @term = false
+ @worker_max = Array.new WORKER_MAX_KEYS.length, 0
  end

  attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status, :started_at
@@ -51,12 +54,39 @@ module Puma
  @term
  end

- STATUS_PATTERN = /{ "backlog":(?<backlog>\d*), "running":(?<running>\d*), "pool_capacity":(?<pool_capacity>\d*), "max_threads":(?<max_threads>\d*), "requests_count":(?<requests_count>\d*), "busy_threads":(?<busy_threads>\d*) }/
- private_constant :STATUS_PATTERN
-
  def ping!(status)
+ hsh = {}
+ k, v = nil, nil
+ # @todo remove each once Ruby 2.5 is no longer supported
+ status.tr('}{"', '').strip.split(", ").each do |kv|
+ cntr = 0
+ kv.split(':').each do |t|
+ if cntr == 0
+ k = t
+ cntr = 1
+ else
+ v = t
+ end
+ end
+ hsh[k.to_sym] = v.to_i
+ end
+
+ # check stat max values, we can't signal workers to reset the max values,
+ # so we do so here
+ WORKER_MAX_KEYS.each_with_index do |key, idx|
+ if hsh[key] < @worker_max[idx]
+ hsh[key] = @worker_max[idx]
+ else
+ @worker_max[idx] = hsh[key]
+ end
+ end
  @last_checkin = Time.now
- @last_status = status.match(STATUS_PATTERN).named_captures.map { |c_name, c| [c_name.to_sym, c.to_i] }.to_h
+ @last_status = hsh
+ end
+
+ # Resets max values to zero. Called whenever `Cluster#stats` is called
+ def reset_max
+ WORKER_MAX_KEYS.length.times { |idx| @worker_max[idx] = 0 }
  end

  # @see Puma::Cluster#check_workers
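To make the new `ping!` parsing concrete, here is a small standalone sketch of the same `tr`/`split` transformation applied to an illustrative status payload. The payload text and the compact multiple assignment are ours; the real method uses the counter loop shown above:

```ruby
# Illustrative payload only; a real payload carries every stat key the worker reports.
status = '{ "backlog":1, "running":5, "backlog_max":3, "reactor_max":7 }'

hsh = {}
# Same transformation as ping!: drop braces and quotes, then split "key:value" pairs.
status.tr('}{"', '').strip.split(", ").each do |kv|
  k, v = kv.split(':')   # the real method assigns k and v via a counter loop
  hsh[k.to_sym] = v.to_i
end

hsh # => {:backlog=>1, :running=>5, :backlog_max=>3, :reactor_max=>7}
```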
data/lib/puma/cluster.rb CHANGED
@@ -22,6 +22,7 @@ module Puma
  @workers = []
  @next_check = Time.now

+ @worker_max = [] # keeps track of 'max' stat values
  @phased_restart = false
  end

@@ -268,11 +269,14 @@ module Puma
  end

  # Inside of a child process, this will return all zeroes, as @workers is only populated in
- # the master process.
+ # the master process. Calling this also resets stat 'max' values to zero.
  # @!attribute [r] stats
+ # @return [Hash]
+
  def stats
  old_worker_count = @workers.count { |w| w.phase != @phase }
  worker_status = @workers.map do |w|
+ w.reset_max
  {
  started_at: utc_iso8601(w.started_at),
  pid: w.pid,
@@ -283,7 +287,6 @@ module Puma
  last_status: w.last_status,
  }
  end
-
  {
  started_at: utc_iso8601(@started_at),
  workers: @workers.size,
@@ -140,12 +140,10 @@ module Puma
  io_selector_backend: :auto,
  log_requests: false,
  logger: STDOUT,
- # How many requests to attempt inline before sending a client back to
- # the reactor to be subject to normal ordering. The idea here is that
- # we amortize the cost of going back to the reactor for a well behaved
- # but very "greedy" client across 10 requests. This prevents a not
- # well behaved client from monopolizing the thread forever.
- max_fast_inline: 10,
+ # Limits how many requests a keep alive connection can make.
+ # The connection will be closed after it reaches `max_keep_alive`
+ # requests.
+ max_keep_alive: 25,
  max_threads: Puma.mri? ? 5 : 16,
  min_threads: 0,
  mode: :http,
data/lib/puma/const.rb CHANGED
@@ -100,13 +100,11 @@ module Puma
  # too taxing on performance.
  module Const

- PUMA_VERSION = VERSION = "6.6.1"
- CODE_NAME = "Return to Forever"
+ PUMA_VERSION = VERSION = "7.0.0.pre1"
+ CODE_NAME = "Romantic Warrior"

  PUMA_SERVER_STRING = ["puma", PUMA_VERSION, CODE_NAME].join(" ").freeze

- FAST_TRACK_KA_TIMEOUT = 0.2
-
  # How long to wait when getting some write blocking on the socket when
  # sending data back
  WRITE_TIMEOUT = 10
data/lib/puma/dsl.rb CHANGED
@@ -1270,16 +1270,24 @@ module Puma
  @options[:fork_worker] = Integer(after_requests)
  end

- # The number of requests to attempt inline before sending a client back to
- # the reactor to be subject to normal ordering.
+ # @deprecated Use {#max_keep_alive} instead.
  #
- # The default is 10.
+ def max_fast_inline(num_of_requests)
+ warn "[WARNING] `max_fast_inline` is deprecated use `max_keep_alive` instead"
+ @options[:max_keep_alive] ||= Float(num_of_requests) unless num_of_requests.nil?
+ end
+
+ # The number of requests a keep-alive client can submit before being closed.
+ # Note that some applications (server to server) may benefit from a very high
+ # number or Float::INFINITY.
+ #
+ # The default is 25.
  #
  # @example
- # max_fast_inline 20
+ # max_keep_alive 20
  #
- def max_fast_inline(num_of_requests)
- @options[:max_fast_inline] = Float(num_of_requests)
+ def max_keep_alive(num_of_requests)
+ @options[:max_keep_alive] = Float(num_of_requests) unless num_of_requests.nil?
  end

  # When `true`, keep-alive connections are maintained on inbound requests.
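For reference, a hedged sketch of using the new setting from a Puma config file. The values are arbitrary; `threads` and `port` are standard DSL calls included only to make the snippet self-contained:

```ruby
# config/puma.rb -- sketch of the new keep-alive cap (replaces max_fast_inline).
threads 0, 5
port 9292

# Close a keep-alive connection after it has served 100 requests (default is 25).
max_keep_alive 100

# For trusted server-to-server traffic the cap can effectively be removed:
# max_keep_alive Float::INFINITY
```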
data/lib/puma/reactor.rb CHANGED
@@ -15,6 +15,12 @@ module Puma
  #
  # The implementation uses a Queue to synchronize adding new objects from the internal select loop.
  class Reactor
+
+ # @!attribute [rw] reactor_max
+ # Maximum number of clients in the selector. Reset with calls to `Server.stats`.
+ attr_accessor :reactor_max
+ attr_reader :reactor_size
+
  # Create a new Reactor to monitor IO objects added by #add.
  # The provided block will be invoked when an IO has data available to read,
  # its timeout elapses, or when the Reactor shuts down.
@@ -29,6 +35,8 @@ module Puma
  @input = Queue.new
  @timeouts = []
  @block = block
+ @reactor_size = 0
+ @reactor_max = 0
  end

  # Run the internal select loop, using a background thread by default.
@@ -108,6 +116,8 @@ module Puma
  # Start monitoring the object.
  def register(client)
  @selector.register(client.to_io, :r).value = client
+ @reactor_size += 1
+ @reactor_max = @reactor_size if @reactor_max < @reactor_size
  @timeouts << client
  rescue ArgumentError
  # unreadable clients raise error when processed by NIO
@@ -118,6 +128,7 @@ module Puma
  def wakeup!(client)
  if @block.call client
  @selector.deregister client.to_io
+ @reactor_size -= 1
  @timeouts.delete client
  end
  end
data/lib/puma/request.rb CHANGED
@@ -164,17 +164,7 @@ module Puma
  return false if closed_socket?(socket)

  # Close the connection after a reasonable number of inline requests
- # if the server is at capacity and the listener has a new connection ready.
- # This allows Puma to service connections fairly when the number
- # of concurrent connections exceeds the size of the threadpool.
- force_keep_alive = if @enable_keep_alives
- requests < @max_fast_inline ||
- @thread_pool.busy_threads < @max_threads ||
- !client.listener.to_io.wait_readable(0)
- else
- # Always set force_keep_alive to false if the server has keep-alives not enabled.
- false
- end
+ force_keep_alive = @enable_keep_alives && client.requests_served < @max_keep_alive

  resp_info = str_headers(env, status, headers, res_body, io_buffer, force_keep_alive)

@@ -267,7 +257,8 @@ module Puma

  fast_write_response socket, body, io_buffer, chunked, content_length.to_i
  body.close if close_body
- keep_alive
+ # if we're shutting down, close keep_alive connections
+ !shutting_down? && keep_alive
  end

  # @param env [Hash] see Puma::Client#env, from request
@@ -585,7 +576,7 @@ module Puma
  # response body
  # @param io_buffer [Puma::IOBuffer] modified inn place
  # @param force_keep_alive [Boolean] 'anded' with keep_alive, based on system
- # status and `@max_fast_inline`
+ # status and `@max_keep_alive`
  # @return [Hash] resp_info
  # @version 5.0.3
  #
data/lib/puma/server.rb CHANGED
@@ -94,8 +94,9 @@ module Puma
  @min_threads = @options[:min_threads]
  @max_threads = @options[:max_threads]
  @queue_requests = @options[:queue_requests]
- @max_fast_inline = @options[:max_fast_inline]
+ @max_keep_alive = @options[:max_keep_alive]
  @enable_keep_alives = @options[:enable_keep_alives]
+ @enable_keep_alives &&= @queue_requests
  @io_selector_backend = @options[:io_selector_backend]
  @http_content_length_limit = @options[:http_content_length_limit]

@@ -220,7 +221,6 @@ module Puma
  @thread_pool&.spawned
  end

-
  # This number represents the number of requests that
  # the server is capable of taking right now.
  #
@@ -324,6 +324,7 @@ module Puma
  pool = @thread_pool
  queue_requests = @queue_requests
  drain = options[:drain_on_shutdown] ? 0 : nil
+ max_flt = @max_threads.to_f

  addr_send_name, addr_value = case options[:remote_address]
  when :value
@@ -364,8 +365,20 @@ module Puma
  if sock == check
  break if handle_check
  else
- pool.wait_until_not_full
- pool.wait_for_less_busy_worker(options[:wait_for_less_busy_worker]) if @clustered
+ # if ThreadPool out_of_band code is running, we don't want to add
+ # clients until the code is finished.
+ sleep 0.001 while pool.out_of_band_running
+
+ # only use delay when clustered and busy
+ if pool.busy_threads >= @max_threads
+ if @clustered
+ delay = 0.0001 * ((@reactor&.reactor_size || 0) + pool.busy_threads * 1.5)/max_flt
+ sleep delay
+ else
+ # use small sleep for busy single worker
+ sleep 0.0001
+ end
+ end

  io = begin
  sock.accept_nonblock
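A worked example of the new accept-loop delay above, using assumed numbers rather than anything taken from the diff: a clustered server with `max_threads` of 5, all five threads busy, and ten clients currently held in the reactor:

```ruby
# Hypothetical values, for illustration only.
max_flt      = 5.0   # @max_threads.to_f
reactor_size = 10    # @reactor&.reactor_size
busy_threads = 5     # pool.busy_threads

delay = 0.0001 * (reactor_size + busy_threads * 1.5) / max_flt
# => 0.00035, i.e. the accept loop pauses roughly 0.35 ms before accepting more work
```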
@@ -453,8 +466,7 @@ module Puma
  requests = 0

  begin
- if @queue_requests &&
- !client.eagerly_finish
+ if @queue_requests && !client.eagerly_finish

  client.set_timeout(@first_data_timeout)
  if @reactor.add client
@@ -467,39 +479,33 @@ module Puma
  client.finish(@first_data_timeout)
  end

- while true
- @requests_count += 1
- case handle_request(client, requests + 1)
- when false
- break
- when :async
- close_socket = false
- break
- when true
- ThreadPool.clean_thread_locals if clean_thread_locals
-
- requests += 1
+ @requests_count += 1
+ case handle_request(client, requests + 1)
+ when false
+ when :async
+ close_socket = false
+ when true
+ ThreadPool.clean_thread_locals if clean_thread_locals

- # As an optimization, try to read the next request from the
- # socket for a short time before returning to the reactor.
- fast_check = @status == :run
+ requests += 1

- # Always pass the client back to the reactor after a reasonable
- # number of inline requests if there are other requests pending.
- fast_check = false if requests >= @max_fast_inline &&
- @thread_pool.backlog > 0
+ client.reset

- next_request_ready = with_force_shutdown(client) do
- client.reset(fast_check)
- end
+ # This indicates data exists in the client read buffer and there may be
+ # additional requests on it, so process them
+ next_request_ready = if client.has_back_to_back_requests?
+ with_force_shutdown(client) { client.process_back_to_back_requests }
+ else
+ nil
+ end

- unless next_request_ready
- break unless @queue_requests
- client.set_timeout @persistent_timeout
- if @reactor.add client
- close_socket = false
- break
- end
+ if next_request_ready
+ @thread_pool << client
+ close_socket = false
+ elsif @queue_requests
+ client.set_timeout @persistent_timeout
+ if @reactor.add client
+ close_socket = false
  end
  end
  end
@@ -650,7 +656,16 @@ module Puma

  # List of methods invoked by #stats.
  # @version 5.0.0
- STAT_METHODS = [:backlog, :running, :pool_capacity, :max_threads, :requests_count, :busy_threads].freeze
+ STAT_METHODS = [
+ :backlog,
+ :running,
+ :pool_capacity,
+ :busy_threads,
+ :backlog_max,
+ :max_threads,
+ :requests_count,
+ :reactor_max,
+ ].freeze

  # Returns a hash of stats about the running server for reporting purposes.
  # @version 5.0.0
@@ -660,9 +675,16 @@ module Puma
  stats = @thread_pool&.stats || {}
  stats[:max_threads] = @max_threads
  stats[:requests_count] = @requests_count
+ stats[:reactor_max] = @reactor.reactor_max
+ reset_max
  stats
  end

+ def reset_max
+ @reactor.reactor_max = 0
+ @thread_pool.reset_max
+ end
+
  # below are 'delegations' to binder
  # remove in Puma 7?

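Putting the expanded `STAT_METHODS` and `reset_max` together, a single stats read on a `Puma::Server` instance now returns roughly the following shape. The values are illustrative; note that the read itself zeroes `reactor_max` and `backlog_max` for the next interval:

```ruby
# Illustrative shape only; keys follow STAT_METHODS, values are made up.
server.stats
# => {
#      backlog: 0,          # requests currently queued in the thread pool
#      running: 5,          # spawned threads
#      pool_capacity: 4,
#      busy_threads: 1,
#      backlog_max: 2,      # high-water mark since the previous stats call
#      max_threads: 5,
#      requests_count: 1204,
#      reactor_max: 7       # high-water mark of clients held in the reactor
#    }
```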
@@ -25,6 +25,8 @@ module Puma
  # up its work before leaving the thread to die on the vine.
  SHUTDOWN_GRACE_TIME = 5 # seconds

+ attr_reader :out_of_band_running
+
  # Maintain a minimum of +min+ and maximum of +max+ threads
  # in the pool.
  #
@@ -35,9 +37,9 @@ module Puma
  @not_empty = ConditionVariable.new
  @not_full = ConditionVariable.new
  @mutex = Mutex.new
+ @todo = Queue.new

- @todo = []
-
+ @backlog_max = 0
  @spawned = 0
  @waiting = 0

@@ -50,6 +52,7 @@ module Puma
  @shutdown_grace_time = Float(options[:pool_shutdown_grace_time] || SHUTDOWN_GRACE_TIME)
  @block = block
  @out_of_band = options[:out_of_band]
+ @out_of_band_running = false
  @clean_thread_locals = options[:clean_thread_locals]
  @before_thread_start = options[:before_thread_start]
  @before_thread_exit = options[:before_thread_exit]
@@ -89,20 +92,33 @@ module Puma
  # @return [Hash] hash containing stat info from ThreadPool
  def stats
  with_mutex do
+ temp = @backlog_max
+ @backlog_max = 0
  { backlog: @todo.size,
  running: @spawned,
  pool_capacity: @waiting + (@max - @spawned),
- busy_threads: @spawned - @waiting + @todo.size
+ busy_threads: @spawned - @waiting + @todo.size,
+ backlog_max: temp
  }
  end
  end

+ def reset_max
+ with_mutex { @backlog_max = 0 }
+ end
+
  # How many objects have yet to be processed by the pool?
  #
  def backlog
  with_mutex { @todo.size }
  end

+ # The maximum size of the backlog
+ #
+ def backlog_max
+ with_mutex { @backlog_max }
+ end
+
  # @!attribute [r] pool_capacity
  def pool_capacity
  waiting + (@max - spawned)
@@ -214,12 +230,14 @@ module Puma

  # we execute on idle hook when all threads are free
  return false unless @spawned == @waiting
-
+ @out_of_band_running = true
  @out_of_band.each(&:call)
  true
  rescue Exception => e
  STDERR.puts "Exception calling out_of_band_hook: #{e.message} (#{e.class})"
  true
+ ensure
+ @out_of_band_running = false
  end

  private :trigger_out_of_band_hook
@@ -239,6 +257,8 @@ module Puma
  end

  @todo << work
+ t = @todo.size
+ @backlog_max = t if t > @backlog_max

  if @waiting < @todo.size and @spawned < @max
  spawn_thread
@@ -248,69 +268,6 @@ module Puma
  end
  end

- # This method is used by `Puma::Server` to let the server know when
- # the thread pool can pull more requests from the socket and
- # pass to the reactor.
- #
- # The general idea is that the thread pool can only work on a fixed
- # number of requests at the same time. If it is already processing that
- # number of requests then it is at capacity. If another Puma process has
- # spare capacity, then the request can be left on the socket so the other
- # worker can pick it up and process it.
- #
- # For example: if there are 5 threads, but only 4 working on
- # requests, this method will not wait and the `Puma::Server`
- # can pull a request right away.
- #
- # If there are 5 threads and all 5 of them are busy, then it will
- # pause here, and wait until the `not_full` condition variable is
- # signaled, usually this indicates that a request has been processed.
- #
- # It's important to note that even though the server might accept another
- # request, it might not be added to the `@todo` array right away.
- # For example if a slow client has only sent a header, but not a body
- # then the `@todo` array would stay the same size as the reactor works
- # to try to buffer the request. In that scenario the next call to this
- # method would not block and another request would be added into the reactor
- # by the server. This would continue until a fully buffered request
- # makes it through the reactor and can then be processed by the thread pool.
- def wait_until_not_full
- with_mutex do
- while true
- return if @shutdown
-
- # If we can still spin up new threads and there
- # is work queued that cannot be handled by waiting
- # threads, then accept more work until we would
- # spin up the max number of threads.
- return if busy_threads < @max
-
- @not_full.wait @mutex
- end
- end
- end
-
- # @version 5.0.0
- def wait_for_less_busy_worker(delay_s)
- return unless delay_s && delay_s > 0
-
- # Ruby MRI does GVL, this can result
- # in processing contention when multiple threads
- # (requests) are running concurrently
- return unless Puma.mri?
-
- with_mutex do
- return if @shutdown
-
- # do not delay, if we are not busy
- return unless busy_threads > 0
-
- # this will be signaled once a request finishes,
- # which can happen earlier than delay
- @not_full.wait @mutex, delay_s
- end
- end
-
  # If there are any free threads in the pool, tell one to go ahead
  # and exit. If +force+ is true, then a trim request is requested
  # even if all threads are being utilized.
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: puma
  version: !ruby/object:Gem::Version
- version: 6.6.1
+ version: 7.0.0.pre1
  platform: java
  authors:
  - Evan Phoenix