rsense-server 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. checksums.yaml +7 -0
  2. data/.gitignore +23 -0
  3. data/Gemfile +14 -0
  4. data/Guardfile +5 -0
  5. data/LICENSE.txt +1 -0
  6. data/README.md +51 -0
  7. data/Rakefile +9 -0
  8. data/bin/_rsense.rb +115 -0
  9. data/config/puma.rb +2 -0
  10. data/lib/rsense/server/code.rb +38 -0
  11. data/lib/rsense/server/command/completion_result.rb +11 -0
  12. data/lib/rsense/server/command/special_meth.rb +18 -0
  13. data/lib/rsense/server/command/type_inference_method.rb +24 -0
  14. data/lib/rsense/server/command.rb +239 -0
  15. data/lib/rsense/server/config.rb +70 -0
  16. data/lib/rsense/server/gem_path.rb +18 -0
  17. data/lib/rsense/server/listeners/find_definition_event_listener.rb +91 -0
  18. data/lib/rsense/server/listeners/where_event_listener.rb +39 -0
  19. data/lib/rsense/server/load_path.rb +62 -0
  20. data/lib/rsense/server/options.rb +85 -0
  21. data/lib/rsense/server/parser.rb +17 -0
  22. data/lib/rsense/server/path_info.rb +17 -0
  23. data/lib/rsense/server/project.rb +24 -0
  24. data/lib/rsense/server/version.rb +5 -0
  25. data/lib/rsense/server.rb +18 -0
  26. data/rsense-server.gemspec +35 -0
  27. data/spec/fixtures/config_fixture/.rsense +4 -0
  28. data/spec/fixtures/deeply/nested/thing.rb +0 -0
  29. data/spec/fixtures/find_def_sample.json +10 -0
  30. data/spec/fixtures/sample.json +10 -0
  31. data/spec/fixtures/test_gem/.gitignore +22 -0
  32. data/spec/fixtures/test_gem/Gemfile +4 -0
  33. data/spec/fixtures/test_gem/LICENSE.txt +22 -0
  34. data/spec/fixtures/test_gem/README.md +29 -0
  35. data/spec/fixtures/test_gem/Rakefile +2 -0
  36. data/spec/fixtures/test_gem/lib/sample/version.rb +3 -0
  37. data/spec/fixtures/test_gem/lib/sample.rb +16 -0
  38. data/spec/fixtures/test_gem/sample.gemspec +23 -0
  39. data/spec/fixtures/test_gem/test.json +10 -0
  40. data/spec/rsense/server/code_spec.rb +44 -0
  41. data/spec/rsense/server/command/special_meth_spec.rb +23 -0
  42. data/spec/rsense/server/command_spec.rb +108 -0
  43. data/spec/rsense/server/config_spec.rb +27 -0
  44. data/spec/rsense/server/gem_path_spec.rb +16 -0
  45. data/spec/rsense/server/load_path_spec.rb +63 -0
  46. data/spec/rsense/server/options_spec.rb +33 -0
  47. data/spec/rsense/server/path_info_spec.rb +11 -0
  48. data/spec/rsense/server/project_spec.rb +18 -0
  49. data/spec/rsense/server_spec.rb +7 -0
  50. data/spec/spec_helper.rb +16 -0
  51. data/vendor/gems/puma-2.8.2-java/COPYING +55 -0
  52. data/vendor/gems/puma-2.8.2-java/DEPLOYMENT.md +92 -0
  53. data/vendor/gems/puma-2.8.2-java/Gemfile +17 -0
  54. data/vendor/gems/puma-2.8.2-java/History.txt +532 -0
  55. data/vendor/gems/puma-2.8.2-java/LICENSE +26 -0
  56. data/vendor/gems/puma-2.8.2-java/Manifest.txt +68 -0
  57. data/vendor/gems/puma-2.8.2-java/README.md +251 -0
  58. data/vendor/gems/puma-2.8.2-java/Rakefile +158 -0
  59. data/vendor/gems/puma-2.8.2-java/bin/puma +10 -0
  60. data/vendor/gems/puma-2.8.2-java/bin/puma-wild +17 -0
  61. data/vendor/gems/puma-2.8.2-java/bin/pumactl +12 -0
  62. data/vendor/gems/puma-2.8.2-java/docs/config.md +0 -0
  63. data/vendor/gems/puma-2.8.2-java/docs/nginx.md +80 -0
  64. data/vendor/gems/puma-2.8.2-java/docs/signals.md +42 -0
  65. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/PumaHttp11Service.java +17 -0
  66. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/ext_help.h +15 -0
  67. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/extconf.rb +8 -0
  68. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/http11_parser.c +1225 -0
  69. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/http11_parser.h +64 -0
  70. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/http11_parser.java.rl +161 -0
  71. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/http11_parser.rl +146 -0
  72. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/http11_parser_common.rl +54 -0
  73. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/io_buffer.c +155 -0
  74. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/mini_ssl.c +195 -0
  75. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/org/jruby/puma/Http11.java +225 -0
  76. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/org/jruby/puma/Http11Parser.java +488 -0
  77. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/org/jruby/puma/MiniSSL.java +289 -0
  78. data/vendor/gems/puma-2.8.2-java/ext/puma_http11/puma_http11.c +491 -0
  79. data/vendor/gems/puma-2.8.2-java/lib/puma/accept_nonblock.rb +23 -0
  80. data/vendor/gems/puma-2.8.2-java/lib/puma/app/status.rb +59 -0
  81. data/vendor/gems/puma-2.8.2-java/lib/puma/binder.rb +298 -0
  82. data/vendor/gems/puma-2.8.2-java/lib/puma/capistrano.rb +86 -0
  83. data/vendor/gems/puma-2.8.2-java/lib/puma/cli.rb +587 -0
  84. data/vendor/gems/puma-2.8.2-java/lib/puma/client.rb +289 -0
  85. data/vendor/gems/puma-2.8.2-java/lib/puma/cluster.rb +389 -0
  86. data/vendor/gems/puma-2.8.2-java/lib/puma/compat.rb +18 -0
  87. data/vendor/gems/puma-2.8.2-java/lib/puma/configuration.rb +377 -0
  88. data/vendor/gems/puma-2.8.2-java/lib/puma/const.rb +165 -0
  89. data/vendor/gems/puma-2.8.2-java/lib/puma/control_cli.rb +251 -0
  90. data/vendor/gems/puma-2.8.2-java/lib/puma/daemon_ext.rb +25 -0
  91. data/vendor/gems/puma-2.8.2-java/lib/puma/delegation.rb +11 -0
  92. data/vendor/gems/puma-2.8.2-java/lib/puma/detect.rb +4 -0
  93. data/vendor/gems/puma-2.8.2-java/lib/puma/events.rb +130 -0
  94. data/vendor/gems/puma-2.8.2-java/lib/puma/io_buffer.rb +7 -0
  95. data/vendor/gems/puma-2.8.2-java/lib/puma/java_io_buffer.rb +45 -0
  96. data/vendor/gems/puma-2.8.2-java/lib/puma/jruby_restart.rb +83 -0
  97. data/vendor/gems/puma-2.8.2-java/lib/puma/minissl.rb +148 -0
  98. data/vendor/gems/puma-2.8.2-java/lib/puma/null_io.rb +34 -0
  99. data/vendor/gems/puma-2.8.2-java/lib/puma/puma_http11.jar +0 -0
  100. data/vendor/gems/puma-2.8.2-java/lib/puma/rack_default.rb +7 -0
  101. data/vendor/gems/puma-2.8.2-java/lib/puma/rack_patch.rb +45 -0
  102. data/vendor/gems/puma-2.8.2-java/lib/puma/reactor.rb +183 -0
  103. data/vendor/gems/puma-2.8.2-java/lib/puma/runner.rb +146 -0
  104. data/vendor/gems/puma-2.8.2-java/lib/puma/server.rb +801 -0
  105. data/vendor/gems/puma-2.8.2-java/lib/puma/single.rb +102 -0
  106. data/vendor/gems/puma-2.8.2-java/lib/puma/tcp_logger.rb +32 -0
  107. data/vendor/gems/puma-2.8.2-java/lib/puma/thread_pool.rb +185 -0
  108. data/vendor/gems/puma-2.8.2-java/lib/puma/util.rb +9 -0
  109. data/vendor/gems/puma-2.8.2-java/lib/puma.rb +14 -0
  110. data/vendor/gems/puma-2.8.2-java/lib/rack/handler/puma.rb +66 -0
  111. data/vendor/gems/puma-2.8.2-java/puma.gemspec +55 -0
  112. data/vendor/gems/puma-2.8.2-java/test/test_app_status.rb +92 -0
  113. data/vendor/gems/puma-2.8.2-java/test/test_cli.rb +173 -0
  114. data/vendor/gems/puma-2.8.2-java/test/test_config.rb +26 -0
  115. data/vendor/gems/puma-2.8.2-java/test/test_http10.rb +27 -0
  116. data/vendor/gems/puma-2.8.2-java/test/test_http11.rb +144 -0
  117. data/vendor/gems/puma-2.8.2-java/test/test_integration.rb +165 -0
  118. data/vendor/gems/puma-2.8.2-java/test/test_iobuffer.rb +38 -0
  119. data/vendor/gems/puma-2.8.2-java/test/test_minissl.rb +25 -0
  120. data/vendor/gems/puma-2.8.2-java/test/test_null_io.rb +31 -0
  121. data/vendor/gems/puma-2.8.2-java/test/test_persistent.rb +238 -0
  122. data/vendor/gems/puma-2.8.2-java/test/test_puma_server.rb +323 -0
  123. data/vendor/gems/puma-2.8.2-java/test/test_rack_handler.rb +10 -0
  124. data/vendor/gems/puma-2.8.2-java/test/test_rack_server.rb +141 -0
  125. data/vendor/gems/puma-2.8.2-java/test/test_tcp_rack.rb +42 -0
  126. data/vendor/gems/puma-2.8.2-java/test/test_thread_pool.rb +156 -0
  127. data/vendor/gems/puma-2.8.2-java/test/test_unix_socket.rb +39 -0
  128. data/vendor/gems/puma-2.8.2-java/test/test_ws.rb +89 -0
  129. data/vendor/gems/puma-2.8.2-java/tools/jungle/README.md +9 -0
  130. data/vendor/gems/puma-2.8.2-java/tools/jungle/init.d/README.md +54 -0
  131. data/vendor/gems/puma-2.8.2-java/tools/jungle/init.d/puma +332 -0
  132. data/vendor/gems/puma-2.8.2-java/tools/jungle/init.d/run-puma +3 -0
  133. data/vendor/gems/puma-2.8.2-java/tools/jungle/upstart/README.md +61 -0
  134. data/vendor/gems/puma-2.8.2-java/tools/jungle/upstart/puma-manager.conf +31 -0
  135. data/vendor/gems/puma-2.8.2-java/tools/jungle/upstart/puma.conf +63 -0
  136. data/vendor/gems/puma-2.8.2-java/tools/trickletest.rb +45 -0
  137. metadata +389 -0
@@ -0,0 +1,801 @@
1
+ require 'rack'
2
+ require 'stringio'
3
+
4
+ require 'puma/thread_pool'
5
+ require 'puma/const'
6
+ require 'puma/events'
7
+ require 'puma/null_io'
8
+ require 'puma/compat'
9
+ require 'puma/reactor'
10
+ require 'puma/client'
11
+ require 'puma/binder'
12
+ require 'puma/delegation'
13
+ require 'puma/accept_nonblock'
14
+ require 'puma/util'
15
+
16
+ require 'puma/rack_patch'
17
+
18
+ require 'puma/puma_http11'
19
+
20
+ unless Puma.const_defined? "IOBuffer"
21
+ require 'puma/io_buffer'
22
+ end
23
+
24
+ require 'socket'
25
+
26
+ module Puma
27
+
28
+ # The HTTP Server itself. Serves out a single Rack app.
29
+ class Server
30
+
31
+ include Puma::Const
32
+ extend Puma::Delegation
33
+
34
+ attr_reader :thread
35
+ attr_reader :events
36
+ attr_accessor :app
37
+
38
+ attr_accessor :min_threads
39
+ attr_accessor :max_threads
40
+ attr_accessor :persistent_timeout
41
+ attr_accessor :auto_trim_time
42
+ attr_accessor :first_data_timeout
43
+
44
# Build a server for the rack app +app+.
#
# +events+ receives error/lifecycle callbacks; see Puma::Events for the
# methods an events object is expected to implement.
#
# Server#run returns a thread that you can join on to wait for the server
# to finish its work.
#
def initialize(app, events=Events.stdio, options={})
  @app     = app
  @events  = events
  @options = options

  # Self-pipe used to wake the accept loop for stop/halt/restart commands.
  @check, @notify = Puma::Util.pipe

  @status = :stop
  @mode   = :http

  # Thread pool configuration.
  @min_threads    = 0
  @max_threads    = 16
  @auto_trim_time = 1
  @thread         = nil
  @thread_pool    = nil

  # Timeouts (seconds), from Puma::Const.
  @persistent_timeout = PERSISTENT_TIMEOUT
  @first_data_timeout = FIRST_DATA_TIMEOUT

  # By default we create (and therefore own and later close) our binder.
  @binder     = Binder.new(events)
  @own_binder = true

  @leak_stack_on_error = true

  ENV['RACK_ENV'] ||= "development"
end
82
+
83
attr_accessor :binder
attr_accessor :leak_stack_on_error

# Listener setup is delegated straight to the binder.
forward :add_tcp_listener,  :@binder
forward :add_ssl_listener,  :@binder
forward :add_unix_listener, :@binder
88
+
89
# Adopt an externally created binder. Since we did not create it,
# we are not responsible for closing it on shutdown.
def inherit_binder(bind)
  @binder     = bind
  @own_binder = false
end
93
+
94
# Switch the server into raw-TCP ("Lopez") mode.
def tcp_mode!
  @mode = :tcp
end
97
+
98
# On Linux, use TCP_CORK to better control how the TCP stack
# packetizes our stream. This improves both latency and throughput.
#
if RUBY_PLATFORM =~ /linux/
  # Numeric args: 6 == Socket::IPPROTO_TCP, 3 == TCP_CORK,
  # and the final 1/0 toggles corking on/off.
  def cork_socket(socket)
    socket.setsockopt(6, 3, 1) if socket.kind_of? TCPSocket
  rescue IOError, SystemCallError
    # Socket already gone; nothing to do.
  end

  def uncork_socket(socket)
    socket.setsockopt(6, 3, 0) if socket.kind_of? TCPSocket
  rescue IOError, SystemCallError
    # Socket already gone; nothing to do.
  end
else
  # No-op implementations for platforms without TCP_CORK.
  def cork_socket(socket)
  end

  def uncork_socket(socket)
  end
end
125
+
126
# Number of requests queued in the thread pool, or nil before #run.
def backlog
  @thread_pool && @thread_pool.backlog
end
129
+
130
# Number of worker threads currently spawned, or nil before #run.
def running
  @thread_pool && @thread_pool.spawned
end
133
+
134
# Lopez Mode == raw tcp apps
#
# Boot the thread pool and accept loop for raw-TCP mode. Here the app
# is called with (env, io) and owns the socket directly.
def run_lopez_mode(background=true)
  @thread_pool = ThreadPool.new(@min_threads,
                                @max_threads,
                                Hash) do |client, tl|
    io   = client.to_io
    addr = io.peeraddr.last

    if addr.empty?
      # Unix sockets report an empty peer address; normalize to localhost.
      addr = "127.0.0.1:0"
    else
      addr = "#{addr}:#{io.peeraddr[1]}"
    end

    env = { 'thread' => tl, REMOTE_ADDR => addr }

    begin
      @app.call env, client.to_io
    rescue Object => e
      STDERR.puts "! Detected exception at toplevel: #{e.message} (#{e.class})"
      STDERR.puts e.backtrace
    end

    # The app may keep the socket by setting env['detach'].
    client.close unless env['detach']
  end

  @events.fire :state, :running

  if background
    @thread = Thread.new { handle_servers_lopez_mode }
    return @thread
  else
    handle_servers_lopez_mode
  end
end
172
+
173
# Accept loop for raw-TCP mode: selects on the listeners plus the
# control pipe and feeds accepted connections to the thread pool.
def handle_servers_lopez_mode
  begin
    check   = @check
    sockets = [check] + @binder.ios
    pool    = @thread_pool

    while @status == :run
      begin
        ios = IO.select sockets
        ios.first.each do |sock|
          if sock == check
            # A command byte arrived on the control pipe.
            break if handle_check
          else
            begin
              if io = sock.accept_nonblock
                client = Client.new io, nil
                pool << client
              end
            rescue SystemCallError
            end
          end
        end
      rescue Errno::ECONNABORTED
        # client closed the socket even before accept
        client.close rescue nil
      rescue Object => e
        @events.unknown_error self, e, "Listen loop"
      end
    end

    @events.fire :state, @status

    graceful_shutdown if @status == :stop || @status == :restart

  rescue Exception => e
    STDERR.puts "Exception handling servers: #{e.message} (#{e.class})"
    STDERR.puts e.backtrace
  ensure
    @check.close
    @notify.close

    # On restart the binder's listeners are re-used by the new server.
    if @status != :restart and @own_binder
      @binder.close
    end
  end

  @events.fire :state, :done
end
221
# Runs the server.
#
# If +background+ is true (the default) then a thread is spun
# up in the background to handle requests. Otherwise requests
# are handled synchronously.
#
def run(background=true)
  BasicSocket.do_not_reverse_lookup = true

  @events.fire :state, :booting

  @status = :run

  # Raw-TCP mode has its own thread pool and accept loop.
  return run_lopez_mode(background) if @mode == :tcp

  @thread_pool = ThreadPool.new(@min_threads,
                                @max_threads,
                                IOBuffer) do |client, buffer|
    process_now = false

    begin
      process_now = client.eagerly_finish
    rescue HttpParserError => e
      client.write_400
      client.close

      @events.parse_error self, client.env, e
    rescue ConnectionError
      client.close
    else
      if process_now
        process_client client, buffer
      else
        # Not enough data yet; park the client in the reactor until a
        # full request has arrived (or the first-data timeout fires).
        client.set_timeout @first_data_timeout
        @reactor.add client
      end
    end
  end

  @reactor = Reactor.new self, @thread_pool
  @reactor.run_in_thread

  @thread_pool.auto_trim!(@auto_trim_time) if @auto_trim_time

  @events.fire :state, :running

  if background
    @thread = Thread.new { handle_servers }
    @thread
  else
    handle_servers
  end
end
279
+
280
# Accept loop for HTTP mode: selects on the listeners plus the control
# pipe, feeding accepted connections to the thread pool; shuts the
# reactor and binder down when the loop exits.
def handle_servers
  begin
    check   = @check
    sockets = [check] + @binder.ios
    pool    = @thread_pool

    while @status == :run
      begin
        ios = IO.select sockets
        ios.first.each do |sock|
          if sock == check
            # A command byte arrived on the control pipe.
            break if handle_check
          else
            begin
              if io = sock.accept_nonblock
                client = Client.new io, @binder.env(sock)
                pool << client
              end
            rescue SystemCallError
            end
          end
        end
      rescue Errno::ECONNABORTED
        # client closed the socket even before accept
        client.close rescue nil
      rescue Object => e
        @events.unknown_error self, e, "Listen loop"
      end
    end

    @events.fire :state, @status

    graceful_shutdown if @status == :stop || @status == :restart
    @reactor.clear! if @status == :restart

    @reactor.shutdown
  rescue Exception => e
    STDERR.puts "Exception handling servers: #{e.message} (#{e.class})"
    STDERR.puts e.backtrace
  ensure
    @check.close
    @notify.close

    # On restart the binder's listeners are re-used by the new server.
    if @status != :restart and @own_binder
      @binder.close
    end
  end

  @events.fire :state, :done
end
330
+
331
# :nodoc:
#
# Drain one command byte from the control pipe and update @status.
# Returns true when the accept loop should stop selecting.
def handle_check
  case @check.read(1)
  when STOP_COMMAND
    @status = :stop
    true
  when HALT_COMMAND
    @status = :halt
    true
  when RESTART_COMMAND
    @status = :restart
    true
  else
    false
  end
end
349
+
350
# Given a connection on +client+, handle the incoming requests.
#
# This method supports HTTP Keep-Alive, so it may — depending on
# whether the client indicates keep-alive support — wait for another
# request before returning.
#
def process_client(client, buffer)
  begin
    close_socket = true

    while true
      case handle_request(client, buffer)
      when false
        # Connection is done; ensure-block will close it.
        return
      when :async
        # Request was hijacked; the app now owns the socket.
        close_socket = false
        return
      when true
        # Keep-alive: recycle the buffer and look for the next request.
        buffer.reset

        unless client.reset(@status == :run)
          # No pipelined request available; park the client in the
          # reactor until more data arrives.
          close_socket = false
          client.set_timeout @persistent_timeout
          @reactor.add client
          return
        end
      end
    end

  # The client disconnected while we were reading data
  rescue ConnectionError
    # Swallow them. The ensure tries to close +client+ down

  # The client doesn't know HTTP well
  rescue HttpParserError => e
    client.write_400

    @events.parse_error self, client.env, e

  # Server error
  rescue StandardError => e
    client.write_500

    @events.unknown_error self, e, "Read"

  ensure
    buffer.reset

    begin
      client.close if close_socket
    rescue IOError, SystemCallError
      # Already closed
    rescue StandardError => e
      @events.unknown_error self, e, "Client"
    end
  end
end
407
+
408
# Given a Hash +env+ for the request read from +client+, add
# and fixup keys to comply with Rack's env guidelines.
#
def normalize_env(env, client)
  if host = env[HTTP_HOST]
    if colon = host.index(":")
      env[SERVER_NAME] = host[0, colon]
      env[SERVER_PORT] = host[colon + 1, host.bytesize]
    else
      env[SERVER_NAME] = host
      env[SERVER_PORT] = default_server_port(env)
    end
  else
    env[SERVER_NAME] = LOCALHOST
    env[SERVER_PORT] = default_server_port(env)
  end

  unless env[REQUEST_PATH]
    # The client may have sent an absolute URI as the request target;
    # recover the path component from it.
    # NOTE(review): URI is used here but 'uri' is never required in this
    # file — presumably loaded via rack; confirm.
    uri = URI.parse(env[REQUEST_URI])
    env[REQUEST_PATH] = uri.path

    raise "No REQUEST PATH" unless env[REQUEST_PATH]
  end

  env[PATH_INFO] = env[REQUEST_PATH]

  # From http://www.ietf.org/rfc/rfc3875 :
  # "Script authors should be aware that the REMOTE_ADDR and
  # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
  # may not identify the ultimate source of the request.
  # They identify the client for the immediate request to the
  # server; that client may be a proxy, gateway, or other
  # intermediary acting on behalf of the actual source client."
  #
  addr = client.peeraddr.last

  # Set unix socket addrs to localhost
  addr = "127.0.0.1" if addr.empty?

  env[REMOTE_ADDR] = addr
end
451
+
452
# Port implied by the request scheme when the Host header carries none.
def default_server_port(env)
  if env['HTTP_X_FORWARDED_PROTO'] == 'https'
    PORT_443
  else
    PORT_80
  end
end
455
+
456
# Given the request +req+ (carrying env, io and a partially read body),
# finish reading the body if there is one and invoke the rack app, then
# construct the response and write it back to the client.
#
# +lines+ is the reusable response buffer for this worker thread.
#
# Returns false (close the connection), true (keep-alive) or :async
# (the socket has been hijacked and is no longer ours to manage).
#
def handle_request(req, lines)
  env    = req.env
  client = req.io

  normalize_env env, client

  env[PUMA_SOCKET] = client

  env[HIJACK_P] = true
  env[HIJACK]   = req

  body = req.body

  head = env[REQUEST_METHOD] == HEAD

  env[RACK_INPUT]      = body
  env[RACK_URL_SCHEME] = env[HTTPS_KEY] ? HTTPS : HTTP

  # A rack extension. If the app writes #call'ables to this
  # array, we will invoke them when the request is done.
  #
  after_reply = env[RACK_AFTER_REPLY] = []

  begin
    begin
      status, headers, res_body = @app.call(env)

      return :async if req.hijacked

      status = status.to_i

      if status == -1
        unless headers.empty? and res_body == []
          raise "async response must have empty headers and body"
        end

        return :async
      end
    rescue StandardError => e
      @events.unknown_error self, e, "Rack app"

      status, headers, res_body = lowlevel_error(e)
    end

    content_length = nil
    no_body        = head

    if res_body.kind_of? Array and res_body.size == 1
      content_length = res_body[0].bytesize
    end

    cork_socket client

    line_ending = LINE_END
    colon       = COLON

    if env[HTTP_VERSION] == HTTP_11
      allow_chunked            = true
      keep_alive               = env[HTTP_CONNECTION] != CLOSE
      include_keepalive_header = false

      # An optimization. The most common response is 200, so we can
      # reply with the proper 200 status without having to compute
      # the response header.
      #
      if status == 200
        lines << HTTP_11_200
      else
        lines.append "HTTP/1.1 ", status.to_s, " ",
                     fetch_status_code(status), line_ending

        no_body ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status]
      end
    else
      allow_chunked            = false
      keep_alive               = env[HTTP_CONNECTION] == KEEP_ALIVE
      include_keepalive_header = keep_alive

      # Same optimization as above for HTTP/1.1
      #
      if status == 200
        lines << HTTP_10_200
      else
        lines.append "HTTP/1.0 ", status.to_s, " ",
                     fetch_status_code(status), line_ending

        no_body ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status]
      end
    end

    response_hijack = nil

    headers.each do |k, vs|
      case k
      when CONTENT_LENGTH2
        content_length = vs
        next
      when TRANSFER_ENCODING
        allow_chunked  = false
        content_length = nil
      when HIJACK
        response_hijack = vs
        next
      end

      if vs.respond_to?(:to_s)
        vs.to_s.split(NEWLINE).each do |v|
          lines.append k, colon, v, line_ending
        end
      else
        lines.append k, colon, line_ending
      end
    end

    if no_body
      if content_length and status != 204
        lines.append CONTENT_LENGTH_S, content_length.to_s, line_ending
      end

      lines << line_ending
      fast_write client, lines.to_s
      return keep_alive
    end

    if include_keepalive_header
      lines << CONNECTION_KEEP_ALIVE
    elsif !keep_alive
      lines << CONNECTION_CLOSE
    end

    unless response_hijack
      if content_length
        lines.append CONTENT_LENGTH_S, content_length.to_s, line_ending
        chunked = false
      elsif allow_chunked
        lines << TRANSFER_ENCODING_CHUNKED
        chunked = true
      end
    end

    lines << line_ending

    fast_write client, lines.to_s

    if response_hijack
      # Partial hijack: the app's callable takes over the socket now
      # that the headers have been written.
      response_hijack.call client
      return :async
    end

    begin
      res_body.each do |part|
        if chunked
          client.syswrite part.bytesize.to_s(16)
          client.syswrite line_ending
          fast_write client, part
          client.syswrite line_ending
        else
          fast_write client, part
        end

        client.flush
      end

      if chunked
        client.syswrite CLOSE_CHUNKED
        client.flush
      end
    rescue SystemCallError, IOError
      raise ConnectionError, "Connection error detected during write"
    end

  ensure
    uncork_socket client

    body.close
    res_body.close if res_body.respond_to? :close

    after_reply.each { |o| o.call }
  end

  return keep_alive
end
647
+
648
# Human-readable reason phrase for +status+, or 'CUSTOM' when unknown.
def fetch_status_code(status)
  HTTP_STATUS_CODES.fetch(status, 'CUSTOM')
end
private :fetch_status_code
652
+
653
# Given the request +env+ from +client+ and the partial body +body+
# plus a potential Content-Length value +cl+, finish reading
# the body and return it.
#
# If the body is larger than MAX_BODY, a Tempfile object is used
# for the body, otherwise a StringIO is used.
#
def read_body(env, client, body, cl)
  wanted = cl.to_i

  remain = wanted - body.bytesize

  # Everything already arrived along with the headers.
  return StringIO.new(body) if remain <= 0

  if remain > MAX_BODY
    # Too big to buffer in memory; spool to disk instead.
    io = Tempfile.new(Const::PUMA_TMP_BASE)
    io.binmode
  else
    # The body[0,0] trick is to get an empty string in the same
    # encoding as body.
    io = StringIO.new body[0,0]
  end

  io.write body

  # Read an odd sized chunk first so every later read can be an even
  # CHUNK_SIZE read.
  data = client.readpartial(remain % CHUNK_SIZE)

  # No data means a closed socket
  unless data
    io.close
    return nil
  end

  remain -= io.write(data)

  # Read the rest of the chunks
  while remain > 0
    data = client.readpartial(CHUNK_SIZE)
    unless data
      io.close
      return nil
    end

    remain -= io.write(data)
  end

  io.rewind

  io
end
706
+
707
# A fallback rack response if +@app+ raises an exception.
#
# If the :lowlevel_error_handler option is set, the exception is given
# to it and its return value is used as the rack response. Otherwise a
# plain 500 is returned; the backtrace is included only when
# @leak_stack_on_error is enabled.
def lowlevel_error(e)
  if handler = @options[:lowlevel_error_handler]
    return handler.call(e)
  end

  if @leak_stack_on_error
    [500, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{e.backtrace.join("\n")}"]]
  else
    # Fixed misspelling in the user-facing message ("occured").
    [500, {}, ["A really lowlevel plumbing error occurred. Please contact your local Maytag(tm) repair man.\n"]]
  end
end
720
+
721
# Wait for all outstanding requests to finish.
#
# With the :drain_on_shutdown option, connections already accepted by
# the kernel are pulled off the listeners and served before the thread
# pool is shut down.
def graceful_shutdown
  if @options[:drain_on_shutdown]
    count = 0

    while true
      # Non-blocking poll of the listeners for pending connections.
      ios = IO.select @binder.ios, nil, nil, 0
      break unless ios

      ios.first.each do |sock|
        begin
          if io = sock.accept_nonblock
            count += 1
            client = Client.new io, @binder.env(sock)
            @thread_pool << client
          end
        rescue SystemCallError
        end
      end
    end

    @events.debug "Drained #{count} additional connections."
  end

  @thread_pool.shutdown if @thread_pool
end
748
+
749
# Stops the acceptor thread and then causes the worker threads to finish
# off the request queue before finally exiting.
#
def stop(sync=false)
  begin
    @notify << STOP_COMMAND
  rescue IOError
    # The server, in another thread, is shutting down
  end

  @thread.join if sync && @thread
end
761
+
762
# Immediately halt the server: workers do not drain the request queue.
def halt(sync=false)
  begin
    @notify << HALT_COMMAND
  rescue IOError
    # The server, in another thread, is shutting down
  end

  @thread.join if sync && @thread
end
771
+
772
# Ask the accept loop to begin a restart.
def begin_restart
  begin
    @notify << RESTART_COMMAND
  rescue IOError
    # The server, in another thread, is shutting down
  end
end
779
+
780
# Write +str+ to +io+ in full, retrying on EAGAIN/EWOULDBLOCK with a
# bounded select so a slow client cannot block a worker forever.
# Raises ConnectionError on timeout or any other write failure.
def fast_write(io, str)
  written = 0
  while true
    begin
      written = io.syswrite str
    rescue Errno::EAGAIN, Errno::EWOULDBLOCK
      unless IO.select(nil, [io], nil, WRITE_TIMEOUT)
        raise ConnectionError, "Socket timeout writing data"
      end

      retry
    rescue Errno::EPIPE, SystemCallError, IOError
      raise ConnectionError, "Socket timeout writing data"
    end

    # syswrite may do a partial write; loop on the remainder.
    return if written == str.bytesize
    str = str.byteslice(written..-1)
  end
end
private :fast_write
800
+ end
801
+ end