grpc 0.6.0 → 0.6.1

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (57) hide show
  1. checksums.yaml +4 -4
  2. data/.rspec +1 -0
  3. data/.rubocop_todo.yml +12 -20
  4. data/CHANGELOG.md +11 -0
  5. data/Rakefile +1 -0
  6. data/bin/apis/pubsub_demo.rb +3 -6
  7. data/bin/interop/interop_client.rb +43 -3
  8. data/bin/interop/interop_server.rb +1 -1
  9. data/bin/math_server.rb +1 -1
  10. data/bin/noproto_server.rb +1 -1
  11. data/ext/grpc/rb_byte_buffer.c +15 -189
  12. data/ext/grpc/rb_byte_buffer.h +4 -12
  13. data/ext/grpc/rb_call.c +514 -307
  14. data/ext/grpc/rb_call.h +4 -4
  15. data/ext/grpc/rb_channel.c +58 -34
  16. data/ext/grpc/rb_channel.h +0 -3
  17. data/ext/grpc/rb_channel_args.c +13 -4
  18. data/ext/grpc/rb_completion_queue.c +50 -23
  19. data/ext/grpc/rb_completion_queue.h +7 -3
  20. data/ext/grpc/rb_credentials.c +40 -28
  21. data/ext/grpc/rb_credentials.h +0 -4
  22. data/ext/grpc/rb_grpc.c +86 -67
  23. data/ext/grpc/rb_grpc.h +20 -10
  24. data/ext/grpc/rb_server.c +119 -26
  25. data/ext/grpc/rb_server.h +0 -4
  26. data/ext/grpc/rb_server_credentials.c +29 -16
  27. data/ext/grpc/rb_server_credentials.h +0 -4
  28. data/grpc.gemspec +11 -8
  29. data/lib/grpc.rb +1 -1
  30. data/lib/grpc/errors.rb +8 -7
  31. data/lib/grpc/generic/active_call.rb +104 -171
  32. data/lib/grpc/generic/bidi_call.rb +32 -60
  33. data/lib/grpc/generic/client_stub.rb +42 -31
  34. data/lib/grpc/generic/rpc_desc.rb +7 -12
  35. data/lib/grpc/generic/rpc_server.rb +253 -170
  36. data/lib/grpc/{core/event.rb → notifier.rb} +25 -9
  37. data/lib/grpc/version.rb +1 -1
  38. data/spec/call_spec.rb +23 -40
  39. data/spec/channel_spec.rb +11 -20
  40. data/spec/client_server_spec.rb +193 -175
  41. data/spec/credentials_spec.rb +2 -2
  42. data/spec/generic/active_call_spec.rb +59 -85
  43. data/spec/generic/client_stub_spec.rb +46 -64
  44. data/spec/generic/rpc_desc_spec.rb +50 -80
  45. data/spec/generic/rpc_server_pool_spec.rb +2 -3
  46. data/spec/generic/rpc_server_spec.rb +158 -29
  47. data/spec/server_spec.rb +1 -1
  48. data/spec/spec_helper.rb +8 -4
  49. metadata +27 -37
  50. data/ext/grpc/rb_event.c +0 -361
  51. data/ext/grpc/rb_event.h +0 -53
  52. data/ext/grpc/rb_metadata.c +0 -215
  53. data/ext/grpc/rb_metadata.h +0 -53
  54. data/spec/alloc_spec.rb +0 -44
  55. data/spec/byte_buffer_spec.rb +0 -67
  56. data/spec/event_spec.rb +0 -53
  57. data/spec/metadata_spec.rb +0 -64
@@ -30,18 +30,12 @@
30
30
  require 'forwardable'
31
31
  require 'grpc/grpc'
32
32
 
33
- def assert_event_type(ev, want)
34
- fail OutOfTime if ev.nil?
35
- got = ev.type
36
- fail("Unexpected rpc event: got #{got}, want #{want}") unless got == want
37
- end
38
-
39
33
  # GRPC contains the General RPC module.
40
34
  module GRPC
41
35
  # The BidiCall class orchestrates execution of a BiDi stream on a client or
42
36
  # server.
43
37
  class BidiCall
44
- include Core::CompletionType
38
+ include Core::CallOps
45
39
  include Core::StatusCodes
46
40
  include Core::TimeConsts
47
41
 
@@ -63,8 +57,7 @@ module GRPC
63
57
  # @param marshal [Function] f(obj)->string that marshal requests
64
58
  # @param unmarshal [Function] f(string)->obj that unmarshals responses
65
59
  # @param deadline [Fixnum] the deadline for the call to complete
66
- # @param finished_tag [Object] the object used as the call's finish tag,
67
- def initialize(call, q, marshal, unmarshal, deadline, finished_tag)
60
+ def initialize(call, q, marshal, unmarshal, deadline)
68
61
  fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
69
62
  unless q.is_a? Core::CompletionQueue
70
63
  fail(ArgumentError, 'not a CompletionQueue')
@@ -72,7 +65,6 @@ module GRPC
72
65
  @call = call
73
66
  @cq = q
74
67
  @deadline = deadline
75
- @finished_tag = finished_tag
76
68
  @marshal = marshal
77
69
  @readq = Queue.new
78
70
  @unmarshal = unmarshal
@@ -86,13 +78,11 @@ module GRPC
86
78
  # @param requests the Enumerable of requests to send
87
79
  # @return an Enumerator of requests to yield
88
80
  def run_on_client(requests, &blk)
89
- enq_th = start_write_loop(requests)
90
- loop_th = start_read_loop
81
+ @enq_th = start_write_loop(requests)
82
+ @loop_th = start_read_loop
91
83
  replies = each_queued_msg
92
84
  return replies if blk.nil?
93
85
  replies.each { |r| blk.call(r) }
94
- enq_th.join
95
- loop_th.join
96
86
  end
97
87
 
98
88
  # Begins orchestration of the Bidi stream for a server generating replies.
@@ -108,10 +98,8 @@ module GRPC
108
98
  # @param gen_each_reply [Proc] generates the BiDi stream replies.
109
99
  def run_on_server(gen_each_reply)
110
100
  replys = gen_each_reply.call(each_queued_msg)
111
- enq_th = start_write_loop(replys, is_client: false)
112
- loop_th = start_read_loop
113
- loop_th.join
114
- enq_th.join
101
+ @enq_th = start_write_loop(replys, is_client: false)
102
+ @loop_th = start_read_loop
115
103
  end
116
104
 
117
105
  private
@@ -130,10 +118,12 @@ module GRPC
130
118
  logger.debug("each_queued_msg: msg##{count}")
131
119
  count += 1
132
120
  req = @readq.pop
121
+ logger.debug("each_queued_msg: req = #{req}")
133
122
  throw req if req.is_a? StandardError
134
123
  break if req.equal?(END_OF_READS)
135
124
  yield req
136
125
  end
126
+ @enq_th.join if @enq_th.alive?
137
127
  end
138
128
 
139
129
  # during bidi-streaming, read the requests to send from a separate thread
@@ -144,36 +134,23 @@ module GRPC
144
134
  begin
145
135
  count = 0
146
136
  requests.each do |req|
137
+ logger.debug("bidi-write_loop: #{count}")
147
138
  count += 1
148
139
  payload = @marshal.call(req)
149
- @call.start_write(Core::ByteBuffer.new(payload), write_tag)
150
- ev = @cq.pluck(write_tag, INFINITE_FUTURE)
151
- begin
152
- assert_event_type(ev, WRITE_ACCEPTED)
153
- ensure
154
- ev.close
155
- end
140
+ @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
141
+ SEND_MESSAGE => payload)
156
142
  end
157
143
  if is_client
158
- @call.writes_done(write_tag)
159
- ev = @cq.pluck(write_tag, INFINITE_FUTURE)
160
- begin
161
- assert_event_type(ev, FINISH_ACCEPTED)
162
- ensure
163
- ev.close
164
- end
165
- logger.debug("bidi-client: sent #{count} reqs, waiting to finish")
166
- ev = @cq.pluck(@finished_tag, INFINITE_FUTURE)
167
- begin
168
- assert_event_type(ev, FINISHED)
169
- ensure
170
- ev.close
171
- end
172
- logger.debug('bidi-client: finished received')
144
+ logger.debug("bidi-write-loop: sent #{count}, waiting to finish")
145
+ batch_result = @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
146
+ SEND_CLOSE_FROM_CLIENT => nil,
147
+ RECV_STATUS_ON_CLIENT => nil)
148
+ batch_result.check_status
173
149
  end
174
150
  rescue StandardError => e
175
- logger.warn('bidi: write_loop failed')
151
+ logger.warn('bidi-write_loop: failed')
176
152
  logger.warn(e)
153
+ raise e
177
154
  end
178
155
  end
179
156
  end
@@ -187,27 +164,22 @@ module GRPC
187
164
 
188
165
  # queue the initial read before beginning the loop
189
166
  loop do
190
- logger.debug("waiting for read #{count}")
167
+ logger.debug("bidi-read_loop: #{count}")
191
168
  count += 1
192
- @call.start_read(read_tag)
193
- ev = @cq.pluck(read_tag, INFINITE_FUTURE)
194
- begin
195
- assert_event_type(ev, READ)
196
-
197
- # handle the next event.
198
- if ev.result.nil?
199
- @readq.push(END_OF_READS)
200
- logger.debug('done reading!')
201
- break
202
- end
203
-
204
- # push the latest read onto the queue and continue reading
205
- logger.debug("received req: #{ev.result}")
206
- res = @unmarshal.call(ev.result.to_s)
207
- @readq.push(res)
208
- ensure
209
- ev.close
169
+ # TODO: ensure metadata is read if available, currently it's not
170
+ batch_result = @call.run_batch(@cq, read_tag, INFINITE_FUTURE,
171
+ RECV_MESSAGE => nil)
172
+ # handle the next message
173
+ if batch_result.message.nil?
174
+ @readq.push(END_OF_READS)
175
+ logger.debug('bidi-read-loop: done reading!')
176
+ break
210
177
  end
178
+
179
+ # push the latest read onto the queue and continue reading
180
+ logger.debug("received req: #{batch_result.message}")
181
+ res = @unmarshal.call(batch_result.message)
182
+ @readq.push(res)
211
183
  end
212
184
 
213
185
  rescue StandardError => e
@@ -28,16 +28,16 @@
28
28
  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
29
 
30
30
  require 'grpc/generic/active_call'
31
- require 'xray/thread_dump_signal_handler'
32
31
 
33
32
  # GRPC contains the General RPC module.
34
33
  module GRPC
35
34
  # ClientStub represents an endpoint used to send requests to GRPC servers.
36
35
  class ClientStub
37
36
  include Core::StatusCodes
37
+ include Core::TimeConsts
38
38
 
39
- # Default deadline is 5 seconds.
40
- DEFAULT_DEADLINE = 5
39
+ # Default timeout is 5 seconds.
40
+ DEFAULT_TIMEOUT = 5
41
41
 
42
42
  # setup_channel is used by #initialize to construct a channel from its
43
43
  # arguments.
@@ -51,6 +51,14 @@ module GRPC
51
51
  Core::Channel.new(host, kw, creds)
52
52
  end
53
53
 
54
+ def self.update_with_jwt_aud_uri(a_hash, host, method)
55
+ last_slash_idx, res = method.rindex('/'), a_hash.clone
56
+ return res if last_slash_idx.nil?
57
+ service_name = method[0..(last_slash_idx - 1)]
58
+ res[:jwt_aud_uri] = "https://#{host}#{service_name}"
59
+ res
60
+ end
61
+
54
62
  # check_update_metadata is used by #initialize verify that it's a Proc.
55
63
  def self.check_update_metadata(update_metadata)
56
64
  return update_metadata if update_metadata.nil?
@@ -76,8 +84,8 @@ module GRPC
76
84
  # present the host and arbitrary keyword args are ignored, and the RPC
77
85
  # connection uses this channel.
78
86
  #
79
- # - :deadline
80
- # when present, this is the default deadline used for calls
87
+ # - :timeout
88
+ # when present, this is the default timeout used for calls
81
89
  #
82
90
  # - :update_metadata
83
91
  # when present, this a func that takes a hash and returns a hash
@@ -87,13 +95,13 @@ module GRPC
87
95
  # @param host [String] the host the stub connects to
88
96
  # @param q [Core::CompletionQueue] used to wait for events
89
97
  # @param channel_override [Core::Channel] a pre-created channel
90
- # @param deadline [Number] the default deadline to use in requests
98
+ # @param timeout [Number] the default timeout to use in requests
91
99
  # @param creds [Core::Credentials] the channel
92
100
  # @param update_metadata a func that updates metadata as described above
93
101
  # @param kw [KeywordArgs]the channel arguments
94
102
  def initialize(host, q,
95
103
  channel_override: nil,
96
- deadline: DEFAULT_DEADLINE,
104
+ timeout: nil,
97
105
  creds: nil,
98
106
  update_metadata: nil,
99
107
  **kw)
@@ -103,7 +111,7 @@ module GRPC
103
111
  @update_metadata = ClientStub.check_update_metadata(update_metadata)
104
112
  alt_host = kw[Core::Channel::SSL_TARGET]
105
113
  @host = alt_host.nil? ? host : alt_host
106
- @deadline = deadline
114
+ @timeout = timeout.nil? ? DEFAULT_TIMEOUT : timeout
107
115
  end
108
116
 
109
117
  # request_response sends a request to a GRPC server, and returns the
@@ -140,13 +148,14 @@ module GRPC
140
148
  # @param req [Object] the request sent to the server
141
149
  # @param marshal [Function] f(obj)->string that marshals requests
142
150
  # @param unmarshal [Function] f(string)->obj that unmarshals responses
143
- # @param deadline [Numeric] (optional) the max completion time in seconds
151
+ # @param timeout [Numeric] (optional) the max completion time in seconds
144
152
  # @param return_op [true|false] return an Operation if true
145
153
  # @return [Object] the response received from the server
146
- def request_response(method, req, marshal, unmarshal, deadline = nil,
154
+ def request_response(method, req, marshal, unmarshal, timeout = nil,
147
155
  return_op: false, **kw)
148
- c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
149
- md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
156
+ c = new_active_call(method, marshal, unmarshal, timeout)
157
+ kw_with_jwt_uri = self.class.update_with_jwt_aud_uri(kw, @host, method)
158
+ md = @update_metadata.nil? ? kw : @update_metadata.call(kw_with_jwt_uri)
150
159
  return c.request_response(req, **md) unless return_op
151
160
 
152
161
  # return the operation view of the active_call; define #execute as a
@@ -197,13 +206,14 @@ module GRPC
197
206
  # @param requests [Object] an Enumerable of requests to send
198
207
  # @param marshal [Function] f(obj)->string that marshals requests
199
208
  # @param unmarshal [Function] f(string)->obj that unmarshals responses
200
- # @param deadline [Numeric] the max completion time in seconds
209
+ # @param timeout [Numeric] the max completion time in seconds
201
210
  # @param return_op [true|false] return an Operation if true
202
211
  # @return [Object|Operation] the response received from the server
203
- def client_streamer(method, requests, marshal, unmarshal, deadline = nil,
212
+ def client_streamer(method, requests, marshal, unmarshal, timeout = nil,
204
213
  return_op: false, **kw)
205
- c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
206
- md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
214
+ c = new_active_call(method, marshal, unmarshal, timeout)
215
+ kw_with_jwt_uri = self.class.update_with_jwt_aud_uri(kw, @host, method)
216
+ md = @update_metadata.nil? ? kw : @update_metadata.call(kw_with_jwt_uri)
207
217
  return c.client_streamer(requests, **md) unless return_op
208
218
 
209
219
  # return the operation view of the active_call; define #execute as a
@@ -262,14 +272,15 @@ module GRPC
262
272
  # @param req [Object] the request sent to the server
263
273
  # @param marshal [Function] f(obj)->string that marshals requests
264
274
  # @param unmarshal [Function] f(string)->obj that unmarshals responses
265
- # @param deadline [Numeric] the max completion time in seconds
275
+ # @param timeout [Numeric] the max completion time in seconds
266
276
  # @param return_op [true|false]return an Operation if true
267
277
  # @param blk [Block] when provided, is executed for each response
268
278
  # @return [Enumerator|Operation|nil] as discussed above
269
- def server_streamer(method, req, marshal, unmarshal, deadline = nil,
279
+ def server_streamer(method, req, marshal, unmarshal, timeout = nil,
270
280
  return_op: false, **kw, &blk)
271
- c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
272
- md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
281
+ c = new_active_call(method, marshal, unmarshal, timeout)
282
+ kw_with_jwt_uri = self.class.update_with_jwt_aud_uri(kw, @host, method)
283
+ md = @update_metadata.nil? ? kw : @update_metadata.call(kw_with_jwt_uri)
273
284
  return c.server_streamer(req, **md, &blk) unless return_op
274
285
 
275
286
  # return the operation view of the active_call; define #execute
@@ -367,14 +378,15 @@ module GRPC
367
378
  # @param requests [Object] an Enumerable of requests to send
368
379
  # @param marshal [Function] f(obj)->string that marshals requests
369
380
  # @param unmarshal [Function] f(string)->obj that unmarshals responses
370
- # @param deadline [Numeric] (optional) the max completion time in seconds
381
+ # @param timeout [Numeric] (optional) the max completion time in seconds
371
382
  # @param blk [Block] when provided, is executed for each response
372
383
  # @param return_op [true|false] return an Operation if true
373
384
  # @return [Enumerator|nil|Operation] as discussed above
374
- def bidi_streamer(method, requests, marshal, unmarshal, deadline = nil,
385
+ def bidi_streamer(method, requests, marshal, unmarshal, timeout = nil,
375
386
  return_op: false, **kw, &blk)
376
- c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
377
- md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
387
+ c = new_active_call(method, marshal, unmarshal, timeout)
388
+ kw_with_jwt_uri = self.class.update_with_jwt_aud_uri(kw, @host, method)
389
+ md = @update_metadata.nil? ? kw : @update_metadata.call(kw_with_jwt_uri)
378
390
  return c.bidi_streamer(requests, **md, &blk) unless return_op
379
391
 
380
392
  # return the operation view of the active_call; define #execute
@@ -390,15 +402,14 @@ module GRPC
390
402
 
391
403
  # Creates a new active stub
392
404
  #
393
- # @param ch [GRPC::Channel] the channel used to create the stub.
405
+ # @param method [string] the method being called.
394
406
  # @param marshal [Function] f(obj)->string that marshals requests
395
407
  # @param unmarshal [Function] f(string)->obj that unmarshals responses
396
- # @param deadline [TimeConst]
397
- def new_active_call(ch, marshal, unmarshal, deadline = nil)
398
- absolute_deadline = Core::TimeConsts.from_relative_time(deadline)
399
- call = @ch.create_call(ch, @host, absolute_deadline)
400
- ActiveCall.new(call, @queue, marshal, unmarshal, absolute_deadline,
401
- started: false)
408
+ # @param timeout [TimeConst]
409
+ def new_active_call(method, marshal, unmarshal, timeout = nil)
410
+ deadline = from_relative_time(timeout.nil? ? @timeout : timeout)
411
+ call = @ch.create_call(@queue, method, @host, deadline)
412
+ ActiveCall.new(call, @queue, marshal, unmarshal, deadline, started: false)
402
413
  end
403
414
  end
404
415
  end
@@ -80,26 +80,21 @@ module GRPC
80
80
  else # is a bidi_stream
81
81
  active_call.run_server_bidi(mth)
82
82
  end
83
- send_status(active_call, OK, 'OK')
84
- active_call.finished
83
+ send_status(active_call, OK, 'OK', **active_call.output_metadata)
85
84
  rescue BadStatus => e
86
- # this is raised by handlers that want GRPC to send an application
87
- # error code and detail message.
85
+ # this is raised by handlers that want GRPC to send an application error
86
+ # code and detail message and some additional app-specific metadata.
88
87
  logger.debug("app err: #{active_call}, status:#{e.code}:#{e.details}")
89
- send_status(active_call, e.code, e.details)
88
+ send_status(active_call, e.code, e.details, **e.metadata)
90
89
  rescue Core::CallError => e
91
90
  # This is raised by GRPC internals but should rarely, if ever happen.
92
91
  # Log it, but don't notify the other endpoint..
93
92
  logger.warn("failed call: #{active_call}\n#{e}")
94
- rescue OutOfTime
93
+ rescue Core::OutOfTime
95
94
  # This is raised when active_call#method.call exceeds the deadline
96
95
  # event. Send a status of deadline exceeded
97
96
  logger.warn("late call: #{active_call}")
98
97
  send_status(active_call, DEADLINE_EXCEEDED, 'late')
99
- rescue Core::EventError => e
100
- # This is raised by GRPC internals but should rarely, if ever happen.
101
- # Log it, but don't notify the other endpoint..
102
- logger.warn("failed call: #{active_call}\n#{e}")
103
98
  rescue StandardError => e
104
99
  # This will usually be an unhandled error in the handling code.
105
100
  # Send back a UNKNOWN status to the client
@@ -140,9 +135,9 @@ module GRPC
140
135
  "##{mth.name}: bad arg count; got:#{mth.arity}, want:#{want}, #{msg}"
141
136
  end
142
137
 
143
- def send_status(active_client, code, details)
138
+ def send_status(active_client, code, details, **kw)
144
139
  details = 'Not sure why' if details.nil?
145
- active_client.send_status(code, details)
140
+ active_client.send_status(code, details, code == OK, **kw)
146
141
  rescue StandardError => e
147
142
  logger.warn("Could not send status #{code}:#{details}")
148
143
  logger.warn(e)
@@ -31,14 +31,142 @@ require 'grpc/grpc'
31
31
  require 'grpc/generic/active_call'
32
32
  require 'grpc/generic/service'
33
33
  require 'thread'
34
- require 'xray/thread_dump_signal_handler'
34
+
35
+ # A global that contains signals the gRPC servers should respond to.
36
+ $grpc_signals = []
35
37
 
36
38
  # GRPC contains the General RPC module.
37
39
  module GRPC
40
+ # Handles the signals in $grpc_signals.
41
+ #
42
+ # @return false if the server should exit, true if not.
43
+ def handle_signals
44
+ loop do
45
+ sig = $grpc_signals.shift
46
+ case sig
47
+ when 'INT'
48
+ return false
49
+ when 'TERM'
50
+ return false
51
+ end
52
+ end
53
+ true
54
+ end
55
+ module_function :handle_signals
56
+
57
+ # Sets up a signal handler that adds signals to the signal handling global.
58
+ #
59
+ # Signal handlers should do as little as humanly possible.
60
+ # Here, they just add themselves to $grpc_signals
61
+ #
62
+ # RpcServer (and later other parts of gRPC) monitors the signals
63
+ # $grpc_signals in its own non-signal context.
64
+ def trap_signals
65
+ %w(INT TERM).each { |sig| trap(sig) { $grpc_signals << sig } }
66
+ end
67
+ module_function :trap_signals
68
+
69
+ # Pool is a simple thread pool.
70
+ class Pool
71
+ # Default keep alive period is 1s
72
+ DEFAULT_KEEP_ALIVE = 1
73
+
74
+ def initialize(size, keep_alive: DEFAULT_KEEP_ALIVE)
75
+ fail 'pool size must be positive' unless size > 0
76
+ @jobs = Queue.new
77
+ @size = size
78
+ @stopped = false
79
+ @stop_mutex = Mutex.new
80
+ @stop_cond = ConditionVariable.new
81
+ @workers = []
82
+ @keep_alive = keep_alive
83
+ end
84
+
85
+ # Returns the number of jobs waiting
86
+ def jobs_waiting
87
+ @jobs.size
88
+ end
89
+
90
+ # Runs the given block on the queue with the provided args.
91
+ #
92
+ # @param args the args passed blk when it is called
93
+ # @param blk the block to call
94
+ def schedule(*args, &blk)
95
+ fail 'already stopped' if @stopped
96
+ return if blk.nil?
97
+ logger.info('schedule another job')
98
+ @jobs << [blk, args]
99
+ end
100
+
101
+ # Starts running the jobs in the thread pool.
102
+ def start
103
+ fail 'already stopped' if @stopped
104
+ until @workers.size == @size.to_i
105
+ next_thread = Thread.new do
106
+ catch(:exit) do # allows { throw :exit } to kill a thread
107
+ loop_execute_jobs
108
+ end
109
+ remove_current_thread
110
+ end
111
+ @workers << next_thread
112
+ end
113
+ end
114
+
115
+ # Stops the jobs in the pool
116
+ def stop
117
+ logger.info('stopping, will wait for all the workers to exit')
118
+ @workers.size.times { schedule { throw :exit } }
119
+ @stopped = true
120
+ @stop_mutex.synchronize do # wait @keep_alive for workers to stop
121
+ @stop_cond.wait(@stop_mutex, @keep_alive) if @workers.size > 0
122
+ end
123
+ forcibly_stop_workers
124
+ logger.info('stopped, all workers are shutdown')
125
+ end
126
+
127
+ protected
128
+
129
+ # Forcibly shutdown any threads that are still alive.
130
+ def forcibly_stop_workers
131
+ return unless @workers.size > 0
132
+ logger.info("forcibly terminating #{@workers.size} worker(s)")
133
+ @workers.each do |t|
134
+ next unless t.alive?
135
+ begin
136
+ t.exit
137
+ rescue StandardError => e
138
+ logger.warn('error while terminating a worker')
139
+ logger.warn(e)
140
+ end
141
+ end
142
+ end
143
+
144
+ # removes the threads from workers, and signal when all the
145
+ # threads are complete.
146
+ def remove_current_thread
147
+ @stop_mutex.synchronize do
148
+ @workers.delete(Thread.current)
149
+ @stop_cond.signal if @workers.size == 0
150
+ end
151
+ end
152
+
153
+ def loop_execute_jobs
154
+ loop do
155
+ begin
156
+ blk, args = @jobs.pop
157
+ blk.call(*args)
158
+ rescue StandardError => e
159
+ logger.warn('Error in worker thread')
160
+ logger.warn(e)
161
+ end
162
+ end
163
+ end
164
+ end
165
+
38
166
  # RpcServer hosts a number of services and makes them available on the
39
167
  # network.
40
168
  class RpcServer
41
- include Core::CompletionType
169
+ include Core::CallOps
42
170
  include Core::TimeConsts
43
171
  extend ::Forwardable
44
172
 
@@ -50,6 +178,38 @@ module GRPC
50
178
  # Default max_waiting_requests size is 20
51
179
  DEFAULT_MAX_WAITING_REQUESTS = 20
52
180
 
181
+ # Default poll period is 1s
182
+ DEFAULT_POLL_PERIOD = 1
183
+
184
+ # Signal check period is 0.25s
185
+ SIGNAL_CHECK_PERIOD = 0.25
186
+
187
+ # setup_cq is used by #initialize to construct a Core::CompletionQueue from
188
+ # its arguments.
189
+ def self.setup_cq(alt_cq)
190
+ return Core::CompletionQueue.new if alt_cq.nil?
191
+ unless alt_cq.is_a? Core::CompletionQueue
192
+ fail(TypeError, '!CompletionQueue')
193
+ end
194
+ alt_cq
195
+ end
196
+
197
+ # setup_srv is used by #initialize to construct a Core::Server from its
198
+ # arguments.
199
+ def self.setup_srv(alt_srv, cq, **kw)
200
+ return Core::Server.new(cq, kw) if alt_srv.nil?
201
+ fail(TypeError, '!Server') unless alt_srv.is_a? Core::Server
202
+ alt_srv
203
+ end
204
+
205
+ # setup_connect_md_proc is used by #initialize to validate the
206
+ # connect_md_proc.
207
+ def self.setup_connect_md_proc(a_proc)
208
+ return nil if a_proc.nil?
209
+ fail(TypeError, '!Proc') unless a_proc.is_a? Proc
210
+ a_proc
211
+ end
212
+
53
213
  # Creates a new RpcServer.
54
214
  #
55
215
  # The RPC server is configured using keyword arguments.
@@ -77,30 +237,21 @@ module GRPC
77
237
  # * max_waiting_requests: the maximum number of requests that are not
78
238
  # being handled to allow. When this limit is exceeded, the server responds
79
239
  # with not available to new requests
240
+ #
241
+ # * connect_md_proc:
242
+ # when non-nil is a proc for determining metadata to send back to the client
243
+ # on receiving an invocation req. The proc signature is:
244
+ # {key: val, ..} func(method_name, {key: val, ...})
80
245
  def initialize(pool_size:DEFAULT_POOL_SIZE,
81
246
  max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
82
- poll_period:INFINITE_FUTURE,
247
+ poll_period:DEFAULT_POLL_PERIOD,
83
248
  completion_queue_override:nil,
84
249
  server_override:nil,
250
+ connect_md_proc:nil,
85
251
  **kw)
86
- if completion_queue_override.nil?
87
- cq = Core::CompletionQueue.new
88
- else
89
- cq = completion_queue_override
90
- unless cq.is_a? Core::CompletionQueue
91
- fail(ArgumentError, 'not a CompletionQueue')
92
- end
93
- end
94
- @cq = cq
95
-
96
- if server_override.nil?
97
- srv = Core::Server.new(@cq, kw)
98
- else
99
- srv = server_override
100
- fail(ArgumentError, 'not a Server') unless srv.is_a? Core::Server
101
- end
102
- @server = srv
103
-
252
+ @cq = RpcServer.setup_cq(completion_queue_override)
253
+ @server = RpcServer.setup_srv(server_override, @cq, **kw)
254
+ @connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc)
104
255
  @pool_size = pool_size
105
256
  @max_waiting_requests = max_waiting_requests
106
257
  @poll_period = poll_period
@@ -117,6 +268,13 @@ module GRPC
117
268
  return unless @running
118
269
  @stopped = true
119
270
  @pool.stop
271
+
272
+ # TODO: uncomment this:
273
+ #
274
+ # This segfaults in the c layer, so its commented out for now. Shutdown
275
+ # still occurs, but the c layer has to do the cleanup.
276
+ #
277
+ # @server.close
120
278
  end
121
279
 
122
280
  # determines if the server is currently running
@@ -139,7 +297,21 @@ module GRPC
139
297
  running?
140
298
  end
141
299
 
142
- # determines if the server is currently stopped
300
+ # Runs the server in its own thread, then waits for signal INT or TERM on
301
+ # the current thread to terminate it.
302
+ def run_till_terminated
303
+ GRPC.trap_signals
304
+ t = Thread.new { run }
305
+ wait_till_running
306
+ loop do
307
+ sleep SIGNAL_CHECK_PERIOD
308
+ break unless GRPC.handle_signals
309
+ end
310
+ stop
311
+ t.join
312
+ end
313
+
314
+ # Determines if the server is currently stopped
143
315
  def stopped?
144
316
  @stopped ||= false
145
317
  end
@@ -202,154 +374,71 @@ module GRPC
202
374
  end
203
375
  @pool.start
204
376
  @server.start
205
- server_tag = Object.new
206
- until stopped?
207
- @server.request_call(server_tag)
208
- ev = @cq.pluck(server_tag, @poll_period)
209
- next if ev.nil?
210
- if ev.type != SERVER_RPC_NEW
211
- logger.warn("bad evt: got:#{ev.type}, want:#{SERVER_RPC_NEW}")
212
- ev.close
213
- next
214
- end
215
- c = new_active_server_call(ev.call, ev.result)
216
- unless c.nil?
217
- mth = ev.result.method.to_sym
218
- ev.close
219
- @pool.schedule(c) do |call|
220
- rpc_descs[mth].run_server_method(call, rpc_handlers[mth])
221
- end
222
- end
223
- end
377
+ loop_handle_server_calls
224
378
  @running = false
225
379
  end
226
380
 
227
- def new_active_server_call(call, new_server_rpc)
228
- # Accept the call. This is necessary even if a status is to be sent
229
- # back immediately
230
- finished_tag = Object.new
231
- call_queue = Core::CompletionQueue.new
232
- call.metadata = new_server_rpc.metadata # store the metadata
233
- call.server_accept(call_queue, finished_tag)
234
- call.server_end_initial_metadata
235
-
236
- # Send UNAVAILABLE if there are too many unprocessed jobs
381
+ # Sends UNAVAILABLE if there are too many unprocessed jobs
382
+ def available?(an_rpc)
237
383
  jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
238
384
  logger.info("waiting: #{jobs_count}, max: #{max}")
239
- if @pool.jobs_waiting > @max_waiting_requests
240
- logger.warn("NOT AVAILABLE: too many jobs_waiting: #{new_server_rpc}")
241
- noop = proc { |x| x }
242
- c = ActiveCall.new(call, call_queue, noop, noop,
243
- new_server_rpc.deadline,
244
- finished_tag: finished_tag)
245
- c.send_status(StatusCodes::UNAVAILABLE, '')
246
- return nil
247
- end
248
-
249
- # Send NOT_FOUND if the method does not exist
250
- mth = new_server_rpc.method.to_sym
251
- unless rpc_descs.key?(mth)
252
- logger.warn("NOT_FOUND: #{new_server_rpc}")
253
- noop = proc { |x| x }
254
- c = ActiveCall.new(call, call_queue, noop, noop,
255
- new_server_rpc.deadline,
256
- finished_tag: finished_tag)
257
- c.send_status(StatusCodes::NOT_FOUND, '')
258
- return nil
259
- end
260
-
261
- # Create the ActiveCall
262
- rpc_desc = rpc_descs[mth]
263
- logger.info("deadline is #{new_server_rpc.deadline}; (now=#{Time.now})")
264
- ActiveCall.new(call, call_queue,
265
- rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
266
- new_server_rpc.deadline, finished_tag: finished_tag)
385
+ return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
386
+ logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
387
+ noop = proc { |x| x }
388
+ c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
389
+ c.send_status(StatusCodes::UNAVAILABLE, '')
390
+ nil
267
391
  end
268
392
 
269
- # Pool is a simple thread pool for running server requests.
270
- class Pool
271
- def initialize(size)
272
- fail 'pool size must be positive' unless size > 0
273
- @jobs = Queue.new
274
- @size = size
275
- @stopped = false
276
- @stop_mutex = Mutex.new
277
- @stop_cond = ConditionVariable.new
278
- @workers = []
279
- end
280
-
281
- # Returns the number of jobs waiting
282
- def jobs_waiting
283
- @jobs.size
284
- end
285
-
286
- # Runs the given block on the queue with the provided args.
287
- #
288
- # @param args the args passed blk when it is called
289
- # @param blk the block to call
290
- def schedule(*args, &blk)
291
- fail 'already stopped' if @stopped
292
- return if blk.nil?
293
- logger.info('schedule another job')
294
- @jobs << [blk, args]
295
- end
393
+ # Sends NOT_FOUND if the method can't be found
394
+ def found?(an_rpc)
395
+ mth = an_rpc.method.to_sym
396
+ return an_rpc if rpc_descs.key?(mth)
397
+ logger.warn("NOT_FOUND: #{an_rpc}")
398
+ noop = proc { |x| x }
399
+ c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
400
+ c.send_status(StatusCodes::NOT_FOUND, '')
401
+ nil
402
+ end
296
403
 
297
- # Starts running the jobs in the thread pool.
298
- def start
299
- fail 'already stopped' if @stopped
300
- until @workers.size == @size.to_i
301
- next_thread = Thread.new do
302
- catch(:exit) do # allows { throw :exit } to kill a thread
303
- loop do
304
- begin
305
- blk, args = @jobs.pop
306
- blk.call(*args)
307
- rescue StandardError => e
308
- logger.warn('Error in worker thread')
309
- logger.warn(e)
310
- end
311
- end
312
- end
313
-
314
- # removes the threads from workers, and signal when all the
315
- # threads are complete.
316
- @stop_mutex.synchronize do
317
- @workers.delete(Thread.current)
318
- @stop_cond.signal if @workers.size == 0
319
- end
404
+ # handles calls to the server
405
+ def loop_handle_server_calls
406
+ fail 'not running' unless @running
407
+ request_call_tag = Object.new
408
+ until stopped?
409
+ deadline = from_relative_time(@poll_period)
410
+ an_rpc = @server.request_call(@cq, request_call_tag, deadline)
411
+ c = new_active_server_call(an_rpc)
412
+ unless c.nil?
413
+ mth = an_rpc.method.to_sym
414
+ @pool.schedule(c) do |call|
415
+ rpc_descs[mth].run_server_method(call, rpc_handlers[mth])
320
416
  end
321
- @workers << next_thread
322
417
  end
323
418
  end
419
+ end
324
420
 
325
- # Stops the jobs in the pool
326
- def stop
327
- logger.info('stopping, will wait for all the workers to exit')
328
- @workers.size.times { schedule { throw :exit } }
329
- @stopped = true
330
-
331
- # TODO: allow configuration of the keepalive period
332
- keep_alive = 5
333
- @stop_mutex.synchronize do
334
- @stop_cond.wait(@stop_mutex, keep_alive) if @workers.size > 0
335
- end
336
-
337
- # Forcibly shutdown any threads that are still alive.
338
- if @workers.size > 0
339
- logger.warn("forcibly terminating #{@workers.size} worker(s)")
340
- @workers.each do |t|
341
- next unless t.alive?
342
- begin
343
- t.exit
344
- rescue StandardError => e
345
- logger.warn('error while terminating a worker')
346
- logger.warn(e)
347
- end
348
- end
349
- end
421
+ def new_active_server_call(an_rpc)
422
+ return nil if an_rpc.nil? || an_rpc.call.nil?
350
423
 
351
- logger.info('stopped, all workers are shutdown')
424
+ # allow the metadata to be accessed from the call
425
+ handle_call_tag = Object.new
426
+ an_rpc.call.metadata = an_rpc.metadata # attaches md to call for handlers
427
+ connect_md = nil
428
+ unless @connect_md_proc.nil?
429
+ connect_md = @connect_md_proc.call(an_rpc.method, an_rpc.metadata)
352
430
  end
431
+ an_rpc.call.run_batch(@cq, handle_call_tag, INFINITE_FUTURE,
432
+ SEND_INITIAL_METADATA => connect_md)
433
+ return nil unless available?(an_rpc)
434
+ return nil unless found?(an_rpc)
435
+
436
+ # Create the ActiveCall
437
+ logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})")
438
+ rpc_desc = rpc_descs[an_rpc.method.to_sym]
439
+ ActiveCall.new(an_rpc.call, @cq,
440
+ rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
441
+ an_rpc.deadline)
353
442
  end
354
443
 
355
444
  protected
@@ -362,11 +451,9 @@ module GRPC
362
451
  @rpc_handlers ||= {}
363
452
  end
364
453
 
365
- private
366
-
367
454
  def assert_valid_service_class(cls)
368
455
  unless cls.include?(GenericService)
369
- fail "#{cls} should 'include GenericService'"
456
+ fail "#{cls} must 'include GenericService'"
370
457
  end
371
458
  if cls.rpc_descs.size == 0
372
459
  fail "#{cls} should specify some rpc descriptions"
@@ -376,21 +463,17 @@ module GRPC
376
463
 
377
464
  def add_rpc_descs_for(service)
378
465
  cls = service.is_a?(Class) ? service : service.class
379
- specs = rpc_descs
380
- handlers = rpc_handlers
466
+ specs, handlers = rpc_descs, rpc_handlers
381
467
  cls.rpc_descs.each_pair do |name, spec|
382
468
  route = "/#{cls.service_name}/#{name}".to_sym
383
- if specs.key? route
384
- fail "Cannot add rpc #{route} from #{spec}, already registered"
469
+ fail "already registered: rpc #{route} from #{spec}" if specs.key? route
470
+ specs[route] = spec
471
+ if service.is_a?(Class)
472
+ handlers[route] = cls.new.method(name.to_s.underscore.to_sym)
385
473
  else
386
- specs[route] = spec
387
- if service.is_a?(Class)
388
- handlers[route] = cls.new.method(name.to_s.underscore.to_sym)
389
- else
390
- handlers[route] = service.method(name.to_s.underscore.to_sym)
391
- end
392
- logger.info("handling #{route} with #{handlers[route]}")
474
+ handlers[route] = service.method(name.to_s.underscore.to_sym)
393
475
  end
476
+ logger.info("handling #{route} with #{handlers[route]}")
394
477
  end
395
478
  end
396
479
  end