grpc 0.6.1 → 0.9.2
Potentially problematic release: this version of grpc might be problematic.
- checksums.yaml +4 -4
- data/.rubocop_todo.yml +3 -3
- data/README.md +41 -39
- data/bin/apis/pubsub_demo.rb +2 -2
- data/bin/interop/interop_client.rb +6 -4
- data/bin/interop/interop_server.rb +7 -4
- data/bin/math.proto +7 -7
- data/bin/math_client.rb +22 -22
- data/bin/math_server.rb +6 -6
- data/bin/noproto_client.rb +4 -4
- data/bin/noproto_server.rb +3 -3
- data/ext/grpc/extconf.rb +20 -5
- data/ext/grpc/rb_byte_buffer.c +5 -4
- data/ext/grpc/rb_byte_buffer.h +2 -1
- data/ext/grpc/rb_call.c +5 -6
- data/ext/grpc/rb_call.h +2 -1
- data/ext/grpc/rb_channel.c +1 -1
- data/ext/grpc/rb_channel.h +2 -1
- data/ext/grpc/rb_channel_args.c +2 -1
- data/ext/grpc/rb_channel_args.h +2 -1
- data/ext/grpc/rb_completion_queue.c +7 -49
- data/ext/grpc/rb_completion_queue.h +4 -3
- data/ext/grpc/rb_credentials.c +1 -1
- data/ext/grpc/rb_credentials.h +2 -1
- data/ext/grpc/rb_grpc.h +2 -1
- data/ext/grpc/rb_server.c +10 -11
- data/ext/grpc/rb_server.h +2 -1
- data/ext/grpc/rb_server_credentials.c +1 -1
- data/ext/grpc/rb_server_credentials.h +2 -1
- data/grpc.gemspec +1 -1
- data/lib/grpc/core/time_consts.rb +1 -1
- data/lib/grpc/generic/active_call.rb +13 -9
- data/lib/grpc/generic/bidi_call.rb +37 -41
- data/lib/grpc/generic/rpc_desc.rb +8 -7
- data/lib/grpc/generic/rpc_server.rb +55 -42
- data/lib/grpc/generic/service.rb +22 -24
- data/lib/grpc/logconfig.rb +4 -1
- data/lib/grpc/version.rb +1 -1
- data/spec/completion_queue_spec.rb +0 -32
- data/spec/generic/rpc_server_pool_spec.rb +2 -2
- data/spec/generic/rpc_server_spec.rb +34 -12
- data/spec/generic/service_spec.rb +9 -9
- metadata +4 -4
data/grpc.gemspec
CHANGED
@@ -34,7 +34,7 @@ Gem::Specification.new do |s|
   s.add_development_dependency 'rake', '~> 10.4'
   s.add_development_dependency 'rake-compiler', '~> 0.9'
   s.add_development_dependency 'rspec', '~> 3.2'
-  s.add_development_dependency 'rubocop', '~> 0.30'
+  s.add_development_dependency 'rubocop', '~> 0.30.0'
 
   s.extensions = %w(ext/grpc/extconf.rb)
 end
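The only gemspec change tightens the rubocop pin from '~> 0.30' (any 0.x release at or above 0.30) to '~> 0.30.0' (the 0.30.x patch series only). A quick illustration of the difference using RubyGems' own requirement class:

require 'rubygems'

# '~> 0.30' permits any release >= 0.30 and < 1.0
Gem::Requirement.new('~> 0.30').satisfied_by?(Gem::Version.new('0.31.0'))   # => true

# '~> 0.30.0' permits only the 0.30.x patch series
Gem::Requirement.new('~> 0.30.0').satisfied_by?(Gem::Version.new('0.31.0')) # => false
Gem::Requirement.new('~> 0.30.0').satisfied_by?(Gem::Version.new('0.30.1')) # => true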
data/lib/grpc/generic/active_call.rb
CHANGED
@@ -39,6 +39,7 @@ class Struct
     return nil if status.nil?
     fail GRPC::Cancelled if status.code == GRPC::Core::StatusCodes::CANCELLED
     if status.code != GRPC::Core::StatusCodes::OK
+      GRPC.logger.debug("Failing with status #{status}")
       # raise BadStatus, propagating the metadata if present.
       md = status.metadata
       with_sym_keys = Hash[md.each_pair.collect { |x, y| [x.to_sym, y] }]
@@ -54,7 +55,6 @@ module GRPC
   # The ActiveCall class provides simple methods for sending marshallable
   # data to a call
   class ActiveCall
-    include Core::StatusCodes
     include Core::TimeConsts
     include Core::CallOps
     extend Forwardable
@@ -128,6 +128,11 @@ module GRPC
       @output_metadata ||= {}
     end
 
+    # cancelled indicates if the call was cancelled
+    def cancelled
+      !@call.status.nil? && @call.status.code == Core::StatusCodes::CANCELLED
+    end
+
     # multi_req_view provides a restricted view of this ActiveCall for use
     # in a server client-streaming handler.
     def multi_req_view
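The new ActiveCall#cancelled accessor just inspects the status recorded on the call. A self-contained sketch of the same check, where OpenStruct stands in for the real status struct and CANCELLED is gRPC status code 1:

require 'ostruct'

CANCELLED = 1  # the gRPC CANCELLED status code

# mirrors the new accessor's logic: no status recorded yet means not cancelled
def cancelled?(status)
  !status.nil? && status.code == CANCELLED
end

puts cancelled?(nil)                              # => false
puts cancelled?(OpenStruct.new(code: 0))          # => false (OK)
puts cancelled?(OpenStruct.new(code: CANCELLED))  # => true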
@@ -161,6 +166,7 @@ module GRPC
       ops[RECV_STATUS_ON_CLIENT] = nil if assert_finished
       batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
       return unless assert_finished
+      @call.status = batch_result.status
       batch_result.check_status
     end
 
@@ -177,6 +183,7 @@ module GRPC
           @call.metadata.merge!(batch_result.status.metadata)
         end
       end
+      @call.status = batch_result.status
       batch_result.check_status
     end
 
@@ -188,7 +195,7 @@ module GRPC
     # @param marshalled [false, true] indicates if the object is already
     # marshalled.
     def remote_send(req, marshalled = false)
-      logger.debug("sending #{req}, marshalled? #{marshalled}")
+      GRPC.logger.debug("sending #{req}, marshalled? #{marshalled}")
       if marshalled
         payload = req
       else
@@ -230,14 +237,14 @@ module GRPC
         @call.metadata = batch_result.metadata
         @metadata_tag = nil
       end
-      logger.debug("received req: #{batch_result}")
+      GRPC.logger.debug("received req: #{batch_result}")
       unless batch_result.nil? || batch_result.message.nil?
-        logger.debug("received req.to_s: #{batch_result.message}")
+        GRPC.logger.debug("received req.to_s: #{batch_result.message}")
         res = @unmarshal.call(batch_result.message)
-        logger.debug("received_req (unmarshalled): #{res.inspect}")
+        GRPC.logger.debug("received_req (unmarshalled): #{res.inspect}")
         return res
       end
-      logger.debug('found nil; the final response has been sent')
+      GRPC.logger.debug('found nil; the final response has been sent')
       nil
     end
 
@@ -409,9 +416,6 @@ module GRPC
       start_call(**kw) unless @started
       bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline)
       bd.run_on_client(requests, &blk)
-    rescue GRPC::Core::CallError => e
-      finished # checks for Cancelled
-      raise e
     end
 
     # run_server_bidi orchestrates a BiDi stream processing on a server.
data/lib/grpc/generic/bidi_call.rb
CHANGED
@@ -78,11 +78,9 @@ module GRPC
     # @param requests the Enumerable of requests to send
     # @return an Enumerator of requests to yield
     def run_on_client(requests, &blk)
-      @enq_th = start_write_loop(requests)
+      @enq_th = Thread.new { write_loop(requests) }
       @loop_th = start_read_loop
-
-      return replies if blk.nil?
-      replies.each { |r| blk.call(r) }
+      each_queued_msg(&blk)
     end
 
     # Begins orchestration of the Bidi stream for a server generating replies.
@@ -98,8 +96,8 @@ module GRPC
     # @param gen_each_reply [Proc] generates the BiDi stream replies.
     def run_on_server(gen_each_reply)
       replys = gen_each_reply.call(each_queued_msg)
-      @enq_th = start_write_loop(replys, is_client: false)
       @loop_th = start_read_loop
+      write_loop(replys, is_client: false)
     end
 
     private
@@ -115,78 +113,76 @@ module GRPC
       return enum_for(:each_queued_msg) unless block_given?
       count = 0
       loop do
-        logger.debug("each_queued_msg: waiting##{count}")
+        GRPC.logger.debug("each_queued_msg: waiting##{count}")
         count += 1
         req = @readq.pop
-        logger.debug("each_queued_msg: req = #{req}")
+        GRPC.logger.debug("each_queued_msg: req = #{req}")
         throw req if req.is_a? StandardError
         break if req.equal?(END_OF_READS)
         yield req
       end
-      @enq_th.join if @enq_th.alive?
     end
 
[old lines 129-138, the start of the removed start_write_loop, are not captured in this extract]
-            payload = @marshal.call(req)
-            @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
-                            SEND_MESSAGE => payload)
-          end
-          if is_client
-            logger.debug("bidi-write-loop: sent #{count}, waiting to finish")
-            batch_result = @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
-                                           SEND_CLOSE_FROM_CLIENT => nil,
-                                           RECV_STATUS_ON_CLIENT => nil)
-            batch_result.check_status
-          end
-        rescue StandardError => e
-          logger.warn('bidi-write_loop: failed')
-          logger.warn(e)
-          raise e
-        end
+    def write_loop(requests, is_client: true)
+      GRPC.logger.debug('bidi-write-loop: starting')
+      write_tag = Object.new
+      count = 0
+      requests.each do |req|
+        GRPC.logger.debug("bidi-write-loop: #{count}")
+        count += 1
+        payload = @marshal.call(req)
+        @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
+                        SEND_MESSAGE => payload)
       end
+      GRPC.logger.debug("bidi-write-loop: #{count} writes done")
+      if is_client
+        GRPC.logger.debug("bidi-write-loop: client sent #{count}, waiting")
+        batch_result = @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
+                                       SEND_CLOSE_FROM_CLIENT => nil,
+                                       RECV_STATUS_ON_CLIENT => nil)
+        @call.status = batch_result.status
+        batch_result.check_status
+        GRPC.logger.debug("bidi-write-loop: done status #{@call.status}")
+      end
+      GRPC.logger.debug('bidi-write-loop: finished')
+    rescue StandardError => e
+      GRPC.logger.warn('bidi-write-loop: failed')
+      GRPC.logger.warn(e)
+      raise e
     end
 
     # starts the read loop
     def start_read_loop
       Thread.new do
+        GRPC.logger.debug('bidi-read-loop: starting')
         begin
           read_tag = Object.new
           count = 0
-
           # queue the initial read before beginning the loop
           loop do
-            logger.debug("bidi-read-loop: #{count}")
+            GRPC.logger.debug("bidi-read-loop: #{count}")
             count += 1
             # TODO: ensure metadata is read if available, currently it's not
            batch_result = @call.run_batch(@cq, read_tag, INFINITE_FUTURE,
                                           RECV_MESSAGE => nil)
             # handle the next message
             if batch_result.message.nil?
+              GRPC.logger.debug("bidi-read-loop: null batch #{batch_result}")
               @readq.push(END_OF_READS)
-              logger.debug('bidi-read-loop: done reading!')
+              GRPC.logger.debug('bidi-read-loop: done reading!')
               break
             end
 
             # push the latest read onto the queue and continue reading
-            logger.debug("received req: #{batch_result.message}")
             res = @unmarshal.call(batch_result.message)
             @readq.push(res)
           end
-
         rescue StandardError => e
-          logger.warn('bidi: read-loop failed')
-          logger.warn(e)
+          GRPC.logger.warn('bidi: read-loop failed')
+          GRPC.logger.warn(e)
           @readq.push(e) # let each_queued_msg terminate with this error
         end
+        GRPC.logger.debug('bidi-read-loop: finished')
       end
     end
   end
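The refactor keeps the read side as a background thread feeding a queue, while the write loop now runs on the caller's thread (wrapped in a new thread only on the client). A minimal, self-contained sketch of the read-queue pattern that each_queued_msg relies on (names here are stand-ins, not the gem's code):

# a sentinel object marks the end of the stream, like BidiCall's END_OF_READS
END_OF_READS = Object.new
readq = Queue.new

reader = Thread.new do
  %w(a b c).each { |msg| readq.push(msg) }  # stands in for RECV_MESSAGE results
  readq.push(END_OF_READS)                  # what the read loop does at EOF
end

loop do
  msg = readq.pop
  break if msg.equal?(END_OF_READS)
  puts "got #{msg}"
end
reader.join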
data/lib/grpc/generic/rpc_desc.rb
CHANGED
@@ -84,22 +84,22 @@ module GRPC
     rescue BadStatus => e
       # this is raised by handlers that want GRPC to send an application error
       # code and detail message and some additional app-specific metadata.
-      logger.debug("app err:#{active_call}, status:#{e.code}:#{e.details}")
+      GRPC.logger.debug("app err:#{active_call}, status:#{e.code}:#{e.details}")
       send_status(active_call, e.code, e.details, **e.metadata)
     rescue Core::CallError => e
       # This is raised by GRPC internals but should rarely, if ever happen.
       # Log it, but don't notify the other endpoint..
-      logger.warn("failed call: #{active_call}\n#{e}")
+      GRPC.logger.warn("failed call: #{active_call}\n#{e}")
     rescue Core::OutOfTime
       # This is raised when active_call#method.call exceeeds the deadline
       # event. Send a status of deadline exceeded
-      logger.warn("late call: #{active_call}")
+      GRPC.logger.warn("late call: #{active_call}")
       send_status(active_call, DEADLINE_EXCEEDED, 'late')
     rescue StandardError => e
       # This will usuaally be an unhandled error in the handling code.
       # Send back a UNKNOWN status to the client
-      logger.warn("failed handler: #{active_call}; sending status:UNKNOWN")
-      logger.warn(e)
+      GRPC.logger.warn("failed handler: #{active_call}; sending status:UNKNOWN")
+      GRPC.logger.warn(e)
       send_status(active_call, UNKNOWN, 'no reason given')
     end
 
@@ -137,10 +137,11 @@ module GRPC
 
     def send_status(active_client, code, details, **kw)
       details = 'Not sure why' if details.nil?
+      GRPC.logger.debug("Sending status #{code}:#{details}")
       active_client.send_status(code, details, code == OK, **kw)
     rescue StandardError => e
-      logger.warn("Could not send status #{code}:#{details}")
-      logger.warn(e)
+      GRPC.logger.warn("Could not send status #{code}:#{details}")
+      GRPC.logger.warn(e)
     end
   end
 end
data/lib/grpc/generic/rpc_server.rb
CHANGED
@@ -76,7 +76,7 @@ module GRPC
       @jobs = Queue.new
       @size = size
       @stopped = false
-      @stop_mutex = Mutex.new
+      @stop_mutex = Mutex.new # needs to be held when accessing @stopped
       @stop_cond = ConditionVariable.new
       @workers = []
       @keep_alive = keep_alive
@@ -92,10 +92,15 @@ module GRPC
     # @param args the args passed blk when it is called
     # @param blk the block to call
     def schedule(*args, &blk)
-      fail 'already stopped' if @stopped
       return if blk.nil?
-      logger.info('schedule another job')
-      @jobs << [blk, args]
+      @stop_mutex.synchronize do
+        if @stopped
+          GRPC.logger.warn('did not schedule job, already stopped')
+          return
+        end
+        GRPC.logger.info('schedule another job')
+        @jobs << [blk, args]
+      end
     end
 
     # Starts running the jobs in the thread pool.
@@ -114,14 +119,14 @@ module GRPC
 
     # Stops the jobs in the pool
     def stop
-      logger.info('stopping, will wait for all the workers to exit')
+      GRPC.logger.info('stopping, will wait for all the workers to exit')
       @workers.size.times { schedule { throw :exit } }
-      @stopped = true
       @stop_mutex.synchronize do # wait @keep_alive for works to stop
+        @stopped = true
        @stop_cond.wait(@stop_mutex, @keep_alive) if @workers.size > 0
       end
       forcibly_stop_workers
-      logger.info('stopped, all workers are shutdown')
+      GRPC.logger.info('stopped, all workers are shutdown')
     end
 
     protected
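Pool#schedule and Pool#stop now coordinate through @stop_mutex: the @stopped check and the enqueue happen under the same lock that #stop holds while flipping the flag, so a job can no longer slip into the queue after shutdown has begun. A toy sketch of that pattern (hypothetical names, not the gem's code):

stop_mutex = Mutex.new
stopped = false
jobs = Queue.new

schedule = lambda do |job|
  stop_mutex.synchronize do
    if stopped
      puts 'did not schedule job, already stopped'
    else
      jobs << job
    end
  end
end

schedule.call(:early_job)
stop_mutex.synchronize { stopped = true }  # what Pool#stop now does
schedule.call(:late_job)
puts jobs.size  # => 1, only the job scheduled before stop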
@@ -129,14 +134,14 @@ module GRPC
     # Forcibly shutdown any threads that are still alive.
     def forcibly_stop_workers
       return unless @workers.size > 0
-      logger.info("forcibly terminating #{@workers.size} worker(s)")
+      GRPC.logger.info("forcibly terminating #{@workers.size} worker(s)")
       @workers.each do |t|
         next unless t.alive?
         begin
           t.exit
         rescue StandardError => e
-          logger.warn('error while terminating a worker')
-          logger.warn(e)
+          GRPC.logger.warn('error while terminating a worker')
+          GRPC.logger.warn(e)
         end
       end
     end
@@ -146,7 +151,7 @@ module GRPC
     def remove_current_thread
       @stop_mutex.synchronize do
         @workers.delete(Thread.current)
-        @stop_cond.signal if @workers.size
+        @stop_cond.signal if @workers.size.zero?
       end
     end
 
@@ -156,8 +161,8 @@ module GRPC
         blk, args = @jobs.pop
         blk.call(*args)
       rescue StandardError => e
-        logger.warn('Error in worker thread')
-        logger.warn(e)
+        GRPC.logger.warn('Error in worker thread')
+        GRPC.logger.warn(e)
       end
     end
   end
@@ -249,15 +254,18 @@ module GRPC
                    server_override:nil,
                    connect_md_proc:nil,
                    **kw)
-      @cq = RpcServer.setup_cq(completion_queue_override)
-      @server = RpcServer.setup_srv(server_override, @cq, **kw)
       @connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc)
-      @pool_size = pool_size
+      @cq = RpcServer.setup_cq(completion_queue_override)
       @max_waiting_requests = max_waiting_requests
       @poll_period = poll_period
-      @run_mutex = Mutex.new
-      @run_cond = ConditionVariable.new
+      @pool_size = pool_size
       @pool = Pool.new(@pool_size)
+      @run_cond = ConditionVariable.new
+      @run_mutex = Mutex.new
+      @running = false
+      @server = RpcServer.setup_srv(server_override, @cq, **kw)
+      @stopped = false
+      @stop_mutex = Mutex.new
     end
 
     # stops a running server
@@ -266,20 +274,23 @@ module GRPC
     # server's current call loop is it's last.
     def stop
       return unless @running
-      @stopped = true
+      @stop_mutex.synchronize do
+        @stopped = true
+      end
       @pool.stop
+      @server.close
+    end
 
[old lines 272-276 are not captured in this extract]
-      # @server.close
+    # determines if the server has been stopped
+    def stopped?
+      @stop_mutex.synchronize do
+        return @stopped
+      end
     end
 
     # determines if the server is currently running
     def running?
-      @running
+      @running
     end
 
     # Is called from other threads to wait for #run to start up the server.
@@ -311,11 +322,6 @@ module GRPC
       t.join
     end
 
-    # Determines if the server is currently stopped
-    def stopped?
-      @stopped ||= false
-    end
-
     # handle registration of classes
     #
     # service is either a class that includes GRPC::GenericService and whose
@@ -364,8 +370,8 @@ module GRPC
     # - #running? returns true after this is called, until #stop cause the
     # the server to stop.
     def run
-      if rpc_descs.size
-        logger.warn('did not run as no services were present')
+      if rpc_descs.size.zero?
+        GRPC.logger.warn('did not run as no services were present')
         return
       end
       @run_mutex.synchronize do
@@ -381,9 +387,9 @@ module GRPC
     # Sends UNAVAILABLE if there are too many unprocessed jobs
     def available?(an_rpc)
       jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
-      logger.info("waiting: #{jobs_count}, max: #{max}")
+      GRPC.logger.info("waiting: #{jobs_count}, max: #{max}")
       return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
-      logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
+      GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
       noop = proc { |x| x }
       c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
       c.send_status(StatusCodes::UNAVAILABLE, '')
@@ -394,7 +400,7 @@ module GRPC
     def found?(an_rpc)
       mth = an_rpc.method.to_sym
       return an_rpc if rpc_descs.key?(mth)
-      logger.warn("NOT_FOUND: #{an_rpc}")
+      GRPC.logger.warn("NOT_FOUND: #{an_rpc}")
       noop = proc { |x| x }
       c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
       c.send_status(StatusCodes::NOT_FOUND, '')
@@ -407,7 +413,13 @@ module GRPC
       request_call_tag = Object.new
       until stopped?
         deadline = from_relative_time(@poll_period)
-        an_rpc = @server.request_call(@cq, request_call_tag, deadline)
+        begin
+          an_rpc = @server.request_call(@cq, request_call_tag, deadline)
+        rescue Core::CallError, RuntimeError => e
+          # can happen during server shutdown
+          GRPC.logger.warn("server call failed: #{e}")
+          next
+        end
         c = new_active_server_call(an_rpc)
         unless c.nil?
           mth = an_rpc.method.to_sym
|
|
434
446
|
return nil unless found?(an_rpc)
|
435
447
|
|
436
448
|
# Create the ActiveCall
|
437
|
-
logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})")
|
449
|
+
GRPC.logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})")
|
438
450
|
rpc_desc = rpc_descs[an_rpc.method.to_sym]
|
439
451
|
ActiveCall.new(an_rpc.call, @cq,
|
440
452
|
rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
|
@@ -455,7 +467,7 @@ module GRPC
|
|
455
467
|
unless cls.include?(GenericService)
|
456
468
|
fail "#{cls} must 'include GenericService'"
|
457
469
|
end
|
458
|
-
if cls.rpc_descs.size
|
470
|
+
if cls.rpc_descs.size.zero?
|
459
471
|
fail "#{cls} should specify some rpc descriptions"
|
460
472
|
end
|
461
473
|
cls.assert_rpc_descs_have_methods
|
@@ -468,12 +480,13 @@ module GRPC
         route = "/#{cls.service_name}/#{name}".to_sym
         fail "already registered: rpc #{route} from #{spec}" if specs.key? route
         specs[route] = spec
+        rpc_name = GenericService.underscore(name.to_s).to_sym
         if service.is_a?(Class)
-          handlers[route] = cls.new.method(
+          handlers[route] = cls.new.method(rpc_name)
         else
-          handlers[route] = service.method(
+          handlers[route] = service.method(rpc_name)
         end
-        logger.info("handling #{route} with #{handlers[route]}")
+        GRPC.logger.info("handling #{route} with #{handlers[route]}")
       end
     end
   end
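Handler lookup now derives the Ruby method name once, via GenericService.underscore, since rpc names are CamelCase in the proto while the implementing Ruby methods are snake_case. A rough, self-contained sketch of that kind of conversion (assumed behavior, not the gem's exact implementation):

def underscore(s)
  s.gsub(/([A-Z]+)([A-Z][a-z])/, '\1_\2')
   .gsub(/([a-z\d])([A-Z])/, '\1_\2')
   .downcase
end

puts underscore('DivMany')  # => "div_many"
puts underscore('Fib')      # => "fib"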