grpc 1.0.1-x86-mingw32 → 1.1.2-x86-mingw32

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (42) hide show
  1. checksums.yaml +4 -4
  2. data/etc/roots.pem +39 -111
  3. data/grpc_c.32.ruby +0 -0
  4. data/grpc_c.64.ruby +0 -0
  5. data/src/ruby/ext/grpc/extconf.rb +0 -1
  6. data/src/ruby/ext/grpc/rb_byte_buffer.c +8 -7
  7. data/src/ruby/ext/grpc/rb_call.c +15 -5
  8. data/src/ruby/ext/grpc/rb_channel.c +1 -1
  9. data/src/ruby/ext/grpc/rb_compression_options.c +466 -0
  10. data/src/ruby/ext/grpc/rb_compression_options.h +44 -0
  11. data/src/ruby/ext/grpc/rb_grpc.c +3 -1
  12. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +198 -190
  13. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +306 -294
  14. data/src/ruby/ext/grpc/rb_server.c +18 -12
  15. data/src/ruby/lib/grpc/2.0/grpc_c.so +0 -0
  16. data/src/ruby/lib/grpc/2.1/grpc_c.so +0 -0
  17. data/src/ruby/lib/grpc/2.2/grpc_c.so +0 -0
  18. data/src/ruby/lib/grpc/2.3/grpc_c.so +0 -0
  19. data/src/ruby/lib/grpc/2.4/grpc_c.so +0 -0
  20. data/src/ruby/lib/grpc/errors.rb +154 -2
  21. data/src/ruby/lib/grpc/generic/active_call.rb +144 -63
  22. data/src/ruby/lib/grpc/generic/bidi_call.rb +18 -2
  23. data/src/ruby/lib/grpc/generic/client_stub.rb +7 -5
  24. data/src/ruby/lib/grpc/generic/rpc_desc.rb +39 -13
  25. data/src/ruby/lib/grpc/generic/rpc_server.rb +51 -24
  26. data/src/ruby/lib/grpc/generic/service.rb +3 -2
  27. data/src/ruby/lib/grpc/grpc_c.so +0 -0
  28. data/src/ruby/lib/grpc/version.rb +1 -1
  29. data/src/ruby/pb/grpc/health/checker.rb +3 -1
  30. data/src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb +7 -0
  31. data/src/ruby/pb/test/client.rb +307 -7
  32. data/src/ruby/pb/test/server.rb +26 -1
  33. data/src/ruby/spec/compression_options_spec.rb +164 -0
  34. data/src/ruby/spec/error_sanity_spec.rb +64 -0
  35. data/src/ruby/spec/generic/active_call_spec.rb +290 -12
  36. data/src/ruby/spec/generic/client_stub_spec.rb +91 -41
  37. data/src/ruby/spec/generic/rpc_desc_spec.rb +36 -16
  38. data/src/ruby/spec/generic/rpc_server_pool_spec.rb +22 -28
  39. data/src/ruby/spec/generic/rpc_server_spec.rb +6 -6
  40. data/src/ruby/spec/pb/health/checker_spec.rb +27 -19
  41. data/src/ruby/spec/spec_helper.rb +2 -0
  42. metadata +18 -8
@@ -56,7 +56,8 @@ module GRPC
56
56
  # @param unmarshal [Function] f(string)->obj that unmarshals responses
57
57
  # @param metadata_received [true|false] indicates if metadata has already
58
58
  # been received. Should always be true for server calls
59
- def initialize(call, marshal, unmarshal, metadata_received: false)
59
+ def initialize(call, marshal, unmarshal, metadata_received: false,
60
+ req_view: nil)
60
61
  fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
61
62
  @call = call
62
63
  @marshal = marshal
@@ -67,6 +68,7 @@ module GRPC
67
68
  @writes_complete = false
68
69
  @complete = false
69
70
  @done_mutex = Mutex.new
71
+ @req_view = req_view
70
72
  end
71
73
 
72
74
  # Begins orchestration of the Bidi stream for a client sending requests.
@@ -95,7 +97,15 @@ module GRPC
95
97
  #
96
98
  # @param gen_each_reply [Proc] generates the BiDi stream replies.
97
99
  def run_on_server(gen_each_reply)
98
- replys = gen_each_reply.call(read_loop(is_client: false))
100
+ # Pass in the optional call object parameter if possible
101
+ if gen_each_reply.arity == 1
102
+ replys = gen_each_reply.call(read_loop(is_client: false))
103
+ elsif gen_each_reply.arity == 2
104
+ replys = gen_each_reply.call(read_loop(is_client: false), @req_view)
105
+ else
106
+ fail 'Illegal arity of reply generator'
107
+ end
108
+
99
109
  write_loop(replys, is_client: false)
100
110
  end
101
111
 
@@ -141,6 +151,7 @@ module GRPC
141
151
  payload = @marshal.call(req)
142
152
  # Fails if status already received
143
153
  begin
154
+ @req_view.send_initial_metadata unless @req_view.nil?
144
155
  @call.run_batch(SEND_MESSAGE => payload)
145
156
  rescue GRPC::Core::CallError => e
146
157
  # This is almost definitely caused by a status arriving while still
@@ -189,6 +200,7 @@ module GRPC
189
200
  if is_client
190
201
  batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
191
202
  @call.status = batch_result.status
203
+ @call.trailing_metadata = @call.status.metadata if @call.status
192
204
  batch_result.check_status
193
205
  GRPC.logger.debug("bidi-read-loop: done status #{@call.status}")
194
206
  end
@@ -208,6 +220,10 @@ module GRPC
208
220
  GRPC.logger.debug('bidi-read-loop: finished')
209
221
  @reads_complete = true
210
222
  finished
223
+ # Make sure that the write loop is done before finishing the call.
224
+ # Note that blocking is ok at this point because we've already received
225
+ # a status
226
+ @enq_th.join if is_client
211
227
  end
212
228
  end
213
229
  end
@@ -168,6 +168,7 @@ module GRPC
168
168
 
169
169
  # return the operation view of the active_call; define #execute as a
170
170
  # new method for this instance that invokes #request_response.
171
+ c.merge_metadata_to_send(metadata)
171
172
  op = c.operation
172
173
  op.define_singleton_method(:execute) do
173
174
  c.request_response(req, metadata: metadata)
@@ -231,9 +232,10 @@ module GRPC
231
232
 
232
233
  # return the operation view of the active_call; define #execute as a
233
234
  # new method for this instance that invokes #client_streamer.
235
+ c.merge_metadata_to_send(metadata)
234
236
  op = c.operation
235
237
  op.define_singleton_method(:execute) do
236
- c.client_streamer(requests, metadata: metadata)
238
+ c.client_streamer(requests)
237
239
  end
238
240
  op
239
241
  end
@@ -309,9 +311,10 @@ module GRPC
309
311
 
310
312
  # return the operation view of the active_call; define #execute
311
313
  # as a new method for this instance that invokes #server_streamer
314
+ c.merge_metadata_to_send(metadata)
312
315
  op = c.operation
313
316
  op.define_singleton_method(:execute) do
314
- c.server_streamer(req, metadata: metadata, &blk)
317
+ c.server_streamer(req, &blk)
315
318
  end
316
319
  op
317
320
  end
@@ -417,15 +420,15 @@ module GRPC
417
420
  deadline: deadline,
418
421
  parent: parent,
419
422
  credentials: credentials)
420
-
421
423
  return c.bidi_streamer(requests, metadata: metadata,
422
424
  &blk) unless return_op
423
425
 
424
426
  # return the operation view of the active_call; define #execute
425
427
  # as a new method for this instance that invokes #bidi_streamer
428
+ c.merge_metadata_to_send(metadata)
426
429
  op = c.operation
427
430
  op.define_singleton_method(:execute) do
428
- c.bidi_streamer(requests, metadata: metadata, &blk)
431
+ c.bidi_streamer(requests, &blk)
429
432
  end
430
433
  op
431
434
  end
@@ -445,7 +448,6 @@ module GRPC
445
448
  deadline: nil,
446
449
  parent: nil,
447
450
  credentials: nil)
448
-
449
451
  deadline = from_relative_time(@timeout) if deadline.nil?
450
452
  # Provide each new client call with its own completion queue
451
453
  call = @ch.create_call(parent, # parent call
@@ -62,25 +62,44 @@ module GRPC
62
62
  proc { |o| unmarshal_class.method(unmarshal_method).call(o) }
63
63
  end
64
64
 
65
+ def handle_request_response(active_call, mth)
66
+ req = active_call.remote_read
67
+ resp = mth.call(req, active_call.single_req_view)
68
+ active_call.server_unary_response(
69
+ resp, trailing_metadata: active_call.output_metadata)
70
+ end
71
+
72
+ def handle_client_streamer(active_call, mth)
73
+ resp = mth.call(active_call.multi_req_view)
74
+ active_call.server_unary_response(
75
+ resp, trailing_metadata: active_call.output_metadata)
76
+ end
77
+
78
+ def handle_server_streamer(active_call, mth)
79
+ req = active_call.remote_read
80
+ replys = mth.call(req, active_call.single_req_view)
81
+ replys.each { |r| active_call.remote_send(r) }
82
+ send_status(active_call, OK, 'OK', active_call.output_metadata)
83
+ end
84
+
85
+ def handle_bidi_streamer(active_call, mth)
86
+ active_call.run_server_bidi(mth)
87
+ send_status(active_call, OK, 'OK', active_call.output_metadata)
88
+ end
89
+
65
90
  def run_server_method(active_call, mth)
66
91
  # While a server method is running, it might be cancelled, its deadline
67
92
  # might be reached, the handler could throw an unknown error, or a
68
93
  # well-behaved handler could throw a StatusError.
69
94
  if request_response?
70
- req = active_call.remote_read
71
- resp = mth.call(req, active_call.single_req_view)
72
- active_call.remote_send(resp)
95
+ handle_request_response(active_call, mth)
73
96
  elsif client_streamer?
74
- resp = mth.call(active_call.multi_req_view)
75
- active_call.remote_send(resp)
97
+ handle_client_streamer(active_call, mth)
76
98
  elsif server_streamer?
77
- req = active_call.remote_read
78
- replys = mth.call(req, active_call.single_req_view)
79
- replys.each { |r| active_call.remote_send(r) }
99
+ handle_server_streamer(active_call, mth)
80
100
  else # is a bidi_stream
81
- active_call.run_server_bidi(mth)
101
+ handle_bidi_streamer(active_call, mth)
82
102
  end
83
- send_status(active_call, OK, 'OK', active_call.output_metadata)
84
103
  rescue BadStatus => e
85
104
  # this is raised by handlers that want GRPC to send an application error
86
105
  # code and detail message and some additional app-specific metadata.
@@ -91,7 +110,7 @@ module GRPC
91
110
  # Log it, but don't notify the other endpoint..
92
111
  GRPC.logger.warn("failed call: #{active_call}\n#{e}")
93
112
  rescue Core::OutOfTime
94
- # This is raised when active_call#method.call exceeeds the deadline
113
+ # This is raised when active_call#method.call exceeds the deadline
95
114
  # event. Send a status of deadline exceeded
96
115
  GRPC.logger.warn("late call: #{active_call}")
97
116
  send_status(active_call, DEADLINE_EXCEEDED, 'late')
@@ -100,11 +119,18 @@ module GRPC
100
119
  # Send back a UNKNOWN status to the client
101
120
  GRPC.logger.warn("failed handler: #{active_call}; sending status:UNKNOWN")
102
121
  GRPC.logger.warn(e)
103
- send_status(active_call, UNKNOWN, 'no reason given')
122
+ send_status(active_call, UNKNOWN, "#{e.class}: #{e.message}")
104
123
  end
105
124
 
106
125
  def assert_arity_matches(mth)
107
- if request_response? || server_streamer?
126
+ # A bidi handler function can optionally be passed a second
127
+ # call object parameter for access to metadata, cancelling, etc.
128
+ if bidi_streamer?
129
+ if mth.arity != 2 && mth.arity != 1
130
+ fail arity_error(mth, 2, "should be #{mth.name}(req, call) or " \
131
+ "#{mth.name}(req)")
132
+ end
133
+ elsif request_response? || server_streamer?
108
134
  if mth.arity != 2
109
135
  fail arity_error(mth, 2, "should be #{mth.name}(req, call)")
110
136
  end
@@ -48,6 +48,10 @@ module GRPC
48
48
  @stop_cond = ConditionVariable.new
49
49
  @workers = []
50
50
  @keep_alive = keep_alive
51
+
52
+ # Each worker thread has its own queue to push and pull jobs
53
+ # these queues are put into @ready_workers when that worker is idle
54
+ @ready_workers = Queue.new
51
55
  end
52
56
 
53
57
  # Returns the number of jobs waiting
@@ -55,6 +59,13 @@ module GRPC
55
59
  @jobs.size
56
60
  end
57
61
 
62
+ def ready_for_work?
63
+ # Busy worker threads are either doing work, or have a single job
64
+ # waiting on them. Workers that are idle with no jobs waiting
65
+ # have their "queues" in @ready_workers
66
+ !@ready_workers.empty?
67
+ end
68
+
58
69
  # Runs the given block on the queue with the provided args.
59
70
  #
60
71
  # @param args the args passed blk when it is called
@@ -67,7 +78,11 @@ module GRPC
67
78
  return
68
79
  end
69
80
  GRPC.logger.info('schedule another job')
70
- @jobs << [blk, args]
81
+ fail 'No worker threads available' if @ready_workers.empty?
82
+ worker_queue = @ready_workers.pop
83
+
84
+ fail 'worker already has a task waiting' unless worker_queue.empty?
85
+ worker_queue << [blk, args]
71
86
  end
72
87
  end
73
88
 
@@ -77,9 +92,11 @@ module GRPC
77
92
  fail 'already stopped' if @stopped
78
93
  end
79
94
  until @workers.size == @size.to_i
80
- next_thread = Thread.new do
95
+ new_worker_queue = Queue.new
96
+ @ready_workers << new_worker_queue
97
+ next_thread = Thread.new(new_worker_queue) do |jobs|
81
98
  catch(:exit) do # allows { throw :exit } to kill a thread
82
- loop_execute_jobs
99
+ loop_execute_jobs(jobs)
83
100
  end
84
101
  remove_current_thread
85
102
  end
@@ -90,7 +107,7 @@ module GRPC
90
107
  # Stops the jobs in the pool
91
108
  def stop
92
109
  GRPC.logger.info('stopping, will wait for all the workers to exit')
93
- @workers.size.times { schedule { throw :exit } }
110
+ schedule { throw :exit } while ready_for_work?
94
111
  @stop_mutex.synchronize do # wait @keep_alive for works to stop
95
112
  @stopped = true
96
113
  @stop_cond.wait(@stop_mutex, @keep_alive) if @workers.size > 0
@@ -125,15 +142,18 @@ module GRPC
125
142
  end
126
143
  end
127
144
 
128
- def loop_execute_jobs
145
+ def loop_execute_jobs(worker_queue)
129
146
  loop do
130
147
  begin
131
- blk, args = @jobs.pop
148
+ blk, args = worker_queue.pop
132
149
  blk.call(*args)
133
150
  rescue StandardError => e
134
151
  GRPC.logger.warn('Error in worker thread')
135
152
  GRPC.logger.warn(e)
136
153
  end
154
+ # there shouldn't be any work given to this thread while it's busy
155
+ fail('received a task while busy') unless worker_queue.empty?
156
+ @ready_workers << worker_queue
137
157
  end
138
158
  end
139
159
  end
@@ -147,10 +167,10 @@ module GRPC
147
167
 
148
168
  def_delegators :@server, :add_http2_port
149
169
 
150
- # Default thread pool size is 3
151
- DEFAULT_POOL_SIZE = 3
170
+ # Default thread pool size is 30
171
+ DEFAULT_POOL_SIZE = 30
152
172
 
153
- # Default max_waiting_requests size is 20
173
+ # Deprecated due to internal changes to the thread pool
154
174
  DEFAULT_MAX_WAITING_REQUESTS = 20
155
175
 
156
176
  # Default poll period is 1s
@@ -175,11 +195,11 @@ module GRPC
175
195
  # instance.
176
196
  #
177
197
  # * pool_size: the size of the thread pool the server uses to run its
178
- # threads
198
+ # threads. No more concurrent requests can be made than the size
199
+ # of the thread pool
179
200
  #
180
- # * max_waiting_requests: the maximum number of requests that are not
181
- # being handled to allow. When this limit is exceeded, the server responds
182
- # with not available to new requests
201
+ # * max_waiting_requests: Deprecated due to internal changes to the thread
202
+ # pool. This is still an argument for compatibility but is ignored.
183
203
  #
184
204
  # * poll_period: when present, the server polls for new events with this
185
205
  # period
@@ -330,13 +350,14 @@ module GRPC
330
350
 
331
351
  # Sends RESOURCE_EXHAUSTED if there are too many unprocessed jobs
332
352
  def available?(an_rpc)
333
- jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
334
- GRPC.logger.info("waiting: #{jobs_count}, max: #{max}")
335
- return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
336
- GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
353
+ return an_rpc if @pool.ready_for_work?
354
+ GRPC.logger.warn('no free worker threads currently')
337
355
  noop = proc { |x| x }
356
+
357
+ # Create a new active call that knows that metadata hasn't been
358
+ # sent yet
338
359
  c = ActiveCall.new(an_rpc.call, noop, noop, an_rpc.deadline,
339
- metadata_received: true)
360
+ metadata_received: true, started: false)
340
361
  c.send_status(GRPC::Core::StatusCodes::RESOURCE_EXHAUSTED, '')
341
362
  nil
342
363
  end
@@ -347,8 +368,11 @@ module GRPC
347
368
  return an_rpc if rpc_descs.key?(mth)
348
369
  GRPC.logger.warn("UNIMPLEMENTED: #{an_rpc}")
349
370
  noop = proc { |x| x }
371
+
372
+ # Create a new active call that knows that
373
+ # metadata hasn't been sent yet
350
374
  c = ActiveCall.new(an_rpc.call, noop, noop, an_rpc.deadline,
351
- metadata_received: true)
375
+ metadata_received: true, started: false)
352
376
  c.send_status(GRPC::Core::StatusCodes::UNIMPLEMENTED, '')
353
377
  nil
354
378
  end
@@ -395,17 +419,20 @@ module GRPC
395
419
  unless @connect_md_proc.nil?
396
420
  connect_md = @connect_md_proc.call(an_rpc.method, an_rpc.metadata)
397
421
  end
398
- an_rpc.call.run_batch(SEND_INITIAL_METADATA => connect_md)
399
422
 
400
423
  return nil unless available?(an_rpc)
401
424
  return nil unless implemented?(an_rpc)
402
425
 
403
- # Create the ActiveCall
426
+ # Create the ActiveCall. Indicate that metadata hasn't been sent yet.
404
427
  GRPC.logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})")
405
428
  rpc_desc = rpc_descs[an_rpc.method.to_sym]
406
- c = ActiveCall.new(an_rpc.call, rpc_desc.marshal_proc,
407
- rpc_desc.unmarshal_proc(:input), an_rpc.deadline,
408
- metadata_received: true)
429
+ c = ActiveCall.new(an_rpc.call,
430
+ rpc_desc.marshal_proc,
431
+ rpc_desc.unmarshal_proc(:input),
432
+ an_rpc.deadline,
433
+ metadata_received: true,
434
+ started: false,
435
+ metadata_to_send: connect_md)
409
436
  mth = an_rpc.method.to_sym
410
437
  [c, mth]
411
438
  end
@@ -110,8 +110,9 @@ module GRPC
110
110
  rpc_descs[name] = RpcDesc.new(name, input, output,
111
111
  marshal_class_method,
112
112
  unmarshal_class_method)
113
- define_method(name) do
114
- fail GRPC::BadStatus, GRPC::Core::StatusCodes::UNIMPLEMENTED
113
+ define_method(GenericService.underscore(name.to_s).to_sym) do |_, _|
114
+ fail GRPC::BadStatus.new_status_exception(
115
+ GRPC::Core::StatusCodes::UNIMPLEMENTED)
115
116
  end
116
117
  end
117
118
 
Binary file
@@ -29,5 +29,5 @@
29
29
 
30
30
  # GRPC contains the General RPC module.
31
31
  module GRPC
32
- VERSION = '1.0.1'
32
+ VERSION = '1.1.2'
33
33
  end
@@ -52,7 +52,9 @@ module Grpc
52
52
  @status_mutex.synchronize do
53
53
  status = @statuses["#{req.service}"]
54
54
  end
55
- fail GRPC::BadStatus, StatusCodes::NOT_FOUND if status.nil?
55
+ if status.nil?
56
+ fail GRPC::BadStatus.new_status_exception(StatusCodes::NOT_FOUND)
57
+ end
56
58
  HealthCheckResponse.new(status: status)
57
59
  end
58
60
 
@@ -54,6 +54,10 @@ module Grpc
54
54
  rpc :EmptyCall, Empty, Empty
55
55
  # One request followed by one response.
56
56
  rpc :UnaryCall, SimpleRequest, SimpleResponse
57
+ # One request followed by one response. Response has cache control
58
+ # headers set such that a caching HTTP proxy (such as GFE) can
59
+ # satisfy subsequent requests.
60
+ rpc :CacheableUnaryCall, SimpleRequest, SimpleResponse
57
61
  # One request followed by a sequence of responses (streamed download).
58
62
  # The server returns the payload with client desired type and sizes.
59
63
  rpc :StreamingOutputCall, StreamingOutputCallRequest, stream(StreamingOutputCallResponse)
@@ -69,6 +73,9 @@ module Grpc
69
73
  # stream of responses are returned to the client when the server starts with
70
74
  # first request.
71
75
  rpc :HalfDuplexCall, stream(StreamingOutputCallRequest), stream(StreamingOutputCallResponse)
76
+ # The test server will not implement this method. It will be used
77
+ # to test the behavior when clients call unimplemented methods.
78
+ rpc :UnimplementedCall, Empty, Empty
72
79
  end
73
80
 
74
81
  Stub = Service.rpc_stub_class
@@ -111,6 +111,18 @@ end
111
111
  # creates a test stub that accesses host:port securely.
112
112
  def create_stub(opts)
113
113
  address = "#{opts.host}:#{opts.port}"
114
+
115
+ # Provide channel args that request compression by default
116
+ # for compression interop tests
117
+ if ['client_compressed_unary',
118
+ 'client_compressed_streaming'].include?(opts.test_case)
119
+ compression_options =
120
+ GRPC::Core::CompressionOptions.new(default_algorithm: :gzip)
121
+ compression_channel_args = compression_options.to_channel_arg_hash
122
+ else
123
+ compression_channel_args = {}
124
+ end
125
+
114
126
  if opts.secure
115
127
  creds = ssl_creds(opts.use_test_ca)
116
128
  stub_opts = {
@@ -145,10 +157,27 @@ def create_stub(opts)
145
157
  end
146
158
 
147
159
  GRPC.logger.info("... connecting securely to #{address}")
148
- Grpc::Testing::TestService::Stub.new(address, creds, **stub_opts)
160
+ stub_opts[:channel_args].merge!(compression_channel_args)
161
+ if opts.test_case == "unimplemented_service"
162
+ Grpc::Testing::UnimplementedService::Stub.new(address, creds, **stub_opts)
163
+ else
164
+ Grpc::Testing::TestService::Stub.new(address, creds, **stub_opts)
165
+ end
149
166
  else
150
167
  GRPC.logger.info("... connecting insecurely to #{address}")
151
- Grpc::Testing::TestService::Stub.new(address, :this_channel_is_insecure)
168
+ if opts.test_case == "unimplemented_service"
169
+ Grpc::Testing::UnimplementedService::Stub.new(
170
+ address,
171
+ :this_channel_is_insecure,
172
+ channel_args: compression_channel_args
173
+ )
174
+ else
175
+ Grpc::Testing::TestService::Stub.new(
176
+ address,
177
+ :this_channel_is_insecure,
178
+ channel_args: compression_channel_args
179
+ )
180
+ end
152
181
  end
153
182
  end
154
183
 
@@ -216,10 +245,28 @@ class BlockingEnumerator
216
245
  end
217
246
  end
218
247
 
248
+ # Intended to be used to wrap a call_op, and to adjust
249
+ # the write flag of the call_op in between messages yielded to it.
250
+ class WriteFlagSettingStreamingInputEnumerable
251
+ attr_accessor :call_op
252
+
253
+ def initialize(requests_and_write_flags)
254
+ @requests_and_write_flags = requests_and_write_flags
255
+ end
256
+
257
+ def each
258
+ @requests_and_write_flags.each do |request_and_flag|
259
+ @call_op.write_flag = request_and_flag[:write_flag]
260
+ yield request_and_flag[:request]
261
+ end
262
+ end
263
+ end
264
+
219
265
  # defines methods corresponding to each interop test case.
220
266
  class NamedTests
221
267
  include Grpc::Testing
222
268
  include Grpc::Testing::PayloadType
269
+ include GRPC::Core::MetadataKeys
223
270
 
224
271
  def initialize(stub, args)
225
272
  @stub = stub
@@ -235,6 +282,48 @@ class NamedTests
235
282
  perform_large_unary
236
283
  end
237
284
 
285
+ def client_compressed_unary
286
+ # first request used also for the probe
287
+ req_size, wanted_response_size = 271_828, 314_159
288
+ expect_compressed = BoolValue.new(value: true)
289
+ payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size))
290
+ req = SimpleRequest.new(response_type: :COMPRESSABLE,
291
+ response_size: wanted_response_size,
292
+ payload: payload,
293
+ expect_compressed: expect_compressed)
294
+
295
+ # send a probe to see if CompressedResponse is supported on the server
296
+ send_probe_for_compressed_request_support do
297
+ request_uncompressed_args = {
298
+ COMPRESSION_REQUEST_ALGORITHM => 'identity'
299
+ }
300
+ @stub.unary_call(req, metadata: request_uncompressed_args)
301
+ end
302
+
303
+ # make a call with a compressed message
304
+ resp = @stub.unary_call(req)
305
+ assert('Expected second unary call with compression to work') do
306
+ resp.payload.body.length == wanted_response_size
307
+ end
308
+
309
+ # make a call with an uncompressed message
310
+ stub_options = {
311
+ COMPRESSION_REQUEST_ALGORITHM => 'identity'
312
+ }
313
+
314
+ req = SimpleRequest.new(
315
+ response_type: :COMPRESSABLE,
316
+ response_size: wanted_response_size,
317
+ payload: payload,
318
+ expect_compressed: BoolValue.new(value: false)
319
+ )
320
+
321
+ resp = @stub.unary_call(req, metadata: stub_options)
322
+ assert('Expected second unary call with compression to work') do
323
+ resp.payload.body.length == wanted_response_size
324
+ end
325
+ end
326
+
238
327
  def service_account_creds
239
328
  # ignore this test if the oauth options are not set
240
329
  if @args.oauth_scope.nil?
@@ -309,6 +398,50 @@ class NamedTests
309
398
  end
310
399
  end
311
400
 
401
+ def client_compressed_streaming
402
+ # first request used also by the probe
403
+ first_request = StreamingInputCallRequest.new(
404
+ payload: Payload.new(type: :COMPRESSABLE, body: nulls(27_182)),
405
+ expect_compressed: BoolValue.new(value: true)
406
+ )
407
+
408
+ # send a probe to see if CompressedResponse is supported on the server
409
+ send_probe_for_compressed_request_support do
410
+ request_uncompressed_args = {
411
+ COMPRESSION_REQUEST_ALGORITHM => 'identity'
412
+ }
413
+ @stub.streaming_input_call([first_request],
414
+ metadata: request_uncompressed_args)
415
+ end
416
+
417
+ second_request = StreamingInputCallRequest.new(
418
+ payload: Payload.new(type: :COMPRESSABLE, body: nulls(45_904)),
419
+ expect_compressed: BoolValue.new(value: false)
420
+ )
421
+
422
+ # Create the requests messages and the corresponding write flags
423
+ # for each message
424
+ requests = WriteFlagSettingStreamingInputEnumerable.new([
425
+ { request: first_request,
426
+ write_flag: 0 },
427
+ { request: second_request,
428
+ write_flag: GRPC::Core::WriteFlags::NO_COMPRESS }
429
+ ])
430
+
431
+ # Create the call_op, pass it to the requests enumerable, and
432
+ # run the call
433
+ call_op = @stub.streaming_input_call(requests,
434
+ return_op: true)
435
+ requests.call_op = call_op
436
+ resp = call_op.execute
437
+
438
+ wanted_aggregate_size = 73_086
439
+
440
+ assert("#{__callee__}: aggregate payload size is incorrect") do
441
+ wanted_aggregate_size == resp.aggregated_payload_size
442
+ end
443
+ end
444
+
312
445
  def server_streaming
313
446
  msg_sizes = [31_415, 9, 2653, 58_979]
314
447
  response_spec = msg_sizes.map { |s| ResponseParameters.new(size: s) }
@@ -338,11 +471,8 @@ class NamedTests
338
471
  deadline = GRPC::Core::TimeConsts::from_relative_time(1)
339
472
  resps = @stub.full_duplex_call(enum.each_item, deadline: deadline)
340
473
  resps.each { } # wait to receive each request (or timeout)
341
- fail 'Should have raised GRPC::BadStatus(DEADLINE_EXCEEDED)'
342
- rescue GRPC::BadStatus => e
343
- assert("#{__callee__}: status was wrong") do
344
- e.code == GRPC::Core::StatusCodes::DEADLINE_EXCEEDED
345
- end
474
+ fail 'Should have raised GRPC::DeadlineExceeded'
475
+ rescue GRPC::DeadlineExceeded
346
476
  end
347
477
 
348
478
  def empty_stream
@@ -384,6 +514,153 @@ class NamedTests
384
514
  op.wait
385
515
  end
386
516
 
517
+ def unimplemented_method
518
+ begin
519
+ resp = @stub.unimplemented_call(Empty.new)
520
+ rescue GRPC::Unimplemented => e
521
+ return
522
+ rescue Exception => e
523
+ fail AssertionError, "Expected BadStatus. Received: #{e.inspect}"
524
+ end
525
+ fail AssertionError, "GRPC::Unimplemented should have been raised. Was not."
526
+ end
527
+
528
+ def unimplemented_service
529
+ begin
530
+ resp = @stub.unimplemented_call(Empty.new)
531
+ rescue GRPC::Unimplemented => e
532
+ return
533
+ rescue Exception => e
534
+ fail AssertionError, "Expected BadStatus. Received: #{e.inspect}"
535
+ end
536
+ fail AssertionError, "GRPC::Unimplemented should have been raised. Was not."
537
+ end
538
+
539
+ def status_code_and_message
540
+
541
+ # Function wide constants.
542
+ message = "test status method"
543
+ code = GRPC::Core::StatusCodes::UNKNOWN
544
+
545
+ # Testing with UnaryCall.
546
+ payload = Payload.new(type: :COMPRESSABLE, body: nulls(1))
547
+ echo_status = EchoStatus.new(code: code, message: message)
548
+ req = SimpleRequest.new(response_type: :COMPRESSABLE,
549
+ response_size: 1,
550
+ payload: payload,
551
+ response_status: echo_status)
552
+ seen_correct_exception = false
553
+ begin
554
+ resp = @stub.unary_call(req)
555
+ rescue GRPC::Unknown => e
556
+ if e.details != message
557
+ fail AssertionError,
558
+ "Expected message #{message}. Received: #{e.details}"
559
+ end
560
+ seen_correct_exception = true
561
+ rescue Exception => e
562
+ fail AssertionError, "Expected BadStatus. Received: #{e.inspect}"
563
+ end
564
+
565
+ if not seen_correct_exception
566
+ fail AssertionError, "Did not see expected status from UnaryCall"
567
+ end
568
+
569
+ # testing with FullDuplex
570
+ req_cls, p_cls = StreamingOutputCallRequest, ResponseParameters
571
+ duplex_req = req_cls.new(payload: Payload.new(body: nulls(1)),
572
+ response_type: :COMPRESSABLE,
573
+ response_parameters: [p_cls.new(size: 1)],
574
+ response_status: echo_status)
575
+ seen_correct_exception = false
576
+ begin
577
+ resp = @stub.full_duplex_call([duplex_req])
578
+ resp.each { |r| }
579
+ rescue GRPC::Unknown => e
580
+ if e.details != message
581
+ fail AssertionError,
582
+ "Expected message #{message}. Received: #{e.details}"
583
+ end
584
+ seen_correct_exception = true
585
+ rescue Exception => e
586
+ fail AssertionError, "Expected BadStatus. Received: #{e.inspect}"
587
+ end
588
+
589
+ if not seen_correct_exception
590
+ fail AssertionError, "Did not see expected status from FullDuplexCall"
591
+ end
592
+
593
+ end
594
+
595
+
596
+ def custom_metadata
597
+
598
+ # Function wide constants
599
+ req_size, wanted_response_size = 271_828, 314_159
600
+ initial_metadata_key = "x-grpc-test-echo-initial"
601
+ initial_metadata_value = "test_initial_metadata_value"
602
+ trailing_metadata_key = "x-grpc-test-echo-trailing-bin"
603
+ trailing_metadata_value = "\x0a\x0b\x0a\x0b\x0a\x0b"
604
+
605
+ metadata = {
606
+ initial_metadata_key => initial_metadata_value,
607
+ trailing_metadata_key => trailing_metadata_value
608
+ }
609
+
610
+ # Testing with UnaryCall
611
+ payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size))
612
+ req = SimpleRequest.new(response_type: :COMPRESSABLE,
613
+ response_size: wanted_response_size,
614
+ payload: payload)
615
+
616
+ op = @stub.unary_call(req, metadata: metadata, return_op: true)
617
+ op.execute
618
+ if not op.metadata.has_key?(initial_metadata_key)
619
+ fail AssertionError, "Expected initial metadata. None received"
620
+ elsif op.metadata[initial_metadata_key] != metadata[initial_metadata_key]
621
+ fail AssertionError,
622
+ "Expected initial metadata: #{metadata[initial_metadata_key]}. "\
623
+ "Received: #{op.metadata[initial_metadata_key]}"
624
+ end
625
+ if not op.trailing_metadata.has_key?(trailing_metadata_key)
626
+ fail AssertionError, "Expected trailing metadata. None received"
627
+ elsif op.trailing_metadata[trailing_metadata_key] !=
628
+ metadata[trailing_metadata_key]
629
+ fail AssertionError,
630
+ "Expected trailing metadata: #{metadata[trailing_metadata_key]}. "\
631
+ "Received: #{op.trailing_metadata[trailing_metadata_key]}"
632
+ end
633
+
634
+ # Testing with FullDuplex
635
+ req_cls, p_cls = StreamingOutputCallRequest, ResponseParameters
636
+ duplex_req = req_cls.new(payload: Payload.new(body: nulls(req_size)),
637
+ response_type: :COMPRESSABLE,
638
+ response_parameters: [p_cls.new(size: wanted_response_size)])
639
+
640
+ duplex_op = @stub.full_duplex_call([duplex_req], metadata: metadata,
641
+ return_op: true)
642
+ resp = duplex_op.execute
643
+ resp.each { |r| } # ensures that the server sends trailing data
644
+ duplex_op.wait
645
+ if not duplex_op.metadata.has_key?(initial_metadata_key)
646
+ fail AssertionError, "Expected initial metadata. None received"
647
+ elsif duplex_op.metadata[initial_metadata_key] !=
648
+ metadata[initial_metadata_key]
649
+ fail AssertionError,
650
+ "Expected initial metadata: #{metadata[initial_metadata_key]}. "\
651
+ "Received: #{duplex_op.metadata[initial_metadata_key]}"
652
+ end
653
+ if not duplex_op.trailing_metadata[trailing_metadata_key]
654
+ fail AssertionError, "Expected trailing metadata. None received"
655
+ elsif duplex_op.trailing_metadata[trailing_metadata_key] !=
656
+ metadata[trailing_metadata_key]
657
+ fail AssertionError,
658
+ "Expected trailing metadata: #{metadata[trailing_metadata_key]}. "\
659
+ "Received: #{duplex_op.trailing_metadata[trailing_metadata_key]}"
660
+ end
661
+
662
+ end
663
+
387
664
  def all
388
665
  all_methods = NamedTests.instance_methods(false).map(&:to_s)
389
666
  all_methods.each do |m|
@@ -415,6 +692,29 @@ class NamedTests
415
692
  end
416
693
  resp
417
694
  end
695
+
696
+ # Send a probing message for compressed request support on the server, to see
697
+ # if it's implemented.
698
+ def send_probe_for_compressed_request_support(&send_probe)
699
+ bad_status_occured = false
700
+
701
+ begin
702
+ send_probe.call
703
+ rescue GRPC::BadStatus => e
704
+ if e.code == GRPC::Core::StatusCodes::INVALID_ARGUMENT
705
+ bad_status_occured = true
706
+ else
707
+ fail AssertionError, "Bad status received but code is #{e.code}"
708
+ end
709
+ rescue Exception => e
710
+ fail AssertionError, "Expected BadStatus. Received: #{e.inspect}"
711
+ end
712
+
713
+ assert('CompressedRequest probe failed') do
714
+ bad_status_occured
715
+ end
716
+ end
717
+
418
718
  end
419
719
 
420
720
  # Args is used to hold the command line info.