mongo 2.16.0 → 2.16.3

Sign up to get free protection for your applications and to get access to all the features.
Files changed (38) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/lib/mongo/collection/view/builder/map_reduce.rb +7 -4
  4. data/lib/mongo/collection/view/iterable.rb +2 -1
  5. data/lib/mongo/collection/view/map_reduce.rb +14 -1
  6. data/lib/mongo/query_cache.rb +12 -2
  7. data/lib/mongo/server/monitor/connection.rb +10 -4
  8. data/lib/mongo/server/push_monitor.rb +4 -1
  9. data/lib/mongo/version.rb +1 -1
  10. data/spec/integration/query_cache_spec.rb +159 -0
  11. data/spec/integration/sdam_events_spec.rb +40 -0
  12. data/spec/lite_spec_helper.rb +7 -0
  13. data/spec/mongo/collection/view/map_reduce_spec.rb +16 -0
  14. data/spec/mongo/collection/view/readable_spec.rb +56 -0
  15. data/spec/mongo/query_cache_spec.rb +165 -0
  16. data/spec/mongo/server/monitor/connection_spec.rb +22 -0
  17. data/spec/mongo/server/push_monitor_spec.rb +101 -0
  18. data/spec/runners/auth.rb +1 -1
  19. data/spec/runners/change_streams/spec.rb +1 -1
  20. data/spec/runners/cmap.rb +1 -1
  21. data/spec/runners/command_monitoring.rb +1 -1
  22. data/spec/runners/connection_string.rb +1 -1
  23. data/spec/runners/crud/spec.rb +1 -3
  24. data/spec/runners/gridfs.rb +1 -1
  25. data/spec/runners/read_write_concern_document.rb +1 -1
  26. data/spec/runners/sdam.rb +1 -1
  27. data/spec/runners/server_selection.rb +1 -1
  28. data/spec/runners/server_selection_rtt.rb +1 -1
  29. data/spec/runners/unified/test_group.rb +1 -1
  30. data/spec/shared/share/Dockerfile.erb +3 -3
  31. data/spec/shared/shlib/server.sh +1 -1
  32. data/spec/spec_tests/seed_list_discovery_spec.rb +1 -1
  33. data/spec/support/certificates/atlas-ocsp-ca.crt +40 -47
  34. data/spec/support/certificates/atlas-ocsp.crt +101 -106
  35. data/spec/support/utils.rb +31 -0
  36. data.tar.gz.sig +0 -0
  37. metadata +1092 -1089
  38. metadata.gz.sig +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 803953b133578a0014b611609e6b334196ebe25484ee4db3fc3299cf1911445f
4
- data.tar.gz: 368a07676afa5d71f41e86b9a288b4691f99b0549d041d87d6c4a8cf771d6ff5
3
+ metadata.gz: 6bbafc263bbcad82f83f9dab3bcd1120388149586352625a365b10bb15dc87be
4
+ data.tar.gz: 10a2c5a650d659bf9bd7cd45f024e77c8c0a29234e9d789efd2dda6d674b8f81
5
5
  SHA512:
6
- metadata.gz: dd6d240f340e8ea904028bf7d3a9cc2ef6ee6d0e54d181a45c2e083f8e71dc29fab942d265425f5eeb98c0e5f81786f8d0ade460e90ffe2da7af69038f22155f
7
- data.tar.gz: 4474efb5b33f984b4900bd702f05d1c012b5827b09b33281da60c9e07f3c416953292d2d2e9b463ed3c015f8a5e607de9da9aafcf8452b9568373dae49dbd880
6
+ metadata.gz: a3284c0e972a9f4a8ce900405f6bb72df29cef702af58dd4e369619164a38b7a5128a2a01bd5f0d8b5a20feb001f43b6d0ab7f3590c539050ef38ef291764dd0
7
+ data.tar.gz: 2fe66c448b75bbfdbbbe4bc98d8286ede141e38a1361794c5c30cc83f87672e7faaf53463bbee084fd905baca2f78dccf2d5f3f3ff531c067c5763a3f683a338
checksums.yaml.gz.sig CHANGED
Binary file
@@ -115,11 +115,14 @@ module Mongo
115
115
  end
116
116
  command.update(view_options)
117
117
  command.update(Utils.slice_hash(options, :collation))
118
+
118
119
  # Read preference isn't simply passed in the command payload
119
- # (it may need to be converted to wire protocol flags)
120
- # so remove it here and hopefully it's handled elsewhere.
121
- # If not, RUBY-2706.
122
- command.delete(:read)
120
+ # (it may need to be converted to wire protocol flags).
121
+ # Ideally it should be removed here, however due to Mongoid 7
122
+ # using this method and requiring :read to be returned from it,
123
+ # we cannot do this just yet - see RUBY-2932.
124
+ #command.delete(:read)
125
+
123
126
  command.merge!(Options::Mapper.transform_documents(options, MAPPINGS))
124
127
  command
125
128
  end
@@ -70,7 +70,7 @@ module Mongo
70
70
  # If a query with a limit is performed, the query cache will
71
71
  # re-use results from an earlier query with the same or larger
72
72
  # limit, and then impose the lower limit during iteration.
73
- limit_for_cached_query = respond_to?(:limit) ? limit : nil
73
+ limit_for_cached_query = respond_to?(:limit) ? QueryCache.normalized_limit(limit) : nil
74
74
  end
75
75
 
76
76
  if block_given?
@@ -171,6 +171,7 @@ module Mongo
171
171
  max_time_ms: options[:max_time_ms],
172
172
  max_value: options[:max_value],
173
173
  min_value: options[:min_value],
174
+ no_cursor_timeout: options[:no_cursor_timeout],
174
175
  return_key: options[:return_key],
175
176
  show_disk_loc: options[:show_disk_loc],
176
177
  comment: options[:comment],
@@ -248,7 +248,20 @@ module Mongo
248
248
  end
249
249
 
250
250
  def initial_query_op(session)
251
- Operation::MapReduce.new(map_reduce_spec(session))
251
+ spec = map_reduce_spec(session)
252
+ # Read preference isn't simply passed in the command payload
253
+ # (it may need to be converted to wire protocol flags).
254
+ # Passing it in command payload produces errors on at least
255
+ # 5.0 mongoses.
256
+ # In the future map_reduce_command should remove :read
257
+ # from its return value, however we cannot do this right now
258
+ # due to Mongoid 7 relying on :read being returned as part of
259
+ # the command - see RUBY-2932.
260
+ # Delete :read here for now because it cannot be sent to mongos this way.
261
+ spec = spec.dup
262
+ spec[:selector] = spec[:selector].dup
263
+ spec[:selector].delete(:read)
264
+ Operation::MapReduce.new(spec)
252
265
  end
253
266
 
254
267
  def valid_server?(server)
@@ -179,7 +179,8 @@ module Mongo
179
179
  #
180
180
  # @api private
181
181
  def get(**opts)
182
- limit = opts[:limit]
182
+ limit = normalized_limit(opts[:limit])
183
+
183
184
  _namespace_key = namespace_key(**opts)
184
185
  _cache_key = cache_key(**opts)
185
186
 
@@ -189,7 +190,7 @@ module Mongo
189
190
  caching_cursor = namespace_hash[_cache_key]
190
191
  return nil unless caching_cursor
191
192
 
192
- caching_cursor_limit = caching_cursor.view.limit
193
+ caching_cursor_limit = normalized_limit(caching_cursor.view.limit)
193
194
 
194
195
  # There are two scenarios in which a caching cursor could fulfill the
195
196
  # query:
@@ -199,6 +200,7 @@ module Mongo
199
200
  #
200
201
  # Otherwise, return nil because the stored cursor will not satisfy
201
202
  # the query.
203
+
202
204
  if limit && (caching_cursor_limit.nil? || caching_cursor_limit >= limit)
203
205
  caching_cursor
204
206
  elsif limit.nil? && caching_cursor_limit.nil?
@@ -208,6 +210,14 @@ module Mongo
208
210
  end
209
211
  end
210
212
 
213
+ def normalized_limit(limit)
214
+ return nil unless limit
215
+ # For the purposes of caching, a limit of 0 means no limit, as mongo treats it as such.
216
+ return nil if limit == 0
217
+ # For the purposes of caching, a negative limit is the same as a positive limit.
218
+ limit.abs
219
+ end
220
+
211
221
  private
212
222
 
213
223
  def cache_key(**opts)
@@ -227,15 +227,21 @@ module Mongo
227
227
  # @api private
228
228
  def check_document
229
229
  server_api = @app_metadata.server_api || options[:server_api]
230
- if hello_ok? || server_api
231
- doc = HELLO_DOC
230
+ doc = if hello_ok? || server_api
231
+ _doc = HELLO_DOC
232
232
  if server_api
233
- doc = doc.merge(Utils.transform_server_api(server_api))
233
+ _doc = _doc.merge(Utils.transform_server_api(server_api))
234
234
  end
235
- doc
235
+ _doc
236
236
  else
237
237
  LEGACY_HELLO_DOC
238
238
  end
239
+ # compressors must be set to maintain correct compression status
240
+ # in the server description. See RUBY-2427
241
+ if compressors = options[:compressors]
242
+ doc = doc.merge(compression: compressors)
243
+ end
244
+ doc
239
245
  end
240
246
 
241
247
  private
@@ -110,7 +110,7 @@ module Mongo
110
110
  if new_description.topology_version
111
111
  @topology_version = new_description.topology_version
112
112
  end
113
- rescue Mongo::Error => exc
113
+ rescue IOError, SocketError, SystemCallError, Mongo::Error => exc
114
114
  stop_requested = @lock.synchronize { @stop_requested }
115
115
  if stop_requested
116
116
  # Ignore the exception, see RUBY-2771.
@@ -123,6 +123,9 @@ module Mongo
123
123
  log_prefix: options[:log_prefix],
124
124
  bg_error_backtrace: options[:bg_error_backtrace],
125
125
  )
126
+
127
+ # Avoid tight looping in push monitor - see RUBY-2806.
128
+ sleep(0.5)
126
129
  end
127
130
 
128
131
  def check
data/lib/mongo/version.rb CHANGED
@@ -20,5 +20,5 @@ module Mongo
20
20
  # The current version of the driver.
21
21
  #
22
22
  # @since 2.0.0
23
- VERSION = '2.16.0'.freeze
23
+ VERSION = '2.16.3'.freeze
24
24
  end
@@ -345,18 +345,69 @@ describe 'QueryCache' do
345
345
 
346
346
  it 'uses the cache' do
347
347
  results_limit_5 = authorized_collection.find.limit(5).to_a
348
+ results_limit_negative_5 = authorized_collection.find.limit(-5).to_a
348
349
  results_limit_3 = authorized_collection.find.limit(3).to_a
350
+ results_limit_negative_3 = authorized_collection.find.limit(-3).to_a
349
351
  results_no_limit = authorized_collection.find.to_a
352
+ results_limit_0 = authorized_collection.find.limit(0).to_a
353
+
354
+
355
+ expect(results_limit_5.length).to eq(5)
356
+ expect(results_limit_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
357
+
358
+ expect(results_limit_negative_5.length).to eq(5)
359
+ expect(results_limit_negative_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
360
+
361
+ expect(results_limit_3.length).to eq(3)
362
+ expect(results_limit_3.map { |r| r["test"] }).to eq([0, 1, 2])
363
+
364
+ expect(results_limit_negative_3.length).to eq(3)
365
+ expect(results_limit_negative_3.map { |r| r["test"] }).to eq([0, 1, 2])
366
+
367
+ expect(results_no_limit.length).to eq(10)
368
+ expect(results_no_limit.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
369
+
370
+ expect(results_limit_0.length).to eq(10)
371
+ expect(results_limit_0.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
372
+
373
+ expect(events.length).to eq(1)
374
+ end
375
+ end
376
+
377
+ context 'when the first query has a 0 limit' do
378
+ before do
379
+ authorized_collection.find.limit(0).to_a
380
+ end
381
+
382
+ it 'uses the cache' do
383
+ results_limit_5 = authorized_collection.find.limit(5).to_a
384
+ results_limit_negative_5 = authorized_collection.find.limit(-5).to_a
385
+ results_limit_3 = authorized_collection.find.limit(3).to_a
386
+ results_limit_negative_3 = authorized_collection.find.limit(-3).to_a
387
+ results_no_limit = authorized_collection.find.to_a
388
+ results_limit_0 = authorized_collection.find.limit(0).to_a
350
389
 
351
390
  expect(results_limit_5.length).to eq(5)
352
391
  expect(results_limit_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
353
392
 
393
+ expect(results_limit_negative_5.length).to eq(5)
394
+ expect(results_limit_negative_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
395
+
396
+
354
397
  expect(results_limit_3.length).to eq(3)
355
398
  expect(results_limit_3.map { |r| r["test"] }).to eq([0, 1, 2])
356
399
 
400
+ expect(results_limit_negative_3.length).to eq(3)
401
+ expect(results_limit_negative_3.map { |r| r["test"] }).to eq([0, 1, 2])
402
+
403
+
357
404
  expect(results_no_limit.length).to eq(10)
358
405
  expect(results_no_limit.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
359
406
 
407
+
408
+ expect(results_limit_0.length).to eq(10)
409
+ expect(results_limit_0.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
410
+
360
411
  expect(events.length).to eq(1)
361
412
  end
362
413
  end
@@ -391,6 +442,21 @@ describe 'QueryCache' do
391
442
  end
392
443
  end
393
444
 
445
+ context 'and two queries are performed with a larger negative limit' do
446
+ it 'uses the query cache for the third query' do
447
+ results1 = authorized_collection.find.limit(-3).to_a
448
+ results2 = authorized_collection.find.limit(-3).to_a
449
+
450
+ expect(results1.length).to eq(3)
451
+ expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
452
+
453
+ expect(results2.length).to eq(3)
454
+ expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
455
+
456
+ expect(events.length).to eq(2)
457
+ end
458
+ end
459
+
394
460
  context 'and the second query has a smaller limit' do
395
461
  let(:results) { authorized_collection.find.limit(1).to_a }
396
462
 
@@ -401,6 +467,99 @@ describe 'QueryCache' do
401
467
  end
402
468
  end
403
469
 
470
+ context 'and the second query has a smaller negative limit' do
471
+ let(:results) { authorized_collection.find.limit(-1).to_a }
472
+
473
+ it 'uses the cached query' do
474
+ expect(results.count).to eq(1)
475
+ expect(results.first["test"]).to eq(0)
476
+ expect(events.length).to eq(1)
477
+ end
478
+ end
479
+
480
+ context 'and the second query has no limit' do
481
+ it 'queries again' do
482
+ expect(authorized_collection.find.to_a.count).to eq(10)
483
+ expect(events.length).to eq(2)
484
+ end
485
+ end
486
+ end
487
+
488
+ context 'when the first query has a negative limit' do
489
+ before do
490
+ authorized_collection.find.limit(-2).to_a
491
+ end
492
+
493
+ context 'and the second query has a larger limit' do
494
+ let(:results) { authorized_collection.find.limit(3).to_a }
495
+
496
+ it 'queries again' do
497
+ expect(results.length).to eq(3)
498
+ expect(results.map { |result| result["test"] }).to eq([0, 1, 2])
499
+ expect(events.length).to eq(2)
500
+ end
501
+ end
502
+
503
+ context 'and the second query has a larger negative limit' do
504
+ let(:results) { authorized_collection.find.limit(-3).to_a }
505
+
506
+ it 'queries again' do
507
+ expect(results.length).to eq(3)
508
+ expect(results.map { |result| result["test"] }).to eq([0, 1, 2])
509
+ expect(events.length).to eq(2)
510
+ end
511
+ end
512
+
513
+ context 'and two queries are performed with a larger limit' do
514
+ it 'uses the query cache for the third query' do
515
+ results1 = authorized_collection.find.limit(3).to_a
516
+ results2 = authorized_collection.find.limit(3).to_a
517
+
518
+ expect(results1.length).to eq(3)
519
+ expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
520
+
521
+ expect(results2.length).to eq(3)
522
+ expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
523
+
524
+ expect(events.length).to eq(2)
525
+ end
526
+ end
527
+
528
+ context 'and two queries are performed with a larger negative limit' do
529
+ it 'uses the query cache for the third query' do
530
+ results1 = authorized_collection.find.limit(-3).to_a
531
+ results2 = authorized_collection.find.limit(-3).to_a
532
+
533
+ expect(results1.length).to eq(3)
534
+ expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
535
+
536
+ expect(results2.length).to eq(3)
537
+ expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
538
+
539
+ expect(events.length).to eq(2)
540
+ end
541
+ end
542
+
543
+ context 'and the second query has a smaller limit' do
544
+ let(:results) { authorized_collection.find.limit(1).to_a }
545
+
546
+ it 'uses the cached query' do
547
+ expect(results.count).to eq(1)
548
+ expect(results.first["test"]).to eq(0)
549
+ expect(events.length).to eq(1)
550
+ end
551
+ end
552
+
553
+ context 'and the second query has a smaller negative limit' do
554
+ let(:results) { authorized_collection.find.limit(-1).to_a }
555
+
556
+ it 'uses the cached query' do
557
+ expect(results.count).to eq(1)
558
+ expect(results.first["test"]).to eq(0)
559
+ expect(events.length).to eq(1)
560
+ end
561
+ end
562
+
404
563
  context 'and the second query has no limit' do
405
564
  it 'queries again' do
406
565
  expect(authorized_collection.find.to_a.count).to eq(10)
@@ -135,4 +135,44 @@ describe 'SDAM events' do
135
135
  end
136
136
  end
137
137
  end
138
+
139
+ describe 'server description changed' do
140
+ require_topology :single
141
+
142
+ let(:sdam_proc) do
143
+ Proc.new do |client|
144
+ client.subscribe(Mongo::Monitoring::SERVER_DESCRIPTION_CHANGED, subscriber)
145
+ end
146
+ end
147
+
148
+ let(:client) do
149
+ new_local_client(SpecConfig.instance.addresses,
150
+ # Heartbeat interval is bound by 500 ms
151
+ SpecConfig.instance.test_options.merge(client_options).merge(
152
+ heartbeat_frequency: 0.5,
153
+ sdam_proc: sdam_proc,
154
+ ),
155
+ )
156
+ end
157
+
158
+ let(:client_options) do
159
+ {}
160
+ end
161
+
162
+ it 'is not published when there are no changes in server state' do
163
+ client
164
+ sleep 6
165
+ client.close
166
+
167
+ events = subscriber.select_succeeded_events(Mongo::Monitoring::Event::ServerDescriptionChanged)
168
+
169
+ # In 6 seconds we should have about 10 or 12 heartbeats.
170
+ # We expect 1 or 2 description changes:
171
+ # The first one from unknown to known,
172
+ # The second one because server changes the fields it returns based on
173
+ # driver server check payload (e.g. ismaster/isWritablePrimary).
174
+ events.length.should >= 1
175
+ events.length.should <= 2
176
+ end
177
+ end
138
178
  end
@@ -157,6 +157,13 @@ RSpec.configure do |config|
157
157
  end
158
158
 
159
159
  if SpecConfig.instance.active_support?
160
+ require "active_support/version"
161
+ if ActiveSupport.version >= Gem::Version.new(7)
162
+ # ActiveSupport wants us to require ALL of it all of the time.
163
+ # See: https://github.com/rails/rails/issues/43851,
164
+ # https://github.com/rails/rails/issues/43889, etc.
165
+ require 'active_support'
166
+ end
160
167
  require "active_support/time"
161
168
  require 'mongo/active_support'
162
169
  end
@@ -865,4 +865,20 @@ describe Mongo::Collection::View::MapReduce do
865
865
  end
866
866
  end
867
867
  end
868
+
869
+ describe '#map_reduce_spec' do
870
+ context 'when read preference is given' do
871
+ let(:view_options) do
872
+ { read: {mode: :secondary} }
873
+ end
874
+
875
+ context 'selector' do
876
+ # For compatibility with released versions of Mongoid, this method
877
+ # must return read preference under the :read key.
878
+ it 'contains read preference' do
879
+ map_reduce_spec[:selector][:read].should == {'mode' => :secondary}
880
+ end
881
+ end
882
+ end
883
+ end
868
884
  end
@@ -1186,6 +1186,62 @@ describe Mongo::Collection::View::Readable do
1186
1186
  it 'returns a new View' do
1187
1187
  expect(new_view).not_to be(view)
1188
1188
  end
1189
+
1190
+ context 'when sending to server' do
1191
+ let(:subscriber) { Mrss::EventSubscriber.new }
1192
+
1193
+ before do
1194
+ authorized_collection.client.subscribe(Mongo::Monitoring::COMMAND, subscriber)
1195
+ end
1196
+
1197
+ let(:event) do
1198
+ subscriber.single_command_started_event('find')
1199
+ end
1200
+
1201
+ it 'is sent to server' do
1202
+ new_view.to_a
1203
+ event.command.slice('noCursorTimeout').should == {'noCursorTimeout' => true}
1204
+ end
1205
+ end
1206
+
1207
+ context 'integration test' do
1208
+ require_topology :single
1209
+
1210
+ # The number of open cursors with the option set to prevent timeout.
1211
+ def current_no_timeout_count
1212
+ root_authorized_client
1213
+ .command(serverStatus: 1)
1214
+ .documents
1215
+ .first
1216
+ .fetch('metrics')
1217
+ .fetch('cursor')
1218
+ .fetch('open')
1219
+ .fetch('noTimeout')
1220
+ end
1221
+
1222
+ it 'is applied on the server' do
1223
+ # Initialize collection with two documents.
1224
+ new_view.collection.insert_many([{}, {}])
1225
+
1226
+ expect(new_view.count).to be == 2
1227
+
1228
+ # Initial "noTimeout" count should be zero.
1229
+ states = [current_no_timeout_count]
1230
+
1231
+ # The "noTimeout" count should be one while iterating.
1232
+ new_view.batch_size(1).each { states << current_no_timeout_count }
1233
+
1234
+ # Final "noTimeout" count should be back to zero.
1235
+ states << current_no_timeout_count
1236
+
1237
+ # This succeeds on:
1238
+ # commit aab776ebdfb15ddb9765039f7300e15796de0c5c
1239
+ #
1240
+ # This starts failing with [0, 0, 0, 0] from:
1241
+ # commit 2d9f0217ec904a1952a1ada2136502eefbca562e
1242
+ expect(states).to be == [0, 1, 1, 0]
1243
+ end
1244
+ end
1189
1245
  end
1190
1246
 
1191
1247
  describe '#projection' do