mongo 2.17.0 → 2.17.3

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/lib/mongo/bulk_write.rb +1 -1
  4. data/lib/mongo/cluster/reapers/cursor_reaper.rb +26 -14
  5. data/lib/mongo/collection/view/builder/map_reduce.rb +7 -4
  6. data/lib/mongo/collection/view/iterable.rb +2 -1
  7. data/lib/mongo/collection/view/map_reduce.rb +14 -1
  8. data/lib/mongo/cursor/kill_spec.rb +19 -2
  9. data/lib/mongo/cursor.rb +11 -6
  10. data/lib/mongo/operation/shared/sessions_supported.rb +7 -3
  11. data/lib/mongo/query_cache.rb +12 -2
  12. data/lib/mongo/server/monitor/connection.rb +10 -4
  13. data/lib/mongo/version.rb +1 -1
  14. data/spec/integration/bulk_write_spec.rb +16 -0
  15. data/spec/integration/query_cache_spec.rb +159 -0
  16. data/spec/integration/sdam_events_spec.rb +40 -0
  17. data/spec/lite_spec_helper.rb +7 -0
  18. data/spec/mongo/cluster/cursor_reaper_spec.rb +22 -15
  19. data/spec/mongo/collection/view/map_reduce_spec.rb +16 -0
  20. data/spec/mongo/collection/view/readable_spec.rb +56 -0
  21. data/spec/mongo/cursor_spec.rb +53 -2
  22. data/spec/mongo/query_cache_spec.rb +165 -0
  23. data/spec/runners/auth.rb +1 -1
  24. data/spec/runners/change_streams/spec.rb +1 -1
  25. data/spec/runners/cmap.rb +1 -1
  26. data/spec/runners/command_monitoring.rb +1 -1
  27. data/spec/runners/connection_string.rb +1 -1
  28. data/spec/runners/crud/spec.rb +1 -3
  29. data/spec/runners/gridfs.rb +1 -1
  30. data/spec/runners/read_write_concern_document.rb +1 -1
  31. data/spec/runners/sdam.rb +1 -1
  32. data/spec/runners/server_selection.rb +1 -1
  33. data/spec/runners/server_selection_rtt.rb +1 -1
  34. data/spec/runners/unified/test_group.rb +1 -1
  35. data/spec/shared/share/Dockerfile.erb +3 -3
  36. data/spec/shared/shlib/server.sh +1 -1
  37. data/spec/spec_tests/seed_list_discovery_spec.rb +1 -1
  38. data/spec/support/certificates/atlas-ocsp-ca.crt +40 -47
  39. data/spec/support/certificates/atlas-ocsp.crt +101 -106
  40. data/spec/support/utils.rb +31 -0
  41. data.tar.gz.sig +2 -2
  42. metadata +1056 -1055
  43. metadata.gz.sig +1 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: a7fed7eef29657f665e0b2cbecaab0e60626e76aad58087ff5fd8d051d237002
-  data.tar.gz: 77e80dd97d3d60c6836b563233f23d1f505344905d87ff8afedbe31fa45b216a
+  metadata.gz: d91b7c77d8d5e9d9298baf6aae6789f164fc9f61cb58e27c5f096ccb775fe7bd
+  data.tar.gz: 6ea13c72698ac64aac21a29ee083fb483c56b51afbe9c2a1afd75ced4cefdded
 SHA512:
-  metadata.gz: 4a898c56e637b3c0597126cc207d9c70ae9f0b3533294fab7ba92f8882be72a5df44ead386e055433966b40ad7f34ef3d0a5a82ae1c9b3088d4fb2d428673ee8
-  data.tar.gz: abec048d264aee514a33c660f0ac151f169f6b764d7e85c91f626a1ecb30e2b27c9ddcf3aa404c0f3e3cc27eb8f00dfe0e223a69096755556cb4942c5be9ce3f
+  metadata.gz: 1b037dab0a527f76219b85159117c875e3b5b9a8cffe64689df71caeb344efedffa6d04a8c9232bd9a20aa1484da0b4a9690f00c126e4095931dd682328ca341
+  data.tar.gz: 0c4b32a82ebbf64d6eccbcc0356d289e913393467c640fdf1aa7d19c2cd4150fc0a924d5815f45e59ca62a85be832012b21caf524cadda666bbd07f1d187d342
checksums.yaml.gz.sig CHANGED
Binary file
data/lib/mongo/bulk_write.rb CHANGED
@@ -222,7 +222,7 @@ module Mongo
     def split_execute(name, values, connection, context, operation_id, result_combiner, session, txn_num)
       execute_operation(name, values.shift(values.size / 2), connection, context, operation_id, result_combiner, session, txn_num)
 
-      txn_num = session.next_txn_num if txn_num
+      txn_num = session.next_txn_num if txn_num && !session.in_transaction?
 
       execute_operation(name, values, connection, context, operation_id, result_combiner, session, txn_num)
     end
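The guard added above concerns retryable writes inside transactions: every statement in a transaction must reuse the transaction's single txnNumber, so only split batches running outside a transaction may advance it. A minimal sketch of the intent, with a hypothetical Session stand-in and a stubbed execute:

    # Hypothetical stand-ins; only the guard mirrors the change above.
    Session = Struct.new(:txn_num, :in_transaction) do
      def in_transaction?; in_transaction; end
      def next_txn_num; self.txn_num += 1; end
    end

    def execute(batch, session, txn_num)
      puts "sending #{batch.size} ops with txnNumber=#{txn_num.inspect}"
    end

    def split_execute(values, session, txn_num)
      execute(values.shift(values.size / 2), session, txn_num)
      # Outside a transaction each retried batch gets a fresh txnNumber;
      # inside one, every statement must share the transaction's txnNumber.
      txn_num = session.next_txn_num if txn_num && !session.in_transaction?
      execute(values, session, txn_num)
    end

    split_execute((1..4).to_a, Session.new(1, true), 1)  # both halves use txnNumber=1
    split_execute((1..4).to_a, Session.new(1, false), 1) # second half gets txnNumber=2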
data/lib/mongo/cluster/reapers/cursor_reaper.rb CHANGED
@@ -44,6 +44,7 @@ module Mongo
       @to_kill = {}
       @active_cursor_ids = Set.new
       @mutex = Mutex.new
+      @kill_spec_queue = Queue.new
     end
 
     attr_reader :cluster
@@ -51,17 +52,10 @@ module Mongo
     # Schedule a kill cursors operation to be eventually executed.
     #
     # @param [ Cursor::KillSpec ] kill_spec The kill specification.
-    # @param [ Mongo::Server ] server The server to send the kill cursors
-    #   operation to.
     #
     # @api private
-    def schedule_kill_cursor(kill_spec, server)
-      @mutex.synchronize do
-        if @active_cursor_ids.include?(kill_spec.cursor_id)
-          @to_kill[server.address.seed] ||= Set.new
-          @to_kill[server.address.seed] << kill_spec
-        end
-      end
+    def schedule_kill_cursor(kill_spec)
+      @kill_spec_queue << kill_spec
     end
 
     # Register a cursor id as active.
@@ -110,6 +104,24 @@ module Mongo
       end
     end
 
+    # Read and decode scheduled kill cursors operations.
+    #
+    # This method mutates instance variables without locking, so it is not
+    # thread safe. Generally, it should not be called directly; it is a
+    # helper for the `kill_cursors` method.
+    #
+    # @api private
+    def read_scheduled_kill_specs
+      while kill_spec = @kill_spec_queue.pop(true)
+        if @active_cursor_ids.include?(kill_spec.cursor_id)
+          @to_kill[kill_spec.server_address] ||= Set.new
+          @to_kill[kill_spec.server_address] << kill_spec
+        end
+      end
+    rescue ThreadError
+      # Empty queue, nothing to do.
+    end
+
     # Execute all pending kill cursors operations.
     #
     # @example Execute pending kill cursors operations.
@@ -122,14 +134,14 @@ module Mongo
       # TODO optimize this to batch kill cursor operations for the same
      # server/database/collection instead of killing each cursor
      # individually.
-
      loop do
-        server_address_str = nil
+        server_address = nil
 
        kill_spec = @mutex.synchronize do
+          read_scheduled_kill_specs
          # Find a server that has any cursors scheduled for destruction.
-          server_address_str, specs =
-            @to_kill.detect { |server_address_str, specs| specs.any? }
+          server_address, specs =
+            @to_kill.detect { |_, specs| specs.any? }
 
          if specs.nil?
            # All servers have empty specs, nothing to do.
@@ -168,7 +180,7 @@ module Mongo
        op = Operation::KillCursors.new(spec)
 
        server = cluster.servers.detect do |server|
-          server.address.seed == server_address_str
+          server.address == server_address
        end
 
        unless server
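The rework above swaps lock-based scheduling for a thread-safe Queue: producers (cursor finalizers, which should not contend for the reaper's mutex) just push a spec, and the reaper drains the queue under its own lock with a non-blocking pop. A sketch of the pattern, relying only on standard Ruby Queue semantics:

    require 'set'

    queue = Queue.new                            # producers push without a lock
    to_kill = Hash.new { |h, k| h[k] = Set.new }

    # Producer side (e.g. a cursor finalizer): cheap and non-blocking.
    queue << { cursor_id: 42, server_address: 'a.example.net:27017' }

    # Consumer side: pop(true) is non-blocking and raises ThreadError
    # once the queue is empty, which ends the drain loop.
    begin
      while (spec = queue.pop(true))
        to_kill[spec[:server_address]] << spec[:cursor_id]
      end
    rescue ThreadError
      # Queue drained; nothing left to schedule.
    end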
data/lib/mongo/collection/view/builder/map_reduce.rb CHANGED
@@ -115,11 +115,14 @@ module Mongo
         end
         command.update(view_options)
         command.update(options.slice(:collation))
+
         # Read preference isn't simply passed in the command payload
-        # (it may need to be converted to wire protocol flags)
-        # so remove it here and hopefully it's handled elsewhere.
-        # If not, RUBY-2706.
-        command.delete(:read)
+        # (it may need to be converted to wire protocol flags).
+        # Ideally it should be removed here, however due to Mongoid 7
+        # using this method and requiring :read to be returned from it,
+        # we cannot do this just yet - see RUBY-2932.
+        #command.delete(:read)
+
         command.merge!(Options::Mapper.transform_documents(options, MAPPINGS))
         command
       end
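"Converted to wire protocol flags" refers to the fact that on legacy OP_QUERY messages a non-primary read preference is expressed partly as a flag bit (secondaryOk/slaveOk) on the message rather than as a field inside the command document. A rough illustration with a hypothetical helper, not the driver's actual API:

    # Hypothetical helper: a non-primary read preference becomes a message
    # flag on legacy OP_QUERY instead of riding inside the command body.
    def legacy_query_flags(read_preference)
      flags = []
      flags << :secondary_ok if read_preference && read_preference[:mode] != :primary
      flags
    end

    legacy_query_flags(mode: :secondary) # => [:secondary_ok]
    legacy_query_flags(mode: :primary)   # => []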
data/lib/mongo/collection/view/iterable.rb CHANGED
@@ -70,7 +70,7 @@ module Mongo
         # If a query with a limit is performed, the query cache will
         # re-use results from an earlier query with the same or larger
         # limit, and then impose the lower limit during iteration.
-        limit_for_cached_query = respond_to?(:limit) ? limit : nil
+        limit_for_cached_query = respond_to?(:limit) ? QueryCache.normalized_limit(limit) : nil
       end
 
       if block_given?
@@ -170,6 +170,7 @@ module Mongo
         max_time_ms: options[:max_time_ms],
         max_value: options[:max_value],
         min_value: options[:min_value],
+        no_cursor_timeout: options[:no_cursor_timeout],
         return_key: options[:return_key],
         show_disk_loc: options[:show_disk_loc],
         comment: options[:comment],
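The new no_cursor_timeout entry joins the options snapshot that, presumably, feeds the query cache's key: two queries that differ in such an option must not be served from one another's cache entries. A hypothetical sketch of the idea, not the driver's actual key format:

    # Hypothetical cache keys: including the option separates the entries.
    key_with    = ['db', 'coll', { filter: {}, no_cursor_timeout: true }].hash
    key_without = ['db', 'coll', { filter: {}, no_cursor_timeout: nil }].hash
    key_with == key_without # => false, so the queries cache independently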
data/lib/mongo/collection/view/map_reduce.rb CHANGED
@@ -250,7 +250,20 @@ module Mongo
       end
 
       def initial_query_op(session)
-        Operation::MapReduce.new(map_reduce_spec(session))
+        spec = map_reduce_spec(session)
+        # Read preference isn't simply passed in the command payload
+        # (it may need to be converted to wire protocol flags).
+        # Passing it in the command payload produces errors on at least
+        # 5.0 mongoses.
+        # In the future map_reduce_command should remove :read
+        # from its return value, however we cannot do this right now
+        # due to Mongoid 7 relying on :read being returned as part of
+        # the command - see RUBY-2932.
+        # Delete :read here for now because it cannot be sent to mongos this way.
+        spec = spec.dup
+        spec[:selector] = spec[:selector].dup
+        spec[:selector].delete(:read)
+        Operation::MapReduce.new(spec)
       end
 
       def valid_server?(server)
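The double dup above is deliberate: Hash#dup is shallow, so duplicating only the spec would leave the nested :selector hash shared with the original, and deleting :read would mutate the spec that Mongoid still depends on. A small self-contained demonstration of the pitfall:

    spec = { selector: { read: :secondary, mapreduce: 'coll' } }

    copy = spec.dup                # shallow: copy[:selector] is the same object
    copy[:selector].delete(:read)
    spec[:selector].key?(:read)    # => false, the original was mutated!

    spec = { selector: { read: :secondary, mapreduce: 'coll' } }
    copy = spec.dup
    copy[:selector] = copy[:selector].dup  # now the nested hash is private
    copy[:selector].delete(:read)
    spec[:selector].key?(:read)    # => true, the original is untouched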
data/lib/mongo/cursor/kill_spec.rb CHANGED
@@ -25,14 +25,31 @@ module Mongo
    # @api private
    class KillSpec
 
-     def initialize(cursor_id:, coll_name:, db_name:, service_id:)
+     def initialize(cursor_id:, coll_name:, db_name:, service_id:, server_address:)
        @cursor_id = cursor_id
        @coll_name = coll_name
        @db_name = db_name
        @service_id = service_id
+       @server_address = server_address
      end
 
-     attr_reader :cursor_id, :coll_name, :db_name, :service_id
+     attr_reader :cursor_id, :coll_name, :db_name, :service_id, :server_address
+
+     def ==(other)
+       cursor_id == other.cursor_id &&
+         coll_name == other.coll_name &&
+         db_name == other.db_name &&
+         service_id == other.service_id &&
+         server_address == other.server_address
+     end
+
+     def eql?(other)
+       self.==(other)
+     end
+
+     def hash
+       [cursor_id, coll_name, db_name, service_id, server_address].compact.hash
+     end
    end
  end
end
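Value equality plus a consistent #hash is what lets the reaper keep kill specs in a Set and collapse duplicates, since Set membership is decided by eql? and hash. A quick sketch against the constructor shown above (a plain string stands in for the real address object; requires the mongo gem):

    require 'mongo'
    require 'set'

    a = Mongo::Cursor::KillSpec.new(cursor_id: 1, coll_name: 'c', db_name: 'd',
                                    service_id: nil, server_address: 'x:27017')
    b = Mongo::Cursor::KillSpec.new(cursor_id: 1, coll_name: 'c', db_name: 'd',
                                    service_id: nil, server_address: 'x:27017')

    a == b          # => true, field-by-field comparison
    Set[a, b].size  # => 1, duplicates collapse because eql? and hash agree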
data/lib/mongo/cursor.rb CHANGED
@@ -84,9 +84,8 @@ module Mongo
       @session = @options[:session]
       unless closed?
         register
-        ObjectSpace.define_finalizer(self, self.class.finalize(kill_spec,
+        ObjectSpace.define_finalizer(self, self.class.finalize(kill_spec(server),
           cluster,
-          server,
           @session))
       end
     end
@@ -107,12 +106,12 @@ module Mongo
     # @return [ Proc ] The Finalizer.
     #
     # @api private
-    def self.finalize(kill_spec, cluster, server, session)
+    def self.finalize(kill_spec, cluster, session)
       unless KillSpec === kill_spec
         raise ArgumentError, "First argument must be a KillSpec: #{kill_spec.inspect}"
       end
       proc do
-        cluster.schedule_kill_cursor(kill_spec, server)
+        cluster.schedule_kill_cursor(kill_spec)
         session.end_session if session && session.implicit?
       end
     end
@@ -254,7 +253,12 @@ module Mongo
     #
     # @since 2.2.0
     def batch_size
-      @view.batch_size && @view.batch_size > 0 ? @view.batch_size : limit
+      value = @view.batch_size && @view.batch_size > 0 ? @view.batch_size : limit
+      if value == 0
+        nil
+      else
+        value
+      end
     end
 
     # Is the cursor closed?
@@ -367,12 +371,13 @@ module Mongo
     end
 
     # @api private
-    def kill_spec
+    def kill_spec(server)
       KillSpec.new(
         cursor_id: id,
         coll_name: collection_name,
         db_name: database.name,
         service_id: initial_result.connection_description.service_id,
+        server_address: server.address,
       )
     end
 
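Building the finalizer through a class method is the standard Ruby defence against a subtle leak: a proc created inside an instance method closes over self, so the object could never be garbage collected and its finalizer would never run. A generic sketch of the pattern with a hypothetical Resource class:

    class Resource
      def initialize(handle)
        @handle = handle
        # Pass only the data the finalizer needs; self must not leak in.
        ObjectSpace.define_finalizer(self, self.class.finalize(handle))
      end

      # The proc captures just the handle, so the Resource stays collectable.
      def self.finalize(handle)
        proc { warn "releasing #{handle}" }
      end
    end

    Resource.new('db-handle-1') # eligible for GC once unreferenced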
data/lib/mongo/operation/shared/sessions_supported.rb CHANGED
@@ -257,10 +257,14 @@ module Mongo
       super.tap do |message|
         if session = context.session
           # Serialize the message to detect client-side problems,
-          # such as invalid BSON keys. The message will be serialized again
+          # such as invalid BSON keys or too large messages.
+          # The message will be serialized again
           # later prior to being sent to the connection.
-          message.serialize(BSON::ByteBuffer.new)
-
+          buf = BSON::ByteBuffer.new
+          message.serialize(buf)
+          if buf.length > connection.max_message_size
+            raise Error::MaxMessageSize.new(connection.max_message_size)
+          end
           session.update_state!
         end
       end
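The early serialization now doubles as a size check, so an oversized message fails client-side with a clear driver error instead of an opaque server rejection. A minimal sketch of the check, assuming only the bson gem's ByteBuffer API (the limit value is hypothetical; in the driver it comes from the connection handshake):

    require 'bson'

    max_message_size = 48_000_000 # hypothetical limit from the handshake

    buf = BSON::ByteBuffer.new
    { 'insert' => 'coll', 'documents' => [{ 'x' => 'a' * 16 }] }.to_bson(buf)

    if buf.length > max_message_size
      raise "message of #{buf.length} bytes exceeds #{max_message_size}"
    end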
data/lib/mongo/query_cache.rb CHANGED
@@ -179,7 +179,8 @@ module Mongo
     #
     # @api private
     def get(**opts)
-      limit = opts[:limit]
+      limit = normalized_limit(opts[:limit])
+
       _namespace_key = namespace_key(**opts)
       _cache_key = cache_key(**opts)
 
@@ -189,7 +190,7 @@ module Mongo
       caching_cursor = namespace_hash[_cache_key]
       return nil unless caching_cursor
 
-      caching_cursor_limit = caching_cursor.view.limit
+      caching_cursor_limit = normalized_limit(caching_cursor.view.limit)
 
       # There are two scenarios in which a caching cursor could fulfill the
       # query:
@@ -199,6 +200,7 @@ module Mongo
       #
       # Otherwise, return nil because the stored cursor will not satisfy
       # the query.
+
       if limit && (caching_cursor_limit.nil? || caching_cursor_limit >= limit)
         caching_cursor
       elsif limit.nil? && caching_cursor_limit.nil?
@@ -208,6 +210,14 @@ module Mongo
       end
     end
 
+    def normalized_limit(limit)
+      return nil unless limit
+      # For the purposes of caching, a limit of 0 means no limit, as mongo treats it as such.
+      return nil if limit == 0
+      # For the purposes of caching, a negative limit is the same as a positive limit.
+      limit.abs
+    end
+
     private
 
     def cache_key(**opts)
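The normalization collapses the three spellings of "same result set" into one value for cache comparison: 0 means no limit to the server, and a negative limit requests a single batch of |n| documents, so both are folded accordingly. A standalone copy of the rules with sample values:

    def normalized_limit(limit)
      return nil if limit.nil? || limit == 0 # the server treats 0 as "no limit"
      limit.abs                              # -n caps results like n does
    end

    normalized_limit(nil) # => nil
    normalized_limit(0)   # => nil
    normalized_limit(5)   # => 5
    normalized_limit(-5)  # => 5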
data/lib/mongo/server/monitor/connection.rb CHANGED
@@ -227,15 +227,21 @@ module Mongo
     # @api private
     def check_document
       server_api = @app_metadata.server_api || options[:server_api]
-      if hello_ok? || server_api
-        doc = HELLO_DOC
+      doc = if hello_ok? || server_api
+        _doc = HELLO_DOC
         if server_api
-          doc = doc.merge(Utils.transform_server_api(server_api))
+          _doc = _doc.merge(Utils.transform_server_api(server_api))
         end
-        doc
+        _doc
       else
         LEGACY_HELLO_DOC
       end
+      # compressors must be set to maintain correct compression status
+      # in the server description. See RUBY-2427
+      if compressors = options[:compressors]
+        doc = doc.merge(compression: compressors)
+      end
+      doc
     end
 
     private
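Note that the handshake document is layered onto shared frozen constants (HELLO_DOC / LEGACY_HELLO_DOC), which is why the code uses non-mutating merge: merge! on a frozen hash raises FrozenError, and mutating a shared constant would poison later handshakes. A tiny illustration with a hypothetical constant value:

    HELLO_DOC = { hello: 1 }.freeze  # hypothetical value for illustration

    doc = HELLO_DOC.merge(compression: %w[zstd zlib]) # returns a new hash
    HELLO_DOC                                          # => { hello: 1 }, untouched

    begin
      HELLO_DOC.merge!(compression: %w[zstd])
    rescue FrozenError
      # merge! would mutate the shared constant, so Ruby refuses.
    end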
data/lib/mongo/version.rb CHANGED
@@ -20,5 +20,5 @@ module Mongo
   # The current version of the driver.
   #
   # @since 2.0.0
-  VERSION = '2.17.0'.freeze
+  VERSION = '2.17.3'.freeze
 end
data/spec/integration/bulk_write_spec.rb CHANGED
@@ -18,6 +18,22 @@ describe 'Bulk writes' do
       authorized_collection.bulk_write(operations)
     end.not_to raise_error
   end
+
+  context 'in transaction' do
+    require_transaction_support
+    min_server_version "4.4"
+
+    it 'succeeds' do
+      authorized_collection.create
+      expect do
+        authorized_collection.client.start_session do |session|
+          session.with_transaction do
+            authorized_collection.bulk_write(operations, { session: session })
+          end
+        end
+      end.not_to raise_error
+    end
+  end
 end
 
 context 'when bulk write needs to be split' do
data/spec/integration/query_cache_spec.rb CHANGED
@@ -345,18 +345,69 @@ describe 'QueryCache' do
 
     it 'uses the cache' do
       results_limit_5 = authorized_collection.find.limit(5).to_a
+      results_limit_negative_5 = authorized_collection.find.limit(-5).to_a
       results_limit_3 = authorized_collection.find.limit(3).to_a
+      results_limit_negative_3 = authorized_collection.find.limit(-3).to_a
       results_no_limit = authorized_collection.find.to_a
+      results_limit_0 = authorized_collection.find.limit(0).to_a
+
+      expect(results_limit_5.length).to eq(5)
+      expect(results_limit_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
+
+      expect(results_limit_negative_5.length).to eq(5)
+      expect(results_limit_negative_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
+
+      expect(results_limit_3.length).to eq(3)
+      expect(results_limit_3.map { |r| r["test"] }).to eq([0, 1, 2])
+
+      expect(results_limit_negative_3.length).to eq(3)
+      expect(results_limit_negative_3.map { |r| r["test"] }).to eq([0, 1, 2])
+
+      expect(results_no_limit.length).to eq(10)
+      expect(results_no_limit.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+      expect(results_limit_0.length).to eq(10)
+      expect(results_limit_0.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+      expect(events.length).to eq(1)
+    end
+  end
+
+  context 'when the first query has a 0 limit' do
+    before do
+      authorized_collection.find.limit(0).to_a
+    end
+
+    it 'uses the cache' do
+      results_limit_5 = authorized_collection.find.limit(5).to_a
+      results_limit_negative_5 = authorized_collection.find.limit(-5).to_a
+      results_limit_3 = authorized_collection.find.limit(3).to_a
+      results_limit_negative_3 = authorized_collection.find.limit(-3).to_a
+      results_no_limit = authorized_collection.find.to_a
+      results_limit_0 = authorized_collection.find.limit(0).to_a
 
       expect(results_limit_5.length).to eq(5)
       expect(results_limit_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
 
+      expect(results_limit_negative_5.length).to eq(5)
+      expect(results_limit_negative_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
+
       expect(results_limit_3.length).to eq(3)
       expect(results_limit_3.map { |r| r["test"] }).to eq([0, 1, 2])
 
+      expect(results_limit_negative_3.length).to eq(3)
+      expect(results_limit_negative_3.map { |r| r["test"] }).to eq([0, 1, 2])
+
       expect(results_no_limit.length).to eq(10)
       expect(results_no_limit.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
 
+      expect(results_limit_0.length).to eq(10)
+      expect(results_limit_0.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
       expect(events.length).to eq(1)
     end
   end
@@ -391,6 +442,21 @@ describe 'QueryCache' do
       end
     end
 
+    context 'and two queries are performed with a larger negative limit' do
+      it 'uses the query cache for the third query' do
+        results1 = authorized_collection.find.limit(-3).to_a
+        results2 = authorized_collection.find.limit(-3).to_a
+
+        expect(results1.length).to eq(3)
+        expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
+
+        expect(results2.length).to eq(3)
+        expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
+
+        expect(events.length).to eq(2)
+      end
+    end
+
     context 'and the second query has a smaller limit' do
       let(:results) { authorized_collection.find.limit(1).to_a }
 
@@ -401,6 +467,99 @@ describe 'QueryCache' do
       end
     end
 
+    context 'and the second query has a smaller negative limit' do
+      let(:results) { authorized_collection.find.limit(-1).to_a }
+
+      it 'uses the cached query' do
+        expect(results.count).to eq(1)
+        expect(results.first["test"]).to eq(0)
+        expect(events.length).to eq(1)
+      end
+    end
+
+    context 'and the second query has no limit' do
+      it 'queries again' do
+        expect(authorized_collection.find.to_a.count).to eq(10)
+        expect(events.length).to eq(2)
+      end
+    end
+  end
+
+  context 'when the first query has a negative limit' do
+    before do
+      authorized_collection.find.limit(-2).to_a
+    end
+
+    context 'and the second query has a larger limit' do
+      let(:results) { authorized_collection.find.limit(3).to_a }
+
+      it 'queries again' do
+        expect(results.length).to eq(3)
+        expect(results.map { |result| result["test"] }).to eq([0, 1, 2])
+        expect(events.length).to eq(2)
+      end
+    end
+
+    context 'and the second query has a larger negative limit' do
+      let(:results) { authorized_collection.find.limit(-3).to_a }
+
+      it 'queries again' do
+        expect(results.length).to eq(3)
+        expect(results.map { |result| result["test"] }).to eq([0, 1, 2])
+        expect(events.length).to eq(2)
+      end
+    end
+
+    context 'and two queries are performed with a larger limit' do
+      it 'uses the query cache for the third query' do
+        results1 = authorized_collection.find.limit(3).to_a
+        results2 = authorized_collection.find.limit(3).to_a
+
+        expect(results1.length).to eq(3)
+        expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
+
+        expect(results2.length).to eq(3)
+        expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
+
+        expect(events.length).to eq(2)
+      end
+    end
+
+    context 'and two queries are performed with a larger negative limit' do
+      it 'uses the query cache for the third query' do
+        results1 = authorized_collection.find.limit(-3).to_a
+        results2 = authorized_collection.find.limit(-3).to_a
+
+        expect(results1.length).to eq(3)
+        expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
+
+        expect(results2.length).to eq(3)
+        expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
+
+        expect(events.length).to eq(2)
+      end
+    end
+
+    context 'and the second query has a smaller limit' do
+      let(:results) { authorized_collection.find.limit(1).to_a }
+
+      it 'uses the cached query' do
+        expect(results.count).to eq(1)
+        expect(results.first["test"]).to eq(0)
+        expect(events.length).to eq(1)
+      end
+    end
+
+    context 'and the second query has a smaller negative limit' do
+      let(:results) { authorized_collection.find.limit(-1).to_a }
+
+      it 'uses the cached query' do
+        expect(results.count).to eq(1)
+        expect(results.first["test"]).to eq(0)
+        expect(events.length).to eq(1)
+      end
+    end
+
     context 'and the second query has no limit' do
       it 'queries again' do
         expect(authorized_collection.find.to_a.count).to eq(10)
data/spec/integration/sdam_events_spec.rb CHANGED
@@ -135,4 +135,44 @@ describe 'SDAM events' do
       end
     end
   end
+
+  describe 'server description changed' do
+    require_topology :single
+
+    let(:sdam_proc) do
+      Proc.new do |client|
+        client.subscribe(Mongo::Monitoring::SERVER_DESCRIPTION_CHANGED, subscriber)
+      end
+    end
+
+    let(:client) do
+      new_local_client(SpecConfig.instance.addresses,
+        # Heartbeat interval is bound by 500 ms
+        SpecConfig.instance.test_options.merge(client_options).merge(
+          heartbeat_frequency: 0.5,
+          sdam_proc: sdam_proc,
+        ),
+      )
+    end
+
+    let(:client_options) do
+      {}
+    end
+
+    it 'is not published when there are no changes in server state' do
+      client
+      sleep 6
+      client.close
+
+      events = subscriber.select_succeeded_events(Mongo::Monitoring::Event::ServerDescriptionChanged)
+
+      # In 6 seconds we should have about 10 or 12 heartbeats.
+      # We expect 1 or 2 description changes:
+      # the first from unknown to known,
+      # the second because the server changes the fields it returns based on
+      # the driver's check payload (e.g. ismaster/isWritablePrimary).
+      events.length.should >= 1
+      events.length.should <= 2
+    end
+  end
 end
data/spec/lite_spec_helper.rb CHANGED
@@ -157,6 +157,13 @@ RSpec.configure do |config|
   end
 
   if SpecConfig.instance.active_support?
+    require "active_support/version"
+    if ActiveSupport.version >= Gem::Version.new(7)
+      # ActiveSupport wants us to require ALL of it all of the time.
+      # See: https://github.com/rails/rails/issues/43851,
+      # https://github.com/rails/rails/issues/43889, etc.
+      require 'active_support'
+    end
     require "active_support/time"
     require 'mongo/active_support'
   end