mongo 2.16.2 → 2.16.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 2708181386dc2ec2ea46e5ffe3f96967c3ea1e23dfd6f0d6d6ef15fbbde3877a
- data.tar.gz: 017f64d2a62cc0215b6234cef24c8f0be74f75ed5fa688d32155047dbd56a7e0
+ metadata.gz: 755484233ccfdd734c1a7d85d96b4f274607fd034bb1234995d8fcbd5dfb20c9
+ data.tar.gz: c8e8134177b68b9025f0e9a83507177279bf007499a78e78b886a0791f0db5bb
  SHA512:
- metadata.gz: 2488655223e41488f275a7c8d1cd8b4c416a44a92c31cba63ab3fca21b27ec8f545b5feaa953aefb50035dbc68fe47db4f3aebb97fcb1de4e0e5fc6e3b186014
- data.tar.gz: 671ae924600e906df4a2c209b7c491be445ef5c7c7030f6869fa991c53bd49a7cb728a718c8667b5c948022fb2912d45af437c1cf5b82f8b7ec0f0c849916045
+ metadata.gz: 9bc4bfb80121b537af93af7d44925ac243c723ec4ef78cd94b64f04f4d632812e04a2b9859d0e1e38cb1d88abce369d2b80da77d2897a731f3bc65e8174f7589
+ data.tar.gz: 833d7b866479a83c13c06ada2a0be578cc7767ab5a1b84c07639b1b3e0d2245ee300b67b54c2be296783a0833aa7e55960f440b50ec53d9add093ddd91ea84a8
checksums.yaml.gz.sig CHANGED
Binary file
@@ -70,7 +70,7 @@ module Mongo
  # If a query with a limit is performed, the query cache will
  # re-use results from an earlier query with the same or larger
  # limit, and then impose the lower limit during iteration.
- limit_for_cached_query = respond_to?(:limit) ? limit : nil
+ limit_for_cached_query = respond_to?(:limit) ? QueryCache.normalized_limit(limit) : nil
  end

  if block_given?
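To make the cached-limit behaviour described in the comment above concrete, here is a hedged usage sketch; the client address, database, and collection names are illustrative and not part of the diff:

    require 'mongo'

    client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
    coll = client['query_cache_demo'] # hypothetical collection

    Mongo::QueryCache.cache do
      coll.find.limit(5).to_a # round trip to the server; the result set is cached
      coll.find.limit(3).to_a # served from the cached limit-5 result,
                              # with the lower limit of 3 imposed during iteration
    end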
@@ -171,6 +171,7 @@ module Mongo
  max_time_ms: options[:max_time_ms],
  max_value: options[:max_value],
  min_value: options[:min_value],
+ no_cursor_timeout: options[:no_cursor_timeout],
  return_key: options[:return_key],
  show_disk_loc: options[:show_disk_loc],
  comment: options[:comment],
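The new :no_cursor_timeout entry simply forwards the view option into the find operation. A minimal sketch of the documented way to set it, reusing the collection from the sketch above:

    # Ask the server not to reap the cursor after its idle timeout; this is
    # what ends up as noCursorTimeout: true on the find command.
    coll.find.no_cursor_timeout.each do |doc|
      # slow processing here; the server keeps the cursor alive between batches
    end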
@@ -185,6 +186,8 @@ module Mongo
  collection.client.log_warn("The :oplog_replay option is deprecated and ignored by MongoDB 4.4 and later")
  end

+ maybe_set_tailable_options(spec)
+
  if explained?
  spec[:explain] = options[:explain]
  Operation::Explain.new(spec)
@@ -200,6 +203,19 @@ module Mongo
  def use_query_cache?
  QueryCache.enabled? && !collection.system_collection?
  end
+
+ # Add tailable cursor options to the command specification if needed.
+ #
+ # @param [ Hash ] spec The command specification.
+ def maybe_set_tailable_options(spec)
+ case cursor_type
+ when :tailable
+ spec[:tailable] = true
+ when :tailable_await
+ spec[:tailable] = true
+ spec[:await_data] = true
+ end
+ end
  end
  end
  end
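A hedged sketch of how the new :cursor_type option reaches this helper; the capped collection mirrors the spec added later in this release, and the names are illustrative (client as in the earlier sketch):

    # :tailable       -> spec[:tailable] = true
    # :tailable_await -> spec[:tailable] = true and spec[:await_data] = true
    events = client['events', capped: true, size: 4096] # hypothetical capped collection
    events.create

    # The cursor stays open and picks up documents inserted after the query started.
    events.find({}, cursor_type: :tailable).each do |doc|
      puts doc
    end

    # With :tailable_await the server additionally waits briefly for new data
    # before returning an empty batch.
    events.find({}, cursor_type: :tailable_await).first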
@@ -127,6 +127,7 @@ module Mongo
  # return in each response from MongoDB.
  # @option options [ Hash ] :collation The collation to use.
  # @option options [ String ] :comment Associate a comment with the query.
+ # @option options [ :tailable, :tailable_await ] :cursor_type The type of cursor to use.
  # @option options [ Hash ] :explain Execute an explain with the provided
  # explain options (known options are :verbose and :verbosity) rather
  # than a find.
@@ -179,7 +179,8 @@ module Mongo
  #
  # @api private
  def get(**opts)
- limit = opts[:limit]
+ limit = normalized_limit(opts[:limit])
+
  _namespace_key = namespace_key(**opts)
  _cache_key = cache_key(**opts)

@@ -189,7 +190,7 @@ module Mongo
  caching_cursor = namespace_hash[_cache_key]
  return nil unless caching_cursor

- caching_cursor_limit = caching_cursor.view.limit
+ caching_cursor_limit = normalized_limit(caching_cursor.view.limit)

  # There are two scenarios in which a caching cursor could fulfill the
  # query:
@@ -199,6 +200,7 @@ module Mongo
  #
  # Otherwise, return nil because the stored cursor will not satisfy
  # the query.
+
  if limit && (caching_cursor_limit.nil? || caching_cursor_limit >= limit)
  caching_cursor
  elsif limit.nil? && caching_cursor_limit.nil?
@@ -208,6 +210,14 @@ module Mongo
  end
  end

+ def normalized_limit(limit)
+ return nil unless limit
+ # For the purposes of caching, a limit of 0 means no limit, as mongo treats it as such.
+ return nil if limit == 0
+ # For the purposes of caching, a negative limit is the same as a positive limit.
+ limit.abs
+ end
+
  private

  def cache_key(**opts)
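A worked example of the normalization rule above; the standalone helper below just mirrors the method added in this hunk:

    # 0 behaves like "no limit", and a negative limit matches its positive
    # counterpart when deciding whether a cached cursor can serve a query.
    def normalized_limit(limit)
      return nil unless limit
      return nil if limit == 0
      limit.abs
    end

    normalized_limit(nil) # => nil
    normalized_limit(0)   # => nil (treated as no limit)
    normalized_limit(-5)  # => 5
    normalized_limit(3)   # => 3

As the specs further down confirm, this lets find.limit(-3) reuse a cached find.limit(5) result, and find.limit(0) reuse a cached unlimited find.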
@@ -227,15 +227,21 @@ module Mongo
  # @api private
  def check_document
  server_api = @app_metadata.server_api || options[:server_api]
- if hello_ok? || server_api
- doc = HELLO_DOC
+ doc = if hello_ok? || server_api
+ _doc = HELLO_DOC
  if server_api
- doc = doc.merge(Utils.transform_server_api(server_api))
+ _doc = _doc.merge(Utils.transform_server_api(server_api))
  end
- doc
+ _doc
  else
  LEGACY_HELLO_DOC
  end
+ # compressors must be set to maintain correct compression status
+ # in the server description. See RUBY-2427
+ if compressors = options[:compressors]
+ doc = doc.merge(compression: compressors)
+ end
+ doc
  end

  private
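For context, the options[:compressors] value merged here comes from client configuration; a hedged sketch follows, with the address and compressor choice purely illustrative (the server must also have compression enabled):

    require 'mongo'

    # The compressors requested at construction are what check_document now
    # copies into the handshake document, keeping the negotiated compressor
    # visible in the server description (RUBY-2427).
    client = Mongo::Client.new(
      [ '127.0.0.1:27017' ],
      database: 'test',
      compressors: [ 'zlib' ] # 'snappy' and 'zstd' need their companion gems
    )
    client['docs'].insert_one(msg: 'compressed on the wire when the server agrees')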
data/lib/mongo/version.rb CHANGED
@@ -20,5 +20,5 @@ module Mongo
  # The current version of the driver.
  #
  # @since 2.0.0
- VERSION = '2.16.2'.freeze
+ VERSION = '2.16.4'.freeze
  end
@@ -0,0 +1,227 @@
+ # frozen_string_literal: true
+
+ require 'spec_helper'
+
+ describe 'Find operation options' do
+ require_mri
+ require_no_auth
+ min_server_fcv '4.4'
+
+ let(:subscriber) { Mrss::EventSubscriber.new }
+
+ let(:seeds) do
+ [ SpecConfig.instance.addresses.first ]
+ end
+
+ let(:client_options) do
+ {}
+ end
+
+ let(:collection_options) do
+ {}
+ end
+
+ let(:client) do
+ ClientRegistry.instance.new_local_client(
+ seeds,
+ SpecConfig.instance.test_options
+ .merge(database: SpecConfig.instance.test_db)
+ .merge(client_options)
+ ).tap do |client|
+ client.subscribe(Mongo::Monitoring::COMMAND, subscriber)
+ end
+ end
+
+ let(:collection) do
+ client['find_options', collection_options]
+ end
+
+ let(:find_command) do
+ subscriber.started_events.find { |cmd| cmd.command_name == 'find' }
+ end
+
+ let(:should_create_collection) { true }
+
+ before do
+ client['find_options'].drop
+ collection.create if should_create_collection
+ collection.insert_many([ { a: 1 }, { a: 2 }, { a: 3 } ])
+ end
+
+ describe 'collation' do
+ let(:client_options) do
+ {}
+ end
+
+ let(:collation) do
+ { 'locale' => 'en_US' }
+ end
+
+ context 'when defined on the collection' do
+ let(:collection_options) do
+ { collation: collation }
+ end
+
+ it 'does not use the collation defined on the collection' do
+ collection.find.to_a
+ expect(find_command.command['collation']).to be_nil
+ end
+ end
+
+ context 'when defined on the operation' do
+ let(:collection_options) do
+ {}
+ end
+
+ it 'uses the collation defined on the operation' do
+ collection.find({}, collation: collation).to_a
+ expect(find_command.command['collation']).to eq(collation)
+ end
+ end
+
+ context 'when defined on both collection and operation' do
+ let(:collection_options) do
+ { 'locale' => 'de_AT' }
+ end
+
+ let(:should_create_collection) { false }
+
+ it 'uses the collation defined on the operation' do
+ collection.find({}, collation: collation).to_a
+ expect(find_command.command['collation']).to eq(collation)
+ end
+ end
+ end
+
+ describe 'read concern' do
+ context 'when defined on the client' do
+ let(:client_options) do
+ { read_concern: { level: :local } }
+ end
+
+ let(:collection_options) do
+ {}
+ end
+
+ it 'uses the read concern defined on the client' do
+ collection.find.to_a
+ expect(find_command.command['readConcern']).to eq('level' => 'local')
+ end
+
+ context 'when defined on the collection' do
+ let(:collection_options) do
+ { read_concern: { level: :majority } }
+ end
+
+ it 'uses the read concern defined on the collection' do
+ collection.find.to_a
+ expect(find_command.command['readConcern']).to eq('level' => 'majority')
+ end
+
+ context 'when defined on the operation' do
+ let(:operation_read_concern) do
+ { level: :available }
+ end
+
+ it 'uses the read concern defined on the operation' do
+ collection.find({}, read_concern: operation_read_concern).to_a
+ expect(find_command.command['readConcern']).to eq('level' => 'available')
+ end
+ end
+ end
+
+ context 'when defined on the operation' do
+ let(:collection_options) do
+ {}
+ end
+
+ let(:operation_read_concern) do
+ { level: :available }
+ end
+
+ it 'uses the read concern defined on the operation' do
+ collection.find({}, read_concern: operation_read_concern).to_a
+ expect(find_command.command['readConcern']).to eq('level' => 'available')
+ end
+ end
+ end
+
+ context 'when defined on the collection' do
+ let(:client_options) do
+ {}
+ end
+
+ let(:collection_options) do
+ { read_concern: { level: :majority } }
+ end
+
+ it 'uses the read concern defined on the collection' do
+ collection.find.to_a
+ expect(find_command.command['readConcern']).to eq('level' => 'majority')
+ end
+
+ context 'when defined on the operation' do
+ let(:operation_read_concern) do
+ { level: :available }
+ end
+
+ it 'uses the read concern defined on the operation' do
+ collection.find({}, read_concern: operation_read_concern).to_a
+ expect(find_command.command['readConcern']).to eq('level' => 'available')
+ end
+ end
+ end
+ end
+
+ describe 'read preference' do
+ require_topology :replica_set
+
+ context 'when defined on the client' do
+ let(:client_options) do
+ { read: { mode: :secondary } }
+ end
+
+ let(:collection_options) do
+ {}
+ end
+
+ it 'uses the read preference defined on the client' do
+ collection.find.to_a
+ expect(find_command.command['$readPreference']).to eq('mode' => 'secondary')
+ end
+
+ context 'when defined on the collection' do
+ let(:collection_options) do
+ { read: { mode: :secondary_preferred } }
+ end
+
+ it 'uses the read preference defined on the collection' do
+ collection.find.to_a
+ expect(find_command.command['$readPreference']).to eq('mode' => 'secondaryPreferred')
+ end
+ end
+ end
+ end
+
+ describe 'cursor type' do
+ let(:collection_options) do
+ { capped: true, size: 1000 }
+ end
+
+ context 'when cursor type is :tailable' do
+ it 'sets the cursor type to tailable' do
+ collection.find({}, cursor_type: :tailable).first
+ expect(find_command.command['tailable']).to be true
+ expect(find_command.command['awaitData']).to be_falsey
+ end
+ end
+
+ context 'when cursor type is :tailable_await' do
+ it 'sets the cursor type to tailable' do
+ collection.find({}, cursor_type: :tailable_await).first
+ expect(find_command.command['tailable']).to be true
+ expect(find_command.command['awaitData']).to be true
+ end
+ end
+ end
+ end
@@ -345,18 +345,69 @@ describe 'QueryCache' do

  it 'uses the cache' do
  results_limit_5 = authorized_collection.find.limit(5).to_a
+ results_limit_negative_5 = authorized_collection.find.limit(-5).to_a
  results_limit_3 = authorized_collection.find.limit(3).to_a
+ results_limit_negative_3 = authorized_collection.find.limit(-3).to_a
  results_no_limit = authorized_collection.find.to_a
+ results_limit_0 = authorized_collection.find.limit(0).to_a
+
+
+ expect(results_limit_5.length).to eq(5)
+ expect(results_limit_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
+
+ expect(results_limit_negative_5.length).to eq(5)
+ expect(results_limit_negative_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
+
+ expect(results_limit_3.length).to eq(3)
+ expect(results_limit_3.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(results_limit_negative_3.length).to eq(3)
+ expect(results_limit_negative_3.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(results_no_limit.length).to eq(10)
+ expect(results_no_limit.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ expect(results_limit_0.length).to eq(10)
+ expect(results_limit_0.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ expect(events.length).to eq(1)
+ end
+ end
+
+ context 'when the first query has a 0 limit' do
+ before do
+ authorized_collection.find.limit(0).to_a
+ end
+
+ it 'uses the cache' do
+ results_limit_5 = authorized_collection.find.limit(5).to_a
+ results_limit_negative_5 = authorized_collection.find.limit(-5).to_a
+ results_limit_3 = authorized_collection.find.limit(3).to_a
+ results_limit_negative_3 = authorized_collection.find.limit(-3).to_a
+ results_no_limit = authorized_collection.find.to_a
+ results_limit_0 = authorized_collection.find.limit(0).to_a

  expect(results_limit_5.length).to eq(5)
  expect(results_limit_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])

+ expect(results_limit_negative_5.length).to eq(5)
+ expect(results_limit_negative_5.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4])
+
+
  expect(results_limit_3.length).to eq(3)
  expect(results_limit_3.map { |r| r["test"] }).to eq([0, 1, 2])

+ expect(results_limit_negative_3.length).to eq(3)
+ expect(results_limit_negative_3.map { |r| r["test"] }).to eq([0, 1, 2])
+
+
  expect(results_no_limit.length).to eq(10)
  expect(results_no_limit.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

+
+ expect(results_limit_0.length).to eq(10)
+ expect(results_limit_0.map { |r| r["test"] }).to eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
  expect(events.length).to eq(1)
  end
  end
@@ -391,6 +442,21 @@ describe 'QueryCache' do
  end
  end

+ context 'and two queries are performed with a larger negative limit' do
+ it 'uses the query cache for the third query' do
+ results1 = authorized_collection.find.limit(-3).to_a
+ results2 = authorized_collection.find.limit(-3).to_a
+
+ expect(results1.length).to eq(3)
+ expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(results2.length).to eq(3)
+ expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(events.length).to eq(2)
+ end
+ end
+
  context 'and the second query has a smaller limit' do
  let(:results) { authorized_collection.find.limit(1).to_a }

@@ -401,6 +467,99 @@ describe 'QueryCache' do
  end
  end

+ context 'and the second query has a smaller negative limit' do
+ let(:results) { authorized_collection.find.limit(-1).to_a }
+
+ it 'uses the cached query' do
+ expect(results.count).to eq(1)
+ expect(results.first["test"]).to eq(0)
+ expect(events.length).to eq(1)
+ end
+ end
+
+ context 'and the second query has no limit' do
+ it 'queries again' do
+ expect(authorized_collection.find.to_a.count).to eq(10)
+ expect(events.length).to eq(2)
+ end
+ end
+ end
+
+ context 'when the first query has a negative limit' do
+ before do
+ authorized_collection.find.limit(-2).to_a
+ end
+
+ context 'and the second query has a larger limit' do
+ let(:results) { authorized_collection.find.limit(3).to_a }
+
+ it 'queries again' do
+ expect(results.length).to eq(3)
+ expect(results.map { |result| result["test"] }).to eq([0, 1, 2])
+ expect(events.length).to eq(2)
+ end
+ end
+
+ context 'and the second query has a larger negative limit' do
+ let(:results) { authorized_collection.find.limit(-3).to_a }
+
+ it 'queries again' do
+ expect(results.length).to eq(3)
+ expect(results.map { |result| result["test"] }).to eq([0, 1, 2])
+ expect(events.length).to eq(2)
+ end
+ end
+
+ context 'and two queries are performed with a larger limit' do
+ it 'uses the query cache for the third query' do
+ results1 = authorized_collection.find.limit(3).to_a
+ results2 = authorized_collection.find.limit(3).to_a
+
+ expect(results1.length).to eq(3)
+ expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(results2.length).to eq(3)
+ expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(events.length).to eq(2)
+ end
+ end
+
+ context 'and two queries are performed with a larger negative limit' do
+ it 'uses the query cache for the third query' do
+ results1 = authorized_collection.find.limit(-3).to_a
+ results2 = authorized_collection.find.limit(-3).to_a
+
+ expect(results1.length).to eq(3)
+ expect(results1.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(results2.length).to eq(3)
+ expect(results2.map { |r| r["test"] }).to eq([0, 1, 2])
+
+ expect(events.length).to eq(2)
+ end
+ end
+
+ context 'and the second query has a smaller limit' do
+ let(:results) { authorized_collection.find.limit(1).to_a }
+
+ it 'uses the cached query' do
+ expect(results.count).to eq(1)
+ expect(results.first["test"]).to eq(0)
+ expect(events.length).to eq(1)
+ end
+ end
+
+ context 'and the second query has a smaller negative limit' do
+ let(:results) { authorized_collection.find.limit(-1).to_a }
+
+ it 'uses the cached query' do
+ expect(results.count).to eq(1)
+ expect(results.first["test"]).to eq(0)
+ expect(events.length).to eq(1)
+ end
+ end
+
  context 'and the second query has no limit' do
  it 'queries again' do
  expect(authorized_collection.find.to_a.count).to eq(10)
@@ -135,4 +135,44 @@ describe 'SDAM events' do
  end
  end
  end
+
+ describe 'server description changed' do
+ require_topology :single
+
+ let(:sdam_proc) do
+ Proc.new do |client|
+ client.subscribe(Mongo::Monitoring::SERVER_DESCRIPTION_CHANGED, subscriber)
+ end
+ end
+
+ let(:client) do
+ new_local_client(SpecConfig.instance.addresses,
+ # Heartbeat interval is bound by 500 ms
+ SpecConfig.instance.test_options.merge(client_options).merge(
+ heartbeat_frequency: 0.5,
+ sdam_proc: sdam_proc,
+ ),
+ )
+ end
+
+ let(:client_options) do
+ {}
+ end
+
+ it 'is not published when there are no changes in server state' do
+ client
+ sleep 6
+ client.close
+
+ events = subscriber.select_succeeded_events(Mongo::Monitoring::Event::ServerDescriptionChanged)
+
+ # In 6 seconds we should have about 10 or 12 heartbeats.
+ # We expect 1 or 2 description changes:
+ # The first one from unknown to known,
+ # The second one because server changes the fields it returns based on
+ # driver server check payload (e.g. ismaster/isWritablePrimary).
+ events.length.should >= 1
+ events.length.should <= 2
+ end
+ end
  end
@@ -1186,6 +1186,62 @@ describe Mongo::Collection::View::Readable do
  it 'returns a new View' do
  expect(new_view).not_to be(view)
  end
+
+ context 'when sending to server' do
+ let(:subscriber) { Mrss::EventSubscriber.new }
+
+ before do
+ authorized_collection.client.subscribe(Mongo::Monitoring::COMMAND, subscriber)
+ end
+
+ let(:event) do
+ subscriber.single_command_started_event('find')
+ end
+
+ it 'is sent to server' do
+ new_view.to_a
+ event.command.slice('noCursorTimeout').should == {'noCursorTimeout' => true}
+ end
+ end
+
+ context 'integration test' do
+ require_topology :single
+
+ # The number of open cursors with the option set to prevent timeout.
+ def current_no_timeout_count
+ root_authorized_client
+ .command(serverStatus: 1)
+ .documents
+ .first
+ .fetch('metrics')
+ .fetch('cursor')
+ .fetch('open')
+ .fetch('noTimeout')
+ end
+
+ it 'is applied on the server' do
+ # Initialize collection with two documents.
+ new_view.collection.insert_many([{}, {}])
+
+ expect(new_view.count).to be == 2
+
+ # Initial "noTimeout" count should be zero.
+ states = [current_no_timeout_count]
+
+ # The "noTimeout" count should be one while iterating.
+ new_view.batch_size(1).each { states << current_no_timeout_count }
+
+ # Final "noTimeout" count should be back to zero.
+ states << current_no_timeout_count
+
+ # This succeeds on:
+ # commit aab776ebdfb15ddb9765039f7300e15796de0c5c
+ #
+ # This starts failing with [0, 0, 0, 0] from:
+ # commit 2d9f0217ec904a1952a1ada2136502eefbca562e
+ expect(states).to be == [0, 1, 1, 0]
+ end
+ end
  end

  describe '#projection' do