mongo 2.13.0.beta1 → 2.13.0.rc1
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +1 -5
- data/Rakefile +15 -9
- data/lib/mongo.rb +4 -2
- data/lib/mongo/auth/aws/request.rb +4 -2
- data/lib/mongo/bulk_write.rb +1 -0
- data/lib/mongo/client.rb +143 -21
- data/lib/mongo/cluster.rb +53 -17
- data/lib/mongo/cluster/sdam_flow.rb +13 -10
- data/lib/mongo/cluster/topology/replica_set_no_primary.rb +3 -2
- data/lib/mongo/cluster/topology/sharded.rb +1 -1
- data/lib/mongo/cluster/topology/single.rb +1 -1
- data/lib/mongo/collection.rb +17 -13
- data/lib/mongo/collection/view/readable.rb +3 -1
- data/lib/mongo/collection/view/writable.rb +41 -5
- data/lib/mongo/database.rb +31 -4
- data/lib/mongo/database/view.rb +19 -4
- data/lib/mongo/distinguishing_semaphore.rb +55 -0
- data/lib/mongo/error.rb +1 -0
- data/lib/mongo/error/invalid_session.rb +2 -1
- data/lib/mongo/error/operation_failure.rb +6 -0
- data/lib/mongo/error/sessions_not_supported.rb +35 -0
- data/lib/mongo/event/base.rb +6 -0
- data/lib/mongo/grid/file.rb +5 -0
- data/lib/mongo/grid/file/chunk.rb +2 -0
- data/lib/mongo/grid/fs_bucket.rb +15 -13
- data/lib/mongo/grid/stream/write.rb +9 -3
- data/lib/mongo/monitoring.rb +38 -0
- data/lib/mongo/monitoring/command_log_subscriber.rb +10 -2
- data/lib/mongo/monitoring/event/command_failed.rb +11 -0
- data/lib/mongo/monitoring/event/command_started.rb +37 -2
- data/lib/mongo/monitoring/event/command_succeeded.rb +11 -0
- data/lib/mongo/monitoring/event/server_closed.rb +1 -1
- data/lib/mongo/monitoring/event/server_description_changed.rb +27 -4
- data/lib/mongo/monitoring/event/server_heartbeat_failed.rb +9 -2
- data/lib/mongo/monitoring/event/server_heartbeat_started.rb +9 -2
- data/lib/mongo/monitoring/event/server_heartbeat_succeeded.rb +9 -2
- data/lib/mongo/monitoring/event/server_opening.rb +1 -1
- data/lib/mongo/monitoring/event/topology_changed.rb +1 -1
- data/lib/mongo/monitoring/event/topology_closed.rb +1 -1
- data/lib/mongo/monitoring/event/topology_opening.rb +1 -1
- data/lib/mongo/monitoring/publishable.rb +6 -3
- data/lib/mongo/monitoring/server_description_changed_log_subscriber.rb +9 -1
- data/lib/mongo/monitoring/topology_changed_log_subscriber.rb +1 -1
- data/lib/mongo/protocol/message.rb +36 -8
- data/lib/mongo/protocol/msg.rb +14 -0
- data/lib/mongo/protocol/serializers.rb +5 -2
- data/lib/mongo/server.rb +10 -3
- data/lib/mongo/server/connection.rb +4 -4
- data/lib/mongo/server/connection_base.rb +3 -1
- data/lib/mongo/server/description.rb +5 -0
- data/lib/mongo/server/monitor.rb +76 -44
- data/lib/mongo/server/monitor/connection.rb +55 -7
- data/lib/mongo/server/pending_connection.rb +14 -4
- data/lib/mongo/server/push_monitor.rb +173 -0
- data/{spec/runners/transactions/context.rb → lib/mongo/server/push_monitor/connection.rb} +9 -14
- data/lib/mongo/server_selector.rb +0 -1
- data/lib/mongo/server_selector/base.rb +579 -1
- data/lib/mongo/server_selector/nearest.rb +1 -6
- data/lib/mongo/server_selector/primary.rb +1 -6
- data/lib/mongo/server_selector/primary_preferred.rb +7 -10
- data/lib/mongo/server_selector/secondary.rb +1 -6
- data/lib/mongo/server_selector/secondary_preferred.rb +1 -7
- data/lib/mongo/session.rb +2 -0
- data/lib/mongo/socket.rb +20 -8
- data/lib/mongo/socket/ssl.rb +1 -1
- data/lib/mongo/socket/tcp.rb +1 -1
- data/lib/mongo/topology_version.rb +9 -0
- data/lib/mongo/utils.rb +62 -0
- data/lib/mongo/version.rb +1 -1
- data/spec/README.aws-auth.md +2 -2
- data/spec/integration/awaited_ismaster_spec.rb +28 -0
- data/spec/integration/change_stream_examples_spec.rb +6 -2
- data/spec/integration/check_clean_slate_spec.rb +16 -0
- data/spec/integration/client_construction_spec.rb +1 -0
- data/spec/integration/connect_single_rs_name_spec.rb +5 -2
- data/spec/integration/connection_spec.rb +7 -4
- data/spec/integration/crud_spec.rb +4 -4
- data/spec/integration/docs_examples_spec.rb +6 -0
- data/spec/integration/grid_fs_bucket_spec.rb +48 -0
- data/spec/integration/heartbeat_events_spec.rb +4 -23
- data/spec/integration/read_concern_spec.rb +1 -1
- data/spec/integration/retryable_errors_spec.rb +1 -1
- data/spec/integration/retryable_writes/shared/performs_legacy_retries.rb +2 -2
- data/spec/integration/retryable_writes/shared/performs_modern_retries.rb +3 -3
- data/spec/integration/retryable_writes/shared/performs_no_retries.rb +2 -2
- data/spec/integration/sdam_error_handling_spec.rb +37 -15
- data/spec/integration/sdam_events_spec.rb +77 -6
- data/spec/integration/sdam_prose_spec.rb +64 -0
- data/spec/integration/server_monitor_spec.rb +25 -1
- data/spec/integration/size_limit_spec.rb +7 -3
- data/spec/integration/size_limit_spec.rb~12e1e9c4f... RUBY-2242 Fix zlib compression (#2021) +98 -0
- data/spec/integration/ssl_uri_options_spec.rb +2 -2
- data/spec/integration/zlib_compression_spec.rb +25 -0
- data/spec/lite_spec_helper.rb +12 -5
- data/spec/mongo/auth/aws/request_spec.rb +76 -0
- data/spec/mongo/auth/scram_spec.rb +1 -1
- data/spec/mongo/client_construction_spec.rb +207 -0
- data/spec/mongo/client_spec.rb +38 -3
- data/spec/mongo/cluster/topology/replica_set_spec.rb +52 -9
- data/spec/mongo/cluster/topology/single_spec.rb +4 -2
- data/spec/mongo/cluster_spec.rb +34 -35
- data/spec/mongo/collection/view/change_stream_resume_spec.rb +6 -6
- data/spec/mongo/collection_spec.rb +500 -0
- data/spec/mongo/database_spec.rb +245 -8
- data/spec/mongo/distinguishing_semaphore_spec.rb +63 -0
- data/spec/mongo/error/operation_failure_spec.rb +40 -0
- data/spec/mongo/index/view_spec.rb +2 -2
- data/spec/mongo/monitoring/event/server_description_changed_spec.rb +1 -4
- data/spec/mongo/protocol/msg_spec.rb +10 -0
- data/spec/mongo/semaphore_spec.rb +51 -0
- data/spec/mongo/server/connection_auth_spec.rb +2 -2
- data/spec/mongo/server_selector/nearest_spec.rb +23 -23
- data/spec/mongo/server_selector/primary_preferred_spec.rb +26 -26
- data/spec/mongo/server_selector/primary_spec.rb +9 -9
- data/spec/mongo/server_selector/secondary_preferred_spec.rb +22 -22
- data/spec/mongo/server_selector/secondary_spec.rb +18 -18
- data/spec/mongo/server_selector_spec.rb +4 -4
- data/spec/mongo/session_spec.rb +35 -0
- data/spec/runners/change_streams/test.rb +2 -2
- data/spec/runners/cmap.rb +1 -1
- data/spec/runners/command_monitoring.rb +3 -34
- data/spec/runners/crud/context.rb +9 -5
- data/spec/runners/crud/operation.rb +59 -27
- data/spec/runners/crud/spec.rb +0 -8
- data/spec/runners/crud/test.rb +1 -1
- data/spec/runners/sdam.rb +2 -2
- data/spec/runners/server_selection.rb +242 -28
- data/spec/runners/transactions.rb +12 -12
- data/spec/runners/transactions/operation.rb +151 -25
- data/spec/runners/transactions/test.rb +60 -16
- data/spec/spec_tests/command_monitoring_spec.rb +22 -12
- data/spec/spec_tests/crud_spec.rb +1 -1
- data/spec/spec_tests/data/change_streams/change-streams-errors.yml +4 -8
- data/spec/spec_tests/data/change_streams/change-streams-resume-whitelist.yml +66 -0
- data/spec/spec_tests/data/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.yml +15 -0
- data/spec/spec_tests/data/max_staleness/ReplicaSetNoPrimary/NoKnownServers.yml +4 -3
- data/spec/spec_tests/data/max_staleness/Unknown/SmallMaxStaleness.yml +1 -0
- data/spec/spec_tests/data/sdam_integration/cancel-server-check.yml +96 -0
- data/spec/spec_tests/data/sdam_integration/connectTimeoutMS.yml +88 -0
- data/spec/spec_tests/data/sdam_integration/find-network-error.yml +83 -0
- data/spec/spec_tests/data/sdam_integration/find-shutdown-error.yml +116 -0
- data/spec/spec_tests/data/sdam_integration/insert-network-error.yml +86 -0
- data/spec/spec_tests/data/sdam_integration/insert-shutdown-error.yml +115 -0
- data/spec/spec_tests/data/sdam_integration/isMaster-command-error.yml +168 -0
- data/spec/spec_tests/data/sdam_integration/isMaster-network-error.yml +162 -0
- data/spec/spec_tests/data/sdam_integration/isMaster-timeout.yml +229 -0
- data/spec/spec_tests/data/sdam_integration/rediscover-quickly-after-step-down.yml +87 -0
- data/spec/spec_tests/max_staleness_spec.rb +4 -142
- data/spec/spec_tests/retryable_reads_spec.rb +2 -2
- data/spec/spec_tests/sdam_integration_spec.rb +13 -0
- data/spec/spec_tests/sdam_monitoring_spec.rb +1 -2
- data/spec/spec_tests/server_selection_spec.rb +4 -116
- data/spec/stress/cleanup_spec.rb +17 -2
- data/spec/stress/connection_pool_stress_spec.rb +10 -8
- data/spec/support/child_process_helper.rb +78 -0
- data/spec/support/client_registry.rb +1 -0
- data/spec/support/cluster_config.rb +4 -0
- data/spec/support/event_subscriber.rb +123 -33
- data/spec/support/keyword_struct.rb +26 -0
- data/spec/support/shared/server_selector.rb +13 -1
- data/spec/support/spec_config.rb +38 -13
- data/spec/support/spec_organizer.rb +129 -0
- data/spec/support/spec_setup.rb +1 -1
- data/spec/support/utils.rb +46 -0
- metadata +992 -942
- metadata.gz.sig +0 -0
- data/lib/mongo/server_selector/selectable.rb +0 -560
- data/spec/runners/sdam_monitoring.rb +0 -89
@@ -67,7 +67,7 @@ module Mongo
   end
 
   # Work around https://jira.mongodb.org/browse/SERVER-17397
-  if ClusterConfig.instance.server_version < '4.
+  if ClusterConfig.instance.server_version < '4.4' &&
     global_client.cluster.servers.length > 1
   then
     mongos_each_direct_client do |client|
@@ -107,7 +107,7 @@ module Mongo
 
       def run
         change_stream = begin
-          @target.watch(@pipeline, Utils.snakeize_hash(@options))
+          @target.watch(@pipeline, ::Utils.snakeize_hash(@options))
         rescue Mongo::Error::OperationFailure => e
           return {
             result: {
data/spec/runners/cmap.rb
CHANGED
@@ -40,7 +40,7 @@ module Mongo
         @test = YAML.load(File.read(test_path))
 
         @description = @test['description']
-        @pool_options = Utils.snakeize_hash(process_options(@test['poolOptions']))
+        @pool_options = ::Utils.snakeize_hash(process_options(@test['poolOptions']))
         @spec_ops = @test['operations'].map { |o| Operation.new(self, o) }
         @processed_ops = []
         @expected_error = @test['error']
data/spec/runners/command_monitoring.rb
CHANGED
@@ -135,7 +135,7 @@ module Mongo
         if expected.keys.first == '$numberLong'
           converted = expected.values.first.to_i
           if actual.is_a?(BSON::Int64)
-            actual = Utils.int64_value(actual)
+            actual = ::Utils.int64_value(actual)
           elsif actual.is_a?(BSON::Int32)
             return false
           end
@@ -260,8 +260,9 @@ module Mongo
       # @param [ Mongo::Collection ] collection The collection.
       #
       # @since 2.1.0
-      def run(collection)
+      def run(collection, subscriber)
         collection.insert_many(@data)
+        subscriber.clear_events!
         @operation.execute(collection)
       end
     end
@@ -338,37 +339,5 @@ module Mongo
         "match_#{event_type}"
       end
     end
-
-    # The test subscriber to track the events.
-    #
-    # @since 2.1.0
-    class TestSubscriber
-
-      def started(event)
-        command_started_event[event.command_name] = event
-      end
-
-      def succeeded(event)
-        command_succeeded_event[event.command_name] = event
-      end
-
-      def failed(event)
-        command_failed_event[event.command_name] = event
-      end
-
-      private
-
-      def command_started_event
-        @started_events ||= BSON::Document.new
-      end
-
-      def command_succeeded_event
-        @succeeded_events ||= BSON::Document.new
-      end
-
-      def command_failed_event
-        @failed_events ||= BSON::Document.new
-      end
-    end
   end
 end
data/spec/runners/crud/context.rb
CHANGED
@@ -12,12 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+require 'support/keyword_struct'
+
 module Mongo
   module CRUD
-
-
-
-
-
+    Context = KeywordStruct.new(
+      :session0,
+      :session1,
+      :sdam_subscriber,
+      :threads,
+      :primary_address,
+    )
   end
 end
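The Context above is built with the new KeywordStruct helper (data/spec/support/keyword_struct.rb, added in this release but not shown in this diff). As an illustrative sketch only, assuming KeywordStruct just provides keyword-argument initialization on top of Struct, the same shape can be expressed with the standard library:

    # Illustrative sketch, not the driver's actual helper (assumes Ruby >= 2.5).
    Context = Struct.new(
      :session0,
      :session1,
      :sdam_subscriber,
      :threads,
      :primary_address,
      keyword_init: true,
    )

    # Members are assigned by name; unspecified members default to nil.
    context = Context.new(session0: nil, threads: [])
    context.threads          #=> []
    context.primary_address  #=> nil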
data/spec/runners/crud/operation.rb
CHANGED
@@ -88,7 +88,7 @@ module Mongo
       #
       # @since 2.0.0
       def execute(target)
-        op_name = Utils.underscore(name)
+        op_name = ::Utils.underscore(name)
         if target.is_a?(Mongo::Database)
           op_name = "db_#{op_name}"
         elsif target.is_a?(Mongo::Client)
@@ -99,14 +99,14 @@ module Mongo
 
       def database_options
         if opts = @spec['databaseOptions']
-          Utils.convert_operation_options(opts)
+          ::Utils.convert_operation_options(opts)
         else
           nil
         end
       end
 
       def collection_options
-        Utils.convert_operation_options(@spec['collectionOptions'])
+        ::Utils.convert_operation_options(@spec['collectionOptions'])
       end
 
       private
@@ -114,31 +114,31 @@ module Mongo
       # read operations
 
       def aggregate(collection, context)
-        collection.aggregate(arguments['pipeline'], context
+        collection.aggregate(arguments['pipeline'], transformed_options(context)).to_a
       end
 
       def db_aggregate(database, context)
-        database.aggregate(arguments['pipeline'], context
+        database.aggregate(arguments['pipeline'], transformed_options(context)).to_a
       end
 
       def count(collection, context)
-        collection.count(arguments['filter'], context
+        collection.count(arguments['filter'], transformed_options(context))
       end
 
       def count_documents(collection, context)
-        collection.count_documents(arguments['filter'], context
+        collection.count_documents(arguments['filter'], transformed_options(context))
       end
 
       def distinct(collection, context)
-        collection.distinct(arguments['fieldName'], arguments['filter'], context
+        collection.distinct(arguments['fieldName'], arguments['filter'], transformed_options(context))
       end
 
       def estimated_document_count(collection, context)
-        collection.estimated_document_count(context
+        collection.estimated_document_count(transformed_options(context))
       end
 
       def find(collection, context)
-        opts = context
+        opts = transformed_options(context)
         if arguments['modifiers']
           opts = opts.merge(modifiers: BSON::Document.new(arguments['modifiers']))
         end
@@ -183,7 +183,7 @@ module Mongo
       # write operations
 
       def bulk_write(collection, context)
-        result = collection.bulk_write(requests, context
+        result = collection.bulk_write(requests, transformed_options(context))
         return_doc = {}
         return_doc['deletedCount'] = result.deleted_count || 0
         return_doc['insertedIds'] = result.inserted_ids if result.inserted_ids
@@ -197,50 +197,50 @@ module Mongo
       end
 
       def delete_many(collection, context)
-        result = collection.delete_many(arguments['filter'], context
+        result = collection.delete_many(arguments['filter'], transformed_options(context))
         { 'deletedCount' => result.deleted_count }
       end
 
       def delete_one(collection, context)
-        result = collection.delete_one(arguments['filter'], context
+        result = collection.delete_one(arguments['filter'], transformed_options(context))
         { 'deletedCount' => result.deleted_count }
       end
 
       def insert_many(collection, context)
-        result = collection.insert_many(arguments['documents'], context
+        result = collection.insert_many(arguments['documents'], transformed_options(context))
         { 'insertedIds' => result.inserted_ids }
       end
 
       def insert_one(collection, context)
-        result = collection.insert_one(arguments['document'], context
+        result = collection.insert_one(arguments['document'], transformed_options(context))
         { 'insertedId' => result.inserted_id }
       end
 
       def replace_one(collection, context)
-        result = collection.replace_one(arguments['filter'], arguments['replacement'], context
+        result = collection.replace_one(arguments['filter'], arguments['replacement'], transformed_options(context))
         update_return_doc(result)
       end
 
       def update_many(collection, context)
-        result = collection.update_many(arguments['filter'], arguments['update'], context
+        result = collection.update_many(arguments['filter'], arguments['update'], transformed_options(context))
         update_return_doc(result)
       end
 
       def update_one(collection, context)
-        result = collection.update_one(arguments['filter'], arguments['update'], context
+        result = collection.update_one(arguments['filter'], arguments['update'], transformed_options(context))
         update_return_doc(result)
       end
 
       def find_one_and_delete(collection, context)
-        collection.find_one_and_delete(arguments['filter'], context
+        collection.find_one_and_delete(arguments['filter'], transformed_options(context))
       end
 
       def find_one_and_replace(collection, context)
-        collection.find_one_and_replace(arguments['filter'], arguments['replacement'], context
+        collection.find_one_and_replace(arguments['filter'], arguments['replacement'], transformed_options(context))
       end
 
       def find_one_and_update(collection, context)
-        collection.find_one_and_update(arguments['filter'], arguments['update'], context
+        collection.find_one_and_update(arguments['filter'], arguments['update'], transformed_options(context))
       end
 
       # ddl
@@ -270,7 +270,8 @@ module Mongo
       end
 
       def create_collection(database, context)
-
+        opts = transformed_options(context)
+        database[arguments.fetch('collection')].create(session: opts[:session])
       end
 
       def rename(collection, context)
@@ -351,6 +352,16 @@ module Mongo
         end
       end
 
+      def configure_fail_point(client, context)
+        fp = arguments.fetch('failPoint')
+        $disable_fail_points ||= []
+        $disable_fail_points << [
+          fp,
+          ClusterConfig.instance.primary_address,
+        ]
+        client.use('admin').database.command(fp)
+      end
+
       # options & arguments
 
       def options
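The failPoint argument consumed by configure_fail_point above comes from the spec test files themselves; for orientation, a typical failCommand fail point document has the shape below (the field values here are illustrative examples, not taken from this diff):

    # Illustrative failCommand document; values are examples only.
    fail_point = {
      'configureFailPoint' => 'failCommand',
      'mode' => { 'times' => 1 },
      'data' => {
        'failCommands' => ['insert'],
        'errorCode' => 11600, # InterruptedAtShutdown
      },
    }
    # configure_fail_point sends such a document as an admin command and records
    # it in $disable_fail_points so the runner can disable it after the test:
    #   client.use('admin').database.command(fail_point)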
@@ -360,7 +371,7 @@ module Mongo
         # bulk write test is an exception in that it has an "options" key
         # with the options.
         arguments.merge(arguments['options'] || {}).each do |spec_k, v|
-          ruby_k = Utils.underscore(spec_k).to_sym
+          ruby_k = ::Utils.underscore(spec_k).to_sym
 
           if v.is_a?(Hash) && v['$numberLong']
             v = v['$numberLong'].to_i
@@ -390,8 +401,8 @@ module Mongo
       end
 
       def bulk_request(request)
-        op_name = Utils.underscore(request['name'])
-        args = Utils.shallow_snakeize_hash(request['arguments'])
+        op_name = ::Utils.underscore(request['name'])
+        args = ::Utils.shallow_snakeize_hash(request['arguments'])
         if args[:document]
           unless args.keys == [:document]
             raise "If :document is given, it must be the only key"
@@ -406,7 +417,7 @@ module Mongo
       end
 
       def transform_return_document(v)
-        Utils.underscore(v).to_sym
+        ::Utils.underscore(v).to_sym
       end
 
       def update
@@ -414,7 +425,7 @@ module Mongo
       end
 
       def transform_read_preference(v)
-        Utils.snakeize_hash(v)
+        ::Utils.snakeize_hash(v)
       end
 
       def read_preference
@@ -429,6 +440,27 @@ module Mongo
         return_doc['modifiedCount'] = result.modified_count if result.modified_count
         return_doc
       end
+
+      def transformed_options(context)
+        opts = options.dup
+        if opts[:session]
+          opts[:session] = case opts[:session]
+          when 'session0'
+            unless context.session0
+              raise "Trying to use session0 but it is not in context"
+            end
+            context.session0
+          when 'session1'
+            unless context.session1
+              raise "Trying to use session1 but it is not in context"
+            end
+            context.session1
+          else
+            raise "Invalid session name '#{opts[:session]}'"
+          end
+        end
+        opts
+      end
     end
   end
 end
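For orientation, the new transformed_options hook resolves the session names used in the spec YAML ('session0'/'session1') against the Context struct shown earlier. A hypothetical illustration of the wiring, not part of the diff (it assumes client is a connected Mongo::Client and that Context accepts keyword arguments):

    # Hypothetical illustration of the runner flow.
    context = Mongo::CRUD::Context.new(
      session0: client.start_session,
      session1: client.start_session,
    )
    # An operation whose spec arguments include "session" => "session0" ends up
    # invoking, e.g.:
    #   collection.insert_one(arguments['document'], transformed_options(context))
    # with opts[:session] replaced by context.session0.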
data/spec/runners/crud/spec.rb
CHANGED
@@ -11,14 +11,6 @@ module Mongo
       def initialize(test_path)
         contents = File.read(test_path)
 
-        # Since Ruby driver binds a client to a database, change the
-        # database name in the spec to the one we are using
-        contents.sub!(/"crud-tests"/, '"ruby-driver"')
-        contents.sub!(/"retryable-reads-tests"/, '"ruby-driver"')
-        contents.sub!(/"transaction-tests"/, '"ruby-driver"')
-        contents.sub!(/"withTransaction-tests"/, '"ruby-driver"')
-        contents.sub!(/ default_write_concern_db/, ' ruby-driver')
-
         @spec = YAML.load(contents)
         @description = File.basename(test_path)
         @data = BSON::ExtJSON.parse_obj(@spec['data'])
data/spec/runners/crud/test.rb
CHANGED
@@ -28,7 +28,7 @@ module Mongo
         @spec = crud_spec
         @data = data
         @description = test['description']
-        @client_options = Utils.convert_client_options(test['clientOptions'] || {})
+        @client_options = ::Utils.convert_client_options(test['clientOptions'] || {})
 
         if test['failPoint']
           @fail_point_command = FAIL_POINT_BASE_COMMAND.merge(test['failPoint'])
data/spec/runners/sdam.rb
CHANGED
@@ -149,7 +149,7 @@ module Mongo
       end
 
       def when
-        Utils.underscore(@spec.fetch('when'))
+        ::Utils.underscore(@spec.fetch('when'))
       end
 
       def max_wire_version
@@ -161,7 +161,7 @@ module Mongo
       end
 
       def type
-        Utils.underscore(@spec.fetch('type'))
+        ::Utils.underscore(@spec.fetch('type'))
       end
 
       def result
data/spec/runners/server_selection.rb
CHANGED
@@ -73,19 +73,6 @@ module Mongo
         @type = Mongo::Cluster::Topology.const_get(@test['topology_description']['type'])
       end
 
-      # Whether this spec describes a replica set.
-      #
-      # @example Determine if the spec describes a replica set.
-      #   spec.replica_set?
-      #
-      # @return [true, false] If the spec describes a replica set.
-      #
-      # @since 2.0.0
-      def replica_set?
-        type == Mongo::Cluster::Topology::ReplicaSetNoPrimary ||
-          type == Mongo::Cluster::Topology::ReplicaSetWithPrimary
-      end
-
       # Does this spec expect a server to be found.
       #
       # @example Will a server be found with this spec.
@@ -98,15 +85,10 @@ module Mongo
         !in_latency_window.empty?
       end
 
-      #
+      # Whether the test requires an error to be raised during server selection.
       #
-      # @
-
-      #
-      # @return [ true, false ] If an error will be raised by the max staleness setting.
-      #
-      # @since 2.4.0
-      def invalid_max_staleness?
+      # @return [ true, false ] Whether the test expects an error.
+      def error?
         @test['error']
       end
 
@@ -122,9 +104,6 @@ module Mongo
       #
       # @since 2.0.0
       def in_latency_window
-        if read_preference['mode'] == :secondary_preferred && primary
-          return @in_latency_window.push(primary).uniq
-        end
         @in_latency_window
       end
 
@@ -134,13 +113,248 @@ module Mongo
       #
       # @since 2.0.0
       def candidate_servers
-        @candidate_servers
+        @candidate_servers
+      end
+    end
+  end
+end
+end
+
+def define_server_selection_spec_tests(test_paths)
+  # Linter insists that a server selection semaphore is present when
+  # performing server selection.
+  skip_if_linting
+
+  test_paths.each do |file|
+
+    spec = Mongo::ServerSelection::Read::Spec.new(file)
+
+    context(spec.description) do
+      # Cluster needs a topology and topology needs a cluster...
+      # This temporary cluster is used for topology construction.
+      let(:temp_cluster) do
+        double('temp cluster').tap do |cluster|
+          allow(cluster).to receive(:servers_list).and_return([])
+        end
+      end
+
+      let(:topology) do
+        options = if spec.type <= Mongo::Cluster::Topology::ReplicaSetNoPrimary
+          {replica_set_name: 'foo'}
+        else
+          {}
+        end
+        spec.type.new(options, monitoring, temp_cluster)
+      end
+
+      let(:monitoring) do
+        Mongo::Monitoring.new(monitoring: false)
+      end
+
+      let(:listeners) do
+        Mongo::Event::Listeners.new
+      end
+
+      let(:options) do
+        if spec.heartbeat_frequency
+          {server_selection_timeout: 0.1, heartbeat_frequency: spec.heartbeat_frequency}
+        else
+          {server_selection_timeout: 0.1}
+        end
+      end
+
+      let(:cluster) do
+        double('cluster').tap do |c|
+          allow(c).to receive(:server_selection_semaphore)
+          allow(c).to receive(:connected?).and_return(true)
+          allow(c).to receive(:summary)
+          allow(c).to receive(:topology).and_return(topology)
+          allow(c).to receive(:single?).and_return(topology.single?)
+          allow(c).to receive(:sharded?).and_return(topology.sharded?)
+          allow(c).to receive(:replica_set?).and_return(topology.replica_set?)
+          allow(c).to receive(:unknown?).and_return(topology.unknown?)
+          allow(c).to receive(:options).and_return(options)
+          allow(c).to receive(:scan!).and_return(true)
+          allow(c).to receive(:app_metadata).and_return(app_metadata)
+          allow(c).to receive(:heartbeat_interval).and_return(
+            spec.heartbeat_frequency || Mongo::Server::Monitor::DEFAULT_HEARTBEAT_INTERVAL)
         end
+      end
+
+      # One of the spec test assertions is on the set of servers that are
+      # eligible for selection without taking latency into account.
+      # In the driver, latency is taken into account at various points during
+      # server selection, hence there isn't a method that can be called to
+      # retrieve the list of servers without accounting for latency.
+      # Work around this by executing server selection with all servers set
+      # to zero latency, when evaluating the candidate server set.
+      let(:ignore_latency) { false }
+
+      let(:candidate_servers) do
+        spec.candidate_servers.collect do |server|
+          features = double('features').tap do |feat|
+            allow(feat).to receive(:max_staleness_enabled?).and_return(server['maxWireVersion'] && server['maxWireVersion'] >= 5)
+            allow(feat).to receive(:check_driver_support!).and_return(true)
+          end
+          address = Mongo::Address.new(server['address'])
+          Mongo::Server.new(address, cluster, monitoring, listeners,
+            {monitoring_io: false}.update(options)
+          ).tap do |s|
+            allow(s).to receive(:average_round_trip_time) do
+              if ignore_latency
+                0
+              elsif server['avg_rtt_ms']
+                server['avg_rtt_ms'] / 1000.0
+              end
+            end
+            allow(s).to receive(:tags).and_return(server['tags'])
+            allow(s).to receive(:secondary?).and_return(server['type'] == 'RSSecondary')
+            allow(s).to receive(:primary?).and_return(server['type'] == 'RSPrimary')
+            allow(s).to receive(:mongos?).and_return(server['type'] == 'Mongos')
+            allow(s).to receive(:standalone?).and_return(server['type'] == 'Standalone')
+            allow(s).to receive(:unknown?).and_return(server['type'] == 'Unknown')
+            allow(s).to receive(:connectable?).and_return(true)
+            allow(s).to receive(:last_write_date).and_return(
+              Time.at(server['lastWrite']['lastWriteDate']['$numberLong'].to_f / 1000)) if server['lastWrite']
+            allow(s).to receive(:last_scan).and_return(
+              Time.at(server['lastUpdateTime'].to_f / 1000))
+            allow(s).to receive(:features).and_return(features)
+            allow(s).to receive(:replica_set_name).and_return('foo')
+          end
+        end
+      end
+
+      let(:suitable_servers) do
+        spec.suitable_servers.collect do |server|
+          Mongo::Server.new(Mongo::Address.new(server['address']), cluster, monitoring, listeners,
+            options.merge(monitoring_io: false))
+        end
+      end
 
-
+      let(:in_latency_window) do
+        spec.in_latency_window.collect do |server|
+          Mongo::Server.new(Mongo::Address.new(server['address']), cluster, monitoring, listeners,
+            options.merge(monitoring_io: false))
+        end
+      end
+
+      let(:server_selector_definition) do
+        { mode: spec.read_preference['mode'] }.tap do |definition|
+          definition[:tag_sets] = spec.read_preference['tag_sets']
+          definition[:max_staleness] = spec.max_staleness if spec.max_staleness
+        end
+      end
+
+      let(:server_selector) do
+        Mongo::ServerSelector.get(server_selector_definition)
+      end
+
+      let(:app_metadata) do
+        Mongo::Server::AppMetadata.new({})
+      end
+
+      before do
+        allow(cluster).to receive(:servers_list).and_return(candidate_servers)
+        allow(cluster).to receive(:servers) do
+          # Copy Cluster#servers definition because clusters is a double
+          cluster.topology.servers(cluster.servers_list)
+        end
+        allow(cluster).to receive(:addresses).and_return(candidate_servers.map(&:address))
+      end
+
+      if spec.error?
+
+        it 'Raises an InvalidServerPreference exception' do
+
+          expect do
+            server_selector.select_server(cluster)
+          end.to raise_exception(Mongo::Error::InvalidServerPreference)
+        end
+
+      else
+
+        if spec.server_available?
+
+          it 'has non-empty suitable servers' do
+            spec.suitable_servers.should be_a(Array)
+            spec.suitable_servers.should_not be_empty
+          end
+
+          if spec.in_latency_window.length == 1
+
+            it 'selects the expected server' do
+              [server_selector.select_server(cluster)].should == in_latency_window
+            end
+
+          else
+
+            it 'selects a server in the suitable list' do
+              in_latency_window.should include(server_selector.select_server(cluster))
+            end
+
+            let(:expected_addresses) do
+              in_latency_window.map(&:address).map(&:seed).sort
+            end
+
+            let(:actual_addresses) do
+              server_selector.suitable_servers(cluster).map(&:address).map(&:seed).sort
+            end
+
+            it 'identifies expected suitable servers' do
+              actual_addresses.should == expected_addresses
+            end
+
+          end
+
+          context 'candidate servers without taking latency into account' do
+            let(:ignore_latency) { true }
+
+            let(:expected_addresses) do
+              suitable_servers.map(&:address).map(&:seed).sort
+            end
+
+            let(:actual_addresses) do
+              servers = server_selector.send(:suitable_servers, cluster)
+
+              # The tests expect that only secondaries are "suitable" for
+              # server selection with secondary preferred read preference.
+              # In actuality, primaries are also suitable, and the driver
+              # returns the primaries also. Remove primaries from the
+              # actual set when read preference is secondary preferred.
+              # HOWEVER, if a test ends up selecting a primary, then it
+              # includes that primary into its suitable servers. Therefore
+              # only remove primaries when the number of suitable servers
+              # is greater than 1.
+              servers.delete_if do |server|
+                server_selector.is_a?(Mongo::ServerSelector::SecondaryPreferred) &&
+                  server.primary? &&
+                  servers.length > 1
+              end
+
+              # Since we remove the latency requirement, the servers
+              # may be returned in arbitrary order.
+              servers.map(&:address).map(&:seed).sort
+            end
+
+            it 'identifies expected suitable servers' do
+              actual_addresses.should == expected_addresses
+            end
+          end
+
+        else
+
+          # Runner does not handle non-empty suitable servers with
+          # no servers in latency window.
+          it 'has empty suitable servers' do
+            expect(spec.suitable_servers).to eq([])
+          end
+
+          it 'Raises a NoServerAvailable Exception' do
+            expect do
+              server_selector.select_server(cluster)
+            end.to raise_exception(Mongo::Error::NoServerAvailable)
+          end
 
-      def primary
-        @candidate_servers.find { |s| s['type'] == 'RSPrimary' }
         end
       end
     end