mongo 2.8.0 → 2.9.0.rc0
This diff shows the changes between publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +0 -0
- data/Rakefile +12 -0
- data/lib/mongo.rb +15 -1
- data/lib/mongo/address/ipv6.rb +0 -2
- data/lib/mongo/auth/scram/conversation.rb +0 -3
- data/lib/mongo/bulk_write/result_combiner.rb +12 -2
- data/lib/mongo/client.rb +59 -6
- data/lib/mongo/cluster.rb +19 -8
- data/lib/mongo/cluster/reapers/cursor_reaper.rb +0 -2
- data/lib/mongo/cluster/reapers/socket_reaper.rb +12 -9
- data/lib/mongo/collection.rb +1 -1
- data/lib/mongo/collection/view/aggregation.rb +5 -1
- data/lib/mongo/collection/view/builder/map_reduce.rb +1 -1
- data/lib/mongo/collection/view/change_stream.rb +30 -10
- data/lib/mongo/collection/view/iterable.rb +13 -6
- data/lib/mongo/collection/view/map_reduce.rb +12 -10
- data/lib/mongo/collection/view/readable.rb +19 -14
- data/lib/mongo/cursor.rb +12 -8
- data/lib/mongo/database.rb +10 -7
- data/lib/mongo/database/view.rb +18 -11
- data/lib/mongo/error.rb +2 -2
- data/lib/mongo/error/connection_check_out_timeout.rb +49 -0
- data/lib/mongo/error/operation_failure.rb +9 -9
- data/lib/mongo/error/parser.rb +25 -3
- data/lib/mongo/error/pool_closed_error.rb +43 -0
- data/lib/mongo/error/sdam_error_detection.rb +18 -0
- data/lib/mongo/grid/file/chunk.rb +0 -2
- data/lib/mongo/grid/fs_bucket.rb +26 -12
- data/lib/mongo/grid/stream/read.rb +36 -21
- data/lib/mongo/index/view.rb +11 -7
- data/lib/mongo/logger.rb +0 -2
- data/lib/mongo/monitoring.rb +31 -0
- data/lib/mongo/monitoring/cmap_log_subscriber.rb +53 -0
- data/lib/mongo/monitoring/event.rb +1 -0
- data/lib/mongo/monitoring/event/cmap.rb +25 -0
- data/lib/mongo/monitoring/event/cmap/base.rb +28 -0
- data/lib/mongo/monitoring/event/cmap/connection_check_out_failed.rb +78 -0
- data/lib/mongo/monitoring/event/cmap/connection_check_out_started.rb +56 -0
- data/lib/mongo/monitoring/event/cmap/connection_checked_in.rb +63 -0
- data/lib/mongo/monitoring/event/cmap/connection_checked_out.rb +64 -0
- data/lib/mongo/monitoring/event/cmap/connection_closed.rb +103 -0
- data/lib/mongo/monitoring/event/cmap/connection_created.rb +64 -0
- data/lib/mongo/monitoring/event/cmap/connection_ready.rb +64 -0
- data/lib/mongo/monitoring/event/cmap/pool_cleared.rb +57 -0
- data/lib/mongo/monitoring/event/cmap/pool_closed.rb +57 -0
- data/lib/mongo/monitoring/event/cmap/pool_created.rb +63 -0
- data/lib/mongo/monitoring/event/command_started.rb +12 -3
- data/lib/mongo/monitoring/publishable.rb +10 -2
- data/lib/mongo/operation.rb +0 -1
- data/lib/mongo/operation/find/legacy/result.rb +1 -0
- data/lib/mongo/operation/list_collections/result.rb +7 -1
- data/lib/mongo/operation/result.rb +10 -1
- data/lib/mongo/operation/shared/executable.rb +15 -0
- data/lib/mongo/operation/shared/result/use_legacy_error_parser.rb +29 -0
- data/lib/mongo/operation/shared/specifiable.rb +0 -16
- data/lib/mongo/operation/update/legacy/result.rb +1 -0
- data/lib/mongo/protocol/compressed.rb +0 -2
- data/lib/mongo/protocol/msg.rb +25 -2
- data/lib/mongo/retryable.rb +171 -33
- data/lib/mongo/server.rb +26 -7
- data/lib/mongo/server/app_metadata.rb +0 -2
- data/lib/mongo/server/connectable.rb +8 -2
- data/lib/mongo/server/connection.rb +83 -13
- data/lib/mongo/server/connection_base.rb +1 -1
- data/lib/mongo/server/connection_pool.rb +439 -43
- data/lib/mongo/server/monitor/connection.rb +4 -1
- data/lib/mongo/session.rb +37 -5
- data/lib/mongo/session/session_pool.rb +2 -2
- data/lib/mongo/socket.rb +0 -2
- data/lib/mongo/socket/ssl.rb +0 -2
- data/lib/mongo/uri.rb +127 -66
- data/lib/mongo/uri/srv_protocol.rb +35 -13
- data/lib/mongo/version.rb +1 -1
- data/spec/README.md +190 -63
- data/spec/integration/change_stream_spec.rb +64 -0
- data/spec/integration/command_spec.rb +0 -7
- data/spec/integration/error_detection_spec.rb +39 -0
- data/spec/integration/read_concern.rb +83 -0
- data/spec/integration/retryable_writes_spec.rb +6 -50
- data/spec/integration/sdam_error_handling_spec.rb +60 -7
- data/spec/integration/ssl_uri_options_spec.rb +24 -0
- data/spec/integration/step_down_spec.rb +197 -0
- data/spec/lite_spec_helper.rb +4 -0
- data/spec/mongo/client_construction_spec.rb +42 -17
- data/spec/mongo/client_spec.rb +32 -1
- data/spec/mongo/cluster/socket_reaper_spec.rb +2 -2
- data/spec/mongo/cluster_spec.rb +36 -2
- data/spec/mongo/collection/view/aggregation_spec.rb +2 -0
- data/spec/mongo/collection/view/change_stream_spec.rb +28 -28
- data/spec/mongo/collection/view/readable_spec.rb +1 -1
- data/spec/mongo/collection/view_spec.rb +3 -1
- data/spec/mongo/cursor_spec.rb +5 -5
- data/spec/mongo/error/parser_spec.rb +61 -1
- data/spec/mongo/grid/stream/read_spec.rb +2 -2
- data/spec/mongo/monitoring/event/cmap/connection_check_out_failed_spec.rb +23 -0
- data/spec/mongo/monitoring/event/cmap/connection_check_out_started_spec.rb +19 -0
- data/spec/mongo/monitoring/event/cmap/connection_checked_in_spec.rb +23 -0
- data/spec/mongo/monitoring/event/cmap/connection_checked_out_spec.rb +23 -0
- data/spec/mongo/monitoring/event/cmap/connection_closed_spec.rb +27 -0
- data/spec/mongo/monitoring/event/cmap/connection_created_spec.rb +24 -0
- data/spec/mongo/monitoring/event/cmap/connection_ready_spec.rb +24 -0
- data/spec/mongo/monitoring/event/cmap/pool_cleared_spec.rb +19 -0
- data/spec/mongo/monitoring/event/cmap/pool_closed_spec.rb +19 -0
- data/spec/mongo/monitoring/event/cmap/pool_created_spec.rb +26 -0
- data/spec/mongo/operation/delete/bulk_spec.rb +1 -6
- data/spec/mongo/operation/delete/command_spec.rb +1 -1
- data/spec/mongo/operation/delete/op_msg_spec.rb +1 -1
- data/spec/mongo/operation/delete_spec.rb +4 -4
- data/spec/mongo/operation/insert/bulk_spec.rb +1 -1
- data/spec/mongo/operation/insert/command_spec.rb +1 -1
- data/spec/mongo/operation/insert/op_msg_spec.rb +1 -1
- data/spec/mongo/operation/update/bulk_spec.rb +1 -1
- data/spec/mongo/operation/update/command_spec.rb +2 -2
- data/spec/mongo/operation/update/op_msg_spec.rb +2 -2
- data/spec/mongo/protocol/msg_spec.rb +11 -0
- data/spec/mongo/retryable_spec.rb +78 -25
- data/spec/mongo/server/connection_pool_spec.rb +661 -126
- data/spec/mongo/server/connection_spec.rb +55 -7
- data/spec/mongo/server_spec.rb +5 -0
- data/spec/mongo/uri/srv_protocol_spec.rb +135 -2
- data/spec/mongo/uri_option_parsing_spec.rb +511 -0
- data/spec/mongo/uri_spec.rb +42 -6
- data/spec/spec_helper.rb +1 -84
- data/spec/spec_tests/cmap_spec.rb +50 -0
- data/spec/spec_tests/command_monitoring_spec.rb +7 -18
- data/spec/spec_tests/crud_spec.rb +3 -49
- data/spec/spec_tests/data/cmap/connection-must-have-id.yml +21 -0
- data/spec/spec_tests/data/cmap/connection-must-order-ids.yml +21 -0
- data/spec/spec_tests/data/cmap/pool-checkin-destroy-closed.yml +24 -0
- data/spec/spec_tests/data/cmap/pool-checkin-destroy-stale.yml +24 -0
- data/spec/spec_tests/data/cmap/pool-checkin-make-available.yml +21 -0
- data/spec/spec_tests/data/cmap/pool-checkin.yml +18 -0
- data/spec/spec_tests/data/cmap/pool-checkout-connection.yml +13 -0
- data/spec/spec_tests/data/cmap/pool-checkout-error-closed.yml +28 -0
- data/spec/spec_tests/data/cmap/pool-checkout-multiple.yml +34 -0
- data/spec/spec_tests/data/cmap/pool-checkout-no-idle.yml +31 -0
- data/spec/spec_tests/data/cmap/pool-checkout-no-stale.yml +29 -0
- data/spec/spec_tests/data/cmap/pool-close-destroy-conns.yml +26 -0
- data/spec/spec_tests/data/cmap/pool-close.yml +11 -0
- data/spec/spec_tests/data/cmap/pool-create-max-size.yml +56 -0
- data/spec/spec_tests/data/cmap/pool-create-min-size.yml +27 -0
- data/spec/spec_tests/data/cmap/pool-create-with-options.yml +20 -0
- data/spec/spec_tests/data/cmap/pool-create.yml +12 -0
- data/spec/spec_tests/data/cmap/wait-queue-fairness.yml +94 -0
- data/spec/spec_tests/data/cmap/wait-queue-timeout.yml +41 -0
- data/spec/spec_tests/data/retryable_reads/aggregate-serverErrors.yml +157 -0
- data/spec/spec_tests/data/retryable_reads/aggregate.yml +87 -0
- data/spec/spec_tests/data/retryable_reads/changeStreams-client.watch-serverErrors.yml +149 -0
- data/spec/spec_tests/data/retryable_reads/changeStreams-client.watch.yml +61 -0
- data/spec/spec_tests/data/retryable_reads/changeStreams-db.coll.watch-serverErrors.yml +149 -0
- data/spec/spec_tests/data/retryable_reads/changeStreams-db.coll.watch.yml +65 -0
- data/spec/spec_tests/data/retryable_reads/changeStreams-db.watch-serverErrors.yml +153 -0
- data/spec/spec_tests/data/retryable_reads/changeStreams-db.watch.yml +61 -0
- data/spec/spec_tests/data/retryable_reads/count-serverErrors.yml +150 -0
- data/spec/spec_tests/data/retryable_reads/count.yml +64 -0
- data/spec/spec_tests/data/retryable_reads/countDocuments-serverErrors.yml +150 -0
- data/spec/spec_tests/data/retryable_reads/countDocuments.yml +64 -0
- data/spec/spec_tests/data/retryable_reads/distinct-serverErrors.yml +156 -0
- data/spec/spec_tests/data/retryable_reads/distinct.yml +71 -0
- data/spec/spec_tests/data/retryable_reads/estimatedDocumentCount-serverErrors.yml +148 -0
- data/spec/spec_tests/data/retryable_reads/estimatedDocumentCount.yml +62 -0
- data/spec/spec_tests/data/retryable_reads/find-serverErrors.yml +160 -0
- data/spec/spec_tests/data/retryable_reads/find.yml +86 -0
- data/spec/spec_tests/data/retryable_reads/findOne-serverErrors.yml +154 -0
- data/spec/spec_tests/data/retryable_reads/findOne.yml +68 -0
- data/spec/spec_tests/data/retryable_reads/gridfs-download-serverErrors.yml +173 -0
- data/spec/spec_tests/data/retryable_reads/gridfs-download.yml +79 -0
- data/spec/spec_tests/data/retryable_reads/gridfs-downloadByName-serverErrors.yml +174 -0
- data/spec/spec_tests/data/retryable_reads/gridfs-downloadByName.yml +79 -0
- data/spec/spec_tests/data/retryable_reads/listCollectionNames-serverErrors.yml +143 -0
- data/spec/spec_tests/data/retryable_reads/listCollectionNames.yml +59 -0
- data/spec/spec_tests/data/retryable_reads/listCollectionObjects-serverErrors.yml +144 -0
- data/spec/spec_tests/data/retryable_reads/listCollectionObjects.yml +59 -0
- data/spec/spec_tests/data/retryable_reads/listCollections-serverErrors.yml +143 -0
- data/spec/spec_tests/data/retryable_reads/listCollections.yml +59 -0
- data/spec/spec_tests/data/retryable_reads/listDatabaseNames-serverErrors.yml +143 -0
- data/spec/spec_tests/data/retryable_reads/listDatabaseNames.yml +59 -0
- data/spec/spec_tests/data/retryable_reads/listDatabaseObjects-serverErrors.yml +144 -0
- data/spec/spec_tests/data/retryable_reads/listDatabaseObjects.yml +59 -0
- data/spec/spec_tests/data/retryable_reads/listDatabases-serverErrors.yml +144 -0
- data/spec/spec_tests/data/retryable_reads/listDatabases.yml +59 -0
- data/spec/spec_tests/data/retryable_reads/listIndexNames-serverErrors.yml +144 -0
- data/spec/spec_tests/data/retryable_reads/listIndexNames.yml +60 -0
- data/spec/spec_tests/data/retryable_reads/listIndexes-serverErrors.yml +145 -0
- data/spec/spec_tests/data/retryable_reads/listIndexes.yml +60 -0
- data/spec/spec_tests/data/retryable_reads/mapReduce.yml +60 -0
- data/spec/spec_tests/data/retryable_writes/bulkWrite-serverErrors.yml +10 -7
- data/spec/spec_tests/data/retryable_writes/bulkWrite.yml +15 -22
- data/spec/spec_tests/data/retryable_writes/deleteMany.yml +22 -0
- data/spec/spec_tests/data/retryable_writes/deleteOne-serverErrors.yml +8 -7
- data/spec/spec_tests/data/retryable_writes/deleteOne.yml +5 -8
- data/spec/spec_tests/data/retryable_writes/findOneAndDelete-serverErrors.yml +8 -7
- data/spec/spec_tests/data/retryable_writes/findOneAndDelete.yml +5 -8
- data/spec/spec_tests/data/retryable_writes/findOneAndReplace-serverErrors.yml +8 -7
- data/spec/spec_tests/data/retryable_writes/findOneAndReplace.yml +5 -8
- data/spec/spec_tests/data/retryable_writes/findOneAndUpdate-serverErrors.yml +8 -7
- data/spec/spec_tests/data/retryable_writes/findOneAndUpdate.yml +5 -8
- data/spec/spec_tests/data/retryable_writes/insertMany-serverErrors.yml +8 -7
- data/spec/spec_tests/data/retryable_writes/insertMany.yml +5 -8
- data/spec/spec_tests/data/retryable_writes/insertOne-serverErrors.yml +10 -45
- data/spec/spec_tests/data/retryable_writes/insertOne.yml +5 -8
- data/spec/spec_tests/data/retryable_writes/replaceOne-serverErrors.yml +8 -7
- data/spec/spec_tests/data/retryable_writes/replaceOne.yml +5 -8
- data/spec/spec_tests/data/retryable_writes/updateMany.yml +27 -0
- data/spec/spec_tests/data/retryable_writes/updateOne-serverErrors.yml +8 -7
- data/spec/spec_tests/data/retryable_writes/updateOne.yml +5 -14
- data/spec/spec_tests/data/transactions/abort.yml +7 -2
- data/spec/spec_tests/data/transactions/bulk.yml +7 -2
- data/spec/spec_tests/data/transactions/causal-consistency.yml +11 -4
- data/spec/spec_tests/data/transactions/commit.yml +11 -4
- data/spec/spec_tests/data/transactions/count.yml +64 -0
- data/spec/spec_tests/data/transactions/delete.yml +7 -2
- data/spec/spec_tests/data/transactions/error-labels.yml +8 -2
- data/spec/spec_tests/data/transactions/errors.yml +7 -2
- data/spec/spec_tests/data/transactions/findOneAndDelete.yml +7 -2
- data/spec/spec_tests/data/transactions/findOneAndReplace.yml +7 -2
- data/spec/spec_tests/data/transactions/findOneAndUpdate.yml +7 -2
- data/spec/spec_tests/data/transactions/insert.yml +9 -2
- data/spec/spec_tests/data/transactions/isolation.yml +7 -2
- data/spec/spec_tests/data/transactions/read-concern.yml +15 -6
- data/spec/spec_tests/data/transactions/read-pref.yml +7 -2
- data/spec/spec_tests/data/transactions/reads.yml +8 -48
- data/spec/spec_tests/data/transactions/retryable-abort.yml +7 -2
- data/spec/spec_tests/data/transactions/retryable-commit.yml +7 -2
- data/spec/spec_tests/data/transactions/retryable-writes.yml +7 -2
- data/spec/spec_tests/data/transactions/run-command.yml +7 -2
- data/spec/spec_tests/data/transactions/transaction-options.yml +7 -2
- data/spec/spec_tests/data/transactions/update.yml +7 -2
- data/spec/spec_tests/data/transactions/write-concern.yml +7 -2
- data/spec/spec_tests/data/transactions_api/callback-aborts.yml +6 -1
- data/spec/spec_tests/data/transactions_api/callback-commits.yml +6 -1
- data/spec/spec_tests/data/transactions_api/callback-retry.yml +6 -1
- data/spec/spec_tests/data/transactions_api/commit-retry.yml +6 -1
- data/spec/spec_tests/data/transactions_api/commit-transienttransactionerror-4.2.yml +6 -3
- data/spec/spec_tests/data/transactions_api/commit-transienttransactionerror.yml +6 -1
- data/spec/spec_tests/data/transactions_api/commit-writeconcernerror.yml +6 -1
- data/spec/spec_tests/data/transactions_api/commit.yml +6 -1
- data/spec/spec_tests/data/transactions_api/transaction-options.yml +6 -1
- data/spec/spec_tests/retryable_reads_spec.rb +11 -0
- data/spec/spec_tests/retryable_writes_spec.rb +4 -69
- data/spec/spec_tests/transactions_api_spec.rb +42 -37
- data/spec/spec_tests/transactions_spec.rb +42 -33
- data/spec/support/authorization.rb +12 -0
- data/spec/support/change_streams/operation.rb +1 -1
- data/spec/support/client_registry.rb +20 -0
- data/spec/support/cluster_config.rb +16 -15
- data/spec/support/cluster_tools.rb +346 -0
- data/spec/support/cmap.rb +367 -0
- data/spec/support/cmap/verifier.rb +46 -0
- data/spec/support/command_monitoring.rb +4 -6
- data/spec/support/common_shortcuts.rb +6 -0
- data/spec/support/connection_string.rb +2 -2
- data/spec/support/crud.rb +171 -184
- data/spec/support/crud/operation.rb +43 -0
- data/spec/support/crud/outcome.rb +53 -0
- data/spec/support/crud/read.rb +102 -12
- data/spec/support/crud/requirement.rb +69 -0
- data/spec/support/crud/spec.rb +68 -0
- data/spec/support/crud/test.rb +141 -0
- data/spec/support/crud/verifier.rb +96 -18
- data/spec/support/crud/write.rb +18 -3
- data/spec/support/event_subscriber.rb +15 -0
- data/spec/support/primary_socket.rb +2 -2
- data/spec/support/spec_config.rb +89 -20
- data/spec/support/transactions.rb +2 -306
- data/spec/support/transactions/operation.rb +7 -7
- data/spec/support/transactions/spec.rb +28 -0
- data/spec/support/transactions/test.rb +191 -0
- data/spec/support/utils.rb +123 -0
- metadata +202 -9
- metadata.gz.sig +0 -0
- data/lib/mongo/server/connection_pool/queue.rb +0 -359
- data/spec/mongo/server/connection_pool/queue_spec.rb +0 -353
- data/spec/support/transactions/verifier.rb +0 -97
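The most visible addition in the list above is the CMAP (Connection Monitoring and Pooling) event family under data/lib/mongo/monitoring/event/cmap/. As a rough sketch of how such events could be consumed — assuming the new monitoring topic is exposed the same way as the existing Mongo::Monitoring::COMMAND topic used elsewhere in this diff, and that pool events reach a subscriber via a published method as in the bundled CmapLogSubscriber — a user-level subscriber might look like:

    require 'mongo'

    # Minimal pool-event subscriber (illustrative; mirrors the shape of the
    # CmapLogSubscriber added in this release).
    class PoolEventPrinter
      # CMAP subscribers are assumed to receive each event via #published.
      def published(event)
        puts "CMAP event: #{event.class.name.split('::').last}"
      end
    end

    client = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')
    # Topic constant assumed from the new monitoring.rb additions.
    client.subscribe(Mongo::Monitoring::CONNECTION_POOL, PoolEventPrinter.new)
    client[:things].find.to_a  # checkout/checkin events fire around operations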
data/spec/spec_tests/transactions_spec.rb
@@ -7,51 +7,60 @@ describe 'Transactions' do
     spec = Mongo::Transactions::Spec.new(file)
 
     context(spec.description) do
-      spec
-
+      define_spec_tests_with_requirements(spec) do |req|
+        spec.tests.each do |test_factory|
+          test_instance = test_factory.call
 
-
-        require_transaction_support
+          context(test_instance.description) do
 
-
+            let(:test) { test_factory.call }
 
-
-
-
+            if test_instance.skip_reason
+              before do
+                skip test_instance.skip_reason
+              end
             end
-      end
-
-        before(:each) do
-          test.setup_test
-        end
 
-
-
-
+            before(:each) do
+              if req.satisfied?
+                test.setup_test
+              end
+            end
 
-
-
-
+            after(:each) do
+              if req.satisfied?
+                test.teardown_test
+              end
+            end
 
-
+            let(:results) do
+              test.run
+            end
 
-
-          verifier.verify_operation_result(results[:results])
-        end
+            let(:verifier) { Mongo::CRUD::Verifier.new(test) }
 
-
-
-
-      end
+            it 'returns the correct result' do
+              verifier.verify_operation_result(test_instance.expected_results, results[:results])
+            end
 
-
-
-        verifier.
+            it 'has the correct data in the collection', if: test_instance.outcome && test_instance.outcome.collection_data? do
+              results
+              verifier.verify_collection_data(
+                test_instance.outcome.collection_data,
+                results[:contents])
             end
 
-      test_instance.expectations
-        it
-          verifier.
+            if test_instance.expectations
+              it 'has the correct number of command_started events' do
+                verifier.verify_command_started_event_count(
+                  test_instance.expectations, results[:events])
+              end
+
+              test_instance.expectations.each_with_index do |expectation, i|
+                it "has the correct command_started event #{i}" do
+                  verifier.verify_command_started_event(
+                    test_instance.expectations, results[:events], i)
+                end
+              end
             end
           end
         end
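The rewrite above moves the runner to a factory pattern: test_factory.call runs once at spec-definition time (to name the examples and read expectations) and again inside let(:test) so each example gets a fresh instance. A condensed, self-contained illustration of the same pattern, with hypothetical names unrelated to the driver suite:

    # Hypothetical standalone example of the define-time vs. run-time split.
    RSpec.describe 'factory-defined specs' do
      factories = [ -> { Struct.new(:description).new('example test') } ]

      factories.each do |test_factory|
        test_instance = test_factory.call  # used only to name the examples

        context(test_instance.description) do
          let(:test) { test_factory.call } # fresh object for each example

          it 'does not share state across examples' do
            expect(test).not_to equal(test_instance)
          end
        end
      end
    end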
data/spec/support/authorization.rb
@@ -74,6 +74,18 @@ module Authorization
       ClientRegistry.instance.global_client('authorized_without_retry_writes')
     end
 
+    context.let(:authorized_client_without_retry_reads) do
+      ClientRegistry.instance.global_client('authorized_without_retry_reads')
+    end
+
+    context.let(:authorized_client_without_any_retry_reads) do
+      ClientRegistry.instance.global_client('authorized_without_any_retry_reads')
+    end
+
+    context.let(:authorized_client_without_any_retries) do
+      ClientRegistry.instance.global_client('authorized_without_any_retries')
+    end
+
     # Provides an authorized mongo client that has a Command subscriber.
     #
     # @since 2.5.1
data/spec/support/client_registry.rb
@@ -101,6 +101,18 @@ class ClientRegistry
     ).tap do |client|
       client.subscribe(Mongo::Monitoring::COMMAND, EventSubscriber)
     end
+  # Provides an authorized mongo client that uses legacy read retry logic.
+  when 'authorized_without_retry_reads'
+    global_client('authorized').with(
+      retry_reads: false,
+      server_selection_timeout: 4.27,
+    )
+  # Provides an authorized mongo client that does not retry reads at all.
+  when 'authorized_without_any_retry_reads'
+    global_client('authorized').with(
+      retry_reads: false, max_read_retries: 0,
+      server_selection_timeout: 4.27,
+    )
   # Provides an authorized mongo client that does not retry writes,
   # overriding global test suite option to retry writes if necessary.
   when 'authorized_without_retry_writes'
@@ -110,6 +122,14 @@ class ClientRegistry
     ).tap do |client|
       client.subscribe(Mongo::Monitoring::COMMAND, EventSubscriber)
     end
+  # Provides an authorized mongo client that does not retry reads or writes
+  # at all.
+  when 'authorized_without_any_retries'
+    global_client('authorized').with(
+      retry_reads: false, max_read_retries: 0,
+      retry_writes: false, max_write_retries: 0,
+      server_selection_timeout: 4.27,
+    )
   # Provides an unauthorized mongo client on the admin database, for use in
   # setting up the first admin root user.
   when 'admin_unauthorized'
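Combined with the authorization.rb hunk above, these registry entries give specs ready-made clients with retry behavior disabled. An illustrative use (the spec body is a placeholder; the option names come straight from the hunk):

    describe 'a test that must observe the first failure' do
      it 'uses a client with all retries off' do
        client = authorized_client_without_any_retries
        expect(client.options[:retry_reads]).to be false
        expect(client.options[:retry_writes]).to be false
        expect(client.options[:max_read_retries]).to eq 0
      end
    end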
data/spec/support/cluster_config.rb
@@ -3,28 +3,29 @@ require 'singleton'
 class ClusterConfig
   include Singleton
 
-  def
-
+  def basic_client
+    # Do not cache the result here so that if the client gets closed,
+    # client registry reconnects it in subsequent tests
+    ClientRegistry.instance.global_client('basic')
   end
 
   def single_server?
-
-  end
-
-  def server!
-    server = scanned_client.cluster.servers.first
-    if server.nil?
-      raise ScannedClientHasNoServers
-    end
-    server
+    basic_client.cluster.servers.length == 1
   end
 
   def mongos?
-
+    if @mongos.nil?
+      basic_client.cluster.next_primary
+      @mongos = basic_client.cluster.topology.is_a?(Mongo::Cluster::Topology::Sharded)
+    end
+    @mongos
   end
 
   def replica_set_name
-    @replica_set_name ||=
+    @replica_set_name ||= begin
+      basic_client.cluster.next_primary
+      basic_client.cluster.topology.replica_set_name
+    end
   end
 
   def server_version
@@ -74,7 +75,7 @@ class ClusterConfig
   def auth_enabled?
     if @auth_enabled.nil?
       @auth_enabled = begin
-
+        basic_client.use(:admin).command(getCmdLineOpts: 1).first["argv"].include?("--auth")
       rescue => e
         e.message =~ /(not authorized)|(unauthorized)|(no users authenticated)|(requires authentication)/
       end
@@ -84,7 +85,7 @@ class ClusterConfig
 
   def topology
     @topology ||= begin
-      topology =
+      topology = basic_client.cluster.topology.class.name.sub(/.*::/, '')
       topology = topology.gsub(/([A-Z])/) { |match| '_' + match.downcase }.sub(/^_/, '')
       if topology =~ /^replica_set/
         topology = 'replica_set'
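The reworked ClusterConfig memoizes topology facts behind a single basic_client, calling cluster.next_primary first so the topology is actually populated before it is inspected. A typical guard built on these helpers might read (hypothetical spec fragment):

    # Skip examples that only make sense against a sharded cluster.
    before do
      unless ClusterConfig.instance.mongos?
        skip 'This test requires a sharded cluster'
      end
    end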
data/spec/support/cluster_tools.rb
@@ -0,0 +1,346 @@
+require 'singleton'
+
+# There is an undocumented {replSetStepUp: 1} command which can be used to
+# ask a particular secondary to become a primary. It has existed since server
+# 3.6 or earlier.
+#
+# Alternatively, to encourage a specific server to be selected, the recommended
+# way is to set priority of that server higher. Changing priority requires
+# reconfiguring the replica set, which in turn requires the replica set to
+# have a primary.
+#
+# There are three timeouts that affect elections and stepdowns, when asking a
+# server to step down:
+#
+# - secondaryCatchUpPeriodSecs - how long the existing primary will wait for
+#   secondaries to catch up prior to actually stepping down.
+# - replSetStepDown parameter - how long the existing primary will decline
+#   getting elected as the new primary.
+# - electionTimeoutMillis - how long, after a server notices that there is
+#   no primary, that server will vote or call elections.
+#
+# These parameters must be configured in a certain way;
+#
+# - replSetStepDown should generally be higher than secondaryCatchUpPeriodSecs.
+#   If a server is asked to step down and it spends all of its replSetStepDown
+#   time waiting for secondaries to catch up, the stepdown itself will not
+#   be performed and an error will be returned for the stepdown command.
+# - secondaryCatchUpPeriodSecs + electionTimeoutMillis should be lower than
+#   replSetStepDown, so that all of the other servers can participate in
+#   the election prior to the primary which is stepping down becoming eligible
+#   to vote and potentially getting reelected.
+#
+# Settings used by this test:
+#
+# - replSetStepDown = 4 seconds
+# - secondaryCatchUpPeriodSecs = 2 seconds
+# - electionTimeoutMillis = 1 second
+#
+# Recommended guidance for working elections:
+# - Set priority of all nodes other than old primary and new desired primary
+#   to 0
+# - Turn off election handoff
+# - Use stepdown & stepup commands (even when we don't care which server becomes
+#   the new primary
+# - Put step up command in retry loop
+
+class ClusterTools
+  include Singleton
+
+  def force_step_down
+    admin_client.database.command(
+      replSetStepDown: 1, force: true)
+  end
+
+  # https://docs.mongodb.com/manual/reference/parameters/#param.enableElectionHandoff
+  def set_election_handoff(value)
+    unless [true, false].include?(value)
+      raise ArgumentError, 'Value must be true or false'
+    end
+
+    direct_client_for_each_server do |client|
+      client.use(:admin).database.command(setParameter: 1, enableElectionHandoff: value)
+    end
+  end
+
+  # Sets election timeout to the specified value, in seconds.
+  # Election timeout specifies how long nodes in a cluster wait to vote/ask
+  # for elections once they lose connection with the active primary.
+  #
+  # This in theory generally safe to do in the test suite and leave the cluster
+  # at the 1 second setting, because the tests are run against a local
+  # deployment which shouldn't have any elections in it at all, unless we are
+  # testing step down behavior in which case we want the election timeout
+  # to be low. In practice a low election timeout results in intermittent
+  # test failures, therefore the timeout should be restored to its default
+  # value once step down tests are complete.
+  def set_election_timeout(timeout)
+    cfg = get_rs_config
+    cfg['settings']['electionTimeoutMillis'] = timeout * 1000
+    set_rs_config(cfg)
+  end
+
+  # Resets priorities on all replica set members to 1.
+  #
+  # Use at the end of a test run.
+  def reset_priorities
+    cfg = get_rs_config
+    cfg['members'].each do |member|
+      member['priority'] = 1
+    end
+    set_rs_config(cfg)
+  end
+
+  # Requests that the current primary in the RS steps down.
+  def step_down
+    admin_client.database.command(
+      replSetStepDown: 4, secondaryCatchUpPeriodSecs: 2)
+  rescue Mongo::Error::OperationFailure => e
+    # While waiting for secondaries to catch up before stepping down, this node decided to step down for other reasons (189)
+    if e.code == 189
+      # success
+    else
+      raise
+    end
+  end
+
+  # Attempts to elect the server at the specified address as the new primary
+  # by asking it to step up.
+  #
+  # @param [ Mongo::Address ] address
+  def step_up(address)
+    client = direct_client(address)
+    start = Time.now
+    loop do
+      begin
+        client.database.command(replSetStepUp: 1)
+        break
+      rescue Mongo::Error::OperationFailure => e
+        # Election failed. (125)
+        if e.code == 125
+          # Possible reason is the node we are trying to elect has blacklisted
+          # itself. This is where {replSetFreeze: 0} should make it eligible
+          # for election again but this seems to not always work.
+        else
+          raise
+        end
+
+        if Time.now > start + 10
+          raise e
+        end
+      end
+    end
+    reset_server_states
+  end
+
+  # The recommended guidance for changing a primary is:
+  #
+  # - turn off election handoff
+  # - pick a server to be the new primary
+  # - set the target's priority to 10, existing primary's priority to 1,
+  #   other servers' priorities to 0
+  # - call step down on the existing primary
+  # - call step up on the target in a loop until it becomes the primary
+  def change_primary
+    existing_primary = admin_client.cluster.next_primary
+    existing_primary_address = existing_primary.address
+
+    target = admin_client.cluster.servers_list.detect do |server|
+      server.address != existing_primary_address
+    end
+
+    cfg = get_rs_config
+    cfg['members'].each do |member|
+      member['priority'] = case member['host']
+      when existing_primary_address.to_s
+        1
+      when target.address.to_s
+        10
+      else
+        0
+      end
+    end
+    set_rs_config(cfg)
+
+    if unfreeze_server(target.address)
+      # Target server self-elected as primary, no further action is needed.
+      return
+    end
+
+    step_down
+    persistently_step_up(target.address)
+
+    new_primary = admin_client.cluster.next_primary
+    puts "#{Time.now} [CT] Primary changed to #{new_primary.address}"
+  end
+
+  def persistently_step_up(address)
+    start = Time.now
+    loop do
+      puts "#{Time.now} [CT] Asking #{address} to step up"
+
+      step_up(address)
+
+      if admin_client.cluster.next_primary.address == address
+        break
+      end
+
+      if Time.now - start > 10
+        raise "Unable to get #{address} instated as primary after 10 seconds"
+      end
+    end
+  end
+
+  # Attempts to elect the server at the specified address as the new primary
+  # by manipulating priorities.
+  #
+  # This method requires that there is an active primary in the replica set at
+  # the time of the call (presumably a different one).
+  #
+  # @param [ Mongo::Address ] address
+  def force_primary(address)
+    current_primary = admin_client.cluster.next_primary
+    if current_primary.address == address
+      raise "Attempting to set primary to #{address} but it is already the primary"
+    end
+    encourage_primary(address)
+
+    if unfreeze_server(address)
+      # Target server self-elected as primary, no further action is needed.
+      return
+    end
+
+    step_down
+    persistently_step_up(address)
+    admin_client.cluster.next_primary.unknown!
+    new_primary = admin_client.cluster.next_primary
+    if new_primary.address != address
+      raise "Elected primary #{new_primary.address} is not what we wanted (#{address})"
+    end
+  end
+
+  # Adjusts replica set configuration so that the next election is likely
+  # to result in the server at the specified address becoming a primary.
+  # Address should be a Mongo::Address object.
+  #
+  # This method requires that there is an active primary in the replica set at
+  # the time of the call.
+  #
+  # @param [ Mongo::Address ] address
+  def encourage_primary(address)
+    existing_primary = admin_client.cluster.next_primary
+    cfg = get_rs_config
+    found = false
+    cfg['members'].each do |member|
+      if member['host'] == address.to_s
+        member['priority'] = 10
+        found = true
+      elsif member['host'] == existing_primary.address.to_s
+        member['priority'] = 1
+      else
+        member['priority'] = 0
+      end
+    end
+    unless found
+      raise "No RS member for #{address}"
+    end
+
+    set_rs_config(cfg)
+  end
+
+  # Allows the server at the specified address to run for elections and
+  # potentially become a primary. Use after issuing a step down command
+  # to clear the prohibtion on the stepped down server to be a primary.
+  #
+  # Returns true if the server at address became a primary, such that
+  # a step up command is not necessary.
+  def unfreeze_server(address)
+    begin
+      direct_client(address).use('admin').database.command(replSetFreeze: 0)
+    rescue Mongo::Error::OperationFailure => e
+      # Mongo::Error::OperationFailure: cannot freeze node when primary or running for election. state: Primary (95)
+      if e.code == 95
+        # The server we want to become primary may have already become the
+        # primary by holding a spontaneous election and winning due to the
+        # priorities we have set.
+        admin_client.cluster.servers_list.each do |server|
+          server.unknown!
+        end
+        if admin_client.cluster.next_primary.address == address
+          puts "#{Time.now} [CT] Primary self-elected to #{address}"
+          return true
+        end
+      end
+      raise
+    end
+    false
+  end
+
+  def unfreeze_all
+    admin_client.cluster.servers_list.each do |server|
+      client = direct_client(server.address)
+      # Primary refuses to be unfrozen with this message:
+      # cannot freeze node when primary or running for election. state: Primary (95)
+      if server != admin_client.cluster.next_primary
+        client.use('admin').database.command(replSetFreeze: 0)
+      end
+    end
+  end
+
+  # Gets the current replica set configuration.
+  def get_rs_config
+    result = admin_client.database.command(replSetGetConfig: 1)
+    doc = result.reply.documents.first
+    if doc['ok'] != 1
+      raise 'Failed to get RS config'
+    end
+    doc['config']
+  end
+
+  # Reconfigures the replica set with the specified configuration.
+  # Automatically increases RS version in the process.
+  def set_rs_config(config)
+    config = config.dup
+    config['version'] += 1
+    result = admin_client.database.command(replSetReconfig: config)
+    doc = result.reply.documents.first
+    if doc['ok'] != 1
+      raise 'Failed to reconfigure RS'
+    end
+  end
+
+  private
+
+  def admin_client
+    # Since we are triggering elections, we need to have a higher server
+    # selection timeout applied. The default timeout for tests assumes a
+    # stable deployment.
+    @admin_client ||= ClientRegistry.instance.global_client('root_authorized_admin').
+      with(server_selection_timeout: 15)
+  end
+
+  def direct_client(address)
+    @direct_clients ||= {}
+    @direct_clients[address] ||= ClientRegistry.instance.new_local_client(
+      [address.to_s],
+      SpecConfig.instance.test_options.merge(
+      SpecConfig.instance.auth_options).merge(
+      connect: :direct, server_selection_timeout: 10))
+  end
+
+  def each_server(&block)
+    admin_client.cluster.servers_list.each(&block)
+  end
+
+  def direct_client_for_each_server(&block)
+    each_server do |server|
+      yield direct_client(server.address)
+    end
+  end
+
+  def reset_server_states
+    each_server do |server|
+      server.unknown!
+    end
+  end
+end
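The new step_down_spec.rb presumably drives this helper. As a sketch of the intended call sequence under the guidance in the file's header comment (the spec scaffolding is illustrative, and 10 seconds is assumed to be the server's default election timeout):

    describe 'primary change' do
      before(:all) do
        ClusterTools.instance.set_election_timeout(1)   # fast elections for the test
        ClusterTools.instance.set_election_handoff(false)
      end

      after(:all) do
        ClusterTools.instance.set_election_timeout(10)  # assumed server default
        ClusterTools.instance.set_election_handoff(true)
        ClusterTools.instance.reset_priorities
        ClusterTools.instance.unfreeze_all
      end

      it 'elects a different primary' do
        ClusterTools.instance.change_primary
      end
    end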