mongo 2.13.0.beta1 → 2.14.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +1 -5
- data/Rakefile +50 -9
- data/lib/mongo.rb +13 -2
- data/lib/mongo/address.rb +1 -1
- data/lib/mongo/address/ipv4.rb +1 -1
- data/lib/mongo/address/ipv6.rb +1 -1
- data/lib/mongo/auth/aws/request.rb +31 -5
- data/lib/mongo/bulk_write.rb +18 -0
- data/lib/mongo/caching_cursor.rb +74 -0
- data/lib/mongo/client.rb +238 -31
- data/lib/mongo/cluster.rb +56 -20
- data/lib/mongo/cluster/sdam_flow.rb +13 -10
- data/lib/mongo/cluster/topology/replica_set_no_primary.rb +3 -2
- data/lib/mongo/cluster/topology/sharded.rb +1 -1
- data/lib/mongo/cluster/topology/single.rb +2 -2
- data/lib/mongo/collection.rb +66 -24
- data/lib/mongo/collection/view.rb +24 -20
- data/lib/mongo/collection/view/aggregation.rb +25 -4
- data/lib/mongo/collection/view/builder/find_command.rb +38 -18
- data/lib/mongo/collection/view/explainable.rb +27 -8
- data/lib/mongo/collection/view/iterable.rb +72 -12
- data/lib/mongo/collection/view/readable.rb +19 -3
- data/lib/mongo/collection/view/writable.rb +55 -5
- data/lib/mongo/crypt/encryption_io.rb +6 -6
- data/lib/mongo/cursor.rb +16 -3
- data/lib/mongo/database.rb +37 -4
- data/lib/mongo/database/view.rb +18 -3
- data/lib/mongo/distinguishing_semaphore.rb +55 -0
- data/lib/mongo/error.rb +5 -0
- data/lib/mongo/error/invalid_read_concern.rb +28 -0
- data/lib/mongo/error/invalid_server_auth_host.rb +22 -0
- data/lib/mongo/error/invalid_session.rb +2 -1
- data/lib/mongo/error/operation_failure.rb +11 -5
- data/lib/mongo/error/server_certificate_revoked.rb +22 -0
- data/lib/mongo/error/sessions_not_supported.rb +35 -0
- data/lib/mongo/error/unsupported_option.rb +14 -12
- data/lib/mongo/event/base.rb +6 -0
- data/lib/mongo/grid/file.rb +5 -0
- data/lib/mongo/grid/file/chunk.rb +2 -0
- data/lib/mongo/grid/fs_bucket.rb +15 -13
- data/lib/mongo/grid/stream/write.rb +9 -3
- data/lib/mongo/index/view.rb +3 -0
- data/lib/mongo/lint.rb +2 -1
- data/lib/mongo/logger.rb +3 -3
- data/lib/mongo/monitoring.rb +38 -0
- data/lib/mongo/monitoring/command_log_subscriber.rb +10 -2
- data/lib/mongo/monitoring/event/command_failed.rb +11 -0
- data/lib/mongo/monitoring/event/command_started.rb +37 -2
- data/lib/mongo/monitoring/event/command_succeeded.rb +11 -0
- data/lib/mongo/monitoring/event/server_closed.rb +1 -1
- data/lib/mongo/monitoring/event/server_description_changed.rb +27 -4
- data/lib/mongo/monitoring/event/server_heartbeat_failed.rb +9 -2
- data/lib/mongo/monitoring/event/server_heartbeat_started.rb +9 -2
- data/lib/mongo/monitoring/event/server_heartbeat_succeeded.rb +9 -2
- data/lib/mongo/monitoring/event/server_opening.rb +1 -1
- data/lib/mongo/monitoring/event/topology_changed.rb +1 -1
- data/lib/mongo/monitoring/event/topology_closed.rb +1 -1
- data/lib/mongo/monitoring/event/topology_opening.rb +1 -1
- data/lib/mongo/monitoring/publishable.rb +6 -3
- data/lib/mongo/monitoring/server_description_changed_log_subscriber.rb +9 -1
- data/lib/mongo/monitoring/topology_changed_log_subscriber.rb +1 -1
- data/lib/mongo/operation.rb +2 -0
- data/lib/mongo/operation/aggregate/result.rb +9 -8
- data/lib/mongo/operation/collections_info/command.rb +5 -0
- data/lib/mongo/operation/collections_info/result.rb +18 -1
- data/lib/mongo/operation/delete/bulk_result.rb +2 -0
- data/lib/mongo/operation/delete/result.rb +3 -0
- data/lib/mongo/operation/explain/command.rb +4 -0
- data/lib/mongo/operation/explain/legacy.rb +4 -0
- data/lib/mongo/operation/explain/op_msg.rb +6 -0
- data/lib/mongo/operation/explain/result.rb +3 -0
- data/lib/mongo/operation/find/legacy/result.rb +2 -0
- data/lib/mongo/operation/find/result.rb +13 -0
- data/lib/mongo/operation/get_more/result.rb +3 -0
- data/lib/mongo/operation/indexes/result.rb +5 -0
- data/lib/mongo/operation/insert/bulk_result.rb +5 -0
- data/lib/mongo/operation/insert/result.rb +5 -0
- data/lib/mongo/operation/list_collections/result.rb +5 -0
- data/lib/mongo/operation/map_reduce/result.rb +10 -0
- data/lib/mongo/operation/parallel_scan/result.rb +4 -0
- data/lib/mongo/operation/result.rb +35 -6
- data/lib/mongo/operation/shared/bypass_document_validation.rb +1 -0
- data/lib/mongo/operation/shared/causal_consistency_supported.rb +1 -0
- data/lib/mongo/operation/shared/collections_info_or_list_collections.rb +2 -0
- data/lib/mongo/operation/shared/executable.rb +1 -0
- data/lib/mongo/operation/shared/idable.rb +2 -1
- data/lib/mongo/operation/shared/limited.rb +1 -0
- data/lib/mongo/operation/shared/object_id_generator.rb +1 -0
- data/lib/mongo/operation/shared/result/aggregatable.rb +1 -0
- data/lib/mongo/operation/shared/sessions_supported.rb +1 -0
- data/lib/mongo/operation/shared/specifiable.rb +1 -0
- data/lib/mongo/operation/shared/write.rb +1 -0
- data/lib/mongo/operation/shared/write_concern_supported.rb +1 -0
- data/lib/mongo/operation/update/legacy/result.rb +7 -0
- data/lib/mongo/operation/update/result.rb +8 -0
- data/lib/mongo/operation/users_info/result.rb +3 -0
- data/lib/mongo/protocol/message.rb +47 -10
- data/lib/mongo/protocol/msg.rb +34 -1
- data/lib/mongo/protocol/query.rb +36 -0
- data/lib/mongo/protocol/serializers.rb +5 -2
- data/lib/mongo/query_cache.rb +242 -0
- data/lib/mongo/retryable.rb +8 -1
- data/lib/mongo/server.rb +15 -4
- data/lib/mongo/server/app_metadata.rb +27 -3
- data/lib/mongo/server/connection.rb +4 -4
- data/lib/mongo/server/connection_base.rb +38 -12
- data/lib/mongo/server/connection_common.rb +2 -2
- data/lib/mongo/server/connection_pool.rb +3 -0
- data/lib/mongo/server/description.rb +13 -1
- data/lib/mongo/server/monitor.rb +76 -44
- data/lib/mongo/server/monitor/connection.rb +57 -9
- data/lib/mongo/server/pending_connection.rb +14 -4
- data/lib/mongo/server/push_monitor.rb +173 -0
- data/{spec/runners/transactions/context.rb → lib/mongo/server/push_monitor/connection.rb} +9 -14
- data/lib/mongo/server_selector.rb +0 -1
- data/lib/mongo/server_selector/base.rb +583 -1
- data/lib/mongo/server_selector/nearest.rb +1 -6
- data/lib/mongo/server_selector/primary.rb +1 -6
- data/lib/mongo/server_selector/primary_preferred.rb +7 -10
- data/lib/mongo/server_selector/secondary.rb +1 -6
- data/lib/mongo/server_selector/secondary_preferred.rb +1 -7
- data/lib/mongo/session.rb +7 -1
- data/lib/mongo/socket.rb +26 -12
- data/lib/mongo/socket/ocsp_cache.rb +97 -0
- data/lib/mongo/socket/ocsp_verifier.rb +368 -0
- data/lib/mongo/socket/ssl.rb +46 -25
- data/lib/mongo/socket/tcp.rb +1 -1
- data/lib/mongo/srv/monitor.rb +7 -13
- data/lib/mongo/srv/resolver.rb +14 -10
- data/lib/mongo/timeout.rb +2 -0
- data/lib/mongo/topology_version.rb +9 -0
- data/lib/mongo/uri.rb +21 -390
- data/lib/mongo/uri/options_mapper.rb +582 -0
- data/lib/mongo/uri/srv_protocol.rb +3 -2
- data/lib/mongo/utils.rb +73 -0
- data/lib/mongo/version.rb +1 -1
- data/spec/NOTES.aws-auth.md +12 -7
- data/spec/README.aws-auth.md +2 -2
- data/spec/README.md +63 -1
- data/spec/integration/awaited_ismaster_spec.rb +28 -0
- data/spec/integration/bson_symbol_spec.rb +4 -2
- data/spec/integration/bulk_write_spec.rb +67 -0
- data/spec/integration/change_stream_examples_spec.rb +6 -2
- data/spec/integration/change_stream_spec.rb +1 -1
- data/spec/integration/check_clean_slate_spec.rb +16 -0
- data/spec/integration/client_authentication_options_spec.rb +92 -28
- data/spec/integration/client_construction_spec.rb +1 -0
- data/spec/integration/client_side_encryption/auto_encryption_bulk_writes_spec.rb +9 -5
- data/spec/integration/connect_single_rs_name_spec.rb +5 -2
- data/spec/integration/connection_pool_populator_spec.rb +4 -2
- data/spec/integration/connection_spec.rb +7 -4
- data/spec/integration/crud_spec.rb +4 -4
- data/spec/integration/cursor_reaping_spec.rb +54 -18
- data/spec/integration/docs_examples_spec.rb +6 -0
- data/spec/integration/fork_reconnect_spec.rb +56 -1
- data/spec/integration/grid_fs_bucket_spec.rb +48 -0
- data/spec/integration/heartbeat_events_spec.rb +4 -23
- data/spec/integration/ocsp_connectivity_spec.rb +26 -0
- data/spec/integration/ocsp_verifier_cache_spec.rb +188 -0
- data/spec/integration/ocsp_verifier_spec.rb +334 -0
- data/spec/integration/query_cache_spec.rb +1045 -0
- data/spec/integration/query_cache_transactions_spec.rb +190 -0
- data/spec/integration/read_concern_spec.rb +1 -1
- data/spec/integration/retryable_errors_spec.rb +1 -1
- data/spec/integration/retryable_writes/retryable_writes_40_and_newer_spec.rb +1 -0
- data/spec/integration/retryable_writes/shared/performs_legacy_retries.rb +4 -2
- data/spec/integration/retryable_writes/shared/performs_modern_retries.rb +3 -3
- data/spec/integration/retryable_writes/shared/performs_no_retries.rb +2 -2
- data/spec/integration/sdam_error_handling_spec.rb +122 -15
- data/spec/integration/sdam_events_spec.rb +80 -6
- data/spec/integration/sdam_prose_spec.rb +64 -0
- data/spec/integration/server_monitor_spec.rb +25 -1
- data/spec/integration/server_selection_spec.rb +36 -0
- data/spec/integration/size_limit_spec.rb +23 -5
- data/spec/integration/srv_monitoring_spec.rb +38 -3
- data/spec/integration/srv_spec.rb +56 -0
- data/spec/integration/ssl_uri_options_spec.rb +2 -2
- data/spec/integration/transactions_examples_spec.rb +17 -7
- data/spec/integration/zlib_compression_spec.rb +25 -0
- data/spec/lite_spec_helper.rb +20 -9
- data/spec/mongo/address_spec.rb +1 -1
- data/spec/mongo/auth/aws/request_region_spec.rb +42 -0
- data/spec/mongo/auth/aws/request_spec.rb +76 -0
- data/spec/mongo/auth/scram_spec.rb +1 -1
- data/spec/mongo/auth/user_spec.rb +1 -1
- data/spec/mongo/bulk_write_spec.rb +2 -2
- data/spec/mongo/caching_cursor_spec.rb +70 -0
- data/spec/mongo/client_construction_spec.rb +386 -3
- data/spec/mongo/client_encryption_spec.rb +16 -10
- data/spec/mongo/client_spec.rb +85 -3
- data/spec/mongo/cluster/topology/replica_set_spec.rb +53 -10
- data/spec/mongo/cluster/topology/sharded_spec.rb +1 -1
- data/spec/mongo/cluster/topology/single_spec.rb +19 -8
- data/spec/mongo/cluster/topology/unknown_spec.rb +1 -1
- data/spec/mongo/cluster/topology_spec.rb +1 -1
- data/spec/mongo/cluster_spec.rb +37 -35
- data/spec/mongo/collection/view/change_stream_resume_spec.rb +7 -7
- data/spec/mongo/collection/view/explainable_spec.rb +87 -4
- data/spec/mongo/collection/view/map_reduce_spec.rb +2 -0
- data/spec/mongo/collection/view/readable_spec.rb +36 -0
- data/spec/mongo/collection_spec.rb +572 -0
- data/spec/mongo/crypt/auto_decryption_context_spec.rb +1 -1
- data/spec/mongo/crypt/auto_encryption_context_spec.rb +1 -1
- data/spec/mongo/crypt/binary_spec.rb +1 -6
- data/spec/mongo/crypt/binding/binary_spec.rb +1 -6
- data/spec/mongo/crypt/binding/context_spec.rb +2 -7
- data/spec/mongo/crypt/binding/helpers_spec.rb +1 -6
- data/spec/mongo/crypt/binding/mongocrypt_spec.rb +2 -7
- data/spec/mongo/crypt/binding/status_spec.rb +1 -6
- data/spec/mongo/crypt/binding/version_spec.rb +1 -6
- data/spec/mongo/crypt/data_key_context_spec.rb +1 -1
- data/spec/mongo/crypt/explicit_decryption_context_spec.rb +1 -1
- data/spec/mongo/crypt/explicit_encryption_context_spec.rb +1 -1
- data/spec/mongo/crypt/status_spec.rb +1 -6
- data/spec/mongo/database_spec.rb +353 -8
- data/spec/mongo/distinguishing_semaphore_spec.rb +63 -0
- data/spec/mongo/error/no_server_available_spec.rb +1 -1
- data/spec/mongo/error/operation_failure_spec.rb +40 -0
- data/spec/mongo/index/view_spec.rb +148 -2
- data/spec/mongo/logger_spec.rb +13 -11
- data/spec/mongo/monitoring/event/server_closed_spec.rb +1 -1
- data/spec/mongo/monitoring/event/server_description_changed_spec.rb +1 -4
- data/spec/mongo/monitoring/event/server_opening_spec.rb +1 -1
- data/spec/mongo/monitoring/event/topology_changed_spec.rb +1 -1
- data/spec/mongo/monitoring/event/topology_closed_spec.rb +1 -1
- data/spec/mongo/monitoring/event/topology_opening_spec.rb +1 -1
- data/spec/mongo/operation/delete/op_msg_spec.rb +3 -3
- data/spec/mongo/operation/insert/command_spec.rb +2 -2
- data/spec/mongo/operation/insert/op_msg_spec.rb +3 -3
- data/spec/mongo/operation/read_preference_op_msg_spec.rb +1 -1
- data/spec/mongo/operation/update/command_spec.rb +2 -2
- data/spec/mongo/operation/update/op_msg_spec.rb +3 -3
- data/spec/mongo/protocol/msg_spec.rb +10 -0
- data/spec/mongo/query_cache_spec.rb +280 -0
- data/spec/mongo/semaphore_spec.rb +51 -0
- data/spec/mongo/server/app_metadata_shared.rb +82 -2
- data/spec/mongo/server/connection_auth_spec.rb +2 -2
- data/spec/mongo/server/connection_pool_spec.rb +7 -3
- data/spec/mongo/server/connection_spec.rb +15 -8
- data/spec/mongo/server/description_spec.rb +18 -0
- data/spec/mongo/server_selector/nearest_spec.rb +23 -23
- data/spec/mongo/server_selector/primary_preferred_spec.rb +26 -26
- data/spec/mongo/server_selector/primary_spec.rb +9 -9
- data/spec/mongo/server_selector/secondary_preferred_spec.rb +22 -22
- data/spec/mongo/server_selector/secondary_spec.rb +18 -18
- data/spec/mongo/server_selector_spec.rb +6 -6
- data/spec/mongo/session_spec.rb +35 -0
- data/spec/mongo/socket/ssl_spec.rb +4 -4
- data/spec/mongo/socket_spec.rb +1 -1
- data/spec/mongo/uri/srv_protocol_spec.rb +64 -33
- data/spec/mongo/uri_option_parsing_spec.rb +11 -11
- data/spec/mongo/uri_spec.rb +68 -41
- data/spec/mongo/utils_spec.rb +39 -0
- data/spec/runners/auth.rb +3 -0
- data/spec/runners/change_streams/test.rb +3 -3
- data/spec/runners/cmap.rb +1 -1
- data/spec/runners/command_monitoring.rb +3 -34
- data/spec/runners/connection_string.rb +35 -124
- data/spec/runners/crud/context.rb +9 -5
- data/spec/runners/crud/operation.rb +59 -27
- data/spec/runners/crud/spec.rb +0 -8
- data/spec/runners/crud/test.rb +1 -1
- data/spec/runners/crud/test_base.rb +0 -19
- data/spec/runners/sdam.rb +2 -2
- data/spec/runners/server_selection.rb +242 -28
- data/spec/runners/transactions.rb +12 -12
- data/spec/runners/transactions/operation.rb +151 -25
- data/spec/runners/transactions/test.rb +62 -18
- data/spec/shared/LICENSE +20 -0
- data/spec/shared/lib/mrss/child_process_helper.rb +80 -0
- data/spec/shared/lib/mrss/constraints.rb +303 -0
- data/spec/shared/lib/mrss/lite_constraints.rb +175 -0
- data/spec/shared/lib/mrss/spec_organizer.rb +149 -0
- data/spec/spec_helper.rb +3 -1
- data/spec/spec_tests/cmap_spec.rb +7 -3
- data/spec/spec_tests/command_monitoring_spec.rb +22 -12
- data/spec/spec_tests/crud_spec.rb +1 -1
- data/spec/spec_tests/data/change_streams/change-streams-errors.yml +4 -9
- data/spec/spec_tests/data/change_streams/change-streams-resume-whitelist.yml +66 -0
- data/spec/spec_tests/data/change_streams/change-streams.yml +0 -1
- data/spec/spec_tests/data/cmap/pool-checkout-connection.yml +6 -2
- data/spec/spec_tests/data/cmap/pool-create-min-size.yml +3 -0
- data/spec/spec_tests/data/connection_string/valid-warnings.yml +24 -0
- data/spec/spec_tests/data/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.yml +15 -0
- data/spec/spec_tests/data/max_staleness/ReplicaSetNoPrimary/NoKnownServers.yml +4 -3
- data/spec/spec_tests/data/max_staleness/Unknown/SmallMaxStaleness.yml +1 -0
- data/spec/spec_tests/data/sdam_integration/cancel-server-check.yml +96 -0
- data/spec/spec_tests/data/sdam_integration/connectTimeoutMS.yml +88 -0
- data/spec/spec_tests/data/sdam_integration/find-network-error.yml +83 -0
- data/spec/spec_tests/data/sdam_integration/find-shutdown-error.yml +116 -0
- data/spec/spec_tests/data/sdam_integration/insert-network-error.yml +86 -0
- data/spec/spec_tests/data/sdam_integration/insert-shutdown-error.yml +115 -0
- data/spec/spec_tests/data/sdam_integration/isMaster-command-error.yml +168 -0
- data/spec/spec_tests/data/sdam_integration/isMaster-network-error.yml +162 -0
- data/spec/spec_tests/data/sdam_integration/isMaster-timeout.yml +229 -0
- data/spec/spec_tests/data/sdam_integration/rediscover-quickly-after-step-down.yml +87 -0
- data/spec/spec_tests/data/sdam_monitoring/discovered_standalone.yml +1 -3
- data/spec/spec_tests/data/sdam_monitoring/standalone.yml +2 -2
- data/spec/spec_tests/data/sdam_monitoring/standalone_repeated.yml +2 -2
- data/spec/spec_tests/data/sdam_monitoring/standalone_suppress_equal_description_changes.yml +2 -2
- data/spec/spec_tests/data/sdam_monitoring/standalone_to_rs_with_me_mismatch.yml +2 -2
- data/spec/spec_tests/data/uri_options/auth-options.yml +25 -0
- data/spec/spec_tests/data/uri_options/compression-options.yml +6 -3
- data/spec/spec_tests/data/uri_options/read-preference-options.yml +24 -0
- data/spec/spec_tests/data/uri_options/ruby-connection-options.yml +1 -0
- data/spec/spec_tests/data/uri_options/tls-options.yml +160 -4
- data/spec/spec_tests/dns_seedlist_discovery_spec.rb +9 -1
- data/spec/spec_tests/max_staleness_spec.rb +4 -142
- data/spec/spec_tests/retryable_reads_spec.rb +2 -2
- data/spec/spec_tests/sdam_integration_spec.rb +13 -0
- data/spec/spec_tests/sdam_monitoring_spec.rb +1 -2
- data/spec/spec_tests/server_selection_spec.rb +4 -116
- data/spec/spec_tests/uri_options_spec.rb +31 -33
- data/spec/stress/cleanup_spec.rb +17 -2
- data/spec/stress/connection_pool_stress_spec.rb +10 -8
- data/spec/stress/fork_reconnect_stress_spec.rb +1 -1
- data/spec/support/certificates/atlas-ocsp-ca.crt +28 -0
- data/spec/support/certificates/atlas-ocsp.crt +41 -0
- data/spec/support/client_registry.rb +1 -0
- data/spec/support/client_registry_macros.rb +11 -2
- data/spec/support/cluster_config.rb +4 -0
- data/spec/support/common_shortcuts.rb +45 -0
- data/spec/support/constraints.rb +6 -253
- data/spec/support/event_subscriber.rb +123 -33
- data/spec/support/keyword_struct.rb +26 -0
- data/spec/support/matchers.rb +16 -0
- data/spec/support/ocsp +1 -0
- data/spec/support/session_registry.rb +52 -0
- data/spec/support/shared/server_selector.rb +13 -1
- data/spec/support/spec_config.rb +60 -13
- data/spec/support/spec_setup.rb +1 -1
- data/spec/support/utils.rb +84 -1
- metadata +1027 -937
- metadata.gz.sig +0 -0
- data/lib/mongo/server_selector/selectable.rb +0 -560
- data/spec/runners/sdam_monitoring.rb +0 -89
- data/spec/support/lite_constraints.rb +0 -141
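
Two of the larger additions in the list above, data/lib/mongo/query_cache.rb (+242) and data/lib/mongo/caching_cursor.rb (+74), introduce the driver-level query cache shipped in 2.14.0. The sketch below is illustrative only; it assumes the interface exercised by spec/integration/query_cache_spec.rb (Mongo::QueryCache.enabled= and Mongo::QueryCache.cache) and a deployment reachable at localhost:27017.

  require 'mongo'

  client = Mongo::Client.new(['localhost:27017'], database: 'test')

  # Enable the cache globally: repeated identical finds are answered from memory,
  # and writes to the collection invalidate its cached results.
  Mongo::QueryCache.enabled = true
  client[:artists].find(genre: 'rock').to_a
  client[:artists].find(genre: 'rock').to_a # served from the cache

  # Or scope caching to a block.
  Mongo::QueryCache.cache do
    client[:artists].find(genre: 'jazz').first
  end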
data/lib/mongo/server/push_monitor.rb (new file):
@@ -0,0 +1,173 @@
+# Copyright (C) 2020 MongoDB Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Mongo
+  class Server
+
+    # A monitor utilizing server-pushed ismaster requests.
+    #
+    # When a Monitor handshakes with a 4.4+ server, it creates an instance
+    # of PushMonitor. PushMonitor subsequently executes server-pushed ismaster
+    # (i.e. awaited & exhausted ismaster) to receive topology changes from the
+    # server as quickly as possible. The Monitor still monitors the server
+    # for round-trip time calculations and to perform immediate checks as
+    # requested by the application.
+    #
+    # @api private
+    class PushMonitor
+      extend Forwardable
+      include BackgroundThread
+
+      def initialize(monitor, topology_version, monitoring, **options)
+        if topology_version.nil?
+          raise ArgumentError, 'Topology version must be provided but it was nil'
+        end
+        @monitor = monitor
+        @topology_version = topology_version
+        @monitoring = monitoring
+        @options = options
+        @lock = Mutex.new
+      end
+
+      # @return [ Monitor ] The monitor to which this push monitor is attached.
+      attr_reader :monitor
+
+      # @return [ TopologyVersion ] Most recently received topology version.
+      attr_reader :topology_version
+
+      # @return [ Monitoring ] monitoring The monitoring.
+      attr_reader :monitoring
+
+      # @return [ Hash ] Push monitor options.
+      attr_reader :options
+
+      # @return [ Server ] The server that is being monitored.
+      def_delegator :monitor, :server
+
+      def start!
+        @lock.synchronize do
+          super
+        end
+      end
+
+      def stop!
+        @lock.synchronize do
+          @stop_requested = true
+          if @connection
+            # Interrupt any in-progress exhausted ismaster reads by
+            # disconnecting the connection.
+            @connection.send(:socket).close
+          end
+        end
+        super.tap do
+          @lock.synchronize do
+            if @connection
+              @connection.disconnect!
+              @connection = nil
+            end
+          end
+        end
+      end
+
+      def do_work
+        @lock.synchronize do
+          return if @stop_requested
+        end
+
+        result = monitoring.publish_heartbeat(server, awaited: true) do
+          ismaster
+        end
+        new_description = monitor.run_sdam_flow(result, awaited: true)
+        # When ismaster fails due to a fail point, the response does not
+        # include topology version. In this case we need to keep our existing
+        # topology version so that we can resume monitoring.
+        # The spec does not appear to directly address this case but
+        # https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#streaming-ismaster
+        # says that topologyVersion should only be updated from successful
+        # ismaster responses.
+        if new_description.topology_version
+          @topology_version = new_description.topology_version
+        end
+      rescue Mongo::Error => exc
+        msg = "Error running awaited ismaster on #{server.address}"
+        Utils.warn_bg_exception(msg, exc,
+          logger: options[:logger],
+          log_prefix: options[:log_prefix],
+          bg_error_backtrace: options[:bg_error_backtrace],
+        )
+      end
+
+      def ismaster
+        @lock.synchronize do
+          if @connection && @connection.pid != Process.pid
+            log_warn("Detected PID change - Mongo client should have been reconnected (old pid #{@connection.pid}, new pid #{Process.pid}")
+            @connection.disconnect!
+            @connection = nil
+          end
+        end
+
+        @lock.synchronize do
+          unless @connection
+            @server_pushing = false
+            connection = PushMonitor::Connection.new(server.address, options)
+            connection.connect!
+            @connection = connection
+          end
+        end
+
+        resp_msg = begin
+          unless @server_pushing
+            write_ismaster
+          end
+          read_response
+        rescue Mongo::Error
+          @lock.synchronize do
+            @connection.disconnect!
+            @connection = nil
+          end
+          raise
+        end
+        @server_pushing = resp_msg.flags.include?(:more_to_come)
+        result = resp_msg.documents.first
+      end
+
+      def write_ismaster
+        payload = Monitor::Connection::ISMASTER_OP_MSG.merge(
+          topologyVersion: topology_version.to_doc,
+          maxAwaitTimeMS: monitor.heartbeat_interval * 1000,
+        )
+
+        req_msg = Protocol::Msg.new([:exhaust_allowed], {}, payload)
+        @lock.synchronize { @connection }.write_bytes(req_msg.serialize.to_s)
+      end
+
+      def read_response
+        if timeout = options[:connect_timeout]
+          if timeout < 0
+            raise Mongo::SocketTimeoutError, "Requested to read with a negative timeout: #{}"
+          elsif timeout > 0
+            timeout += options[:heartbeat_frequency] || Monitor::DEFAULT_HEARTBEAT_INTERVAL
+          end
+        end
+        # We set the timeout twice: once passed into read_socket which applies
+        # to each individual read operation, and again around the entire read.
+        Timeout.timeout(timeout, Error::SocketTimeoutError, "Failed to read an awaited ismaster response in #{timeout} seconds") do
+          @lock.synchronize { @connection }.read_response(socket_timeout: timeout)
+        end
+      end
+    end
+  end
+end
+
+require 'mongo/server/push_monitor/connection'
data/spec/runners/transactions/context.rb → data/lib/mongo/server/push_monitor/connection.rb (renamed):
@@ -1,4 +1,4 @@
-# Copyright (C)
+# Copyright (C) 2020 MongoDB Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,19 +13,14 @@
 # limitations under the License.
 
 module Mongo
-
-
-
-
-
-
-
-
-        [:session].each do |key|
-          if out[key]
-            out[key] = send(key)
-          end
-        end
+  class Server
+    class PushMonitor
+
+      # @api private
+      class Connection < Server::Monitor::Connection
+
+        def socket_timeout
+          options[:socket_timeout]
         end
       end
     end
data/lib/mongo/server_selector.rb:
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 require 'mongo/server_selector/base'
-require 'mongo/server_selector/selectable'
 require 'mongo/server_selector/nearest'
 require 'mongo/server_selector/primary'
 require 'mongo/server_selector/primary_preferred'
data/lib/mongo/server_selector/base.rb:
@@ -16,8 +16,356 @@ module Mongo
 
   module ServerSelector
 
-    # @api private
     class Base
+
+      # Initialize the server selector.
+      #
+      # @example Initialize the selector.
+      #   Mongo::ServerSelector::Secondary.new(:tag_sets => [{'dc' => 'nyc'}])
+      #
+      # @example Initialize the preference with no options.
+      #   Mongo::ServerSelector::Secondary.new
+      #
+      # @param [ Hash ] options The server preference options.
+      #
+      # @option options [ Integer ] :local_threshold The local threshold boundary for
+      #   nearest selection in seconds.
+      # @option options [ Integer ] max_staleness The maximum replication lag,
+      #   in seconds, that a secondary can suffer and still be eligible for a read.
+      #   A value of -1 is treated identically to nil, which is to not
+      #   have a maximum staleness.
+      # @option options [ Hash | nil ] hedge A Hash specifying whether to enable hedged
+      #   reads on the server. Hedged reads are not enabled by default. When
+      #   specifying this option, it must be in the format: { enabled: true },
+      #   where the value of the :enabled key is a boolean value.
+      #
+      # @raise [ Error::InvalidServerPreference ] If tag sets are specified
+      #   but not allowed.
+      #
+      # @api private
+      def initialize(options = nil)
+        options = options ? options.dup : {}
+        if options[:max_staleness] == -1
+          options.delete(:max_staleness)
+        end
+        @options = options
+        @tag_sets = options[:tag_sets] || []
+        @max_staleness = options[:max_staleness]
+        @hedge = options[:hedge]
+
+        validate!
+      end
+
+      # @return [ Hash ] options The options.
+      attr_reader :options
+
+      # @return [ Array ] tag_sets The tag sets used to select servers.
+      attr_reader :tag_sets
+
+      # @return [ Integer ] max_staleness The maximum replication lag, in
+      #   seconds, that a secondary can suffer and still be eligible for a read.
+      #
+      # @since 2.4.0
+      attr_reader :max_staleness
+
+      # @return [ Hash | nil ] hedge The document specifying whether to enable
+      #   hedged reads.
+      attr_reader :hedge
+
+      # Get the timeout for server selection.
+      #
+      # @example Get the server selection timeout, in seconds.
+      #   selector.server_selection_timeout
+      #
+      # @return [ Float ] The timeout.
+      #
+      # @since 2.0.0
+      #
+      # @deprecated This setting is now taken from the cluster options when
+      #   a server is selected. Will be removed in version 3.0.
+      def server_selection_timeout
+        @server_selection_timeout ||=
+          (options[:server_selection_timeout] || ServerSelector::SERVER_SELECTION_TIMEOUT)
+      end
+
+      # Get the local threshold boundary for nearest selection in seconds.
+      #
+      # @example Get the local threshold.
+      #   selector.local_threshold
+      #
+      # @return [ Float ] The local threshold.
+      #
+      # @since 2.0.0
+      #
+      # @deprecated This setting is now taken from the cluster options when
+      #   a server is selected. Will be removed in version 3.0.
+      def local_threshold
+        @local_threshold ||= (options[:local_threshold] || ServerSelector::LOCAL_THRESHOLD)
+      end
+
+      # @api private
+      def local_threshold_with_cluster(cluster)
+        options[:local_threshold] || cluster.options[:local_threshold] || LOCAL_THRESHOLD
+      end
+
+      # Inspect the server selector.
+      #
+      # @example Inspect the server selector.
+      #   selector.inspect
+      #
+      # @return [ String ] The inspection.
+      #
+      # @since 2.2.0
+      def inspect
+        "#<#{self.class.name}:0x#{object_id} tag_sets=#{tag_sets.inspect} max_staleness=#{max_staleness.inspect} hedge=#{hedge}>"
+      end
+
+      # Check equality of two server selectors.
+      #
+      # @example Check server selector equality.
+      #   preference == other
+      #
+      # @param [ Object ] other The other preference.
+      #
+      # @return [ true, false ] Whether the objects are equal.
+      #
+      # @since 2.0.0
+      def ==(other)
+        name == other.name && hedge == other.hedge &&
+          max_staleness == other.max_staleness && tag_sets == other.tag_sets
+      end
+
+      # Select a server from the specified cluster, taking into account
+      # mongos pinning for the specified session.
+      #
+      # If the session is given and has a pinned server, this server is the
+      # only server considered for selection. If the server is of type mongos,
+      # it is returned immediately; otherwise monitoring checks on this
+      # server are initiated to update its status, and if the server becomes
+      # a mongos within the server selection timeout, it is returned.
+      #
+      # If no session is given or the session does not have a pinned server,
+      # normal server selection process is performed among all servers in the
+      # specified cluster matching the preference of this server selector
+      # object. Monitoring checks are initiated on servers in the cluster until
+      # a suitable server is found, up to the server selection timeout.
+      #
+      # If a suitable server is not found within the server selection timeout,
+      # this method raises Error::NoServerAvailable.
+      #
+      # @param [ Mongo::Cluster ] cluster The cluster from which to select
+      #   an eligible server.
+      # @param [ true, false ] ping Whether to ping the server before selection.
+      #   Deprecated and ignored.
+      # @param [ Session | nil ] session Optional session to take into account
+      #   for mongos pinning. Added in version 2.10.0.
+      #
+      # @return [ Mongo::Server ] A server matching the server preference.
+      #
+      # @raise [ Error::NoServerAvailable ] No server was found matching the
+      #   specified preference / pinning requirement in the server selection
+      #   timeout.
+      # @raise [ Error::LintError ] An unexpected condition was detected, and
+      #   lint mode is enabled.
+      #
+      # @since 2.0.0
+      def select_server(cluster, ping = nil, session = nil)
+        server_selection_timeout = cluster.options[:server_selection_timeout] || SERVER_SELECTION_TIMEOUT
+
+        # Special handling for zero timeout: if we have to select a server,
+        # and the timeout is zero, fail immediately (since server selection
+        # will take some non-zero amount of time in any case).
+        if server_selection_timeout == 0
+          msg = "Failing server selection due to zero timeout. " +
+            " Requested #{name} in cluster: #{cluster.summary}"
+          raise Error::NoServerAvailable.new(self, cluster, msg)
+        end
+
+        deadline = Time.now + server_selection_timeout
+
+        if session && session.pinned_server
+          if Mongo::Lint.enabled?
+            unless cluster.sharded?
+              raise Error::LintError, "Session has a pinned server in a non-sharded topology: #{topology}"
+            end
+          end
+
+          if !session.in_transaction?
+            session.unpin
+          end
+
+          if server = session.pinned_server
+            # Here we assume that a mongos stays in the topology indefinitely.
+            # This will no longer be the case once SRV polling is implemented.
+
+            unless server.mongos?
+              while (time_remaining = deadline - Time.now) > 0
+                wait_for_server_selection(cluster, time_remaining)
+              end
+
+              unless server.mongos?
+                msg = "The session being used is pinned to the server which is not a mongos: #{server.summary} " +
+                  "(after #{server_selection_timeout} seconds)"
+                raise Error::NoServerAvailable.new(self, cluster, msg)
+              end
+            end
+
+            return server
+          end
+        end
+
+        if cluster.replica_set?
+          validate_max_staleness_value_early!
+        end
+
+        if cluster.addresses.empty?
+          if Lint.enabled?
+            unless cluster.servers.empty?
+              raise Error::LintError, "Cluster has no addresses but has servers: #{cluster.servers.map(&:inspect).join(', ')}"
+            end
+          end
+          msg = "Cluster has no addresses, and therefore will never have a server"
+          raise Error::NoServerAvailable.new(self, cluster, msg)
+        end
+
+=begin Add this check in version 3.0.0
+        unless cluster.connected?
+          msg = 'Cluster is disconnected'
+          raise Error::NoServerAvailable.new(self, cluster, msg)
+        end
+=end
+
+        loop do
+          server = try_select_server(cluster)
+
+          if server
+            unless cluster.topology.compatible?
+              raise Error::UnsupportedFeatures, cluster.topology.compatibility_error.to_s
+            end
+
+            if session && session.starting_transaction? && cluster.sharded?
+              session.pin(server)
+            end
+
+            return server
+          end
+
+          cluster.scan!(false)
+
+          time_remaining = deadline - Time.now
+          if time_remaining > 0
+            wait_for_server_selection(cluster, time_remaining)
+
+            # If we wait for server selection, perform another round of
+            # attempting to locate a suitable server. Otherwise server selection
+            # can raise NoServerAvailable message when the diagnostics
+            # reports an available server of the requested type.
+          else
+            break
+          end
+        end
+
+        msg = "No #{name} server"
+        if is_a?(ServerSelector::Secondary) && !tag_sets.empty?
+          msg += " with tag sets: #{tag_sets}"
+        end
+        msg += " is available in cluster: #{cluster.summary} " +
+          "with timeout=#{server_selection_timeout}, " +
+          "LT=#{local_threshold_with_cluster(cluster)}"
+        msg += server_selection_diagnostic_message(cluster)
+        raise Error::NoServerAvailable.new(self, cluster, msg)
+      rescue Error::NoServerAvailable => e
+        if session && session.in_transaction? && !session.committing_transaction?
+          e.add_label('TransientTransactionError')
+        end
+        if session && session.committing_transaction?
+          e.add_label('UnknownTransactionCommitResult')
+        end
+        raise e
+      end
+
+      # Tries to find a suitable server, returns the server if one is available
+      # or nil if there isn't a suitable server.
+      #
+      # @return [ Server | nil ] A suitable server, if one exists.
+      #
+      # @api private
+      def try_select_server(cluster)
+        servers = suitable_servers(cluster)
+
+        # This list of servers may be ordered in a specific way
+        # by the selector (e.g. for secondary preferred, the first
+        # server may be a secondary and the second server may be primary)
+        # and we should take the first server here respecting the order
+        server = servers.first
+
+        if server
+          if Lint.enabled?
+            # It is possible for a server to have a nil average RTT here
+            # because the ARTT comes from description which may be updated
+            # by a background thread while server selection is running.
+            # Currently lint mode is not a public feature, if/when this
+            # changes (https://jira.mongodb.org/browse/RUBY-1576) the
+            # requirement for ARTT to be not nil would need to be removed.
+            if server.average_round_trip_time.nil?
+              raise Error::LintError, "Server #{server.address} has nil average rtt"
+            end
+          end
+        end
+
+        server
+      end
+
+      # Returns servers of acceptable types from the cluster.
+      #
+      # Does not perform staleness validation, staleness filtering or
+      # latency filtering.
+      #
+      # @param [ Cluster ] cluster The cluster.
+      #
+      # @return [ Array<Server> ] The candidate servers.
+      #
+      # @api private
+      def candidates(cluster)
+        servers = cluster.servers
+        servers.each do |server|
+          validate_max_staleness_support!(server)
+        end
+        if cluster.single?
+          servers
+        elsif cluster.sharded?
+          servers
+        elsif cluster.replica_set?
+          select_in_replica_set(servers)
+        else
+          # Unknown cluster - no servers
+          []
+        end
+      end
+
+      # Returns servers satisfying the server selector from the cluster.
+      #
+      # @param [ Cluster ] cluster The cluster.
+      #
+      # @return [ Array<Server> ] The suitable servers.
+      #
+      # @api private
+      def suitable_servers(cluster)
+        if cluster.single?
+          candidates(cluster)
+        elsif cluster.sharded?
+          local_threshold = local_threshold_with_cluster(cluster)
+          servers = candidates(cluster)
+          near_servers(servers, local_threshold)
+        elsif cluster.replica_set?
+          validate_max_staleness_value!(cluster)
+          candidates(cluster)
+        else
+          # Unknown cluster - no servers
+          []
+        end
+      end
+
       private
 
       # Convert this server preference definition into a format appropriate
@@ -35,6 +383,240 @@ module Mongo
           preference
         end
       end
+
+      # Select the primary from a list of provided candidates.
+      #
+      # @param [ Array ] candidates List of candidate servers to select the
+      #   primary from.
+      #
+      # @return [ Array ] The primary.
+      #
+      # @since 2.0.0
+      def primary(candidates)
+        candidates.select do |server|
+          server.primary?
+        end
+      end
+
+      # Select the secondaries from a list of provided candidates.
+      #
+      # @param [ Array ] candidates List of candidate servers to select the
+      #   secondaries from.
+      #
+      # @return [ Array ] The secondary servers.
+      #
+      # @since 2.0.0
+      def secondaries(candidates)
+        matching_servers = candidates.select(&:secondary?)
+        matching_servers = filter_stale_servers(matching_servers, primary(candidates).first)
+        matching_servers = match_tag_sets(matching_servers) unless tag_sets.empty?
+        # Per server selection spec the server selected MUST be a random
+        # one matching staleness and latency requirements.
+        # Selectors always pass the output of #secondaries to #nearest
+        # which shuffles the server list, fulfilling this requirement.
+        matching_servers
+      end
+
+      # Select the near servers from a list of provided candidates, taking the
+      # local threshold into account.
+      #
+      # @param [ Array ] candidates List of candidate servers to select the
+      #   near servers from.
+      # @param [ Integer ] local_threshold Local threshold. This parameter
+      #   will be required in driver version 3.0.
+      #
+      # @return [ Array ] The near servers.
+      #
+      # @since 2.0.0
+      def near_servers(candidates = [], local_threshold = nil)
+        return candidates if candidates.empty?
+
+        # Average RTT on any server may change at any time by the server
+        # monitor's background thread. ARTT may also become nil if the
+        # server is marked unknown. Take a snapshot of ARTTs for the duration
+        # of this method.
+
+        candidates = candidates.map do |server|
+          {server: server, artt: server.average_round_trip_time}
+        end.reject do |candidate|
+          candidate[:artt].nil?
+        end
+
+        return candidates if candidates.empty?
+
+        nearest_candidate = candidates.min_by do |candidate|
+          candidate[:artt]
+        end
+
+        # Default for legacy signarure
+        local_threshold ||= self.local_threshold
+
+        threshold = nearest_candidate[:artt] + local_threshold
+
+        candidates.select do |candidate|
+          candidate[:artt] <= threshold
+        end.map do |candidate|
+          candidate[:server]
+        end.shuffle!
+      end
+
+      # Select the servers matching the defined tag sets.
+      #
+      # @param [ Array ] candidates List of candidate servers from which those
+      #   matching the defined tag sets should be selected.
+      #
+      # @return [ Array ] The servers matching the defined tag sets.
+      #
+      # @since 2.0.0
+      def match_tag_sets(candidates)
+        matches = []
+        tag_sets.find do |tag_set|
+          matches = candidates.select { |server| server.matches_tag_set?(tag_set) }
+          !matches.empty?
+        end
+        matches || []
+      end
+
+      def filter_stale_servers(candidates, primary = nil)
+        return candidates unless @max_staleness
+
+        # last_scan is filled out by the Monitor, and can be nil if a server
+        # had its description manually set rather than being normally updated
+        # via the SDAM flow. We don't handle the possibility of a nil
+        # last_scan here.
+        if primary
+          candidates.select do |server|
+            validate_max_staleness_support!(server)
+            staleness = (server.last_scan - server.last_write_date) -
+              (primary.last_scan - primary.last_write_date) +
+              server.cluster.heartbeat_interval
+            staleness <= @max_staleness
+          end
+        else
+          max_write_date = candidates.collect(&:last_write_date).max
+          candidates.select do |server|
+            validate_max_staleness_support!(server)
+            staleness = max_write_date - server.last_write_date + server.cluster.heartbeat_interval
+            staleness <= @max_staleness
+          end
+        end
+      end
+
+      def validate!
+        if !@tag_sets.all? { |set| set.empty? } && !tags_allowed?
+          raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::NO_TAG_SUPPORT)
+        elsif @max_staleness && !max_staleness_allowed?
+          raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::NO_MAX_STALENESS_SUPPORT)
+        end
+
+        if @hedge
+          unless hedge_allowed?
+            raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::NO_HEDGE_SUPPORT)
+          end
+
+          unless @hedge.is_a?(Hash) && @hedge.key?(:enabled) &&
+            [true, false].include?(@hedge[:enabled])
+            raise Error::InvalidServerPreference.new(
+              "`hedge` value (#{hedge}) is invalid - hedge must be a Hash in the " \
+              "format { enabled: true }"
+            )
+          end
+        end
+      end
+
+      def validate_max_staleness_support!(server)
+        if @max_staleness && !server.features.max_staleness_enabled?
+          raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::NO_MAX_STALENESS_WITH_LEGACY_SERVER)
+        end
+      end
+
+      def validate_max_staleness_value_early!
+        if @max_staleness
+          unless @max_staleness >= SMALLEST_MAX_STALENESS_SECONDS
+            msg = "`max_staleness` value (#{@max_staleness}) is too small - it must be at least " +
+              "`Mongo::ServerSelector::SMALLEST_MAX_STALENESS_SECONDS` (#{ServerSelector::SMALLEST_MAX_STALENESS_SECONDS})"
+            raise Error::InvalidServerPreference.new(msg)
+          end
+        end
+      end
+
+      def validate_max_staleness_value!(cluster)
+        if @max_staleness
+          heartbeat_interval = cluster.heartbeat_interval
+          unless @max_staleness >= [
+            SMALLEST_MAX_STALENESS_SECONDS,
+            min_cluster_staleness = heartbeat_interval + Cluster::IDLE_WRITE_PERIOD_SECONDS,
+          ].max
+            msg = "`max_staleness` value (#{@max_staleness}) is too small - it must be at least " +
+              "`Mongo::ServerSelector::SMALLEST_MAX_STALENESS_SECONDS` (#{ServerSelector::SMALLEST_MAX_STALENESS_SECONDS}) and (the cluster's heartbeat_frequency " +
+              "setting + `Mongo::Cluster::IDLE_WRITE_PERIOD_SECONDS`) (#{min_cluster_staleness})"
+            raise Error::InvalidServerPreference.new(msg)
+          end
+        end
+      end
+
+      # Waits for server state changes in the specified cluster.
+      #
+      # If the cluster has a server selection semaphore, waits on that
+      # semaphore up to the specified remaining time. Any change in server
+      # state resulting from SDAM will immediately wake up this method and
+      # cause it to return.
+      #
+      # If the cluster des not have a server selection semaphore, waits
+      # the smaller of 0.25 seconds and the specified remaining time.
+      # This functionality is provided for backwards compatibilty only for
+      # applications directly invoking the server selection process.
+      # If lint mode is enabled and the cluster does not have a server
+      # selection semaphore, Error::LintError will be raised.
+      #
+      # @param [ Cluster ] cluster The cluster to wait for.
+      # @param [ Numeric ] time_remaining Maximum time to wait, in seconds.
+      def wait_for_server_selection(cluster, time_remaining)
+        if cluster.server_selection_semaphore
+          # Since the semaphore may have been signaled between us checking
+          # the servers list earlier and the wait call below, we should not
+          # wait for the full remaining time - wait for up to 1 second, then
+          # recheck the state.
+          cluster.server_selection_semaphore.wait([time_remaining, 1].min)
+        else
+          if Lint.enabled?
+            raise Error::LintError, 'Waiting for server selection without having a server selection semaphore'
+          end
+          sleep [time_remaining, 0.25].min
+        end
+      end
+
+      # Creates a diagnostic message when server selection fails.
+      #
+      # The diagnostic message includes the following information, as applicable:
+      #
+      # - Servers having dead monitor threads
+      # - Cluster is disconnected
+      #
+      # If none of the conditions for diagnostic messages apply, an empty string
+      # is returned.
+      #
+      # @param [ Cluster ] cluster The cluster on which server selection was
+      #   performed.
+      #
+      # @return [ String ] The diagnostic message.
+      def server_selection_diagnostic_message(cluster)
+        msg = ''
+        dead_monitors = []
+        cluster.servers_list.each do |server|
+          thread = server.monitor.instance_variable_get('@thread')
+          if thread.nil? || !thread.alive?
+            dead_monitors << server
+          end
+        end
+        if dead_monitors.any?
+          msg += ". The following servers have dead monitor threads: #{dead_monitors.map(&:summary).join(', ')}"
+        end
+        unless cluster.connected?
+          msg += ". The cluster is disconnected (client may have been closed)"
+        end
+        msg
+      end
     end
   end
 end
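
The initializer documented above accepts :tag_sets, :max_staleness and the new :hedge option, and validate! enforces the { enabled: true } shape for hedge. A short sketch built from that documentation (the tag values and address are placeholders; the constructor itself is tagged @api private, the usual route being a client read preference):

  require 'mongo'

  selector = Mongo::ServerSelector::SecondaryPreferred.new(
    tag_sets: [{ 'dc' => 'nyc' }],
    max_staleness: 120,       # seconds; must satisfy the minimums validated above
    hedge: { enabled: true }  # any other shape raises Error::InvalidServerPreference
  )

  # The same options expressed as a client read preference:
  client = Mongo::Client.new(['localhost:27017'], database: 'test',
    read: { mode: :secondary_preferred, tag_sets: [{ 'dc' => 'nyc' }], hedge: { enabled: true } })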