sensu-plugins-mongodb-mrtrotl 1.4.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +1 -0
- data/LICENSE +22 -0
- data/README.md +27 -0
- data/bin/check-mongodb-metric.rb +144 -0
- data/bin/check-mongodb-query-count.rb +267 -0
- data/bin/check-mongodb.py +1644 -0
- data/bin/check-mongodb.rb +5 -0
- data/bin/metrics-mongodb-replication.rb +254 -0
- data/bin/metrics-mongodb.rb +133 -0
- data/lib/bson/__init__.py +1347 -0
- data/lib/bson/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/_helpers.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/binary.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/code.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/codec_options.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/dbref.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/decimal128.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/int64.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/json_util.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/max_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/min_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/objectid.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/raw_bson.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/regex.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/son.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/timestamp.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/tz_util.cpython-310.pyc +0 -0
- data/lib/bson/_cbson.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/bson/_helpers.py +41 -0
- data/lib/bson/binary.py +364 -0
- data/lib/bson/code.py +101 -0
- data/lib/bson/codec_options.py +414 -0
- data/lib/bson/codec_options.pyi +100 -0
- data/lib/bson/dbref.py +133 -0
- data/lib/bson/decimal128.py +314 -0
- data/lib/bson/errors.py +35 -0
- data/lib/bson/int64.py +39 -0
- data/lib/bson/json_util.py +874 -0
- data/lib/bson/max_key.py +55 -0
- data/lib/bson/min_key.py +55 -0
- data/lib/bson/objectid.py +286 -0
- data/lib/bson/py.typed +2 -0
- data/lib/bson/raw_bson.py +175 -0
- data/lib/bson/regex.py +135 -0
- data/lib/bson/son.py +208 -0
- data/lib/bson/timestamp.py +124 -0
- data/lib/bson/tz_util.py +52 -0
- data/lib/gridfs/__init__.py +1015 -0
- data/lib/gridfs/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/grid_file.cpython-310.pyc +0 -0
- data/lib/gridfs/errors.py +33 -0
- data/lib/gridfs/grid_file.py +907 -0
- data/lib/gridfs/py.typed +2 -0
- data/lib/pymongo/__init__.py +185 -0
- data/lib/pymongo/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/_csot.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/aggregation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth_aws.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/bulk.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/change_stream.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_session.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collection.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/command_cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/common.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/compression_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/daemon.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/database.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/driver_info.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/event_loggers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/hello.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/helpers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/max_staleness_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/message.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/mongo_client.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitoring.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/network.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_cache.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/operations.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/periodic_executor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pool.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pyopenssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_preferences.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/response.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/results.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/saslprep.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_api.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_type.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/settings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/socket_checker.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/srv_resolver.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/typings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/uri_parser.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/write_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/_cmessage.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/pymongo/_csot.py +118 -0
- data/lib/pymongo/aggregation.py +229 -0
- data/lib/pymongo/auth.py +549 -0
- data/lib/pymongo/auth_aws.py +94 -0
- data/lib/pymongo/bulk.py +513 -0
- data/lib/pymongo/change_stream.py +457 -0
- data/lib/pymongo/client_options.py +302 -0
- data/lib/pymongo/client_session.py +1112 -0
- data/lib/pymongo/collation.py +224 -0
- data/lib/pymongo/collection.py +3204 -0
- data/lib/pymongo/command_cursor.py +353 -0
- data/lib/pymongo/common.py +984 -0
- data/lib/pymongo/compression_support.py +149 -0
- data/lib/pymongo/cursor.py +1345 -0
- data/lib/pymongo/daemon.py +141 -0
- data/lib/pymongo/database.py +1202 -0
- data/lib/pymongo/driver_info.py +42 -0
- data/lib/pymongo/encryption.py +884 -0
- data/lib/pymongo/encryption_options.py +221 -0
- data/lib/pymongo/errors.py +365 -0
- data/lib/pymongo/event_loggers.py +221 -0
- data/lib/pymongo/hello.py +219 -0
- data/lib/pymongo/helpers.py +259 -0
- data/lib/pymongo/max_staleness_selectors.py +114 -0
- data/lib/pymongo/message.py +1440 -0
- data/lib/pymongo/mongo_client.py +2144 -0
- data/lib/pymongo/monitor.py +440 -0
- data/lib/pymongo/monitoring.py +1801 -0
- data/lib/pymongo/network.py +311 -0
- data/lib/pymongo/ocsp_cache.py +87 -0
- data/lib/pymongo/ocsp_support.py +372 -0
- data/lib/pymongo/operations.py +507 -0
- data/lib/pymongo/periodic_executor.py +183 -0
- data/lib/pymongo/pool.py +1660 -0
- data/lib/pymongo/py.typed +2 -0
- data/lib/pymongo/pyopenssl_context.py +383 -0
- data/lib/pymongo/read_concern.py +75 -0
- data/lib/pymongo/read_preferences.py +609 -0
- data/lib/pymongo/response.py +109 -0
- data/lib/pymongo/results.py +217 -0
- data/lib/pymongo/saslprep.py +113 -0
- data/lib/pymongo/server.py +247 -0
- data/lib/pymongo/server_api.py +170 -0
- data/lib/pymongo/server_description.py +285 -0
- data/lib/pymongo/server_selectors.py +153 -0
- data/lib/pymongo/server_type.py +32 -0
- data/lib/pymongo/settings.py +159 -0
- data/lib/pymongo/socket_checker.py +104 -0
- data/lib/pymongo/srv_resolver.py +126 -0
- data/lib/pymongo/ssl_context.py +39 -0
- data/lib/pymongo/ssl_support.py +99 -0
- data/lib/pymongo/topology.py +890 -0
- data/lib/pymongo/topology_description.py +639 -0
- data/lib/pymongo/typings.py +39 -0
- data/lib/pymongo/uri_parser.py +624 -0
- data/lib/pymongo/write_concern.py +129 -0
- data/lib/pymongo-4.2.0.dist-info/INSTALLER +1 -0
- data/lib/pymongo-4.2.0.dist-info/LICENSE +201 -0
- data/lib/pymongo-4.2.0.dist-info/METADATA +250 -0
- data/lib/pymongo-4.2.0.dist-info/RECORD +167 -0
- data/lib/pymongo-4.2.0.dist-info/REQUESTED +0 -0
- data/lib/pymongo-4.2.0.dist-info/WHEEL +6 -0
- data/lib/pymongo-4.2.0.dist-info/top_level.txt +3 -0
- data/lib/sensu-plugins-mongodb/metrics.rb +391 -0
- data/lib/sensu-plugins-mongodb/version.rb +9 -0
- data/lib/sensu-plugins-mongodb.rb +1 -0
- metadata +407 -0
@@ -0,0 +1,890 @@
|
|
1
|
+
# Copyright 2014-present MongoDB, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License"); you
|
4
|
+
# may not use this file except in compliance with the License. You
|
5
|
+
# may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
12
|
+
# implied. See the License for the specific language governing
|
13
|
+
# permissions and limitations under the License.
|
14
|
+
|
15
|
+
"""Internal class to monitor a topology of one or more servers."""
|
16
|
+
|
17
|
+
import os
|
18
|
+
import queue
|
19
|
+
import random
|
20
|
+
import threading
|
21
|
+
import time
|
22
|
+
import warnings
|
23
|
+
import weakref
|
24
|
+
from typing import Any
|
25
|
+
|
26
|
+
from pymongo import _csot, common, helpers, periodic_executor
|
27
|
+
from pymongo.client_session import _ServerSessionPool
|
28
|
+
from pymongo.errors import (
|
29
|
+
ConfigurationError,
|
30
|
+
ConnectionFailure,
|
31
|
+
InvalidOperation,
|
32
|
+
NetworkTimeout,
|
33
|
+
NotPrimaryError,
|
34
|
+
OperationFailure,
|
35
|
+
PyMongoError,
|
36
|
+
ServerSelectionTimeoutError,
|
37
|
+
WriteError,
|
38
|
+
)
|
39
|
+
from pymongo.hello import Hello
|
40
|
+
from pymongo.monitor import SrvMonitor
|
41
|
+
from pymongo.pool import PoolOptions
|
42
|
+
from pymongo.server import Server
|
43
|
+
from pymongo.server_description import ServerDescription
|
44
|
+
from pymongo.server_selectors import (
|
45
|
+
Selection,
|
46
|
+
any_server_selector,
|
47
|
+
arbiter_server_selector,
|
48
|
+
readable_server_selector,
|
49
|
+
secondary_server_selector,
|
50
|
+
writable_server_selector,
|
51
|
+
)
|
52
|
+
from pymongo.topology_description import (
|
53
|
+
SRV_POLLING_TOPOLOGIES,
|
54
|
+
TOPOLOGY_TYPE,
|
55
|
+
TopologyDescription,
|
56
|
+
_updated_topology_description_srv_polling,
|
57
|
+
updated_topology_description,
|
58
|
+
)
|
59
|
+
|
60
|
+
|
61
|
+
def process_events_queue(queue_ref):
    """Drain all pending (fn, args) events from a weakly-referenced queue.

    `queue_ref` is a callable (a weakref) returning the events queue or None.
    Returns False when the queue has been garbage collected, which tells the
    PeriodicExecutor to cancel itself; returns True to keep running.
    """
    events = queue_ref()
    if not events:
        return False  # Cancel PeriodicExecutor.

    # Dispatch every queued event without blocking.
    while True:
        try:
            fn, args = events.get_nowait()
        except queue.Empty:
            return True  # Continue PeriodicExecutor.
        fn(*args)
|
76
|
+
|
77
|
+
|
78
|
+
class Topology(object):
|
79
|
+
"""Monitor a topology of one or more servers."""
|
80
|
+
|
81
|
+
    def __init__(self, topology_settings):
        """Initialize topology state from `topology_settings`.

        Builds the initial TopologyDescription from the seed list, creates
        the lock/condition used by server selection, the server session pool,
        and (when event listeners are configured) the SDAM event-publishing
        thread. The SRV monitor is created here but not started; server
        monitors are started later via open().
        """
        self._topology_id = topology_settings._topology_id
        self._listeners = topology_settings._pool_options._event_listeners
        pub = self._listeners is not None
        self._publish_server = pub and self._listeners.enabled_for_server
        self._publish_tp = pub and self._listeners.enabled_for_topology

        # Create events queue if there are publishers.
        self._events = None
        self.__events_executor: Any = None

        if self._publish_server or self._publish_tp:
            # Bounded queue: at most 100 pending SDAM events.
            self._events = queue.Queue(maxsize=100)

        if self._publish_tp:
            assert self._events is not None
            self._events.put((self._listeners.publish_topology_opened, (self._topology_id,)))
        self._settings = topology_settings
        topology_description = TopologyDescription(
            topology_settings.get_topology_type(),
            topology_settings.get_server_descriptions(),
            topology_settings.replica_set_name,
            None,
            None,
            topology_settings,
        )

        self._description = topology_description
        if self._publish_tp:
            assert self._events is not None
            # Publish the transition from an empty Unknown description to the
            # initial one built from the seeds.
            initial_td = TopologyDescription(
                TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings
            )
            self._events.put(
                (
                    self._listeners.publish_topology_description_changed,
                    (initial_td, self._description, self._topology_id),
                )
            )

        for seed in topology_settings.seeds:
            if self._publish_server:
                assert self._events is not None
                self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id)))

        # Store the seed list to help diagnose errors in _error_message().
        self._seed_addresses = list(topology_description.server_descriptions())
        self._opened = False
        self._closed = False
        self._lock = threading.Lock()
        self._condition = self._settings.condition_class(self._lock)
        self._servers = {}
        self._pid = None
        self._max_cluster_time = None
        self._session_pool = _ServerSessionPool()

        if self._publish_server or self._publish_tp:

            def target():
                # `weak` is assigned below, before the executor runs.
                return process_events_queue(weak)

            executor = periodic_executor.PeriodicExecutor(
                interval=common.EVENTS_QUEUE_FREQUENCY,
                min_interval=common.MIN_HEARTBEAT_INTERVAL,
                target=target,
                name="pymongo_events_thread",
            )

            # We strongly reference the executor and it weakly references
            # the queue via this closure. When the topology is freed, stop
            # the executor soon.
            weak = weakref.ref(self._events, executor.close)
            self.__events_executor = executor
            executor.open()

        self._srv_monitor = None
        # SRV polling applies only to mongodb+srv deployments (fqdn set) that
        # are not in load balanced mode.
        if self._settings.fqdn is not None and not self._settings.load_balanced:
            self._srv_monitor = SrvMonitor(self, self._settings)
|
159
|
+
|
160
|
+
    def open(self):
        """Start monitoring, or restart after a fork.

        No effect if called multiple times.

        .. warning:: Topology is shared among multiple threads and is protected
          by mutual exclusion. Using Topology from a process other than the one
          that initialized it will emit a warning and may result in deadlock. To
          prevent this from happening, MongoClient must be created after any
          forking.

        """
        pid = os.getpid()
        if self._pid is None:
            # First call: remember the owning process id.
            self._pid = pid
        elif pid != self._pid:
            # The process id changed: we are running in a forked child.
            self._pid = pid
            warnings.warn(
                "MongoClient opened before fork. Create MongoClient only "
                "after forking. See PyMongo's documentation for details: "
                "https://pymongo.readthedocs.io/en/stable/faq.html#"
                "is-pymongo-fork-safe"
            )
            with self._lock:
                # Close servers and clear the pools.
                for server in self._servers.values():
                    server.close()
                # Reset the session pool to avoid duplicate sessions in
                # the child process.
                self._session_pool.reset()

        with self._lock:
            self._ensure_opened()
|
193
|
+
|
194
|
+
def get_server_selection_timeout(self):
|
195
|
+
# CSOT: use remaining timeout when set.
|
196
|
+
timeout = _csot.remaining()
|
197
|
+
if timeout is None:
|
198
|
+
return self._settings.server_selection_timeout
|
199
|
+
return timeout
|
200
|
+
|
201
|
+
def select_servers(self, selector, server_selection_timeout=None, address=None):
|
202
|
+
"""Return a list of Servers matching selector, or time out.
|
203
|
+
|
204
|
+
:Parameters:
|
205
|
+
- `selector`: function that takes a list of Servers and returns
|
206
|
+
a subset of them.
|
207
|
+
- `server_selection_timeout` (optional): maximum seconds to wait.
|
208
|
+
If not provided, the default value common.SERVER_SELECTION_TIMEOUT
|
209
|
+
is used.
|
210
|
+
- `address`: optional server address to select.
|
211
|
+
|
212
|
+
Calls self.open() if needed.
|
213
|
+
|
214
|
+
Raises exc:`ServerSelectionTimeoutError` after
|
215
|
+
`server_selection_timeout` if no matching servers are found.
|
216
|
+
"""
|
217
|
+
if server_selection_timeout is None:
|
218
|
+
server_timeout = self.get_server_selection_timeout()
|
219
|
+
else:
|
220
|
+
server_timeout = server_selection_timeout
|
221
|
+
|
222
|
+
with self._lock:
|
223
|
+
server_descriptions = self._select_servers_loop(selector, server_timeout, address)
|
224
|
+
|
225
|
+
return [self.get_server_by_address(sd.address) for sd in server_descriptions]
|
226
|
+
|
227
|
+
    def _select_servers_loop(self, selector, timeout, address):
        """select_servers() guts. Hold the lock when calling this.

        Re-applies `selector` to the topology description until at least one
        server matches, raising :exc:`ServerSelectionTimeoutError` once
        `timeout` seconds have elapsed (or immediately when timeout == 0).
        """
        now = time.monotonic()
        end_time = now + timeout
        server_descriptions = self._description.apply_selector(
            selector, address, custom_selector=self._settings.server_selector
        )

        while not server_descriptions:
            # No suitable servers.
            # timeout == 0 means fail fast without waiting for a heartbeat.
            if timeout == 0 or now > end_time:
                raise ServerSelectionTimeoutError(
                    "%s, Timeout: %ss, Topology Description: %r"
                    % (self._error_message(selector), timeout, self.description)
                )

            self._ensure_opened()
            self._request_check_all()

            # Release the lock and wait for the topology description to
            # change, or for a timeout. We won't miss any changes that
            # came after our most recent apply_selector call, since we've
            # held the lock until now.
            self._condition.wait(common.MIN_HEARTBEAT_INTERVAL)
            self._description.check_compatible()
            now = time.monotonic()
            server_descriptions = self._description.apply_selector(
                selector, address, custom_selector=self._settings.server_selector
            )

        self._description.check_compatible()
        return server_descriptions
|
259
|
+
|
260
|
+
def _select_server(self, selector, server_selection_timeout=None, address=None):
|
261
|
+
servers = self.select_servers(selector, server_selection_timeout, address)
|
262
|
+
if len(servers) == 1:
|
263
|
+
return servers[0]
|
264
|
+
server1, server2 = random.sample(servers, 2)
|
265
|
+
if server1.pool.operation_count <= server2.pool.operation_count:
|
266
|
+
return server1
|
267
|
+
else:
|
268
|
+
return server2
|
269
|
+
|
270
|
+
    def select_server(self, selector, server_selection_timeout=None, address=None):
        """Like select_servers, but choose a random server if several match."""
        server = self._select_server(selector, server_selection_timeout, address)
        if _csot.get_timeout():
            # CSOT is active: record this server's round-trip time so the
            # remaining timeout budget can account for network latency.
            _csot.set_rtt(server.description.round_trip_time)
        return server
|
276
|
+
|
277
|
+
    def select_server_by_address(self, address, server_selection_timeout=None):
        """Return a Server for "address", reconnecting if necessary.

        If the server's type is not known, request an immediate check of all
        servers. Time out after "server_selection_timeout" if the server
        cannot be reached.

        :Parameters:
          - `address`: A (host, port) pair.
          - `server_selection_timeout` (optional): maximum seconds to wait.
            If not provided, the default value
            common.SERVER_SELECTION_TIMEOUT is used.

        Calls self.open() if needed.

        Raises exc:`ServerSelectionTimeoutError` after
        `server_selection_timeout` if no matching servers are found.
        """
        # any_server_selector matches every known server; the `address`
        # argument narrows the selection to the requested (host, port).
        return self.select_server(any_server_selector, server_selection_timeout, address)
|
296
|
+
|
297
|
+
    def _process_change(self, server_description, reset_pool=False):
        """Process a new ServerDescription on an opened topology.

        Hold the lock when calling this.

        :Parameters:
          - `server_description`: the freshly observed ServerDescription.
          - `reset_pool` (optional): when True, clear the server's connection
            pool (used after a failed heartbeat).
        """
        td_old = self._description
        sd_old = td_old._server_descriptions[server_description.address]
        if _is_stale_server_description(sd_old, server_description):
            # This is a stale hello response. Ignore it.
            return

        new_td = updated_topology_description(self._description, server_description)
        # CMAP: Ensure the pool is "ready" when the server is selectable.
        if server_description.is_readable or (
            server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single
        ):
            server = self._servers.get(server_description.address)
            if server:
                server.pool.ready()

        # Skip publishing SDAM change events when the description is unchanged.
        suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description
        if self._publish_server and not suppress_event:
            assert self._events is not None
            self._events.put(
                (
                    self._listeners.publish_server_description_changed,
                    (sd_old, server_description, server_description.address, self._topology_id),
                )
            )

        # Swap in the new description, then sync Server objects to match it.
        self._description = new_td
        self._update_servers()
        self._receive_cluster_time_no_lock(server_description.cluster_time)

        if self._publish_tp and not suppress_event:
            assert self._events is not None
            self._events.put(
                (
                    self._listeners.publish_topology_description_changed,
                    (td_old, self._description, self._topology_id),
                )
            )

        # Shutdown SRV polling for unsupported cluster types.
        # This is only applicable if the old topology was Unknown, and the
        # new one is something other than Unknown or Sharded.
        if self._srv_monitor and (
            td_old.topology_type == TOPOLOGY_TYPE.Unknown
            and self._description.topology_type not in SRV_POLLING_TOPOLOGIES
        ):
            self._srv_monitor.close()

        # Clear the pool from a failed heartbeat.
        if reset_pool:
            server = self._servers.get(server_description.address)
            if server:
                server.pool.reset()

        # Wake waiters in select_servers().
        self._condition.notify_all()
|
357
|
+
|
358
|
+
def on_change(self, server_description, reset_pool=False):
|
359
|
+
"""Process a new ServerDescription after an hello call completes."""
|
360
|
+
# We do no I/O holding the lock.
|
361
|
+
with self._lock:
|
362
|
+
# Monitors may continue working on hello calls for some time
|
363
|
+
# after a call to Topology.close, so this method may be called at
|
364
|
+
# any time. Ensure the topology is open before processing the
|
365
|
+
# change.
|
366
|
+
# Any monitored server was definitely in the topology description
|
367
|
+
# once. Check if it's still in the description or if some state-
|
368
|
+
# change removed it. E.g., we got a host list from the primary
|
369
|
+
# that didn't include this server.
|
370
|
+
if self._opened and self._description.has_server(server_description.address):
|
371
|
+
self._process_change(server_description, reset_pool)
|
372
|
+
|
373
|
+
def _process_srv_update(self, seedlist):
|
374
|
+
"""Process a new seedlist on an opened topology.
|
375
|
+
Hold the lock when calling this.
|
376
|
+
"""
|
377
|
+
td_old = self._description
|
378
|
+
if td_old.topology_type not in SRV_POLLING_TOPOLOGIES:
|
379
|
+
return
|
380
|
+
self._description = _updated_topology_description_srv_polling(self._description, seedlist)
|
381
|
+
|
382
|
+
self._update_servers()
|
383
|
+
|
384
|
+
if self._publish_tp:
|
385
|
+
assert self._events is not None
|
386
|
+
self._events.put(
|
387
|
+
(
|
388
|
+
self._listeners.publish_topology_description_changed,
|
389
|
+
(td_old, self._description, self._topology_id),
|
390
|
+
)
|
391
|
+
)
|
392
|
+
|
393
|
+
def on_srv_update(self, seedlist):
|
394
|
+
"""Process a new list of nodes obtained from scanning SRV records."""
|
395
|
+
# We do no I/O holding the lock.
|
396
|
+
with self._lock:
|
397
|
+
if self._opened:
|
398
|
+
self._process_srv_update(seedlist)
|
399
|
+
|
400
|
+
    def get_server_by_address(self, address):
        """Get a Server or None.

        Returns the current version of the server immediately, even if it's
        Unknown or absent from the topology. Only use this in unittests.
        In driver code, use select_server_by_address, since then you're
        assured a recent view of the server's type and wire protocol version.
        """
        # Plain dict lookup: no locking, no monitoring check, no reconnect.
        return self._servers.get(address)
|
409
|
+
|
410
|
+
    def has_server(self, address):
        """Return True if the (host, port) address is a known server."""
        return address in self._servers
|
412
|
+
|
413
|
+
def get_primary(self):
|
414
|
+
"""Return primary's address or None."""
|
415
|
+
# Implemented here in Topology instead of MongoClient, so it can lock.
|
416
|
+
with self._lock:
|
417
|
+
topology_type = self._description.topology_type
|
418
|
+
if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary:
|
419
|
+
return None
|
420
|
+
|
421
|
+
return writable_server_selector(self._new_selection())[0].address
|
422
|
+
|
423
|
+
def _get_replica_set_members(self, selector):
|
424
|
+
"""Return set of replica set member addresses."""
|
425
|
+
# Implemented here in Topology instead of MongoClient, so it can lock.
|
426
|
+
with self._lock:
|
427
|
+
topology_type = self._description.topology_type
|
428
|
+
if topology_type not in (
|
429
|
+
TOPOLOGY_TYPE.ReplicaSetWithPrimary,
|
430
|
+
TOPOLOGY_TYPE.ReplicaSetNoPrimary,
|
431
|
+
):
|
432
|
+
return set()
|
433
|
+
|
434
|
+
return set([sd.address for sd in selector(self._new_selection())])
|
435
|
+
|
436
|
+
    def get_secondaries(self):
        """Return set of secondary addresses."""
        # Locking is handled inside _get_replica_set_members.
        return self._get_replica_set_members(secondary_server_selector)
|
439
|
+
|
440
|
+
    def get_arbiters(self):
        """Return set of arbiter addresses."""
        # Locking is handled inside _get_replica_set_members.
        return self._get_replica_set_members(arbiter_server_selector)
|
443
|
+
|
444
|
+
    def max_cluster_time(self):
        """Return a document, the highest seen $clusterTime."""
        # NOTE(review): read without holding the lock — presumably safe
        # because the attribute is only ever replaced wholesale; confirm.
        return self._max_cluster_time
|
447
|
+
|
448
|
+
def _receive_cluster_time_no_lock(self, cluster_time):
|
449
|
+
# Driver Sessions Spec: "Whenever a driver receives a cluster time from
|
450
|
+
# a server it MUST compare it to the current highest seen cluster time
|
451
|
+
# for the deployment. If the new cluster time is higher than the
|
452
|
+
# highest seen cluster time it MUST become the new highest seen cluster
|
453
|
+
# time. Two cluster times are compared using only the BsonTimestamp
|
454
|
+
# value of the clusterTime embedded field."
|
455
|
+
if cluster_time:
|
456
|
+
# ">" uses bson.timestamp.Timestamp's comparison operator.
|
457
|
+
if (
|
458
|
+
not self._max_cluster_time
|
459
|
+
or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"]
|
460
|
+
):
|
461
|
+
self._max_cluster_time = cluster_time
|
462
|
+
|
463
|
+
    def receive_cluster_time(self, cluster_time):
        """Update the highest seen $clusterTime while holding the lock."""
        with self._lock:
            self._receive_cluster_time_no_lock(cluster_time)
|
466
|
+
|
467
|
+
    def request_check_all(self, wait_time=5):
        """Wake all monitors, wait for at least one to check its server."""
        with self._lock:
            self._request_check_all()
            # Block up to wait_time seconds for a monitor to notify the
            # condition after completing a check.
            self._condition.wait(wait_time)
|
472
|
+
|
473
|
+
def data_bearing_servers(self):
|
474
|
+
"""Return a list of all data-bearing servers.
|
475
|
+
|
476
|
+
This includes any server that might be selected for an operation.
|
477
|
+
"""
|
478
|
+
if self._description.topology_type == TOPOLOGY_TYPE.Single:
|
479
|
+
return self._description.known_servers
|
480
|
+
return self._description.readable_servers
|
481
|
+
|
482
|
+
    def update_pool(self):
        """Remove any stale sockets and add new sockets if pool is too small."""
        servers = []
        with self._lock:
            # Only update pools for data-bearing servers.
            for sd in self.data_bearing_servers():
                server = self._servers[sd.address]
                # Snapshot each pool's generation under the lock; it is
                # passed to remove_stale_sockets below.
                servers.append((server, server.pool.gen.get_overall()))

        # Do the socket maintenance outside the lock.
        for server, generation in servers:
            try:
                server.pool.remove_stale_sockets(generation)
            except PyMongoError as exc:
                # Report the failure through the normal error-handling path,
                # then re-raise to the caller.
                ctx = _ErrorContext(exc, 0, generation, False, None)
                self.handle_error(server.description.address, ctx)
                raise
|
498
|
+
|
499
|
+
    def close(self):
        """Clear pools and terminate monitors. Topology does not reopen on
        demand. Any further operations will raise
        :exc:`~.errors.InvalidOperation`."""
        with self._lock:
            for server in self._servers.values():
                server.close()

            # Mark all servers Unknown.
            self._description = self._description.reset()
            for address, sd in self._description.server_descriptions().items():
                if address in self._servers:
                    self._servers[address].description = sd

            # Stop SRV polling thread.
            if self._srv_monitor:
                self._srv_monitor.close()

            # _closed makes _ensure_opened raise InvalidOperation from now on.
            self._opened = False
            self._closed = True

        # Publish only after releasing the lock.
        if self._publish_tp:
            assert self._events is not None
            self._events.put((self._listeners.publish_topology_closed, (self._topology_id,)))
        if self._publish_server or self._publish_tp:
            # Stop the events publishing thread.
            self.__events_executor.close()
|
526
|
+
|
527
|
+
    @property
    def description(self):
        """The current :class:`TopologyDescription`."""
        return self._description
|
530
|
+
|
531
|
+
    def pop_all_sessions(self):
        """Pop all session ids from the pool."""
        # Hold the lock: the session pool is shared across threads.
        with self._lock:
            return self._session_pool.pop_all()
|
535
|
+
|
536
|
+
    def _check_implicit_session_support(self):
        """Check session support while holding the lock.

        May run server selection and can raise ConfigurationError when the
        deployment does not support sessions.
        """
        with self._lock:
            self._check_session_support()
|
539
|
+
|
540
|
+
    def _check_session_support(self):
        """Internal check for session support on clusters.

        Returns the logical session timeout in minutes (``inf`` in load
        balanced mode), or raises :exc:`ConfigurationError` when the
        deployment does not support sessions. Hold the lock when calling.
        """
        if self._settings.load_balanced:
            # Sessions never time out in load balanced mode.
            return float("inf")
        session_timeout = self._description.logical_session_timeout_minutes
        if session_timeout is None:
            # Maybe we need an initial scan? Can raise ServerSelectionError.
            if self._description.topology_type == TOPOLOGY_TYPE.Single:
                if not self._description.has_known_servers:
                    self._select_servers_loop(
                        any_server_selector, self.get_server_selection_timeout(), None
                    )
            elif not self._description.readable_servers:
                self._select_servers_loop(
                    readable_server_selector, self.get_server_selection_timeout(), None
                )

            # Re-read after the scan: the description may now report a value.
            session_timeout = self._description.logical_session_timeout_minutes
            if session_timeout is None:
                raise ConfigurationError("Sessions are not supported by this MongoDB deployment")
        return session_timeout
|
562
|
+
|
563
|
+
def get_server_session(self):
    """Start or resume a server session, or raise ConfigurationError."""
    with self._lock:
        # May block on an initial topology scan; raises ConfigurationError
        # when sessions are unsupported by the deployment.
        session_timeout = self._check_session_support()
        return self._session_pool.get_server_session(session_timeout)
def return_server_session(self, server_session, lock):
    """Return a server session to the pool.

    ``lock`` is False when the caller is running inside a ``__del__``
    method, where taking the topology lock is not safe; in that case the
    no-lock variant of the pool return is used.
    """
    if lock:
        with self._lock:
            self._session_pool.return_server_session(
                server_session, self._description.logical_session_timeout_minutes
            )
    else:
        # Called from a __del__ method, can't use a lock.
        self._session_pool.return_server_session_no_lock(server_session)
def _new_selection(self):
    """A Selection object, initially including all known servers.

    Hold the lock when calling this.
    """
    return Selection.from_topology_description(self._description)
def _ensure_opened(self):
    """Start monitors, or restart after a fork.

    Hold the lock when calling this. Raises InvalidOperation once the
    topology has been closed.
    """
    if self._closed:
        raise InvalidOperation("Cannot use MongoClient after close")

    if not self._opened:
        self._opened = True
        self._update_servers()

        # Start or restart the events publishing thread.
        if self._publish_tp or self._publish_server:
            self.__events_executor.open()

        # Start the SRV polling thread.
        if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES):
            self._srv_monitor.open()

        if self._settings.load_balanced:
            # Emit initial SDAM events for load balancer mode.
            # NOTE(review): maxWireVersion is hard-coded to 13 here —
            # presumably the minimum for load balancer support; confirm
            # against the driver's load balancer spec before changing.
            self._process_change(
                ServerDescription(
                    self._seed_addresses[0],
                    Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}),
                )
            )

    # Ensure that the monitors are open.
    for server in self._servers.values():
        server.open()
def _is_stale_error(self, address, err_ctx):
    """Return True when *err_ctx* describes an outdated error that
    should be ignored for the server at *address*."""
    server = self._servers.get(address)
    if server is None:
        # Another thread removed this server from the topology.
        return True

    generation_is_stale = server._pool.stale_generation(
        err_ctx.sock_generation, err_ctx.service_id
    )
    if generation_is_stale:
        # This is an outdated error from a previous pool version.
        return True

    # topologyVersion check: ignore the error when the server's current
    # topologyVersion is >= the error's topologyVersion.
    err = err_ctx.error
    details = getattr(err, "details", None) if err else None
    error_tv = details.get("topologyVersion") if isinstance(details, dict) else None
    return _is_stale_error_topology_version(
        server.description.topology_version, error_tv
    )
def _handle_error(self, address, err_ctx):
    """Apply SDAM error-handling rules for one server error.

    Hold the lock when calling this (the public ``handle_error`` wrapper
    takes it). Depending on the error type this may mark the server
    Unknown, clear its pool, and/or request an immediate check.
    """
    if self._is_stale_error(address, err_ctx):
        return

    server = self._servers[address]
    error = err_ctx.error
    exc_type = type(error)
    service_id = err_ctx.service_id

    # Ignore a handshake error if the server is behind a load balancer but
    # the service ID is unknown. This indicates that the error happened
    # when dialing the connection or during the MongoDB handshake, so we
    # don't know the service ID to use for clearing the pool.
    if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake:
        return

    if issubclass(exc_type, NetworkTimeout) and err_ctx.completed_handshake:
        # The socket has been closed. Don't reset the server.
        # Server Discovery And Monitoring Spec: "When an application
        # operation fails because of any network error besides a socket
        # timeout...."
        return
    elif issubclass(exc_type, WriteError):
        # Ignore writeErrors.
        return
    elif issubclass(exc_type, (NotPrimaryError, OperationFailure)):
        # As per the SDAM spec if:
        # - the server sees a "not primary" error, and
        # - the server is not shutting down, and
        # - the server version is >= 4.2, then
        # we keep the existing connection pool, but mark the server type
        # as Unknown and request an immediate check of the server.
        # Otherwise, we clear the connection pool, mark the server as
        # Unknown and request an immediate check of the server.
        if hasattr(error, "code"):
            err_code = error.code
        else:
            # Default error code if one does not exist.
            default = 10107 if isinstance(error, NotPrimaryError) else None
            err_code = error.details.get("code", default)
        if err_code in helpers._NOT_PRIMARY_CODES:
            is_shutting_down = err_code in helpers._SHUTDOWN_CODES
            # Mark server Unknown, clear the pool, and request check.
            if not self._settings.load_balanced:
                self._process_change(ServerDescription(address, error=error))
            if is_shutting_down or (err_ctx.max_wire_version <= 7):
                # Clear the pool.
                server.reset(service_id)
            server.request_check()
        elif not err_ctx.completed_handshake:
            # Unknown command error during the connection handshake.
            if not self._settings.load_balanced:
                self._process_change(ServerDescription(address, error=error))
            # Clear the pool.
            server.reset(service_id)
    elif issubclass(exc_type, ConnectionFailure):
        # "Client MUST replace the server's description with type Unknown
        # ... MUST NOT request an immediate check of the server."
        if not self._settings.load_balanced:
            self._process_change(ServerDescription(address, error=error))
        # Clear the pool.
        server.reset(service_id)
        # "When a client marks a server Unknown from `Network error when
        # reading or writing`_, clients MUST cancel the hello check on
        # that server and close the current monitoring connection."
        server._monitor.cancel_check()
def handle_error(self, address, err_ctx):
    """Handle an application error.

    May reset the server to Unknown, clear the pool, and request an
    immediate check depending on the error and the context.

    This is the public, lock-taking entry point; the real work happens
    in :meth:`_handle_error`.
    """
    with self._lock:
        self._handle_error(address, err_ctx)
def _request_check_all(self):
    """Wake all monitors. Hold the lock when calling this."""
    for server in self._servers.values():
        server.request_check()
def _update_servers(self):
    """Sync our Servers from TopologyDescription.server_descriptions.

    Hold the lock while calling this. Creates a Server (with its monitor
    and pools) for every newly-described address, refreshes descriptions
    for known addresses, and closes/removes servers no longer present in
    the topology description.
    """
    for address, sd in self._description.server_descriptions().items():
        if address not in self._servers:
            monitor = self._settings.monitor_class(
                server_description=sd,
                topology=self,
                pool=self._create_pool_for_monitor(address),
                topology_settings=self._settings,
            )

            # Pass the events queue as a weakref so the Server does not
            # keep it alive when publishing is disabled elsewhere.
            weak = None
            if self._publish_server:
                weak = weakref.ref(self._events)
            server = Server(
                server_description=sd,
                pool=self._create_pool_for_server(address),
                monitor=monitor,
                topology_id=self._topology_id,
                listeners=self._listeners,
                events=weak,
            )

            self._servers[address] = server
            server.open()
        else:
            # Cache old is_writable value.
            was_writable = self._servers[address].description.is_writable
            # Update server description.
            self._servers[address].description = sd
            # Update is_writable value of the pool, if it changed.
            if was_writable != sd.is_writable:
                self._servers[address].pool.update_is_writable(sd.is_writable)

    # Iterate over a copy: we mutate self._servers while removing.
    for address, server in list(self._servers.items()):
        if not self._description.has_server(address):
            server.close()
            self._servers.pop(address)
def _create_pool_for_server(self, address):
    """Create an application connection pool for *address* using the
    topology's configured pool class and options."""
    return self._settings.pool_class(address, self._settings.pool_options)
def _create_pool_for_monitor(self, address):
    """Create the single-connection pool used by a server monitor.

    Monitor pools deliberately use a reduced option set derived from the
    application pool options (see the spec note below).
    """
    options = self._settings.pool_options

    # According to the Server Discovery And Monitoring Spec, monitors use
    # connect_timeout for both connect_timeout and socket_timeout. The
    # pool only has one socket so maxPoolSize and so on aren't needed.
    monitor_pool_options = PoolOptions(
        connect_timeout=options.connect_timeout,
        socket_timeout=options.connect_timeout,
        ssl_context=options._ssl_context,
        tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames,
        event_listeners=options._event_listeners,
        appname=options.appname,
        driver=options.driver,
        pause_enabled=False,
        server_api=options.server_api,
    )

    # handshake=False: monitor connections speak the monitoring protocol,
    # not the normal application handshake.
    return self._settings.pool_class(address, monitor_pool_options, handshake=False)
def _error_message(self, selector):
    """Format an error message if server selection fails.

    Hold the lock when calling this. Returns a human-readable string
    explaining why *selector* matched no servers.
    """
    is_replica_set = self._description.topology_type in (
        TOPOLOGY_TYPE.ReplicaSetWithPrimary,
        TOPOLOGY_TYPE.ReplicaSetNoPrimary,
    )

    if is_replica_set:
        server_plural = "replica set members"
    elif self._description.topology_type == TOPOLOGY_TYPE.Sharded:
        server_plural = "mongoses"
    else:
        server_plural = "servers"

    if self._description.known_servers:
        # We've connected, but no servers match the selector.
        if selector is writable_server_selector:
            if is_replica_set:
                return "No primary available for writes"
            else:
                return "No %s available for writes" % server_plural
        else:
            return 'No %s match selector "%s"' % (server_plural, selector)
    else:
        addresses = list(self._description.server_descriptions())
        servers = list(self._description.server_descriptions().values())
        if not servers:
            if is_replica_set:
                # We removed all servers because of the wrong setName?
                return 'No %s available for replica set name "%s"' % (
                    server_plural,
                    self._settings.replica_set_name,
                )
            else:
                return "No %s available" % server_plural

        # 1 or more servers, all Unknown. Are they unknown for one reason?
        error = servers[0].error
        same = all(server.error == error for server in servers[1:])
        if same:
            if error is None:
                # We're still discovering.
                return "No %s found yet" % server_plural

            if is_replica_set and not set(addresses).intersection(self._seed_addresses):
                # We replaced our seeds with new hosts but can't reach any.
                return (
                    "Could not reach any servers in %s. Replica set is"
                    " configured with internal hostnames or IPs?" % addresses
                )

            return str(error)
        else:
            # Different errors per server: report them all.
            return ",".join(str(server.error) for server in servers if server.error)
def __repr__(self):
    """Debug representation; flags topologies that were never opened."""
    msg = ""
    if not self._opened:
        msg = "CLOSED "
    return "<%s %s%r>" % (self.__class__.__name__, msg, self._description)
def eq_props(self):
    """The properties to use for MongoClient/Topology equality checks."""
    settings = self._settings
    ordered_seeds = tuple(sorted(settings.seeds))
    return (
        ordered_seeds,
        settings.replica_set_name,
        settings.fqdn,
        settings.srv_service_name,
    )
def __eq__(self, other):
    """Equality is defined by :meth:`eq_props`; other types get
    NotImplemented so Python can try the reflected comparison."""
    if isinstance(other, self.__class__):
        return self.eq_props() == other.eq_props()
    return NotImplemented
def __hash__(self):
    # Hash must agree with __eq__, so both derive from eq_props().
    return hash(self.eq_props())
class _ErrorContext(object):
|
864
|
+
"""An error with context for SDAM error handling."""
|
865
|
+
|
866
|
+
def __init__(self, error, max_wire_version, sock_generation, completed_handshake, service_id):
|
867
|
+
self.error = error
|
868
|
+
self.max_wire_version = max_wire_version
|
869
|
+
self.sock_generation = sock_generation
|
870
|
+
self.completed_handshake = completed_handshake
|
871
|
+
self.service_id = service_id
|
872
|
+
|
873
|
+
|
874
|
+
def _is_stale_error_topology_version(current_tv, error_tv):
|
875
|
+
"""Return True if the error's topologyVersion is <= current."""
|
876
|
+
if current_tv is None or error_tv is None:
|
877
|
+
return False
|
878
|
+
if current_tv["processId"] != error_tv["processId"]:
|
879
|
+
return False
|
880
|
+
return current_tv["counter"] >= error_tv["counter"]
|
881
|
+
|
882
|
+
|
883
|
+
def _is_stale_server_description(current_sd, new_sd):
|
884
|
+
"""Return True if the new topologyVersion is < current."""
|
885
|
+
current_tv, new_tv = current_sd.topology_version, new_sd.topology_version
|
886
|
+
if current_tv is None or new_tv is None:
|
887
|
+
return False
|
888
|
+
if current_tv["processId"] != new_tv["processId"]:
|
889
|
+
return False
|
890
|
+
return current_tv["counter"] > new_tv["counter"]
|