sensu-plugins-mongodb-mrtrotl 1.4.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +1 -0
- data/LICENSE +22 -0
- data/README.md +27 -0
- data/bin/check-mongodb-metric.rb +144 -0
- data/bin/check-mongodb-query-count.rb +267 -0
- data/bin/check-mongodb.py +1644 -0
- data/bin/check-mongodb.rb +5 -0
- data/bin/metrics-mongodb-replication.rb +254 -0
- data/bin/metrics-mongodb.rb +133 -0
- data/lib/bson/__init__.py +1347 -0
- data/lib/bson/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/_helpers.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/binary.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/code.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/codec_options.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/dbref.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/decimal128.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/int64.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/json_util.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/max_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/min_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/objectid.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/raw_bson.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/regex.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/son.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/timestamp.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/tz_util.cpython-310.pyc +0 -0
- data/lib/bson/_cbson.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/bson/_helpers.py +41 -0
- data/lib/bson/binary.py +364 -0
- data/lib/bson/code.py +101 -0
- data/lib/bson/codec_options.py +414 -0
- data/lib/bson/codec_options.pyi +100 -0
- data/lib/bson/dbref.py +133 -0
- data/lib/bson/decimal128.py +314 -0
- data/lib/bson/errors.py +35 -0
- data/lib/bson/int64.py +39 -0
- data/lib/bson/json_util.py +874 -0
- data/lib/bson/max_key.py +55 -0
- data/lib/bson/min_key.py +55 -0
- data/lib/bson/objectid.py +286 -0
- data/lib/bson/py.typed +2 -0
- data/lib/bson/raw_bson.py +175 -0
- data/lib/bson/regex.py +135 -0
- data/lib/bson/son.py +208 -0
- data/lib/bson/timestamp.py +124 -0
- data/lib/bson/tz_util.py +52 -0
- data/lib/gridfs/__init__.py +1015 -0
- data/lib/gridfs/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/grid_file.cpython-310.pyc +0 -0
- data/lib/gridfs/errors.py +33 -0
- data/lib/gridfs/grid_file.py +907 -0
- data/lib/gridfs/py.typed +2 -0
- data/lib/pymongo/__init__.py +185 -0
- data/lib/pymongo/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/_csot.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/aggregation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth_aws.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/bulk.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/change_stream.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_session.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collection.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/command_cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/common.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/compression_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/daemon.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/database.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/driver_info.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/event_loggers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/hello.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/helpers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/max_staleness_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/message.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/mongo_client.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitoring.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/network.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_cache.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/operations.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/periodic_executor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pool.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pyopenssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_preferences.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/response.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/results.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/saslprep.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_api.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_type.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/settings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/socket_checker.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/srv_resolver.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/typings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/uri_parser.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/write_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/_cmessage.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/pymongo/_csot.py +118 -0
- data/lib/pymongo/aggregation.py +229 -0
- data/lib/pymongo/auth.py +549 -0
- data/lib/pymongo/auth_aws.py +94 -0
- data/lib/pymongo/bulk.py +513 -0
- data/lib/pymongo/change_stream.py +457 -0
- data/lib/pymongo/client_options.py +302 -0
- data/lib/pymongo/client_session.py +1112 -0
- data/lib/pymongo/collation.py +224 -0
- data/lib/pymongo/collection.py +3204 -0
- data/lib/pymongo/command_cursor.py +353 -0
- data/lib/pymongo/common.py +984 -0
- data/lib/pymongo/compression_support.py +149 -0
- data/lib/pymongo/cursor.py +1345 -0
- data/lib/pymongo/daemon.py +141 -0
- data/lib/pymongo/database.py +1202 -0
- data/lib/pymongo/driver_info.py +42 -0
- data/lib/pymongo/encryption.py +884 -0
- data/lib/pymongo/encryption_options.py +221 -0
- data/lib/pymongo/errors.py +365 -0
- data/lib/pymongo/event_loggers.py +221 -0
- data/lib/pymongo/hello.py +219 -0
- data/lib/pymongo/helpers.py +259 -0
- data/lib/pymongo/max_staleness_selectors.py +114 -0
- data/lib/pymongo/message.py +1440 -0
- data/lib/pymongo/mongo_client.py +2144 -0
- data/lib/pymongo/monitor.py +440 -0
- data/lib/pymongo/monitoring.py +1801 -0
- data/lib/pymongo/network.py +311 -0
- data/lib/pymongo/ocsp_cache.py +87 -0
- data/lib/pymongo/ocsp_support.py +372 -0
- data/lib/pymongo/operations.py +507 -0
- data/lib/pymongo/periodic_executor.py +183 -0
- data/lib/pymongo/pool.py +1660 -0
- data/lib/pymongo/py.typed +2 -0
- data/lib/pymongo/pyopenssl_context.py +383 -0
- data/lib/pymongo/read_concern.py +75 -0
- data/lib/pymongo/read_preferences.py +609 -0
- data/lib/pymongo/response.py +109 -0
- data/lib/pymongo/results.py +217 -0
- data/lib/pymongo/saslprep.py +113 -0
- data/lib/pymongo/server.py +247 -0
- data/lib/pymongo/server_api.py +170 -0
- data/lib/pymongo/server_description.py +285 -0
- data/lib/pymongo/server_selectors.py +153 -0
- data/lib/pymongo/server_type.py +32 -0
- data/lib/pymongo/settings.py +159 -0
- data/lib/pymongo/socket_checker.py +104 -0
- data/lib/pymongo/srv_resolver.py +126 -0
- data/lib/pymongo/ssl_context.py +39 -0
- data/lib/pymongo/ssl_support.py +99 -0
- data/lib/pymongo/topology.py +890 -0
- data/lib/pymongo/topology_description.py +639 -0
- data/lib/pymongo/typings.py +39 -0
- data/lib/pymongo/uri_parser.py +624 -0
- data/lib/pymongo/write_concern.py +129 -0
- data/lib/pymongo-4.2.0.dist-info/INSTALLER +1 -0
- data/lib/pymongo-4.2.0.dist-info/LICENSE +201 -0
- data/lib/pymongo-4.2.0.dist-info/METADATA +250 -0
- data/lib/pymongo-4.2.0.dist-info/RECORD +167 -0
- data/lib/pymongo-4.2.0.dist-info/REQUESTED +0 -0
- data/lib/pymongo-4.2.0.dist-info/WHEEL +6 -0
- data/lib/pymongo-4.2.0.dist-info/top_level.txt +3 -0
- data/lib/sensu-plugins-mongodb/metrics.rb +391 -0
- data/lib/sensu-plugins-mongodb/version.rb +9 -0
- data/lib/sensu-plugins-mongodb.rb +1 -0
- metadata +407 -0
data/lib/pymongo/pool.py
ADDED
@@ -0,0 +1,1660 @@
|
|
1
|
+
# Copyright 2011-present MongoDB, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License"); you
|
4
|
+
# may not use this file except in compliance with the License. You
|
5
|
+
# may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
12
|
+
# implied. See the License for the specific language governing
|
13
|
+
# permissions and limitations under the License.
|
14
|
+
|
15
|
+
import collections
|
16
|
+
import contextlib
|
17
|
+
import copy
|
18
|
+
import os
|
19
|
+
import platform
|
20
|
+
import socket
|
21
|
+
import ssl
|
22
|
+
import sys
|
23
|
+
import threading
|
24
|
+
import time
|
25
|
+
import weakref
|
26
|
+
from typing import Any, NoReturn, Optional
|
27
|
+
|
28
|
+
from bson import DEFAULT_CODEC_OPTIONS
|
29
|
+
from bson.son import SON
|
30
|
+
from pymongo import __version__, _csot, auth, helpers
|
31
|
+
from pymongo.client_session import _validate_session_write_concern
|
32
|
+
from pymongo.common import (
|
33
|
+
MAX_BSON_SIZE,
|
34
|
+
MAX_CONNECTING,
|
35
|
+
MAX_IDLE_TIME_SEC,
|
36
|
+
MAX_MESSAGE_SIZE,
|
37
|
+
MAX_POOL_SIZE,
|
38
|
+
MAX_WIRE_VERSION,
|
39
|
+
MAX_WRITE_BATCH_SIZE,
|
40
|
+
MIN_POOL_SIZE,
|
41
|
+
ORDERED_TYPES,
|
42
|
+
WAIT_QUEUE_TIMEOUT,
|
43
|
+
)
|
44
|
+
from pymongo.errors import (
|
45
|
+
AutoReconnect,
|
46
|
+
ConfigurationError,
|
47
|
+
ConnectionFailure,
|
48
|
+
DocumentTooLarge,
|
49
|
+
ExecutionTimeout,
|
50
|
+
InvalidOperation,
|
51
|
+
NetworkTimeout,
|
52
|
+
NotPrimaryError,
|
53
|
+
OperationFailure,
|
54
|
+
PyMongoError,
|
55
|
+
WaitQueueTimeoutError,
|
56
|
+
_CertificateError,
|
57
|
+
)
|
58
|
+
from pymongo.hello import Hello, HelloCompat
|
59
|
+
from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason
|
60
|
+
from pymongo.network import command, receive_message
|
61
|
+
from pymongo.read_preferences import ReadPreference
|
62
|
+
from pymongo.server_api import _add_to_command
|
63
|
+
from pymongo.server_type import SERVER_TYPE
|
64
|
+
from pymongo.socket_checker import SocketChecker
|
65
|
+
from pymongo.ssl_support import HAS_SNI, SSLError
|
66
|
+
|
67
|
+
try:
|
68
|
+
from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl
|
69
|
+
|
70
|
+
def _set_non_inheritable_non_atomic(fd):
|
71
|
+
"""Set the close-on-exec flag on the given file descriptor."""
|
72
|
+
flags = fcntl(fd, F_GETFD)
|
73
|
+
fcntl(fd, F_SETFD, flags | FD_CLOEXEC)
|
74
|
+
|
75
|
+
except ImportError:
|
76
|
+
# Windows, various platforms we don't claim to support
|
77
|
+
# (Jython, IronPython, ...), systems that don't provide
|
78
|
+
# everything we need from fcntl, etc.
|
79
|
+
def _set_non_inheritable_non_atomic(fd):
|
80
|
+
"""Dummy function for platforms that don't provide fcntl."""
|
81
|
+
pass
|
82
|
+
|
83
|
+
|
84
|
+
_MAX_TCP_KEEPIDLE = 120
|
85
|
+
_MAX_TCP_KEEPINTVL = 10
|
86
|
+
_MAX_TCP_KEEPCNT = 9
|
87
|
+
|
88
|
+
if sys.platform == "win32":
|
89
|
+
try:
|
90
|
+
import _winreg as winreg
|
91
|
+
except ImportError:
|
92
|
+
import winreg
|
93
|
+
|
94
|
+
def _query(key, name, default):
|
95
|
+
try:
|
96
|
+
value, _ = winreg.QueryValueEx(key, name)
|
97
|
+
# Ensure the value is a number or raise ValueError.
|
98
|
+
return int(value)
|
99
|
+
except (OSError, ValueError):
|
100
|
+
# QueryValueEx raises OSError when the key does not exist (i.e.
|
101
|
+
# the system is using the Windows default value).
|
102
|
+
return default
|
103
|
+
|
104
|
+
try:
|
105
|
+
with winreg.OpenKey(
|
106
|
+
winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters"
|
107
|
+
) as key:
|
108
|
+
_WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000)
|
109
|
+
_WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000)
|
110
|
+
except OSError:
|
111
|
+
# We could not check the default values because winreg.OpenKey failed.
|
112
|
+
# Assume the system is using the default values.
|
113
|
+
_WINDOWS_TCP_IDLE_MS = 7200000
|
114
|
+
_WINDOWS_TCP_INTERVAL_MS = 1000
|
115
|
+
|
116
|
+
def _set_keepalive_times(sock):
|
117
|
+
idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000)
|
118
|
+
interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000)
|
119
|
+
if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS:
|
120
|
+
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms))
|
121
|
+
|
122
|
+
else:
|
123
|
+
|
124
|
+
def _set_tcp_option(sock, tcp_option, max_value):
|
125
|
+
if hasattr(socket, tcp_option):
|
126
|
+
sockopt = getattr(socket, tcp_option)
|
127
|
+
try:
|
128
|
+
# PYTHON-1350 - NetBSD doesn't implement getsockopt for
|
129
|
+
# TCP_KEEPIDLE and friends. Don't attempt to set the
|
130
|
+
# values there.
|
131
|
+
default = sock.getsockopt(socket.IPPROTO_TCP, sockopt)
|
132
|
+
if default > max_value:
|
133
|
+
sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value)
|
134
|
+
except socket.error:
|
135
|
+
pass
|
136
|
+
|
137
|
+
def _set_keepalive_times(sock):
|
138
|
+
_set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE)
|
139
|
+
_set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL)
|
140
|
+
_set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT)
|
141
|
+
|
142
|
+
|
143
|
+
# Metadata sent to the server in the "client" field of the initial
# handshake (hello) command.  The OS and platform entries below are
# filled in per-platform at import time.
_METADATA: SON[str, Any] = SON(
    [
        ("driver", SON([("name", "PyMongo"), ("version", __version__)])),
    ]
)

if sys.platform.startswith("linux"):
    # platform.linux_distribution was deprecated in Python 3.5
    # and removed in Python 3.8. Starting in Python 3.5 it
    # raises DeprecationWarning
    # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5
    _name = platform.system()
    _METADATA["os"] = SON(
        [
            ("type", _name),
            ("name", _name),
            ("architecture", platform.machine()),
            # Kernel version (e.g. 4.4.0-17-generic).
            ("version", platform.release()),
        ]
    )
elif sys.platform == "darwin":
    _METADATA["os"] = SON(
        [
            ("type", platform.system()),
            ("name", platform.system()),
            ("architecture", platform.machine()),
            # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin
            # kernel version.
            ("version", platform.mac_ver()[0]),
        ]
    )
elif sys.platform == "win32":
    _METADATA["os"] = SON(
        [
            ("type", platform.system()),
            # "Windows XP", "Windows 7", "Windows 10", etc.
            ("name", " ".join((platform.system(), platform.release()))),
            ("architecture", platform.machine()),
            # Windows patch level (e.g. 5.1.2600-SP3)
            ("version", "-".join(platform.win32_ver()[1:3])),
        ]
    )
elif sys.platform.startswith("java"):
    _name, _ver, _arch = platform.java_ver()[-1]
    _METADATA["os"] = SON(
        [
            # Linux, Windows 7, Mac OS X, etc.
            ("type", _name),
            ("name", _name),
            # x86, x86_64, AMD64, etc.
            ("architecture", _arch),
            # Linux kernel version, OSX version, etc.
            ("version", _ver),
        ]
    )
else:
    # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11)
    _aliased = platform.system_alias(platform.system(), platform.release(), platform.version())
    _METADATA["os"] = SON(
        [
            ("type", platform.system()),
            ("name", " ".join([part for part in _aliased[:2] if part])),
            ("architecture", platform.machine()),
            ("version", _aliased[2]),
        ]
    )

if platform.python_implementation().startswith("PyPy"):
    # PyPy reports both its own version and the Python level it implements.
    _METADATA["platform"] = " ".join(
        (
            platform.python_implementation(),
            ".".join(map(str, sys.pypy_version_info)),  # type: ignore
            "(Python %s)" % ".".join(map(str, sys.version_info)),
        )
    )
elif sys.platform.startswith("java"):
    _METADATA["platform"] = " ".join(
        (
            platform.python_implementation(),
            ".".join(map(str, sys.version_info)),
            "(%s)" % " ".join((platform.system(), platform.release())),
        )
    )
else:
    _METADATA["platform"] = " ".join(
        (platform.python_implementation(), ".".join(map(str, sys.version_info)))
    )


# If the first getaddrinfo call of this interpreter's life is on a thread,
# while the main thread holds the import lock, getaddrinfo deadlocks trying
# to import the IDNA codec. Import it here, where presumably we're on the
# main thread, to avoid the deadlock. See PYTHON-607.
"foo".encode("idna")
|
239
|
+
|
240
|
+
def _raise_connection_failure(
    address: Any, error: Exception, msg_prefix: Optional[str] = None
) -> NoReturn:
    """Convert a socket.error to ConnectionFailure and raise it."""
    host, port = address
    # If connecting to a Unix socket, port will be None.
    if port is None:
        msg = "%s: %s" % (host, error)
    else:
        msg = "%s:%d: %s" % (host, port, error)
    if msg_prefix:
        msg = msg_prefix + msg
    # Eventlet does not distinguish TLS network timeouts from other
    # SSLErrors (https://github.com/eventlet/eventlet/issues/692).
    # Luckily, we can work around this limitation because the phrase
    # 'timed out' appears in all the timeout related SSLErrors raised.
    timed_out = isinstance(error, socket.timeout) or (
        isinstance(error, SSLError) and "timed out" in str(error)
    )
    if timed_out:
        raise NetworkTimeout(msg) from error
    raise AutoReconnect(msg) from error
263
|
+
|
264
|
+
def _cond_wait(condition, deadline):
|
265
|
+
timeout = deadline - time.monotonic() if deadline else None
|
266
|
+
return condition.wait(timeout)
|
267
|
+
|
268
|
+
|
269
|
+
class PoolOptions(object):
    """Read only connection pool options for a MongoClient.

    Should not be instantiated directly by application developers. Access
    a client's pool options via
    :attr:`~pymongo.client_options.ClientOptions.pool_options` instead::

      pool_opts = client.options.pool_options
      pool_opts.max_pool_size
      pool_opts.min_pool_size

    """

    # Name-mangled slots keep the options effectively read-only; access
    # from outside the class goes through the properties below.
    __slots__ = (
        "__max_pool_size",
        "__min_pool_size",
        "__max_idle_time_seconds",
        "__connect_timeout",
        "__socket_timeout",
        "__wait_queue_timeout",
        "__ssl_context",
        "__tls_allow_invalid_hostnames",
        "__event_listeners",
        "__appname",
        "__driver",
        "__metadata",
        "__compression_settings",
        "__max_connecting",
        "__pause_enabled",
        "__server_api",
        "__load_balanced",
        "__credentials",
    )

    def __init__(
        self,
        max_pool_size=MAX_POOL_SIZE,
        min_pool_size=MIN_POOL_SIZE,
        max_idle_time_seconds=MAX_IDLE_TIME_SEC,
        connect_timeout=None,
        socket_timeout=None,
        wait_queue_timeout=WAIT_QUEUE_TIMEOUT,
        ssl_context=None,
        tls_allow_invalid_hostnames=False,
        event_listeners=None,
        appname=None,
        driver=None,
        compression_settings=None,
        max_connecting=MAX_CONNECTING,
        pause_enabled=True,
        server_api=None,
        load_balanced=None,
        credentials=None,
    ):
        self.__max_pool_size = max_pool_size
        self.__min_pool_size = min_pool_size
        self.__max_idle_time_seconds = max_idle_time_seconds
        self.__connect_timeout = connect_timeout
        self.__socket_timeout = socket_timeout
        self.__wait_queue_timeout = wait_queue_timeout
        self.__ssl_context = ssl_context
        self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames
        self.__event_listeners = event_listeners
        self.__appname = appname
        self.__driver = driver
        self.__compression_settings = compression_settings
        self.__max_connecting = max_connecting
        self.__pause_enabled = pause_enabled
        self.__server_api = server_api
        self.__load_balanced = load_balanced
        self.__credentials = credentials
        # Deep-copy the module-level template so per-client additions
        # (application name, driver info) don't leak across clients.
        self.__metadata = copy.deepcopy(_METADATA)
        if appname:
            self.__metadata["application"] = {"name": appname}

        # Combine the "driver" MongoClient option with PyMongo's info, like:
        # {
        #    'driver': {
        #        'name': 'PyMongo|MyDriver',
        #        'version': '4.2.0|1.2.3',
        #    },
        #    'platform': 'CPython 3.7.0|MyPlatform'
        # }
        if driver:
            if driver.name:
                self.__metadata["driver"]["name"] = "%s|%s" % (
                    _METADATA["driver"]["name"],
                    driver.name,
                )
            if driver.version:
                self.__metadata["driver"]["version"] = "%s|%s" % (
                    _METADATA["driver"]["version"],
                    driver.version,
                )
            if driver.platform:
                self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform)

    @property
    def _credentials(self):
        """A :class:`~pymongo.auth.MongoCredentials` instance or None."""
        return self.__credentials

    @property
    def non_default_options(self):
        """The non-default options this pool was created with.

        Added for CMAP's :class:`PoolCreatedEvent`.
        """
        opts = {}
        if self.__max_pool_size != MAX_POOL_SIZE:
            opts["maxPoolSize"] = self.__max_pool_size
        if self.__min_pool_size != MIN_POOL_SIZE:
            opts["minPoolSize"] = self.__min_pool_size
        if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC:
            # Reported in milliseconds per the CMAP spec's option naming.
            opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000
        if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT:
            opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000
        if self.__max_connecting != MAX_CONNECTING:
            opts["maxConnecting"] = self.__max_connecting
        return opts

    @property
    def max_pool_size(self):
        """The maximum allowable number of concurrent connections to each
        connected server. Requests to a server will block if there are
        `maxPoolSize` outstanding connections to the requested server.
        Defaults to 100. Cannot be 0.

        When a server's pool has reached `max_pool_size`, operations for that
        server block waiting for a socket to be returned to the pool. If
        ``waitQueueTimeoutMS`` is set, a blocked operation will raise
        :exc:`~pymongo.errors.ConnectionFailure` after a timeout.
        By default ``waitQueueTimeoutMS`` is not set.
        """
        return self.__max_pool_size

    @property
    def min_pool_size(self):
        """The minimum required number of concurrent connections that the pool
        will maintain to each connected server. Default is 0.
        """
        return self.__min_pool_size

    @property
    def max_connecting(self):
        """The maximum number of concurrent connection creation attempts per
        pool. Defaults to 2.
        """
        return self.__max_connecting

    @property
    def pause_enabled(self):
        # Whether the pool may be paused (e.g. after a connection error).
        return self.__pause_enabled

    @property
    def max_idle_time_seconds(self):
        """The maximum number of seconds that a connection can remain
        idle in the pool before being removed and replaced. Defaults to
        `None` (no limit).
        """
        return self.__max_idle_time_seconds

    @property
    def connect_timeout(self):
        """How long a connection can take to be opened before timing out."""
        return self.__connect_timeout

    @property
    def socket_timeout(self):
        """How long a send or receive on a socket can take before timing out."""
        return self.__socket_timeout

    @property
    def wait_queue_timeout(self):
        """How long a thread will wait for a socket from the pool if the pool
        has no free sockets.
        """
        return self.__wait_queue_timeout

    @property
    def _ssl_context(self):
        """An SSLContext instance or None."""
        return self.__ssl_context

    @property
    def tls_allow_invalid_hostnames(self):
        """If True skip ssl.match_hostname."""
        return self.__tls_allow_invalid_hostnames

    @property
    def _event_listeners(self):
        """An instance of pymongo.monitoring._EventListeners."""
        return self.__event_listeners

    @property
    def appname(self):
        """The application name, for sending with hello in server handshake."""
        return self.__appname

    @property
    def driver(self):
        """Driver name and version, for sending with hello in handshake."""
        return self.__driver

    @property
    def _compression_settings(self):
        # Compression configuration negotiated with the server, or None.
        return self.__compression_settings

    @property
    def metadata(self):
        """A dict of metadata about the application, driver, os, and platform."""
        # Shallow copy so callers can't mutate the stored metadata.
        return self.__metadata.copy()

    @property
    def server_api(self):
        """A pymongo.server_api.ServerApi or None."""
        return self.__server_api

    @property
    def load_balanced(self):
        """True if this Pool is configured in load balanced mode."""
        return self.__load_balanced
|
492
|
+
|
493
|
+
class _CancellationContext(object):
|
494
|
+
def __init__(self):
|
495
|
+
self._cancelled = False
|
496
|
+
|
497
|
+
def cancel(self):
|
498
|
+
"""Cancel this context."""
|
499
|
+
self._cancelled = True
|
500
|
+
|
501
|
+
@property
|
502
|
+
def cancelled(self):
|
503
|
+
"""Was cancel called?"""
|
504
|
+
return self._cancelled
|
505
|
+
|
506
|
+
|
507
|
+
class SocketInfo(object):
|
508
|
+
"""Store a socket with some metadata.
|
509
|
+
|
510
|
+
:Parameters:
|
511
|
+
- `sock`: a raw socket object
|
512
|
+
- `pool`: a Pool instance
|
513
|
+
- `address`: the server's (host, port)
|
514
|
+
- `id`: the id of this socket in it's pool
|
515
|
+
"""
|
516
|
+
|
517
|
+
def __init__(self, sock, pool, address, id):
    """Initialize connection state for a pooled socket.

    :Parameters:
      - `sock`: a raw socket object
      - `pool`: a Pool instance
      - `address`: the server's (host, port)
      - `id`: the id of this socket in its pool
    """
    # Weak reference so a checked-out connection doesn't keep its pool alive.
    self.pool_ref = weakref.ref(pool)
    self.sock = sock
    self.address = address
    self.id = id
    # Set of sources this connection has authenticated against — presumably
    # credential/database identifiers; TODO confirm against auth module.
    self.authed = set()
    self.closed = False
    self.last_checkin_time = time.monotonic()
    self.performed_handshake = False
    self.is_writable = False
    # Server limits: start at the module defaults until the handshake
    # reports the server's real values.
    self.max_wire_version = MAX_WIRE_VERSION
    self.max_bson_size = MAX_BSON_SIZE
    self.max_message_size = MAX_MESSAGE_SIZE
    self.max_write_batch_size = MAX_WRITE_BATCH_SIZE
    self.supports_sessions = False
    # None until the handshake response tells us whether "hello" is supported.
    self.hello_ok = None
    self.is_mongos = False
    self.op_msg_enabled = False
    self.listeners = pool.opts._event_listeners
    self.enabled_for_cmap = pool.enabled_for_cmap
    self.compression_settings = pool.opts._compression_settings
    self.compression_context = None
    self.socket_checker = SocketChecker()
    # Support for mechanism negotiation on the initial handshake.
    self.negotiated_mechs = None
    self.auth_ctx = None

    # The pool's generation changes with each reset() so we can close
    # sockets created before the last reset.
    self.pool_gen = pool.gen
    self.generation = self.pool_gen.get_overall()
    self.ready = False
    self.cancel_context = None
    if not pool.handshake:
        # This is a Monitor connection.
        self.cancel_context = _CancellationContext()
    self.opts = pool.opts
    # True while a streaming (moreToCome) response is in progress.
    self.more_to_come = False
    # For load balancer support.
    self.service_id = None
    # When executing a transaction in load balancing mode, this flag is
    # set to true to indicate that the session now owns the connection.
    self.pinned_txn = False
    self.pinned_cursor = False
    self.active = False
    # Cache of the last applied socket timeout (see set_socket_timeout).
    self.last_timeout = self.opts.socket_timeout
    self.connect_rtt = 0.0
|
565
|
+
def set_socket_timeout(self, timeout):
|
566
|
+
"""Cache last timeout to avoid duplicate calls to sock.settimeout."""
|
567
|
+
if timeout == self.last_timeout:
|
568
|
+
return
|
569
|
+
self.last_timeout = timeout
|
570
|
+
self.sock.settimeout(timeout)
|
571
|
+
|
572
|
+
def apply_timeout(self, client, cmd):
    """Apply the client-side operation timeout (CSOT) to *cmd*.

    Mutates *cmd* in place by adding ``maxTimeMS`` when a CSOT deadline
    is active, and adjusts the socket timeout to match.  Returns the
    remaining timeout in seconds, or None when no deadline is set.

    Raises :exc:`ExecutionTimeout` without running the command when the
    remaining time is already smaller than the network round trip time.

    NOTE(review): `client` is not used in this body — presumably kept for
    call-site compatibility; confirm against callers.
    """
    # CSOT: use remaining timeout when set.
    timeout = _csot.remaining()
    if timeout is None:
        # Reset the socket timeout unless we're performing a streaming monitor check.
        if not self.more_to_come:
            self.set_socket_timeout(self.opts.socket_timeout)
        return None
    # RTT validation: fall back to the RTT measured during connect.
    rtt = _csot.get_rtt()
    if rtt is None:
        rtt = self.connect_rtt
    max_time_ms = timeout - rtt
    if max_time_ms < 0:
        # CSOT: raise an error without running the command since we know it will time out.
        errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f}"
        raise ExecutionTimeout(
            errmsg, 50, {"ok": 0, "errmsg": errmsg, "code": 50}, self.max_wire_version
        )
    if cmd is not None:
        cmd["maxTimeMS"] = int(max_time_ms * 1000)
    self.set_socket_timeout(timeout)
    return timeout
|
596
|
+
def pin_txn(self):
    """Pin this connection to the active transaction (load balanced mode).

    A connection may be pinned to a transaction or a cursor, never both.
    """
    self.pinned_txn = True
    assert not self.pinned_cursor
|
600
|
+
def pin_cursor(self):
    """Pin this connection to a cursor (load balancer mode).

    A connection can never be pinned to a cursor and a transaction at
    the same time.
    """
    self.pinned_cursor = True
    assert not self.pinned_txn
|
603
|
+
|
604
|
+
def unpin(self):
    """Release a pinned connection back to its pool.

    When the owning pool has already been garbage collected (the weakref
    is dead), close the connection as stale instead.
    """
    owner = self.pool_ref()
    if owner:
        owner.return_socket(self)
    else:
        self.close_socket(ConnectionClosedReason.STALE)
|
610
|
+
|
611
|
+
def hello_cmd(self):
    """Return the handshake command document.

    Uses the modern ``hello`` command (and enables OP_MSG) when the
    handshake spec requires it — stable API configured, load balanced
    mode, or the server previously advertised helloOk — otherwise falls
    back to the legacy command while requesting helloOk.
    """
    use_hello = self.opts.server_api or self.hello_ok or self.opts.load_balanced
    if use_hello:
        # Handshake spec requires us to use OP_MSG + hello here.
        self.op_msg_enabled = True
        return SON([(HelloCompat.CMD, 1)])
    return SON([(HelloCompat.LEGACY_CMD, 1), ("helloOk", True)])
|
619
|
+
|
620
|
+
def hello(self):
    """Run the handshake/heartbeat with default (non-awaitable) options."""
    return self._hello(None, None, None)
|
622
|
+
|
623
|
+
def _hello(self, cluster_time, topology_version, heartbeat_frequency):
    """Run the hello (or legacy hello) handshake/heartbeat and return a Hello.

    On the first call (the connection handshake) this also records server
    limits and type, negotiates compression, measures the handshake RTT,
    and may start speculative authentication. Subsequent calls with a
    `topology_version` request an awaitable (streaming) heartbeat.
    """
    cmd = self.hello_cmd()
    performing_handshake = not self.performed_handshake
    awaitable = False
    if performing_handshake:
        self.performed_handshake = True
        # Handshake metadata (driver/os/app) and compressor negotiation.
        cmd["client"] = self.opts.metadata
        if self.compression_settings:
            cmd["compression"] = self.compression_settings.compressors
        if self.opts.load_balanced:
            cmd["loadBalanced"] = True
    elif topology_version is not None:
        # Streaming heartbeat: the server waits up to maxAwaitTimeMS for a
        # topology change before replying.
        cmd["topologyVersion"] = topology_version
        cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000)
        awaitable = True
        # If connect_timeout is None there is no timeout.
        if self.opts.connect_timeout:
            self.set_socket_timeout(self.opts.connect_timeout + heartbeat_frequency)

    if not performing_handshake and cluster_time is not None:
        cmd["$clusterTime"] = cluster_time

    creds = self.opts._credentials
    if creds:
        if creds.mechanism == "DEFAULT" and creds.username:
            # Ask the server which SASL mechanisms this user supports.
            cmd["saslSupportedMechs"] = creds.source + "." + creds.username
        auth_ctx = auth._AuthContext.from_credentials(creds)
        if auth_ctx:
            # Speculative auth: piggyback the first auth step on the handshake.
            cmd["speculativeAuthenticate"] = auth_ctx.speculate_command()
    else:
        auth_ctx = None

    if performing_handshake:
        start = time.monotonic()
    doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable)
    if performing_handshake:
        # Record the handshake round trip; used as the RTT fallback in
        # apply_timeout (CSOT).
        self.connect_rtt = time.monotonic() - start
    hello = Hello(doc, awaitable=awaitable)
    # Cache the server's advertised capabilities and limits.
    self.is_writable = hello.is_writable
    self.max_wire_version = hello.max_wire_version
    self.max_bson_size = hello.max_bson_size
    self.max_message_size = hello.max_message_size
    self.max_write_batch_size = hello.max_write_batch_size
    self.supports_sessions = hello.logical_session_timeout_minutes is not None
    self.hello_ok = hello.hello_ok
    self.is_repl = hello.server_type in (
        SERVER_TYPE.RSPrimary,
        SERVER_TYPE.RSSecondary,
        SERVER_TYPE.RSArbiter,
        SERVER_TYPE.RSOther,
        SERVER_TYPE.RSGhost,
    )
    self.is_standalone = hello.server_type == SERVER_TYPE.Standalone
    self.is_mongos = hello.server_type == SERVER_TYPE.Mongos
    if performing_handshake and self.compression_settings:
        ctx = self.compression_settings.get_compression_context(hello.compressors)
        self.compression_context = ctx

    self.op_msg_enabled = True
    if creds:
        self.negotiated_mechs = hello.sasl_supported_mechs
    if auth_ctx:
        auth_ctx.parse_response(hello)
        if auth_ctx.speculate_succeeded():
            # Speculative auth completed; authenticate() can finish cheaply.
            self.auth_ctx = auth_ctx
    if self.opts.load_balanced:
        if not hello.service_id:
            raise ConfigurationError(
                "Driver attempted to initialize in load balancing mode,"
                " but the server does not support this mode"
            )
        self.service_id = hello.service_id
        self.generation = self.pool_gen.get(self.service_id)
    return hello
|
697
|
+
|
698
|
+
def _next_reply(self):
    """Receive the next exhaust ("more to come") reply and return its first
    document, raising on error responses.
    """
    reply = self.receive_message(None)
    # Track whether the server will stream further replies on this socket.
    self.more_to_come = reply.more_to_come
    unpacked_docs = reply.unpack_response()
    response_doc = unpacked_docs[0]
    # Raises NotPrimaryError or OperationFailure on error responses.
    helpers._check_command_response(response_doc, self.max_wire_version)
    return response_doc
|
705
|
+
|
706
|
+
def command(
    self,
    dbname,
    spec,
    read_preference=ReadPreference.PRIMARY,
    codec_options=DEFAULT_CODEC_OPTIONS,
    check=True,
    allowable_errors=None,
    read_concern=None,
    write_concern=None,
    parse_write_concern_error=False,
    collation=None,
    session=None,
    client=None,
    retryable_write=False,
    publish_events=True,
    user_fields=None,
    exhaust_allowed=False,
):
    """Execute a command or raise an error.

    :Parameters:
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `read_concern`: The read concern for this command.
      - `write_concern`: The write concern for this command.
      - `parse_write_concern_error`: Whether to parse the
        ``writeConcernError`` field in the command response.
      - `collation`: The collation for this command.
      - `session`: optional ClientSession instance.
      - `client`: optional MongoClient for gossipping $clusterTime.
      - `retryable_write`: True if this command is a retryable write.
      - `publish_events`: Should we publish events for this command?
      - `user_fields` (optional): Response fields that should be decoded
        using the TypeDecoders from codec_options, passed to
        bson._decode_all_selective.
      - `exhaust_allowed` (optional): forwarded to the module-level
        ``command`` helper; presumably permits the exhaustAllowed OP_MSG
        flag — TODO confirm against the helper's definition.
    """
    self.validate_session(client, session)
    session = _validate_session_write_concern(session, write_concern)

    # Ensure command name remains in first place.
    if not isinstance(spec, ORDERED_TYPES):  # type:ignore[arg-type]
        spec = SON(spec)

    if not (write_concern is None or write_concern.acknowledged or collation is None):
        raise ConfigurationError("Collation is unsupported for unacknowledged writes.")

    # Decorate the command with server API, session, and $clusterTime fields.
    self.add_server_api(spec)
    if session:
        session._apply_to(spec, retryable_write, read_preference, self)
    self.send_cluster_time(spec, session, client)
    listeners = self.listeners if publish_events else None
    unacknowledged = write_concern and not write_concern.acknowledged
    if self.op_msg_enabled:
        self._raise_if_not_writable(unacknowledged)
    try:
        # Delegate the wire-protocol work to the module-level helper.
        return command(
            self,
            dbname,
            spec,
            self.is_mongos,
            read_preference,
            codec_options,
            session,
            client,
            check,
            allowable_errors,
            self.address,
            listeners,
            self.max_bson_size,
            read_concern,
            parse_write_concern_error=parse_write_concern_error,
            collation=collation,
            compression_ctx=self.compression_context,
            use_op_msg=self.op_msg_enabled,
            unacknowledged=unacknowledged,
            user_fields=user_fields,
            exhaust_allowed=exhaust_allowed,
            write_concern=write_concern,
        )
    except (OperationFailure, NotPrimaryError):
        # Server-side failures do not invalidate the connection.
        raise
    # Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
    except BaseException as error:
        self._raise_connection_failure(error)
|
795
|
+
|
796
|
+
def send_message(self, message, max_doc_size):
    """Send a raw BSON message or raise ConnectionFailure.

    If a network exception is raised, the socket is closed.

    :Parameters:
      - `message`: bytes to send on the wire.
      - `max_doc_size`: size in bytes of the largest document in the
        message, checked against the server's advertised limit.
    """
    limit = self.max_bson_size
    if limit is not None and max_doc_size > limit:
        raise DocumentTooLarge(
            "BSON document too large (%d bytes) - the connected server "
            "supports BSON document sizes up to %d bytes." % (max_doc_size, limit)
        )

    try:
        self.sock.sendall(message)
    except BaseException as error:
        # Close the socket and translate/re-raise.
        self._raise_connection_failure(error)
|
811
|
+
|
812
|
+
def receive_message(self, request_id):
    """Receive a raw BSON message or raise ConnectionFailure.

    If any exception is raised, the socket is closed.

    :Parameters:
      - `request_id`: the request id the reply must respond to; `None` is
        passed by `_next_reply` for exhaust replies — presumably meaning
        "accept any response id"; confirm against the module-level helper.
    """
    try:
        # Delegates to the module-level receive_message helper.
        return receive_message(self, request_id, self.max_message_size)
    except BaseException as error:
        self._raise_connection_failure(error)
|
821
|
+
|
822
|
+
def _raise_if_not_writable(self, unacknowledged):
|
823
|
+
"""Raise NotPrimaryError on unacknowledged write if this socket is not
|
824
|
+
writable.
|
825
|
+
"""
|
826
|
+
if unacknowledged and not self.is_writable:
|
827
|
+
# Write won't succeed, bail as if we'd received a not primary error.
|
828
|
+
raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107})
|
829
|
+
|
830
|
+
def unack_write(self, msg, max_doc_size):
    """Send an unacknowledged OP_MSG.

    Can raise ConnectionFailure or InvalidDocument.

    :Parameters:
      - `msg`: bytes, an OP_MSG message.
      - `max_doc_size`: size in bytes of the largest document in `msg`.
    """
    # Unacknowledged writes are only legal against a writable server.
    self._raise_if_not_writable(True)
    self.send_message(msg, max_doc_size)
|
841
|
+
|
842
|
+
def write_command(self, request_id, msg, codec_options):
    """Send "insert" etc. command, returning response as a dict.

    Can raise ConnectionFailure or OperationFailure.

    :Parameters:
      - `request_id`: an int.
      - `msg`: bytes, the command message.
      - `codec_options`: CodecOptions used to decode the reply document.
    """
    self.send_message(msg, 0)
    reply = self.receive_message(request_id)
    result = reply.command_response(codec_options)

    # Raises NotPrimaryError or OperationFailure.
    helpers._check_command_response(result, self.max_wire_version)
    return result
|
858
|
+
|
859
|
+
def authenticate(self):
    """Authenticate to the server if needed.

    Can raise ConnectionFailure or OperationFailure. Idempotent: does
    nothing once the connection is marked ready.
    """
    # CMAP spec says to publish the ready event only after authenticating
    # the connection.
    if not self.ready:
        creds = self.opts._credentials
        if creds:
            auth.authenticate(creds, self)
        self.ready = True
        if self.enabled_for_cmap:
            self.listeners.publish_connection_ready(self.address, self.id)
|
873
|
+
|
874
|
+
def validate_session(self, client, session):
    """Validate this session before use with client.

    Raises InvalidOperation when the session was created by a different
    MongoClient.
    """
    if session and session._client is not client:
        raise InvalidOperation("Can only use session with the MongoClient that started it")
|
882
|
+
|
883
|
+
def close_socket(self, reason):
    """Close this connection, publishing a CMAP ConnectionClosed event
    when a reason is given and CMAP events are enabled."""
    if not self.closed:
        self._close_socket()
        if reason and self.enabled_for_cmap:
            self.listeners.publish_connection_closed(self.address, self.id, reason)
|
890
|
+
|
891
|
+
def _close_socket(self):
|
892
|
+
"""Close this connection."""
|
893
|
+
if self.closed:
|
894
|
+
return
|
895
|
+
self.closed = True
|
896
|
+
if self.cancel_context:
|
897
|
+
self.cancel_context.cancel()
|
898
|
+
# Note: We catch exceptions to avoid spurious errors on interpreter
|
899
|
+
# shutdown.
|
900
|
+
try:
|
901
|
+
self.sock.close()
|
902
|
+
except Exception:
|
903
|
+
pass
|
904
|
+
|
905
|
+
def socket_closed(self):
    """Return True if we know the socket has been closed, False otherwise."""
    checker = self.socket_checker
    return checker.socket_closed(self.sock)
|
908
|
+
|
909
|
+
def send_cluster_time(self, command, session, client):
    """Gossip $clusterTime into `command` via the client, when present."""
    if client:
        client._send_cluster_time(command, session)
|
913
|
+
|
914
|
+
def add_server_api(self, command):
    """Append Stable API (server_api) fields to `command` when configured."""
    server_api = self.opts.server_api
    if server_api:
        _add_to_command(command, server_api)
|
918
|
+
|
919
|
+
def update_last_checkin_time(self):
    """Record the current monotonic time as the last pool check-in."""
    self.last_checkin_time = time.monotonic()
|
921
|
+
|
922
|
+
def update_is_writable(self, is_writable):
    """Update whether this connection points at a writable server."""
    self.is_writable = is_writable
|
924
|
+
|
925
|
+
def idle_time_seconds(self):
    """Seconds since this socket was last checked into its pool."""
    now = time.monotonic()
    return now - self.last_checkin_time
|
928
|
+
|
929
|
+
def _raise_connection_failure(self, error):
    """Close this connection and re-raise `error`, translating OS-level
    errors (IOError/OSError/SSLError) into pymongo connection failures.
    """
    # Catch *all* exceptions from socket methods and close the socket. In
    # regular Python, socket operations only raise socket.error, even if
    # the underlying cause was a Ctrl-C: a signal raised during socket.recv
    # is expressed as an EINTR error from poll. See internal_select_ex() in
    # socketmodule.c. All error codes from poll become socket.error at
    # first. Eventually in PyEval_EvalFrameEx the interpreter checks for
    # signals and throws KeyboardInterrupt into the current frame on the
    # main thread.
    #
    # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue,
    # ...) is called in Python code, which experiences the signal as a
    # KeyboardInterrupt from the start, rather than as an initial
    # socket.error, so we catch that, close the socket, and reraise it.
    #
    # The connection closed event will be emitted later in return_socket.
    if self.ready:
        # Already checked out at least once; CMAP close event comes later.
        reason = None
    else:
        reason = ConnectionClosedReason.ERROR
    self.close_socket(reason)
    # SSLError from PyOpenSSL inherits directly from Exception.
    if isinstance(error, (IOError, OSError, SSLError)):
        _raise_connection_failure(self.address, error)
    else:
        # Non-network errors (e.g. KeyboardInterrupt) propagate unchanged.
        raise
|
955
|
+
|
956
|
+
def __eq__(self, other):
|
957
|
+
return self.sock == other.sock
|
958
|
+
|
959
|
+
def __ne__(self, other):
|
960
|
+
return not self == other
|
961
|
+
|
962
|
+
def __hash__(self):
|
963
|
+
return hash(self.sock)
|
964
|
+
|
965
|
+
def __repr__(self):
|
966
|
+
return "SocketInfo(%s)%s at %s" % (
|
967
|
+
repr(self.sock),
|
968
|
+
self.closed and " CLOSED" or "",
|
969
|
+
id(self),
|
970
|
+
)
|
971
|
+
|
972
|
+
|
973
|
+
def _create_connection(address, options):
    """Given (host, port) and PoolOptions, connect and return a socket object.

    Can raise socket.error.

    This is a modified version of create_connection from CPython >= 2.7.
    """
    host, port = address

    # Check if dealing with a unix domain socket
    if host.endswith(".sock"):
        if not hasattr(socket, "AF_UNIX"):
            raise ConnectionFailure("UNIX-sockets are not supported on this system")
        sock = socket.socket(socket.AF_UNIX)
        # SOCK_CLOEXEC not supported for Unix sockets.
        _set_non_inheritable_non_atomic(sock.fileno())
        try:
            sock.connect(host)
            return sock
        except socket.error:
            sock.close()
            raise

    # Don't try IPv6 if we don't support it. Also skip it if host
    # is 'localhost' (::1 is fine). Avoids slow connect issues
    # like PYTHON-356.
    family = socket.AF_INET
    if socket.has_ipv6 and host != "localhost":
        family = socket.AF_UNSPEC

    # Try every resolved address; remember the last failure so it can be
    # re-raised if none of them connect.
    err = None
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, dummy, sa = res
        # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited
        # number of platforms (newer Linux and *BSD). Starting with CPython 3.4
        # all file descriptors are created non-inheritable. See PEP 446.
        try:
            sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto)
        except socket.error:
            # Can SOCK_CLOEXEC be defined even if the kernel doesn't support
            # it?
            sock = socket.socket(af, socktype, proto)
        # Fallback when SOCK_CLOEXEC isn't available.
        _set_non_inheritable_non_atomic(sock.fileno())
        try:
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            # CSOT: apply timeout to socket connect.
            timeout = _csot.remaining()
            if timeout is None:
                timeout = options.connect_timeout
            elif timeout <= 0:
                raise socket.timeout("timed out")
            sock.settimeout(timeout)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True)
            _set_keepalive_times(sock)
            sock.connect(sa)
            return sock
        except socket.error as e:
            err = e
            sock.close()

    if err is not None:
        raise err
    else:
        # This likely means we tried to connect to an IPv6 only
        # host with an OS/kernel or Python interpreter that doesn't
        # support IPv6. The test case is Jython2.5.1 which doesn't
        # support IPv6 at all.
        raise socket.error("getaddrinfo failed")
|
1042
|
+
|
1043
|
+
|
1044
|
+
def _configured_socket(address, options):
    """Given (host, port) and PoolOptions, return a configured socket.

    Can raise socket.error, ConnectionFailure, or _CertificateError.

    Sets socket's SSL and timeout options.
    """
    sock = _create_connection(address, options)
    ssl_context = options._ssl_context

    if ssl_context is not None:
        host = address[0]
        try:
            # We have to pass hostname / ip address to wrap_socket
            # to use SSLContext.check_hostname.
            if HAS_SNI:
                sock = ssl_context.wrap_socket(sock, server_hostname=host)
            else:
                sock = ssl_context.wrap_socket(sock)
        except _CertificateError:
            sock.close()
            # Raise _CertificateError directly like we do after match_hostname
            # below.
            raise
        except (IOError, OSError, SSLError) as exc:  # noqa: B014
            sock.close()
            # We raise AutoReconnect for transient and permanent SSL handshake
            # failures alike. Permanent handshake failures, like protocol
            # mismatch, will be turned into ServerSelectionTimeoutErrors later.
            _raise_connection_failure(address, exc, "SSL handshake failed: ")
        if (
            ssl_context.verify_mode
            and not ssl_context.check_hostname
            and not options.tls_allow_invalid_hostnames
        ):
            # Manual hostname verification when the SSLContext itself did not
            # check it and the options do not allow invalid hostnames.
            try:
                ssl.match_hostname(sock.getpeercert(), hostname=host)
            except _CertificateError:
                sock.close()
                raise

    # Handshake used the connect timeout; switch to the operational timeout.
    sock.settimeout(options.socket_timeout)
    return sock
|
1087
|
+
|
1088
|
+
|
1089
|
+
class _PoolClosedError(PyMongoError):
    """Internal error raised when a thread tries to get a connection from a
    closed pool.
    """
|
1095
|
+
|
1096
|
+
|
1097
|
+
class _PoolGeneration(object):
|
1098
|
+
def __init__(self):
|
1099
|
+
# Maps service_id to generation.
|
1100
|
+
self._generations = collections.defaultdict(int)
|
1101
|
+
# Overall pool generation.
|
1102
|
+
self._generation = 0
|
1103
|
+
|
1104
|
+
def get(self, service_id):
|
1105
|
+
"""Get the generation for the given service_id."""
|
1106
|
+
if service_id is None:
|
1107
|
+
return self._generation
|
1108
|
+
return self._generations[service_id]
|
1109
|
+
|
1110
|
+
def get_overall(self):
|
1111
|
+
"""Get the Pool's overall generation."""
|
1112
|
+
return self._generation
|
1113
|
+
|
1114
|
+
def inc(self, service_id):
|
1115
|
+
"""Increment the generation for the given service_id."""
|
1116
|
+
self._generation += 1
|
1117
|
+
if service_id is None:
|
1118
|
+
for service_id in self._generations:
|
1119
|
+
self._generations[service_id] += 1
|
1120
|
+
else:
|
1121
|
+
self._generations[service_id] += 1
|
1122
|
+
|
1123
|
+
def stale(self, gen, service_id):
|
1124
|
+
"""Return if the given generation for a given service_id is stale."""
|
1125
|
+
return gen != self.get(service_id)
|
1126
|
+
|
1127
|
+
|
1128
|
+
class PoolState(object):
    """Lifecycle states for a Pool (see Pool.__init__, Pool._reset)."""

    # Set at construction when pause_enabled, and by _reset when pausing.
    PAUSED = 1
    # Normal operating state; set by Pool.ready().
    READY = 2
    # Terminal state; set by Pool._reset(close=True).
    CLOSED = 3
|
1132
|
+
|
1133
|
+
|
1134
|
+
# Do *not* explicitly inherit from object or Jython won't call __del__
|
1135
|
+
# http://bugs.jython.org/issue1057
|
1136
|
+
class Pool:
|
1137
|
+
def __init__(self, address, options, handshake=True):
    """
    :Parameters:
      - `address`: a (hostname, port) tuple
      - `options`: a PoolOptions instance
      - `handshake`: whether to call hello for each new SocketInfo
    """
    if options.pause_enabled:
        self.state = PoolState.PAUSED
    else:
        self.state = PoolState.READY
    # Check a socket's health with socket_closed() every once in a while.
    # Can override for testing: 0 to always check, None to never check.
    self._check_interval_seconds = 1
    # LIFO pool. Sockets are ordered on idle time. Sockets claimed
    # and returned to pool from the left side. Stale sockets removed
    # from the right side.
    self.sockets: collections.deque = collections.deque()
    self.lock = threading.Lock()
    # Number of sockets currently checked out of the pool.
    self.active_sockets = 0
    # Monotonically increasing connection ID required for CMAP Events.
    self.next_connection_id = 1
    # Track whether the sockets in this pool are writeable or not.
    self.is_writable = None

    # Keep track of resets, so we notice sockets created before the most
    # recent reset and close them.
    # self.generation = 0
    self.gen = _PoolGeneration()
    # Used to detect forks (see _reset).
    self.pid = os.getpid()
    self.address = address
    self.opts = options
    self.handshake = handshake
    # Don't publish events in Monitor pools.
    self.enabled_for_cmap = (
        self.handshake
        and self.opts._event_listeners is not None
        and self.opts._event_listeners.enabled_for_cmap
    )

    # The first portion of the wait queue.
    # Enforces: maxPoolSize
    # Also used for: clearing the wait queue
    self.size_cond = threading.Condition(self.lock)
    self.requests = 0
    self.max_pool_size = self.opts.max_pool_size
    if not self.max_pool_size:
        # A falsy maxPoolSize means no limit.
        self.max_pool_size = float("inf")
    # The second portion of the wait queue.
    # Enforces: maxConnecting
    # Also used for: clearing the wait queue
    self._max_connecting_cond = threading.Condition(self.lock)
    self._max_connecting = self.opts.max_connecting
    # Number of connection attempts currently in flight.
    self._pending = 0
    if self.enabled_for_cmap:
        self.opts._event_listeners.publish_pool_created(
            self.address, self.opts.non_default_options
        )
    # Similar to active_sockets but includes threads in the wait queue.
    self.operation_count = 0
    # Retain references to pinned connections to prevent the CPython GC
    # from thinking that a cursor's pinned connection can be GC'd when the
    # cursor is GC'd (see PYTHON-2751).
    self.__pinned_sockets = set()
    self.ncursors = 0
    self.ntxns = 0
|
1203
|
+
|
1204
|
+
def ready(self):
    """Transition the pool to READY, publishing PoolReadyEvent on change."""
    # Take the lock to avoid the race condition described in PYTHON-2699.
    with self.lock:
        if self.state == PoolState.READY:
            return
        self.state = PoolState.READY
        if self.enabled_for_cmap:
            self.opts._event_listeners.publish_pool_ready(self.address)
|
1211
|
+
|
1212
|
+
@property
def closed(self):
    """True once the pool has been permanently closed (PoolState.CLOSED)."""
    return self.state == PoolState.CLOSED
|
1215
|
+
|
1216
|
+
def _reset(self, close, pause=True, service_id=None):
    """Clear (and optionally permanently close) the pool.

    :Parameters:
      - `close`: when True, close the pool for good and publish
        PoolClosedEvent; otherwise publish PoolClearedEvent.
      - `pause`: when True (and pauses are enabled and not load
        balanced), transition the pool to PAUSED.
      - `service_id`: when set (load balancer mode), only sockets for
        that service are discarded; otherwise all sockets are.
    """
    old_state = self.state
    with self.size_cond:
        if self.closed:
            return
        if self.opts.pause_enabled and pause and not self.opts.load_balanced:
            old_state, self.state = self.state, PoolState.PAUSED
        # Any socket from an older generation is now considered stale.
        self.gen.inc(service_id)
        newpid = os.getpid()
        if self.pid != newpid:
            # Forked: the child must not reuse the parent's counters.
            self.pid = newpid
            self.active_sockets = 0
            self.operation_count = 0
        if service_id is None:
            sockets, self.sockets = self.sockets, collections.deque()
        else:
            # Only discard sockets pinned to the given backing service.
            discard: collections.deque = collections.deque()
            keep: collections.deque = collections.deque()
            for sock_info in self.sockets:
                if sock_info.service_id == service_id:
                    discard.append(sock_info)
                else:
                    keep.append(sock_info)
            sockets = discard
            self.sockets = keep

        if close:
            self.state = PoolState.CLOSED
        # Clear the wait queue
        self._max_connecting_cond.notify_all()
        self.size_cond.notify_all()

    listeners = self.opts._event_listeners
    # CMAP spec says that close() MUST close sockets before publishing the
    # PoolClosedEvent but that reset() SHOULD close sockets *after*
    # publishing the PoolClearedEvent.
    if close:
        for sock_info in sockets:
            sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED)
        if self.enabled_for_cmap:
            listeners.publish_pool_closed(self.address)
    else:
        if old_state != PoolState.PAUSED and self.enabled_for_cmap:
            listeners.publish_pool_cleared(self.address, service_id=service_id)
        for sock_info in sockets:
            sock_info.close_socket(ConnectionClosedReason.STALE)
|
1262
|
+
|
1263
|
+
def update_is_writable(self, is_writable):
    """Propagate the server's writability to the pool and to every socket
    currently idle in it."""
    self.is_writable = is_writable
    with self.lock:
        for idle_sock in self.sockets:
            idle_sock.update_is_writable(self.is_writable)
|
1271
|
+
|
1272
|
+
def reset(self, service_id=None):
    """Clear the pool without closing it, optionally for one service only."""
    self._reset(close=False, service_id=service_id)
|
1274
|
+
|
1275
|
+
def reset_without_pause(self):
    """Clear the pool without closing it and without entering PAUSED."""
    self._reset(close=False, pause=False)
|
1277
|
+
|
1278
|
+
def close(self):
    """Permanently close the pool and all of its sockets."""
    self._reset(close=True)
|
1280
|
+
|
1281
|
+
def stale_generation(self, gen, service_id):
    """Return True when `gen` is stale for `service_id` (pool was reset)."""
    return self.gen.stale(gen, service_id)
|
1283
|
+
|
1284
|
+
def remove_stale_sockets(self, reference_generation):
    """Removes stale sockets then adds new ones if pool is too small and
    has not been reset. The `reference_generation` argument specifies the
    `generation` at the point in time this operation was requested on the
    pool.
    """
    # Take the lock to avoid the race condition described in PYTHON-2699.
    with self.lock:
        if self.state != PoolState.READY:
            return

    if self.opts.max_idle_time_seconds is not None:
        with self.lock:
            # The oldest idle sockets accumulate on the right side of the
            # LIFO deque; pop and close any that exceeded maxIdleTimeMS.
            while (
                self.sockets
                and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds
            ):
                sock_info = self.sockets.pop()
                sock_info.close_socket(ConnectionClosedReason.IDLE)

    # Top the pool back up to minPoolSize, one connection per iteration.
    while True:
        with self.size_cond:
            # There are enough sockets in the pool.
            if len(self.sockets) + self.active_sockets >= self.opts.min_pool_size:
                return
            if self.requests >= self.opts.min_pool_size:
                return
            self.requests += 1
        incremented = False
        try:
            with self._max_connecting_cond:
                # If maxConnecting connections are already being created
                # by this pool then try again later instead of waiting.
                if self._pending >= self._max_connecting:
                    return
                self._pending += 1
                incremented = True
            sock_info = self.connect()
            with self.lock:
                # Close connection and return if the pool was reset during
                # socket creation or while acquiring the pool lock.
                if self.gen.get_overall() != reference_generation:
                    sock_info.close_socket(ConnectionClosedReason.STALE)
                    return
                self.sockets.appendleft(sock_info)
        finally:
            if incremented:
                # Notify after adding the socket to the pool.
                with self._max_connecting_cond:
                    self._pending -= 1
                    self._max_connecting_cond.notify()

            with self.size_cond:
                self.requests -= 1
                self.size_cond.notify()
|
1339
|
+
|
1340
|
+
def connect(self, handler=None):
    """Connect to Mongo and return a new SocketInfo.

    Can raise ConnectionFailure.

    Note that the pool does not keep a reference to the socket -- you
    must call return_socket() when you're done with it.

    :Parameters:
      - `handler` (optional): an object with a ``contribute_socket``
        method, given the new connection after the handshake but before
        authentication.
    """
    with self.lock:
        conn_id = self.next_connection_id
        self.next_connection_id += 1

    listeners = self.opts._event_listeners
    if self.enabled_for_cmap:
        listeners.publish_connection_created(self.address, conn_id)

    try:
        sock = _configured_socket(self.address, self.opts)
    except BaseException as error:
        if self.enabled_for_cmap:
            listeners.publish_connection_closed(
                self.address, conn_id, ConnectionClosedReason.ERROR
            )

        # Translate network errors into pymongo connection failures.
        if isinstance(error, (IOError, OSError, SSLError)):
            _raise_connection_failure(self.address, error)

        raise

    sock_info = SocketInfo(sock, self, self.address, conn_id)
    try:
        if self.handshake:
            sock_info.hello()
            self.is_writable = sock_info.is_writable
        if handler:
            handler.contribute_socket(sock_info, completed_handshake=False)

        sock_info.authenticate()
    except BaseException:
        # Any failure after the socket exists must close it.
        sock_info.close_socket(ConnectionClosedReason.ERROR)
        raise

    return sock_info
|
1383
|
+
|
1384
|
+
@contextlib.contextmanager
def get_socket(self, handler=None):
    """Get a socket from the pool. Use with a "with" statement.

    Returns a :class:`SocketInfo` object wrapping a connected
    :class:`socket.socket`.

    This method should always be used in a with-statement::

        with pool.get_socket() as socket_info:
            socket_info.send_message(msg)
            data = socket_info.receive_message(op_code, request_id)

    Can raise ConnectionFailure or OperationFailure.

    :Parameters:
      - `handler` (optional): A _MongoClientErrorHandler.
    """
    listeners = self.opts._event_listeners
    if self.enabled_for_cmap:
        listeners.publish_connection_check_out_started(self.address)

    sock_info = self._get_socket(handler=handler)

    if self.enabled_for_cmap:
        listeners.publish_connection_checked_out(self.address, sock_info.id)
    try:
        yield sock_info
    except BaseException:
        # Exception in caller. Ensure the connection gets returned.
        # Note that when pinned is True, the session owns the
        # connection and it is responsible for checking the connection
        # back into the pool.
        pinned = sock_info.pinned_txn or sock_info.pinned_cursor
        if handler:
            # Perform SDAM error handling rules while the connection is
            # still checked out.
            exc_type, exc_val, _ = sys.exc_info()
            handler.handle(exc_type, exc_val)
        if not pinned and sock_info.active:
            self.return_socket(sock_info)
        raise
    # Normal exit: a connection pinned to a transaction or cursor stays
    # checked out (tracked in __pinned_sockets); otherwise return it.
    if sock_info.pinned_txn:
        with self.lock:
            self.__pinned_sockets.add(sock_info)
            self.ntxns += 1
    elif sock_info.pinned_cursor:
        with self.lock:
            self.__pinned_sockets.add(sock_info)
            self.ncursors += 1
    elif sock_info.active:
        self.return_socket(sock_info)
|
1436
|
+
|
1437
|
+
def _raise_if_not_ready(self, emit_event):
    """Fail the checkout if the pool is not in the READY state.

    Raises a connection failure wrapping ``AutoReconnect`` when the pool
    is paused/closed; optionally publishes the CMAP check-out-failed
    event first (when `emit_event` is true and CMAP events are enabled).
    """
    if self.state == PoolState.READY:
        return
    if emit_event and self.enabled_for_cmap:
        self.opts._event_listeners.publish_connection_check_out_failed(
            self.address, ConnectionCheckOutFailedReason.CONN_ERROR
        )
    _raise_connection_failure(self.address, AutoReconnect("connection pool paused"))
|
1444
|
+
|
1445
|
+
def _get_socket(self, handler=None):
    """Get or create a SocketInfo. Can raise ConnectionFailure.

    :Parameters:
      - `handler` (optional): A _MongoClientErrorHandler, forwarded to
        :meth:`connect` when a new connection must be established.
    """
    # We use the pid here to avoid issues with fork / multiprocessing.
    # See test.test_client:TestClient.test_fork for an example of
    # what could go wrong otherwise
    if self.pid != os.getpid():
        self.reset_without_pause()

    if self.closed:
        if self.enabled_for_cmap:
            self.opts._event_listeners.publish_connection_check_out_failed(
                self.address, ConnectionCheckOutFailedReason.POOL_CLOSED
            )
        raise _PoolClosedError(
            "Attempted to check out a connection from closed connection pool"
        )

    with self.lock:
        self.operation_count += 1

    # Get a free socket or create one.
    # Deadline priority: client-side operation timeout (CSOT) wins over
    # the pool's waitQueueTimeoutMS; None means wait forever.
    if _csot.get_timeout():
        deadline = _csot.get_deadline()
    elif self.opts.wait_queue_timeout:
        deadline = time.monotonic() + self.opts.wait_queue_timeout
    else:
        deadline = None

    # First gate: limit concurrent checkouts to max_pool_size.
    with self.size_cond:
        self._raise_if_not_ready(emit_event=True)
        while not (self.requests < self.max_pool_size):
            if not _cond_wait(self.size_cond, deadline):
                # Timed out, notify the next thread to ensure a
                # timeout doesn't consume the condition.
                if self.requests < self.max_pool_size:
                    self.size_cond.notify()
                self._raise_wait_queue_timeout()
            self._raise_if_not_ready(emit_event=True)
        self.requests += 1

    # We've now acquired the semaphore and must release it on error.
    sock_info = None
    incremented = False
    # Tracks whether a CMAP check-out-failed event was already published
    # (by _raise_wait_queue_timeout) so the except path doesn't duplicate it.
    emitted_event = False
    try:
        with self.lock:
            self.active_sockets += 1
            incremented = True

        while sock_info is None:
            # CMAP: we MUST wait for either maxConnecting OR for a socket
            # to be checked back into the pool.
            with self._max_connecting_cond:
                self._raise_if_not_ready(emit_event=False)
                while not (self.sockets or self._pending < self._max_connecting):
                    if not _cond_wait(self._max_connecting_cond, deadline):
                        # Timed out, notify the next thread to ensure a
                        # timeout doesn't consume the condition.
                        if self.sockets or self._pending < self._max_connecting:
                            self._max_connecting_cond.notify()
                        emitted_event = True
                        self._raise_wait_queue_timeout()
                    self._raise_if_not_ready(emit_event=False)

                try:
                    sock_info = self.sockets.popleft()
                except IndexError:
                    # No idle socket: reserve a pending slot to create one.
                    self._pending += 1
            if sock_info:  # We got a socket from the pool
                if self._perished(sock_info):
                    sock_info = None
                    continue
            else:  # We need to create a new connection
                try:
                    sock_info = self.connect(handler=handler)
                finally:
                    # Release the pending slot whether connect succeeded
                    # or not, and wake one waiter.
                    with self._max_connecting_cond:
                        self._pending -= 1
                        self._max_connecting_cond.notify()
    except BaseException:
        if sock_info:
            # We checked out a socket but authentication failed.
            sock_info.close_socket(ConnectionClosedReason.ERROR)
        # Undo the bookkeeping done above and wake the next waiter.
        with self.size_cond:
            self.requests -= 1
            if incremented:
                self.active_sockets -= 1
            self.size_cond.notify()

        if self.enabled_for_cmap and not emitted_event:
            self.opts._event_listeners.publish_connection_check_out_failed(
                self.address, ConnectionCheckOutFailedReason.CONN_ERROR
            )
        raise

    sock_info.active = True
    return sock_info
|
1542
|
+
|
1543
|
+
def return_socket(self, sock_info):
    """Return the socket to the pool, or if it's closed discard it.

    :Parameters:
      - `sock_info`: The socket to check into the pool.
    """
    # Capture pinned state before clearing it so the matching
    # ntxns/ncursors counter can be decremented at the end.
    txn = sock_info.pinned_txn
    cursor = sock_info.pinned_cursor
    sock_info.active = False
    sock_info.pinned_txn = False
    sock_info.pinned_cursor = False
    self.__pinned_sockets.discard(sock_info)
    listeners = self.opts._event_listeners
    if self.enabled_for_cmap:
        listeners.publish_connection_checked_in(self.address, sock_info.id)
    if self.pid != os.getpid():
        # Forked process: discard all inherited sockets.
        self.reset_without_pause()
    else:
        if self.closed:
            sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED)
        elif sock_info.closed:
            # CMAP requires the closed event be emitted after the check in.
            if self.enabled_for_cmap:
                listeners.publish_connection_closed(
                    self.address, sock_info.id, ConnectionClosedReason.ERROR
                )
        else:
            with self.lock:
                # Hold the lock to ensure this section does not race with
                # Pool.reset().
                if self.stale_generation(sock_info.generation, sock_info.service_id):
                    sock_info.close_socket(ConnectionClosedReason.STALE)
                else:
                    sock_info.update_last_checkin_time()
                    sock_info.update_is_writable(self.is_writable)
                    self.sockets.appendleft(sock_info)
                    # Notify any threads waiting to create a connection.
                    self._max_connecting_cond.notify()

    # Release this checkout's slot and wake the next waiter in
    # _get_socket's size_cond queue.
    with self.size_cond:
        if txn:
            self.ntxns -= 1
        elif cursor:
            self.ncursors -= 1
        self.requests -= 1
        self.active_sockets -= 1
        self.operation_count -= 1
        self.size_cond.notify()
|
1591
|
+
|
1592
|
+
def _perished(self, sock_info):
    """Return True and close the connection if it is "perished".

    This side-effecty function checks if this socket has been idle for
    for longer than the max idle time, or if the socket has been closed by
    some external network error, or if the socket's generation is outdated.

    Checking sockets lets us avoid seeing *some*
    :class:`~pymongo.errors.AutoReconnect` exceptions on server
    hiccups, etc. We only check if the socket was closed by an external
    error if it has been > 1 second since the socket was checked into the
    pool, to keep performance reasonable - we can't avoid AutoReconnects
    completely anyway.
    """
    idle_for = sock_info.idle_time_seconds()

    # Exceeded maxIdleTimeMS: retire the socket.
    max_idle = self.opts.max_idle_time_seconds
    if max_idle is not None and idle_for > max_idle:
        sock_info.close_socket(ConnectionClosedReason.IDLE)
        return True

    # Probe for an external close, but only when the configured check
    # interval says it's worth the cost (0 means "always check").
    interval = self._check_interval_seconds
    if interval is not None and (interval == 0 or idle_for > interval):
        if sock_info.socket_closed():
            sock_info.close_socket(ConnectionClosedReason.ERROR)
            return True

    # Pool generation moved on (e.g. after a reset): socket is stale.
    if self.stale_generation(sock_info.generation, sock_info.service_id):
        sock_info.close_socket(ConnectionClosedReason.STALE)
        return True

    return False
|
1627
|
+
|
1628
|
+
def _raise_wait_queue_timeout(self) -> NoReturn:
    """Publish the CMAP timeout event (if enabled) and raise
    WaitQueueTimeoutError describing the pool's current usage."""
    if self.enabled_for_cmap:
        self.opts._event_listeners.publish_connection_check_out_failed(
            self.address, ConnectionCheckOutFailedReason.TIMEOUT
        )
    # CSOT deadline takes precedence over waitQueueTimeoutMS.
    timeout = _csot.get_timeout() or self.opts.wait_queue_timeout
    if not self.opts.load_balanced:
        raise WaitQueueTimeoutError(
            "Timed out while checking out a connection from connection pool. "
            "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout)
        )
    # Load-balanced mode: break usage down by pinning reason.
    other_ops = self.active_sockets - self.ncursors - self.ntxns
    raise WaitQueueTimeoutError(
        "Timeout waiting for connection from the connection pool. "
        "maxPoolSize: %s, connections in use by cursors: %s, "
        "connections in use by transactions: %s, connections in use "
        "by other operations: %s, timeout: %s"
        % (
            self.opts.max_pool_size,
            self.ncursors,
            self.ntxns,
            other_ops,
            timeout,
        )
    )
|
1654
|
+
|
1655
|
+
def __del__(self):
    # Avoid ResourceWarnings in Python 3.
    # Close every pooled socket directly instead of calling reset() or
    # close(), because acquiring a lock inside __del__ is not safe.
    for conn in self.sockets:
        conn.close_socket(None)
|