sensu-plugins-mongodb-mrtrotl 1.4.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +1 -0
- data/LICENSE +22 -0
- data/README.md +27 -0
- data/bin/check-mongodb-metric.rb +144 -0
- data/bin/check-mongodb-query-count.rb +267 -0
- data/bin/check-mongodb.py +1644 -0
- data/bin/check-mongodb.rb +5 -0
- data/bin/metrics-mongodb-replication.rb +254 -0
- data/bin/metrics-mongodb.rb +133 -0
- data/lib/bson/__init__.py +1347 -0
- data/lib/bson/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/_helpers.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/binary.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/code.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/codec_options.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/dbref.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/decimal128.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/int64.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/json_util.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/max_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/min_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/objectid.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/raw_bson.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/regex.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/son.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/timestamp.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/tz_util.cpython-310.pyc +0 -0
- data/lib/bson/_cbson.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/bson/_helpers.py +41 -0
- data/lib/bson/binary.py +364 -0
- data/lib/bson/code.py +101 -0
- data/lib/bson/codec_options.py +414 -0
- data/lib/bson/codec_options.pyi +100 -0
- data/lib/bson/dbref.py +133 -0
- data/lib/bson/decimal128.py +314 -0
- data/lib/bson/errors.py +35 -0
- data/lib/bson/int64.py +39 -0
- data/lib/bson/json_util.py +874 -0
- data/lib/bson/max_key.py +55 -0
- data/lib/bson/min_key.py +55 -0
- data/lib/bson/objectid.py +286 -0
- data/lib/bson/py.typed +2 -0
- data/lib/bson/raw_bson.py +175 -0
- data/lib/bson/regex.py +135 -0
- data/lib/bson/son.py +208 -0
- data/lib/bson/timestamp.py +124 -0
- data/lib/bson/tz_util.py +52 -0
- data/lib/gridfs/__init__.py +1015 -0
- data/lib/gridfs/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/grid_file.cpython-310.pyc +0 -0
- data/lib/gridfs/errors.py +33 -0
- data/lib/gridfs/grid_file.py +907 -0
- data/lib/gridfs/py.typed +2 -0
- data/lib/pymongo/__init__.py +185 -0
- data/lib/pymongo/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/_csot.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/aggregation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth_aws.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/bulk.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/change_stream.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_session.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collection.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/command_cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/common.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/compression_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/daemon.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/database.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/driver_info.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/event_loggers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/hello.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/helpers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/max_staleness_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/message.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/mongo_client.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitoring.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/network.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_cache.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/operations.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/periodic_executor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pool.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pyopenssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_preferences.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/response.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/results.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/saslprep.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_api.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_type.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/settings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/socket_checker.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/srv_resolver.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/typings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/uri_parser.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/write_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/_cmessage.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/pymongo/_csot.py +118 -0
- data/lib/pymongo/aggregation.py +229 -0
- data/lib/pymongo/auth.py +549 -0
- data/lib/pymongo/auth_aws.py +94 -0
- data/lib/pymongo/bulk.py +513 -0
- data/lib/pymongo/change_stream.py +457 -0
- data/lib/pymongo/client_options.py +302 -0
- data/lib/pymongo/client_session.py +1112 -0
- data/lib/pymongo/collation.py +224 -0
- data/lib/pymongo/collection.py +3204 -0
- data/lib/pymongo/command_cursor.py +353 -0
- data/lib/pymongo/common.py +984 -0
- data/lib/pymongo/compression_support.py +149 -0
- data/lib/pymongo/cursor.py +1345 -0
- data/lib/pymongo/daemon.py +141 -0
- data/lib/pymongo/database.py +1202 -0
- data/lib/pymongo/driver_info.py +42 -0
- data/lib/pymongo/encryption.py +884 -0
- data/lib/pymongo/encryption_options.py +221 -0
- data/lib/pymongo/errors.py +365 -0
- data/lib/pymongo/event_loggers.py +221 -0
- data/lib/pymongo/hello.py +219 -0
- data/lib/pymongo/helpers.py +259 -0
- data/lib/pymongo/max_staleness_selectors.py +114 -0
- data/lib/pymongo/message.py +1440 -0
- data/lib/pymongo/mongo_client.py +2144 -0
- data/lib/pymongo/monitor.py +440 -0
- data/lib/pymongo/monitoring.py +1801 -0
- data/lib/pymongo/network.py +311 -0
- data/lib/pymongo/ocsp_cache.py +87 -0
- data/lib/pymongo/ocsp_support.py +372 -0
- data/lib/pymongo/operations.py +507 -0
- data/lib/pymongo/periodic_executor.py +183 -0
- data/lib/pymongo/pool.py +1660 -0
- data/lib/pymongo/py.typed +2 -0
- data/lib/pymongo/pyopenssl_context.py +383 -0
- data/lib/pymongo/read_concern.py +75 -0
- data/lib/pymongo/read_preferences.py +609 -0
- data/lib/pymongo/response.py +109 -0
- data/lib/pymongo/results.py +217 -0
- data/lib/pymongo/saslprep.py +113 -0
- data/lib/pymongo/server.py +247 -0
- data/lib/pymongo/server_api.py +170 -0
- data/lib/pymongo/server_description.py +285 -0
- data/lib/pymongo/server_selectors.py +153 -0
- data/lib/pymongo/server_type.py +32 -0
- data/lib/pymongo/settings.py +159 -0
- data/lib/pymongo/socket_checker.py +104 -0
- data/lib/pymongo/srv_resolver.py +126 -0
- data/lib/pymongo/ssl_context.py +39 -0
- data/lib/pymongo/ssl_support.py +99 -0
- data/lib/pymongo/topology.py +890 -0
- data/lib/pymongo/topology_description.py +639 -0
- data/lib/pymongo/typings.py +39 -0
- data/lib/pymongo/uri_parser.py +624 -0
- data/lib/pymongo/write_concern.py +129 -0
- data/lib/pymongo-4.2.0.dist-info/INSTALLER +1 -0
- data/lib/pymongo-4.2.0.dist-info/LICENSE +201 -0
- data/lib/pymongo-4.2.0.dist-info/METADATA +250 -0
- data/lib/pymongo-4.2.0.dist-info/RECORD +167 -0
- data/lib/pymongo-4.2.0.dist-info/REQUESTED +0 -0
- data/lib/pymongo-4.2.0.dist-info/WHEEL +6 -0
- data/lib/pymongo-4.2.0.dist-info/top_level.txt +3 -0
- data/lib/sensu-plugins-mongodb/metrics.rb +391 -0
- data/lib/sensu-plugins-mongodb/version.rb +9 -0
- data/lib/sensu-plugins-mongodb.rb +1 -0
- metadata +407 -0
data/lib/pymongo/bulk.py
ADDED
@@ -0,0 +1,513 @@
|
|
1
|
+
# Copyright 2014-present MongoDB, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""The bulk write operations interface.
|
16
|
+
|
17
|
+
.. versionadded:: 2.7
|
18
|
+
"""
|
19
|
+
import copy
|
20
|
+
from itertools import islice
|
21
|
+
from typing import Any, NoReturn
|
22
|
+
|
23
|
+
from bson.objectid import ObjectId
|
24
|
+
from bson.raw_bson import RawBSONDocument
|
25
|
+
from bson.son import SON
|
26
|
+
from pymongo import _csot, common
|
27
|
+
from pymongo.client_session import _validate_session_write_concern
|
28
|
+
from pymongo.collation import validate_collation_or_none
|
29
|
+
from pymongo.common import (
|
30
|
+
validate_is_document_type,
|
31
|
+
validate_ok_for_replace,
|
32
|
+
validate_ok_for_update,
|
33
|
+
)
|
34
|
+
from pymongo.errors import (
|
35
|
+
BulkWriteError,
|
36
|
+
ConfigurationError,
|
37
|
+
InvalidOperation,
|
38
|
+
OperationFailure,
|
39
|
+
)
|
40
|
+
from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc
|
41
|
+
from pymongo.message import (
|
42
|
+
_DELETE,
|
43
|
+
_INSERT,
|
44
|
+
_UPDATE,
|
45
|
+
_BulkWriteContext,
|
46
|
+
_EncryptedBulkWriteContext,
|
47
|
+
_randint,
|
48
|
+
)
|
49
|
+
from pymongo.read_preferences import ReadPreference
|
50
|
+
from pymongo.write_concern import WriteConcern
|
51
|
+
|
52
|
+
# ``limit`` values for delete commands: 0 removes every matching document,
# 1 removes at most one.
_DELETE_ALL = 0
_DELETE_ONE = 1

# For backwards compatibility. See MongoDB src/mongo/base/error_codes.err
_BAD_VALUE = 2
_UNKNOWN_ERROR = 8
_WRITE_CONCERN_ERROR = 64

# Write command names, indexed by op type (_INSERT, _UPDATE, _DELETE).
_COMMANDS = ("insert", "update", "delete")
|
61
|
+
|
62
|
+
|
63
|
+
class _Run(object):
|
64
|
+
"""Represents a batch of write operations."""
|
65
|
+
|
66
|
+
def __init__(self, op_type):
|
67
|
+
"""Initialize a new Run object."""
|
68
|
+
self.op_type = op_type
|
69
|
+
self.index_map = []
|
70
|
+
self.ops = []
|
71
|
+
self.idx_offset = 0
|
72
|
+
|
73
|
+
def index(self, idx):
|
74
|
+
"""Get the original index of an operation in this run.
|
75
|
+
|
76
|
+
:Parameters:
|
77
|
+
- `idx`: The Run index that maps to the original index.
|
78
|
+
"""
|
79
|
+
return self.index_map[idx]
|
80
|
+
|
81
|
+
def add(self, original_index, operation):
|
82
|
+
"""Add an operation to this Run instance.
|
83
|
+
|
84
|
+
:Parameters:
|
85
|
+
- `original_index`: The original index of this operation
|
86
|
+
within a larger bulk operation.
|
87
|
+
- `operation`: The operation document.
|
88
|
+
"""
|
89
|
+
self.index_map.append(original_index)
|
90
|
+
self.ops.append(operation)
|
91
|
+
|
92
|
+
|
93
|
+
def _merge_command(run, full_result, offset, result):
    """Merge a write command result into the full bulk result."""
    n = result.get("n", 0)

    if run.op_type == _INSERT:
        full_result["nInserted"] += n
    elif run.op_type == _DELETE:
        full_result["nRemoved"] += n
    elif run.op_type == _UPDATE:
        upserted_docs = result.get("upserted")
        if upserted_docs:
            num_upserted = len(upserted_docs)
            # Rewrite each upserted index relative to the whole bulk request.
            for upserted_doc in upserted_docs:
                upserted_doc["index"] = run.index(upserted_doc["index"] + offset)
            full_result["upserted"].extend(upserted_docs)
            full_result["nUpserted"] += num_upserted
            # Upserts are reported in ``n`` but did not match an existing doc.
            full_result["nMatched"] += n - num_upserted
        else:
            full_result["nMatched"] += n
        full_result["nModified"] += result["nModified"]

    for error_doc in result.get("writeErrors") or []:
        # Leave the server response intact for APM.
        merged = error_doc.copy()
        original_idx = error_doc["index"] + offset
        merged["index"] = run.index(original_idx)
        # Add the failed operation to the error document.
        merged["op"] = run.ops[original_idx]
        full_result["writeErrors"].append(merged)

    wc_error = _get_wce_doc(result)
    if wc_error:
        full_result["writeConcernErrors"].append(wc_error)
|
130
|
+
|
131
|
+
|
132
|
+
def _raise_bulk_write_error(full_result: Any) -> NoReturn:
    """Raise a BulkWriteError from the full bulk api result."""
    errors = full_result["writeErrors"]
    if errors:
        # Report errors in the order the operations were submitted.
        errors.sort(key=lambda error: error["index"])
    raise BulkWriteError(full_result)
|
137
|
+
|
138
|
+
|
139
|
+
class _Bulk(object):
    """The private guts of the bulk write API."""

    def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None):
        """Initialize a _Bulk instance."""
        # Decode replies into plain dicts and never raise on bad unicode,
        # so that server responses can always be merged into the result.
        self.collection = collection.with_options(
            codec_options=collection.codec_options._replace(
                unicode_decode_error_handler="replace", document_class=dict
            )
        )
        self.let = let
        if self.let is not None:
            common.validate_is_document_type("let", self.let)
        self.comment = comment
        self.ordered = ordered
        self.ops = []
        self.executed = False
        self.bypass_doc_val = bypass_document_validation
        # Feature-usage flags, consulted later to reject unsupported
        # combinations (e.g. collation with unacknowledged writes).
        self.uses_collation = False
        self.uses_array_filters = False
        self.uses_hint_update = False
        self.uses_hint_delete = False
        self.is_retryable = True
        self.retrying = False
        self.started_retryable_write = False
        # Extra state so that we know where to pick up on a retry attempt.
        self.current_run = None
        self.next_run = None

    @property
    def bulk_ctx_class(self):
        """Return the batching context class to use for this client.

        Uses the encrypted variant when auto-encryption is active.
        """
        encrypter = self.collection.database.client._encrypter
        if encrypter and not encrypter._bypass_auto_encryption:
            return _EncryptedBulkWriteContext
        else:
            return _BulkWriteContext

    def add_insert(self, document):
        """Add an insert document to the list of ops."""
        validate_is_document_type("document", document)
        # Generate ObjectId client side.
        if not (isinstance(document, RawBSONDocument) or "_id" in document):
            document["_id"] = ObjectId()
        self.ops.append((_INSERT, document))

    def add_update(
        self,
        selector,
        update,
        multi=False,
        upsert=False,
        collation=None,
        array_filters=None,
        hint=None,
    ):
        """Create an update document and add it to the list of ops."""
        validate_ok_for_update(update)
        cmd = SON([("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)])
        collation = validate_collation_or_none(collation)
        if collation is not None:
            self.uses_collation = True
            cmd["collation"] = collation
        if array_filters is not None:
            self.uses_array_filters = True
            cmd["arrayFilters"] = array_filters
        if hint is not None:
            self.uses_hint_update = True
            cmd["hint"] = hint
        if multi:
            # A bulk_write containing an update_many is not retryable.
            self.is_retryable = False
        self.ops.append((_UPDATE, cmd))

    def add_replace(self, selector, replacement, upsert=False, collation=None, hint=None):
        """Create a replace document and add it to the list of ops."""
        validate_ok_for_replace(replacement)
        cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)])
        collation = validate_collation_or_none(collation)
        if collation is not None:
            self.uses_collation = True
            cmd["collation"] = collation
        if hint is not None:
            self.uses_hint_update = True
            cmd["hint"] = hint
        self.ops.append((_UPDATE, cmd))

    def add_delete(self, selector, limit, collation=None, hint=None):
        """Create a delete document and add it to the list of ops."""
        cmd = SON([("q", selector), ("limit", limit)])
        collation = validate_collation_or_none(collation)
        if collation is not None:
            self.uses_collation = True
            cmd["collation"] = collation
        if hint is not None:
            self.uses_hint_delete = True
            cmd["hint"] = hint
        if limit == _DELETE_ALL:
            # A bulk_write containing a delete_many is not retryable.
            self.is_retryable = False
        self.ops.append((_DELETE, cmd))

    def gen_ordered(self):
        """Generate batches of operations, batched by type of
        operation, in the order **provided**.
        """
        run = None
        for idx, (op_type, operation) in enumerate(self.ops):
            if run is None:
                run = _Run(op_type)
            elif run.op_type != op_type:
                # Operation type changed: flush the current run and start
                # a new one so server commands stay homogeneous.
                yield run
                run = _Run(op_type)
            run.add(idx, operation)
        yield run

    def gen_unordered(self):
        """Generate batches of operations, batched by type of
        operation, in arbitrary order.
        """
        # One run per op type; op type values index into this list.
        operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)]
        for idx, (op_type, operation) in enumerate(self.ops):
            operations[op_type].add(idx, operation)

        for run in operations:
            if run.ops:
                yield run

    def _execute_command(
        self,
        generator,
        write_concern,
        session,
        sock_info,
        op_id,
        retryable,
        full_result,
        final_write_concern=None,
    ):
        """Run the generated batches on *sock_info*, merging each server
        reply into *full_result*.

        ``final_write_concern``, when given, replaces *write_concern* for
        the last operation of the last run (used by ordered w=0 writes).
        """
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        if not self.current_run:
            self.current_run = next(generator)
            self.next_run = None
        run = self.current_run

        # sock_info.command validates the session, but we use
        # sock_info.write_command.
        sock_info.validate_session(client, session)
        last_run = False

        while run:
            if not self.retrying:
                # Peek ahead so we know whether this is the final run.
                self.next_run = next(generator, None)
                if self.next_run is None:
                    last_run = True

            cmd_name = _COMMANDS[run.op_type]
            bwc = self.bulk_ctx_class(
                db_name,
                cmd_name,
                sock_info,
                op_id,
                listeners,
                session,
                run.op_type,
                self.collection.codec_options,
            )

            while run.idx_offset < len(run.ops):
                # If this is the last possible operation, use the
                # final write concern.
                if last_run and (len(run.ops) - run.idx_offset) == 1:
                    write_concern = final_write_concern or write_concern

                cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)])
                if self.comment:
                    cmd["comment"] = self.comment
                _csot.apply_write_concern(cmd, write_concern)
                if self.bypass_doc_val:
                    cmd["bypassDocumentValidation"] = True
                if self.let is not None and run.op_type in (_DELETE, _UPDATE):
                    cmd["let"] = self.let
                if session:
                    # Start a new retryable write unless one was already
                    # started for this command.
                    if retryable and not self.started_retryable_write:
                        session._start_retryable_write()
                        self.started_retryable_write = True
                    session._apply_to(cmd, retryable, ReadPreference.PRIMARY, sock_info)
                sock_info.send_cluster_time(cmd, session, client)
                sock_info.add_server_api(cmd)
                # CSOT: apply timeout before encoding the command.
                sock_info.apply_timeout(client, cmd)
                ops = islice(run.ops, run.idx_offset, None)

                # Run as many ops as possible in one command.
                if write_concern.acknowledged:
                    result, to_send = bwc.execute(cmd, ops, client)

                    # Retryable writeConcernErrors halt the execution of this run.
                    wce = result.get("writeConcernError", {})
                    if wce.get("code", 0) in _RETRYABLE_ERROR_CODES:
                        # Synthesize the full bulk result without modifying the
                        # current one because this write operation may be retried.
                        full = copy.deepcopy(full_result)
                        _merge_command(run, full, run.idx_offset, result)
                        _raise_bulk_write_error(full)

                    _merge_command(run, full_result, run.idx_offset, result)

                    # We're no longer in a retry once a command succeeds.
                    self.retrying = False
                    self.started_retryable_write = False

                    if self.ordered and "writeErrors" in result:
                        break
                else:
                    to_send = bwc.execute_unack(cmd, ops, client)

                run.idx_offset += len(to_send)

            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result["writeErrors"]:
                break
            # Reset our state
            self.current_run = run = self.next_run

    def execute_command(self, generator, write_concern, session):
        """Execute using write commands."""
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()

        def retryable_bulk(session, sock_info, retryable):
            # Closure handed to the client's retry machinery.
            self._execute_command(
                generator, write_concern, session, sock_info, op_id, retryable, full_result
            )

        client = self.collection.database.client
        with client._tmp_session(session) as s:
            client._retry_with_session(self.is_retryable, retryable_bulk, s, self)

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            _raise_bulk_write_error(full_result)
        return full_result

    def execute_op_msg_no_results(self, sock_info, generator):
        """Execute write commands with OP_MSG and w=0 writeConcern, unordered."""
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners
        op_id = _randint()

        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        while run:
            cmd_name = _COMMANDS[run.op_type]
            bwc = self.bulk_ctx_class(
                db_name,
                cmd_name,
                sock_info,
                op_id,
                listeners,
                None,
                run.op_type,
                self.collection.codec_options,
            )

            while run.idx_offset < len(run.ops):
                cmd = SON(
                    [
                        (cmd_name, self.collection.name),
                        ("ordered", False),
                        ("writeConcern", {"w": 0}),
                    ]
                )
                sock_info.add_server_api(cmd)
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                to_send = bwc.execute_unack(cmd, ops, client)
                run.idx_offset += len(to_send)
            self.current_run = run = next(generator, None)

    def execute_command_no_results(self, sock_info, generator, write_concern):
        """Execute write commands with OP_MSG and w=0 WriteConcern, ordered."""
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        # Ordered bulk writes have to be acknowledged so that we stop
        # processing at the first error, even when the application
        # specified unacknowledged writeConcern.
        initial_write_concern = WriteConcern()
        op_id = _randint()
        try:
            self._execute_command(
                generator,
                initial_write_concern,
                None,
                sock_info,
                op_id,
                False,
                full_result,
                write_concern,
            )
        except OperationFailure:
            # w=0: the caller asked for no results, so errors are dropped.
            pass

    def execute_no_results(self, sock_info, generator, write_concern):
        """Execute all operations, returning no results (w=0)."""
        if self.uses_collation:
            raise ConfigurationError("Collation is unsupported for unacknowledged writes.")
        if self.uses_array_filters:
            raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.")
        # Guard against unsupported unacknowledged writes.
        unack = write_concern and not write_concern.acknowledged
        if unack and self.uses_hint_delete and sock_info.max_wire_version < 9:
            raise ConfigurationError(
                "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands."
            )
        if unack and self.uses_hint_update and sock_info.max_wire_version < 8:
            raise ConfigurationError(
                "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands."
            )
        # Cannot have both unacknowledged writes and bypass document validation.
        if self.bypass_doc_val:
            raise OperationFailure(
                "Cannot set bypass_document_validation with unacknowledged write concern"
            )

        if self.ordered:
            return self.execute_command_no_results(sock_info, generator, write_concern)
        return self.execute_op_msg_no_results(sock_info, generator)

    def execute(self, write_concern, session):
        """Execute operations."""
        if not self.ops:
            raise InvalidOperation("No operations to execute")
        if self.executed:
            raise InvalidOperation("Bulk operations can only be executed once.")
        self.executed = True
        write_concern = write_concern or self.collection.write_concern
        session = _validate_session_write_concern(session, write_concern)

        if self.ordered:
            generator = self.gen_ordered()
        else:
            generator = self.gen_unordered()

        client = self.collection.database.client
        if not write_concern.acknowledged:
            with client._socket_for_writes(session) as sock_info:
                self.execute_no_results(sock_info, generator, write_concern)
        else:
            return self.execute_command(generator, write_concern, session)
|