sensu-plugins-mongodb-mrtrotl 1.4.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (181) hide show
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +1 -0
  3. data/LICENSE +22 -0
  4. data/README.md +27 -0
  5. data/bin/check-mongodb-metric.rb +144 -0
  6. data/bin/check-mongodb-query-count.rb +267 -0
  7. data/bin/check-mongodb.py +1644 -0
  8. data/bin/check-mongodb.rb +5 -0
  9. data/bin/metrics-mongodb-replication.rb +254 -0
  10. data/bin/metrics-mongodb.rb +133 -0
  11. data/lib/bson/__init__.py +1347 -0
  12. data/lib/bson/__pycache__/__init__.cpython-310.pyc +0 -0
  13. data/lib/bson/__pycache__/_helpers.cpython-310.pyc +0 -0
  14. data/lib/bson/__pycache__/binary.cpython-310.pyc +0 -0
  15. data/lib/bson/__pycache__/code.cpython-310.pyc +0 -0
  16. data/lib/bson/__pycache__/codec_options.cpython-310.pyc +0 -0
  17. data/lib/bson/__pycache__/dbref.cpython-310.pyc +0 -0
  18. data/lib/bson/__pycache__/decimal128.cpython-310.pyc +0 -0
  19. data/lib/bson/__pycache__/errors.cpython-310.pyc +0 -0
  20. data/lib/bson/__pycache__/int64.cpython-310.pyc +0 -0
  21. data/lib/bson/__pycache__/json_util.cpython-310.pyc +0 -0
  22. data/lib/bson/__pycache__/max_key.cpython-310.pyc +0 -0
  23. data/lib/bson/__pycache__/min_key.cpython-310.pyc +0 -0
  24. data/lib/bson/__pycache__/objectid.cpython-310.pyc +0 -0
  25. data/lib/bson/__pycache__/raw_bson.cpython-310.pyc +0 -0
  26. data/lib/bson/__pycache__/regex.cpython-310.pyc +0 -0
  27. data/lib/bson/__pycache__/son.cpython-310.pyc +0 -0
  28. data/lib/bson/__pycache__/timestamp.cpython-310.pyc +0 -0
  29. data/lib/bson/__pycache__/tz_util.cpython-310.pyc +0 -0
  30. data/lib/bson/_cbson.cpython-310-x86_64-linux-gnu.so +0 -0
  31. data/lib/bson/_helpers.py +41 -0
  32. data/lib/bson/binary.py +364 -0
  33. data/lib/bson/code.py +101 -0
  34. data/lib/bson/codec_options.py +414 -0
  35. data/lib/bson/codec_options.pyi +100 -0
  36. data/lib/bson/dbref.py +133 -0
  37. data/lib/bson/decimal128.py +314 -0
  38. data/lib/bson/errors.py +35 -0
  39. data/lib/bson/int64.py +39 -0
  40. data/lib/bson/json_util.py +874 -0
  41. data/lib/bson/max_key.py +55 -0
  42. data/lib/bson/min_key.py +55 -0
  43. data/lib/bson/objectid.py +286 -0
  44. data/lib/bson/py.typed +2 -0
  45. data/lib/bson/raw_bson.py +175 -0
  46. data/lib/bson/regex.py +135 -0
  47. data/lib/bson/son.py +208 -0
  48. data/lib/bson/timestamp.py +124 -0
  49. data/lib/bson/tz_util.py +52 -0
  50. data/lib/gridfs/__init__.py +1015 -0
  51. data/lib/gridfs/__pycache__/__init__.cpython-310.pyc +0 -0
  52. data/lib/gridfs/__pycache__/errors.cpython-310.pyc +0 -0
  53. data/lib/gridfs/__pycache__/grid_file.cpython-310.pyc +0 -0
  54. data/lib/gridfs/errors.py +33 -0
  55. data/lib/gridfs/grid_file.py +907 -0
  56. data/lib/gridfs/py.typed +2 -0
  57. data/lib/pymongo/__init__.py +185 -0
  58. data/lib/pymongo/__pycache__/__init__.cpython-310.pyc +0 -0
  59. data/lib/pymongo/__pycache__/_csot.cpython-310.pyc +0 -0
  60. data/lib/pymongo/__pycache__/aggregation.cpython-310.pyc +0 -0
  61. data/lib/pymongo/__pycache__/auth.cpython-310.pyc +0 -0
  62. data/lib/pymongo/__pycache__/auth_aws.cpython-310.pyc +0 -0
  63. data/lib/pymongo/__pycache__/bulk.cpython-310.pyc +0 -0
  64. data/lib/pymongo/__pycache__/change_stream.cpython-310.pyc +0 -0
  65. data/lib/pymongo/__pycache__/client_options.cpython-310.pyc +0 -0
  66. data/lib/pymongo/__pycache__/client_session.cpython-310.pyc +0 -0
  67. data/lib/pymongo/__pycache__/collation.cpython-310.pyc +0 -0
  68. data/lib/pymongo/__pycache__/collection.cpython-310.pyc +0 -0
  69. data/lib/pymongo/__pycache__/command_cursor.cpython-310.pyc +0 -0
  70. data/lib/pymongo/__pycache__/common.cpython-310.pyc +0 -0
  71. data/lib/pymongo/__pycache__/compression_support.cpython-310.pyc +0 -0
  72. data/lib/pymongo/__pycache__/cursor.cpython-310.pyc +0 -0
  73. data/lib/pymongo/__pycache__/daemon.cpython-310.pyc +0 -0
  74. data/lib/pymongo/__pycache__/database.cpython-310.pyc +0 -0
  75. data/lib/pymongo/__pycache__/driver_info.cpython-310.pyc +0 -0
  76. data/lib/pymongo/__pycache__/encryption.cpython-310.pyc +0 -0
  77. data/lib/pymongo/__pycache__/encryption_options.cpython-310.pyc +0 -0
  78. data/lib/pymongo/__pycache__/errors.cpython-310.pyc +0 -0
  79. data/lib/pymongo/__pycache__/event_loggers.cpython-310.pyc +0 -0
  80. data/lib/pymongo/__pycache__/hello.cpython-310.pyc +0 -0
  81. data/lib/pymongo/__pycache__/helpers.cpython-310.pyc +0 -0
  82. data/lib/pymongo/__pycache__/max_staleness_selectors.cpython-310.pyc +0 -0
  83. data/lib/pymongo/__pycache__/message.cpython-310.pyc +0 -0
  84. data/lib/pymongo/__pycache__/mongo_client.cpython-310.pyc +0 -0
  85. data/lib/pymongo/__pycache__/monitor.cpython-310.pyc +0 -0
  86. data/lib/pymongo/__pycache__/monitoring.cpython-310.pyc +0 -0
  87. data/lib/pymongo/__pycache__/network.cpython-310.pyc +0 -0
  88. data/lib/pymongo/__pycache__/ocsp_cache.cpython-310.pyc +0 -0
  89. data/lib/pymongo/__pycache__/ocsp_support.cpython-310.pyc +0 -0
  90. data/lib/pymongo/__pycache__/operations.cpython-310.pyc +0 -0
  91. data/lib/pymongo/__pycache__/periodic_executor.cpython-310.pyc +0 -0
  92. data/lib/pymongo/__pycache__/pool.cpython-310.pyc +0 -0
  93. data/lib/pymongo/__pycache__/pyopenssl_context.cpython-310.pyc +0 -0
  94. data/lib/pymongo/__pycache__/read_concern.cpython-310.pyc +0 -0
  95. data/lib/pymongo/__pycache__/read_preferences.cpython-310.pyc +0 -0
  96. data/lib/pymongo/__pycache__/response.cpython-310.pyc +0 -0
  97. data/lib/pymongo/__pycache__/results.cpython-310.pyc +0 -0
  98. data/lib/pymongo/__pycache__/saslprep.cpython-310.pyc +0 -0
  99. data/lib/pymongo/__pycache__/server.cpython-310.pyc +0 -0
  100. data/lib/pymongo/__pycache__/server_api.cpython-310.pyc +0 -0
  101. data/lib/pymongo/__pycache__/server_description.cpython-310.pyc +0 -0
  102. data/lib/pymongo/__pycache__/server_selectors.cpython-310.pyc +0 -0
  103. data/lib/pymongo/__pycache__/server_type.cpython-310.pyc +0 -0
  104. data/lib/pymongo/__pycache__/settings.cpython-310.pyc +0 -0
  105. data/lib/pymongo/__pycache__/socket_checker.cpython-310.pyc +0 -0
  106. data/lib/pymongo/__pycache__/srv_resolver.cpython-310.pyc +0 -0
  107. data/lib/pymongo/__pycache__/ssl_context.cpython-310.pyc +0 -0
  108. data/lib/pymongo/__pycache__/ssl_support.cpython-310.pyc +0 -0
  109. data/lib/pymongo/__pycache__/topology.cpython-310.pyc +0 -0
  110. data/lib/pymongo/__pycache__/topology_description.cpython-310.pyc +0 -0
  111. data/lib/pymongo/__pycache__/typings.cpython-310.pyc +0 -0
  112. data/lib/pymongo/__pycache__/uri_parser.cpython-310.pyc +0 -0
  113. data/lib/pymongo/__pycache__/write_concern.cpython-310.pyc +0 -0
  114. data/lib/pymongo/_cmessage.cpython-310-x86_64-linux-gnu.so +0 -0
  115. data/lib/pymongo/_csot.py +118 -0
  116. data/lib/pymongo/aggregation.py +229 -0
  117. data/lib/pymongo/auth.py +549 -0
  118. data/lib/pymongo/auth_aws.py +94 -0
  119. data/lib/pymongo/bulk.py +513 -0
  120. data/lib/pymongo/change_stream.py +457 -0
  121. data/lib/pymongo/client_options.py +302 -0
  122. data/lib/pymongo/client_session.py +1112 -0
  123. data/lib/pymongo/collation.py +224 -0
  124. data/lib/pymongo/collection.py +3204 -0
  125. data/lib/pymongo/command_cursor.py +353 -0
  126. data/lib/pymongo/common.py +984 -0
  127. data/lib/pymongo/compression_support.py +149 -0
  128. data/lib/pymongo/cursor.py +1345 -0
  129. data/lib/pymongo/daemon.py +141 -0
  130. data/lib/pymongo/database.py +1202 -0
  131. data/lib/pymongo/driver_info.py +42 -0
  132. data/lib/pymongo/encryption.py +884 -0
  133. data/lib/pymongo/encryption_options.py +221 -0
  134. data/lib/pymongo/errors.py +365 -0
  135. data/lib/pymongo/event_loggers.py +221 -0
  136. data/lib/pymongo/hello.py +219 -0
  137. data/lib/pymongo/helpers.py +259 -0
  138. data/lib/pymongo/max_staleness_selectors.py +114 -0
  139. data/lib/pymongo/message.py +1440 -0
  140. data/lib/pymongo/mongo_client.py +2144 -0
  141. data/lib/pymongo/monitor.py +440 -0
  142. data/lib/pymongo/monitoring.py +1801 -0
  143. data/lib/pymongo/network.py +311 -0
  144. data/lib/pymongo/ocsp_cache.py +87 -0
  145. data/lib/pymongo/ocsp_support.py +372 -0
  146. data/lib/pymongo/operations.py +507 -0
  147. data/lib/pymongo/periodic_executor.py +183 -0
  148. data/lib/pymongo/pool.py +1660 -0
  149. data/lib/pymongo/py.typed +2 -0
  150. data/lib/pymongo/pyopenssl_context.py +383 -0
  151. data/lib/pymongo/read_concern.py +75 -0
  152. data/lib/pymongo/read_preferences.py +609 -0
  153. data/lib/pymongo/response.py +109 -0
  154. data/lib/pymongo/results.py +217 -0
  155. data/lib/pymongo/saslprep.py +113 -0
  156. data/lib/pymongo/server.py +247 -0
  157. data/lib/pymongo/server_api.py +170 -0
  158. data/lib/pymongo/server_description.py +285 -0
  159. data/lib/pymongo/server_selectors.py +153 -0
  160. data/lib/pymongo/server_type.py +32 -0
  161. data/lib/pymongo/settings.py +159 -0
  162. data/lib/pymongo/socket_checker.py +104 -0
  163. data/lib/pymongo/srv_resolver.py +126 -0
  164. data/lib/pymongo/ssl_context.py +39 -0
  165. data/lib/pymongo/ssl_support.py +99 -0
  166. data/lib/pymongo/topology.py +890 -0
  167. data/lib/pymongo/topology_description.py +639 -0
  168. data/lib/pymongo/typings.py +39 -0
  169. data/lib/pymongo/uri_parser.py +624 -0
  170. data/lib/pymongo/write_concern.py +129 -0
  171. data/lib/pymongo-4.2.0.dist-info/INSTALLER +1 -0
  172. data/lib/pymongo-4.2.0.dist-info/LICENSE +201 -0
  173. data/lib/pymongo-4.2.0.dist-info/METADATA +250 -0
  174. data/lib/pymongo-4.2.0.dist-info/RECORD +167 -0
  175. data/lib/pymongo-4.2.0.dist-info/REQUESTED +0 -0
  176. data/lib/pymongo-4.2.0.dist-info/WHEEL +6 -0
  177. data/lib/pymongo-4.2.0.dist-info/top_level.txt +3 -0
  178. data/lib/sensu-plugins-mongodb/metrics.rb +391 -0
  179. data/lib/sensu-plugins-mongodb/version.rb +9 -0
  180. data/lib/sensu-plugins-mongodb.rb +1 -0
  181. metadata +407 -0
@@ -0,0 +1,1440 @@
1
+ # Copyright 2009-present MongoDB, Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Tools for creating `messages
16
+ <https://www.mongodb.com/docs/manual/reference/mongodb-wire-protocol/>`_ to be sent to
17
+ MongoDB.
18
+
19
+ .. note:: This module is for internal use and is generally not needed by
20
+ application developers.
21
+ """
22
+
23
+ import datetime
24
+ import random
25
+ import struct
26
+ from io import BytesIO as _BytesIO
27
+ from typing import Any, Dict, NoReturn
28
+
29
+ import bson
30
+ from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode
31
+ from bson.int64 import Int64
32
+ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson
33
+ from bson.son import SON
34
+
35
+ try:
36
+ from pymongo import _cmessage # type: ignore[attr-defined]
37
+
38
+ _use_c = True
39
+ except ImportError:
40
+ _use_c = False
41
+ from pymongo.errors import (
42
+ ConfigurationError,
43
+ CursorNotFound,
44
+ DocumentTooLarge,
45
+ ExecutionTimeout,
46
+ InvalidOperation,
47
+ NotPrimaryError,
48
+ OperationFailure,
49
+ ProtocolError,
50
+ )
51
+ from pymongo.hello import HelloCompat
52
+ from pymongo.read_preferences import ReadPreference
53
+ from pymongo.write_concern import WriteConcern
54
+
55
# Inclusive bounds of a signed 32-bit integer (wire-protocol int32).
MAX_INT32 = 2147483647
MIN_INT32 = -2147483648

# Overhead allowed for encoded command documents.
_COMMAND_OVERHEAD = 16382

# Write-operation type codes.
_INSERT = 0
_UPDATE = 1
_DELETE = 2

# Pre-built byte fragments used when assembling wire-protocol messages.
_EMPTY = b""
_BSONOBJ = b"\x03"  # BSON element type 0x03 (embedded document).
_ZERO_8 = b"\x00"
_ZERO_16 = b"\x00\x00"
_ZERO_32 = b"\x00\x00\x00\x00"
_ZERO_64 = b"\x00\x00\x00\x00\x00\x00\x00\x00"
# Four zero bytes followed by 0xFFFFFFFF (-1 as little-endian int32);
# presumably a zero-skip / no-limit pair — confirm at the usage site.
_SKIPLIM = b"\x00\x00\x00\x00\xff\xff\xff\xff"
# Per write-op code, the encoded header of its document-list field.
# NOTE(review): the leading \x04 looks like the BSON array element type;
# confirm against the site that consumes _OP_MAP.
_OP_MAP = {
    _INSERT: b"\x04documents\x00\x00\x00\x00\x00",
    _UPDATE: b"\x04updates\x00\x00\x00\x00\x00",
    _DELETE: b"\x04deletes\x00\x00\x00\x00\x00",
}
# Maps a write command name to the name of its document-list field.
_FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"}

# Codec options that substitute U+FFFD for invalid UTF-8 instead of raising.
_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Dict[str, Any]]" = CodecOptions(
    unicode_decode_error_handler="replace"
)
82
+
83
+
84
def _randint():
    """Return a pseudo-random signed 32-bit integer."""
    # Inclusive bounds of a signed 32-bit int (the same values as the
    # module-level MIN_INT32/MAX_INT32 constants).
    return random.randint(-2147483648, 2147483647)
87
+
88
+
89
def _maybe_add_read_preference(spec, read_preference):
    """Return *spec*, wrapped with $readPreference when appropriate.

    $readPreference is only attached for modes other than primary, to
    avoid confusing mongos versions without read-preference support.
    Plain secondaryPreferred is also skipped for maximum backwards
    compatibility (the secondaryOk wire bit already expresses it);
    it is attached only when tags or maxStalenessSeconds are present.
    """
    mode = read_preference.mode
    document = read_preference.document
    if not mode:
        return spec
    if mode == ReadPreference.SECONDARY_PREFERRED.mode and len(document) <= 1:
        return spec
    if "$query" not in spec:
        spec = SON([("$query", spec)])
    spec["$readPreference"] = document
    return spec
103
+
104
+
105
def _convert_exception(exception):
    """Render *exception* as a failure document for publishing."""
    return {
        "errtype": type(exception).__name__,
        "errmsg": str(exception),
    }
108
+
109
+
110
def _convert_write_result(operation, command, result):
    """Translate a legacy getLastError-style write *result* into the
    modern write-command response shape.

    Mirrors the _merge_legacy logic from bulk.py.
    """
    affected = result.get("n", 0)
    converted = {"ok": 1, "n": affected}
    errmsg = result.get("errmsg", result.get("err", ""))
    if errmsg:
        if not result.get("wtimeout"):
            # A hard write failure: report it and stop.
            write_error = {
                "index": 0,
                "code": result.get("code", 8),
                "errmsg": errmsg,
            }
            if "errInfo" in result:
                write_error["errInfo"] = result["errInfo"]
            converted["writeErrors"] = [write_error]
            return converted
        # wtimeout means the write succeeded on at least the primary, so
        # record a write-concern error and fall through to the fixups.
        converted["writeConcernError"] = {
            "errmsg": errmsg,
            "code": 64,
            "errInfo": {"wtimeout": True},
        }
    if operation == "insert":
        # GLE result for insert is always 0 in most MongoDB versions;
        # report the number of documents sent instead.
        converted["n"] = len(command["documents"])
    elif operation == "update":
        if "upserted" in result:
            converted["upserted"] = [{"index": 0, "_id": result["upserted"]}]
        elif result.get("updatedExisting") is False and affected == 1:
            # MongoDB before 2.6 omits the upserted _id when it is not
            # an ObjectId; recover it from the update document or the
            # query spec (the update document's _id takes precedence).
            update = command["updates"][0]
            upserted_id = update["u"].get("_id", update["q"].get("_id"))
            converted["upserted"] = [{"index": 0, "_id": upserted_id}]
    return converted
143
+
144
+
145
# OP_QUERY wire-protocol flag bits, keyed by their cursor-option name.
_OPTIONS = SON(
    [
        ("tailable", 2),
        ("oplogReplay", 8),
        ("noCursorTimeout", 16),
        ("awaitData", 32),
        ("allowPartialResults", 128),
    ]
)


# Maps legacy $-modifier names to their find-command field names.
_MODIFIERS = SON(
    [
        ("$query", "filter"),
        ("$orderby", "sort"),
        ("$hint", "hint"),
        ("$comment", "comment"),
        ("$maxScan", "maxScan"),
        ("$maxTimeMS", "maxTimeMS"),
        ("$max", "max"),
        ("$min", "min"),
        ("$returnKey", "returnKey"),
        ("$showRecordId", "showRecordId"),
        ("$showDiskLoc", "showRecordId"),  # <= MongoDb 3.0
        ("$snapshot", "snapshot"),
    ]
)
172
+
173
+
174
def _gen_find_command(
    coll,
    spec,
    projection,
    skip,
    limit,
    batch_size,
    options,
    read_concern,
    collation=None,
    session=None,
    allow_disk_use=None,
):
    """Build a "find" command document.

    A *spec* containing "$query" is treated as a legacy modifier
    document: each $-modifier is translated to its command-level field
    name via _MODIFIERS. Otherwise *spec* itself becomes the filter.
    A negative *limit* requests a single batch of abs(limit) documents.
    """
    command = SON([("find", coll)])
    if "$query" not in spec:
        command["filter"] = spec
    else:
        for key, val in spec.items():
            command[_MODIFIERS.get(key, key)] = val
        # Explain and read preference are handled by dedicated
        # mechanisms elsewhere; strip them from the command.
        command.pop("$explain", None)
        command.pop("$readPreference", None)

    if projection:
        command["projection"] = projection
    if skip:
        command["skip"] = skip
    if limit:
        command["limit"] = abs(limit)
        if limit < 0:
            # Negative limit: return one batch, then close the cursor.
            command["singleBatch"] = True
    if batch_size:
        command["batchSize"] = batch_size
    # readConcern is inherited from the transaction when one is active.
    if read_concern.level and not (session and session.in_transaction):
        command["readConcern"] = read_concern.document
    if collation:
        command["collation"] = collation
    if allow_disk_use is not None:
        command["allowDiskUse"] = allow_disk_use
    if options:
        # Translate OP_QUERY flag bits into named boolean fields.
        for flag_name, bit in _OPTIONS.items():
            if options & bit:
                command[flag_name] = True

    return command
223
+
224
+
225
def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms, comment, sock_info):
    """Build a "getMore" command document for an open cursor."""
    command = SON([("getMore", cursor_id), ("collection", coll)])
    if batch_size:
        command["batchSize"] = batch_size
    if max_await_time_ms is not None:
        command["maxTimeMS"] = max_await_time_ms
    # "comment" on getMore requires wire version 9 (MongoDB 4.4+).
    if comment is not None and sock_info.max_wire_version >= 9:
        command["comment"] = comment
    return command
235
+
236
+
237
class _Query(object):
    """A query operation.

    Bundles everything needed to send a find (either as a "find"
    command over OP_MSG or as a legacy OP_QUERY): the query spec,
    cursor options, codec/read settings, and session/client context.
    """

    __slots__ = (
        "flags",
        "db",
        "coll",
        "ntoskip",
        "spec",
        "fields",
        "codec_options",
        "read_preference",
        "limit",
        "batch_size",
        "name",
        "read_concern",
        "collation",
        "session",
        "client",
        "allow_disk_use",
        "_as_command",
        "exhaust",
    )

    # For compatibility with the _GetMore class.
    sock_mgr = None
    cursor_id = None

    def __init__(
        self,
        flags,
        db,
        coll,
        ntoskip,
        spec,
        fields,
        codec_options,
        read_preference,
        limit,
        batch_size,
        read_concern,
        collation,
        session,
        client,
        allow_disk_use,
        exhaust,
    ):
        self.flags = flags
        self.db = db
        self.coll = coll
        self.ntoskip = ntoskip
        self.spec = spec
        self.fields = fields
        self.codec_options = codec_options
        self.read_preference = read_preference
        self.read_concern = read_concern
        self.limit = limit
        self.batch_size = batch_size
        self.collation = collation
        self.session = session
        self.client = client
        self.allow_disk_use = allow_disk_use
        self.name = "find"
        # Cached (command_doc, dbname) pair built by as_command().
        self._as_command = None
        self.exhaust = exhaust

    def reset(self):
        # Drop the cached command so it is regenerated on next use.
        self._as_command = None

    def namespace(self):
        """Return the full "db.collection" namespace string."""
        return "%s.%s" % (self.db, self.coll)

    def use_command(self, sock_info):
        """Return True when this query should be sent as a find command."""
        use_find_cmd = False
        if not self.exhaust:
            use_find_cmd = True
        elif sock_info.max_wire_version >= 8:
            # OP_MSG supports exhaust on MongoDB 4.2+
            use_find_cmd = True
        elif not self.read_concern.ok_for_legacy:
            # Legacy exhaust path cannot express this read concern.
            raise ConfigurationError(
                "read concern level of %s is not valid "
                "with a max wire version of %d."
                % (self.read_concern.level, sock_info.max_wire_version)
            )

        sock_info.validate_session(self.client, self.session)
        return use_find_cmd

    def as_command(self, sock_info, apply_timeout=False):
        """Return a find command document for this query."""
        # We use the command twice: on the wire and for command monitoring.
        # Generate it once, for speed and to avoid repeating side-effects.
        if self._as_command is not None:
            return self._as_command

        explain = "$explain" in self.spec
        cmd = _gen_find_command(
            self.coll,
            self.spec,
            self.fields,
            self.ntoskip,
            self.limit,
            self.batch_size,
            self.flags,
            self.read_concern,
            self.collation,
            self.session,
            self.allow_disk_use,
        )
        if explain:
            self.name = "explain"
            cmd = SON([("explain", cmd)])
        session = self.session
        sock_info.add_server_api(cmd)
        if session:
            session._apply_to(cmd, False, self.read_preference, sock_info)
            # Explain does not support readConcern.
            if not explain and not session.in_transaction:
                session._update_read_concern(cmd, sock_info)
        sock_info.send_cluster_time(cmd, session, self.client)
        # Support auto encryption
        client = self.client
        if client._encrypter and not client._encrypter._bypass_auto_encryption:
            cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options)
        # Support CSOT
        if apply_timeout:
            sock_info.apply_timeout(client, cmd)
        self._as_command = cmd, self.db
        return self._as_command

    def get_message(self, read_preference, sock_info, use_cmd=False):
        """Get a query message, possibly setting the secondaryOk bit."""
        # Use the read_preference decided by _socket_from_server.
        self.read_preference = read_preference
        if read_preference.mode:
            # Set the secondaryOk bit.
            flags = self.flags | 4
        else:
            flags = self.flags

        ns = self.namespace()
        spec = self.spec

        if use_cmd:
            # OP_MSG path: send the cached/generated find command.
            spec = self.as_command(sock_info, apply_timeout=True)[0]
            request_id, msg, size, _ = _op_msg(
                0,
                spec,
                self.db,
                read_preference,
                self.codec_options,
                ctx=sock_info.compression_context,
            )
            return request_id, msg, size

        # OP_QUERY treats ntoreturn of -1 and 1 the same, return
        # one document and close the cursor. We have to use 2 for
        # batch size if 1 is specified.
        ntoreturn = self.batch_size == 1 and 2 or self.batch_size
        if self.limit:
            if ntoreturn:
                ntoreturn = min(self.limit, ntoreturn)
            else:
                ntoreturn = self.limit

        if sock_info.is_mongos:
            spec = _maybe_add_read_preference(spec, read_preference)

        return _query(
            flags,
            ns,
            self.ntoskip,
            ntoreturn,
            spec,
            None if use_cmd else self.fields,
            self.codec_options,
            ctx=sock_info.compression_context,
        )
416
+
417
+
418
class _GetMore(object):
    """A getmore operation.

    Retrieves further batches from a previously-opened cursor, either
    as a "getMore" command over OP_MSG or as a legacy OP_GET_MORE.
    """

    __slots__ = (
        "db",
        "coll",
        "ntoreturn",
        "cursor_id",
        "max_await_time_ms",
        "codec_options",
        "read_preference",
        "session",
        "client",
        "sock_mgr",
        "_as_command",
        "exhaust",
        "comment",
    )

    name = "getMore"

    def __init__(
        self,
        db,
        coll,
        ntoreturn,
        cursor_id,
        codec_options,
        read_preference,
        session,
        client,
        max_await_time_ms,
        sock_mgr,
        exhaust,
        comment,
    ):
        self.db = db
        self.coll = coll
        self.ntoreturn = ntoreturn
        self.cursor_id = cursor_id
        self.codec_options = codec_options
        self.read_preference = read_preference
        self.session = session
        self.client = client
        self.max_await_time_ms = max_await_time_ms
        self.sock_mgr = sock_mgr
        # Cached (command_doc, dbname) pair built by as_command().
        self._as_command = None
        self.exhaust = exhaust
        self.comment = comment

    def reset(self):
        # Drop the cached command so it is regenerated on next use.
        self._as_command = None

    def namespace(self):
        """Return the full "db.collection" namespace string."""
        return "%s.%s" % (self.db, self.coll)

    def use_command(self, sock_info):
        """Return True when this getmore should be sent as a command."""
        use_cmd = False
        if not self.exhaust:
            use_cmd = True
        elif sock_info.max_wire_version >= 8:
            # OP_MSG supports exhaust on MongoDB 4.2+
            use_cmd = True

        sock_info.validate_session(self.client, self.session)
        return use_cmd

    def as_command(self, sock_info, apply_timeout=False):
        """Return a getMore command document for this query."""
        # See _Query.as_command for an explanation of this caching.
        if self._as_command is not None:
            return self._as_command

        cmd = _gen_get_more_command(
            self.cursor_id,
            self.coll,
            self.ntoreturn,
            self.max_await_time_ms,
            self.comment,
            sock_info,
        )
        if self.session:
            self.session._apply_to(cmd, False, self.read_preference, sock_info)
        sock_info.add_server_api(cmd)
        sock_info.send_cluster_time(cmd, self.session, self.client)
        # Support auto encryption
        client = self.client
        if client._encrypter and not client._encrypter._bypass_auto_encryption:
            cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options)
        # Support CSOT
        if apply_timeout:
            # NOTE(review): unlike _Query.as_command this passes cmd=None;
            # presumably getMore must not inject maxTimeMS itself —
            # confirm against sock_info.apply_timeout.
            sock_info.apply_timeout(client, cmd=None)
        self._as_command = cmd, self.db
        return self._as_command

    def get_message(self, dummy0, sock_info, use_cmd=False):
        """Get a getmore message."""

        ns = self.namespace()
        ctx = sock_info.compression_context

        if use_cmd:
            # OP_MSG path: send the cached/generated getMore command.
            spec = self.as_command(sock_info, apply_timeout=True)[0]
            if self.sock_mgr:
                # An exhaust cursor is in progress; allow the server to
                # keep streaming replies.
                flags = _OpMsg.EXHAUST_ALLOWED
            else:
                flags = 0
            request_id, msg, size, _ = _op_msg(
                flags, spec, self.db, None, self.codec_options, ctx=sock_info.compression_context
            )
            return request_id, msg, size

        return _get_more(ns, self.ntoreturn, self.cursor_id, ctx)
531
+
532
+
533
class _RawBatchQuery(_Query):
    """A find operation whose batches are returned as raw BSON."""

    def use_command(self, sock_info):
        """Use the find command except on the legacy exhaust path."""
        # Invoke the base implementation for its validation side effects.
        super(_RawBatchQuery, self).use_command(sock_info)
        # MongoDB 4.2+ (wire version 8) supports exhaust over OP_MSG.
        return sock_info.max_wire_version >= 8 or not self.exhaust
543
+
544
+
545
class _RawBatchGetMore(_GetMore):
    """A getmore operation whose batches are returned as raw BSON."""

    def use_command(self, sock_info):
        """Use the getMore command except on the legacy exhaust path."""
        # Invoke the base implementation for its validation side effects.
        super(_RawBatchGetMore, self).use_command(sock_info)
        # MongoDB 4.2+ (wire version 8) supports exhaust over OP_MSG.
        return sock_info.max_wire_version >= 8 or not self.exhaust
555
+
556
+
557
+ class _CursorAddress(tuple):
558
+ """The server address (host, port) of a cursor, with namespace property."""
559
+
560
+ __namespace: Any
561
+
562
+ def __new__(cls, address, namespace):
563
+ self = tuple.__new__(cls, address)
564
+ self.__namespace = namespace
565
+ return self
566
+
567
+ @property
568
+ def namespace(self):
569
+ """The namespace this cursor."""
570
+ return self.__namespace
571
+
572
+ def __hash__(self):
573
+ # Two _CursorAddress instances with different namespaces
574
+ # must not hash the same.
575
+ return (self + (self.__namespace,)).__hash__()
576
+
577
+ def __eq__(self, other):
578
+ if isinstance(other, _CursorAddress):
579
+ return tuple(self) == tuple(other) and self.namespace == other.namespace
580
+ return NotImplemented
581
+
582
+ def __ne__(self, other):
583
+ return not self == other
584
+
585
+
586
_pack_compression_header = struct.Struct("<iiiiiiB").pack
_COMPRESSION_HEADER_SIZE = 25


def _compress(operation, data, ctx):
    """Compress *data* with *ctx* and prepend an OP_COMPRESSED header.

    Returns a (request_id, message_bytes) pair.
    """
    payload = ctx.compress(data)
    request_id = _randint()

    header = _pack_compression_header(
        _COMPRESSION_HEADER_SIZE + len(payload),  # messageLength
        request_id,  # requestID
        0,  # responseTo (0: this is a request)
        2012,  # opCode: OP_COMPRESSED
        operation,  # original opcode being wrapped
        len(data),  # uncompressed message length
        ctx.compressor_id,  # compressor id
    )
    return request_id, header + payload
605
+
606
+
607
_pack_header = struct.Struct("<iiii").pack


def __pack_message(operation, data):
    """Prepend a standard 16-byte wire-protocol header to *data*.

    Returns a (request_id, message_bytes) pair.
    """
    request_id = _randint()
    # Header fields: messageLength (header + body), requestID,
    # responseTo (0: this is a request), opCode.
    header = _pack_header(16 + len(data), request_id, 0, operation)
    return request_id, header + data
618
+
619
+
620
_pack_int = struct.Struct("<i").pack
_pack_op_msg_flags_type = struct.Struct("<IB").pack
_pack_byte = struct.Struct("<B").pack


def _op_msg_no_header(flags, command, identifier, docs, opts):
    """Get a OP_MSG message.

    Returns (message_bytes, total_bson_size, max_doc_size). *identifier*
    names an optional type-1 payload whose documents are *docs*.

    Note: this method handles multiple documents in a type one payload but
    it does not perform batch splitting and the total message size is
    only checked *after* generating the entire message.
    """
    # Encode the command document in payload 0 without checking keys.
    encoded = _dict_to_bson(command, False, opts)
    flags_type = _pack_op_msg_flags_type(flags, 0)
    total_size = len(encoded)
    max_doc_size = 0
    if identifier:
        # Type-1 payload: payload-type byte, int32 section size, the
        # identifier cstring, then the concatenated BSON documents.
        type_one = _pack_byte(1)
        cstring = _make_c_string(identifier)
        encoded_docs = [_dict_to_bson(doc, False, opts) for doc in docs]
        # +4 accounts for the int32 section-size prefix itself.
        size = len(cstring) + sum(len(doc) for doc in encoded_docs) + 4
        encoded_size = _pack_int(size)
        total_size += size
        max_doc_size = max(len(doc) for doc in encoded_docs)
        data = [flags_type, encoded, type_one, encoded_size, cstring] + encoded_docs
    else:
        data = [flags_type, encoded]
    return b"".join(data), total_size, max_doc_size
649
+
650
+
651
def _op_msg_compressed(flags, command, identifier, docs, opts, ctx):
    """Internal compressed OP_MSG message helper."""
    msg, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts)
    # 2013 is the OP_MSG opcode, recorded as the original operation.
    rid, msg = _compress(2013, msg, ctx)
    return rid, msg, total_size, max_bson_size


def _op_msg_uncompressed(flags, command, identifier, docs, opts):
    """Internal uncompressed OP_MSG message helper."""
    data, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts)
    request_id, op_message = __pack_message(2013, data)
    return request_id, op_message, total_size, max_bson_size


if _use_c:
    # Prefer the C extension's implementation when it is available.
    _op_msg_uncompressed = _cmessage._op_msg  # noqa: F811
667
+
668
+
669
def _op_msg(flags, command, dbname, read_preference, opts, ctx=None):
    """Build an OP_MSG message for *command*, compressed when *ctx* is set.

    Write commands have their document list moved into a type-1 payload;
    the list is restored on *command* before returning.
    """
    command["$db"] = dbname
    # getMore commands do not send $readPreference; only attach it when
    # the mode is something other than primary (the default).
    if read_preference is not None and "$readPreference" not in command:
        if read_preference.mode:
            command["$readPreference"] = read_preference.document
    name = next(iter(command))
    identifier = _FIELD_MAP.get(name)
    if identifier is not None and identifier in command:
        docs = command.pop(identifier)
    else:
        identifier = ""
        docs = None
    try:
        if ctx:
            return _op_msg_compressed(flags, command, identifier, docs, opts, ctx)
        return _op_msg_uncompressed(flags, command, identifier, docs, opts)
    finally:
        # Restore the split-out document list so the caller's command
        # document is left unchanged.
        if identifier:
            command[identifier] = docs
692
+
693
+
694
def _query_impl(options, collection_name, num_to_skip, num_to_return, query, field_selector, opts):
    """Get an OP_QUERY message.

    Returns (body_bytes, max_bson_size) where max_bson_size is the
    larger of the encoded query and field-selector documents.
    """
    encoded = _dict_to_bson(query, False, opts)
    if field_selector:
        efs = _dict_to_bson(field_selector, False, opts)
    else:
        efs = b""
    max_bson_size = max(len(encoded), len(efs))
    # OP_QUERY body layout: flags, fullCollectionName cstring,
    # numberToSkip, numberToReturn, query doc, optional selector doc.
    return (
        b"".join(
            [
                _pack_int(options),
                _make_c_string(collection_name),
                _pack_int(num_to_skip),
                _pack_int(num_to_return),
                encoded,
                efs,
            ]
        ),
        max_bson_size,
    )
715
+
716
+
717
def _query_compressed(
    options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx=None
):
    """Internal compressed query message helper."""
    op_query, max_bson_size = _query_impl(
        options, collection_name, num_to_skip, num_to_return, query, field_selector, opts
    )
    # Wrap the raw OP_QUERY (op code 2004) in an OP_COMPRESSED envelope.
    request_id, message = _compress(2004, op_query, ctx)
    return request_id, message, max_bson_size
726
+
727
+
728
def _query_uncompressed(
    options, collection_name, num_to_skip, num_to_return, query, field_selector, opts
):
    """Internal query message helper."""
    op_query, max_bson_size = _query_impl(
        options, collection_name, num_to_skip, num_to_return, query, field_selector, opts
    )
    # Prepend the standard message header with op code 2004 (OP_QUERY).
    request_id, message = __pack_message(2004, op_query)
    return request_id, message, max_bson_size


# Prefer the C extension's implementation when it is available.
if _use_c:
    _query_uncompressed = _cmessage._query_message  # noqa: F811
741
+
742
+
743
def _query(
    options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx=None
):
    """Get a **query** message."""
    # Without a compression context, send the plain OP_QUERY form.
    if not ctx:
        return _query_uncompressed(
            options, collection_name, num_to_skip, num_to_return, query, field_selector, opts
        )
    return _query_compressed(
        options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx
    )
754
+
755
+
756
# Pre-compiled packer for a little-endian signed 64-bit integer (cursor ids).
_pack_long_long = struct.Struct("<q").pack
757
+
758
+
759
def _get_more_impl(collection_name, num_to_return, cursor_id):
    """Get an OP_GET_MORE message."""
    # Layout: ZERO (reserved), full collection name, numberToReturn, cursorID.
    parts = [
        _ZERO_32,
        _make_c_string(collection_name),
        _pack_int(num_to_return),
        _pack_long_long(cursor_id),
    ]
    return b"".join(parts)
769
+
770
+
771
def _get_more_compressed(collection_name, num_to_return, cursor_id, ctx):
    """Internal compressed getMore message helper."""
    body = _get_more_impl(collection_name, num_to_return, cursor_id)
    # 2005 is the OP_GET_MORE op code.
    return _compress(2005, body, ctx)
774
+
775
+
776
def _get_more_uncompressed(collection_name, num_to_return, cursor_id):
    """Internal getMore message helper."""
    body = _get_more_impl(collection_name, num_to_return, cursor_id)
    # 2005 is the OP_GET_MORE op code.
    return __pack_message(2005, body)


# Prefer the C extension's implementation when it is available.
if _use_c:
    _get_more_uncompressed = _cmessage._get_more_message  # noqa: F811
783
+
784
+
785
def _get_more(collection_name, num_to_return, cursor_id, ctx=None):
    """Get a **getMore** message."""
    # Without a compression context, send the plain OP_GET_MORE form.
    if not ctx:
        return _get_more_uncompressed(collection_name, num_to_return, cursor_id)
    return _get_more_compressed(collection_name, num_to_return, cursor_id, ctx)
790
+
791
+
792
class _BulkWriteContext(object):
    """A wrapper around SocketInfo for use with write splitting functions.

    Batches bulk-write documents into OP_MSG messages sized for the
    connected server, sends them, and publishes command monitoring
    (APM) events around each round trip.
    """

    # __slots__ avoids a per-instance __dict__; one context is created per
    # bulk operation, and attribute access here is on the write hot path.
    __slots__ = (
        "db_name",
        "sock_info",
        "op_id",
        "name",
        "field",
        "publish",
        "start_time",
        "listeners",
        "session",
        "compress",
        "op_type",
        "codec",
    )

    def __init__(
        self, database_name, cmd_name, sock_info, operation_id, listeners, session, op_type, codec
    ):
        self.db_name = database_name
        self.sock_info = sock_info
        self.op_id = operation_id
        self.listeners = listeners
        # Only time and publish events when command monitoring is enabled.
        self.publish = listeners.enabled_for_commands
        self.name = cmd_name
        # The document-list field name for this command ("documents",
        # "updates", or "deletes").
        self.field = _FIELD_MAP[self.name]
        self.start_time = datetime.datetime.now() if self.publish else None
        self.session = session
        self.compress = True if sock_info.compression_context else False
        self.op_type = op_type
        self.codec = codec

    def _batch_command(self, cmd, docs):
        # Encode as many docs as fit into one OP_MSG batch; `to_send` is the
        # subset actually included (callers retry with the remainder).
        namespace = self.db_name + ".$cmd"
        request_id, msg, to_send = _do_batched_op_msg(
            namespace, self.op_type, cmd, docs, self.codec, self
        )
        if not to_send:
            raise InvalidOperation("cannot do an empty bulk write")
        return request_id, msg, to_send

    def execute(self, cmd, docs, client):
        """Send one acknowledged batch and return (server result, docs sent)."""
        request_id, msg, to_send = self._batch_command(cmd, docs)
        result = self.write_command(cmd, request_id, msg, to_send)
        client._process_response(result, self.session)
        return result, to_send

    def execute_unack(self, cmd, docs, client):
        """Send one unacknowledged (w=0) batch and return the docs sent."""
        request_id, msg, to_send = self._batch_command(cmd, docs)
        # Though this isn't strictly a "legacy" write, the helper
        # handles publishing commands and sending our message
        # without receiving a result. Send 0 for max_doc_size
        # to disable size checking. Size checking is handled while
        # the documents are encoded to BSON.
        self.unack_write(cmd, request_id, msg, 0, to_send)
        return to_send

    @property
    def max_bson_size(self):
        """A proxy for SockInfo.max_bson_size."""
        return self.sock_info.max_bson_size

    @property
    def max_message_size(self):
        """A proxy for SockInfo.max_message_size."""
        if self.compress:
            # Subtract 16 bytes for the message header.
            return self.sock_info.max_message_size - 16
        return self.sock_info.max_message_size

    @property
    def max_write_batch_size(self):
        """A proxy for SockInfo.max_write_batch_size."""
        return self.sock_info.max_write_batch_size

    @property
    def max_split_size(self):
        """The maximum size of a BSON command before batch splitting."""
        return self.max_bson_size

    def unack_write(self, cmd, request_id, msg, max_doc_size, docs):
        """A proxy for SocketInfo.unack_write that handles event publishing."""
        if self.publish:
            assert self.start_time is not None
            # Time accumulated since the previous batch's event.
            duration = datetime.datetime.now() - self.start_time
            cmd = self._start(cmd, request_id, docs)
            start = datetime.datetime.now()
        try:
            result = self.sock_info.unack_write(msg, max_doc_size)
            if self.publish:
                duration = (datetime.datetime.now() - start) + duration
                if result is not None:
                    reply = _convert_write_result(self.name, cmd, result)
                else:
                    # Comply with APM spec.
                    reply = {"ok": 1}
                self._succeed(request_id, reply, duration)
        except Exception as exc:
            if self.publish:
                assert self.start_time is not None
                duration = (datetime.datetime.now() - start) + duration
                if isinstance(exc, OperationFailure):
                    failure = _convert_write_result(self.name, cmd, exc.details)
                elif isinstance(exc, NotPrimaryError):
                    failure = exc.details
                else:
                    failure = _convert_exception(exc)
                self._fail(request_id, failure, duration)
            raise
        finally:
            # Reset the clock so the next batch's duration starts here.
            self.start_time = datetime.datetime.now()
        return result

    def write_command(self, cmd, request_id, msg, docs):
        """A proxy for SocketInfo.write_command that handles event publishing."""
        if self.publish:
            assert self.start_time is not None
            # Time accumulated since the previous batch's event.
            duration = datetime.datetime.now() - self.start_time
            self._start(cmd, request_id, docs)
            start = datetime.datetime.now()
        try:
            reply = self.sock_info.write_command(request_id, msg, self.codec)
            if self.publish:
                duration = (datetime.datetime.now() - start) + duration
                self._succeed(request_id, reply, duration)
        except Exception as exc:
            if self.publish:
                duration = (datetime.datetime.now() - start) + duration
                if isinstance(exc, (NotPrimaryError, OperationFailure)):
                    failure = exc.details
                else:
                    failure = _convert_exception(exc)
                self._fail(request_id, failure, duration)
            raise
        finally:
            # Reset the clock so the next batch's duration starts here.
            self.start_time = datetime.datetime.now()
        return reply

    def _start(self, cmd, request_id, docs):
        """Publish a CommandStartedEvent."""
        # Re-attach the document list so the published command is complete.
        cmd[self.field] = docs
        self.listeners.publish_command_start(
            cmd,
            self.db_name,
            request_id,
            self.sock_info.address,
            self.op_id,
            self.sock_info.service_id,
        )
        return cmd

    def _succeed(self, request_id, reply, duration):
        """Publish a CommandSucceededEvent."""
        self.listeners.publish_command_success(
            duration,
            reply,
            self.name,
            request_id,
            self.sock_info.address,
            self.op_id,
            self.sock_info.service_id,
        )

    def _fail(self, request_id, failure, duration):
        """Publish a CommandFailedEvent."""
        self.listeners.publish_command_failure(
            duration,
            failure,
            self.name,
            request_id,
            self.sock_info.address,
            self.op_id,
            self.sock_info.service_id,
        )
967
+ )
968
+
969
+
970
# From the Client Side Encryption spec:
# Because automatic encryption increases the size of commands, the driver
# MUST split bulk writes at a reduced size limit before undergoing automatic
# encryption. The write payload MUST be split at 2MiB (2097152).
_MAX_SPLIT_SIZE_ENC = 2097152  # 2 MiB
975
+
976
+
977
class _EncryptedBulkWriteContext(_BulkWriteContext):
    """A _BulkWriteContext variant used with client-side field level
    encryption.

    Instead of sending a raw pre-encoded OP_MSG, it rebuilds the batched
    write as a command document and sends it through sock_info.command so
    the command can be auto-encrypted, using the reduced 2 MiB split size.
    """

    __slots__ = ()

    def _batch_command(self, cmd, docs):
        # Encode the batch using the legacy OP_QUERY write-command layout,
        # then recover the embedded command document from the raw bytes.
        namespace = self.db_name + ".$cmd"
        msg, to_send = _encode_batched_write_command(
            namespace, self.op_type, cmd, docs, self.codec, self
        )
        if not to_send:
            raise InvalidOperation("cannot do an empty bulk write")

        # Chop off the OP_QUERY header to get a properly batched write command.
        # Skips flags (4 bytes), the NUL-terminated namespace (found via
        # index), then 8 bytes of skip/limit (the +9 covers the NUL too).
        cmd_start = msg.index(b"\x00", 4) + 9
        cmd = _inflate_bson(memoryview(msg)[cmd_start:], DEFAULT_RAW_BSON_OPTIONS)
        return cmd, to_send

    def execute(self, cmd, docs, client):
        # Acknowledged write: run the batched command and return its result.
        batched_cmd, to_send = self._batch_command(cmd, docs)
        result = self.sock_info.command(
            self.db_name, batched_cmd, codec_options=self.codec, session=self.session, client=client
        )
        return result, to_send

    def execute_unack(self, cmd, docs, client):
        # Unacknowledged write: w=0 write concern, no result expected.
        batched_cmd, to_send = self._batch_command(cmd, docs)
        self.sock_info.command(
            self.db_name,
            batched_cmd,
            write_concern=WriteConcern(w=0),
            session=self.session,
            client=client,
        )
        return to_send

    @property
    def max_split_size(self):
        """Reduce the batch splitting size."""
        return _MAX_SPLIT_SIZE_ENC
1015
+
1016
+
1017
def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> NoReturn:
    """Internal helper for raising DocumentTooLarge."""
    if operation != "insert":
        # There's nothing intelligent we can say
        # about size for update and delete
        raise DocumentTooLarge("%r command document too large" % (operation,))
    raise DocumentTooLarge(
        "BSON document too large (%d bytes)"
        " - the connected server supports"
        " BSON document sizes up to %d"
        " bytes." % (doc_size, max_size)
    )
1030
+
1031
+
1032
+ # OP_MSG -------------------------------------------------------------
1033
+
1034
+
1035
# Maps the internal write-op codes (_INSERT/_UPDATE/_DELETE) to the OP_MSG
# payload type 1 identifier; each is a C string, hence the trailing NUL.
_OP_MSG_MAP = {
    _INSERT: b"documents\x00",
    _UPDATE: b"updates\x00",
    _DELETE: b"deletes\x00",
}
1040
+
1041
+
1042
def _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf):
    """Create a batched OP_MSG write.

    Writes the flags, a type-0 section holding `command`, and a type-1
    section holding as many of `docs` as fit within the server limits,
    into `buf`. Returns (docs actually written, total bytes in buf).
    """
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    max_message_size = ctx.max_message_size

    # moreToCome (bit 1) is set for unacknowledged writes.
    flags = b"\x00\x00\x00\x00" if ack else b"\x02\x00\x00\x00"
    # Flags
    buf.write(flags)

    # Type 0 Section
    buf.write(b"\x00")
    buf.write(_dict_to_bson(command, False, opts))

    # Type 1 Section
    buf.write(b"\x01")
    size_location = buf.tell()
    # Save space for size
    buf.write(b"\x00\x00\x00\x00")
    try:
        buf.write(_OP_MSG_MAP[operation])
    except KeyError:
        raise InvalidOperation("Unknown command")

    to_send = []
    idx = 0
    for doc in docs:
        # Encode the current operation
        value = _dict_to_bson(doc, False, opts)
        doc_length = len(value)
        new_message_size = buf.tell() + doc_length
        # Does first document exceed max_message_size?
        doc_too_large = idx == 0 and (new_message_size > max_message_size)
        # When OP_MSG is used unacknowledged we have to check
        # document size client side or applications won't be notified.
        # Otherwise we let the server deal with documents that are too large
        # since ordered=False causes those documents to be skipped instead of
        # halting the bulk write operation.
        unacked_doc_too_large = not ack and (doc_length > max_bson_size)
        if doc_too_large or unacked_doc_too_large:
            write_op = list(_FIELD_MAP.keys())[operation]
            _raise_document_too_large(write_op, len(value), max_bson_size)
        # We have enough data, return this batch.
        if new_message_size > max_message_size:
            break
        buf.write(value)
        to_send.append(doc)
        idx += 1
        # We have enough documents, return this batch.
        if idx == max_write_batch_size:
            break

    # Write type 1 section size
    length = buf.tell()
    buf.seek(size_location)
    buf.write(_pack_int(length - size_location))

    return to_send, length
1100
+
1101
+
1102
def _encode_batched_op_msg(operation, command, docs, ack, opts, ctx):
    """Encode the next batched insert, update, or delete operation
    as OP_MSG.
    """
    out = _BytesIO()
    to_send, _ = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, out)
    return out.getvalue(), to_send


# Prefer the C extension's implementation when it is available.
if _use_c:
    _encode_batched_op_msg = _cmessage._encode_batched_op_msg  # noqa: F811
1114
+
1115
+
1116
def _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx):
    """Create the next batched insert, update, or delete operation
    with OP_MSG, compressed.
    """
    encoded, to_send = _encode_batched_op_msg(operation, command, docs, ack, opts, ctx)
    # 2013 is the OP_MSG op code; the batch is wrapped in OP_COMPRESSED.
    request_id, message = _compress(2013, encoded, ctx.sock_info.compression_context)
    return request_id, message, to_send
1124
+
1125
+
1126
def _batched_op_msg(operation, command, docs, ack, opts, ctx):
    """OP_MSG implementation entry point.

    Builds a complete wire message: a 16-byte header (messageLength,
    requestID, responseTo, opCode) followed by the batched OP_MSG body.
    Returns (request_id, message bytes, docs actually included).
    """
    buf = _BytesIO()

    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    # 0x7dd == 2013, the OP_MSG op code, little-endian.
    buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00")

    to_send, length = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf)

    # Header - request id and message length
    # requestID lives at byte offset 4, messageLength at offset 0.
    buf.seek(4)
    request_id = _randint()
    buf.write(_pack_int(request_id))
    buf.seek(0)
    buf.write(_pack_int(length))

    return request_id, buf.getvalue(), to_send


# Prefer the C extension's implementation when it is available.
if _use_c:
    _batched_op_msg = _cmessage._batched_op_msg  # noqa: F811
1149
+
1150
+
1151
def _do_batched_op_msg(namespace, operation, command, docs, opts, ctx):
    """Create the next batched insert, update, or delete operation
    using OP_MSG.
    """
    command["$db"] = namespace.split(".", 1)[0]
    # The write is acknowledged unless an explicit writeConcern sets w=0.
    try:
        write_concern = command["writeConcern"]
    except KeyError:
        ack = True
    else:
        ack = bool(write_concern.get("w", 1))
    if ctx.sock_info.compression_context:
        return _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx)
    return _batched_op_msg(operation, command, docs, ack, opts, ctx)
1163
+
1164
+
1165
+ # End OP_MSG -----------------------------------------------------
1166
+
1167
+
1168
def _encode_batched_write_command(namespace, operation, command, docs, opts, ctx):
    """Encode the next batched insert, update, or delete command."""
    out = _BytesIO()
    to_send, _ = _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, out)
    return out.getvalue(), to_send


# Prefer the C extension's implementation when it is available.
if _use_c:
    _encode_batched_write_command = _cmessage._encode_batched_write_command  # noqa: F811
1178
+
1179
+
1180
def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf):
    """Create a batched OP_QUERY write command.

    Writes an OP_QUERY body into `buf` whose query document is `command`
    with as many of `docs` as fit appended as a BSON array under the
    operation's field name. Returns (docs written, total bytes in buf).
    """
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes.
    # Server guarantees there is enough room: SERVER-10643.
    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD
    max_split_size = ctx.max_split_size

    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(namespace.encode("utf8"))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(encode(command))

    # Start of payload
    # Drop the command document's closing NUL so the array field can be
    # appended inside it.
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation("Unknown command")

    # Where to write list document length
    list_start = buf.tell() - 4
    to_send = []
    idx = 0
    for doc in docs:
        # Encode the current operation
        key = str(idx).encode("utf8")
        value = _dict_to_bson(doc, False, opts)
        # Is there enough room to add this document? max_cmd_size accounts for
        # the two trailing null bytes.
        doc_too_large = len(value) > max_cmd_size
        if doc_too_large:
            write_op = list(_FIELD_MAP.keys())[operation]
            _raise_document_too_large(write_op, len(value), max_bson_size)
        enough_data = idx >= 1 and (buf.tell() + len(key) + len(value)) >= max_split_size
        enough_documents = idx >= max_write_batch_size
        if enough_data or enough_documents:
            break
        buf.write(_BSONOBJ)
        buf.write(key)
        buf.write(_ZERO_8)
        buf.write(value)
        to_send.append(doc)
        idx += 1

    # Finalize the current OP_QUERY message.
    # Close list and command documents
    buf.write(_ZERO_16)

    # Write document lengths and request id
    length = buf.tell()
    buf.seek(list_start)
    buf.write(_pack_int(length - list_start - 1))
    buf.seek(command_start)
    buf.write(_pack_int(length - command_start))

    return to_send, length
1247
+
1248
+
1249
class _OpReply(object):
    """A MongoDB OP_REPLY response message."""

    __slots__ = ("flags", "cursor_id", "number_returned", "documents")

    # Parses responseFlags (i), cursorID (q), startingFrom (i),
    # numberReturned (i) from the reply body.
    UNPACK_FROM = struct.Struct("<iqii").unpack_from
    OP_CODE = 1

    def __init__(self, flags, cursor_id, number_returned, documents):
        self.flags = flags
        self.cursor_id = Int64(cursor_id)
        self.number_returned = number_returned
        # Raw concatenated BSON bytes; decoded lazily by unpack_response.
        self.documents = documents

    def raw_response(self, cursor_id=None, user_fields=None):
        """Check the response header from the database, without decoding BSON.

        Check the response for errors and unpack.

        Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or
        OperationFailure.

        :Parameters:
          - `cursor_id` (optional): cursor_id we sent to get this response -
            used for raising an informative exception when we get cursor id not
            valid at server response.
        """
        # Flag bit 0: CursorNotFound.
        if self.flags & 1:
            # Shouldn't get this response if we aren't doing a getMore
            if cursor_id is None:
                raise ProtocolError("No cursor id for getMore operation")

            # Fake a getMore command response. OP_GET_MORE provides no
            # document.
            msg = "Cursor not found, cursor id: %d" % (cursor_id,)
            errobj = {"ok": 0, "errmsg": msg, "code": 43}
            raise CursorNotFound(msg, 43, errobj)
        # Flag bit 1: QueryFailure; the body holds a single error document.
        elif self.flags & 2:
            error_object: dict = bson.BSON(self.documents).decode()
            # Fake the ok field if it doesn't exist.
            error_object.setdefault("ok", 0)
            if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR):
                raise NotPrimaryError(error_object["$err"], error_object)
            elif error_object.get("code") == 50:
                default_msg = "operation exceeded time limit"
                raise ExecutionTimeout(
                    error_object.get("$err", default_msg), error_object.get("code"), error_object
                )
            raise OperationFailure(
                "database error: %s" % error_object.get("$err"),
                error_object.get("code"),
                error_object,
            )
        if self.documents:
            return [self.documents]
        return []

    def unpack_response(
        self,
        cursor_id=None,
        codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
        user_fields=None,
        legacy_response=False,
    ):
        """Unpack a response from the database and decode the BSON document(s).

        Check the response for errors and unpack, returning a dictionary
        containing the response data.

        Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or
        OperationFailure.

        :Parameters:
          - `cursor_id` (optional): cursor_id we sent to get this response -
            used for raising an informative exception when we get cursor id not
            valid at server response
          - `codec_options` (optional): an instance of
            :class:`~bson.codec_options.CodecOptions`
        """
        # raw_response performs the error-flag checks; we only decode here.
        self.raw_response(cursor_id)
        if legacy_response:
            return bson.decode_all(self.documents, codec_options)
        return bson._decode_all_selective(self.documents, codec_options, user_fields)

    def command_response(self, codec_options):
        """Unpack a command response."""
        docs = self.unpack_response(codec_options=codec_options)
        # A command reply always contains exactly one document.
        assert self.number_returned == 1
        return docs[0]

    def raw_command_response(self):
        """Return the bytes of the command response."""
        # This should never be called on _OpReply.
        raise NotImplementedError

    @property
    def more_to_come(self):
        """Is the moreToCome bit set on this response?"""
        return False

    @classmethod
    def unpack(cls, msg):
        """Construct an _OpReply from raw bytes."""
        # PYTHON-945: ignore starting_from field.
        flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg)

        # The fixed reply header is 20 bytes; the rest is BSON documents.
        documents = msg[20:]
        return cls(flags, cursor_id, number_returned, documents)
1357
+
1358
+
1359
class _OpMsg(object):
    """A MongoDB OP_MSG response message."""

    __slots__ = ("flags", "cursor_id", "number_returned", "payload_document")

    # Parses flagBits (I), the first section's payload type (B), and the
    # first payload's size (i).
    UNPACK_FROM = struct.Struct("<IBi").unpack_from
    OP_CODE = 2013

    # Flag bits.
    CHECKSUM_PRESENT = 1
    MORE_TO_COME = 1 << 1
    EXHAUST_ALLOWED = 1 << 16  # Only present on requests.

    def __init__(self, flags, payload_document):
        self.flags = flags
        # Raw BSON bytes of the single type-0 section.
        self.payload_document = payload_document

    def raw_response(self, cursor_id=None, user_fields={}):  # noqa: B006
        """
        cursor_id is ignored
        user_fields is used to determine which fields must not be decoded
        """
        inflated_response = _decode_selective(
            RawBSONDocument(self.payload_document), user_fields, DEFAULT_RAW_BSON_OPTIONS
        )
        return [inflated_response]

    def unpack_response(
        self,
        cursor_id=None,
        codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
        user_fields=None,
        legacy_response=False,
    ):
        """Unpack a OP_MSG command response.

        :Parameters:
          - `cursor_id` (optional): Ignored, for compatibility with _OpReply.
          - `codec_options` (optional): an instance of
            :class:`~bson.codec_options.CodecOptions`
        """
        # If _OpMsg is in-use, this cannot be a legacy response.
        assert not legacy_response
        return bson._decode_all_selective(self.payload_document, codec_options, user_fields)

    def command_response(self, codec_options):
        """Unpack a command response."""
        return self.unpack_response(codec_options=codec_options)[0]

    def raw_command_response(self):
        """Return the bytes of the command response."""
        return self.payload_document

    @property
    def more_to_come(self):
        """Is the moreToCome bit set on this response?"""
        return self.flags & self.MORE_TO_COME

    @classmethod
    def unpack(cls, msg):
        """Construct an _OpMsg from raw bytes."""
        flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg)
        if flags != 0:
            if flags & cls.CHECKSUM_PRESENT:
                raise ProtocolError("Unsupported OP_MSG flag checksumPresent: 0x%x" % (flags,))

            # XOR is nonzero iff flags != MORE_TO_COME exactly, i.e. any
            # other bit combination is rejected.
            if flags ^ cls.MORE_TO_COME:
                raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,))
        if first_payload_type != 0:
            raise ProtocolError("Unsupported OP_MSG payload type: 0x%x" % (first_payload_type,))

        # 5 = 4 flag bytes + 1 payload-type byte before the first section.
        if len(msg) != first_payload_size + 5:
            raise ProtocolError("Unsupported OP_MSG reply: >1 section")

        payload_document = msg[5:]
        return cls(flags, payload_document)
1435
+
1436
+
1437
# Dispatch table: server reply op code -> parser for that message type.
_UNPACK_REPLY = {
    _OpReply.OP_CODE: _OpReply.unpack,
    _OpMsg.OP_CODE: _OpMsg.unpack,
}