sensu-plugins-mongodb-mrtrotl 1.4.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +1 -0
- data/LICENSE +22 -0
- data/README.md +27 -0
- data/bin/check-mongodb-metric.rb +144 -0
- data/bin/check-mongodb-query-count.rb +267 -0
- data/bin/check-mongodb.py +1644 -0
- data/bin/check-mongodb.rb +5 -0
- data/bin/metrics-mongodb-replication.rb +254 -0
- data/bin/metrics-mongodb.rb +133 -0
- data/lib/bson/__init__.py +1347 -0
- data/lib/bson/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/_helpers.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/binary.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/code.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/codec_options.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/dbref.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/decimal128.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/int64.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/json_util.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/max_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/min_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/objectid.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/raw_bson.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/regex.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/son.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/timestamp.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/tz_util.cpython-310.pyc +0 -0
- data/lib/bson/_cbson.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/bson/_helpers.py +41 -0
- data/lib/bson/binary.py +364 -0
- data/lib/bson/code.py +101 -0
- data/lib/bson/codec_options.py +414 -0
- data/lib/bson/codec_options.pyi +100 -0
- data/lib/bson/dbref.py +133 -0
- data/lib/bson/decimal128.py +314 -0
- data/lib/bson/errors.py +35 -0
- data/lib/bson/int64.py +39 -0
- data/lib/bson/json_util.py +874 -0
- data/lib/bson/max_key.py +55 -0
- data/lib/bson/min_key.py +55 -0
- data/lib/bson/objectid.py +286 -0
- data/lib/bson/py.typed +2 -0
- data/lib/bson/raw_bson.py +175 -0
- data/lib/bson/regex.py +135 -0
- data/lib/bson/son.py +208 -0
- data/lib/bson/timestamp.py +124 -0
- data/lib/bson/tz_util.py +52 -0
- data/lib/gridfs/__init__.py +1015 -0
- data/lib/gridfs/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/grid_file.cpython-310.pyc +0 -0
- data/lib/gridfs/errors.py +33 -0
- data/lib/gridfs/grid_file.py +907 -0
- data/lib/gridfs/py.typed +2 -0
- data/lib/pymongo/__init__.py +185 -0
- data/lib/pymongo/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/_csot.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/aggregation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth_aws.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/bulk.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/change_stream.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_session.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collection.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/command_cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/common.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/compression_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/daemon.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/database.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/driver_info.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/event_loggers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/hello.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/helpers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/max_staleness_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/message.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/mongo_client.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitoring.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/network.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_cache.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/operations.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/periodic_executor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pool.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pyopenssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_preferences.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/response.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/results.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/saslprep.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_api.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_type.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/settings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/socket_checker.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/srv_resolver.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/typings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/uri_parser.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/write_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/_cmessage.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/pymongo/_csot.py +118 -0
- data/lib/pymongo/aggregation.py +229 -0
- data/lib/pymongo/auth.py +549 -0
- data/lib/pymongo/auth_aws.py +94 -0
- data/lib/pymongo/bulk.py +513 -0
- data/lib/pymongo/change_stream.py +457 -0
- data/lib/pymongo/client_options.py +302 -0
- data/lib/pymongo/client_session.py +1112 -0
- data/lib/pymongo/collation.py +224 -0
- data/lib/pymongo/collection.py +3204 -0
- data/lib/pymongo/command_cursor.py +353 -0
- data/lib/pymongo/common.py +984 -0
- data/lib/pymongo/compression_support.py +149 -0
- data/lib/pymongo/cursor.py +1345 -0
- data/lib/pymongo/daemon.py +141 -0
- data/lib/pymongo/database.py +1202 -0
- data/lib/pymongo/driver_info.py +42 -0
- data/lib/pymongo/encryption.py +884 -0
- data/lib/pymongo/encryption_options.py +221 -0
- data/lib/pymongo/errors.py +365 -0
- data/lib/pymongo/event_loggers.py +221 -0
- data/lib/pymongo/hello.py +219 -0
- data/lib/pymongo/helpers.py +259 -0
- data/lib/pymongo/max_staleness_selectors.py +114 -0
- data/lib/pymongo/message.py +1440 -0
- data/lib/pymongo/mongo_client.py +2144 -0
- data/lib/pymongo/monitor.py +440 -0
- data/lib/pymongo/monitoring.py +1801 -0
- data/lib/pymongo/network.py +311 -0
- data/lib/pymongo/ocsp_cache.py +87 -0
- data/lib/pymongo/ocsp_support.py +372 -0
- data/lib/pymongo/operations.py +507 -0
- data/lib/pymongo/periodic_executor.py +183 -0
- data/lib/pymongo/pool.py +1660 -0
- data/lib/pymongo/py.typed +2 -0
- data/lib/pymongo/pyopenssl_context.py +383 -0
- data/lib/pymongo/read_concern.py +75 -0
- data/lib/pymongo/read_preferences.py +609 -0
- data/lib/pymongo/response.py +109 -0
- data/lib/pymongo/results.py +217 -0
- data/lib/pymongo/saslprep.py +113 -0
- data/lib/pymongo/server.py +247 -0
- data/lib/pymongo/server_api.py +170 -0
- data/lib/pymongo/server_description.py +285 -0
- data/lib/pymongo/server_selectors.py +153 -0
- data/lib/pymongo/server_type.py +32 -0
- data/lib/pymongo/settings.py +159 -0
- data/lib/pymongo/socket_checker.py +104 -0
- data/lib/pymongo/srv_resolver.py +126 -0
- data/lib/pymongo/ssl_context.py +39 -0
- data/lib/pymongo/ssl_support.py +99 -0
- data/lib/pymongo/topology.py +890 -0
- data/lib/pymongo/topology_description.py +639 -0
- data/lib/pymongo/typings.py +39 -0
- data/lib/pymongo/uri_parser.py +624 -0
- data/lib/pymongo/write_concern.py +129 -0
- data/lib/pymongo-4.2.0.dist-info/INSTALLER +1 -0
- data/lib/pymongo-4.2.0.dist-info/LICENSE +201 -0
- data/lib/pymongo-4.2.0.dist-info/METADATA +250 -0
- data/lib/pymongo-4.2.0.dist-info/RECORD +167 -0
- data/lib/pymongo-4.2.0.dist-info/REQUESTED +0 -0
- data/lib/pymongo-4.2.0.dist-info/WHEEL +6 -0
- data/lib/pymongo-4.2.0.dist-info/top_level.txt +3 -0
- data/lib/sensu-plugins-mongodb/metrics.rb +391 -0
- data/lib/sensu-plugins-mongodb/version.rb +9 -0
- data/lib/sensu-plugins-mongodb.rb +1 -0
- metadata +407 -0
@@ -0,0 +1,1345 @@
|
|
1
|
+
# Copyright 2009-present MongoDB, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""Cursor class to iterate over Mongo query results."""
|
16
|
+
import copy
|
17
|
+
import threading
|
18
|
+
import warnings
|
19
|
+
from collections import deque
|
20
|
+
from typing import (
|
21
|
+
TYPE_CHECKING,
|
22
|
+
Any,
|
23
|
+
Dict,
|
24
|
+
Generic,
|
25
|
+
Iterable,
|
26
|
+
List,
|
27
|
+
Mapping,
|
28
|
+
NoReturn,
|
29
|
+
Optional,
|
30
|
+
Sequence,
|
31
|
+
Tuple,
|
32
|
+
Union,
|
33
|
+
cast,
|
34
|
+
overload,
|
35
|
+
)
|
36
|
+
|
37
|
+
from bson import RE_TYPE, _convert_raw_document_lists_to_streams
|
38
|
+
from bson.code import Code
|
39
|
+
from bson.son import SON
|
40
|
+
from pymongo import helpers
|
41
|
+
from pymongo.collation import validate_collation_or_none
|
42
|
+
from pymongo.common import (
|
43
|
+
validate_boolean,
|
44
|
+
validate_is_document_type,
|
45
|
+
validate_is_mapping,
|
46
|
+
)
|
47
|
+
from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure
|
48
|
+
from pymongo.message import (
|
49
|
+
_CursorAddress,
|
50
|
+
_GetMore,
|
51
|
+
_Query,
|
52
|
+
_RawBatchGetMore,
|
53
|
+
_RawBatchQuery,
|
54
|
+
)
|
55
|
+
from pymongo.response import PinnedResponse
|
56
|
+
from pymongo.typings import _CollationIn, _DocumentType
|
57
|
+
|
58
|
+
# Server error codes after which the cursor is already dead on the server,
# so the client must not send a killCursors command for it.
_CURSOR_CLOSED_ERRORS = frozenset(
    {
        43,  # CursorNotFound
        50,  # MaxTimeMSExpired
        175,  # QueryPlanKilled
        237,  # CursorKilled
        # The remaining codes are seen on tailable cursors when the capped
        # collection rolled over, per server version:
        # MongoDB 2.6:
        # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0}
        28617,
        # MongoDB 3.0:
        # {'$err': 'getMore executor error: UnknownError no details available',
        # 'code': 17406, 'ok': 0}
        17406,
        # MongoDB 3.2 + 3.4:
        # {'ok': 0.0, 'errmsg': 'GetMore command executor error:
        # CappedPositionLost: CollectionScan died due to failure to restore
        # tailable cursor position. Last seen record id: RecordId(3)',
        # 'code': 96}
        96,
        # MongoDB 3.6+:
        # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to
        # restore tailable cursor position. Last seen record id: RecordId(3)"',
        # 'code': 136, 'codeName': 'CappedPositionLost'}
        136,
    }
)

# Bit values for the legacy OP_QUERY wire-protocol flags.
_QUERY_OPTIONS = {
    "tailable_cursor": 2,
    "secondary_okay": 4,
    "oplog_replay": 8,
    "no_timeout": 16,
    "await_data": 32,
    "exhaust": 64,
    "partial": 128,
}
|
98
|
+
|
99
|
+
|
100
|
+
class CursorType(object):
    """Namespace of the cursor kinds a query can request.

    Each value is a combination of legacy OP_QUERY flag bits drawn from
    ``_QUERY_OPTIONS``.
    """

    NON_TAILABLE = 0
    """The standard cursor type."""

    TAILABLE = _QUERY_OPTIONS["tailable_cursor"]
    """The tailable cursor type.

    Tailable cursors are only for use with capped collections. They are not
    closed when the last data is retrieved but are kept open and the cursor
    location marks the final document position. If more data is received
    iteration of the cursor will continue from the last document received.
    """

    TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"]
    """A tailable cursor with the await option set.

    Creates a tailable cursor that will wait for a few seconds after returning
    the full result set so that it can capture and return additional data added
    during the query.
    """

    EXHAUST = _QUERY_OPTIONS["exhaust"]
    """An exhaust cursor.

    MongoDB will stream batched results to the client without waiting for the
    client to request each batch, reducing latency.
    """
|
127
|
+
|
128
|
+
|
129
|
+
class _SocketManager(object):
    """Used with exhaust cursors to ensure the socket is returned."""

    def __init__(self, sock, more_to_come):
        # Pinned socket plus the server's exhaust "moreToCome" flag.
        self.sock = sock
        self.more_to_come = more_to_come
        self.closed = False
        self.lock = threading.Lock()

    def update_exhaust(self, more_to_come):
        """Record whether the server will stream additional batches."""
        self.more_to_come = more_to_come

    def close(self):
        """Return this instance's socket to the connection pool."""
        if self.closed:
            return
        self.closed = True
        self.sock.unpin()
        self.sock = None
|
147
|
+
|
148
|
+
|
149
|
+
# A sort specification: a sequence of (key, direction-or-expression) pairs,
# e.g. [("field", 1)]; the value may also be a string or mapping (e.g. $meta).
_Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]]
# An index hint: either an index name or a sort-style key specification.
_Hint = Union[str, _Sort]
|
151
|
+
|
152
|
+
|
153
|
+
if TYPE_CHECKING:
|
154
|
+
from pymongo.client_session import ClientSession
|
155
|
+
from pymongo.collection import Collection
|
156
|
+
|
157
|
+
|
158
|
+
class Cursor(Generic[_DocumentType]):
    """A cursor / iterator over Mongo query results."""

    # Message classes used to build the initial query and subsequent getMore
    # requests. NOTE(review): presumably overridden with _RawBatchQuery /
    # _RawBatchGetMore by raw-batch cursor subclasses — confirm outside this view.
    _query_class = _Query
    _getmore_class = _GetMore
|
163
|
+
|
164
|
+
    def __init__(
        self,
        collection: "Collection[_DocumentType]",
        filter: Optional[Mapping[str, Any]] = None,
        projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
        skip: int = 0,
        limit: int = 0,
        no_cursor_timeout: bool = False,
        cursor_type: int = CursorType.NON_TAILABLE,
        sort: Optional[_Sort] = None,
        allow_partial_results: bool = False,
        oplog_replay: bool = False,
        batch_size: int = 0,
        collation: Optional[_CollationIn] = None,
        hint: Optional[_Hint] = None,
        max_scan: Optional[int] = None,
        max_time_ms: Optional[int] = None,
        max: Optional[_Sort] = None,
        min: Optional[_Sort] = None,
        return_key: Optional[bool] = None,
        show_record_id: Optional[bool] = None,
        snapshot: Optional[bool] = None,
        comment: Optional[Any] = None,
        session: Optional["ClientSession"] = None,
        allow_disk_use: Optional[bool] = None,
        # Annotation improved: `let` is validated with
        # validate_is_document_type below, so it is a document, not a bool.
        let: Optional[Mapping[str, Any]] = None,
    ) -> None:
        """Create a new cursor.

        Should not be called directly by application developers - see
        :meth:`~pymongo.collection.Collection.find` instead.

        .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
        """
        # Initialize all attributes used in __del__ before possibly raising
        # an error to avoid attribute errors during garbage collection.
        self.__collection: Collection[_DocumentType] = collection
        self.__id: Any = None
        self.__exhaust = False
        self.__sock_mgr: Any = None
        self.__killed = False
        self.__session: Optional["ClientSession"]

        if session:
            self.__session = session
            self.__explicit_session = True
        else:
            self.__session = None
            self.__explicit_session = False

        # Validate user-supplied options before storing anything further.
        spec: Mapping[str, Any] = filter or {}
        validate_is_mapping("filter", spec)
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int")
        validate_boolean("no_cursor_timeout", no_cursor_timeout)
        if no_cursor_timeout and not self.__explicit_session:
            # Without an explicit session the server may still reap the
            # cursor after its session idle timeout, despite noCursorTimeout.
            warnings.warn(
                "use an explicit session with no_cursor_timeout=True "
                "otherwise the cursor may still timeout after "
                "30 minutes, for more info see "
                "https://mongodb.com/docs/v4.4/reference/method/"
                "cursor.noCursorTimeout/"
                "#session-idle-timeout-overrides-nocursortimeout",
                UserWarning,
                stacklevel=2,
            )
        if cursor_type not in (
            CursorType.NON_TAILABLE,
            CursorType.TAILABLE,
            CursorType.TAILABLE_AWAIT,
            CursorType.EXHAUST,
        ):
            raise ValueError("not a valid value for cursor_type")
        validate_boolean("allow_partial_results", allow_partial_results)
        validate_boolean("oplog_replay", oplog_replay)
        if not isinstance(batch_size, int):
            raise TypeError("batch_size must be an integer")
        if batch_size < 0:
            raise ValueError("batch_size must be >= 0")
        # Only set if allow_disk_use is provided by the user, else None.
        if allow_disk_use is not None:
            allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)

        if projection is not None:
            # Normalize an iterable of field names into a projection document.
            projection = helpers._fields_list_to_dict(projection, "projection")

        if let is not None:
            validate_is_document_type("let", let)

        self.__let = let
        self.__spec = spec
        self.__has_filter = filter is not None
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__max_scan = max_scan
        self.__explain = False
        self.__comment = comment
        self.__max_time_ms = max_time_ms
        self.__max_await_time_ms: Optional[int] = None
        self.__max: Optional[Union[SON[Any, Any], _Sort]] = max
        self.__min: Optional[Union[SON[Any, Any], _Sort]] = min
        self.__collation = validate_collation_or_none(collation)
        self.__return_key = return_key
        self.__show_record_id = show_record_id
        self.__allow_disk_use = allow_disk_use
        self.__snapshot = snapshot
        self.__set_hint(hint)

        # Exhaust cursor support
        if cursor_type == CursorType.EXHAUST:
            if self.__collection.database.client.is_mongos:
                raise InvalidOperation("Exhaust cursors are not supported by mongos")
            if limit:
                raise InvalidOperation("Can't use limit and exhaust together.")
            self.__exhaust = True

        # This is ugly. People want to be able to do cursor[5:5] and
        # get an empty result set (old behavior was an
        # exception). It's hard to do that right, though, because the
        # server uses limit(0) to mean 'no limit'. So we set __empty
        # in that case and check for it when iterating. We also unset
        # it anytime we change __limit.
        self.__empty = False

        self.__data: deque = deque()
        self.__address = None
        self.__retrieved = 0

        self.__codec_options = collection.codec_options
        # Read preference is set when the initial find is sent.
        self.__read_preference = None
        self.__read_concern = collection.read_concern

        self.__query_flags = cursor_type
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
        if allow_partial_results:
            self.__query_flags |= _QUERY_OPTIONS["partial"]
        if oplog_replay:
            self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

        # The namespace to use for find/getMore commands.
        self.__dbname = collection.database.name
        self.__collname = collection.name
|
313
|
+
|
314
|
+
@property
|
315
|
+
def collection(self) -> "Collection[_DocumentType]":
|
316
|
+
"""The :class:`~pymongo.collection.Collection` that this
|
317
|
+
:class:`Cursor` is iterating.
|
318
|
+
"""
|
319
|
+
return self.__collection
|
320
|
+
|
321
|
+
@property
|
322
|
+
def retrieved(self) -> int:
|
323
|
+
"""The number of documents retrieved so far."""
|
324
|
+
return self.__retrieved
|
325
|
+
|
326
|
+
    def __del__(self) -> None:
        # Best-effort cleanup at garbage collection; __die tolerates a
        # cursor whose __init__ never ran to completion.
        self.__die()
|
328
|
+
|
329
|
+
def rewind(self) -> "Cursor[_DocumentType]":
|
330
|
+
"""Rewind this cursor to its unevaluated state.
|
331
|
+
|
332
|
+
Reset this cursor if it has been partially or completely evaluated.
|
333
|
+
Any options that are present on the cursor will remain in effect.
|
334
|
+
Future iterating performed on this cursor will cause new queries to
|
335
|
+
be sent to the server, even if the resultant data has already been
|
336
|
+
retrieved by this cursor.
|
337
|
+
"""
|
338
|
+
self.close()
|
339
|
+
self.__data = deque()
|
340
|
+
self.__id = None
|
341
|
+
self.__address = None
|
342
|
+
self.__retrieved = 0
|
343
|
+
self.__killed = False
|
344
|
+
|
345
|
+
return self
|
346
|
+
|
347
|
+
def clone(self) -> "Cursor[_DocumentType]":
|
348
|
+
"""Get a clone of this cursor.
|
349
|
+
|
350
|
+
Returns a new Cursor instance with options matching those that have
|
351
|
+
been set on the current instance. The clone will be completely
|
352
|
+
unevaluated, even if the current instance has been partially or
|
353
|
+
completely evaluated.
|
354
|
+
"""
|
355
|
+
return self._clone(True)
|
356
|
+
|
357
|
+
def _clone(self, deepcopy=True, base=None):
|
358
|
+
"""Internal clone helper."""
|
359
|
+
if not base:
|
360
|
+
if self.__explicit_session:
|
361
|
+
base = self._clone_base(self.__session)
|
362
|
+
else:
|
363
|
+
base = self._clone_base(None)
|
364
|
+
|
365
|
+
values_to_clone = (
|
366
|
+
"spec",
|
367
|
+
"projection",
|
368
|
+
"skip",
|
369
|
+
"limit",
|
370
|
+
"max_time_ms",
|
371
|
+
"max_await_time_ms",
|
372
|
+
"comment",
|
373
|
+
"max",
|
374
|
+
"min",
|
375
|
+
"ordering",
|
376
|
+
"explain",
|
377
|
+
"hint",
|
378
|
+
"batch_size",
|
379
|
+
"max_scan",
|
380
|
+
"query_flags",
|
381
|
+
"collation",
|
382
|
+
"empty",
|
383
|
+
"show_record_id",
|
384
|
+
"return_key",
|
385
|
+
"allow_disk_use",
|
386
|
+
"snapshot",
|
387
|
+
"exhaust",
|
388
|
+
"has_filter",
|
389
|
+
)
|
390
|
+
data = dict(
|
391
|
+
(k, v)
|
392
|
+
for k, v in self.__dict__.items()
|
393
|
+
if k.startswith("_Cursor__") and k[9:] in values_to_clone
|
394
|
+
)
|
395
|
+
if deepcopy:
|
396
|
+
data = self._deepcopy(data)
|
397
|
+
base.__dict__.update(data)
|
398
|
+
return base
|
399
|
+
|
400
|
+
def _clone_base(self, session):
|
401
|
+
"""Creates an empty Cursor object for information to be copied into."""
|
402
|
+
return self.__class__(self.__collection, session=session)
|
403
|
+
|
404
|
+
    def __die(self, synchronous=False):
        """Closes this cursor.

        When *synchronous* is true the server-side cursor is cleaned up on
        this thread; otherwise cleanup is handed off to the client.
        """
        try:
            already_killed = self.__killed
        except AttributeError:
            # __init__ did not run to completion (or at all).
            return

        self.__killed = True
        if self.__id and not already_killed:
            # A live server-side cursor exists: pass its id and address so
            # the client can send killCursors.
            cursor_id = self.__id
            address = _CursorAddress(self.__address, "%s.%s" % (self.__dbname, self.__collname))
        else:
            # Skip killCursors.
            cursor_id = 0
            address = None
        self.__collection.database.client._cleanup_cursor(
            synchronous,
            cursor_id,
            address,
            self.__sock_mgr,
            self.__session,
            self.__explicit_session,
        )
        # Only drop implicit sessions; explicit ones are owned by the caller.
        if not self.__explicit_session:
            self.__session = None
        self.__sock_mgr = None
|
431
|
+
|
432
|
+
def close(self) -> None:
|
433
|
+
"""Explicitly close / kill this cursor."""
|
434
|
+
self.__die(True)
|
435
|
+
|
436
|
+
    def __query_spec(self):
        """Get the spec to use for a query.

        Merges the cursor's modifier options ($orderby, $hint, ...) into the
        filter, wrapping the filter in $query when needed.
        """
        operators = {}
        if self.__ordering:
            operators["$orderby"] = self.__ordering
        if self.__explain:
            operators["$explain"] = True
        if self.__hint:
            operators["$hint"] = self.__hint
        if self.__let:
            operators["let"] = self.__let
        if self.__comment:
            operators["$comment"] = self.__comment
        if self.__max_scan:
            operators["$maxScan"] = self.__max_scan
        if self.__max_time_ms is not None:
            operators["$maxTimeMS"] = self.__max_time_ms
        if self.__max:
            operators["$max"] = self.__max
        if self.__min:
            operators["$min"] = self.__min
        if self.__return_key is not None:
            operators["$returnKey"] = self.__return_key
        if self.__show_record_id is not None:
            # This is upgraded to showRecordId for MongoDB 3.2+ "find" command.
            operators["$showDiskLoc"] = self.__show_record_id
        if self.__snapshot is not None:
            operators["$snapshot"] = self.__snapshot

        if operators:
            # Make a shallow copy so we can cleanly rewind or clone.
            spec = copy.copy(self.__spec)

            # Allow-listed commands must be wrapped in $query.
            if "$query" not in spec:
                # $query has to come first
                spec = SON([("$query", spec)])

            if not isinstance(spec, SON):
                # Ensure the spec is SON. As order is important this will
                # ensure its set before merging in any extra operators.
                spec = SON(spec)

            spec.update(operators)
            return spec
        # Have to wrap with $query if "query" is the first key.
        # We can't just use $query anytime "query" is a key as
        # that breaks commands like count and find_and_modify.
        # Checking spec.keys()[0] covers the case that the spec
        # was passed as an instance of SON or OrderedDict.
        elif "query" in self.__spec and (
            len(self.__spec) == 1 or next(iter(self.__spec)) == "query"
        ):
            return SON({"$query": self.__spec})

        return self.__spec
|
492
|
+
|
493
|
+
    def __check_okay_to_chain(self):
        """Check if it is okay to chain more options onto this cursor."""
        # Once any document has been retrieved, or the server has assigned a
        # cursor id, the query has executed and options can no longer change.
        if self.__retrieved or self.__id is not None:
            raise InvalidOperation("cannot set options after executing query")
|
497
|
+
|
498
|
+
def add_option(self, mask: int) -> "Cursor[_DocumentType]":
|
499
|
+
"""Set arbitrary query flags using a bitmask.
|
500
|
+
|
501
|
+
To set the tailable flag:
|
502
|
+
cursor.add_option(2)
|
503
|
+
"""
|
504
|
+
if not isinstance(mask, int):
|
505
|
+
raise TypeError("mask must be an int")
|
506
|
+
self.__check_okay_to_chain()
|
507
|
+
|
508
|
+
if mask & _QUERY_OPTIONS["exhaust"]:
|
509
|
+
if self.__limit:
|
510
|
+
raise InvalidOperation("Can't use limit and exhaust together.")
|
511
|
+
if self.__collection.database.client.is_mongos:
|
512
|
+
raise InvalidOperation("Exhaust cursors are not supported by mongos")
|
513
|
+
self.__exhaust = True
|
514
|
+
|
515
|
+
self.__query_flags |= mask
|
516
|
+
return self
|
517
|
+
|
518
|
+
def remove_option(self, mask: int) -> "Cursor[_DocumentType]":
|
519
|
+
"""Unset arbitrary query flags using a bitmask.
|
520
|
+
|
521
|
+
To unset the tailable flag:
|
522
|
+
cursor.remove_option(2)
|
523
|
+
"""
|
524
|
+
if not isinstance(mask, int):
|
525
|
+
raise TypeError("mask must be an int")
|
526
|
+
self.__check_okay_to_chain()
|
527
|
+
|
528
|
+
if mask & _QUERY_OPTIONS["exhaust"]:
|
529
|
+
self.__exhaust = False
|
530
|
+
|
531
|
+
self.__query_flags &= ~mask
|
532
|
+
return self
|
533
|
+
|
534
|
+
def allow_disk_use(self, allow_disk_use: bool) -> "Cursor[_DocumentType]":
|
535
|
+
"""Specifies whether MongoDB can use temporary disk files while
|
536
|
+
processing a blocking sort operation.
|
537
|
+
|
538
|
+
Raises :exc:`TypeError` if `allow_disk_use` is not a boolean.
|
539
|
+
|
540
|
+
.. note:: `allow_disk_use` requires server version **>= 4.4**
|
541
|
+
|
542
|
+
:Parameters:
|
543
|
+
- `allow_disk_use`: if True, MongoDB may use temporary
|
544
|
+
disk files to store data exceeding the system memory limit while
|
545
|
+
processing a blocking sort operation.
|
546
|
+
|
547
|
+
.. versionadded:: 3.11
|
548
|
+
"""
|
549
|
+
if not isinstance(allow_disk_use, bool):
|
550
|
+
raise TypeError("allow_disk_use must be a bool")
|
551
|
+
self.__check_okay_to_chain()
|
552
|
+
|
553
|
+
self.__allow_disk_use = allow_disk_use
|
554
|
+
return self
|
555
|
+
|
556
|
+
def limit(self, limit: int) -> "Cursor[_DocumentType]":
|
557
|
+
"""Limits the number of results to be returned by this cursor.
|
558
|
+
|
559
|
+
Raises :exc:`TypeError` if `limit` is not an integer. Raises
|
560
|
+
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor`
|
561
|
+
has already been used. The last `limit` applied to this cursor
|
562
|
+
takes precedence. A limit of ``0`` is equivalent to no limit.
|
563
|
+
|
564
|
+
:Parameters:
|
565
|
+
- `limit`: the number of results to return
|
566
|
+
|
567
|
+
.. seealso:: The MongoDB documentation on `limit <https://dochub.mongodb.org/core/limit>`_.
|
568
|
+
"""
|
569
|
+
if not isinstance(limit, int):
|
570
|
+
raise TypeError("limit must be an integer")
|
571
|
+
if self.__exhaust:
|
572
|
+
raise InvalidOperation("Can't use limit and exhaust together.")
|
573
|
+
self.__check_okay_to_chain()
|
574
|
+
|
575
|
+
self.__empty = False
|
576
|
+
self.__limit = limit
|
577
|
+
return self
|
578
|
+
|
579
|
+
def batch_size(self, batch_size: int) -> "Cursor[_DocumentType]":
|
580
|
+
"""Limits the number of documents returned in one batch. Each batch
|
581
|
+
requires a round trip to the server. It can be adjusted to optimize
|
582
|
+
performance and limit data transfer.
|
583
|
+
|
584
|
+
.. note:: batch_size can not override MongoDB's internal limits on the
|
585
|
+
amount of data it will return to the client in a single batch (i.e
|
586
|
+
if you set batch size to 1,000,000,000, MongoDB will currently only
|
587
|
+
return 4-16MB of results per batch).
|
588
|
+
|
589
|
+
Raises :exc:`TypeError` if `batch_size` is not an integer.
|
590
|
+
Raises :exc:`ValueError` if `batch_size` is less than ``0``.
|
591
|
+
Raises :exc:`~pymongo.errors.InvalidOperation` if this
|
592
|
+
:class:`Cursor` has already been used. The last `batch_size`
|
593
|
+
applied to this cursor takes precedence.
|
594
|
+
|
595
|
+
:Parameters:
|
596
|
+
- `batch_size`: The size of each batch of results requested.
|
597
|
+
"""
|
598
|
+
if not isinstance(batch_size, int):
|
599
|
+
raise TypeError("batch_size must be an integer")
|
600
|
+
if batch_size < 0:
|
601
|
+
raise ValueError("batch_size must be >= 0")
|
602
|
+
self.__check_okay_to_chain()
|
603
|
+
|
604
|
+
self.__batch_size = batch_size
|
605
|
+
return self
|
606
|
+
|
607
|
+
def skip(self, skip: int) -> "Cursor[_DocumentType]":
    """Skip the first `skip` results of this cursor.

    Raises :exc:`TypeError` if `skip` is not an integer,
    :exc:`ValueError` if it is negative, and
    :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor`
    has already been used. The last `skip` applied takes precedence.

    :Parameters:
      - `skip`: the number of results to skip
    """
    if not isinstance(skip, int):
        raise TypeError("skip must be an integer")
    if skip < 0:
        raise ValueError("skip must be >= 0")
    self.__check_okay_to_chain()
    self.__skip = skip
    return self
|
627
|
+
|
628
|
+
def max_time_ms(self, max_time_ms: Optional[int]) -> "Cursor[_DocumentType]":
    """Set a server-side time limit for this query.

    When the limit is exceeded the operation is aborted and
    :exc:`~pymongo.errors.ExecutionTimeout` is raised; ``None`` means
    no limit.

    Raises :exc:`TypeError` if `max_time_ms` is not an integer or
    ``None``, and :exc:`~pymongo.errors.InvalidOperation` if this
    :class:`Cursor` has already been used.

    :Parameters:
      - `max_time_ms`: the time limit after which the operation is aborted
    """
    if max_time_ms is not None and not isinstance(max_time_ms, int):
        raise TypeError("max_time_ms must be an integer or None")
    self.__check_okay_to_chain()
    self.__max_time_ms = max_time_ms
    return self
|
647
|
+
|
648
|
+
def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> "Cursor[_DocumentType]":
    """Set a time limit for getMore on a TAILABLE_AWAIT cursor.

    Ignored for every other cursor type. Raises :exc:`TypeError` if
    `max_await_time_ms` is not an integer or ``None``, and
    :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor`
    has already been used.

    .. note:: `max_await_time_ms` requires server version **>= 3.2**

    :Parameters:
      - `max_await_time_ms`: the time limit after which the operation is
        aborted

    .. versionadded:: 3.2
    """
    if max_await_time_ms is not None and not isinstance(max_await_time_ms, int):
        raise TypeError("max_await_time_ms must be an integer or None")
    self.__check_okay_to_chain()
    # Silently ignored unless the cursor is tailable-await.
    if self.__query_flags & CursorType.TAILABLE_AWAIT:
        self.__max_await_time_ms = max_await_time_ms
    return self
|
674
|
+
|
675
|
+
@overload
def __getitem__(self, index: int) -> _DocumentType:
    ...

@overload
def __getitem__(self, index: slice) -> "Cursor[_DocumentType]":
    ...

def __getitem__(self, index):
    """Get a single document or a slice of documents from this cursor.

    .. warning:: A :class:`~Cursor` is not a Python :class:`list`; each
       index access or slice runs a new query using skip and limit.
       Never iterate a cursor via index accesses — it is extremely
       inefficient and may return surprising results.

    An integer index (e.g. ``db.test.find()[50]``) returns one document
    and ignores any prior limit; :class:`IndexError` is raised for a
    negative index or one past the end. A slice (e.g.
    ``db.test.find()[20:25]``) returns this cursor with the equivalent
    skip/limit applied, overriding any previously set; slices with a
    step, a negative start, or stop <= start raise :class:`IndexError`.
    Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used.

    :Parameters:
      - `index`: An integer or slice index to be applied to this cursor
    """
    self.__check_okay_to_chain()
    self.__empty = False

    if isinstance(index, slice):
        if index.step is not None:
            raise IndexError("Cursor instances do not support slice steps")

        new_skip = 0
        if index.start is not None:
            if index.start < 0:
                raise IndexError("Cursor instances do not support negative indices")
            new_skip = index.start

        if index.stop is None:
            # Open-ended slice: no limit.
            new_limit = 0
        else:
            new_limit = index.stop - new_skip
            if new_limit < 0:
                raise IndexError(
                    "stop index must be greater than start index for slice %r" % index
                )
            if new_limit == 0:
                # Empty slice: mark the cursor as yielding nothing.
                self.__empty = True

        self.__skip = new_skip
        self.__limit = new_limit
        return self

    if isinstance(index, int):
        if index < 0:
            raise IndexError("Cursor instances do not support negative indices")
        clone = self.clone()
        clone.skip(index + self.__skip)
        clone.limit(-1)  # use a hard limit
        clone.__query_flags &= ~CursorType.TAILABLE_AWAIT  # PYTHON-1371
        for doc in clone:
            return doc
        raise IndexError("no such item for Cursor instance")

    raise TypeError("index %r cannot be applied to Cursor instances" % index)
|
762
|
+
|
763
|
+
def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]":
    """**DEPRECATED** - Limit the number of documents to scan when
    performing the query.

    Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used. Only the last :meth:`max_scan` applied has any
    effect.

    :Parameters:
      - `max_scan`: the maximum number of documents to scan

    .. versionchanged:: 3.7
       Deprecated :meth:`max_scan`. Support for this option is
       deprecated in MongoDB 4.0; use :meth:`max_time_ms` instead to
       limit server side execution time.
    """
    self.__check_okay_to_chain()
    self.__max_scan = max_scan
    return self
|
782
|
+
|
783
|
+
def max(self, spec: _Sort) -> "Cursor[_DocumentType]":
    """Set the exclusive upper index bound (``max`` operator) for this query.

    :meth:`~hint` should also be configured so the expected index is
    used; starting in MongoDB 4.2 :meth:`~hint` is required.

    :Parameters:
      - `spec`: a list of field, limit pairs specifying the exclusive
        upper bound for all keys of a specific index in order.

    .. versionchanged:: 3.8
       Deprecated cursors that use ``max`` without a :meth:`~hint`.

    .. versionadded:: 2.7
    """
    if not isinstance(spec, (list, tuple)):
        raise TypeError("spec must be an instance of list or tuple")
    self.__check_okay_to_chain()
    # SON preserves the key order required for index bounds.
    self.__max = SON(spec)
    return self
|
805
|
+
|
806
|
+
def min(self, spec: _Sort) -> "Cursor[_DocumentType]":
    """Set the inclusive lower index bound (``min`` operator) for this query.

    :meth:`~hint` should also be configured so the expected index is
    used; starting in MongoDB 4.2 :meth:`~hint` is required.

    :Parameters:
      - `spec`: a list of field, limit pairs specifying the inclusive
        lower bound for all keys of a specific index in order.

    .. versionchanged:: 3.8
       Deprecated cursors that use ``min`` without a :meth:`~hint`.

    .. versionadded:: 2.7
    """
    if not isinstance(spec, (list, tuple)):
        raise TypeError("spec must be an instance of list or tuple")
    self.__check_okay_to_chain()
    # SON preserves the key order required for index bounds.
    self.__min = SON(spec)
    return self
|
828
|
+
|
829
|
+
def sort(
    self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None
) -> "Cursor[_DocumentType]":
    """Sort this cursor's results.

    Pass a single field name plus a direction
    (:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`), or a
    list of (key, direction) pairs for a compound sort::

        for doc in collection.find().sort([
                ('field1', pymongo.ASCENDING),
                ('field2', pymongo.DESCENDING)]):
            print(doc)

    Text search results can be sorted by relevance with a
    ``[('score', {'$meta': 'textScore'})]`` sort on a query that
    projects ``{'score': {'$meta': 'textScore'}}``. For more advanced
    text search functionality, see MongoDB's
    `Atlas Search <https://docs.atlas.mongodb.com/atlas-search/>`_.

    Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used. Only the last :meth:`sort` applied has any
    effect.

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the keys to sort on
      - `direction` (optional): only used if `key_or_list` is a single
        key, if not given :data:`~pymongo.ASCENDING` is assumed
    """
    self.__check_okay_to_chain()
    keys = helpers._index_list(key_or_list, direction)
    self.__ordering = helpers._index_document(keys)
    return self
|
876
|
+
|
877
|
+
def distinct(self, key: str) -> List:
    """Return the distinct values for `key` among all documents in the
    result set of this query.

    Raises :class:`TypeError` if `key` is not a :class:`str`. Obeys the
    :attr:`~pymongo.collection.Collection.read_preference` of the
    :class:`~pymongo.collection.Collection` this cursor was created
    from.

    :Parameters:
      - `key`: name of key for which we want to get the distinct values

    .. seealso:: :meth:`pymongo.collection.Collection.distinct`
    """
    # Forward this cursor's filter and relevant options to the
    # collection-level distinct command.
    options: Dict[str, Any] = {}
    if self.__spec:
        options["query"] = self.__spec
    if self.__max_time_ms is not None:
        options["maxTimeMS"] = self.__max_time_ms
    # NOTE(review): truthiness check here (vs ``is not None`` above)
    # mirrors upstream behavior — an empty-string comment is dropped.
    if self.__comment:
        options["comment"] = self.__comment
    if self.__collation is not None:
        options["collation"] = self.__collation
    return self.__collection.distinct(key, session=self.__session, **options)
|
905
|
+
|
906
|
+
def explain(self) -> _DocumentType:
    """Return an explain plan record for this cursor.

    .. note:: Uses the default verbosity mode of the `explain command
       <https://mongodb.com/docs/manual/reference/command/explain/>`_,
       ``allPlansExecution``. For a different verbosity run the explain
       command directly via
       :meth:`~pymongo.database.Database.command`.

    .. seealso:: The MongoDB documentation on `explain <https://dochub.mongodb.org/core/explain>`_.
    """
    c = self.clone()
    c.__explain = True
    # Explains always use a hard (negative) limit so the server closes
    # the cursor after returning the single plan document.
    if c.__limit:
        c.__limit = -abs(c.__limit)
    return next(c)
|
925
|
+
|
926
|
+
def __set_hint(self, index):
    # Normalize a user-supplied hint: ``None`` clears it, a string names
    # an index directly, anything else is an index specifier that must
    # be converted to an index document.
    if index is None:
        self.__hint = None
    elif isinstance(index, str):
        self.__hint = index
    else:
        self.__hint = helpers._index_document(index)
|
935
|
+
|
936
|
+
def hint(self, index: Optional[_Hint]) -> "Cursor[_DocumentType]":
    """Tell the server which index to use for this query.

    Judicious hints can greatly improve query performance. `index`
    should be an index as passed to
    :meth:`~pymongo.collection.Collection.create_index`
    (e.g. ``[('field', ASCENDING)]``) or the name of the index; ``None``
    clears any existing hint. The last hint applied to this cursor
    takes precedence over all others.

    Raises :class:`~pymongo.errors.OperationFailure` if the hinted
    index does not exist on this collection, and
    :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used.

    :Parameters:
      - `index`: index to hint on (as an index specifier)
    """
    self.__check_okay_to_chain()
    self.__set_hint(index)
    return self
|
960
|
+
|
961
|
+
def comment(self, comment: Any) -> "Cursor[_DocumentType]":
    """Attach a 'comment' to this query.

    http://mongodb.com/docs/manual/reference/operator/comment/

    :Parameters:
      - `comment`: A string to attach to the query to help interpret and
        trace the operation in the server logs and in profile data.

    .. versionadded:: 2.7
    """
    self.__check_okay_to_chain()
    self.__comment = comment
    return self
|
975
|
+
|
976
|
+
def where(self, code: Union[str, Code]) -> "Cursor[_DocumentType]":
    """Add a `$where`_ clause to this query.

    `code` must be a :class:`str` or :class:`~bson.code.Code` holding a
    JavaScript expression; it is evaluated for each scanned document
    (*this* refers to the current document) and only documents for
    which it is *true* are returned::

        # Find all documents where field "a" is less than "b" plus "c".
        for doc in db.test.find().where('this.a < (this.b + this.c)'):
            print(doc)

    Raises :class:`TypeError` if `code` is not a string or Code, and
    :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor`
    has already been used. Only the last :meth:`where` call has any
    effect.

    .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code`
       with scope variables. Consider using `$expr`_ instead.

    :Parameters:
      - `code`: JavaScript expression to use as a filter

    .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/
    .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/
    """
    self.__check_okay_to_chain()
    if not isinstance(code, Code):
        code = Code(code)

    # Copy the spec when it came from a user-supplied filter so the
    # caller's document is not mutated.
    spec: Dict[str, Any]
    if self.__has_filter:
        spec = dict(self.__spec)
    else:
        spec = cast(Dict, self.__spec)
    spec["$where"] = code
    self.__spec = spec
    return self
|
1020
|
+
|
1021
|
+
def collation(self, collation: Optional[_CollationIn]) -> "Cursor[_DocumentType]":
    """Attach a :class:`~pymongo.collation.Collation` to this query.

    Raises :exc:`TypeError` if `collation` is not a
    :class:`~pymongo.collation.Collation` or a ``dict``, and
    :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor`
    has already been used. Only the last collation applied has any
    effect.

    :Parameters:
      - `collation`: An instance of :class:`~pymongo.collation.Collation`.
    """
    self.__check_okay_to_chain()
    self.__collation = validate_collation_or_none(collation)
    return self
|
1036
|
+
|
1037
|
+
def __send_message(self, operation):
    """Send a query or getmore operation and handles the response.

    If operation is ``None`` this is an exhaust cursor, which reads
    the next result batch off the exhaust socket instead of
    sending getMore messages to the server.

    Can raise ConnectionFailure.
    """
    client = self.__collection.database.client
    # OP_MSG is required to support exhaust cursors with encryption.
    if client._encrypter and self.__exhaust:
        raise InvalidOperation("exhaust cursors do not support auto encryption")

    try:
        response = client._run_operation(
            operation, self._unpack_response, address=self.__address
        )
    except OperationFailure as exc:
        if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust:
            # Don't send killCursors because the cursor is already closed.
            self.__killed = True
        self.close()
        # If this is a tailable cursor the error is likely
        # due to capped collection roll over. Setting
        # self.__killed to True ensures Cursor.alive will be
        # False. No need to re-raise.
        if (
            exc.code in _CURSOR_CLOSED_ERRORS
            and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]
        ):
            return
        raise
    except ConnectionFailure:
        # Don't send killCursors because the cursor is already closed.
        self.__killed = True
        self.close()
        raise
    except Exception:
        self.close()
        raise

    # Remember which server answered so getMores target the same node.
    self.__address = response.address
    if isinstance(response, PinnedResponse):
        # Exhaust/pinned responses keep the socket checked out for the
        # life of the cursor; hold it in a socket manager.
        if not self.__sock_mgr:
            self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come)

    cmd_name = operation.name
    docs = response.docs
    if response.from_command:
        if cmd_name != "explain":
            cursor = docs[0]["cursor"]
            self.__id = cursor["id"]
            if cmd_name == "find":
                documents = cursor["firstBatch"]
                # Update the namespace used for future getMore commands.
                ns = cursor.get("ns")
                if ns:
                    self.__dbname, self.__collname = ns.split(".", 1)
            else:
                documents = cursor["nextBatch"]
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            # Explain responses carry no server-side cursor.
            self.__id = 0
            self.__data = deque(docs)
            self.__retrieved += len(docs)
    else:
        # Legacy (non-command) wire-protocol reply.
        self.__id = response.data.cursor_id
        self.__data = deque(docs)
        self.__retrieved += response.data.number_returned

    if self.__id == 0:
        # Don't wait for garbage collection to call __del__, return the
        # socket and the session to the pool now.
        self.close()

    # Once the user-requested limit has been satisfied, close eagerly
    # rather than leaving the server-side cursor open.
    if self.__limit and self.__id and self.__limit <= self.__retrieved:
        self.close()
|
1116
|
+
|
1117
|
+
def _unpack_response(
    self, response, cursor_id, codec_options, user_fields=None, legacy_response=False
):
    # Decode a wire-protocol reply into documents; subclasses (e.g.
    # RawBatchCursor) override this to return raw BSON instead.
    return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response)
|
1121
|
+
|
1122
|
+
def _read_preference(self):
    # Resolve lazily on first use and cache, so getMore commands use
    # the same read preference as the initial query.
    if self.__read_preference is None:
        self.__read_preference = self.__collection._read_preference_for(self.session)
    return self.__read_preference
|
1127
|
+
|
1128
|
+
def _refresh(self):
    """Refreshes the cursor with more data from Mongo.

    Returns the length of self.__data after refresh. Will exit early if
    self.__data is already non-empty. Raises OperationFailure when the
    cursor cannot be refreshed due to an error on the query.
    """
    # Nothing to do when buffered docs remain or the cursor is dead.
    if len(self.__data) or self.__killed:
        return len(self.__data)

    # Implicitly create a session if the user did not supply one.
    if not self.__session:
        self.__session = self.__collection.database.client._ensure_session()

    if self.__id is None:  # Query
        if (self.__min or self.__max) and not self.__hint:
            raise InvalidOperation(
                "Passing a 'hint' is required when using the min/max query"
                " option to ensure the query utilizes the correct index"
            )
        # NOTE: argument order must match the _query_class constructor.
        q = self._query_class(
            self.__query_flags,
            self.__collection.database.name,
            self.__collection.name,
            self.__skip,
            self.__query_spec(),
            self.__projection,
            self.__codec_options,
            self._read_preference(),
            self.__limit,
            self.__batch_size,
            self.__read_concern,
            self.__collation,
            self.__session,
            self.__collection.database.client,
            self.__allow_disk_use,
            self.__exhaust,
        )
        self.__send_message(q)
    elif self.__id:  # Get More
        if self.__limit:
            # Never request more than the remaining user-visible docs.
            limit = self.__limit - self.__retrieved
            if self.__batch_size:
                limit = min(limit, self.__batch_size)
        else:
            limit = self.__batch_size
        # Exhaust cursors don't send getMore messages.
        g = self._getmore_class(
            self.__dbname,
            self.__collname,
            limit,
            self.__id,
            self.__codec_options,
            self._read_preference(),
            self.__session,
            self.__collection.database.client,
            self.__max_await_time_ms,
            self.__sock_mgr,
            self.__exhaust,
            self.__comment,
        )
        self.__send_message(g)
    # else: __id == 0 means the server already exhausted the cursor;
    # fall through and report whatever is buffered.

    return len(self.__data)
|
1191
|
+
|
1192
|
+
@property
def alive(self) -> bool:
    """Does this cursor have the potential to return more data?

    Mostly useful with `tailable cursors
    <https://www.mongodb.com/docs/manual/core/tailable-cursors/>`_,
    which stop iterating even though they *may* return more results
    later. With regular cursors, simply use a for loop instead of
    :attr:`alive`.

    .. note:: Even if :attr:`alive` is True, :meth:`next` can raise
       :exc:`StopIteration`. :attr:`alive` can also be True while
       iterating a cursor from a failed server; it becomes False only
       after :meth:`next` fails to retrieve the next batch.
    """
    # Alive while buffered documents remain or the server-side cursor
    # has not been killed.
    return bool(self.__data) or not self.__killed
|
1213
|
+
|
1214
|
+
@property
def cursor_id(self) -> Optional[int]:
    """The server-assigned id of this cursor, or ``None``.

    .. versionadded:: 2.2
    """
    return self.__id
|
1221
|
+
|
1222
|
+
@property
def address(self) -> Optional[Tuple[str, Any]]:
    """The (host, port) of the server used, or None.

    .. versionchanged:: 3.0
       Renamed from "conn_id".
    """
    return self.__address
|
1230
|
+
|
1231
|
+
@property
def session(self) -> Optional["ClientSession"]:
    """The cursor's :class:`~pymongo.client_session.ClientSession`, or None.

    .. versionadded:: 3.6
    """
    # Only sessions supplied explicitly by the caller are exposed;
    # implicitly created ones stay internal.
    return self.__session if self.__explicit_session else None
|
1240
|
+
|
1241
|
+
def __iter__(self) -> "Cursor[_DocumentType]":
    # A cursor is its own iterator.
    return self
|
1243
|
+
|
1244
|
+
def next(self) -> _DocumentType:
    """Advance the cursor and return the next document."""
    if self.__empty:
        raise StopIteration
    # Refresh only when the local buffer is drained.
    if not self.__data and not self._refresh():
        raise StopIteration
    return self.__data.popleft()

__next__ = next
|
1254
|
+
|
1255
|
+
def __enter__(self) -> "Cursor[_DocumentType]":
    # Context-manager entry: hand back the cursor itself.
    return self
|
1257
|
+
|
1258
|
+
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
    # Leaving the ``with`` block always closes the cursor.
    self.close()
|
1260
|
+
|
1261
|
+
def __copy__(self) -> "Cursor[_DocumentType]":
    """Support function for `copy.copy()`.

    .. versionadded:: 2.4
    """
    return self._clone(deepcopy=False)
|
1267
|
+
|
1268
|
+
def __deepcopy__(self, memo: Any) -> Any:
    """Support function for `copy.deepcopy()`.

    .. versionadded:: 2.4
    """
    return self._clone(deepcopy=True)
|
1274
|
+
|
1275
|
+
def _deepcopy(self, x, memo=None):
    """Deepcopy helper for the data dictionary or list.

    Regular expressions cannot be deep copied but as they are immutable
    we don't have to copy them when cloning.
    """
    y: Any
    # Anything without .items() is treated as a sequence.
    is_list = not hasattr(x, "items")
    if is_list:
        y = []
        iterator = enumerate(x)
    else:
        y = {}
        iterator = x.items()

    # Track already-copied containers so shared/cyclic references are
    # preserved instead of duplicated.
    if memo is None:
        memo = {}
    val_id = id(x)
    if val_id in memo:
        return memo[val_id]
    memo[val_id] = y

    for key, value in iterator:
        if isinstance(value, (dict, list)) and not isinstance(value, SON):
            value = self._deepcopy(value, memo)
        elif not isinstance(value, RE_TYPE):
            value = copy.deepcopy(value, memo)

        if is_list:
            y.append(value)
        else:
            if not isinstance(key, RE_TYPE):
                key = copy.deepcopy(key, memo)
            y[key] = value
    return y
|
1307
|
+
|
1308
|
+
|
1309
|
+
class RawBatchCursor(Cursor, Generic[_DocumentType]):
    """A cursor / iterator over raw batches of BSON data from a query result."""

    # Raw-batch variants of the wire-protocol operation classes.
    _query_class = _RawBatchQuery
    _getmore_class = _RawBatchGetMore

    def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs: Any) -> None:
        """Create a new cursor / iterator over raw batches of BSON data.

        Should not be called directly by application developers -
        see :meth:`~pymongo.collection.Collection.find_raw_batches`
        instead.

        .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
        """
        super().__init__(collection, *args, **kwargs)

    def _unpack_response(
        self, response, cursor_id, codec_options, user_fields=None, legacy_response=False
    ):
        # Return undecoded BSON batches instead of parsed documents.
        raw_response = response.raw_response(cursor_id, user_fields=user_fields)
        if not legacy_response:
            # OP_MSG returns firstBatch/nextBatch documents as a BSON array
            # Re-assemble the array of documents into a document stream
            _convert_raw_document_lists_to_streams(raw_response[0])
        return raw_response

    def explain(self) -> _DocumentType:
        """Returns an explain plan record for this cursor.

        .. seealso:: The MongoDB documentation on `explain <https://dochub.mongodb.org/core/explain>`_.
        """
        # Run the explain on a plain decoding Cursor so the plan comes
        # back as a document rather than raw BSON.
        clone = self._clone(deepcopy=True, base=Cursor(self.collection))
        return clone.explain()

    def __getitem__(self, index: Any) -> NoReturn:
        raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor")