sensu-plugins-mongodb-mrtrotl 1.4.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +1 -0
- data/LICENSE +22 -0
- data/README.md +27 -0
- data/bin/check-mongodb-metric.rb +144 -0
- data/bin/check-mongodb-query-count.rb +267 -0
- data/bin/check-mongodb.py +1644 -0
- data/bin/check-mongodb.rb +5 -0
- data/bin/metrics-mongodb-replication.rb +254 -0
- data/bin/metrics-mongodb.rb +133 -0
- data/lib/bson/__init__.py +1347 -0
- data/lib/bson/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/_helpers.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/binary.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/code.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/codec_options.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/dbref.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/decimal128.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/int64.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/json_util.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/max_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/min_key.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/objectid.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/raw_bson.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/regex.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/son.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/timestamp.cpython-310.pyc +0 -0
- data/lib/bson/__pycache__/tz_util.cpython-310.pyc +0 -0
- data/lib/bson/_cbson.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/bson/_helpers.py +41 -0
- data/lib/bson/binary.py +364 -0
- data/lib/bson/code.py +101 -0
- data/lib/bson/codec_options.py +414 -0
- data/lib/bson/codec_options.pyi +100 -0
- data/lib/bson/dbref.py +133 -0
- data/lib/bson/decimal128.py +314 -0
- data/lib/bson/errors.py +35 -0
- data/lib/bson/int64.py +39 -0
- data/lib/bson/json_util.py +874 -0
- data/lib/bson/max_key.py +55 -0
- data/lib/bson/min_key.py +55 -0
- data/lib/bson/objectid.py +286 -0
- data/lib/bson/py.typed +2 -0
- data/lib/bson/raw_bson.py +175 -0
- data/lib/bson/regex.py +135 -0
- data/lib/bson/son.py +208 -0
- data/lib/bson/timestamp.py +124 -0
- data/lib/bson/tz_util.py +52 -0
- data/lib/gridfs/__init__.py +1015 -0
- data/lib/gridfs/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/gridfs/__pycache__/grid_file.cpython-310.pyc +0 -0
- data/lib/gridfs/errors.py +33 -0
- data/lib/gridfs/grid_file.py +907 -0
- data/lib/gridfs/py.typed +2 -0
- data/lib/pymongo/__init__.py +185 -0
- data/lib/pymongo/__pycache__/__init__.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/_csot.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/aggregation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/auth_aws.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/bulk.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/change_stream.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/client_session.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collation.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/collection.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/command_cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/common.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/compression_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/cursor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/daemon.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/database.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/driver_info.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/encryption_options.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/errors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/event_loggers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/hello.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/helpers.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/max_staleness_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/message.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/mongo_client.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/monitoring.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/network.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_cache.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ocsp_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/operations.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/periodic_executor.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pool.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/pyopenssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/read_preferences.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/response.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/results.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/saslprep.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_api.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_selectors.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/server_type.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/settings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/socket_checker.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/srv_resolver.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_context.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/ssl_support.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/topology_description.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/typings.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/uri_parser.cpython-310.pyc +0 -0
- data/lib/pymongo/__pycache__/write_concern.cpython-310.pyc +0 -0
- data/lib/pymongo/_cmessage.cpython-310-x86_64-linux-gnu.so +0 -0
- data/lib/pymongo/_csot.py +118 -0
- data/lib/pymongo/aggregation.py +229 -0
- data/lib/pymongo/auth.py +549 -0
- data/lib/pymongo/auth_aws.py +94 -0
- data/lib/pymongo/bulk.py +513 -0
- data/lib/pymongo/change_stream.py +457 -0
- data/lib/pymongo/client_options.py +302 -0
- data/lib/pymongo/client_session.py +1112 -0
- data/lib/pymongo/collation.py +224 -0
- data/lib/pymongo/collection.py +3204 -0
- data/lib/pymongo/command_cursor.py +353 -0
- data/lib/pymongo/common.py +984 -0
- data/lib/pymongo/compression_support.py +149 -0
- data/lib/pymongo/cursor.py +1345 -0
- data/lib/pymongo/daemon.py +141 -0
- data/lib/pymongo/database.py +1202 -0
- data/lib/pymongo/driver_info.py +42 -0
- data/lib/pymongo/encryption.py +884 -0
- data/lib/pymongo/encryption_options.py +221 -0
- data/lib/pymongo/errors.py +365 -0
- data/lib/pymongo/event_loggers.py +221 -0
- data/lib/pymongo/hello.py +219 -0
- data/lib/pymongo/helpers.py +259 -0
- data/lib/pymongo/max_staleness_selectors.py +114 -0
- data/lib/pymongo/message.py +1440 -0
- data/lib/pymongo/mongo_client.py +2144 -0
- data/lib/pymongo/monitor.py +440 -0
- data/lib/pymongo/monitoring.py +1801 -0
- data/lib/pymongo/network.py +311 -0
- data/lib/pymongo/ocsp_cache.py +87 -0
- data/lib/pymongo/ocsp_support.py +372 -0
- data/lib/pymongo/operations.py +507 -0
- data/lib/pymongo/periodic_executor.py +183 -0
- data/lib/pymongo/pool.py +1660 -0
- data/lib/pymongo/py.typed +2 -0
- data/lib/pymongo/pyopenssl_context.py +383 -0
- data/lib/pymongo/read_concern.py +75 -0
- data/lib/pymongo/read_preferences.py +609 -0
- data/lib/pymongo/response.py +109 -0
- data/lib/pymongo/results.py +217 -0
- data/lib/pymongo/saslprep.py +113 -0
- data/lib/pymongo/server.py +247 -0
- data/lib/pymongo/server_api.py +170 -0
- data/lib/pymongo/server_description.py +285 -0
- data/lib/pymongo/server_selectors.py +153 -0
- data/lib/pymongo/server_type.py +32 -0
- data/lib/pymongo/settings.py +159 -0
- data/lib/pymongo/socket_checker.py +104 -0
- data/lib/pymongo/srv_resolver.py +126 -0
- data/lib/pymongo/ssl_context.py +39 -0
- data/lib/pymongo/ssl_support.py +99 -0
- data/lib/pymongo/topology.py +890 -0
- data/lib/pymongo/topology_description.py +639 -0
- data/lib/pymongo/typings.py +39 -0
- data/lib/pymongo/uri_parser.py +624 -0
- data/lib/pymongo/write_concern.py +129 -0
- data/lib/pymongo-4.2.0.dist-info/INSTALLER +1 -0
- data/lib/pymongo-4.2.0.dist-info/LICENSE +201 -0
- data/lib/pymongo-4.2.0.dist-info/METADATA +250 -0
- data/lib/pymongo-4.2.0.dist-info/RECORD +167 -0
- data/lib/pymongo-4.2.0.dist-info/REQUESTED +0 -0
- data/lib/pymongo-4.2.0.dist-info/WHEEL +6 -0
- data/lib/pymongo-4.2.0.dist-info/top_level.txt +3 -0
- data/lib/sensu-plugins-mongodb/metrics.rb +391 -0
- data/lib/sensu-plugins-mongodb/version.rb +9 -0
- data/lib/sensu-plugins-mongodb.rb +1 -0
- metadata +407 -0
@@ -0,0 +1,907 @@
|
|
1
|
+
# Copyright 2009-present MongoDB, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""Tools for representing files stored in GridFS."""
|
16
|
+
import datetime
|
17
|
+
import io
|
18
|
+
import math
|
19
|
+
import os
|
20
|
+
from typing import Any, Iterable, List, Mapping, NoReturn, Optional
|
21
|
+
|
22
|
+
from bson.binary import Binary
|
23
|
+
from bson.int64 import Int64
|
24
|
+
from bson.objectid import ObjectId
|
25
|
+
from bson.son import SON
|
26
|
+
from gridfs.errors import CorruptGridFile, FileExists, NoFile
|
27
|
+
from pymongo import ASCENDING
|
28
|
+
from pymongo.client_session import ClientSession
|
29
|
+
from pymongo.collection import Collection
|
30
|
+
from pymongo.cursor import Cursor
|
31
|
+
from pymongo.errors import (
|
32
|
+
ConfigurationError,
|
33
|
+
CursorNotFound,
|
34
|
+
DuplicateKeyError,
|
35
|
+
InvalidOperation,
|
36
|
+
OperationFailure,
|
37
|
+
)
|
38
|
+
from pymongo.read_preferences import ReadPreference
|
39
|
+
|
40
|
+
_SEEK_SET = os.SEEK_SET
|
41
|
+
_SEEK_CUR = os.SEEK_CUR
|
42
|
+
_SEEK_END = os.SEEK_END
|
43
|
+
|
44
|
+
EMPTY = b""
|
45
|
+
NEWLN = b"\n"
|
46
|
+
|
47
|
+
"""Default chunk size, in bytes."""
|
48
|
+
# Slightly under a power of 2, to work well with server's record allocations.
|
49
|
+
DEFAULT_CHUNK_SIZE = 255 * 1024
|
50
|
+
|
51
|
+
_C_INDEX: SON[str, Any] = SON([("files_id", ASCENDING), ("n", ASCENDING)])
|
52
|
+
_F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
|
53
|
+
|
54
|
+
|
55
|
+
def _grid_in_property(
|
56
|
+
field_name: str,
|
57
|
+
docstring: str,
|
58
|
+
read_only: Optional[bool] = False,
|
59
|
+
closed_only: Optional[bool] = False,
|
60
|
+
) -> Any:
|
61
|
+
"""Create a GridIn property."""
|
62
|
+
|
63
|
+
def getter(self: Any) -> Any:
|
64
|
+
if closed_only and not self._closed:
|
65
|
+
raise AttributeError("can only get %r on a closed file" % field_name)
|
66
|
+
# Protect against PHP-237
|
67
|
+
if field_name == "length":
|
68
|
+
return self._file.get(field_name, 0)
|
69
|
+
return self._file.get(field_name, None)
|
70
|
+
|
71
|
+
def setter(self: Any, value: Any) -> Any:
|
72
|
+
if self._closed:
|
73
|
+
self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}})
|
74
|
+
self._file[field_name] = value
|
75
|
+
|
76
|
+
if read_only:
|
77
|
+
docstring += "\n\nThis attribute is read-only."
|
78
|
+
elif closed_only:
|
79
|
+
docstring = "%s\n\n%s" % (
|
80
|
+
docstring,
|
81
|
+
"This attribute is read-only and "
|
82
|
+
"can only be read after :meth:`close` "
|
83
|
+
"has been called.",
|
84
|
+
)
|
85
|
+
|
86
|
+
if not read_only and not closed_only:
|
87
|
+
return property(getter, setter, doc=docstring)
|
88
|
+
return property(getter, doc=docstring)
|
89
|
+
|
90
|
+
|
91
|
+
def _grid_out_property(field_name: str, docstring: str) -> Any:
|
92
|
+
"""Create a GridOut property."""
|
93
|
+
|
94
|
+
def getter(self: Any) -> Any:
|
95
|
+
self._ensure_file()
|
96
|
+
|
97
|
+
# Protect against PHP-237
|
98
|
+
if field_name == "length":
|
99
|
+
return self._file.get(field_name, 0)
|
100
|
+
return self._file.get(field_name, None)
|
101
|
+
|
102
|
+
docstring += "\n\nThis attribute is read-only."
|
103
|
+
return property(getter, doc=docstring)
|
104
|
+
|
105
|
+
|
106
|
+
def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any:
|
107
|
+
"""Clear the given database/collection object's type registry."""
|
108
|
+
codecopts = entity.codec_options.with_options(type_registry=None)
|
109
|
+
return entity.with_options(codec_options=codecopts, **kwargs)
|
110
|
+
|
111
|
+
|
112
|
+
def _disallow_transactions(session: Optional[ClientSession]) -> None:
    """Raise :class:`InvalidOperation` if *session* has an open transaction.

    GridFS operations span multiple collections (files + chunks) and are
    not supported inside multi-document transactions.
    """
    if not session:
        return
    if session.in_transaction:
        raise InvalidOperation("GridFS does not support multi-document transactions")
|
115
|
+
|
116
|
+
|
117
|
+
class GridIn(object):
    """Class to write data to GridFS."""

    def __init__(
        self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any
    ) -> None:
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file

          - ``"filename"``: human name for the file

          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file

          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 255 kb)

          - ``"encoding"``: encoding used for this file. Any :class:`str`
            that is written to the file will be converted to :class:`bytes`.

        :Parameters:
          - `root_collection`: root collection to write to
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession` to use for all
            commands
          - `**kwargs: Any` (optional): file level options (see above)

        .. versionchanged:: 4.0
           Removed the `disable_md5` parameter. See
           :ref:`removed-gridfs-checksum` for details.

        .. versionchanged:: 3.7
           Added the `disable_md5` parameter.

        .. versionchanged:: 3.6
           Added ``session`` parameter.

        .. versionchanged:: 3.0
           `root_collection` must use an acknowledged
           :attr:`~pymongo.collection.Collection.write_concern`
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an instance of Collection")

        # Unacknowledged writes could silently drop chunks.
        if not root_collection.write_concern.acknowledged:
            raise ConfigurationError("root_collection must use acknowledged write_concern")
        _disallow_transactions(session)

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY)

        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
        # Use object.__setattr__ for internal state: our own __setattr__
        # would otherwise record these names as fields of the file document.
        object.__setattr__(self, "_session", session)
        object.__setattr__(self, "_coll", coll)
        object.__setattr__(self, "_chunks", coll.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", io.BytesIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)

    def __create_index(self, collection: Collection, index_key: Any, unique: bool) -> None:
        # Create `index_key` on `collection`, but only when the collection
        # is empty and no equivalent index already exists.
        doc = collection.find_one(projection={"_id": 1}, session=self._session)
        if doc is None:
            try:
                index_keys = [
                    index_spec["key"]
                    for index_spec in collection.list_indexes(session=self._session)
                ]
            except OperationFailure:
                # listIndexes may be unauthorized; fall through and attempt
                # the create anyway.
                index_keys = []
            if index_key not in index_keys:
                collection.create_index(index_key.items(), unique=unique, session=self._session)

    def __ensure_indexes(self) -> None:
        # Make sure the standard GridFS indexes exist, at most once per
        # GridIn instance.
        if not object.__getattribute__(self, "_ensured_index"):
            _disallow_transactions(self._session)
            self.__create_index(self._coll.files, _F_INDEX, False)
            self.__create_index(self._coll.chunks, _C_INDEX, True)
            object.__setattr__(self, "_ensured_index", True)

    def abort(self) -> None:
        """Remove all chunks/files that may have been uploaded and close."""
        self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session)
        self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session)
        object.__setattr__(self, "_closed", True)

    @property
    def closed(self) -> bool:
        """Is this file closed?"""
        return self._closed

    # File-document fields surfaced as properties; closed_only fields are
    # populated server-side and readable only after close().
    _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True)
    filename: Optional[str] = _grid_in_property("filename", "Name of this file.")
    name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.")
    content_type: Optional[str] = _grid_in_property("contentType", "Mime-type for this file.")
    length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True)
    chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True)
    upload_date: datetime.datetime = _grid_in_property(
        "uploadDate", "Date that this file was uploaded.", closed_only=True
    )
    md5: Optional[str] = _grid_in_property(
        "md5", "MD5 of the contents of this file if an md5 sum was created.", closed_only=True
    )

    _buffer: io.BytesIO
    _closed: bool

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are looked up in the pending file document.
        if name in self._file:
            return self._file[name]
        raise AttributeError("GridIn object has no attribute '%s'" % name)

    def __setattr__(self, name: str, value: Any) -> None:
        # For properties of this instance like _buffer, or descriptors set on
        # the class like filename, use regular __setattr__
        if name in self.__dict__ or name in self.__class__.__dict__:
            object.__setattr__(self, name, value)
        else:
            # All other attributes are part of the document in db.fs.files.
            # Store them to be sent to server on close() or if closed, send
            # them now.
            self._file[name] = value
            if self._closed:
                self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}})

    def __flush_data(self, data: Any) -> None:
        """Flush `data` to a chunk."""
        self.__ensure_indexes()
        if not data:
            return
        assert len(data) <= self.chunk_size

        chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)}

        try:
            self._chunks.insert_one(chunk, session=self._session)
        except DuplicateKeyError:
            # Duplicate (files_id, n) means another writer used our _id.
            self._raise_file_exists(self._file["_id"])
        self._chunk_number += 1
        self._position += len(data)

    def __flush_buffer(self) -> None:
        """Flush the buffer contents out to a chunk."""
        self.__flush_data(self._buffer.getvalue())
        self._buffer.close()
        self._buffer = io.BytesIO()

    def __flush(self) -> Any:
        """Flush the file to the database."""
        try:
            self.__flush_buffer()
            # The GridFS spec says length SHOULD be an Int64.
            self._file["length"] = Int64(self._position)
            self._file["uploadDate"] = datetime.datetime.utcnow()

            return self._coll.files.insert_one(self._file, session=self._session)
        except DuplicateKeyError:
            self._raise_file_exists(self._id)

    def _raise_file_exists(self, file_id: Any) -> NoReturn:
        """Raise a FileExists exception for the given file_id."""
        raise FileExists("file with _id %r already exists" % file_id)

    def close(self) -> None:
        """Flush the file and close it.

        A closed file cannot be written any more. Calling
        :meth:`close` more than once is allowed.
        """
        if not self._closed:
            self.__flush()
            object.__setattr__(self, "_closed", True)

    def read(self, size: int = -1) -> NoReturn:
        # GridIn is write-only.
        raise io.UnsupportedOperation("read")

    def readable(self) -> bool:
        # Part of the file-like interface; GridIn never supports reading.
        return False

    def seekable(self) -> bool:
        # Chunks are written sequentially; seeking is not supported.
        return False

    def write(self, data: Any) -> None:
        """Write data to the file. There is no return value.

        `data` can be either a string of bytes or a file-like object
        (implementing :meth:`read`). If the file has an
        :attr:`encoding` attribute, `data` can also be a
        :class:`str` instance, which will be encoded as
        :attr:`encoding` before being written.

        Due to buffering, the data may not actually be written to the
        database until the :meth:`close` method is called. Raises
        :class:`ValueError` if this file is already closed. Raises
        :class:`TypeError` if `data` is not an instance of
        :class:`bytes`, a file-like object, or an instance of :class:`str`.
        Unicode data is only allowed if the file has an :attr:`encoding`
        attribute.

        :Parameters:
          - `data`: string of bytes or file-like object to be written
            to the file
        """
        if self._closed:
            raise ValueError("cannot write to a closed file")

        try:
            # file-like
            read = data.read
        except AttributeError:
            # string
            if not isinstance(data, (str, bytes)):
                raise TypeError("can only write strings or file-like objects")
            if isinstance(data, str):
                try:
                    data = data.encode(self.encoding)
                except AttributeError:
                    raise TypeError("must specify an encoding for file in order to write str")
            read = io.BytesIO(data).read

        if self._buffer.tell() > 0:
            # Make sure to flush only when _buffer is complete
            space = self.chunk_size - self._buffer.tell()
            if space:
                try:
                    to_write = read(space)
                except BaseException:
                    self.abort()
                    raise
                self._buffer.write(to_write)
                if len(to_write) < space:
                    return  # EOF or incomplete
            self.__flush_buffer()
        to_write = read(self.chunk_size)
        while to_write and len(to_write) == self.chunk_size:
            self.__flush_data(to_write)
            to_write = read(self.chunk_size)
        # Buffer the final partial chunk until close() or the next write.
        self._buffer.write(to_write)

    def writelines(self, sequence: Iterable[Any]) -> None:
        """Write a sequence of strings to the file.

        Does not add separators.
        """
        for line in sequence:
            self.write(line)

    def writeable(self) -> bool:
        return True

    def __enter__(self) -> "GridIn":
        """Support for the context manager protocol."""
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any:
        """Support for the context manager protocol.

        Close the file if no exceptions occur and allow exceptions to propagate.
        """
        if exc_type is None:
            # No exceptions happened.
            self.close()
        else:
            # Something happened, at minimum mark as closed.
            object.__setattr__(self, "_closed", True)

        # propagate exceptions
        return False
|
410
|
+
|
411
|
+
|
412
|
+
class GridOut(io.IOBase):
|
413
|
+
"""Class to read data out of GridFS."""
|
414
|
+
|
415
|
+
def __init__(
    self,
    root_collection: Collection,
    file_id: Optional[int] = None,
    file_document: Optional[Any] = None,
    session: Optional[ClientSession] = None,
) -> None:
    """Read a file from GridFS

    Application developers should generally not need to
    instantiate this class directly - instead see the methods
    provided by :class:`~gridfs.GridFS`.

    Either `file_id` or `file_document` must be specified,
    `file_document` will be given priority if present. Raises
    :class:`TypeError` if `root_collection` is not an instance of
    :class:`~pymongo.collection.Collection`.

    :Parameters:
      - `root_collection`: root collection to read from
      - `file_id` (optional): value of ``"_id"`` for the file to read
      - `file_document` (optional): file document from
        `root_collection.files`
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession` to use for all
        commands

    .. versionchanged:: 3.8
       For better performance and to better follow the GridFS spec,
       :class:`GridOut` now uses a single cursor to read all the chunks in
       the file.

    .. versionchanged:: 3.6
       Added ``session`` parameter.

    .. versionchanged:: 3.0
       Creating a GridOut does not immediately retrieve the file metadata
       from the server. Metadata is fetched when first needed.
    """
    if not isinstance(root_collection, Collection):
        raise TypeError("root_collection must be an instance of Collection")
    _disallow_transactions(session)

    root_collection = _clear_entity_type_registry(root_collection)

    super().__init__()

    self.__chunks = root_collection.chunks
    self.__files = root_collection.files
    self.__file_id = file_id
    # Read-ahead buffer holding bytes already fetched but not yet returned.
    self.__buffer = EMPTY
    # Chunk cursor is created lazily on the first readchunk() call.
    self.__chunk_iter = None
    self.__position = 0
    # If file_document was supplied, no server round-trip is needed later.
    self._file = file_document
    self._session = session
|
470
|
+
|
471
|
+
# Metadata fields of the stored file, surfaced as lazy read-only
# properties (each access triggers _ensure_file() first).
_id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.")
filename: str = _grid_out_property("filename", "Name of this file.")
name: str = _grid_out_property("filename", "Alias for `filename`.")
content_type: Optional[str] = _grid_out_property("contentType", "Mime-type for this file.")
length: int = _grid_out_property("length", "Length (in bytes) of this file.")
chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.")
upload_date: datetime.datetime = _grid_out_property(
    "uploadDate", "Date that this file was first uploaded."
)
aliases: Optional[List[str]] = _grid_out_property("aliases", "List of aliases for this file.")
metadata: Optional[Mapping[str, Any]] = _grid_out_property(
    "metadata", "Metadata attached to this file."
)
md5: Optional[str] = _grid_out_property(
    "md5", "MD5 of the contents of this file if an md5 sum was created."
)

# _file caches the files-collection document; __chunk_iter holds the
# lazily-created chunk cursor (see readchunk).
_file: Any
__chunk_iter: Any
|
490
|
+
|
491
|
+
def _ensure_file(self) -> None:
    # Lazily fetch the file metadata document by _id on first use.
    # Raises NoFile when no matching document exists.
    if not self._file:
        _disallow_transactions(self._session)
        self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session)
        if not self._file:
            raise NoFile(
                "no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id)
            )
|
499
|
+
|
500
|
+
def __getattr__(self, name: str) -> Any:
    """Expose fields of the file document as attributes of this GridOut."""
    self._ensure_file()
    if name not in self._file:
        raise AttributeError("GridOut object has no attribute '%s'" % name)
    return self._file[name]
|
505
|
+
|
506
|
+
def readable(self) -> bool:
    """GridOut streams always support reading (io.IOBase interface)."""
    return True
|
508
|
+
|
509
|
+
def readchunk(self) -> bytes:
    """Reads a chunk at a time. If the current position is within a
    chunk the remainder of the chunk is returned.
    """
    received = len(self.__buffer)
    chunk_data = EMPTY
    chunk_size = int(self.chunk_size)

    if received > 0:
        # Leftover bytes from a previous read() satisfy this call.
        chunk_data = self.__buffer
    elif self.__position < int(self.length):
        chunk_number = int((received + self.__position) / chunk_size)
        if self.__chunk_iter is None:
            # Lazily open a single cursor over all remaining chunks,
            # starting at the chunk containing the current position.
            self.__chunk_iter = _GridOutChunkIterator(
                self, self.__chunks, self._session, chunk_number
            )

        chunk = self.__chunk_iter.next()
        # Skip the part of this chunk that was already consumed.
        chunk_data = chunk["data"][self.__position % chunk_size :]

        if not chunk_data:
            raise CorruptGridFile("truncated chunk")

    self.__position += len(chunk_data)
    self.__buffer = EMPTY
    return chunk_data
|
535
|
+
|
536
|
+
def read(self, size: int = -1) -> bytes:
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read

    .. versionchanged:: 3.8
       This method now only checks for extra chunks after reading the
       entire file. Previously, this method would check for extra chunks
       on every call.
    """
    self._ensure_file()

    # Clamp the request to the bytes remaining in the file.
    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    if size == 0:
        return EMPTY

    received = 0
    data = io.BytesIO()
    while received < size:
        # readchunk() may overshoot `size`; the excess is re-buffered below.
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    # Detect extra chunks after reading the entire file.
    if size == remainder and self.__chunk_iter:
        try:
            self.__chunk_iter.next()
        except StopIteration:
            pass

    # Rewind position by the overshoot so it reflects exactly `size` bytes read.
    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
|
581
|
+
|
582
|
+
def readline(self, size: int = -1) -> bytes:  # type: ignore[override]
    """Read one line or up to `size` bytes from the file.

    :Parameters:
     - `size` (optional): the maximum number of bytes to read
    """
    # Clamp the request to the bytes remaining in the file.
    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    if size == 0:
        return EMPTY

    received = 0
    data = io.BytesIO()
    while received < size:
        chunk_data = self.readchunk()
        # NOTE(review): the search bound is `size`, not `size - received`;
        # presumably safe because `size` is re-clamped on a hit — confirm
        # against upstream before changing.
        pos = chunk_data.find(NEWLN, 0, size)
        if pos != -1:
            # Newline found: shrink `size` so the line ends at the newline.
            size = received + pos + 1

        received += len(chunk_data)
        data.write(chunk_data)
        if pos != -1:
            break

    # Rewind position by any overshoot past the returned line.
    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
|
615
|
+
|
616
|
+
def tell(self) -> int:
    """Return the current position of this file."""
    return self.__position
|
619
|
+
|
620
|
+
def seek(self, pos: int, whence: int = _SEEK_SET) -> int:
    """Set the current position of this file and return it.

    :Parameters:
     - `pos`: the position (or offset if using relative
       positioning) to seek to
     - `whence` (optional): where to seek
       from. :attr:`os.SEEK_SET` (``0``) for absolute file
       positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
       to the current position, :attr:`os.SEEK_END` (``2``) to
       seek relative to the file's end.

    .. versionchanged:: 4.1
       The method now returns the new position in the file, to
       conform to the behavior of :meth:`io.IOBase.seek`.
    """
    if whence == _SEEK_SET:
        target = pos
    elif whence == _SEEK_CUR:
        target = self.__position + pos
    elif whence == _SEEK_END:
        target = int(self.length) + pos
    else:
        raise IOError(22, "Invalid value for `whence`")

    if target < 0:
        raise IOError(22, "Invalid value for `pos` - must be positive")

    # Only discard the buffer and chunk cursor when the position actually
    # moves; seeking to the current position keeps both for reuse.
    if target != self.__position:
        self.__position = target
        self.__buffer = EMPTY
        if self.__chunk_iter:
            self.__chunk_iter.close()
            self.__chunk_iter = None
    return target
|
658
|
+
|
659
|
+
def seekable(self) -> bool:
    """Part of the :class:`io.IOBase` interface: this stream supports seeking."""
    return True
|
661
|
+
|
662
|
+
def __iter__(self) -> "GridOut":
    """Return an iterator over all of this file's data.

    The iterator will return lines (delimited by ``b'\\n'``) of
    :class:`bytes`. This can be useful when serving files
    using a webserver that handles such an iterator efficiently.

    .. versionchanged:: 3.8
       The iterator now raises :class:`CorruptGridFile` when encountering
       any truncated, missing, or extra chunk in a file. The previous
       behavior was to only raise :class:`CorruptGridFile` on a missing
       chunk.

    .. versionchanged:: 4.0
       The iterator now iterates over *lines* in the file, instead
       of chunks, to conform to the base class :py:class:`io.IOBase`.
       Use :meth:`GridOut.readchunk` to read chunk by chunk instead
       of line by line.
    """
    # Line iteration itself is inherited from io.IOBase (readline-driven).
    return self
|
682
|
+
|
683
|
+
def close(self) -> None:
    """Make GridOut more generically file-like."""
    # Release the server-side chunk cursor before closing the stream.
    chunk_iter = self.__chunk_iter
    if chunk_iter is not None:
        chunk_iter.close()
        self.__chunk_iter = None
    super().close()
|
689
|
+
|
690
|
+
def write(self, value: Any) -> NoReturn:
    """Unsupported: GridOut is read-only."""
    raise io.UnsupportedOperation("write")
|
692
|
+
|
693
|
+
def writelines(self, lines: Any) -> NoReturn:
    """Unsupported: GridOut is read-only."""
    raise io.UnsupportedOperation("writelines")
|
695
|
+
|
696
|
+
def writable(self) -> bool:
    """Part of the :class:`io.IOBase` interface: this stream is read-only."""
    return False
|
698
|
+
|
699
|
+
def __enter__(self) -> "GridOut":
    """Makes it possible to use :class:`GridOut` files
    with the context manager protocol.
    """
    return self
|
704
|
+
|
705
|
+
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any:
    """Makes it possible to use :class:`GridOut` files
    with the context manager protocol.
    """
    self.close()
    # Returning False propagates any in-flight exception.
    return False
|
711
|
+
|
712
|
+
def fileno(self) -> NoReturn:
    """Unsupported: GridOut is not backed by an OS file descriptor."""
    raise io.UnsupportedOperation("fileno")
|
714
|
+
|
715
|
+
def flush(self) -> None:
    """No-op required by the :class:`io.IOBase` interface."""
    # GridOut is read-only, so flush does nothing.
    pass
|
718
|
+
|
719
|
+
def isatty(self) -> bool:
    """Part of the :class:`io.IOBase` interface: never a terminal."""
    return False
|
721
|
+
|
722
|
+
def truncate(self, size: Optional[int] = None) -> NoReturn:
    """Unsupported: GridOut is read-only."""
    # See https://docs.python.org/3/library/io.html#io.IOBase.writable
    # for why truncate has to raise.
    raise io.UnsupportedOperation("truncate")
|
726
|
+
|
727
|
+
# Override IOBase.__del__ otherwise it will lead to __getattr__ on
# __IOBase_closed which calls _ensure_file and potentially performs I/O.
# We cannot do I/O in __del__ since it can lead to a deadlock.
def __del__(self) -> None:
    """Intentionally a no-op; see the rationale above."""
    pass
|
732
|
+
|
733
|
+
|
734
|
+
class _GridOutChunkIterator(object):
    """Iterates over a file's chunks using a single cursor.

    Raises CorruptGridFile when encountering any truncated, missing, or extra
    chunk in a file.
    """

    def __init__(
        self,
        grid_out: GridOut,
        chunks: Collection,
        session: Optional[ClientSession],
        next_chunk: Any,
    ) -> None:
        # Snapshot the file's identity and geometry so iteration does not
        # depend on the GridOut after construction.
        self._id = grid_out._id
        self._chunk_size = int(grid_out.chunk_size)
        self._length = int(grid_out.length)
        self._chunks = chunks
        self._session = session
        # Index of the next chunk we expect to see (the "n" field).
        self._next_chunk = next_chunk
        self._num_chunks = math.ceil(float(self._length) / self._chunk_size)
        self._cursor = None

    # Lazily created by _create_cursor(); None when closed or not yet opened.
    _cursor: Optional[Cursor]

    def expected_chunk_length(self, chunk_n: int) -> int:
        """Return the byte length chunk ``chunk_n`` must have.

        Every chunk is full-size except the last, which holds the remainder.
        """
        if chunk_n < self._num_chunks - 1:
            return self._chunk_size
        return self._length - (self._chunk_size * (self._num_chunks - 1))

    def __iter__(self) -> "_GridOutChunkIterator":
        return self

    def _create_cursor(self) -> None:
        """Open a cursor over this file's chunks, sorted by chunk number."""
        filter = {"files_id": self._id}
        if self._next_chunk > 0:
            # Resume from where iteration left off.
            filter["n"] = {"$gte": self._next_chunk}
        _disallow_transactions(self._session)
        self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session)

    def _next_with_retry(self) -> Mapping[str, Any]:
        """Return the next chunk and retry once on CursorNotFound.

        We retry on CursorNotFound to maintain backwards compatibility in
        cases where two calls to read occur more than 10 minutes apart (the
        server's default cursor timeout).
        """
        if self._cursor is None:
            self._create_cursor()
        assert self._cursor is not None
        try:
            return self._cursor.next()
        except CursorNotFound:
            # Server timed the cursor out; reopen from _next_chunk and retry.
            self._cursor.close()
            self._create_cursor()
            return self._cursor.next()

    def next(self) -> Mapping[str, Any]:
        """Return the next chunk document, validating number and length."""
        try:
            chunk = self._next_with_retry()
        except StopIteration:
            # Running out of chunks is only OK once all expected chunks arrived.
            if self._next_chunk >= self._num_chunks:
                raise
            raise CorruptGridFile("no chunk #%d" % self._next_chunk)

        if chunk["n"] != self._next_chunk:
            self.close()
            raise CorruptGridFile(
                "Missing chunk: expected chunk #%d but found "
                "chunk with n=%d" % (self._next_chunk, chunk["n"])
            )

        if chunk["n"] >= self._num_chunks:
            # According to spec, ignore extra chunks if they are empty.
            if len(chunk["data"]):
                self.close()
                raise CorruptGridFile(
                    "Extra chunk found: expected %d chunks but found "
                    "chunk with n=%d" % (self._num_chunks, chunk["n"])
                )

        expected_length = self.expected_chunk_length(chunk["n"])
        if len(chunk["data"]) != expected_length:
            self.close()
            raise CorruptGridFile(
                "truncated chunk #%d: expected chunk length to be %d but "
                "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"]))
            )

        self._next_chunk += 1
        return chunk

    __next__ = next

    def close(self) -> None:
        """Close the underlying cursor, if one is open."""
        if self._cursor:
            self._cursor.close()
            self._cursor = None
|
832
|
+
|
833
|
+
|
834
|
+
class GridOutIterator(object):
    """Iterator over a file's raw chunk payloads, yielded as bytes."""

    def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession):
        # Chunk ordering and corruption checks are delegated to
        # _GridOutChunkIterator, starting from the first chunk (n=0).
        self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0)

    def __iter__(self) -> "GridOutIterator":
        return self

    def next(self) -> bytes:
        """Return the next chunk's data coerced to :class:`bytes`."""
        next_chunk = self.__chunk_iter.next()
        return bytes(next_chunk["data"])

    __next__ = next
|
846
|
+
|
847
|
+
|
848
|
+
class GridOutCursor(Cursor):
    """A cursor / iterator for returning GridOut objects as the result
    of an arbitrary query against the GridFS files collection.
    """

    def __init__(
        self,
        collection: Collection,
        filter: Optional[Mapping[str, Any]] = None,
        skip: int = 0,
        limit: int = 0,
        no_cursor_timeout: bool = False,
        sort: Optional[Any] = None,
        batch_size: int = 0,
        session: Optional[ClientSession] = None,
    ) -> None:
        """Create a new cursor, similar to the normal
        :class:`~pymongo.cursor.Cursor`.

        Should not be called directly by application developers - see
        the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.

        .. versionadded 2.7

        .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
        """
        _disallow_transactions(session)
        collection = _clear_entity_type_registry(collection)

        # Hold on to the base "fs" collection to create GridOut objects later.
        self.__root_collection = collection

        # The base Cursor iterates the "<root>.files" collection; next()
        # below wraps each returned file document in a GridOut.
        super(GridOutCursor, self).__init__(
            collection.files,
            filter,
            skip=skip,
            limit=limit,
            no_cursor_timeout=no_cursor_timeout,
            sort=sort,
            batch_size=batch_size,
            session=session,
        )

    def next(self) -> GridOut:
        """Get next GridOut object from cursor."""
        _disallow_transactions(self.session)
        next_file = super(GridOutCursor, self).next()
        return GridOut(self.__root_collection, file_document=next_file, session=self.session)

    __next__ = next

    def add_option(self, *args: Any, **kwargs: Any) -> NoReturn:
        """Unsupported: cursor options cannot be changed on a GridOutCursor."""
        raise NotImplementedError("Method does not exist for GridOutCursor")

    def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn:
        """Unsupported: cursor options cannot be changed on a GridOutCursor."""
        raise NotImplementedError("Method does not exist for GridOutCursor")

    def _clone_base(self, session: ClientSession) -> "GridOutCursor":
        """Creates an empty GridOutCursor for information to be copied into."""
        return GridOutCursor(self.__root_collection, session=session)
|