kinto 23.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinto/__init__.py +92 -0
- kinto/__main__.py +249 -0
- kinto/authorization.py +134 -0
- kinto/config/__init__.py +94 -0
- kinto/config/kinto.tpl +270 -0
- kinto/contribute.json +27 -0
- kinto/core/__init__.py +246 -0
- kinto/core/authentication.py +48 -0
- kinto/core/authorization.py +311 -0
- kinto/core/cache/__init__.py +131 -0
- kinto/core/cache/memcached.py +112 -0
- kinto/core/cache/memory.py +104 -0
- kinto/core/cache/postgresql/__init__.py +178 -0
- kinto/core/cache/postgresql/schema.sql +23 -0
- kinto/core/cache/testing.py +208 -0
- kinto/core/cornice/__init__.py +93 -0
- kinto/core/cornice/cors.py +144 -0
- kinto/core/cornice/errors.py +40 -0
- kinto/core/cornice/pyramidhook.py +373 -0
- kinto/core/cornice/renderer.py +89 -0
- kinto/core/cornice/resource.py +205 -0
- kinto/core/cornice/service.py +641 -0
- kinto/core/cornice/util.py +138 -0
- kinto/core/cornice/validators/__init__.py +94 -0
- kinto/core/cornice/validators/_colander.py +142 -0
- kinto/core/cornice/validators/_marshmallow.py +182 -0
- kinto/core/cornice_swagger/__init__.py +92 -0
- kinto/core/cornice_swagger/converters/__init__.py +21 -0
- kinto/core/cornice_swagger/converters/exceptions.py +6 -0
- kinto/core/cornice_swagger/converters/parameters.py +90 -0
- kinto/core/cornice_swagger/converters/schema.py +249 -0
- kinto/core/cornice_swagger/swagger.py +725 -0
- kinto/core/cornice_swagger/templates/index.html +73 -0
- kinto/core/cornice_swagger/templates/index_script_template.html +21 -0
- kinto/core/cornice_swagger/util.py +42 -0
- kinto/core/cornice_swagger/views.py +78 -0
- kinto/core/decorators.py +74 -0
- kinto/core/errors.py +216 -0
- kinto/core/events.py +301 -0
- kinto/core/initialization.py +738 -0
- kinto/core/listeners/__init__.py +9 -0
- kinto/core/metrics.py +94 -0
- kinto/core/openapi.py +115 -0
- kinto/core/permission/__init__.py +202 -0
- kinto/core/permission/memory.py +167 -0
- kinto/core/permission/postgresql/__init__.py +489 -0
- kinto/core/permission/postgresql/migrations/migration_001_002.sql +18 -0
- kinto/core/permission/postgresql/schema.sql +41 -0
- kinto/core/permission/testing.py +487 -0
- kinto/core/resource/__init__.py +1311 -0
- kinto/core/resource/model.py +412 -0
- kinto/core/resource/schema.py +502 -0
- kinto/core/resource/viewset.py +230 -0
- kinto/core/schema.py +119 -0
- kinto/core/scripts.py +50 -0
- kinto/core/statsd.py +1 -0
- kinto/core/storage/__init__.py +436 -0
- kinto/core/storage/exceptions.py +53 -0
- kinto/core/storage/generators.py +58 -0
- kinto/core/storage/memory.py +651 -0
- kinto/core/storage/postgresql/__init__.py +1131 -0
- kinto/core/storage/postgresql/client.py +120 -0
- kinto/core/storage/postgresql/migrations/migration_001_002.sql +10 -0
- kinto/core/storage/postgresql/migrations/migration_002_003.sql +33 -0
- kinto/core/storage/postgresql/migrations/migration_003_004.sql +18 -0
- kinto/core/storage/postgresql/migrations/migration_004_005.sql +20 -0
- kinto/core/storage/postgresql/migrations/migration_005_006.sql +11 -0
- kinto/core/storage/postgresql/migrations/migration_006_007.sql +74 -0
- kinto/core/storage/postgresql/migrations/migration_007_008.sql +66 -0
- kinto/core/storage/postgresql/migrations/migration_008_009.sql +41 -0
- kinto/core/storage/postgresql/migrations/migration_009_010.sql +98 -0
- kinto/core/storage/postgresql/migrations/migration_010_011.sql +14 -0
- kinto/core/storage/postgresql/migrations/migration_011_012.sql +9 -0
- kinto/core/storage/postgresql/migrations/migration_012_013.sql +71 -0
- kinto/core/storage/postgresql/migrations/migration_013_014.sql +14 -0
- kinto/core/storage/postgresql/migrations/migration_014_015.sql +95 -0
- kinto/core/storage/postgresql/migrations/migration_015_016.sql +4 -0
- kinto/core/storage/postgresql/migrations/migration_016_017.sql +81 -0
- kinto/core/storage/postgresql/migrations/migration_017_018.sql +25 -0
- kinto/core/storage/postgresql/migrations/migration_018_019.sql +8 -0
- kinto/core/storage/postgresql/migrations/migration_019_020.sql +7 -0
- kinto/core/storage/postgresql/migrations/migration_020_021.sql +68 -0
- kinto/core/storage/postgresql/migrations/migration_021_022.sql +62 -0
- kinto/core/storage/postgresql/migrations/migration_022_023.sql +5 -0
- kinto/core/storage/postgresql/migrations/migration_023_024.sql +6 -0
- kinto/core/storage/postgresql/migrations/migration_024_025.sql +6 -0
- kinto/core/storage/postgresql/migrator.py +98 -0
- kinto/core/storage/postgresql/pool.py +55 -0
- kinto/core/storage/postgresql/schema.sql +143 -0
- kinto/core/storage/testing.py +1857 -0
- kinto/core/storage/utils.py +37 -0
- kinto/core/testing.py +182 -0
- kinto/core/utils.py +553 -0
- kinto/core/views/__init__.py +0 -0
- kinto/core/views/batch.py +163 -0
- kinto/core/views/errors.py +145 -0
- kinto/core/views/heartbeat.py +106 -0
- kinto/core/views/hello.py +69 -0
- kinto/core/views/openapi.py +35 -0
- kinto/core/views/version.py +50 -0
- kinto/events.py +3 -0
- kinto/plugins/__init__.py +0 -0
- kinto/plugins/accounts/__init__.py +94 -0
- kinto/plugins/accounts/authentication.py +63 -0
- kinto/plugins/accounts/scripts.py +61 -0
- kinto/plugins/accounts/utils.py +13 -0
- kinto/plugins/accounts/views.py +136 -0
- kinto/plugins/admin/README.md +3 -0
- kinto/plugins/admin/VERSION +1 -0
- kinto/plugins/admin/__init__.py +40 -0
- kinto/plugins/admin/build/VERSION +1 -0
- kinto/plugins/admin/build/assets/index-CYFwtKtL.css +6 -0
- kinto/plugins/admin/build/assets/index-DJ0m93zA.js +149 -0
- kinto/plugins/admin/build/assets/logo-VBRiKSPX.png +0 -0
- kinto/plugins/admin/build/index.html +18 -0
- kinto/plugins/admin/public/help.html +25 -0
- kinto/plugins/admin/views.py +42 -0
- kinto/plugins/default_bucket/__init__.py +191 -0
- kinto/plugins/flush.py +28 -0
- kinto/plugins/history/__init__.py +65 -0
- kinto/plugins/history/listener.py +181 -0
- kinto/plugins/history/views.py +66 -0
- kinto/plugins/openid/__init__.py +131 -0
- kinto/plugins/openid/utils.py +14 -0
- kinto/plugins/openid/views.py +193 -0
- kinto/plugins/prometheus.py +300 -0
- kinto/plugins/statsd.py +85 -0
- kinto/schema_validation.py +135 -0
- kinto/views/__init__.py +34 -0
- kinto/views/admin.py +195 -0
- kinto/views/buckets.py +45 -0
- kinto/views/collections.py +58 -0
- kinto/views/contribute.py +39 -0
- kinto/views/groups.py +90 -0
- kinto/views/permissions.py +235 -0
- kinto/views/records.py +133 -0
- kinto-23.2.1.dist-info/METADATA +232 -0
- kinto-23.2.1.dist-info/RECORD +142 -0
- kinto-23.2.1.dist-info/WHEEL +5 -0
- kinto-23.2.1.dist-info/entry_points.txt +5 -0
- kinto-23.2.1.dist-info/licenses/LICENSE +13 -0
- kinto-23.2.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,651 @@
|
|
|
1
|
+
import numbers
|
|
2
|
+
import operator
|
|
3
|
+
import re
|
|
4
|
+
from collections import abc, defaultdict
|
|
5
|
+
|
|
6
|
+
from kinto.core import utils
|
|
7
|
+
from kinto.core.decorators import deprecate_kwargs, synchronized
|
|
8
|
+
from kinto.core.storage import (
|
|
9
|
+
DEFAULT_DELETED_FIELD,
|
|
10
|
+
DEFAULT_ID_FIELD,
|
|
11
|
+
DEFAULT_MODIFIED_FIELD,
|
|
12
|
+
MISSING,
|
|
13
|
+
Sort,
|
|
14
|
+
StorageBase,
|
|
15
|
+
exceptions,
|
|
16
|
+
)
|
|
17
|
+
from kinto.core.utils import COMPARISON, find_nested_value, json
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def tree():
    """Return an auto-vivifying nested mapping.

    Accessing any missing key creates another level of the same
    structure, so arbitrarily deep paths can be assigned directly.
    """
    return defaultdict(tree)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class MemoryBasedStorage(StorageBase):
    """Abstract storage class, providing basic operations and
    methods for in-memory implementations of sorting and filtering.
    """

    # Serializer used by subclasses for deep-copies and canonical dumps.
    json = json

    def initialize_schema(self, dry_run=False):
        # Nothing to do: in-memory backends have no schema to create.
        pass

    def strip_deleted_object(
        self,
        resource_name,
        parent_id,
        obj,
        id_field=DEFAULT_ID_FIELD,
        modified_field=DEFAULT_MODIFIED_FIELD,
        deleted_field=DEFAULT_DELETED_FIELD,
    ):
        """Strip the object of all its fields except id and timestamp,
        and set the deletion field value (e.g. ``deleted=True``).

        Returns the resulting "tombstone" dict.
        """
        deleted = {}
        deleted[id_field] = obj[id_field]
        deleted[modified_field] = obj[modified_field]
        deleted[deleted_field] = True
        return deleted

    def set_object_timestamp(
        self,
        resource_name,
        parent_id,
        obj,
        modified_field=DEFAULT_MODIFIED_FIELD,
        last_modified=None,
    ):
        """Bump the resource timestamp and stamp `obj` with it in place.

        Returns the (mutated) object.
        """
        timestamp = self.bump_and_store_timestamp(
            resource_name, parent_id, obj, modified_field, last_modified=last_modified
        )
        obj[modified_field] = timestamp
        return obj

    def extract_object_set(
        self, objects, filters, sorting, id_field, deleted_field, pagination_rules=None, limit=None
    ):
        """Take the list of objects and handle filtering, sorting and
        pagination.

        Delegates to the module-level `extract_object_set` helper.
        """
        return extract_object_set(
            objects,
            filters=filters,
            sorting=sorting,
            id_field=id_field,
            deleted_field=deleted_field,
            pagination_rules=pagination_rules,
            limit=limit,
        )

    def bump_timestamp(self, resource_timestamp, obj, modified_field, last_modified):
        """Return the timestamp for the object and the new resource timestamp.

        Timestamps are based on the current millisecond.

        .. note ::

            Here it is assumed that if requests from the same user burst in,
            the time will slide into the future. It is not problematic since
            the timestamp notion is opaque, and behaves like a revision number.
        """
        # Precedence is `(obj is not None and modified_field in obj) or last_modified is not None`:
        # a timestamp is "specified" if present on the object or passed explicitly.
        is_specified = obj is not None and modified_field in obj or last_modified is not None
        if is_specified:
            # If there is a timestamp in the new object, try to use it.
            if last_modified is not None:
                current = last_modified
            else:
                current = obj[modified_field]

            # If it is equal to current resource timestamp, bump it.
            if current == resource_timestamp:
                resource_timestamp += 1
                current = resource_timestamp
            # If it is superior (future), use it as new resource timestamp.
            elif current > resource_timestamp:
                resource_timestamp = current
            # Else (past), do nothing.

        else:
            # Not specified, use a new one.
            current = utils.msec_time()
            # If two ops in the same msec, bump it.
            if current <= resource_timestamp:
                current = resource_timestamp + 1
            resource_timestamp = current
        return current, resource_timestamp

    def bump_and_store_timestamp(
        self, resource_name, parent_id, obj=None, modified_field=None, last_modified=None
    ):
        """Use the bump_timestamp to get its next value and store the resource_timestamp."""
        raise NotImplementedError
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class Storage(MemoryBasedStorage):
    """Storage backend implementation in memory.

    Useful for development or testing purposes, but stored data is lost after
    each server restart.

    Enable in configuration::

        kinto.storage_backend = kinto.core.storage.memory
    """

    def __init__(self, *args, readonly=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.readonly = readonly
        # Start from a clean slate: flush() creates the in-memory structures.
        self.flush()

    def flush(self):
        """Reset all stored data: live objects, tombstones and timestamps."""
        # Live objects: _store[parent_id][resource_name][object_id] -> dict.
        self._store = tree()
        # Tombstones of deleted objects, same layout as _store.
        self._cemetery = tree()
        # Last-modified per resource: _timestamps[parent_id][resource_name] -> int.
        self._timestamps = defaultdict(dict)

    @synchronized
    def resource_timestamp(self, resource_name, parent_id):
        """Return the resource's current timestamp, initializing it if needed."""
        ts = self._timestamps[parent_id].get(resource_name)
        if ts is not None:
            return ts
        if self.readonly:
            # Initializing a timestamp is a write; forbid it in readonly mode.
            error_msg = "Cannot initialize empty resource timestamp when running in readonly."
            raise exceptions.ReadonlyError(message=error_msg)
        return self.bump_and_store_timestamp(resource_name, parent_id)

    @synchronized
    def all_resources_timestamps(self, resource_name):
        """Return {parent_id: timestamp} for every parent having this resource."""
        return {k: v[resource_name] for k, v in self._timestamps.items() if resource_name in v}

    def bump_and_store_timestamp(
        self, resource_name, parent_id, obj=None, modified_field=None, last_modified=None
    ):
        """Use the bump_timestamp to get its next value and store the resource_timestamp."""
        current_resource_timestamp = self._timestamps[parent_id].get(resource_name, 0)

        current, resource_timestamp = self.bump_timestamp(
            current_resource_timestamp, obj, modified_field, last_modified
        )
        self._timestamps[parent_id][resource_name] = resource_timestamp

        return current

    @deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
    @synchronized
    def create(
        self,
        resource_name,
        parent_id,
        obj,
        id_generator=None,
        id_field=DEFAULT_ID_FIELD,
        modified_field=DEFAULT_MODIFIED_FIELD,
    ):
        """Create `obj` under the given parent/resource and return the stored copy.

        Raises ``UnicityError`` if an object with the same id already exists.
        """
        id_generator = id_generator or self.id_generator

        # This is very inefficient, but memory storage is not used in production.
        # The serialization provides the necessary consistency with other
        # backends implementation, and the deserialization creates a deep
        # copy of the passed object.
        obj = json.loads(json.dumps(obj))

        if id_field in obj:
            # Raise unicity error if object with same id already exists.
            try:
                existing = self.get(resource_name, parent_id, obj[id_field])
                raise exceptions.UnicityError(id_field, existing)
            except exceptions.ObjectNotFoundError:
                pass
        else:
            obj[id_field] = id_generator()

        self.set_object_timestamp(resource_name, parent_id, obj, modified_field=modified_field)
        _id = obj[id_field]
        self._store[parent_id][resource_name][_id] = obj
        # Re-creating an object supersedes any previous tombstone.
        self._cemetery[parent_id][resource_name].pop(_id, None)
        return obj

    @deprecate_kwargs({"collection_id": "resource_name"})
    @synchronized
    def get(
        self,
        resource_name,
        parent_id,
        object_id,
        id_field=DEFAULT_ID_FIELD,
        modified_field=DEFAULT_MODIFIED_FIELD,
    ):
        """Return a shallow copy of the stored object, or raise ObjectNotFoundError."""
        objects = self._store[parent_id][resource_name]
        if object_id not in objects:
            raise exceptions.ObjectNotFoundError(object_id)
        # Shallow copy so callers cannot mutate the stored dict in place.
        return {**objects[object_id]}

    @deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
    @synchronized
    def update(
        self,
        resource_name,
        parent_id,
        object_id,
        obj,
        id_field=DEFAULT_ID_FIELD,
        modified_field=DEFAULT_MODIFIED_FIELD,
    ):
        """Create or replace the object at `object_id` and return the stored copy."""
        # This is very inefficient, but memory storage is not used in production.
        # The serialization provides the necessary consistency with other
        # backends implementation, and the deserialization creates a deep
        # copy of the passed object.
        obj = json.loads(json.dumps(obj))

        # The id in the URL/arguments wins over any id in the body.
        obj[id_field] = object_id

        self.set_object_timestamp(resource_name, parent_id, obj, modified_field=modified_field)
        self._store[parent_id][resource_name][object_id] = obj
        self._cemetery[parent_id][resource_name].pop(object_id, None)
        return obj

    @deprecate_kwargs({"collection_id": "resource_name"})
    @synchronized
    def delete(
        self,
        resource_name,
        parent_id,
        object_id,
        id_field=DEFAULT_ID_FIELD,
        with_deleted=True,
        modified_field=DEFAULT_MODIFIED_FIELD,
        deleted_field=DEFAULT_DELETED_FIELD,
        last_modified=None,
    ):
        """Delete the object and return its tombstone.

        When `with_deleted` is true, the tombstone is kept in the cemetery
        so it can be listed with ``include_deleted``.
        """
        existing = self.get(resource_name, parent_id, object_id)
        # Need to delete the last_modified field of the object, so that
        # set_object_timestamp() assigns a fresh deletion timestamp.
        del existing[modified_field]

        self.set_object_timestamp(
            resource_name,
            parent_id,
            existing,
            modified_field=modified_field,
            last_modified=last_modified,
        )
        existing = self.strip_deleted_object(resource_name, parent_id, existing)

        # Add to deleted items, remove from store.
        if with_deleted:
            deleted = {**existing}
            self._cemetery[parent_id][resource_name][object_id] = deleted
        self._store[parent_id][resource_name].pop(object_id)
        return existing

    @deprecate_kwargs({"collection_id": "resource_name"})
    @synchronized
    def purge_deleted(
        self,
        resource_name,
        parent_id,
        before=None,
        max_retained=None,
        id_field=DEFAULT_ID_FIELD,
        modified_field=DEFAULT_MODIFIED_FIELD,
    ):
        """Drop tombstones (and the associated timestamps) and return how many
        were removed.

        Either keep tombstones newer than `before`, or the `max_retained`
        most recent ones; passing both is an error.
        """
        if max_retained is not None and before is not None:
            raise ValueError("`before` and `max_retained` are exclusive arguments. Pick one.")

        # NOTE(review): unlike `_get_objects_by_parent_id`, this pattern has no
        # `^`/`$` anchors; `re.match` anchors the start only, so "p1" also
        # matches "p10" — confirm whether trailing wildcarding is intended.
        parent_id_match = re.compile(parent_id.replace("*", ".*"))

        timestamps_by_parent_id = {
            pid: resources
            for pid, resources in self._timestamps.items()
            if parent_id_match.match(pid)
        }
        # Purging also forgets the resource timestamps of matching parents.
        # NOTE(review): raises KeyError if a matching parent has no entry for
        # `resource_name` — presumably callers only purge existing resources.
        if resource_name is not None:
            for pid, resources in timestamps_by_parent_id.items():
                del self._timestamps[pid][resource_name]
        else:
            for pid, resources in timestamps_by_parent_id.items():
                del self._timestamps[pid]

        num_deleted = 0
        tombstones_by_parent_id = {
            pid: resources
            for pid, resources in self._cemetery.items()
            if parent_id_match.match(pid)
        }
        for pid, resources in tombstones_by_parent_id.items():
            if resource_name is not None:
                # Restrict the purge to the requested resource only.
                resources = {resource_name: resources[resource_name]}
            for resource, resource_objects in resources.items():
                if before is None:
                    if max_retained is None:
                        # No criterion: drop every tombstone.
                        kept = {}
                    else:
                        # Keep only the `max_retained` most recent tombstones.
                        # (`i` inside the lambda is the (key, value) pair, not
                        # the enumerate index of the outer comprehension.)
                        kept = {
                            key: value
                            for i, (key, value) in enumerate(
                                sorted(
                                    resource_objects.items(),
                                    key=lambda i: i[1]["last_modified"],
                                    reverse=True,
                                )
                            )
                            if i < max_retained
                        }
                else:
                    # Keep tombstones at or after the `before` timestamp.
                    kept = {
                        key: value
                        for key, value in resource_objects.items()
                        if value[modified_field] >= before
                    }
                self._cemetery[pid][resource] = kept
                num_deleted += len(resource_objects) - len(kept)
        return num_deleted

    @synchronized
    def list_all(
        self,
        resource_name,
        parent_id,
        filters=None,
        sorting=None,
        pagination_rules=None,
        limit=None,
        include_deleted=False,
        id_field=DEFAULT_ID_FIELD,
        modified_field=DEFAULT_MODIFIED_FIELD,
        deleted_field=DEFAULT_DELETED_FIELD,
    ):
        """List objects (optionally including tombstones) after applying
        filters, sorting, pagination rules and limit.
        """
        objects = _get_objects_by_parent_id(self._store, parent_id, resource_name)

        # First pass: filter live objects only (no sorting/pagination yet).
        objects, _ = self.extract_object_set(
            objects=objects,
            filters=filters,
            sorting=None,
            id_field=id_field,
            deleted_field=deleted_field,
        )
        deleted = []
        if include_deleted:
            deleted = _get_objects_by_parent_id(self._cemetery, parent_id, resource_name)

        # Second pass: merge tombstones in, then sort/paginate/limit.
        objects, _ = self.extract_object_set(
            objects=objects + deleted,
            filters=filters,
            sorting=sorting,
            id_field=id_field,
            deleted_field=deleted_field,
            pagination_rules=pagination_rules,
            limit=limit,
        )
        return objects

    @synchronized
    def count_all(
        self,
        resource_name,
        parent_id,
        filters=None,
        id_field=DEFAULT_ID_FIELD,
        modified_field=DEFAULT_MODIFIED_FIELD,
        deleted_field=DEFAULT_DELETED_FIELD,
    ):
        """Return the number of live objects matching `filters`."""
        objects = _get_objects_by_parent_id(self._store, parent_id, resource_name)
        _, count = self.extract_object_set(
            objects=objects,
            filters=filters,
            sorting=None,
            id_field=id_field,
            deleted_field=deleted_field,
        )
        return count

    @deprecate_kwargs({"collection_id": "resource_name"})
    @synchronized
    def delete_all(
        self,
        resource_name,
        parent_id,
        filters=None,
        sorting=None,
        pagination_rules=None,
        limit=None,
        id_field=DEFAULT_ID_FIELD,
        with_deleted=True,
        modified_field=DEFAULT_MODIFIED_FIELD,
        deleted_field=DEFAULT_DELETED_FIELD,
    ):
        """Delete every matching object and return the list of tombstones."""
        # with_meta=True annotates each object with its parent/resource so
        # that wildcard parent_ids can still be deleted one by one below.
        objects = _get_objects_by_parent_id(self._store, parent_id, resource_name, with_meta=True)
        objects, count = self.extract_object_set(
            objects=objects,
            filters=filters,
            sorting=sorting,
            pagination_rules=pagination_rules,
            limit=limit,
            id_field=id_field,
            deleted_field=deleted_field,
        )

        deleted = [
            self.delete(
                r.pop("__resource_name__"),
                r.pop("__parent_id__"),
                r[id_field],
                id_field=id_field,
                with_deleted=with_deleted,
                modified_field=modified_field,
                deleted_field=deleted_field,
            )
            for r in objects
        ]
        return deleted

    @synchronized
    def trim_objects(
        self,
        resource_name: str,
        parent_id: str,
        filters: list,
        max_objects: int,
        id_field: str = DEFAULT_ID_FIELD,
        modified_field: str = DEFAULT_MODIFIED_FIELD,
    ) -> int:
        """Keep only the `max_objects` most recent matching objects and delete
        the rest; return the number of deleted objects.
        """
        objects = _get_objects_by_parent_id(self._store, parent_id, resource_name, with_meta=True)
        objects, _ = self.extract_object_set(
            objects=objects,
            filters=filters,
            sorting=[Sort(modified_field, -1)],
            id_field=id_field,
            deleted_field=DEFAULT_DELETED_FIELD,
        )

        # Sorted newest first: everything past `max_objects` is trimmed.
        to_delete = objects[max_objects:]
        for r in to_delete:
            self.delete(
                resource_name,
                parent_id,
                r[id_field],
                id_field=id_field,
                modified_field=modified_field,
            )
        return len(to_delete)
|
|
471
|
+
|
|
472
|
+
|
|
473
|
+
def extract_object_set(
    objects,
    filters,
    sorting,
    pagination_rules=None,
    limit=None,
    id_field=DEFAULT_ID_FIELD,
    deleted_field=DEFAULT_DELETED_FIELD,
):
    """Apply filters, pagination rules, sorting and limit to `objects`.

    Return a ``(objects, count)`` tuple where ``count`` is the number of
    matching objects before pagination/limit, minus the tombstones present
    in the paginated selection.
    """
    matching = list(apply_filters(objects, filters or []))
    total = len(matching)

    if pagination_rules:
        # Each pagination rule is an alternative set of filters: an object
        # is kept if it satisfies any rule.
        page = []
        for rule in pagination_rules:
            page.extend(apply_filters(matching, rule))
    else:
        page = matching

    ordered = apply_sorting(page, sorting or [])

    nb_tombstones = sum(1 for entry in ordered if entry.get(deleted_field) is True)

    if limit:
        ordered = list(ordered)[:limit]

    return ordered, total - nb_tombstones
|
|
505
|
+
|
|
506
|
+
|
|
507
|
+
def canonical_json(obj):
    """Serialize `obj` into a deterministic JSON string.

    Sorting the keys guarantees that equal values always produce the same
    string, so values can be compared through their serialized form.
    """
    return json.dumps(obj, sort_keys=True)
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
def apply_filters(objects, filters):
    """Filter the specified objects, using basic iteration.

    Yield each object of `objects` that satisfies all the `filters`.
    Mixed-type comparisons follow the PostgreSQL ordering emulated by
    `schwartzian_transform`.
    """

    def contains_filtering(object_value, search_term):
        # True when `object_value` contains *all* elements of `search_term`
        # (subset semantics). Elements are compared through their canonical
        # JSON form so that unhashable values (lists, dicts) can go in sets.
        if object_value == MISSING:
            return False
        try:
            search_set = set([canonical_json(v) for v in search_term])
            object_value_set = set([canonical_json(v) for v in object_value])
        except TypeError:
            # Field value is not iterable (scalar): it cannot contain anything.
            return False
        return object_value_set.intersection(search_set) == search_set

    def contains_any_filtering(object_value, search_term):
        # Truthy when `object_value` contains *at least one* element of
        # `search_term` (non-empty intersection).
        if object_value == MISSING:
            return False
        try:
            search_set = set([canonical_json(v) for v in search_term])
            object_value_set = set([canonical_json(v) for v in object_value])
        except TypeError:
            return False
        return object_value_set.intersection(search_set)

    # Map each comparison operator to its predicate implementation.
    operators = {
        COMPARISON.LT: operator.lt,
        COMPARISON.MAX: operator.le,
        COMPARISON.EQ: operator.eq,
        COMPARISON.NOT: operator.ne,
        COMPARISON.MIN: operator.ge,
        COMPARISON.GT: operator.gt,
        COMPARISON.IN: operator.contains,
        COMPARISON.EXCLUDE: lambda x, y: not operator.contains(x, y),
        COMPARISON.LIKE: lambda x, y: re.search(y, x, re.IGNORECASE),
        COMPARISON.CONTAINS: contains_filtering,
        COMPARISON.CONTAINS_ANY: contains_any_filtering,
    }
    for obj in objects:
        matches = True
        for f in filters:
            right = f.value
            if f.field == DEFAULT_ID_FIELD:
                # Ids are stored as strings: coerce integer filter values.
                if isinstance(right, int):
                    right = str(right)

            left = find_nested_value(obj, f.field, MISSING)

            if f.operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
                # `operator.contains(a, b)` tests `b in a`: the filter value
                # (the candidate list) must be the first argument.
                right, left = left, right
            elif f.operator == COMPARISON.LIKE:
                # Add implicit start/end wildchars if none is specified.
                if "*" not in right:
                    right = f"*{right}*"
                # Translate the `*` glob syntax into an anchored regex.
                right = f"^{right.replace('*', '.*')}$"
            elif f.operator in (
                COMPARISON.LT,
                COMPARISON.MAX,
                COMPARISON.EQ,
                COMPARISON.NOT,
                COMPARISON.MIN,
                COMPARISON.GT,
            ):
                # Tag both sides so values of different types compare in the
                # same order as the PostgreSQL backend.
                left = schwartzian_transform(left)
                right = schwartzian_transform(right)

            if f.operator == COMPARISON.HAS:
                # `has` tests field presence (or absence when f.value is falsy).
                # NOTE(review): this assigns `matches` instead of AND-ing it
                # with the previous filters' outcome, so a trailing `has`
                # filter overrides earlier mismatches — confirm intended.
                matches = left != MISSING if f.value else left == MISSING
            else:
                matches = matches and operators[f.operator](left, right)
        if matches:
            yield obj
|
|
582
|
+
|
|
583
|
+
|
|
584
|
+
def schwartzian_transform(value):
    """Decorate a value with a tag that enforces the Postgres sort order.

    The sort order, per https://www.postgresql.org/docs/9.6/static/datatype-json.html, is:

        Object > Array > Boolean > Number > String > Null

    Note that there are more interesting rules for comparing objects
    and arrays but we probably don't need to be that compatible.

    MISSING represents what would be a SQL NULL, which is "bigger"
    than everything else.
    """
    # The checks below are order-sensitive: bool must be tested before
    # Number (bool is a subclass of int), and str before Sequence.
    rank = None
    if value is None:
        rank = 0
    elif isinstance(value, str):
        rank = 1
    elif isinstance(value, bool):
        rank = 3
    elif isinstance(value, numbers.Number):
        rank = 2
    elif isinstance(value, abc.Sequence):
        rank = 4
    elif isinstance(value, abc.Mapping):
        rank = 5
    elif value is MISSING:
        rank = 6
    if rank is None:
        raise ValueError(f"Unknown value: {value}")  # pragma: no cover
    return (rank, value)
|
|
614
|
+
|
|
615
|
+
|
|
616
|
+
def apply_sorting(objects, sorting):
    """Sort `objects` according to the list of `sorting` criteria.

    Relies on Python's stable sort: applying the criteria from least to
    most significant (i.e. in reverse) is equivalent to a single
    multi-key comparison.
    """
    items = list(objects)
    if not items:
        return items

    def make_key(field_name):
        # Bind the field name so each pass sorts on its own column,
        # with values tagged for cross-type comparability.
        def key(entry):
            return schwartzian_transform(find_nested_value(entry, field_name, default=MISSING))

        return key

    for criterion in reversed(sorting):
        items.sort(key=make_key(criterion.field), reverse=criterion.direction < 0)

    return items
|
|
630
|
+
|
|
631
|
+
|
|
632
|
+
def _get_objects_by_parent_id(store, parent_id, resource_name, with_meta=False):
|
|
633
|
+
parent_id_match = re.compile(f"^{parent_id.replace('*', '.*')}$")
|
|
634
|
+
by_parent_id = {
|
|
635
|
+
pid: resources for pid, resources in store.items() if parent_id_match.match(pid)
|
|
636
|
+
}
|
|
637
|
+
objects = []
|
|
638
|
+
for pid, resources in by_parent_id.items():
|
|
639
|
+
if resource_name is not None:
|
|
640
|
+
resources = {resource_name: resources[resource_name]}
|
|
641
|
+
for resource, colobjects in resources.items():
|
|
642
|
+
for r in colobjects.values():
|
|
643
|
+
if with_meta:
|
|
644
|
+
objects.append(dict(__resource_name__=resource, __parent_id__=pid, **r))
|
|
645
|
+
else:
|
|
646
|
+
objects.append(r)
|
|
647
|
+
return objects
|
|
648
|
+
|
|
649
|
+
|
|
650
|
+
def load_from_config(config):
    """Instantiate the in-memory storage backend.

    The `config` argument is accepted for interface compatibility with the
    other backends' loaders but is not used here.
    """
    backend = Storage()
    return backend
|