kinto 23.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. kinto/__init__.py +92 -0
  2. kinto/__main__.py +249 -0
  3. kinto/authorization.py +134 -0
  4. kinto/config/__init__.py +94 -0
  5. kinto/config/kinto.tpl +270 -0
  6. kinto/contribute.json +27 -0
  7. kinto/core/__init__.py +246 -0
  8. kinto/core/authentication.py +48 -0
  9. kinto/core/authorization.py +311 -0
  10. kinto/core/cache/__init__.py +131 -0
  11. kinto/core/cache/memcached.py +112 -0
  12. kinto/core/cache/memory.py +104 -0
  13. kinto/core/cache/postgresql/__init__.py +178 -0
  14. kinto/core/cache/postgresql/schema.sql +23 -0
  15. kinto/core/cache/testing.py +208 -0
  16. kinto/core/cornice/__init__.py +93 -0
  17. kinto/core/cornice/cors.py +144 -0
  18. kinto/core/cornice/errors.py +40 -0
  19. kinto/core/cornice/pyramidhook.py +373 -0
  20. kinto/core/cornice/renderer.py +89 -0
  21. kinto/core/cornice/resource.py +205 -0
  22. kinto/core/cornice/service.py +641 -0
  23. kinto/core/cornice/util.py +138 -0
  24. kinto/core/cornice/validators/__init__.py +94 -0
  25. kinto/core/cornice/validators/_colander.py +142 -0
  26. kinto/core/cornice/validators/_marshmallow.py +182 -0
  27. kinto/core/cornice_swagger/__init__.py +92 -0
  28. kinto/core/cornice_swagger/converters/__init__.py +21 -0
  29. kinto/core/cornice_swagger/converters/exceptions.py +6 -0
  30. kinto/core/cornice_swagger/converters/parameters.py +90 -0
  31. kinto/core/cornice_swagger/converters/schema.py +249 -0
  32. kinto/core/cornice_swagger/swagger.py +725 -0
  33. kinto/core/cornice_swagger/templates/index.html +73 -0
  34. kinto/core/cornice_swagger/templates/index_script_template.html +21 -0
  35. kinto/core/cornice_swagger/util.py +42 -0
  36. kinto/core/cornice_swagger/views.py +78 -0
  37. kinto/core/decorators.py +74 -0
  38. kinto/core/errors.py +216 -0
  39. kinto/core/events.py +301 -0
  40. kinto/core/initialization.py +738 -0
  41. kinto/core/listeners/__init__.py +9 -0
  42. kinto/core/metrics.py +94 -0
  43. kinto/core/openapi.py +115 -0
  44. kinto/core/permission/__init__.py +202 -0
  45. kinto/core/permission/memory.py +167 -0
  46. kinto/core/permission/postgresql/__init__.py +489 -0
  47. kinto/core/permission/postgresql/migrations/migration_001_002.sql +18 -0
  48. kinto/core/permission/postgresql/schema.sql +41 -0
  49. kinto/core/permission/testing.py +487 -0
  50. kinto/core/resource/__init__.py +1311 -0
  51. kinto/core/resource/model.py +412 -0
  52. kinto/core/resource/schema.py +502 -0
  53. kinto/core/resource/viewset.py +230 -0
  54. kinto/core/schema.py +119 -0
  55. kinto/core/scripts.py +50 -0
  56. kinto/core/statsd.py +1 -0
  57. kinto/core/storage/__init__.py +436 -0
  58. kinto/core/storage/exceptions.py +53 -0
  59. kinto/core/storage/generators.py +58 -0
  60. kinto/core/storage/memory.py +651 -0
  61. kinto/core/storage/postgresql/__init__.py +1131 -0
  62. kinto/core/storage/postgresql/client.py +120 -0
  63. kinto/core/storage/postgresql/migrations/migration_001_002.sql +10 -0
  64. kinto/core/storage/postgresql/migrations/migration_002_003.sql +33 -0
  65. kinto/core/storage/postgresql/migrations/migration_003_004.sql +18 -0
  66. kinto/core/storage/postgresql/migrations/migration_004_005.sql +20 -0
  67. kinto/core/storage/postgresql/migrations/migration_005_006.sql +11 -0
  68. kinto/core/storage/postgresql/migrations/migration_006_007.sql +74 -0
  69. kinto/core/storage/postgresql/migrations/migration_007_008.sql +66 -0
  70. kinto/core/storage/postgresql/migrations/migration_008_009.sql +41 -0
  71. kinto/core/storage/postgresql/migrations/migration_009_010.sql +98 -0
  72. kinto/core/storage/postgresql/migrations/migration_010_011.sql +14 -0
  73. kinto/core/storage/postgresql/migrations/migration_011_012.sql +9 -0
  74. kinto/core/storage/postgresql/migrations/migration_012_013.sql +71 -0
  75. kinto/core/storage/postgresql/migrations/migration_013_014.sql +14 -0
  76. kinto/core/storage/postgresql/migrations/migration_014_015.sql +95 -0
  77. kinto/core/storage/postgresql/migrations/migration_015_016.sql +4 -0
  78. kinto/core/storage/postgresql/migrations/migration_016_017.sql +81 -0
  79. kinto/core/storage/postgresql/migrations/migration_017_018.sql +25 -0
  80. kinto/core/storage/postgresql/migrations/migration_018_019.sql +8 -0
  81. kinto/core/storage/postgresql/migrations/migration_019_020.sql +7 -0
  82. kinto/core/storage/postgresql/migrations/migration_020_021.sql +68 -0
  83. kinto/core/storage/postgresql/migrations/migration_021_022.sql +62 -0
  84. kinto/core/storage/postgresql/migrations/migration_022_023.sql +5 -0
  85. kinto/core/storage/postgresql/migrations/migration_023_024.sql +6 -0
  86. kinto/core/storage/postgresql/migrations/migration_024_025.sql +6 -0
  87. kinto/core/storage/postgresql/migrator.py +98 -0
  88. kinto/core/storage/postgresql/pool.py +55 -0
  89. kinto/core/storage/postgresql/schema.sql +143 -0
  90. kinto/core/storage/testing.py +1857 -0
  91. kinto/core/storage/utils.py +37 -0
  92. kinto/core/testing.py +182 -0
  93. kinto/core/utils.py +553 -0
  94. kinto/core/views/__init__.py +0 -0
  95. kinto/core/views/batch.py +163 -0
  96. kinto/core/views/errors.py +145 -0
  97. kinto/core/views/heartbeat.py +106 -0
  98. kinto/core/views/hello.py +69 -0
  99. kinto/core/views/openapi.py +35 -0
  100. kinto/core/views/version.py +50 -0
  101. kinto/events.py +3 -0
  102. kinto/plugins/__init__.py +0 -0
  103. kinto/plugins/accounts/__init__.py +94 -0
  104. kinto/plugins/accounts/authentication.py +63 -0
  105. kinto/plugins/accounts/scripts.py +61 -0
  106. kinto/plugins/accounts/utils.py +13 -0
  107. kinto/plugins/accounts/views.py +136 -0
  108. kinto/plugins/admin/README.md +3 -0
  109. kinto/plugins/admin/VERSION +1 -0
  110. kinto/plugins/admin/__init__.py +40 -0
  111. kinto/plugins/admin/build/VERSION +1 -0
  112. kinto/plugins/admin/build/assets/index-CYFwtKtL.css +6 -0
  113. kinto/plugins/admin/build/assets/index-DJ0m93zA.js +149 -0
  114. kinto/plugins/admin/build/assets/logo-VBRiKSPX.png +0 -0
  115. kinto/plugins/admin/build/index.html +18 -0
  116. kinto/plugins/admin/public/help.html +25 -0
  117. kinto/plugins/admin/views.py +42 -0
  118. kinto/plugins/default_bucket/__init__.py +191 -0
  119. kinto/plugins/flush.py +28 -0
  120. kinto/plugins/history/__init__.py +65 -0
  121. kinto/plugins/history/listener.py +181 -0
  122. kinto/plugins/history/views.py +66 -0
  123. kinto/plugins/openid/__init__.py +131 -0
  124. kinto/plugins/openid/utils.py +14 -0
  125. kinto/plugins/openid/views.py +193 -0
  126. kinto/plugins/prometheus.py +300 -0
  127. kinto/plugins/statsd.py +85 -0
  128. kinto/schema_validation.py +135 -0
  129. kinto/views/__init__.py +34 -0
  130. kinto/views/admin.py +195 -0
  131. kinto/views/buckets.py +45 -0
  132. kinto/views/collections.py +58 -0
  133. kinto/views/contribute.py +39 -0
  134. kinto/views/groups.py +90 -0
  135. kinto/views/permissions.py +235 -0
  136. kinto/views/records.py +133 -0
  137. kinto-23.2.1.dist-info/METADATA +232 -0
  138. kinto-23.2.1.dist-info/RECORD +142 -0
  139. kinto-23.2.1.dist-info/WHEEL +5 -0
  140. kinto-23.2.1.dist-info/entry_points.txt +5 -0
  141. kinto-23.2.1.dist-info/licenses/LICENSE +13 -0
  142. kinto-23.2.1.dist-info/top_level.txt +1 -0
kinto/core/storage/postgresql/__init__.py
@@ -0,0 +1,1131 @@
+ import logging
+ import os
+ import warnings
+ from collections import defaultdict
+
+ from kinto.core.decorators import deprecate_kwargs
+ from kinto.core.storage import (
+     DEFAULT_DELETED_FIELD,
+     DEFAULT_ID_FIELD,
+     DEFAULT_MODIFIED_FIELD,
+     MISSING,
+     StorageBase,
+     exceptions,
+ )
+ from kinto.core.storage.postgresql.client import create_from_config
+ from kinto.core.storage.postgresql.migrator import MigratorMixin
+ from kinto.core.utils import COMPARISON, json
+ from kinto.core.utils import sqlalchemy as sa
+
+
+ logger = logging.getLogger(__name__)
+ HERE = os.path.dirname(__file__)
+
+
+ class Storage(StorageBase, MigratorMixin):
+     """Storage backend using PostgreSQL.
+
+     Recommended in production (*requires PostgreSQL 9.4 or higher*).
+
+     Enable in configuration::
+
+         kinto.storage_backend = kinto.core.storage.postgresql
+
+     Database location URI can be customized::
+
+         kinto.storage_url = postgresql://user:pass@db.server.lan:5432/dbname
+
+     Alternatively, username and password can rely on the system user ident,
+     or even be specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).
+
+     .. note::
+
+         Some tables and indices are created when ``kinto migrate`` is run.
+         This requires some privileges on the database, or errors will
+         be raised.
+
+         **Alternatively**, the schema can be initialized outside the
+         Python application, using the SQL file located in
+         :file:`kinto/core/storage/postgresql/schema.sql`. This makes it
+         possible to distinguish schema manipulation privileges from
+         schema usage.
+
+     A connection pool is enabled by default::
+
+         kinto.storage_pool_size = 10
+         kinto.storage_maxoverflow = 10
+         kinto.storage_max_backlog = -1
+         kinto.storage_pool_recycle = -1
+         kinto.storage_pool_timeout = 30
+         kinto.storage_poolclass =
+             kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog
+
+     The ``max_backlog`` setting limits the number of threads that can be in
+     the queue waiting for a connection. Once this limit has been reached, any
+     further attempts to acquire a connection will be rejected immediately,
+     instead of locking up all threads by keeping them waiting in the queue.
+
+     See the `dedicated section in the SQLAlchemy documentation
+     <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_
+     for default values and behaviour.
+
+     .. note::
+
+         Using a `dedicated connection pool <http://pgpool.net>`_ is still
+         recommended to allow load balancing and replication, or to limit
+         the number of connections used in a multi-process deployment.
+
+     """  # NOQA
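Pulling the documented settings together, a minimal configuration for this backend might look like the following sketch (values are illustrative; ``kinto.storage_max_fetch_size`` is read by ``load_from_config`` at the end of this file)::

    kinto.storage_backend = kinto.core.storage.postgresql
    kinto.storage_url = postgresql://user:pass@localhost:5432/kinto
    kinto.storage_pool_size = 10
    kinto.storage_max_fetch_size = 10000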
+     # MigratorMixin attributes.
+     name = "storage"
+     schema_version = 25
+     schema_file = os.path.join(HERE, "schema.sql")
+     migrations_directory = os.path.join(HERE, "migrations")
+
+     def __init__(self, client, max_fetch_size, *args, readonly=False, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.client = client
+         self._max_fetch_size = max_fetch_size
+         self.readonly = readonly
+
+     def create_schema(self, dry_run=False):
+         """Override create_schema to ensure DB encoding and TZ are OK."""
+         self._check_database_encoding()
+         self._check_database_timezone()
+         return super().create_schema(dry_run)
+
+     def initialize_schema(self, dry_run=False):
+         return self.create_or_migrate_schema(dry_run)
+
+     def _check_database_timezone(self):
+         # Make sure database has UTC timezone.
+         query = "SELECT current_setting('TIMEZONE') AS timezone;"
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(query))
+             obj = result.fetchone()
+         timezone = obj.timezone.upper()
+         if timezone != "UTC":  # pragma: no cover
+             msg = f"Database timezone is not UTC ({timezone})"
+             warnings.warn(msg)
+             logger.warning(msg)
+
+     def _check_database_encoding(self):
+         # Make sure database is UTF-8.
+         query = """
+         SELECT pg_encoding_to_char(encoding) AS encoding
+         FROM pg_database
+         WHERE datname = current_database();
+         """
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(query))
+             obj = result.fetchone()
+         encoding = obj.encoding.lower()
+         if encoding != "utf8":  # pragma: no cover
+             raise AssertionError(f"Unexpected database encoding {encoding}")
+
+     def get_installed_version(self):
+         """Return the current version of the schema, or None if none was found."""
+         # Check for the objects table, which definitely indicates a new
+         # DB. (metadata can exist if the permission schema ran first.)
+         table_exists_query = """
+         SELECT table_name
+         FROM information_schema.tables
+         WHERE table_name = '{}';
+         """
+         schema_version_metadata_query = """
+         SELECT value AS version
+         FROM metadata
+         WHERE name = 'storage_schema_version'
+         ORDER BY LPAD(value, 3, '0') DESC;
+         """
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(table_exists_query.format("objects")))
+             objects_table_exists = result.rowcount > 0
+             result = conn.execute(sa.text(table_exists_query.format("records")))
+             records_table_exists = result.rowcount > 0
+
+             if not objects_table_exists and not records_table_exists:
+                 return
+
+             result = conn.execute(sa.text(schema_version_metadata_query))
+             if result.rowcount > 0:
+                 return int(result.fetchone().version)
+
+             # No storage_schema_version row.
+             # Perhaps it got flush()ed by a pre-8.1.2 Kinto (which
+             # would wipe the metadata table).
+             # Alternatively, maybe we are working from a very early
+             # Cliquet version which never had a migration.
+             # Check for a created_at row. If this is gone, it has
+             # probably been flushed at some point.
+             query = "SELECT COUNT(*) FROM metadata WHERE name = 'created_at';"
+             result = conn.execute(sa.text(query))
+             was_flushed = int(result.fetchone()[0]) == 0
+             if not was_flushed:
+                 error_msg = "No schema history; assuming migration from Cliquet (version 1)."
+                 logger.warning(error_msg)
+                 return 1
+
+             # We have no idea what the schema is here. Migration
+             # is completely broken.
+             # Log an apologetic error message to the user and try
+             # to recover by assuming the last version where we had
+             # this bug.
+             logger.warning(UNKNOWN_SCHEMA_VERSION_MESSAGE)
+
+             # This is the last schema version where flushing the
+             # server would delete the schema version.
+             MAX_FLUSHABLE_SCHEMA_VERSION = 20
+             return MAX_FLUSHABLE_SCHEMA_VERSION
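The version-detection branching above condenses to a small decision table; a hedged paraphrase (an illustrative helper, not part of the wheel):

    def installed_version_sketch(any_table_exists, metadata_version, created_at_present):
        """Paraphrase of Storage.get_installed_version's decision logic."""
        if not any_table_exists:
            return None                   # brand-new database: nothing to migrate
        if metadata_version is not None:
            return int(metadata_version)  # normal case: version recorded in metadata
        if created_at_present:
            return 1                      # very old Cliquet schema, never migrated
        return 20                         # history lost to the flush() bug; assume v20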
+
+     def flush(self):
+         """Delete objects from tables without destroying schema.
+
+         This is used in test suites as well as in the flush plugin.
+         """
+         query = """
+         DELETE FROM objects;
+         DELETE FROM timestamps;
+         """
+         with self.client.connect(force_commit=True) as conn:
+             conn.execute(sa.text(query))
+         logger.debug("Flushed PostgreSQL storage tables")
+
+     def resource_timestamp(self, resource_name, parent_id):
+         query_existing = """
+         WITH existing_timestamps AS (
+             -- Timestamp of latest object.
+             (
+                 SELECT last_modified, as_epoch(last_modified) AS last_epoch
+                 FROM objects
+                 WHERE parent_id = :parent_id
+                   AND resource_name = :resource_name
+                 ORDER BY as_epoch(last_modified) DESC
+                 LIMIT 1
+             )
+             -- Timestamp of empty resource.
+             UNION
+             (
+                 SELECT last_modified, as_epoch(last_modified) AS last_epoch
+                 FROM timestamps
+                 WHERE parent_id = :parent_id
+                   AND resource_name = :resource_name
+             )
+         )
+         SELECT MAX(last_modified) AS last_modified, MAX(last_epoch) AS last_epoch
+         FROM existing_timestamps
+         """
+
+         create_if_missing = """
+         INSERT INTO timestamps (parent_id, resource_name, last_modified)
+         VALUES (:parent_id, :resource_name, COALESCE(:last_modified, clock_timestamp()::timestamp))
+         ON CONFLICT (parent_id, resource_name) DO NOTHING
+         RETURNING as_epoch(last_modified) AS last_epoch
+         """
+
+         placeholders = dict(parent_id=parent_id, resource_name=resource_name)
+         with self.client.connect(readonly=False) as conn:
+             existing_ts = None
+             ts_result = conn.execute(sa.text(query_existing), placeholders)
+             row = ts_result.fetchone()  # Will return (None, None) when empty.
+             existing_ts = row.last_modified
+
+             # If the backend is readonly, we should not try to create the timestamp.
+             if self.readonly:
+                 if existing_ts is None:
+                     error_msg = (
+                         "Cannot initialize empty resource timestamp when running in readonly."
+                     )
+                     raise exceptions.ReadonlyError(message=error_msg)
+                 obj = row
+             else:
+                 create_result = conn.execute(
+                     sa.text(create_if_missing), dict(last_modified=existing_ts, **placeholders)
+                 )
+                 obj = create_result.fetchone() or row
+
+         return obj.last_epoch
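A small usage sketch (assuming a ``storage`` instance wired up as in ``load_from_config`` at the end of this file; the ``parent_id`` value is illustrative):

    # First call on an empty resource inserts its row into "timestamps";
    # later calls return the same epoch until an object is written there.
    epoch = storage.resource_timestamp(
        resource_name="record", parent_id="/buckets/b1/collections/c1"
    )
    print(epoch)  # integer epoch in milliseconds

    # On a readonly backend the same call raises ReadonlyError instead of
    # initializing a missing timestamp.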
+
+     def all_resources_timestamps(self, resource_name):
+         query = """
+         WITH existing_timestamps AS (
+             -- Timestamp of latest object by parent_id.
+             (
+                 SELECT DISTINCT ON (parent_id) parent_id, last_modified
+                 FROM objects
+                 WHERE resource_name = :resource_name
+                 ORDER BY parent_id, last_modified DESC
+             )
+             -- Timestamp of resources without sub-objects.
+             UNION ALL
+             (
+                 SELECT parent_id, last_modified
+                 FROM timestamps
+                 WHERE resource_name = :resource_name
+             )
+         )
+         SELECT parent_id, as_epoch(MAX(last_modified)) AS last_modified
+         FROM existing_timestamps
+         GROUP BY parent_id
+         ORDER BY last_modified DESC
+         """
+         with self.client.connect(readonly=True) as conn:
+             result = conn.execute(sa.text(query), dict(resource_name=resource_name))
+             rows = result.fetchmany(self._max_fetch_size + 1)
+
+         results = {r[0]: r[1] for r in rows}
+         return results
+
+     @deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
+     def create(
+         self,
+         resource_name,
+         parent_id,
+         obj,
+         id_generator=None,
+         id_field=DEFAULT_ID_FIELD,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+     ):
+         id_generator = id_generator or self.id_generator
+         obj = {**obj}
+         if id_field in obj:
+             # Optimistically raise a unicity error if an object with the
+             # same id already exists.
+             # Even if this check doesn't find one, be robust against
+             # conflicts because we could race with another thread.
+             # Still, this reduces write load because SELECTs are
+             # cheaper than INSERTs.
+             try:
+                 existing = self.get(resource_name, parent_id, obj[id_field])
+                 raise exceptions.UnicityError(id_field, existing)
+             except exceptions.ObjectNotFoundError:
+                 pass
+         else:
+             obj[id_field] = id_generator()
+
+         # Remove redundancy in data field
+         query_object = {**obj}
+         query_object.pop(id_field, None)
+         query_object.pop(modified_field, None)
+
+         # If there is an object in the table and it is deleted = TRUE,
+         # we want to replace it. Otherwise, we want to do nothing and
+         # throw a UnicityError. Per
+         # https://stackoverflow.com/questions/15939902/is-select-or-insert-in-a-function-prone-to-race-conditions/15950324#15950324
+         # a WHERE clause in the DO UPDATE will lock the conflicting
+         # row whether it is true or not, so the subsequent SELECT is
+         # safe. If the conflicting row is live, the WHERE filters the
+         # update out and no row is returned, which tells us to throw.
+         query = """
+         INSERT INTO objects (id, parent_id, resource_name, data, last_modified, deleted)
+         VALUES (:object_id, :parent_id,
+                 :resource_name, (:data)::JSONB,
+                 from_epoch(:last_modified),
+                 FALSE)
+         ON CONFLICT (id, parent_id, resource_name) DO UPDATE
+         SET last_modified = from_epoch(:last_modified),
+             data = (:data)::JSONB,
+             deleted = FALSE
+         WHERE objects.deleted = TRUE
+         RETURNING id, data, as_epoch(last_modified) AS last_modified;
+         """
+
+         safe_holders = {}
+         placeholders = dict(
+             object_id=obj[id_field],
+             parent_id=parent_id,
+             resource_name=resource_name,
+             last_modified=obj.get(modified_field),
+             data=json.dumps(query_object),
+         )
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(query % safe_holders), placeholders)
+             inserted = result.fetchone()
+
+         if not inserted:
+             raise exceptions.UnicityError(id_field)
+
+         obj[modified_field] = inserted.last_modified
+         return obj
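The tombstone-revival upsert is worth seeing in isolation. A minimal runnable sketch of the same ``ON CONFLICT ... DO UPDATE ... WHERE`` pattern, using SQLite so it needs no server (3.35+ for ``RETURNING``; the table is a stand-in for ``objects``):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE objects (id TEXT PRIMARY KEY, deleted INTEGER, data TEXT)")
    con.execute("INSERT INTO objects VALUES ('a', 1, '{}')")  # an existing tombstone

    # The UPDATE branch only fires on tombstones; a live duplicate would make
    # fetchone() return None, which is the cue to raise a unicity error.
    row = con.execute(
        """
        INSERT INTO objects (id, deleted, data) VALUES (?, 0, ?)
        ON CONFLICT (id) DO UPDATE SET deleted = 0, data = excluded.data
        WHERE objects.deleted = 1
        RETURNING id
        """,
        ("a", '{"x": 1}'),
    ).fetchone()
    print(row)  # ('a',): the tombstone was revived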
+
+     @deprecate_kwargs({"collection_id": "resource_name"})
+     def get(
+         self,
+         resource_name,
+         parent_id,
+         object_id,
+         id_field=DEFAULT_ID_FIELD,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+     ):
+         query = """
+         SELECT as_epoch(last_modified) AS last_modified, data
+         FROM objects
+         WHERE id = :object_id
+           AND parent_id = :parent_id
+           AND resource_name = :resource_name
+           AND NOT deleted;
+         """
+         placeholders = dict(object_id=object_id, parent_id=parent_id, resource_name=resource_name)
+         with self.client.connect(readonly=True) as conn:
+             result = conn.execute(sa.text(query), placeholders)
+             if result.rowcount == 0:
+                 raise exceptions.ObjectNotFoundError(object_id)
+             else:
+                 existing = result.fetchone()
+
+         obj = existing.data
+         obj[id_field] = object_id
+         obj[modified_field] = existing.last_modified
+         return obj
+
+     @deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
+     def update(
+         self,
+         resource_name,
+         parent_id,
+         object_id,
+         obj,
+         id_field=DEFAULT_ID_FIELD,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+     ):
+         # Remove redundancy in data field
+         query_object = {**obj}
+         query_object.pop(id_field, None)
+         query_object.pop(modified_field, None)
+
+         query = """
+         INSERT INTO objects (id, parent_id, resource_name, data, last_modified, deleted)
+         VALUES (:object_id, :parent_id,
+                 :resource_name, (:data)::JSONB,
+                 from_epoch(:last_modified),
+                 FALSE)
+         ON CONFLICT (id, parent_id, resource_name) DO UPDATE
+         SET data = (:data)::JSONB,
+             deleted = FALSE,
+             last_modified = GREATEST(from_epoch(:last_modified),
+                                      EXCLUDED.last_modified)
+         RETURNING as_epoch(last_modified) AS last_modified;
+         """
+         placeholders = dict(
+             object_id=object_id,
+             parent_id=parent_id,
+             resource_name=resource_name,
+             last_modified=obj.get(modified_field),
+             data=json.dumps(query_object),
+         )
+
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(query), placeholders)
+             updated = result.fetchone()
+
+         obj = {**obj, id_field: object_id}
+         obj[modified_field] = updated.last_modified
+         return obj
+
+     @deprecate_kwargs({"collection_id": "resource_name"})
+     def delete(
+         self,
+         resource_name,
+         parent_id,
+         object_id,
+         id_field=DEFAULT_ID_FIELD,
+         with_deleted=True,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+         deleted_field=DEFAULT_DELETED_FIELD,
+         last_modified=None,
+     ):
+         if with_deleted:
+             query = """
+             UPDATE objects
+             SET deleted=TRUE,
+                 data=(:deleted_data)::JSONB,
+                 last_modified=from_epoch(:last_modified)
+             WHERE id = :object_id
+               AND parent_id = :parent_id
+               AND resource_name = :resource_name
+               AND NOT deleted
+             RETURNING as_epoch(last_modified) AS last_modified;
+             """
+         else:
+             query = """
+             DELETE FROM objects
+             WHERE id = :object_id
+               AND parent_id = :parent_id
+               AND resource_name = :resource_name
+               AND NOT deleted
+             RETURNING as_epoch(last_modified) AS last_modified;
+             """
+         deleted_data = json.dumps(dict([(deleted_field, True)]))
+         placeholders = dict(
+             object_id=object_id,
+             parent_id=parent_id,
+             resource_name=resource_name,
+             last_modified=last_modified,
+             deleted_data=deleted_data,
+         )
+
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(query), placeholders)
+             if result.rowcount == 0:
+                 raise exceptions.ObjectNotFoundError(object_id)
+             updated = result.fetchone()
+
+         obj = {}
+         obj[modified_field] = updated.last_modified
+         obj[id_field] = object_id
+
+         obj[deleted_field] = True
+         return obj
+
+     @deprecate_kwargs({"collection_id": "resource_name"})
+     def delete_all(
+         self,
+         resource_name,
+         parent_id,
+         filters=None,
+         sorting=None,
+         pagination_rules=None,
+         limit=None,
+         id_field=DEFAULT_ID_FIELD,
+         with_deleted=True,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+         deleted_field=DEFAULT_DELETED_FIELD,
+     ):
+         if with_deleted:
+             query = """
+             WITH matching_objects AS (
+                 SELECT id, parent_id, resource_name
+                 FROM objects
+                 WHERE {parent_id_filter}
+                       {resource_name_filter}
+                       AND NOT deleted
+                       {conditions_filter}
+                       {pagination_rules}
+                 {sorting}
+                 LIMIT :pagination_limit
+                 FOR UPDATE
+             )
+             UPDATE objects
+             SET deleted=TRUE, data=(:deleted_data)::JSONB, last_modified=NULL
+             FROM matching_objects
+             WHERE objects.id = matching_objects.id
+               AND objects.parent_id = matching_objects.parent_id
+               AND objects.resource_name = matching_objects.resource_name
+             RETURNING objects.id, as_epoch(last_modified) AS last_modified;
+             """
+         else:
+             query = """
+             WITH matching_objects AS (
+                 SELECT id, parent_id, resource_name
+                 FROM objects
+                 WHERE {parent_id_filter}
+                       {resource_name_filter}
+                       AND NOT deleted
+                       {conditions_filter}
+                       {pagination_rules}
+                 {sorting}
+                 LIMIT :pagination_limit
+                 FOR UPDATE
+             )
+             DELETE
+             FROM objects
+             USING matching_objects
+             WHERE objects.id = matching_objects.id
+               AND objects.parent_id = matching_objects.parent_id
+               AND objects.resource_name = matching_objects.resource_name
+             RETURNING objects.id, as_epoch(last_modified) AS last_modified;
+             """
+
+         id_field = id_field or self.id_field
+         modified_field = modified_field or self.modified_field
+         deleted_data = json.dumps(dict([(deleted_field, True)]))
+         placeholders = dict(
+             parent_id=parent_id, resource_name=resource_name, deleted_data=deleted_data
+         )
+         # Safe strings
+         safeholders = defaultdict(str)
+         # Handle parent_id as a wildcard pattern only if it contains *
+         if "*" in parent_id:
+             safeholders["parent_id_filter"] = "parent_id LIKE :parent_id"
+             placeholders["parent_id"] = parent_id.replace("*", "%")
+         else:
+             safeholders["parent_id_filter"] = "parent_id = :parent_id"
+         # If resource is None, remove it from the query.
+         if resource_name is None:
+             safeholders["resource_name_filter"] = ""
+         else:
+             safeholders["resource_name_filter"] = "AND resource_name = :resource_name"  # NOQA
+
+         if filters:
+             safe_sql, holders = self._format_conditions(filters, id_field, modified_field)
+             safeholders["conditions_filter"] = f"AND {safe_sql}"
+             placeholders.update(**holders)
+
+         if sorting:
+             sql, holders = self._format_sorting(sorting, id_field, modified_field)
+             safeholders["sorting"] = sql
+             placeholders.update(**holders)
+
+         if pagination_rules:
+             sql, holders = self._format_pagination(pagination_rules, id_field, modified_field)
+             safeholders["pagination_rules"] = f"AND ({sql})"
+             placeholders.update(**holders)
+
+         # Limit the number of results (pagination).
+         limit = min(self._max_fetch_size, limit) if limit else self._max_fetch_size
+         placeholders["pagination_limit"] = limit
+
+         query = query.format_map(safeholders)
+
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(query), placeholders)
+             deleted = result.fetchmany(self._max_fetch_size)
+
+         objects = []
+         for result in deleted:
+             obj = {}
+             obj[id_field] = result.id
+             obj[modified_field] = result.last_modified
+             obj[deleted_field] = True
+             objects.append(obj)
+
+         return objects
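Note the two-tier templating at work here: trusted SQL fragments are spliced into ``{...}`` slots with ``format_map``, while user-supplied values only ever travel as bound ``:name`` parameters. A minimal sketch of the pattern (names illustrative):

    from collections import defaultdict

    query = "SELECT id FROM objects WHERE {parent_id_filter}{conditions_filter}"
    safeholders = defaultdict(str)               # trusted SQL fragments only
    safeholders["parent_id_filter"] = "parent_id = :parent_id"
    placeholders = {"parent_id": "/buckets/b1"}  # escaped by the driver

    print(query.format_map(safeholders))
    # SELECT id FROM objects WHERE parent_id = :parent_id
    # The defaultdict(str) makes unused {slots} collapse to empty strings.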
+
+     @deprecate_kwargs({"collection_id": "resource_name"})
+     def purge_deleted(
+         self,
+         resource_name,
+         parent_id,
+         before=None,
+         max_retained=None,
+         id_field=DEFAULT_ID_FIELD,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+     ):
+         delete_tombstones = """
+         DELETE
+         FROM objects
+         WHERE {parent_id_filter}
+               {resource_name_filter}
+               {conditions_filter}
+         """
+
+         if max_retained is not None:
+             if before is not None:
+                 raise ValueError("`before` and `max_retained` are exclusive arguments. Pick one.")
+
+             delete_tombstones = """
+             WITH ranked AS (
+                 SELECT
+                     id AS objid,
+                     parent_id,
+                     resource_name,
+                     ROW_NUMBER() OVER (
+                         PARTITION BY parent_id, resource_name
+                         ORDER BY last_modified DESC
+                     ) AS rn
+                 FROM objects
+             )
+             DELETE FROM objects
+             WHERE id IN (
+                 SELECT objid
+                 FROM ranked
+                 WHERE
+                     {parent_id_filter}
+                     {resource_name_filter}
+                     AND rn > :max_retained
+             )
+             """
+
+         id_field = id_field or self.id_field
+         modified_field = modified_field or self.modified_field
+         placeholders = dict(
+             parent_id=parent_id, resource_name=resource_name, max_retained=max_retained
+         )
+         # Safe strings
+         safeholders = defaultdict(str)
+         # Handle parent_id as a wildcard pattern only if it contains *
+         if "*" in parent_id:
+             safeholders["parent_id_filter"] = "parent_id LIKE :parent_id"
+             placeholders["parent_id"] = parent_id.replace("*", "%")
+         else:
+             safeholders["parent_id_filter"] = "parent_id = :parent_id"
+         # If resource is None, remove it from the query.
+         if resource_name is None:
+             safeholders["resource_name_filter"] = ""
+         else:
+             safeholders["resource_name_filter"] = "AND resource_name = :resource_name"  # NOQA
+
+         if before is not None:
+             safeholders["conditions_filter"] = "AND as_epoch(last_modified) < :before"
+             placeholders["before"] = before
+
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(delete_tombstones.format_map(safeholders)), placeholders)
+             deleted = result.rowcount
+
+             # If purging everything from a parent_id, then clear timestamps.
+             if resource_name is None and before is None:
+                 delete_timestamps = """
+                 DELETE
+                 FROM timestamps
+                 WHERE {parent_id_filter}
+                 """
+                 conn.execute(sa.text(delete_timestamps.format_map(safeholders)), placeholders)
+
+         return deleted
+
+     def list_all(
+         self,
+         resource_name,
+         parent_id,
+         filters=None,
+         sorting=None,
+         pagination_rules=None,
+         limit=None,
+         include_deleted=False,
+         id_field=DEFAULT_ID_FIELD,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+         deleted_field=DEFAULT_DELETED_FIELD,
+     ):
+         query = """
+         SELECT id, as_epoch(last_modified) AS last_modified, data
+         FROM objects
+         WHERE {parent_id_filter}
+           AND resource_name = :resource_name
+           {conditions_deleted}
+           {conditions_filter}
+           {pagination_rules}
+         {sorting}
+         LIMIT :pagination_limit;
+         """
+
+         rows = self._get_rows(
+             query,
+             resource_name,
+             parent_id,
+             filters=filters,
+             sorting=sorting,
+             pagination_rules=pagination_rules,
+             limit=limit,
+             include_deleted=include_deleted,
+             id_field=id_field,
+             modified_field=modified_field,
+             deleted_field=deleted_field,
+         )
+
+         if len(rows) == 0:
+             return []
+
+         records = []
+         for result in rows:
+             record = result.data
+             record[id_field] = result.id
+             record[modified_field] = result.last_modified
+             records.append(record)
+         return records
+
+     def count_all(
+         self,
+         resource_name,
+         parent_id,
+         filters=None,
+         id_field=DEFAULT_ID_FIELD,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+         deleted_field=DEFAULT_DELETED_FIELD,
+     ):
+         query = """
+         SELECT COUNT(*) AS total_count
+         FROM objects
+         WHERE {parent_id_filter}
+           AND resource_name = :resource_name
+           AND NOT deleted
+           {conditions_filter}
+         """
+         rows = self._get_rows(
+             query,
+             resource_name,
+             parent_id,
+             filters=filters,
+             id_field=id_field,
+             modified_field=modified_field,
+             deleted_field=deleted_field,
+         )
+         return rows[0].total_count
+
+     def trim_objects(
+         self,
+         resource_name: str,
+         parent_id: str,
+         filters: list,
+         max_objects: int,
+         id_field: str = DEFAULT_ID_FIELD,
+         modified_field: str = DEFAULT_MODIFIED_FIELD,
+     ) -> int:
+         query = """
+         WITH to_delete AS (
+             SELECT {id_field}
+             FROM objects
+             WHERE parent_id = :parent_id
+               AND resource_name = :resource_name
+               {conditions_filter}
+             ORDER BY {modified_field} DESC
+             OFFSET :max_objects
+         )
+         DELETE FROM objects o
+         USING to_delete d
+         WHERE o.id = d.id
+         RETURNING 1;
+         """
+
+         placeholders = dict(
+             parent_id=parent_id, resource_name=resource_name, max_objects=max_objects
+         )
+         safe_sql, holders = self._format_conditions(filters, id_field, modified_field)
+         placeholders.update(**holders)
+
+         safeholders = dict(id_field=id_field, modified_field=modified_field)
+         safeholders["conditions_filter"] = f"AND {safe_sql}"
+
+         with self.client.connect() as conn:
+             result = conn.execute(sa.text(query.format_map(safeholders)), placeholders)
+             # Using RETURNING so rowcount reflects the number deleted
+             return result.rowcount
+
+     def _get_rows(
+         self,
+         query,
+         resource_name,
+         parent_id,
+         filters=None,
+         sorting=None,
+         pagination_rules=None,
+         limit=None,
+         include_deleted=False,
+         id_field=DEFAULT_ID_FIELD,
+         modified_field=DEFAULT_MODIFIED_FIELD,
+         deleted_field=DEFAULT_DELETED_FIELD,
+     ):
+         # Unsafe strings escaped by PostgreSQL
+         placeholders = dict(parent_id=parent_id, resource_name=resource_name)
+
+         # Safe strings
+         safeholders = defaultdict(str)
+
+         # Handle parent_id as a wildcard pattern only if it contains *
+         if "*" in parent_id:
+             safeholders["parent_id_filter"] = "parent_id LIKE :parent_id"
+             placeholders["parent_id"] = parent_id.replace("*", "%")
+         else:
+             safeholders["parent_id_filter"] = "parent_id = :parent_id"
+
+         if filters:
+             safe_sql, holders = self._format_conditions(filters, id_field, modified_field)
+             safeholders["conditions_filter"] = f"AND {safe_sql}"
+             placeholders.update(**holders)
+
+         if not include_deleted:
+             safeholders["conditions_deleted"] = "AND NOT deleted"
+
+         if sorting:
+             sql, holders = self._format_sorting(sorting, id_field, modified_field)
+             safeholders["sorting"] = sql
+             placeholders.update(**holders)
+
+         if pagination_rules:
+             sql, holders = self._format_pagination(pagination_rules, id_field, modified_field)
+             safeholders["pagination_rules"] = f"AND ({sql})"
+             placeholders.update(**holders)
+
+         # Limit the number of results (pagination).
+         limit = min(self._max_fetch_size + 1, limit) if limit else self._max_fetch_size
+         placeholders["pagination_limit"] = limit
+
+         with self.client.connect(readonly=True) as conn:
+             result = conn.execute(sa.text(query.format_map(safeholders)), placeholders)
+             return result.fetchmany(self._max_fetch_size + 1)
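Note the ``+ 1`` on both the limit and the fetch: pulling one row past the cap gives callers a cheap signal that results were truncated, without a COUNT round-trip. A plausible reading of that design, sketched with illustrative values:

    MAX_FETCH = 3
    rows = [1, 2, 3, 4]       # pretend the query matched more than MAX_FETCH
    page = rows[:MAX_FETCH]
    truncated = len(rows) > MAX_FETCH
    print(page, truncated)    # [1, 2, 3] True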
+
+     def _format_conditions(self, filters, id_field, modified_field, prefix="filters"):
+         """Format the filters list in SQL, with placeholders for safe escaping.
+
+         .. note::
+
+             All conditions are combined using AND.
+
+         .. note::
+
+             Field name and value are escaped as they come from the HTTP API.
+
+         :returns: A SQL string with placeholders, and a dict mapping
+             placeholders to actual values.
+         :rtype: tuple
+         """
+         operators = {
+             COMPARISON.EQ: "=",
+             COMPARISON.NOT: "<>",
+             COMPARISON.IN: "IN",
+             COMPARISON.EXCLUDE: "NOT IN",
+             COMPARISON.LIKE: "ILIKE",
+             COMPARISON.CONTAINS: "@>",
+         }
+
+         conditions = []
+         holders = {}
+         for i, filtr in enumerate(filters):
+             value = filtr.value
+             is_like_query = filtr.operator == COMPARISON.LIKE
+
+             if filtr.field == id_field:
+                 sql_field = "id"
+                 if isinstance(value, int):
+                     value = str(value)
+             elif filtr.field == modified_field:
+                 sql_field = "as_epoch(last_modified)"
+             else:
+                 column_name = "data"
+                 # Subfields: ``person.name`` becomes ``data->person->>name``
+                 subfields = filtr.field.split(".")
+                 for j, subfield in enumerate(subfields):
+                     # Safely escape field name
+                     field_holder = f"{prefix}_field_{i}_{j}"
+                     holders[field_holder] = subfield
+                     # Use ->> to convert the last level to text if
+                     # needed for LIKE query. (Other queries do JSONB comparison.)
+                     column_name += "->>" if j == len(subfields) - 1 and is_like_query else "->"
+                     column_name += f":{field_holder}"
+                 sql_field = column_name
+
+             string_field = filtr.field in (id_field, modified_field) or is_like_query
+             if not string_field and value != MISSING:
+                 # JSONB-ify the value.
+                 if filtr.operator not in (
+                     COMPARISON.IN,
+                     COMPARISON.EXCLUDE,
+                     COMPARISON.CONTAINS_ANY,
+                 ):
+                     value = json.dumps(value)
+                 else:
+                     value = [json.dumps(v) for v in value]
+
+             if filtr.operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
+                 value = tuple(value)
+                 # WHERE field IN (); -- Fails with syntax error.
+                 if len(value) == 0:
+                     value = (None,)
+
+             if is_like_query:
+                 # Operand should be a string.
+                 # Add implicit start/end wildcards if none is specified.
+                 if "*" not in value:
+                     value = f"*{value}*"
+                 value = value.replace("*", "%")
+
+             if filtr.operator == COMPARISON.HAS:
+                 operator = "IS NOT NULL" if filtr.value else "IS NULL"
+                 cond = f"{sql_field} {operator}"
+
+             elif filtr.operator == COMPARISON.CONTAINS:
+                 value_holder = f"{prefix}_value_{i}"
+                 holders[value_holder] = value
+                 # In case the field is not a sequence, we ignore the object.
+                 is_json_sequence = f"jsonb_typeof({sql_field}) = 'array'"
+                 sql_operator = operators[filtr.operator]
+                 cond = f"{is_json_sequence} AND {sql_field} {sql_operator} :{value_holder}"
+
+             elif filtr.operator == COMPARISON.CONTAINS_ANY:
+                 value_holder = f"{prefix}_value_{i}"
+                 holders[value_holder] = value
+                 # In case the field is not a sequence, we ignore the object.
+                 is_json_sequence = f"jsonb_typeof({sql_field}) = 'array'"
+                 # Postgres's && operator doesn't support jsonbs.
+                 # However, it does support Postgres arrays of any
+                 # type. Assume that the referenced field is a JSON
+                 # array and convert it to a Postgres array.
+                 data_as_array = f"""
+                 (SELECT array_agg(elems) FROM jsonb_array_elements({sql_field}) elems)
+                 """
+                 cond = f"{is_json_sequence} AND {data_as_array} && (:{value_holder})::jsonb[]"
+
+             elif value != MISSING:
+                 # Safely escape value. MISSINGs get handled below.
+                 value_holder = f"{prefix}_value_{i}"
+                 holders[value_holder] = value
+
+                 sql_operator = operators.setdefault(filtr.operator, filtr.operator.value)
+                 cond = f"{sql_field} {sql_operator} :{value_holder}"
+
+             # If the field is missing, column_name will produce
+             # NULL. NULL has strange properties with comparisons
+             # in SQL -- NULL = anything => NULL, NULL <> anything => NULL.
+             # We generally want missing fields to be treated as a
+             # special value that compares as different from
+             # everything, including JSON null. Do this on a
+             # per-operator basis.
+             null_false_operators = (
+                 # NULLs aren't EQ to anything (definitionally).
+                 COMPARISON.EQ,
+                 # So they can't match anything in an IN.
+                 COMPARISON.IN,
+                 # Nor can they be LIKE anything.
+                 COMPARISON.LIKE,
+                 # NULLs don't contain anything.
+                 COMPARISON.CONTAINS,
+                 COMPARISON.CONTAINS_ANY,
+             )
+             null_true_operators = (
+                 # NULLs are automatically not equal to everything.
+                 COMPARISON.NOT,
+                 # Thus they can never be excluded.
+                 COMPARISON.EXCLUDE,
+                 # Match Postgres's default sort behavior
+                 # (NULLS LAST) by allowing NULLs to
+                 # automatically be greater than everything.
+                 COMPARISON.GT,
+                 COMPARISON.MIN,
+             )
+
+             if not (filtr.field == id_field or filtr.field == modified_field):
+                 if value == MISSING:
+                     # Handle MISSING values. The main use case for this is
+                     # pagination, since there's no way to encode MISSING
+                     # at the HTTP API level. Because we only need to cover
+                     # pagination, we don't have to worry about any
+                     # operators besides LT, LE, GT, GE, and EQ, and
+                     # never worry about id_field or modified_field.
+                     #
+                     # Comparing a value against NULL is not the same
+                     # as comparing a NULL against some other value, so
+                     # we need another set of operators for which
+                     # NULLs are OK.
+                     if filtr.operator in (COMPARISON.EQ, COMPARISON.MIN):
+                         # If a row is NULL, then it can be == NULL
+                         # (for the purposes of pagination).
+                         # >= NULL should only match rows that are
+                         # NULL, since there's nothing higher.
+                         cond = f"{sql_field} IS NULL"
+                     elif filtr.operator == COMPARISON.LT:
+                         # If we're looking for < NULL, match only
+                         # non-nulls.
+                         cond = f"{sql_field} IS NOT NULL"
+                     elif filtr.operator == COMPARISON.MAX:
+                         # <= NULL should include everything -- NULL
+                         # because it's equal, and non-nulls because
+                         # they're <.
+                         cond = "TRUE"
+                     elif filtr.operator == COMPARISON.GT:
+                         # Nothing can be greater than NULL (that is,
+                         # higher in search order).
+                         cond = "FALSE"
+                     else:
+                         raise ValueError("Somehow we got a filter with a MISSING value")
+                 elif filtr.operator in null_false_operators:
+                     cond = f"({sql_field} IS NOT NULL AND {cond})"
+                 elif filtr.operator in null_true_operators:
+                     cond = f"({sql_field} IS NULL OR {cond})"
+                 else:
+                     # No need to check for LT and MAX because NULL < foo
+                     # is NULL, which is falsy in SQL.
+                     pass
+
+             conditions.append(cond)
+
+         safe_sql = " AND ".join(conditions)
+         return safe_sql, holders
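To make the compiler's output concrete, a sketch of calling it directly (assumes kinto is importable; ``_format_conditions`` never reads ``self``, so ``None`` stands in for brevity):

    from kinto.core.storage import Filter
    from kinto.core.storage.postgresql import Storage
    from kinto.core.utils import COMPARISON

    sql, values = Storage._format_conditions(
        None, [Filter("name", "Ali", COMPARISON.EQ)], "id", "last_modified"
    )
    print(sql)
    # (data->:filters_field_0_0 IS NOT NULL AND data->:filters_field_0_0 = :filters_value_0)
    print(values)
    # {'filters_field_0_0': 'name', 'filters_value_0': '"Ali"'}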
+
+     def _format_pagination(self, pagination_rules, id_field, modified_field):
+         """Format the pagination rules in SQL, with placeholders for
+         safe escaping.
+
+         .. note::
+
+             All rules are combined using OR.
+
+         .. note::
+
+             Field names are escaped as they come from the HTTP API.
+
+         :returns: A SQL string with placeholders, and a dict mapping
+             placeholders to actual values.
+         :rtype: tuple
+         """
+         rules = []
+         placeholders = {}
+
+         for i, rule in enumerate(pagination_rules):
+             prefix = f"rules_{i}"
+             safe_sql, holders = self._format_conditions(
+                 rule, id_field, modified_field, prefix=prefix
+             )
+             rules.append(safe_sql)
+             placeholders.update(**holders)
+
+         safe_sql = " OR ".join([f"({r})" for r in rules])
+         return safe_sql, placeholders
+
+     def _format_sorting(self, sorting, id_field, modified_field):
+         """Format the sorting in SQL, with placeholders for safe escaping.
+
+         .. note::
+
+             Field names are escaped as they come from the HTTP API.
+
+         :returns: A SQL string with placeholders, and a dict mapping
+             placeholders to actual values.
+         :rtype: tuple
+         """
+         sorts = []
+         holders = {}
+         for i, sort in enumerate(sorting):
+             if sort.field == id_field:
+                 sql_field = "id"
+             elif sort.field == modified_field:
+                 sql_field = "objects.last_modified"
+             else:
+                 # Subfields: ``person.name`` becomes ``data->person->name``
+                 subfields = sort.field.split(".")
+                 sql_field = "data"
+                 for j, subfield in enumerate(subfields):
+                     # Safely escape field name
+                     field_holder = f"sort_field_{i}_{j}"
+                     holders[field_holder] = subfield
+                     sql_field += f"->(:{field_holder})"
+
+             sql_direction = "ASC" if sort.direction > 0 else "DESC"
+             sql_sort = f"{sql_field} {sql_direction}"
+             sorts.append(sql_sort)
+
+         safe_sql = f"ORDER BY {', '.join(sorts)}"
+         return safe_sql, holders
+
+
+ def load_from_config(config):
+     settings = config.get_settings()
+     max_fetch_size = int(settings["storage_max_fetch_size"])
+     readonly = settings.get("readonly", False)
+     client = create_from_config(config, prefix="storage_")
+     return Storage(client=client, max_fetch_size=max_fetch_size, readonly=readonly)
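A hedged wiring sketch (assumes a reachable PostgreSQL and installed dependencies; the settings values are illustrative, and kinto.core normally calls ``load_from_config`` itself):

    from pyramid.config import Configurator

    config = Configurator(
        settings={
            "storage_url": "postgresql://user:pass@localhost:5432/kinto",
            "storage_max_fetch_size": "10000",
        }
    )
    storage = load_from_config(config)
    storage.initialize_schema()  # creates the schema.sql tables or runs migrations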
+
+
+ UNKNOWN_SCHEMA_VERSION_MESSAGE = """
+ Missing schema history. Perhaps at some point, this Kinto server was
+ flushed. Due to a bug in older Kinto versions (see
+ https://github.com/Kinto/kinto/issues/1460), flushing the server would
+ cause us to forget what version of the schema was in use. This means
+ automatic migration is impossible.
+
+ Historically, when this happened, Kinto would just assume that the
+ wiped server had the "current" schema, so you may have been missing a
+ schema version for quite some time.
+
+ To try to recover, we have assumed a schema version corresponding to
+ the last Kinto version with this bug (schema version 20). However, if
+ a migration fails, or most queries are broken, you may not actually be
+ running that schema. You can try to fix this by manually setting the
+ schema version in the database to what you think it should be, using a
+ command like:
+
+     INSERT INTO metadata VALUES ('storage_schema_version', '19');
+
+ See https://github.com/Kinto/kinto/wiki/Schema-versions for more details.
+
+ """.strip()