kinto-23.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinto/__init__.py +92 -0
- kinto/__main__.py +249 -0
- kinto/authorization.py +134 -0
- kinto/config/__init__.py +94 -0
- kinto/config/kinto.tpl +270 -0
- kinto/contribute.json +27 -0
- kinto/core/__init__.py +246 -0
- kinto/core/authentication.py +48 -0
- kinto/core/authorization.py +311 -0
- kinto/core/cache/__init__.py +131 -0
- kinto/core/cache/memcached.py +112 -0
- kinto/core/cache/memory.py +104 -0
- kinto/core/cache/postgresql/__init__.py +178 -0
- kinto/core/cache/postgresql/schema.sql +23 -0
- kinto/core/cache/testing.py +208 -0
- kinto/core/cornice/__init__.py +93 -0
- kinto/core/cornice/cors.py +144 -0
- kinto/core/cornice/errors.py +40 -0
- kinto/core/cornice/pyramidhook.py +373 -0
- kinto/core/cornice/renderer.py +89 -0
- kinto/core/cornice/resource.py +205 -0
- kinto/core/cornice/service.py +641 -0
- kinto/core/cornice/util.py +138 -0
- kinto/core/cornice/validators/__init__.py +94 -0
- kinto/core/cornice/validators/_colander.py +142 -0
- kinto/core/cornice/validators/_marshmallow.py +182 -0
- kinto/core/cornice_swagger/__init__.py +92 -0
- kinto/core/cornice_swagger/converters/__init__.py +21 -0
- kinto/core/cornice_swagger/converters/exceptions.py +6 -0
- kinto/core/cornice_swagger/converters/parameters.py +90 -0
- kinto/core/cornice_swagger/converters/schema.py +249 -0
- kinto/core/cornice_swagger/swagger.py +725 -0
- kinto/core/cornice_swagger/templates/index.html +73 -0
- kinto/core/cornice_swagger/templates/index_script_template.html +21 -0
- kinto/core/cornice_swagger/util.py +42 -0
- kinto/core/cornice_swagger/views.py +78 -0
- kinto/core/decorators.py +74 -0
- kinto/core/errors.py +216 -0
- kinto/core/events.py +301 -0
- kinto/core/initialization.py +738 -0
- kinto/core/listeners/__init__.py +9 -0
- kinto/core/metrics.py +94 -0
- kinto/core/openapi.py +115 -0
- kinto/core/permission/__init__.py +202 -0
- kinto/core/permission/memory.py +167 -0
- kinto/core/permission/postgresql/__init__.py +489 -0
- kinto/core/permission/postgresql/migrations/migration_001_002.sql +18 -0
- kinto/core/permission/postgresql/schema.sql +41 -0
- kinto/core/permission/testing.py +487 -0
- kinto/core/resource/__init__.py +1311 -0
- kinto/core/resource/model.py +412 -0
- kinto/core/resource/schema.py +502 -0
- kinto/core/resource/viewset.py +230 -0
- kinto/core/schema.py +119 -0
- kinto/core/scripts.py +50 -0
- kinto/core/statsd.py +1 -0
- kinto/core/storage/__init__.py +436 -0
- kinto/core/storage/exceptions.py +53 -0
- kinto/core/storage/generators.py +58 -0
- kinto/core/storage/memory.py +651 -0
- kinto/core/storage/postgresql/__init__.py +1131 -0
- kinto/core/storage/postgresql/client.py +120 -0
- kinto/core/storage/postgresql/migrations/migration_001_002.sql +10 -0
- kinto/core/storage/postgresql/migrations/migration_002_003.sql +33 -0
- kinto/core/storage/postgresql/migrations/migration_003_004.sql +18 -0
- kinto/core/storage/postgresql/migrations/migration_004_005.sql +20 -0
- kinto/core/storage/postgresql/migrations/migration_005_006.sql +11 -0
- kinto/core/storage/postgresql/migrations/migration_006_007.sql +74 -0
- kinto/core/storage/postgresql/migrations/migration_007_008.sql +66 -0
- kinto/core/storage/postgresql/migrations/migration_008_009.sql +41 -0
- kinto/core/storage/postgresql/migrations/migration_009_010.sql +98 -0
- kinto/core/storage/postgresql/migrations/migration_010_011.sql +14 -0
- kinto/core/storage/postgresql/migrations/migration_011_012.sql +9 -0
- kinto/core/storage/postgresql/migrations/migration_012_013.sql +71 -0
- kinto/core/storage/postgresql/migrations/migration_013_014.sql +14 -0
- kinto/core/storage/postgresql/migrations/migration_014_015.sql +95 -0
- kinto/core/storage/postgresql/migrations/migration_015_016.sql +4 -0
- kinto/core/storage/postgresql/migrations/migration_016_017.sql +81 -0
- kinto/core/storage/postgresql/migrations/migration_017_018.sql +25 -0
- kinto/core/storage/postgresql/migrations/migration_018_019.sql +8 -0
- kinto/core/storage/postgresql/migrations/migration_019_020.sql +7 -0
- kinto/core/storage/postgresql/migrations/migration_020_021.sql +68 -0
- kinto/core/storage/postgresql/migrations/migration_021_022.sql +62 -0
- kinto/core/storage/postgresql/migrations/migration_022_023.sql +5 -0
- kinto/core/storage/postgresql/migrations/migration_023_024.sql +6 -0
- kinto/core/storage/postgresql/migrations/migration_024_025.sql +6 -0
- kinto/core/storage/postgresql/migrator.py +98 -0
- kinto/core/storage/postgresql/pool.py +55 -0
- kinto/core/storage/postgresql/schema.sql +143 -0
- kinto/core/storage/testing.py +1857 -0
- kinto/core/storage/utils.py +37 -0
- kinto/core/testing.py +182 -0
- kinto/core/utils.py +553 -0
- kinto/core/views/__init__.py +0 -0
- kinto/core/views/batch.py +163 -0
- kinto/core/views/errors.py +145 -0
- kinto/core/views/heartbeat.py +106 -0
- kinto/core/views/hello.py +69 -0
- kinto/core/views/openapi.py +35 -0
- kinto/core/views/version.py +50 -0
- kinto/events.py +3 -0
- kinto/plugins/__init__.py +0 -0
- kinto/plugins/accounts/__init__.py +94 -0
- kinto/plugins/accounts/authentication.py +63 -0
- kinto/plugins/accounts/scripts.py +61 -0
- kinto/plugins/accounts/utils.py +13 -0
- kinto/plugins/accounts/views.py +136 -0
- kinto/plugins/admin/README.md +3 -0
- kinto/plugins/admin/VERSION +1 -0
- kinto/plugins/admin/__init__.py +40 -0
- kinto/plugins/admin/build/VERSION +1 -0
- kinto/plugins/admin/build/assets/index-CYFwtKtL.css +6 -0
- kinto/plugins/admin/build/assets/index-DJ0m93zA.js +149 -0
- kinto/plugins/admin/build/assets/logo-VBRiKSPX.png +0 -0
- kinto/plugins/admin/build/index.html +18 -0
- kinto/plugins/admin/public/help.html +25 -0
- kinto/plugins/admin/views.py +42 -0
- kinto/plugins/default_bucket/__init__.py +191 -0
- kinto/plugins/flush.py +28 -0
- kinto/plugins/history/__init__.py +65 -0
- kinto/plugins/history/listener.py +181 -0
- kinto/plugins/history/views.py +66 -0
- kinto/plugins/openid/__init__.py +131 -0
- kinto/plugins/openid/utils.py +14 -0
- kinto/plugins/openid/views.py +193 -0
- kinto/plugins/prometheus.py +300 -0
- kinto/plugins/statsd.py +85 -0
- kinto/schema_validation.py +135 -0
- kinto/views/__init__.py +34 -0
- kinto/views/admin.py +195 -0
- kinto/views/buckets.py +45 -0
- kinto/views/collections.py +58 -0
- kinto/views/contribute.py +39 -0
- kinto/views/groups.py +90 -0
- kinto/views/permissions.py +235 -0
- kinto/views/records.py +133 -0
- kinto-23.2.1.dist-info/METADATA +232 -0
- kinto-23.2.1.dist-info/RECORD +142 -0
- kinto-23.2.1.dist-info/WHEEL +5 -0
- kinto-23.2.1.dist-info/entry_points.txt +5 -0
- kinto-23.2.1.dist-info/licenses/LICENSE +13 -0
- kinto-23.2.1.dist-info/top_level.txt +1 -0

kinto/core/storage/postgresql/migrations/migration_014_015.sql
@@ -0,0 +1,95 @@
+--
+-- Helper that returns the current collection timestamp.
+--
+CREATE OR REPLACE FUNCTION collection_timestamp(uid VARCHAR, resource VARCHAR)
+RETURNS TIMESTAMP AS $$
+DECLARE
+    ts TIMESTAMP;
+BEGIN
+    WITH create_if_missing AS (
+        INSERT INTO timestamps (parent_id, collection_id, last_modified)
+        VALUES (uid, resource, clock_timestamp())
+        ON CONFLICT (parent_id, collection_id) DO NOTHING
+        RETURNING last_modified
+    ),
+    get_or_create AS (
+        SELECT last_modified FROM create_if_missing
+        UNION
+        SELECT last_modified FROM timestamps
+         WHERE parent_id = uid
+           AND collection_id = resource
+    )
+    SELECT last_modified INTO ts FROM get_or_create;
+
+    RETURN ts;
+END;
+$$ LANGUAGE plpgsql;
+
+--
+-- Triggers to set last_modified on INSERT/UPDATE
+--
+DROP TRIGGER IF EXISTS tgr_records_last_modified ON records;
+DROP TRIGGER IF EXISTS tgr_deleted_last_modified ON deleted;
+
+CREATE OR REPLACE FUNCTION bump_timestamp()
+RETURNS trigger AS $$
+DECLARE
+    previous TIMESTAMP;
+    current TIMESTAMP;
+
+BEGIN
+    previous := NULL;
+    SELECT last_modified INTO previous
+      FROM timestamps
+     WHERE parent_id = NEW.parent_id
+       AND collection_id = NEW.collection_id;
+
+    --
+    -- This bumps the current timestamp to 1 msec in the future if the previous
+    -- timestamp is equal to the current one (or higher if was bumped already).
+    --
+    -- If a bunch of requests from the same user on the same collection
+    -- arrive in the same millisecond, the unicity constraint can raise
+    -- an error (operation is cancelled).
+    -- See https://github.com/mozilla-services/cliquet/issues/25
+    --
+    current := clock_timestamp();
+    IF previous IS NOT NULL AND previous >= current THEN
+        current := previous + INTERVAL '1 milliseconds';
+    END IF;
+
+    IF NEW.last_modified IS NULL OR
+       (previous IS NOT NULL AND as_epoch(NEW.last_modified) = as_epoch(previous)) THEN
+        -- If record does not carry last-modified, or if the one specified
+        -- is equal to previous, assign it to current (i.e. bump it).
+        NEW.last_modified := current;
+    ELSE
+        -- Use record last-modified as collection timestamp.
+        IF previous IS NULL OR NEW.last_modified > previous THEN
+            current := NEW.last_modified;
+        END IF;
+    END IF;
+
+    --
+    -- Upsert current collection timestamp.
+    --
+    INSERT INTO timestamps (parent_id, collection_id, last_modified)
+    VALUES (NEW.parent_id, NEW.collection_id, current)
+    ON CONFLICT (parent_id, collection_id) DO UPDATE
+    SET last_modified = current;
+
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE TRIGGER tgr_records_last_modified
+BEFORE INSERT OR UPDATE OF data ON records
+FOR EACH ROW EXECUTE PROCEDURE bump_timestamp();
+
+CREATE TRIGGER tgr_deleted_last_modified
+BEFORE INSERT ON deleted
+FOR EACH ROW EXECUTE PROCEDURE bump_timestamp();
+
+
+-- Bump storage schema version.
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '15');
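
The trigger above is the heart of the collection-timestamp bookkeeping: the timestamp must keep moving forward even when several writes for the same parent/collection land in the same millisecond. A minimal Python sketch of the same rule, with hypothetical names (the SQL compares at millisecond resolution via as_epoch; the sketch compares full timestamps for brevity):

    from datetime import datetime, timedelta

    def bump(previous, record_ts, now):
        """Return (record timestamp, collection timestamp) the way the trigger would."""
        current = now
        if previous is not None and previous >= current:
            # Same-millisecond collision: push the collection timestamp 1 ms ahead.
            current = previous + timedelta(milliseconds=1)
        if record_ts is None or record_ts == previous:
            # Record carries no timestamp (or repeats the previous one): bump it.
            record_ts = current
        elif previous is None or record_ts > previous:
            # Record carries a newer timestamp: it becomes the collection timestamp.
            current = record_ts
        return record_ts, current

    t0 = datetime(2024, 1, 1, 12, 0, 0)
    print(bump(previous=t0, record_ts=None, now=t0))  # both values end up at t0 + 1 ms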

kinto/core/storage/postgresql/migrations/migration_016_017.sql
@@ -0,0 +1,81 @@
+--
+-- Triggers to set last_modified on INSERT/UPDATE
+--
+DROP TRIGGER IF EXISTS tgr_records_last_modified ON records;
+DROP TRIGGER IF EXISTS tgr_deleted_last_modified ON deleted;
+
+CREATE OR REPLACE FUNCTION bump_timestamp()
+RETURNS trigger AS $$
+DECLARE
+    previous TIMESTAMP;
+    current TIMESTAMP;
+BEGIN
+    previous := NULL;
+    WITH existing_timestamps AS (
+      -- Timestamp of latest record.
+      (
+        SELECT last_modified
+          FROM records
+         WHERE parent_id = NEW.parent_id
+           AND collection_id = NEW.collection_id
+         ORDER BY last_modified DESC
+         LIMIT 1
+      )
+      -- Timestamp of latest tombstone.
+      UNION
+      (
+        SELECT last_modified
+          FROM deleted
+         WHERE parent_id = NEW.parent_id
+           AND collection_id = NEW.collection_id
+         ORDER BY last_modified DESC
+         LIMIT 1
+      )
+      -- Timestamp when collection was empty.
+      UNION
+      (
+        SELECT last_modified
+          FROM timestamps
+         WHERE parent_id = NEW.parent_id
+           AND collection_id = NEW.collection_id
+      )
+    )
+    SELECT MAX(last_modified) INTO previous
+      FROM existing_timestamps;
+
+    --
+    -- This bumps the current timestamp to 1 msec in the future if the previous
+    -- timestamp is equal to the current one (or higher if was bumped already).
+    --
+    -- If a bunch of requests from the same user on the same collection
+    -- arrive in the same millisecond, the unicity constraint can raise
+    -- an error (operation is cancelled).
+    -- See https://github.com/mozilla-services/cliquet/issues/25
+    --
+    current := clock_timestamp();
+    IF previous IS NOT NULL AND previous >= current THEN
+        current := previous + INTERVAL '1 milliseconds';
+    END IF;
+
+    IF NEW.last_modified IS NULL OR
+       (previous IS NOT NULL AND as_epoch(NEW.last_modified) = as_epoch(previous)) THEN
+        -- If record does not carry last-modified, or if the one specified
+        -- is equal to previous, assign it to current (i.e. bump it).
+        NEW.last_modified := current;
+    END IF;
+
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE TRIGGER tgr_records_last_modified
+BEFORE INSERT OR UPDATE OF data ON records
+FOR EACH ROW EXECUTE PROCEDURE bump_timestamp();
+
+CREATE TRIGGER tgr_deleted_last_modified
+BEFORE INSERT ON deleted
+FOR EACH ROW EXECUTE PROCEDURE bump_timestamp();
+
+
+-- Bump storage schema version.
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '17');

kinto/core/storage/postgresql/migrations/migration_017_018.sql
@@ -0,0 +1,25 @@
+-- Add new deleted column (split into commands is more efficient)
+ALTER TABLE records ADD COLUMN deleted BOOLEAN;
+UPDATE records SET deleted = FALSE;
+ALTER TABLE records ALTER COLUMN deleted SET NOT NULL;
+ALTER TABLE records ALTER COLUMN deleted SET DEFAULT FALSE;
+
+
+-- Lock records and deleted tables before merging them.
+BEGIN WORK;
+LOCK TABLE records IN ACCESS EXCLUSIVE MODE;
+LOCK TABLE deleted IN ACCESS EXCLUSIVE MODE;
+
+INSERT INTO records (id, parent_id, collection_id, data, last_modified, deleted)
+    SELECT id, parent_id, collection_id, '{"deleted": true}'::JSONB, last_modified, TRUE
+      FROM deleted
+    -- Because of Bug Kinto/kinto#1375, some tombstones may exist.
+    ON CONFLICT (id, parent_id, collection_id) DO NOTHING;
+COMMIT WORK;
+-- Table merged.
+
+-- We do not drop the `deleted` table here.
+-- It can be dropped manually once Web heads run the appropriate Kinto version.
+
+-- Bump storage schema version.
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '18');

kinto/core/storage/postgresql/migrations/migration_018_019.sql
@@ -0,0 +1,8 @@
+-- Alter collation to C to improve LIKE-prefix queries in delete_all.
+ALTER TABLE records
+    ALTER COLUMN id TYPE TEXT COLLATE "C",
+    ALTER COLUMN parent_id TYPE TEXT COLLATE "C",
+    ALTER COLUMN collection_id TYPE TEXT COLLATE "C";
+
+-- Bump storage schema version.
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '19');

kinto/core/storage/postgresql/migrations/migration_019_020.sql
@@ -0,0 +1,7 @@
+-- Alter collation to C to improve LIKE-prefix queries in delete_all.
+ALTER TABLE timestamps
+    ALTER COLUMN parent_id TYPE TEXT COLLATE "C",
+    ALTER COLUMN collection_id TYPE TEXT COLLATE "C";
+
+-- Bump storage schema version.
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '20');

kinto/core/storage/postgresql/migrations/migration_020_021.sql
@@ -0,0 +1,68 @@
+ALTER TABLE records RENAME COLUMN collection_id TO resource_name;
+ALTER TABLE records RENAME TO objects;
+ALTER TABLE timestamps RENAME COLUMN collection_id TO resource_name;
+ALTER INDEX idx_records_parent_id_collection_id_last_modified RENAME TO idx_objects_parent_id_resource_name_last_modified;
+ALTER INDEX idx_records_last_modified_epoch RENAME TO idx_objects_last_modified_epoch;
+
+DROP TRIGGER IF EXISTS tgr_records_last_modified ON objects;
+
+CREATE OR REPLACE FUNCTION bump_timestamp()
+RETURNS trigger AS $$
+DECLARE
+    previous TIMESTAMP;
+    current TIMESTAMP;
+BEGIN
+    previous := NULL;
+    WITH existing_timestamps AS (
+      -- Timestamp of latest record.
+      (
+        SELECT last_modified
+          FROM objects
+         WHERE parent_id = NEW.parent_id
+           AND resource_name = NEW.resource_name
+         ORDER BY last_modified DESC
+         LIMIT 1
+      )
+      -- Timestamp when collection was empty.
+      UNION
+      (
+        SELECT last_modified
+          FROM timestamps
+         WHERE parent_id = NEW.parent_id
+           AND resource_name = NEW.resource_name
+      )
+    )
+    SELECT MAX(last_modified) INTO previous
+      FROM existing_timestamps;
+
+    --
+    -- This bumps the current timestamp to 1 msec in the future if the previous
+    -- timestamp is equal to the current one (or higher if was bumped already).
+    --
+    -- If a bunch of requests from the same user on the same collection
+    -- arrive in the same millisecond, the unicity constraint can raise
+    -- an error (operation is cancelled).
+    -- See https://github.com/mozilla-services/cliquet/issues/25
+    --
+    current := clock_timestamp();
+    IF previous IS NOT NULL AND previous >= current THEN
+        current := previous + INTERVAL '1 milliseconds';
+    END IF;
+
+    IF NEW.last_modified IS NULL OR
+       (previous IS NOT NULL AND as_epoch(NEW.last_modified) = as_epoch(previous)) THEN
+        -- If record does not carry last-modified, or if the one specified
+        -- is equal to previous, assign it to current (i.e. bump it).
+        NEW.last_modified := current;
+    END IF;
+
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE TRIGGER tgr_objects_last_modified
+BEFORE INSERT OR UPDATE OF data ON objects
+FOR EACH ROW EXECUTE PROCEDURE bump_timestamp();
+
+-- Bump storage schema version.
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '21');

kinto/core/storage/postgresql/migrations/migration_021_022.sql
@@ -0,0 +1,62 @@
+DROP TRIGGER IF EXISTS tgr_objects_last_modified ON objects;
+
+CREATE OR REPLACE FUNCTION bump_timestamp()
+RETURNS trigger AS $$
+DECLARE
+    previous BIGINT;
+    current BIGINT;
+BEGIN
+    previous := NULL;
+    WITH existing_timestamps AS (
+      -- Timestamp of latest record.
+      (
+        SELECT last_modified
+          FROM objects
+         WHERE parent_id = NEW.parent_id
+           AND resource_name = NEW.resource_name
+         ORDER BY as_epoch(last_modified) DESC
+         LIMIT 1
+      )
+      -- Timestamp when resource was empty.
+      UNION
+      (
+        SELECT last_modified
+          FROM timestamps
+         WHERE parent_id = NEW.parent_id
+           AND resource_name = NEW.resource_name
+      )
+    )
+    SELECT as_epoch(MAX(last_modified)) INTO previous
+      FROM existing_timestamps;
+
+    --
+    -- This bumps the current timestamp to 1 msec in the future if the previous
+    -- timestamp is equal to the current one (or higher if was bumped already).
+    --
+    -- If a bunch of requests from the same user on the same resource
+    -- arrive in the same millisecond, the unicity constraint can raise
+    -- an error (operation is cancelled).
+    -- See https://github.com/mozilla-services/cliquet/issues/25
+    --
+    current := as_epoch(clock_timestamp()::TIMESTAMP);
+    IF previous IS NOT NULL AND previous >= current THEN
+        current := previous + 1;
+    END IF;
+
+    IF NEW.last_modified IS NULL OR
+       (previous IS NOT NULL AND as_epoch(NEW.last_modified) = previous) THEN
+        -- If record does not carry last-modified, or if the one specified
+        -- is equal to previous, assign it to current (i.e. bump it).
+        NEW.last_modified := from_epoch(current);
+    END IF;
+
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE TRIGGER tgr_objects_last_modified
+BEFORE INSERT OR UPDATE OF data ON objects
+FOR EACH ROW EXECUTE PROCEDURE bump_timestamp();
+
+-- Bump storage schema version.
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '22');

kinto/core/storage/postgresql/migrator.py
@@ -0,0 +1,98 @@
+"""
+A helper class to run migrations using a series of SQL files.
+"""
+
+import logging
+import os
+
+from kinto.core.utils import sqlalchemy as sa
+
+
+logger = logging.getLogger(__name__)
+
+
+class MigratorMixin:
+    """Mixin to allow the running of migrations.
+
+    Your class must provide a `client` attribute (a PostgreSQLClient),
+    as well as override some class attributes.
+    """
+
+    """Name of this migrator (e.g. "storage"). Override this."""
+    name = None
+
+    """The current "newest" schema version. Override this."""
+    schema_version = None
+
+    """The file to find the current "newest" schema in. Override this."""
+    schema_file = None
+
+    """The directory to find migration files in.
+
+    Migrations should each be a file named migration_nnn_mmm.sql, where mmm = nnn + 1.
+    """
+    migrations_directory = None
+
+    def get_installed_version(self):
+        """Return current version of schema or None if none found.
+
+        Override this.
+
+        This may be called several times during a single migration.
+        """
+        raise NotImplementedError("method not overridden")  # pragma: no cover
+
+    def create_or_migrate_schema(self, dry_run=False):
+        """Either create or migrate the schema, as needed."""
+        version = self.get_installed_version()
+        if not version:
+            self.create_schema(dry_run)
+            return
+
+        logger.info(f"Detected PostgreSQL {self.name} schema version {version}.")
+        if version == self.schema_version:
+            logger.info(f"PostgreSQL {self.name} schema is up-to-date.")
+            return
+
+        self.migrate_schema(version, dry_run)
+
+    def create_schema(self, dry_run):
+        """Actually create the schema from scratch using self.schema_file.
+
+        You can override this if you want to add additional sanity checks.
+        """
+        logger.info(
+            f"Create PostgreSQL {self.name} schema at version {self.schema_version} from {self.schema_file}."
+        )
+        if not dry_run:
+            self._execute_sql_file(self.schema_file)
+            logger.info(f"Created PostgreSQL {self.name} schema (version {self.schema_version}).")
+
+    def migrate_schema(self, start_version, dry_run):
+        migrations = [(v, v + 1) for v in range(start_version, self.schema_version)]
+        for migration in migrations:
+            expected = migration[0]
+            current = self.get_installed_version()
+            error_msg = f"PostgreSQL {self.name} schema: Expected version {expected}. Found version {current}."
+            if not dry_run and expected != current:
+                raise AssertionError(error_msg)
+
+            logger.info(
+                f"Migrate PostgreSQL {self.name} schema from version {migration[0]} to {migration[1]}."
+            )
+            filename = "migration_{0:03d}_{1:03d}.sql".format(*migration)
+            filepath = os.path.join(self.migrations_directory, filename)
+            logger.info(f"Execute PostgreSQL {self.name} migration from {filepath}")
+            if not dry_run:
+                self._execute_sql_file(filepath)
+        logger.info(
+            f"PostgreSQL {self.name} schema migration {'simulated' if dry_run else 'done'}"
+        )
+
+    def _execute_sql_file(self, filepath):
+        """Helper method to execute the SQL in a file."""
+        with open(filepath) as f:
+            schema = f.read()
+        # Since called outside request, force commit.
+        with self.client.connect(force_commit=True) as conn:
+            conn.execute(sa.text(schema))
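
A concrete backend adopts the mixin by filling in the class attributes and the version lookup. The sketch below is illustrative only (hypothetical class name and paths, and a deliberately simplified version query against the metadata table created by the schema scripts above); the real storage backend in kinto.core.storage.postgresql is more involved:

    import os

    from kinto.core.storage.postgresql.migrator import MigratorMixin
    from kinto.core.utils import sqlalchemy as sa

    HERE = os.path.dirname(__file__)

    class StorageMigrator(MigratorMixin):
        name = "storage"
        schema_version = 25
        schema_file = os.path.join(HERE, "schema.sql")
        migrations_directory = os.path.join(HERE, "migrations")

        def __init__(self, client):
            self.client = client  # a PostgreSQLClient, as the mixin requires

        def get_installed_version(self):
            # Simplified: read back the version recorded by the SQL scripts above.
            query = "SELECT MAX(CAST(value AS INTEGER)) FROM metadata WHERE name = 'storage_schema_version';"
            with self.client.connect() as conn:
                row = conn.execute(sa.text(query)).fetchone()
            return row[0] if row else None

    # StorageMigrator(client).create_or_migrate_schema(dry_run=True)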

kinto/core/storage/postgresql/pool.py
@@ -0,0 +1,55 @@
+from sqlalchemy.pool import QueuePool
+from sqlalchemy.util.queue import Queue
+
+
+class _QueueWithMaxBacklog(Queue):
+    """SQLAlchemy Queue subclass with a limit on the length of the backlog.
+
+    This base Queue class sets no limit on the number of threads that can be
+    simultaneously blocked waiting for an item on the queue. This class
+    adds a "max_backlog" parameter that can be used to bound this number.
+    """
+
+    def __init__(self, maxsize=0, max_backlog=-1):
+        self.max_backlog = max_backlog
+        self.cur_backlog = 0
+        Queue.__init__(self, maxsize)
+
+    def get(self, block=True, timeout=None):
+        # The SQLAlchemy Queue class uses a re-entrant mutex by default,
+        # so it's safe to acquire it both here and in the superclass method.
+        with self.mutex:
+            self.cur_backlog += 1
+            try:
+                if self.max_backlog >= 0:  # pragma: no branch
+                    if self.cur_backlog > self.max_backlog:
+                        block = False
+                        timeout = None
+                return Queue.get(self, block, timeout)
+            finally:
+                self.cur_backlog -= 1
+
+
+class QueuePoolWithMaxBacklog(QueuePool):
+    """An SQLAlchemy QueuePool with a limit on the length of the backlog.
+
+    The base QueuePool class sets no limit on the number of threads that can
+    be simultaneously attempting to connect to the database. This means that
+    a misbehaving database can easily lock up all threads by keeping them
+    waiting in the queue.
+
+    This QueuePool subclass provides a "max_backlog" that limits the number
+    of threads that can be in the queue waiting for a connection. Once this
+    limit has been reached, any further attempts to acquire a connection will
+    be rejected immediately.
+    """
+
+    def __init__(self, creator, max_backlog=-1, **kwds):
+        kwds.setdefault("pool_size", 25)
+        QueuePool.__init__(self, creator, **kwds)
+        self._pool = _QueueWithMaxBacklog(self._pool.maxsize, max_backlog)
+
+    def recreate(self):
+        new_self = QueuePool.recreate(self)
+        new_self._pool = _QueueWithMaxBacklog(self._pool.maxsize, self._pool.max_backlog)
+        return new_self
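
A minimal wiring sketch for this pool (hypothetical DSN; it assumes SQLAlchemy's create_engine forwards keyword arguments that match the pool class constructor, which is how max_backlog would reach QueuePoolWithMaxBacklog):

    import sqlalchemy

    from kinto.core.storage.postgresql.pool import QueuePoolWithMaxBacklog

    engine = sqlalchemy.create_engine(
        "postgresql://postgres:postgres@localhost/testdb",  # assumed DSN
        poolclass=QueuePoolWithMaxBacklog,
        pool_size=25,
        max_backlog=16,  # at most 16 threads may wait for a connection; extras fail fast
    )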

kinto/core/storage/postgresql/schema.sql
@@ -0,0 +1,143 @@
+--
+-- Automated script, we do not need NOTICE and WARNING
+--
+SET client_min_messages TO ERROR;
+--
+-- Convert timestamps to milliseconds epoch integer
+--
+CREATE OR REPLACE FUNCTION as_epoch(ts TIMESTAMP) RETURNS BIGINT AS $$
+BEGIN
+    RETURN (EXTRACT(EPOCH FROM ts) * 1000)::BIGINT;
+END;
+$$ LANGUAGE plpgsql
+IMMUTABLE;
+
+CREATE OR REPLACE FUNCTION from_epoch(epoch BIGINT) RETURNS TIMESTAMP AS $$
+BEGIN
+    RETURN TIMESTAMP WITH TIME ZONE 'epoch' + epoch * INTERVAL '1 millisecond';
+END;
+$$ LANGUAGE plpgsql
+IMMUTABLE;
+
+--
+-- Actual objects
+--
+CREATE TABLE IF NOT EXISTS objects (
+    -- These are all IDs stored as text, and not human language.
+    -- Therefore, we store them in the C collation. This lets Postgres
+    -- use the index on parent_id for prefix matching (parent_id LIKE
+    -- '/buckets/abc/%').
+    id TEXT COLLATE "C" NOT NULL,
+    parent_id TEXT COLLATE "C" NOT NULL,
+    resource_name TEXT COLLATE "C" NOT NULL,
+
+    -- Timestamp is relevant because adequate semantically.
+    -- Since the HTTP API manipulates integers, it could make sense
+    -- to replace the timestamp columns type by integer.
+    last_modified TIMESTAMP NOT NULL,
+
+    -- JSONB, 2x faster than JSON.
+    data JSONB NOT NULL DEFAULT '{}'::JSONB,
+
+    deleted BOOLEAN NOT NULL,
+
+    PRIMARY KEY (id, parent_id, resource_name)
+);
+CREATE UNIQUE INDEX IF NOT EXISTS idx_objects_parent_id_resource_name_last_modified
+    ON objects(parent_id, resource_name, last_modified DESC);
+CREATE INDEX IF NOT EXISTS idx_objects_last_modified_epoch
+    ON objects(as_epoch(last_modified));
+CREATE INDEX IF NOT EXISTS idx_objects_resource_name_parent_id_deleted
+    ON objects(resource_name, parent_id, deleted);
+-- Index for collections timestamps
+CREATE INDEX IF NOT EXISTS idx_objects_parent_id_record_last_modified
+    ON objects (parent_id, last_modified DESC)
+    WHERE resource_name = 'record';
+-- Index for history plugin trimming.
+CREATE INDEX IF NOT EXISTS idx_objects_history_userid_and_resourcename
+    ON objects ((data->'user_id'), (data->'resource_name'))
+    WHERE resource_name = 'history';
+
+CREATE TABLE IF NOT EXISTS timestamps (
+    parent_id TEXT NOT NULL COLLATE "C",
+    resource_name TEXT NOT NULL COLLATE "C",
+    last_modified TIMESTAMP NOT NULL,
+    PRIMARY KEY (parent_id, resource_name)
+);
+
+--
+-- Triggers to set last_modified on INSERT/UPDATE
+--
+DROP TRIGGER IF EXISTS tgr_objects_last_modified ON objects;
+
+CREATE OR REPLACE FUNCTION bump_timestamp()
+RETURNS trigger AS $$
+DECLARE
+    previous BIGINT;
+    current BIGINT;
+BEGIN
+    previous := NULL;
+    WITH existing_timestamps AS (
+      -- Timestamp of latest record.
+      (
+        SELECT last_modified
+          FROM objects
+         WHERE parent_id = NEW.parent_id
+           AND resource_name = NEW.resource_name
+         ORDER BY as_epoch(last_modified) DESC
+         LIMIT 1
+      )
+      -- Timestamp when resource was empty.
+      UNION
+      (
+        SELECT last_modified
+          FROM timestamps
+         WHERE parent_id = NEW.parent_id
+           AND resource_name = NEW.resource_name
+      )
+    )
+    SELECT as_epoch(MAX(last_modified)) INTO previous
+      FROM existing_timestamps;
+
+    --
+    -- This bumps the current timestamp to 1 msec in the future if the previous
+    -- timestamp is equal to the current one (or higher if was bumped already).
+    --
+    -- If a bunch of requests from the same user on the same resource
+    -- arrive in the same millisecond, the unicity constraint can raise
+    -- an error (operation is cancelled).
+    -- See https://github.com/mozilla-services/cliquet/issues/25
+    --
+    current := as_epoch(clock_timestamp()::TIMESTAMP);
+    IF previous IS NOT NULL AND previous >= current THEN
+        current := previous + 1;
+    END IF;
+
+    IF NEW.last_modified IS NULL OR
+       (previous IS NOT NULL AND as_epoch(NEW.last_modified) = previous) THEN
+        -- If record does not carry last-modified, or if the one specified
+        -- is equal to previous, assign it to current (i.e. bump it).
+        NEW.last_modified := from_epoch(current);
+    END IF;
+
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE TRIGGER tgr_objects_last_modified
+BEFORE INSERT OR UPDATE OF data ON objects
+FOR EACH ROW EXECUTE PROCEDURE bump_timestamp();
+
+--
+-- Metadata table
+--
+CREATE TABLE IF NOT EXISTS metadata (
+    name VARCHAR(128) NOT NULL,
+    value VARCHAR(512) NOT NULL
+);
+INSERT INTO metadata (name, value) VALUES ('created_at', NOW()::TEXT);
+
+
+-- Set storage schema version.
+-- Should match ``kinto.core.storage.postgresql.PostgreSQL.schema_version``
+INSERT INTO metadata (name, value) VALUES ('storage_schema_version', '25');
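
The as_epoch/from_epoch pair defined at the top of this schema is what turns the TIMESTAMP column into the integer millisecond timestamps exposed by the HTTP API. A quick way to sanity-check the round trip against a database where this schema has been loaded (hypothetical DSN, sketch only):

    import sqlalchemy

    engine = sqlalchemy.create_engine("postgresql://postgres:postgres@localhost/testdb")  # assumed DSN

    with engine.connect() as conn:
        epoch = conn.execute(
            sqlalchemy.text("SELECT as_epoch(clock_timestamp()::TIMESTAMP)")
        ).scalar()
        ts = conn.execute(sqlalchemy.text("SELECT from_epoch(:e)"), {"e": epoch}).scalar()

    print(epoch, ts)  # integer milliseconds since epoch, and the TIMESTAMP rebuilt from it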