kinto 23.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinto/__init__.py +92 -0
- kinto/__main__.py +249 -0
- kinto/authorization.py +134 -0
- kinto/config/__init__.py +94 -0
- kinto/config/kinto.tpl +270 -0
- kinto/contribute.json +27 -0
- kinto/core/__init__.py +246 -0
- kinto/core/authentication.py +48 -0
- kinto/core/authorization.py +311 -0
- kinto/core/cache/__init__.py +131 -0
- kinto/core/cache/memcached.py +112 -0
- kinto/core/cache/memory.py +104 -0
- kinto/core/cache/postgresql/__init__.py +178 -0
- kinto/core/cache/postgresql/schema.sql +23 -0
- kinto/core/cache/testing.py +208 -0
- kinto/core/cornice/__init__.py +93 -0
- kinto/core/cornice/cors.py +144 -0
- kinto/core/cornice/errors.py +40 -0
- kinto/core/cornice/pyramidhook.py +373 -0
- kinto/core/cornice/renderer.py +89 -0
- kinto/core/cornice/resource.py +205 -0
- kinto/core/cornice/service.py +641 -0
- kinto/core/cornice/util.py +138 -0
- kinto/core/cornice/validators/__init__.py +94 -0
- kinto/core/cornice/validators/_colander.py +142 -0
- kinto/core/cornice/validators/_marshmallow.py +182 -0
- kinto/core/cornice_swagger/__init__.py +92 -0
- kinto/core/cornice_swagger/converters/__init__.py +21 -0
- kinto/core/cornice_swagger/converters/exceptions.py +6 -0
- kinto/core/cornice_swagger/converters/parameters.py +90 -0
- kinto/core/cornice_swagger/converters/schema.py +249 -0
- kinto/core/cornice_swagger/swagger.py +725 -0
- kinto/core/cornice_swagger/templates/index.html +73 -0
- kinto/core/cornice_swagger/templates/index_script_template.html +21 -0
- kinto/core/cornice_swagger/util.py +42 -0
- kinto/core/cornice_swagger/views.py +78 -0
- kinto/core/decorators.py +74 -0
- kinto/core/errors.py +216 -0
- kinto/core/events.py +301 -0
- kinto/core/initialization.py +738 -0
- kinto/core/listeners/__init__.py +9 -0
- kinto/core/metrics.py +94 -0
- kinto/core/openapi.py +115 -0
- kinto/core/permission/__init__.py +202 -0
- kinto/core/permission/memory.py +167 -0
- kinto/core/permission/postgresql/__init__.py +489 -0
- kinto/core/permission/postgresql/migrations/migration_001_002.sql +18 -0
- kinto/core/permission/postgresql/schema.sql +41 -0
- kinto/core/permission/testing.py +487 -0
- kinto/core/resource/__init__.py +1311 -0
- kinto/core/resource/model.py +412 -0
- kinto/core/resource/schema.py +502 -0
- kinto/core/resource/viewset.py +230 -0
- kinto/core/schema.py +119 -0
- kinto/core/scripts.py +50 -0
- kinto/core/statsd.py +1 -0
- kinto/core/storage/__init__.py +436 -0
- kinto/core/storage/exceptions.py +53 -0
- kinto/core/storage/generators.py +58 -0
- kinto/core/storage/memory.py +651 -0
- kinto/core/storage/postgresql/__init__.py +1131 -0
- kinto/core/storage/postgresql/client.py +120 -0
- kinto/core/storage/postgresql/migrations/migration_001_002.sql +10 -0
- kinto/core/storage/postgresql/migrations/migration_002_003.sql +33 -0
- kinto/core/storage/postgresql/migrations/migration_003_004.sql +18 -0
- kinto/core/storage/postgresql/migrations/migration_004_005.sql +20 -0
- kinto/core/storage/postgresql/migrations/migration_005_006.sql +11 -0
- kinto/core/storage/postgresql/migrations/migration_006_007.sql +74 -0
- kinto/core/storage/postgresql/migrations/migration_007_008.sql +66 -0
- kinto/core/storage/postgresql/migrations/migration_008_009.sql +41 -0
- kinto/core/storage/postgresql/migrations/migration_009_010.sql +98 -0
- kinto/core/storage/postgresql/migrations/migration_010_011.sql +14 -0
- kinto/core/storage/postgresql/migrations/migration_011_012.sql +9 -0
- kinto/core/storage/postgresql/migrations/migration_012_013.sql +71 -0
- kinto/core/storage/postgresql/migrations/migration_013_014.sql +14 -0
- kinto/core/storage/postgresql/migrations/migration_014_015.sql +95 -0
- kinto/core/storage/postgresql/migrations/migration_015_016.sql +4 -0
- kinto/core/storage/postgresql/migrations/migration_016_017.sql +81 -0
- kinto/core/storage/postgresql/migrations/migration_017_018.sql +25 -0
- kinto/core/storage/postgresql/migrations/migration_018_019.sql +8 -0
- kinto/core/storage/postgresql/migrations/migration_019_020.sql +7 -0
- kinto/core/storage/postgresql/migrations/migration_020_021.sql +68 -0
- kinto/core/storage/postgresql/migrations/migration_021_022.sql +62 -0
- kinto/core/storage/postgresql/migrations/migration_022_023.sql +5 -0
- kinto/core/storage/postgresql/migrations/migration_023_024.sql +6 -0
- kinto/core/storage/postgresql/migrations/migration_024_025.sql +6 -0
- kinto/core/storage/postgresql/migrator.py +98 -0
- kinto/core/storage/postgresql/pool.py +55 -0
- kinto/core/storage/postgresql/schema.sql +143 -0
- kinto/core/storage/testing.py +1857 -0
- kinto/core/storage/utils.py +37 -0
- kinto/core/testing.py +182 -0
- kinto/core/utils.py +553 -0
- kinto/core/views/__init__.py +0 -0
- kinto/core/views/batch.py +163 -0
- kinto/core/views/errors.py +145 -0
- kinto/core/views/heartbeat.py +106 -0
- kinto/core/views/hello.py +69 -0
- kinto/core/views/openapi.py +35 -0
- kinto/core/views/version.py +50 -0
- kinto/events.py +3 -0
- kinto/plugins/__init__.py +0 -0
- kinto/plugins/accounts/__init__.py +94 -0
- kinto/plugins/accounts/authentication.py +63 -0
- kinto/plugins/accounts/scripts.py +61 -0
- kinto/plugins/accounts/utils.py +13 -0
- kinto/plugins/accounts/views.py +136 -0
- kinto/plugins/admin/README.md +3 -0
- kinto/plugins/admin/VERSION +1 -0
- kinto/plugins/admin/__init__.py +40 -0
- kinto/plugins/admin/build/VERSION +1 -0
- kinto/plugins/admin/build/assets/index-CYFwtKtL.css +6 -0
- kinto/plugins/admin/build/assets/index-DJ0m93zA.js +149 -0
- kinto/plugins/admin/build/assets/logo-VBRiKSPX.png +0 -0
- kinto/plugins/admin/build/index.html +18 -0
- kinto/plugins/admin/public/help.html +25 -0
- kinto/plugins/admin/views.py +42 -0
- kinto/plugins/default_bucket/__init__.py +191 -0
- kinto/plugins/flush.py +28 -0
- kinto/plugins/history/__init__.py +65 -0
- kinto/plugins/history/listener.py +181 -0
- kinto/plugins/history/views.py +66 -0
- kinto/plugins/openid/__init__.py +131 -0
- kinto/plugins/openid/utils.py +14 -0
- kinto/plugins/openid/views.py +193 -0
- kinto/plugins/prometheus.py +300 -0
- kinto/plugins/statsd.py +85 -0
- kinto/schema_validation.py +135 -0
- kinto/views/__init__.py +34 -0
- kinto/views/admin.py +195 -0
- kinto/views/buckets.py +45 -0
- kinto/views/collections.py +58 -0
- kinto/views/contribute.py +39 -0
- kinto/views/groups.py +90 -0
- kinto/views/permissions.py +235 -0
- kinto/views/records.py +133 -0
- kinto-23.2.1.dist-info/METADATA +232 -0
- kinto-23.2.1.dist-info/RECORD +142 -0
- kinto-23.2.1.dist-info/WHEEL +5 -0
- kinto-23.2.1.dist-info/entry_points.txt +5 -0
- kinto-23.2.1.dist-info/licenses/LICENSE +13 -0
- kinto-23.2.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,489 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import os
|
|
3
|
+
from collections import OrderedDict
|
|
4
|
+
|
|
5
|
+
from kinto.core.permission import PermissionBase
|
|
6
|
+
from kinto.core.storage.postgresql.client import create_from_config
|
|
7
|
+
from kinto.core.storage.postgresql.migrator import MigratorMixin
|
|
8
|
+
from kinto.core.utils import sqlalchemy as sa
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
HERE = os.path.dirname(__file__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class Permission(PermissionBase, MigratorMixin):
    """Permission backend using PostgreSQL.

    Enable in configuration::

        kinto.permission_backend = kinto.core.permission.postgresql

    Database location URI can be customized::

        kinto.permission_url = postgresql://user:pass@db.server.lan:5432/dbname

    Alternatively, username and password could also rely on system user ident
    or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).

    .. note::

        Some tables and indices are created when ``kinto migrate`` is run.
        This requires some privileges on the database, or some error will
        be raised.

        **Alternatively**, the schema can be initialized outside the
        python application, using the SQL file located in
        :file:`kinto/core/permission/postgresql/schema.sql`. This allows to
        distinguish schema manipulation privileges from schema usage.


    A connection pool is enabled by default::

        kinto.permission_pool_size = 10
        kinto.permission_maxoverflow = 10
        kinto.permission_max_backlog = -1
        kinto.permission_pool_recycle = -1
        kinto.permission_pool_timeout = 30
        kinto.permission_poolclass =
            kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog

    The ``max_backlog`` limits the number of threads that can be in the queue
    waiting for a connection. Once this limit has been reached, any further
    attempts to acquire a connection will be rejected immediately, instead of
    locking up all threads by keeping them waiting in the queue.

    See `dedicated section in SQLAlchemy documentation
    <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_
    for default values and behaviour.

    .. note::

        Using a `dedicated connection pool <http://pgpool.net>`_ is still
        recommended to allow load balancing, replication or limit the number
        of connections used in a multi-process deployment.

    :noindex:
    """  # NOQA

    # Attributes consumed by MigratorMixin to locate the schema files and
    # track which migration the installed database is at.
    name = "permission"
    schema_version = 2
    schema_file = os.path.join(HERE, "schema.sql")
    migrations_directory = os.path.join(HERE, "migrations")

    def __init__(self, client, *args, **kwargs):
        # ``client`` is the PostgreSQL client created by
        # ``create_from_config()``; it provides the ``connect()`` context
        # manager used by every method below.
        super().__init__(*args, **kwargs)
        self.client = client

    def initialize_schema(self, dry_run=False):
        # Delegate to MigratorMixin: create the schema from scratch or run
        # any pending migrations up to ``schema_version``.
        return self.create_or_migrate_schema(dry_run)

    def get_installed_version(self):
        """Return current version of schema or None if not any found.

        Migrations were only added to the permission backend in
        8.1.2. Before this, the permission backend was only the two
        tables ``user_principals`` and ``access_control_entries``. The
        presence of these two tables and absence of a metadata
        table/permission_schema_version is therefore version 1.

        In version 8.1.2, the permission backend added a ``metadata``
        table. If the permission and storage backends point to the
        same database, this will be the same table created by the
        storage backend. This means either backend could create the
        table without the knowledge of the other one. For this reason,
        be careful to handle the case where the metadata table exists
        but no version exists.

        """
        query = "SELECT tablename FROM pg_tables WHERE tablename = 'metadata';"
        with self.client.connect() as conn:
            result = conn.execute(sa.text(query))
            table_exists = result.rowcount > 0

        if table_exists:
            # LPAD with zeros makes the lexicographic ORDER BY behave like a
            # numeric sort (e.g. '10' sorts after '2').
            query = """
            SELECT value AS version
              FROM metadata
             WHERE name = 'permission_schema_version'
             ORDER BY LPAD(value, 3, '0') DESC
             LIMIT 1;
            """
            with self.client.connect() as conn:
                result = conn.execute(sa.text(query))
                if result.rowcount > 0:
                    return int(result.fetchone().version)

        # Either the metadata table doesn't exist, or it doesn't have
        # a permission_schema_version row. Many possiblities exist:
        #
        # - Maybe we are migrating from <8.1.2 and the permission
        #   backend doesn't have a metadata table.
        #
        # - Maybe we are on a new install and don't have any tables.
        #
        # - Maybe we are on a new install and the storage backend has
        #   created the metadata table but we haven't initialized yet.
        #
        # Check if user_principals table exists. If it does, we are
        # migrating from pre-8.1.2 and we are version 1.
        query = """
        SELECT 1
          FROM information_schema.tables
         WHERE table_name = 'user_principals';
        """
        with self.client.connect(readonly=True) as conn:
            result = conn.execute(sa.text(query))
            if result.rowcount > 0:
                return 1

        # Metadata table missing or has no
        # permission_schema_version, and no user_principals table. We
        # need to initialize.
        return None

    def flush(self):
        """Delete every row from both permission tables."""
        query = """
        DELETE FROM user_principals;
        DELETE FROM access_control_entries;
        """
        # Since called outside request (e.g. tests), force commit.
        with self.client.connect(force_commit=True) as conn:
            conn.execute(sa.text(query))
        logger.debug("Flushed PostgreSQL permission tables")

    def add_user_principal(self, user_id, principal):
        """Add ``principal`` to the set of principals of ``user_id``.

        The INSERT ... WHERE NOT EXISTS form makes the call idempotent:
        inserting an already-present pair is a no-op.
        """
        query = """
        INSERT INTO user_principals (user_id, principal)
        SELECT :user_id, :principal
         WHERE NOT EXISTS (
            SELECT principal
              FROM user_principals
             WHERE user_id = :user_id
               AND principal = :principal
        );"""
        with self.client.connect() as conn:
            conn.execute(sa.text(query), dict(user_id=user_id, principal=principal))

    def remove_user_principal(self, user_id, principal):
        """Remove ``principal`` from the set of principals of ``user_id``."""
        query = """
        DELETE FROM user_principals
         WHERE user_id = :user_id
           AND principal = :principal;"""
        with self.client.connect() as conn:
            conn.execute(sa.text(query), dict(user_id=user_id, principal=principal))

    def remove_principal(self, principal):
        """Remove ``principal`` from every user it was associated with."""
        query = """
        DELETE FROM user_principals
         WHERE principal = :principal;"""
        with self.client.connect() as conn:
            conn.execute(sa.text(query), dict(principal=principal))

    def get_user_principals(self, user_id):
        """Return the set of principals of ``user_id``.

        Principals attached to the pseudo user ``system.Authenticated``
        are included for every user.
        """
        query = """
        SELECT principal
          FROM user_principals
         WHERE user_id = :user_id
            OR user_id = 'system.Authenticated';"""
        with self.client.connect(readonly=True) as conn:
            result = conn.execute(sa.text(query), dict(user_id=user_id))
            results = result.fetchall()
        return set([r.principal for r in results])

    def add_principal_to_ace(self, object_id, permission, principal):
        """Grant ``permission`` on ``object_id`` to ``principal`` (idempotent)."""
        query = """
        INSERT INTO access_control_entries (object_id, permission, principal)
        SELECT :object_id, :permission, :principal
         WHERE NOT EXISTS (
            SELECT principal
              FROM access_control_entries
             WHERE object_id = :object_id
               AND permission = :permission
               AND principal = :principal
        );"""
        with self.client.connect() as conn:
            conn.execute(
                sa.text(query),
                dict(object_id=object_id, permission=permission, principal=principal),
            )

    def remove_principal_from_ace(self, object_id, permission, principal):
        """Revoke ``permission`` on ``object_id`` from ``principal``."""
        query = """
        DELETE FROM access_control_entries
         WHERE object_id = :object_id
           AND permission = :permission
           AND principal = :principal;"""
        with self.client.connect() as conn:
            conn.execute(
                sa.text(query),
                dict(object_id=object_id, permission=permission, principal=principal),
            )

    def get_object_permission_principals(self, object_id, permission):
        """Return the set of principals granted ``permission`` on ``object_id``."""
        query = """
        SELECT principal
          FROM access_control_entries
         WHERE object_id = :object_id
           AND permission = :permission;"""
        with self.client.connect(readonly=True) as conn:
            result = conn.execute(sa.text(query), dict(object_id=object_id, permission=permission))
            results = result.fetchall()
        return set([r.principal for r in results])

    def get_authorized_principals(self, bound_permissions):
        """Return every principal that holds at least one of the given
        ``(object_id, permission)`` pairs.
        """
        # XXX: this method is not used, except in test suites :(
        if not bound_permissions:
            return set()

        # Build a VALUES list of (object_id, permission) pairs, bound via
        # named placeholders to avoid SQL injection.
        placeholders = {}
        perm_values = []
        for i, (obj, perm) in enumerate(bound_permissions):
            placeholders[f"obj_{i}"] = obj
            placeholders[f"perm_{i}"] = perm
            perm_values.append(f"(:obj_{i}, :perm_{i})")

        # column1/column2 are PostgreSQL's default names for VALUES columns.
        query = f"""
        WITH required_perms AS (
          VALUES {",".join(perm_values)}
        )
        SELECT principal
          FROM required_perms JOIN access_control_entries
            ON (object_id = column1 AND permission = column2);
        """
        with self.client.connect(readonly=True) as conn:
            result = conn.execute(sa.text(query), placeholders)
            results = result.fetchall()
        return set([r.principal for r in results])

    def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):
        """Return a dict mapping object_id to the set of permissions the
        given ``principals`` hold on it.

        :param bound_permissions: optional list of ``(object_id, permission)``
            pairs to filter on; object ids may contain ``*`` wildcards.
        :param with_children: when False, exclude objects nested below a
            matching pattern (i.e. ids that extend the pattern with ``/...``).
        """
        placeholders = {}

        if bound_permissions is None:
            # Return all objects on which the specified principals have some
            # permissions.
            # (e.g. permissions endpoint which lists everything)
            query = """
            SELECT object_id, permission
              FROM access_control_entries
             WHERE principal IN :principals
            """
            placeholders["principals"] = tuple(principals)

        elif len(bound_permissions) == 0:
            # If the list of object permissions to filter on is empty, then
            # do not bother querying the backend. The result will be empty.
            # (e.g. root object /buckets)
            return {}
        else:
            principals_values = []
            for i, principal in enumerate(principals):
                placeholders[f"principal_{i}"] = principal
                principals_values.append(f"(:principal_{i})")

            perm_values = []
            for i, (obj, perm) in enumerate(bound_permissions):
                # ``*`` wildcards in object ids become SQL LIKE ``%``.
                placeholders[f"obj_{i}"] = obj.replace("*", "%")
                placeholders[f"perm_{i}"] = perm
                perm_values.append("(:obj_{0}, :perm_{0})".format(i))

            if with_children:
                object_id_condition = "object_id LIKE pattern"
            else:
                # Match the pattern but reject ids that descend below it.
                object_id_condition = (
                    "object_id LIKE pattern AND object_id NOT LIKE pattern || '/%'"
                )
            query = f"""
            WITH required_perms AS (
              VALUES {",".join(perm_values)}
            ),
            user_principals AS (
              VALUES {",".join(principals_values)}
            ),
            potential_objects AS (
              SELECT object_id, permission, required_perms.column1 AS pattern
                FROM access_control_entries
                JOIN user_principals
                  ON (principal = user_principals.column1)
                JOIN required_perms
                  ON (permission = required_perms.column2)
            )
            SELECT object_id, permission
              FROM potential_objects
             WHERE {object_id_condition};
            """

        with self.client.connect(readonly=True) as conn:
            result = conn.execute(sa.text(query), placeholders)
            results = result.fetchall()

        perms_by_id = {}
        for r in results:
            # r[1] is the ``permission`` column of the SELECT.
            perms_by_id.setdefault(r.object_id, set()).add(r[1])
        return perms_by_id

    def check_permission(self, principals, bound_permissions):
        """Return True if any of ``principals`` holds any of the
        ``(object_id, permission)`` pairs in ``bound_permissions``.
        """
        if not bound_permissions:
            return False

        placeholders = {}
        perms_values = []
        for i, (obj, perm) in enumerate(bound_permissions):
            placeholders[f"obj_{i}"] = obj
            placeholders[f"perm_{i}"] = perm
            perms_values.append("(:obj_{0}, :perm_{0})".format(i))

        principals_values = []
        for i, principal in enumerate(principals):
            placeholders[f"principal_{i}"] = principal
            principals_values.append(f"(:principal_{i})")

        # Intersect the principals allowed by the ACEs with the caller's
        # principals; a non-zero count means access is granted.
        query = f"""
        WITH required_perms AS (
          VALUES {",".join(perms_values)}
        ),
        allowed_principals AS (
          SELECT principal
            FROM required_perms JOIN access_control_entries
              ON (object_id = column1 AND permission = column2)
        ),
        required_principals AS (
          VALUES {",".join(principals_values)}
        )
        SELECT COUNT(*) AS matched
          FROM required_principals JOIN allowed_principals
            ON (required_principals.column1 = principal);
        """

        with self.client.connect(readonly=True) as conn:
            result = conn.execute(sa.text(query), placeholders)
            total = result.fetchone()
        return total.matched > 0

    def get_objects_permissions(self, objects_ids, permissions=None):
        """Return permissions for a list of objects, as a list of
        ``{permission: set(principals)}`` dicts in the same order as
        ``objects_ids`` (empty dict for objects with no ACE).

        :param permissions: optional list restricting which permissions
            are fetched.
        """
        placeholders = {"object_ids": tuple(objects_ids)}
        query = """
        SELECT object_id, permission, principal
          FROM access_control_entries
         WHERE object_id IN :object_ids
               {permissions_condition};
        """
        # ``safeholders`` holds SQL fragments (not user data) interpolated
        # into the query text; user data goes through bound placeholders.
        safeholders = {"permissions_condition": ""}
        if permissions is not None:
            safeholders["permissions_condition"] = """
           AND permission IN :permissions"""
            placeholders["permissions"] = tuple(permissions)

        query = query.format_map(safeholders)

        with self.client.connect(readonly=True) as conn:
            result = conn.execute(sa.text(query), placeholders)
            rows = result.fetchall()

        # Pre-seed with empty dicts so the output order and length match
        # ``objects_ids`` exactly.
        groupby_id = OrderedDict()
        for object_id in objects_ids:
            groupby_id[object_id] = {}
        for row in rows:
            object_id, permission, principal = (
                row.object_id,
                row.permission,
                row.principal,
            )
            groupby_id[object_id].setdefault(permission, set()).add(principal)
        return list(groupby_id.values())

    def replace_object_permissions(self, object_id, permissions):
        """Atomically replace the ACEs of ``object_id`` for the permissions
        listed in ``permissions`` (a ``{permission: principals}`` mapping).

        Permissions not present in the mapping are left untouched. A
        permission mapped to an empty principal list has its ACEs deleted.
        """
        if not permissions:
            return

        placeholders = {"object_id": object_id}

        new_aces = []
        specified_perms = []
        for i, (perm, principals) in enumerate(permissions.items()):
            placeholders[f"perm_{i}"] = perm
            specified_perms.append(f"(:perm_{i})")
            for principal in set(principals):
                j = len(new_aces)
                placeholders[f"principal_{j}"] = principal
                new_aces.append(f"(:perm_{i}, :principal_{j})")

        if not new_aces:
            # Only deletions: every specified permission maps to no
            # principals, so just drop the matching ACEs.
            query = f"""
            WITH specified_perms AS (
              VALUES {",".join(specified_perms)}
            )
            DELETE FROM access_control_entries
             USING specified_perms
             WHERE object_id = :object_id AND permission = column1
            """

        else:
            # Delete-then-insert in a single statement so the replacement
            # is atomic within one transaction.
            query = f"""
            WITH specified_perms AS (
              VALUES {",".join(specified_perms)}
            ),
            delete_specified AS (
              DELETE FROM access_control_entries
               USING specified_perms
               WHERE object_id = :object_id AND permission = column1
               RETURNING object_id
            ),
            affected_object AS (
              SELECT object_id FROM delete_specified
               UNION SELECT :object_id
            ),
            new_aces AS (
              VALUES {",".join(new_aces)}
            )
            INSERT INTO access_control_entries(object_id, permission, principal)
            SELECT DISTINCT d.object_id, n.column1, n.column2
              FROM new_aces AS n, affected_object AS d;
            """

        with self.client.connect() as conn:
            conn.execute(sa.text(query), placeholders)

    def delete_object_permissions(self, *object_id_list):
        """Delete all ACEs whose object_id matches any of the given ids.

        Ids may contain ``*`` wildcards, translated to SQL LIKE ``%``.
        """
        if len(object_id_list) == 0:
            return

        object_ids_values = []
        placeholders = {}
        for i, obj_id in enumerate(object_id_list):
            object_ids_values.append(f"(:obj_id_{i})")
            placeholders[f"obj_id_{i}"] = obj_id.replace("*", "%")

        query = """
        WITH object_ids AS (
          VALUES {object_ids_values}
        )
        DELETE FROM access_control_entries
         USING object_ids
         WHERE object_id LIKE column1;"""

        safeholders = {"object_ids_values": ",".join(object_ids_values)}

        if len(object_id_list) == 1:
            # Optimized version for just one object ID. This can be
            # done using an index scan on
            # idx_access_control_entries_object_id. The more
            # complicated form above confuses Postgres, which chooses
            # to do a sequential table scan rather than an index scan
            # for each entry in object_ids, even when there's only one
            # entry in object_ids.
            query = """
            DELETE FROM access_control_entries
             WHERE object_id LIKE :obj_id_0;
            """

        query = query.format_map(safeholders)

        with self.client.connect() as conn:
            conn.execute(sa.text(query), placeholders)
|
|
485
|
+
|
|
486
|
+
|
|
487
|
+
def load_from_config(config):
    """Build a PostgreSQL :class:`Permission` backend from app settings.

    Settings are read with the ``permission_`` prefix
    (``permission_url``, ``permission_pool_size``, ...).
    """
    pg_client = create_from_config(config, prefix="permission_")
    return Permission(client=pg_client)
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
-- Migration of the permission backend schema: version 1 -> 2.

-- Same table as exists in the storage backend, but used to track
-- migration status for both. Only one schema actually has to create
-- it.
CREATE TABLE IF NOT EXISTS metadata (
    name VARCHAR(128) NOT NULL,
    value VARCHAR(512) NOT NULL
);

-- IDs are not really human language text, so set them to be COLLATE
-- "C" rather than the DB default collation. This also speeds up
-- prefix-match queries (object_id LIKE '/bucket/abc/%').
ALTER TABLE user_principals
    ALTER COLUMN user_id TYPE TEXT COLLATE "C";

ALTER TABLE access_control_entries
    ALTER COLUMN object_id TYPE TEXT COLLATE "C";

-- Record the schema version reached by this migration.
INSERT INTO metadata (name, value) VALUES ('permission_schema_version', '2');
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
--
-- Automated script, we do not need NOTICE and WARNING
--
SET client_min_messages TO ERROR;

-- Maps each user to the principals (groups, roles) it holds.
CREATE TABLE IF NOT EXISTS user_principals (
    -- IDs are not really human language text, so set them to be
    -- COLLATE "C" rather than the DB default collation. This also
    -- speeds up prefix-match queries (object_id LIKE
    -- '/bucket/abc/%').
    user_id TEXT COLLATE "C",
    principal TEXT,

    PRIMARY KEY (user_id, principal)
);

-- One row per (object, permission, principal) grant.
CREATE TABLE IF NOT EXISTS access_control_entries (
    -- Use COLLATE "C" as above because object IDs are really URLs,
    -- not human text.
    object_id TEXT COLLATE "C",
    permission TEXT,
    principal TEXT,

    PRIMARY KEY (object_id, permission, principal)
);
-- Single-column indexes supporting lookups by each ACE component.
CREATE INDEX IF NOT EXISTS idx_access_control_entries_object_id
    ON access_control_entries(object_id);
CREATE INDEX IF NOT EXISTS idx_access_control_entries_permission
    ON access_control_entries(permission);
CREATE INDEX IF NOT EXISTS idx_access_control_entries_principal
    ON access_control_entries(principal);

-- Same table as exists in the storage backend, but used to track
-- migration status for both. Only one schema actually has to create
-- it.
CREATE TABLE IF NOT EXISTS metadata (
    name VARCHAR(128) NOT NULL,
    value VARCHAR(512) NOT NULL
);

-- Fresh installs start directly at the latest schema version.
INSERT INTO metadata VALUES ('permission_schema_version', '2');
|