kinto-23.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. kinto/__init__.py +92 -0
  2. kinto/__main__.py +249 -0
  3. kinto/authorization.py +134 -0
  4. kinto/config/__init__.py +94 -0
  5. kinto/config/kinto.tpl +270 -0
  6. kinto/contribute.json +27 -0
  7. kinto/core/__init__.py +246 -0
  8. kinto/core/authentication.py +48 -0
  9. kinto/core/authorization.py +311 -0
  10. kinto/core/cache/__init__.py +131 -0
  11. kinto/core/cache/memcached.py +112 -0
  12. kinto/core/cache/memory.py +104 -0
  13. kinto/core/cache/postgresql/__init__.py +178 -0
  14. kinto/core/cache/postgresql/schema.sql +23 -0
  15. kinto/core/cache/testing.py +208 -0
  16. kinto/core/cornice/__init__.py +93 -0
  17. kinto/core/cornice/cors.py +144 -0
  18. kinto/core/cornice/errors.py +40 -0
  19. kinto/core/cornice/pyramidhook.py +373 -0
  20. kinto/core/cornice/renderer.py +89 -0
  21. kinto/core/cornice/resource.py +205 -0
  22. kinto/core/cornice/service.py +641 -0
  23. kinto/core/cornice/util.py +138 -0
  24. kinto/core/cornice/validators/__init__.py +94 -0
  25. kinto/core/cornice/validators/_colander.py +142 -0
  26. kinto/core/cornice/validators/_marshmallow.py +182 -0
  27. kinto/core/cornice_swagger/__init__.py +92 -0
  28. kinto/core/cornice_swagger/converters/__init__.py +21 -0
  29. kinto/core/cornice_swagger/converters/exceptions.py +6 -0
  30. kinto/core/cornice_swagger/converters/parameters.py +90 -0
  31. kinto/core/cornice_swagger/converters/schema.py +249 -0
  32. kinto/core/cornice_swagger/swagger.py +725 -0
  33. kinto/core/cornice_swagger/templates/index.html +73 -0
  34. kinto/core/cornice_swagger/templates/index_script_template.html +21 -0
  35. kinto/core/cornice_swagger/util.py +42 -0
  36. kinto/core/cornice_swagger/views.py +78 -0
  37. kinto/core/decorators.py +74 -0
  38. kinto/core/errors.py +216 -0
  39. kinto/core/events.py +301 -0
  40. kinto/core/initialization.py +738 -0
  41. kinto/core/listeners/__init__.py +9 -0
  42. kinto/core/metrics.py +94 -0
  43. kinto/core/openapi.py +115 -0
  44. kinto/core/permission/__init__.py +202 -0
  45. kinto/core/permission/memory.py +167 -0
  46. kinto/core/permission/postgresql/__init__.py +489 -0
  47. kinto/core/permission/postgresql/migrations/migration_001_002.sql +18 -0
  48. kinto/core/permission/postgresql/schema.sql +41 -0
  49. kinto/core/permission/testing.py +487 -0
  50. kinto/core/resource/__init__.py +1311 -0
  51. kinto/core/resource/model.py +412 -0
  52. kinto/core/resource/schema.py +502 -0
  53. kinto/core/resource/viewset.py +230 -0
  54. kinto/core/schema.py +119 -0
  55. kinto/core/scripts.py +50 -0
  56. kinto/core/statsd.py +1 -0
  57. kinto/core/storage/__init__.py +436 -0
  58. kinto/core/storage/exceptions.py +53 -0
  59. kinto/core/storage/generators.py +58 -0
  60. kinto/core/storage/memory.py +651 -0
  61. kinto/core/storage/postgresql/__init__.py +1131 -0
  62. kinto/core/storage/postgresql/client.py +120 -0
  63. kinto/core/storage/postgresql/migrations/migration_001_002.sql +10 -0
  64. kinto/core/storage/postgresql/migrations/migration_002_003.sql +33 -0
  65. kinto/core/storage/postgresql/migrations/migration_003_004.sql +18 -0
  66. kinto/core/storage/postgresql/migrations/migration_004_005.sql +20 -0
  67. kinto/core/storage/postgresql/migrations/migration_005_006.sql +11 -0
  68. kinto/core/storage/postgresql/migrations/migration_006_007.sql +74 -0
  69. kinto/core/storage/postgresql/migrations/migration_007_008.sql +66 -0
  70. kinto/core/storage/postgresql/migrations/migration_008_009.sql +41 -0
  71. kinto/core/storage/postgresql/migrations/migration_009_010.sql +98 -0
  72. kinto/core/storage/postgresql/migrations/migration_010_011.sql +14 -0
  73. kinto/core/storage/postgresql/migrations/migration_011_012.sql +9 -0
  74. kinto/core/storage/postgresql/migrations/migration_012_013.sql +71 -0
  75. kinto/core/storage/postgresql/migrations/migration_013_014.sql +14 -0
  76. kinto/core/storage/postgresql/migrations/migration_014_015.sql +95 -0
  77. kinto/core/storage/postgresql/migrations/migration_015_016.sql +4 -0
  78. kinto/core/storage/postgresql/migrations/migration_016_017.sql +81 -0
  79. kinto/core/storage/postgresql/migrations/migration_017_018.sql +25 -0
  80. kinto/core/storage/postgresql/migrations/migration_018_019.sql +8 -0
  81. kinto/core/storage/postgresql/migrations/migration_019_020.sql +7 -0
  82. kinto/core/storage/postgresql/migrations/migration_020_021.sql +68 -0
  83. kinto/core/storage/postgresql/migrations/migration_021_022.sql +62 -0
  84. kinto/core/storage/postgresql/migrations/migration_022_023.sql +5 -0
  85. kinto/core/storage/postgresql/migrations/migration_023_024.sql +6 -0
  86. kinto/core/storage/postgresql/migrations/migration_024_025.sql +6 -0
  87. kinto/core/storage/postgresql/migrator.py +98 -0
  88. kinto/core/storage/postgresql/pool.py +55 -0
  89. kinto/core/storage/postgresql/schema.sql +143 -0
  90. kinto/core/storage/testing.py +1857 -0
  91. kinto/core/storage/utils.py +37 -0
  92. kinto/core/testing.py +182 -0
  93. kinto/core/utils.py +553 -0
  94. kinto/core/views/__init__.py +0 -0
  95. kinto/core/views/batch.py +163 -0
  96. kinto/core/views/errors.py +145 -0
  97. kinto/core/views/heartbeat.py +106 -0
  98. kinto/core/views/hello.py +69 -0
  99. kinto/core/views/openapi.py +35 -0
  100. kinto/core/views/version.py +50 -0
  101. kinto/events.py +3 -0
  102. kinto/plugins/__init__.py +0 -0
  103. kinto/plugins/accounts/__init__.py +94 -0
  104. kinto/plugins/accounts/authentication.py +63 -0
  105. kinto/plugins/accounts/scripts.py +61 -0
  106. kinto/plugins/accounts/utils.py +13 -0
  107. kinto/plugins/accounts/views.py +136 -0
  108. kinto/plugins/admin/README.md +3 -0
  109. kinto/plugins/admin/VERSION +1 -0
  110. kinto/plugins/admin/__init__.py +40 -0
  111. kinto/plugins/admin/build/VERSION +1 -0
  112. kinto/plugins/admin/build/assets/index-CYFwtKtL.css +6 -0
  113. kinto/plugins/admin/build/assets/index-DJ0m93zA.js +149 -0
  114. kinto/plugins/admin/build/assets/logo-VBRiKSPX.png +0 -0
  115. kinto/plugins/admin/build/index.html +18 -0
  116. kinto/plugins/admin/public/help.html +25 -0
  117. kinto/plugins/admin/views.py +42 -0
  118. kinto/plugins/default_bucket/__init__.py +191 -0
  119. kinto/plugins/flush.py +28 -0
  120. kinto/plugins/history/__init__.py +65 -0
  121. kinto/plugins/history/listener.py +181 -0
  122. kinto/plugins/history/views.py +66 -0
  123. kinto/plugins/openid/__init__.py +131 -0
  124. kinto/plugins/openid/utils.py +14 -0
  125. kinto/plugins/openid/views.py +193 -0
  126. kinto/plugins/prometheus.py +300 -0
  127. kinto/plugins/statsd.py +85 -0
  128. kinto/schema_validation.py +135 -0
  129. kinto/views/__init__.py +34 -0
  130. kinto/views/admin.py +195 -0
  131. kinto/views/buckets.py +45 -0
  132. kinto/views/collections.py +58 -0
  133. kinto/views/contribute.py +39 -0
  134. kinto/views/groups.py +90 -0
  135. kinto/views/permissions.py +235 -0
  136. kinto/views/records.py +133 -0
  137. kinto-23.2.1.dist-info/METADATA +232 -0
  138. kinto-23.2.1.dist-info/RECORD +142 -0
  139. kinto-23.2.1.dist-info/WHEEL +5 -0
  140. kinto-23.2.1.dist-info/entry_points.txt +5 -0
  141. kinto-23.2.1.dist-info/licenses/LICENSE +13 -0
  142. kinto-23.2.1.dist-info/top_level.txt +1 -0
kinto/plugins/statsd.py ADDED
@@ -0,0 +1,85 @@
+ import warnings
+ from datetime import timedelta
+ from urllib.parse import urlparse
+
+ from pyramid.exceptions import ConfigurationError
+ from zope.interface import implementer
+
+ from kinto.core import metrics
+
+
+ try:
+     import statsd as statsd_module
+ except ImportError:  # pragma: no cover
+     statsd_module = None
+
+
+ def sanitize(value):
+     """
+     Telegraf does not support ':' in values.
+     See https://github.com/influxdata/telegraf/issues/4495
+     """
+     return value.replace(":", "") if isinstance(value, str) else value
+
+
+ @implementer(metrics.IMetricsService)
+ class StatsDService:
+     def __init__(self, host, port, prefix):
+         self._client = statsd_module.StatsClient(host, port, prefix=prefix)
+
+     def timer(self, key, value=None, labels=[]):
+         if labels:
+             # [("method", "get")] -> "method.get"
+             key = f"{key}." + ".".join(f"{label[0]}.{sanitize(label[1])}" for label in labels)
+         if value:
+             value = timedelta(seconds=value)
+             return self._client.timing(key, value)
+         return self._client.timer(key)
+
+     def observe(self, key, value, labels=[]):
+         return self._client.gauge(key, sanitize(value))
+
+     def count(self, key, count=1, unique=None):
+         if unique is None:
+             return self._client.incr(key, count=count)
+         if isinstance(unique, list):
+             # [("method", "get")] -> "method.get"
+             # [("endpoint", "/"), ("method", "get")] -> "endpoint./.method.get"
+             unique = ".".join(f"{label[0]}.{sanitize(label[1])}" for label in unique)
+         else:
+             warnings.warn(
+                 "`unique` parameter should be of type ``list[tuple[str, str]]``",
+                 DeprecationWarning,
+             )
+         return self._client.set(key, unique)
+
+
+ def load_from_config(config):
+     # If this is called, it means that a ``statsd_url`` was specified in settings.
+     # (see ``kinto.core.initialization``)
+     # Raise a proper error if the ``statsd`` module is not installed.
+     if statsd_module is None:
+         error_msg = "Please install Kinto with monitoring dependencies (e.g. statsd package)"
+         raise ConfigurationError(error_msg)
+
+     settings = config.get_settings()
+     uri = settings["statsd_url"]
+     uri = urlparse(uri)
+
+     if settings["project_name"] != "":
+         prefix = settings["project_name"]
+     else:
+         prefix = settings["statsd_prefix"]
+
+     return StatsDService(uri.hostname, uri.port, prefix)
+
+
+ def includeme(config):
+     settings = config.get_settings()
+
+     # TODO: this backend abstraction may not be required anymore.
+     statsd_mod = settings["statsd_backend"]
+     statsd_mod = config.maybe_dotted(statsd_mod)
+     client = statsd_mod.load_from_config(config)
+
+     config.registry.registerUtility(client, metrics.IMetricsService)
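For context on how this statsd backend flattens metric labels into dotted keys (the `timer` and `count` methods above), here is a minimal sketch that mirrors the in-code comments. It is not part of the wheel; the function name and example metric names are illustrative only.

    # Sketch of StatsDService's label-to-key encoding (hypothetical helper, not in the package).
    def build_key(key, labels):
        # [("method", "get"), ("status", "200")] -> "request.method.get.status.200"
        sanitize = lambda v: v.replace(":", "") if isinstance(v, str) else v
        return f"{key}." + ".".join(f"{name}.{sanitize(value)}" for name, value in labels)

    assert build_key("request", [("method", "get")]) == "request.method.get"
    assert build_key("view", [("endpoint", "/v1/")]) == "view.endpoint./v1/"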
kinto/schema_validation.py ADDED
@@ -0,0 +1,135 @@
+ import colander
+ from jsonschema import Draft7Validator as DraftValidator
+ from jsonschema import RefResolutionError, SchemaError, ValidationError
+ from jsonschema.validators import validator_for
+ from pyramid.settings import asbool
+
+ from kinto.core import utils
+ from kinto.core.errors import raise_invalid
+ from kinto.views import object_exists_or_404
+
+
+ class JSONSchemaMapping(colander.SchemaNode):
+     def schema_type(self, **kw):
+         return colander.Mapping(unknown="preserve")
+
+     def deserialize(self, cstruct=colander.null):
+         # Start by deserializing a simple mapping.
+         validated = super().deserialize(cstruct)
+         try:
+             check_schema(validated)
+         except ValidationError as e:
+             self.raise_invalid(e.message)
+         return validated
+
+
+ def check_schema(data):
+     try:
+         DraftValidator.check_schema(data)
+     except SchemaError as e:
+         message = e.path.pop() + e.message
+         raise ValidationError(message)
+
+
+ # Module level global that stores a version of every possible schema (as a <class 'dict'>)
+ # turned into a jsonschema instance (as <class 'jsonschema.validators.Validator'>).
+ _schema_cache = {}
+
+
+ def validate(data, schema):
+     """Raise a ValidationError or a RefResolutionError if the data doesn't validate
+     with the given schema.
+
+     Note that this function is just a "wrapper" on `jsonschema.validate()` but with
+     some memoization based on the schema for better repeat performance.
+     """
+     # Because the schema is a dict, it can't be used as a hash key so it needs to be
+     # "transformed" to something that is hashable. The quickest solution is to convert
+     # it to a string.
+     # Note that the order of the dict will determine the string it becomes. The solution
+     # to that would a canonical serializer like `json.dumps(..., sort_keys=True)` but it's
+     # overkill since the assumption is that the schema is very unlikely to be exactly
+     # the same but different order.
+     cache_key = str(schema)
+     if cache_key not in _schema_cache:
+         # This is essentially what the `jsonschema.validate()` shortcut function does.
+         cls = validator_for(schema)
+         cls.check_schema(schema)
+         _schema_cache[cache_key] = cls(schema)
+     return _schema_cache[cache_key].validate(data)
+
+
+ def validate_schema(data, schema, id_field, ignore_fields=None):
+     if ignore_fields is None:
+         ignore_fields = []
+     # Only ignore the `id` field if the schema does not explicitly mention it.
+     if id_field not in schema.get("properties", {}):
+         ignore_fields += (id_field,)
+
+     required_fields = [f for f in schema.get("required", []) if f not in ignore_fields]
+     # jsonschema doesn't accept 'required': [] yet.
+     # See https://github.com/Julian/jsonschema/issues/337.
+     # In the meantime, strip out 'required' if no other fields are required.
+     if required_fields:
+         schema = {**schema, "required": required_fields}
+     else:
+         schema = {f: v for f, v in schema.items() if f != "required"}
+
+     data = {f: v for f, v in data.items() if f not in ignore_fields}
+
+     try:
+         validate(data, schema)
+     except ValidationError as e:
+         if e.path:
+             field = e.path[-1]
+         elif e.validator_value:
+             field = e.validator_value[-1]
+         else:
+             field = e.schema_path[-1]
+         e.field = field
+         raise e
+     # Raise an error here if a reference in the schema doesn't resolve.
+     # jsonschema doesn't provide schema validation checking upon creation yet,
+     # it must be validated against data.
+     # See https://github.com/Julian/jsonschema/issues/399
+     # For future support https://github.com/Julian/jsonschema/issues/346.
+     except RefResolutionError as e:
+         raise e
+
+
+ def validate_from_bucket_schema_or_400(data, resource_name, request, id_field, ignore_fields):
+     """Lookup in the parent objects if a schema was defined for this resource.
+
+     If the schema validation feature is enabled, if a schema is/are defined, and if the
+     data does not validate it/them, then it raises a 400 exception.
+     """
+     settings = request.registry.settings
+     schema_validation = "experimental_collection_schema_validation"
+     # If disabled from settings, do nothing.
+     if not asbool(settings.get(schema_validation)):
+         return
+
+     bucket_id = request.matchdict["bucket_id"]
+     bucket_uri = utils.instance_uri(request, "bucket", id=bucket_id)
+     buckets = request.bound_data.setdefault("buckets", {})
+     if bucket_uri not in buckets:
+         # Unknown yet, fetch from storage.
+         bucket = object_exists_or_404(
+             request, resource_name="bucket", parent_id="", object_id=bucket_id
+         )
+         buckets[bucket_uri] = bucket
+
+     # Let's see if the bucket defines a schema for this resource.
+     metadata_field = f"{resource_name}:schema"
+     bucket = buckets[bucket_uri]
+     if metadata_field not in bucket:
+         return
+
+     # Validate or fail with 400.
+     schema = bucket[metadata_field]
+     try:
+         validate_schema(data, schema, ignore_fields=ignore_fields, id_field=id_field)
+     except ValidationError as e:
+         raise_invalid(request, name=e.field, description=e.message)
+     except RefResolutionError as e:
+         raise_invalid(request, name="schema", description=str(e))
kinto/views/__init__.py ADDED
@@ -0,0 +1,34 @@
+ import random
+ import string
+
+ from pyramid.httpexceptions import HTTPNotFound
+
+ from kinto.core.errors import ERRORS, http_error
+ from kinto.core.storage import exceptions, generators
+
+
+ class NameGenerator(generators.Generator):
+     def __call__(self):
+         alpha_num = string.ascii_letters + string.digits
+         alphabet = alpha_num + "-_"
+         letters = [random.SystemRandom().choice(alpha_num)]
+         letters += [random.SystemRandom().choice(alphabet) for x in range(7)]
+
+         return "".join(letters)
+
+
+ class RelaxedUUID(generators.UUID4):
+     """A generator that generates UUIDs but accepts any string."""
+
+     regexp = generators.Generator.regexp
+
+
+ def object_exists_or_404(request, resource_name, object_id, parent_id=""):
+     storage = request.registry.storage
+     try:
+         return storage.get(resource_name=resource_name, parent_id=parent_id, object_id=object_id)
+     except exceptions.ObjectNotFoundError:
+         # XXX: We gave up putting details about parent id here (See #53).
+         details = {"id": object_id, "resource_name": resource_name}
+         response = http_error(HTTPNotFound(), errno=ERRORS.MISSING_RESOURCE, details=details)
+         raise response
kinto/views/admin.py ADDED
@@ -0,0 +1,195 @@
+ """
+ Special views for administration.
+ """
+
+ import collections
+ import itertools
+
+ import colander
+
+ from kinto.authorization import RouteFactory
+ from kinto.core import resource
+ from kinto.core import utils as core_utils
+ from kinto.core.events import ACTIONS, notify_resource_event
+ from kinto.core.resource import viewset
+ from kinto.core.storage import Filter
+
+
+ def slice_into_batches(iterable, batch_size):
+     # Taken from https://code.activestate.com/recipes/303279-getting-items-in-batches/
+     i = iter(iterable)
+     while True:
+         batchiter = itertools.islice(i, batch_size)
+         try:
+             yield itertools.chain([next(batchiter)], batchiter)
+         except StopIteration:
+             return
+
+
+ class Deleted(resource.ResourceSchema):
+     principal = colander.SchemaNode(colander.String())
+
+     class Options:
+         preserve_unknown = False
+
+
+ class UserDataFactory(RouteFactory):
+     method_permissions = {"delete": "delete"}
+
+
+ class UserDataViewSet(viewset.ViewSet):
+     factory = UserDataFactory
+
+
+ def get_parent_uri(object_uri):
+     """Get the parent URI for an object_uri.
+
+     In order to be generic over any kind of resource hierarchy, we do
+     this by string manipulation on the URI instead of trying to parse
+     the URI, identify the parent resource, and generate a new URI.
+
+     """
+     path = object_uri.rsplit("/", 2)
+     # len(path) == 1: no '/', probably a broken URL?
+     # len(path) == 2: one '/', doesn't conform to our URL scheme
+     if len(path) < 3:
+         return ""
+
+     return path[0]
+
+
+ def condense_under_parents(request, object_uris):
+     """Simplify object_uris by removing "duplicates".
+
+     Deleting a resource usually cascades to all its descendant
+     resources. Use this out-of-band knowledge to remove any objects
+     which will already be deleted by virtue of deleting their
+     ancestors.
+
+     """
+     # Sort object_uris so we see ancestors before descendants.
+     object_uris = list(object_uris)
+     object_uris.sort()
+
+     ancestor_object_uris = set()
+     for object_uri in object_uris:
+         include = True
+         parent_uri = get_parent_uri(object_uri)
+         while parent_uri:
+             if parent_uri in ancestor_object_uris:
+                 # It's being deleted already.
+                 include = False
+                 break
+             parent_uri = get_parent_uri(parent_uri)
+
+         if include:
+             ancestor_object_uris.add(object_uri)
+
+     return list(ancestor_object_uris)
+
+
+ @resource.register(
+     name="user-data",
+     description="Delete the data owned by a user",
+     plural_path="/__user_data__",
+     object_path="/__user_data__/{{principal}}",
+     viewset=UserDataViewSet(),
+     plural_methods=tuple(),
+     object_methods=("DELETE",),
+ )
+ class UserData(resource.Resource):
+     schema = Deleted
+
+     def delete(self):
+         principal = self.request.matchdict["principal"]
+         storage = self.request.registry.storage
+         permission = self.request.registry.permission
+         object_uris_and_permissions = permission.get_accessible_objects([principal])
+         object_uris = list(object_uris_and_permissions.keys())
+         write_perm_principals = permission.get_objects_permissions(object_uris, ["write"])
+         to_delete = set()
+         for object_uri, principals in zip(object_uris, write_perm_principals):
+             principals = principals["write"]
+             # "Ownership" isn't a real concept in Kinto, so instead we
+             # define ownership as meaning "this user is the only one
+             # who can write to this object".
+             if principals == set([principal]):
+                 to_delete.add(object_uri)
+
+         # Any accessible objects that won't be deleted, need to have
+         # the user's permission removed.
+         for object_uri, permissions in object_uris_and_permissions.items():
+             if object_uri in to_delete:
+                 continue
+
+             for perm in permissions:
+                 permission.remove_principal_from_ace(object_uri, perm, principal)
+
+         to_delete = condense_under_parents(self.request, to_delete)
+
+         # Group by (parent_uri, resource of child) to make fewer
+         # requests to storage backend.
+         # Store the parsed object IDs, since those are what we
+         # actually give to the storage backend.
+         object_ids_by_parent_uri = collections.defaultdict(list)
+         # Store also the object URIs, which we give to the permission backend.
+         objects_by_parent_uri = collections.defaultdict(list)
+         # We have to get the matchdict of the child here anyhow, so
+         # keep that to generate events later.
+         matchdicts_by_parent_uri = {}
+         for object_uri in to_delete:
+             parent_uri = get_parent_uri(object_uri)
+             resource_name, matchdict = core_utils.view_lookup(self.request, object_uri)
+             objects_by_parent_uri[(parent_uri, resource_name)].append(object_uri)
+             object_ids_by_parent_uri[(parent_uri, resource_name)].append(matchdict["id"])
+             # This overwrites previous matchdicts for the parent, but
+             # we'll only use the fields that are relevant to the
+             # parent, which will be the same for each child.
+             matchdicts_by_parent_uri[parent_uri] = matchdict
+
+         for (parent_uri, resource_name), object_ids in object_ids_by_parent_uri.items():
+             # Generate the parent matchdict from an arbitrary child's matchdict.
+             matchdict = {**matchdicts_by_parent_uri[parent_uri]}
+             matchdict.pop("id", None)
+
+             # Deletes are paginated too, so take the page size from settings.
+             batch_size = self.request.registry.settings["storage_max_fetch_size"]
+             for batch in slice_into_batches(object_ids, batch_size):
+                 batch = list(batch)
+                 filters = [Filter("id", batch, core_utils.COMPARISON.IN)]
+                 timestamp = storage.resource_timestamp(resource_name, parent_uri)
+                 records = storage.list_all(
+                     resource_name=resource_name, parent_id=parent_uri, filters=filters
+                 )
+                 tombstones = storage.delete_all(
+                     resource_name=resource_name, parent_id=parent_uri, filters=filters
+                 )
+                 notify_resource_event(
+                     self.request,
+                     parent_uri,
+                     timestamp,
+                     tombstones,
+                     ACTIONS.DELETE,
+                     old=records,
+                     resource_name=resource_name,
+                     resource_data=matchdict,
+                 )
+                 # FIXME: need to purge the above tombstones, but no
+                 # way to purge just some tombstones for just this
+                 # principal
+
+             # Clear permissions from the deleted objects, for
+             # example those of other users.
+             permission.delete_object_permissions(
+                 *objects_by_parent_uri[(parent_uri, resource_name)]
+             )
+
+         # Remove this principal from existing users.
+         permission.remove_principal(principal)
+
+         # Remove this principal from all groups that contain it.
+         associated_principals = permission.get_user_principals(principal)
+         for associated_principal in associated_principals:
+             permission.remove_user_principal(principal, associated_principal)
+
+         return {"data": {"principal": principal}}
kinto/views/buckets.py ADDED
@@ -0,0 +1,45 @@
+ import colander
+ from pyramid.events import subscriber
+
+ from kinto.core import resource
+ from kinto.core.events import ACTIONS, ResourceChanged
+ from kinto.core.utils import instance_uri
+ from kinto.schema_validation import JSONSchemaMapping
+
+
+ class BucketSchema(resource.ResourceSchema):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self["collection:schema"] = JSONSchemaMapping(missing=colander.drop)
+         self["group:schema"] = JSONSchemaMapping(missing=colander.drop)
+         self["record:schema"] = JSONSchemaMapping(missing=colander.drop)
+
+
+ @resource.register(name="bucket", plural_path="/buckets", object_path="/buckets/{{id}}")
+ class Bucket(resource.Resource):
+     schema = BucketSchema
+     permissions = ("read", "write", "collection:create", "group:create")
+
+     def get_parent_id(self, request):
+         # Buckets are not isolated by user, unlike Kinto-Core resources.
+         return ""
+
+
+ @subscriber(ResourceChanged, for_resources=("bucket",), for_actions=(ACTIONS.DELETE,))
+ def on_buckets_deleted(event):
+     """Some buckets were deleted, delete sub-resources."""
+     storage = event.request.registry.storage
+     permission = event.request.registry.permission
+
+     for change in event.impacted_objects:
+         bucket = change["old"]
+         bucket_uri = instance_uri(event.request, "bucket", id=bucket["id"])
+
+         # Delete everything with current parent id (eg. collections, groups)
+         # and descending children objects (eg. records).
+         for pattern in (bucket_uri, bucket_uri + "/*"):
+             storage.delete_all(parent_id=pattern, resource_name=None, with_deleted=False)
+             # Remove remaining tombstones too.
+             storage.purge_deleted(parent_id=pattern, resource_name=None)
+             # Remove related permissions
+             permission.delete_object_permissions(pattern)
kinto/views/collections.py ADDED
@@ -0,0 +1,58 @@
+ import colander
+ from pyramid.events import subscriber
+
+ from kinto.core import resource, utils
+ from kinto.core.events import ACTIONS, ResourceChanged
+ from kinto.schema_validation import JSONSchemaMapping, validate_from_bucket_schema_or_400
+
+
+ class CollectionSchema(resource.ResourceSchema):
+     schema = JSONSchemaMapping(missing=colander.drop)
+     cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)
+
+
+ @resource.register(
+     name="collection",
+     plural_path="/buckets/{{bucket_id}}/collections",
+     object_path="/buckets/{{bucket_id}}/collections/{{id}}",
+ )
+ class Collection(resource.Resource):
+     schema = CollectionSchema
+     permissions = ("read", "write", "record:create")
+
+     def get_parent_id(self, request):
+         bucket_id = request.matchdict["bucket_id"]
+         parent_id = utils.instance_uri(request, "bucket", id=bucket_id)
+         return parent_id
+
+     def process_object(self, new, old=None):
+         """Additional collection schema validation from bucket, if any."""
+         new = super().process_object(new, old)
+
+         # Remove internal and auto-assigned fields.
+         internal_fields = (self.model.modified_field, self.model.permissions_field)
+         validate_from_bucket_schema_or_400(
+             new,
+             resource_name="collection",
+             request=self.request,
+             ignore_fields=internal_fields,
+             id_field=self.model.id_field,
+         )
+         return new
+
+
+ @subscriber(ResourceChanged, for_resources=("collection",), for_actions=(ACTIONS.DELETE,))
+ def on_collections_deleted(event):
+     """Some collections were deleted, delete records."""
+     storage = event.request.registry.storage
+     permission = event.request.registry.permission
+
+     for change in event.impacted_objects:
+         collection = change["old"]
+         bucket_id = event.payload["bucket_id"]
+         parent_id = utils.instance_uri(
+             event.request, "collection", bucket_id=bucket_id, id=collection["id"]
+         )
+         storage.delete_all(resource_name=None, parent_id=parent_id, with_deleted=False)
+         storage.purge_deleted(resource_name=None, parent_id=parent_id)
+         permission.delete_object_permissions(parent_id + "/*")
kinto/views/contribute.py ADDED
@@ -0,0 +1,39 @@
+ import json
+ import os
+
+ import colander
+ from pyramid.security import NO_PERMISSION_REQUIRED
+
+ from kinto.core import Service
+
+
+ HERE = os.path.dirname(__file__)
+ ORIGIN = os.path.dirname(HERE)  # package root.
+ _CONTRIBUTE_INFO = None
+
+ contribute = Service(
+     name="contribute.json", description="Open-source information", path="/contribute.json"
+ )
+
+
+ class ContributeResponseSchema(colander.MappingSchema):
+     body = colander.SchemaNode(colander.Mapping(unknown="preserve"))
+
+
+ contribute_responses = {
+     "200": ContributeResponseSchema(description="Return open source contributing information.")
+ }
+
+
+ @contribute.get(
+     permission=NO_PERMISSION_REQUIRED,
+     tags=["Utilities"],
+     operation_id="contribute",
+     response_schemas=contribute_responses,
+ )
+ def contribute_get(request):
+     global _CONTRIBUTE_INFO
+     if _CONTRIBUTE_INFO is None:
+         with open(os.path.join(ORIGIN, "contribute.json")) as f:
+             _CONTRIBUTE_INFO = json.load(f)
+     return _CONTRIBUTE_INFO
kinto/views/groups.py ADDED
@@ -0,0 +1,90 @@
+ import colander
+ from pyramid.events import subscriber
+
+ from kinto.core import resource, utils
+ from kinto.core.events import ACTIONS, ResourceChanged
+ from kinto.schema_validation import validate_from_bucket_schema_or_400
+
+
+ def validate_member(node, member):
+     if member.startswith("/buckets/") or member == "system.Everyone":
+         raise colander.Invalid(node, f"'{member}' is not a valid user ID.")
+
+
+ class GroupSchema(resource.ResourceSchema):
+     members = colander.SchemaNode(
+         colander.Sequence(),
+         colander.SchemaNode(colander.String(), validator=validate_member),
+         missing=[],
+     )
+
+
+ @resource.register(
+     name="group",
+     plural_path="/buckets/{{bucket_id}}/groups",
+     object_path="/buckets/{{bucket_id}}/groups/{{id}}",
+ )
+ class Group(resource.Resource):
+     schema = GroupSchema
+
+     def get_parent_id(self, request):
+         bucket_id = request.matchdict["bucket_id"]
+         parent_id = utils.instance_uri(request, "bucket", id=bucket_id)
+         return parent_id
+
+     def process_object(self, new, old=None):
+         """Additional collection schema validation from bucket, if any."""
+         new = super().process_object(new, old)
+
+         # Remove internal and auto-assigned fields.
+         internal_fields = (self.model.modified_field, self.model.permissions_field)
+         validate_from_bucket_schema_or_400(
+             new,
+             resource_name="group",
+             request=self.request,
+             ignore_fields=internal_fields,
+             id_field=self.model.id_field,
+         )
+
+         return new
+
+
+ @subscriber(ResourceChanged, for_resources=("group",), for_actions=(ACTIONS.DELETE,))
+ def on_groups_deleted(event):
+     """Some groups were deleted, remove them from users principals."""
+     permission_backend = event.request.registry.permission
+
+     for change in event.impacted_objects:
+         group = change["old"]
+         bucket_id = event.payload["bucket_id"]
+         group_uri = utils.instance_uri(event.request, "group", bucket_id=bucket_id, id=group["id"])
+
+         permission_backend.remove_principal(group_uri)
+
+
+ @subscriber(
+     ResourceChanged, for_resources=("group",), for_actions=(ACTIONS.CREATE, ACTIONS.UPDATE)
+ )
+ def on_groups_changed(event):
+     """Some groups were changed, update users principals."""
+     permission_backend = event.request.registry.permission
+
+     for change in event.impacted_objects:
+         if "old" in change:
+             existing_record_members = set(change["old"].get("members", []))
+         else:
+             existing_record_members = set()
+
+         group = change["new"]
+         group_uri = f"/buckets/{event.payload['bucket_id']}/groups/{group['id']}"
+         new_record_members = set(group.get("members", []))
+         new_members = new_record_members - existing_record_members
+         removed_members = existing_record_members - new_record_members
+
+         for member in new_members:
+             # Add the group to the member principal.
+             permission_backend.add_user_principal(member, group_uri)
+
+         for member in removed_members:
+             # Remove the group from the member principal.
+             permission_backend.remove_user_principal(member, group_uri)
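The `on_groups_changed` subscriber above derives membership changes with plain set differences before touching the permission backend. A quick sketch of that arithmetic, using hypothetical principals (not taken from the package):

    old_members = {"basicauth:alice", "basicauth:bob"}
    new_members = {"basicauth:bob", "basicauth:carol"}

    added = new_members - old_members    # gets add_user_principal()
    removed = old_members - new_members  # gets remove_user_principal()

    assert added == {"basicauth:carol"}
    assert removed == {"basicauth:alice"}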