dyff-schema 0.23.0__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -6,14 +6,21 @@ from __future__ import annotations
 
 import functools
 import json
-from typing import Any, Iterable, NamedTuple, Protocol, Type
+import operator
+import re
+from typing import Any, Callable, Iterable, Literal, NamedTuple, Protocol, Type
 
 import jsonpath_ng as jsonpath
 from jsonpath_ng.exceptions import JSONPathError
+from jsonpath_ng.ext.parser import parse as jsonpath_parse_ext
 
 from dyff.schema.platform import SchemaAdapter
 
 
+def _json_deep_copy(data):
+    return json.loads(json.dumps(data))
+
+
 def map_structure(fn, data):
     """Given a JSON data structure ``data``, create a new data structure instance with
     the same shape as ``data`` by applying ``fn`` to each "leaf" value in the nested
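
The map_structure helper referenced above walks a nested JSON structure and applies a function to every leaf while preserving the overall shape. A minimal sketch of that documented behavior (illustrative only; the import path is an assumption based on the RECORD listing at the bottom of this diff):

from dyff.schema.v0.r1.adapters import map_structure  # assumed import path

data = {"a": [1, 2], "b": {"c": 3}}
doubled = map_structure(lambda leaf: leaf * 2, data)
# Same shape as the input, with the function applied to each leaf:
# doubled == {"a": [2, 4], "b": {"c": 6}}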
@@ -70,90 +77,346 @@ class Adapter(Protocol):
         raise NotImplementedError()
 
 
+class _Literal:
+    def __init__(self, value):
+        self.value = value
+
+    def __call__(self, x):
+        return self.value
+
+
+class _Func_findall:
+    def __init__(self, *, pattern: str, flags: int = 0):
+        self.pattern = pattern
+        self.flags = flags
+
+    def __call__(self, x) -> list[str]:
+        return re.findall(self.pattern, x, self.flags)
+
+
+class _Func_join:
+    def __init__(self, *, separator: str = ""):
+        self._separator = separator
+
+    def __call__(self, x: list[str]) -> str:
+        return self._separator.join(x)
+
+
+class _Func_list:
+    def __call__(self, x) -> list:
+        return list(x)
+
+
+class _Func_reduce:
+    def __call__(self, x):
+        return functools.reduce(operator.add, x)
+
+
+class _Func_search:
+    def __init__(
+        self,
+        *,
+        pattern: str,
+        flags: int = 0,
+        group: int = 0,
+        default: str | None = None,
+    ):
+        self.pattern = pattern
+        self.flags = flags
+        self.group = group
+        self.default = default
+
+    def __call__(self, x) -> str | None:
+        m = re.search(self.pattern, x, self.flags)
+        return self.default if m is None else m.group(self.group)
+
+
+class _Func_split:
+    def __init__(self, *, pattern: str, maxsplit: int = 0, flags: int = 0):
+        self.pattern = pattern
+        self.maxsplit = maxsplit
+        self.flags = flags
+
+    def __call__(self, x) -> list[str]:
+        return re.split(self.pattern, x, self.maxsplit, self.flags)
+
+
+class _Func_sub:
+    def __init__(self, *, pattern: str, repl: str, count: int = 0, flags: int = 0):
+        self.pattern = pattern
+        self.repl = repl
+        self.count = count
+        self.flags = flags
+
+    def __call__(self, x) -> str:
+        return re.sub(self.pattern, self.repl, x, self.count, self.flags)
+
+
+class _Value_jsonpath:
+    def __init__(self, expr, *, kind: Literal["scalar", "list"] = "scalar"):
+        self._expr: jsonpath.JSONPath = jsonpath.parse(expr)
+        self._kind = kind
+
+    def __call__(self, x):
+        results = self._expr.find(x)
+        if self._kind == "list":
+            return [result.value for result in results]
+        elif self._kind == "scalar":
+            if len(results) == 0:
+                raise ValueError(f"no match for '{self._expr}' in '{x}'")
+            elif len(results) > 1:
+                raise ValueError(f"multiple results for '{self._expr}' in '{x}'")
+            return results[0].value
+        else:
+            raise AssertionError(f"kind {self._kind}")
+
+
+class _Value_list:
+    def __init__(self, exprs: list[Callable]):
+        self._exprs = exprs
+
+    def __call__(self, x) -> list:
+        return [e(x) for e in self._exprs]
+
+
+def _maybe_value_expr(expr: dict) -> Callable | None:
+    kinds = ["$literal", "$scalar", "$list"]
+    maybe_exprs = {k: expr.get(k) for k in kinds}
+    just_exprs = [k for k in kinds if maybe_exprs[k] is not None]
+    if len(just_exprs) == 0:
+        return None
+    if len(just_exprs) > 1:
+        raise ValueError(f"must specify exactly one of {kinds}: got {just_exprs}")
+
+    # remove sigil
+    kind: Literal["literal", "scalar", "list"] = just_exprs[0][1:]  # type: ignore
+    value = maybe_exprs[just_exprs[0]]
+    if kind == "literal":
+        return _Literal(value)
+
+    op: Callable = _Literal(value)
+    if isinstance(value, str):
+        if value.startswith("$"):
+            if value.startswith("$$"):
+                # Literal string -- remove "escape" character
+                op = _Literal(value[1:])
+            else:
+                op = _Value_jsonpath(value, kind=kind)
+    elif kind == "list" and isinstance(value, list):
+        exprs = [_maybe_value_expr(e) for e in value]
+        if any(e is None for e in exprs):
+            raise ValueError("$list elements must be value expressions")
+        op = _Value_list(exprs)  # type: ignore
+    if isinstance(op, _Literal) and kind != "literal":
+        raise ValueError("must use $literal when providing a literal value")
+    return op
+
+
+class _LeafExpression:
+    FUNCTIONS = {
+        "findall": _Func_findall,
+        "join": _Func_join,
+        "list": _Func_list,
+        "reduce": _Func_reduce,
+        "search": _Func_search,
+        "split": _Func_split,
+        "sub": _Func_sub,
+    }
+
+    def __init__(self, pipeline: dict | list[dict]):
+        if isinstance(pipeline, dict):
+            pipeline = [pipeline]
+
+        self._compiled_pipeline: list[Callable] = []
+        for step in pipeline:
+            if (value_op := _maybe_value_expr(step)) is not None:
+                self._compiled_pipeline.append(value_op)
+            elif (func := step.pop("$func", None)) is not None:
+                self._compiled_pipeline.append(_LeafExpression.FUNCTIONS[func](**step))
+            else:
+                raise ValueError(f"invalid $compute step: {step}")
+
+    def __call__(self, x):
+        output = None
+        for i, step in enumerate(self._compiled_pipeline):
+            if i == 0:
+                output = step(x)
+            else:
+                output = step(output)
+        return output
+
+
 class TransformJSON:
-    """Transform an input JSON structure by creating a new output JSON structure where
-    all of the "leaf" values are populated by either:
+    """Create a new JSON structure where the "leaf" values are populated by the results
+    of transformation functions applied to the input.
+
+    The "value" for each leaf can be::
+
+        1. A JSON literal value, or
+        2. The result of a jsonpath query on the input structure, or
+        3. The result of a computation pipeline starting from (1) or (2).
 
-        1. A provided JSON literal value, or
-        2. The result of a jsonpath query on the input structure.
+    To distinguish the specifications of leaf values from the specification of
+    the output structure, we apply the following rules::
 
-    For example, if the ``output_structure`` parameter is::
+        1. Composite values (``list`` and ``dict``) specify the structure of
+           the output.
+        2. Scalar values are output as-is, unless they are strings containing
+           JSONPath queries.
+        3. JSONPath queries are strings beginning with '$'. They are replaced
+           by the result of the query.
+        4. A ``dict`` containing the special key ``"$compute"`` introduces a
+           "compute context", which computes a leaf value from the input data.
+           Descendants of this key have "compute context semantics", which are
+           different from the "normal" semantics.
+
+    For example, if the ``configuration`` is::
 
        {
          "id": "$.object.id",
          "name": "literal",
          "children": {"left": "$.list[0]", "right": "$.list[1]"}
+          "characters": {
+            "letters": {
+              "$compute": [
+                {"$scalar": "$.object.id"},
+                {
+                  "$func": "sub",
+                  "pattern": "[^A-Za-z]",
+                  "repl": "",
+                },
+                {"$func": "list"}
+              ]
+            }
+          }
        }
 
     and the data is::
 
       {
-         "object": {"id": 42, "name": "spam"},
+         "object": {"id": "abc123", "name": "spam"},
          "list": [1, 2]
       }
 
-    Then applying the transformer to the data will result in the new structure::
+    Then applying the transformation to the data will result in the new structure::
 
       {
-         "id": 42,
+         "id": "abc123",
         "name": "literal",
-         "children: {"left": 1, "right": 2}
+         "children": {"left": 1, "right": 2},
+         "characters": {
+           "letters": ["a", "b", "c"]
+         }
       }
 
-    A value is interpreted as a jsonpath query if it is a string that starts
-    with the '$' character. If you need a literal string that starts with
-    the '$' character, escape it with a second '$', e.g., "$$PATH" will appear
-    as the literal string "$PATH" in the output.
-
-    All of the jsonpath queries must return *exactly one value* when executed
-    against each input item. If not, a ``ValueError`` will be raised.
+    The ``.characters.letters`` field was derived by::
+
+        1. Extracting the value of the ``.object.id`` field in the input
+        2. Applying ``re.sub(r"[^A-Za-z]", "", _)`` to the result of (1)
+        3. Applying ``list(_)`` to the result of (2)
+
+    Notice that descendants of the ``$compute`` key no longer describe the
+    structure of the output, but instead describe steps of the computation.
+    The value of ``"$compute"`` can be either an object or a list of objects.
+    A list is interpreted as a "pipeline" where each step is applied to the
+    output of the previous step.
+
+    Implicit queries
+    ================
+
+    Outside of the ``$compute`` context, string values that start with a ``$``
+    character are interpreted as jsonpath queries. Queries in this context must
+    return **exactly one value**, otherwise a ``ValueError`` will be raised.
+    This is because when multiple values are returned, there's no way to
+    distinguish a scalar-valued query that found 1 scalar from a list-valued
+    query that found a list with 1 element. In the ``$compute`` context, you
+    can specify which semantics you want.
+
+    If you need a literal string that starts with the '$' character, escape it
+    with a second '$', e.g., "$$PATH" will appear as the literal string "$PATH"
+    in the output. This works for both keys and values, e.g.,
+    ``{"$$key": "$$value"}`` outputs ``{"$key": "$value"}``. All keys that
+    begin with ``$`` are reserved, and you must always escape them.
+
+    The $compute context
+    ====================
+
+    A ``$compute`` context is introduced by a ``dict`` that contains the key
+    ``{"$compute": ...}``. Semantics in the ``$compute`` context are different
+    from semantics in the "normal" context.
+
+    $literal vs. $scalar vs. $list
+    ------------------------------
+
+    Inside a ``$compute`` context, we distinguish explicitly between literal
+    values, jsonpath queries that return scalars, and jsonpath queries that
+    return lists. You specify which semantics you intend by using
+    ``{"$literal": [1, 2]}``, ``{"$scalar": "$.foo"}``, or ``{"$list": "$.foo[*]"}``.
+    Items with ``$literal`` semantics are **never** interpreted as jsonpath
+    queries, even if they start with ``$``. In the ``$literal`` context, you
+    **should not** escape the leading ``$`` character.
+
+    A ``$scalar`` query has the same semantics as a jsonpath query outside
+    of the ``$compute`` context, i.e., it must return exactly 1 item.
+    A ``$list`` query will return a list, which can be empty. Scalar-valued
+    queries in a ``$list`` context will return a list with 1 element.
+
+    $func
+    -----
+
+    You use blocks with a ``$func`` key to specify computation steps. The
+    available functions are: ``findall``, ``join``, ``list``, ``reduce``,
+    ``search``, ``split``, ``sub``. These behave the same way as the
+    corresponding functions from the Python standard library::
+
+        * ``findall``, ``search``, ``split``, and ``sub`` are from the
+          ``re`` module.
+        * ``reduce`` uses the ``+`` operator with no starting value; it will
+          raise an error if called on an empty list.
+
+    All of these functions take named parameters with the same names and
+    semantics as their parameters in Python.
     """
 
     def __init__(self, configuration: dict):
-        """
-        Parameters:
-          ``output_structure``: A JSON object where all the "leaf" values
-            are strings containing jsonpath queries.
-        """
         if configuration != json.loads(json.dumps(configuration)):
             raise ValueError("configuration is not valid JSON")
-        self.output_structure = configuration
-        try:
-            self._expressions = map_structure(
-                self._jsonpath_expr_or_literal, self.output_structure
-            )
-        except JSONPathError as ex:
-            raise ValueError(
-                "output_structure leaf values must be JSON literals or jsonpath query strings"
-            ) from ex
-
-    def _jsonpath_expr_or_literal(self, x):
-        if isinstance(x, str):
+        self.configuration = configuration
+        self._transformation = self._compile(self.configuration)
+
+    def _compile(self, x) -> Callable | list | dict:
+        if isinstance(x, dict):
+            if (compute := x.get("$compute")) is not None:
+                if len(x) != 1:
+                    raise ValueError("$compute must be the only key in the dict")
+                return _LeafExpression(compute)
+            else:
+                # Escape '$' in dict keys
+                d: dict[str, Any] = {}
+                for k, v in x.items():
+                    if k.startswith("$"):
+                        if k.startswith("$$"):
+                            k = k[1:]
+                        else:
+                            raise ValueError(
+                                f"dict key '{k}': keys beginning with '$' are reserved; use '$$' to escape"
+                            )
+                    d[k] = self._compile(v)
+                return d
+        elif isinstance(x, list):
+            return [self._compile(y) for y in x]
+        elif isinstance(x, str):
             if x.startswith("$"):
                 if x.startswith("$$"):
                     # Literal string -- remove "escape" character
-                    return x[1:]
+                    return _Literal(x[1:])
                 else:
-                    return jsonpath.parse(x)
-        return x
+                    return _Value_jsonpath(x, kind="scalar")
+        return _Literal(x)
 
     def __call__(self, stream: Iterable[dict]) -> Iterable[dict]:
-        def query(data, expr):
-            if not isinstance(expr, jsonpath.JSONPath):
-                # Literal
-                return expr
-            results = expr.find(data)
-            if len(results) == 0:
-                raise ValueError(f"no match for {expr}")
-            elif len(results) > 1:
-                raise ValueError(f"multiple results for {expr}")
-            return results[0].value
-
         for item in stream:
-            transformed = map_structure(
-                lambda expr: query(item, expr), self._expressions
-            )
-            yield transformed
+            yield map_structure(lambda compute: compute(item), self._transformation)
 
 
 class EmbedIndex:
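
For orientation, a minimal sketch (not part of the package) of how the new $compute pipeline in TransformJSON is used, following the docstring above. The import path is an assumption based on the adapters.py entry in the RECORD at the bottom of this diff:

from dyff.schema.v0.r1.adapters import TransformJSON  # assumed import path

transformer = TransformJSON(
    {
        "id": "$.object.id",
        "letters": {
            "$compute": [
                {"$scalar": "$.object.id"},
                {"$func": "sub", "pattern": "[^A-Za-z]", "repl": ""},
                {"$func": "list"},
            ]
        },
    }
)
# TransformJSON consumes a stream of items and yields transformed items.
print(list(transformer([{"object": {"id": "abc123"}}])))
# Expected, per the semantics documented above:
# [{'id': 'abc123', 'letters': ['a', 'b', 'c']}]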
@@ -573,13 +836,16 @@ def _test():
     print(list(transformer([data])))
 
     transformer = TransformJSON(
-        {"id": "$.object.id", "children": {"left": "$.list[0]", "right": "$.list[1]"}}
+        {
+            "id": "$.object.id",
+            "children": {"left": "$.list[0]", "right": "$.list[1]"},
+        }
     )
     print(
         list(
             transformer(
                 [
-                    {"object": {"id": 42, "name": "spam"}, "list": [1, 2]},
+                    {"object": {"id": "abc123", "name": "spam"}, "list": [1, 2]},
                 ]
             )
         )
@@ -1370,6 +1370,50 @@ class TaskSchema(DyffSchemaBaseModel):
     objective: str
 
 
+class EvaluationClientConfiguration(DyffSchemaBaseModel):
+    badRequestPolicy: Literal["Abort", "Skip"] = pydantic.Field(
+        default="Abort",
+        description="What to do if an inference call raises a 400 Bad Request"
+        " or a similar error that indicates a problem with the input instance."
+        " Abort (default): the evaluation fails immediately."
+        " Skip: output None for the bad instance and continue.",
+    )
+
+    transientErrorRetryLimit: int = pydantic.Field(
+        default=120,
+        description="How many times to retry transient errors before the"
+        " evaluation fails. The count is reset after a successful inference."
+        " Note that transient errors often occur during inference service"
+        " startup. The maximum time that the evaluation will wait for a"
+        " service (re)start is (retry limit) * (retry delay).",
+    )
+
+    transientErrorRetryDelaySeconds: int = pydantic.Field(
+        default=30,
+        description="How long to wait before retrying a transient error."
+        " Note that transient errors often occur during inference service"
+        " startup. The maximum time that the evaluation will wait for a"
+        " service (re)start is (retry limit) * (retry delay).",
+    )
+
+    duplicateOutputPolicy: Literal["Deduplicate", "Error", "Ignore"] = pydantic.Field(
+        default="Deduplicate",
+        description="What to do if there are duplicate outputs."
+        " Deduplicate (default): output only one of the duplicates, chosen"
+        " arbitrarily. Error: the evaluation fails. Ignore: duplicates are"
+        " retained in the output."
+        " Setting this to Error is discouraged because duplicates can"
+        " arise in normal operation if the client restarts due to"
+        " a transient failure.",
+    )
+
+    missingOutputPolicy: Literal["Error", "Ignore"] = pydantic.Field(
+        default="Error",
+        description="What to do if there are missing outputs."
+        " Error (default): the evaluation fails. Ignore: no error.",
+    )
+
+
 class EvaluationBase(DyffSchemaBaseModel):
     dataset: str = pydantic.Field(description="The Dataset to evaluate on.")
 
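The new client configuration bounds how long an evaluation tolerates a flaky or still-starting inference service: with the defaults above, that bound is transientErrorRetryLimit * transientErrorRetryDelaySeconds = 120 * 30 s = 3600 s, i.e. one hour. A minimal sketch (not from the package) of overriding those policies for the client field that the next hunk adds to EvaluationBase; the import path is an assumption based on the platform.py entry in the RECORD below:

from dyff.schema.v0.r1.platform import EvaluationClientConfiguration  # assumed import path

client = EvaluationClientConfiguration(
    badRequestPolicy="Skip",        # keep going past malformed input instances instead of aborting
    transientErrorRetryLimit=240,   # tolerate up to 240 * 30 s = 2 hours of service (re)starts
)
# This object populates the new EvaluationBase.client field; leaving it unset
# falls back to the defaults via default_factory=EvaluationClientConfiguration.
print(client.duplicateOutputPolicy)  # "Deduplicate" (the default)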
@@ -1377,11 +1421,17 @@ class EvaluationBase(DyffSchemaBaseModel):
         default=1, description="Number of replications to run."
     )
 
+    # TODO: This should be in the client config object
     workersPerReplica: Optional[int] = pydantic.Field(
         default=None,
         description="Number of data workers per inference service replica.",
     )
 
+    client: EvaluationClientConfiguration = pydantic.Field(
+        default_factory=EvaluationClientConfiguration,
+        description="Configuration for the evaluation client.",
+    )
+
 
 class Evaluation(DyffEntity, EvaluationBase):
     """A description of how to run an InferenceService on a Dataset to obtain a set of
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: dyff-schema
-Version: 0.23.0
+Version: 0.24.0
 Summary: Data models for the Dyff AI auditing platform.
 Author-email: Digital Safety Research Institute <contact@dsri.org>
 License: Apache-2.0
@@ -22,9 +22,9 @@ dyff/schema/io/__init__.py,sha256=L5y8UhRnojerPYHumsxQJRcHCNz8Hj9NM8b47mewMNs,92
 dyff/schema/io/vllm.py,sha256=2q05M_-lTzq9oywKXHPPpCFCSDVCSsRQqtmERzWTtio,123
 dyff/schema/v0/__init__.py,sha256=L5y8UhRnojerPYHumsxQJRcHCNz8Hj9NM8b47mewMNs,92
 dyff/schema/v0/r1/__init__.py,sha256=L5y8UhRnojerPYHumsxQJRcHCNz8Hj9NM8b47mewMNs,92
-dyff/schema/v0/r1/adapters.py,sha256=2t2oxsnGfSEDKKDIEYw4qqLXMH7qlFIwPVuLyUmbsHs,23552
+dyff/schema/v0/r1/adapters.py,sha256=dmQS2ecgDX4ZvTMOW-6NzV_Oq_UpaiyFd7QnSNoOnK8,33057
 dyff/schema/v0/r1/base.py,sha256=IpvlYDr6JjYo6tn8XW4C1Fpgd_uqzZGZsG_cuEn_gQs,19441
-dyff/schema/v0/r1/platform.py,sha256=xaJohO5SB_Nh6kIRmbAe1gc-xV_vGoM3R7UgMJcbByM,75980
+dyff/schema/v0/r1/platform.py,sha256=GCVzHkFl7EZLDEpMxCy71HHfwU8191dxCemnqG4-kzw,78259
 dyff/schema/v0/r1/requests.py,sha256=4TM1IKG9IP4MyprIy9E9XA_JqvkuwKAuY1ao1BbVLI0,15676
 dyff/schema/v0/r1/test.py,sha256=X6dUyVd5svcPCI-PBMOAqEfK9jv3bRDvkQTJzwS96c0,10720
 dyff/schema/v0/r1/version.py,sha256=isKAGuGxsdru8vDaYmI4YiZdJOu_wNxXK7u6QzD6FE4,392
@@ -37,9 +37,9 @@ dyff/schema/v0/r1/dataset/text.py,sha256=nLIn91Zlt0tNdXUklSgjJ-kEDxoPX32ISLkiv2D
 dyff/schema/v0/r1/dataset/vision.py,sha256=aIe0fbfM_g3DsrDTdg2K803YKLjZBpurM_VJcJFuZLc,369
 dyff/schema/v0/r1/io/__init__.py,sha256=L5y8UhRnojerPYHumsxQJRcHCNz8Hj9NM8b47mewMNs,92
 dyff/schema/v0/r1/io/vllm.py,sha256=CUE9y8KthtUI7sD49S875rDmPvKotSXVIRaBS79aBZs,5320
-dyff_schema-0.23.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-dyff_schema-0.23.0.dist-info/METADATA,sha256=D_F0_u21Cwr_Wh1n8KGZtOOKlQCYk52gkL-Z6EqbC5o,3482
-dyff_schema-0.23.0.dist-info/NOTICE,sha256=YONACu0s_Ui6jNi-wtEsVQbTU1JIkh8wvLH6d1-Ni_w,43
-dyff_schema-0.23.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-dyff_schema-0.23.0.dist-info/top_level.txt,sha256=9e3VVdeX73t_sUJOPQPCcGtYO1JhoErhHIi3WoWGcFI,5
-dyff_schema-0.23.0.dist-info/RECORD,,
+dyff_schema-0.24.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+dyff_schema-0.24.0.dist-info/METADATA,sha256=ysTXvjZNSeDrJXjK1EYy8iYpxOqJ7PD1J8-bnNkHm_g,3482
+dyff_schema-0.24.0.dist-info/NOTICE,sha256=YONACu0s_Ui6jNi-wtEsVQbTU1JIkh8wvLH6d1-Ni_w,43
+dyff_schema-0.24.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+dyff_schema-0.24.0.dist-info/top_level.txt,sha256=9e3VVdeX73t_sUJOPQPCcGtYO1JhoErhHIi3WoWGcFI,5
+dyff_schema-0.24.0.dist-info/RECORD,,