ogc-na 0.1.2__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ogc-na might be problematic.

Files changed (48)
  1. {ogc_na-0.1.2 → ogc_na-0.1.4}/PKG-INFO +1 -1
  2. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/annotate_schema.py +142 -52
  3. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/ingest_json.py +16 -7
  4. ogc_na-0.1.4/ogc/na/input_filters/__init__.py +25 -0
  5. ogc_na-0.1.4/ogc/na/input_filters/csv.py +71 -0
  6. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/profile.py +1 -1
  7. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/util.py +45 -12
  8. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc_na.egg-info/PKG-INFO +1 -1
  9. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc_na.egg-info/SOURCES.txt +8 -0
  10. ogc_na-0.1.4/test/data/headers.csv +89 -0
  11. ogc_na-0.1.4/test/data/no-headers.csv +6 -0
  12. ogc_na-0.1.4/test/data/sample-context.jsonld +13 -0
  13. ogc_na-0.1.4/test/data/sample-schema-prop-c.yml +7 -0
  14. ogc_na-0.1.4/test/data/sample-schema.yml +12 -0
  15. ogc_na-0.1.4/test/test_annotate_schema.py +108 -0
  16. ogc_na-0.1.4/test/test_input_filters_csv.py +86 -0
  17. ogc_na-0.1.2/test/test_annotate_schema.py +0 -51
  18. {ogc_na-0.1.2 → ogc_na-0.1.4}/.github/workflows/python-publish.yml +0 -0
  19. {ogc_na-0.1.2 → ogc_na-0.1.4}/.gitignore +0 -0
  20. {ogc_na-0.1.2 → ogc_na-0.1.4}/MANIFEST.in +0 -0
  21. {ogc_na-0.1.2 → ogc_na-0.1.4}/README.md +0 -0
  22. {ogc_na-0.1.2 → ogc_na-0.1.4}/docs/examples.md +0 -0
  23. {ogc_na-0.1.2 → ogc_na-0.1.4}/docs/gen_ref_pages.py +0 -0
  24. {ogc_na-0.1.2 → ogc_na-0.1.4}/docs/index.md +0 -0
  25. {ogc_na-0.1.2 → ogc_na-0.1.4}/docs/tutorials.md +0 -0
  26. {ogc_na-0.1.2 → ogc_na-0.1.4}/mkdocs.yml +0 -0
  27. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/__init__.py +0 -0
  28. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/domain_config.py +0 -0
  29. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/download.py +0 -0
  30. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/provenance.py +0 -0
  31. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/update_vocabs.py +0 -0
  32. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc/na/validation.py +0 -0
  33. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc_na.egg-info/dependency_links.txt +0 -0
  34. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc_na.egg-info/requires.txt +0 -0
  35. {ogc_na-0.1.2 → ogc_na-0.1.4}/ogc_na.egg-info/top_level.txt +0 -0
  36. {ogc_na-0.1.2 → ogc_na-0.1.4}/pyproject.toml +0 -0
  37. {ogc_na-0.1.2 → ogc_na-0.1.4}/rdf/catalog-v001.xml +0 -0
  38. {ogc_na-0.1.2 → ogc_na-0.1.4}/rdf/domaincfg.vocab.ttl +0 -0
  39. {ogc_na-0.1.2 → ogc_na-0.1.4}/requirements.txt +0 -0
  40. {ogc_na-0.1.2 → ogc_na-0.1.4}/setup.cfg +0 -0
  41. {ogc_na-0.1.2 → ogc_na-0.1.4}/setup.py +0 -0
  42. {ogc_na-0.1.2 → ogc_na-0.1.4}/test/__init__.py +0 -0
  43. {ogc_na-0.1.2 → ogc_na-0.1.4}/test/data/empty.ttl +0 -0
  44. {ogc_na-0.1.2 → ogc_na-0.1.4}/test/data/profile_tree.ttl +0 -0
  45. {ogc_na-0.1.2 → ogc_na-0.1.4}/test/data/profile_tree_cyclic.ttl +0 -0
  46. {ogc_na-0.1.2 → ogc_na-0.1.4}/test/data/uplift_context_valid.yml +0 -0
  47. {ogc_na-0.1.2 → ogc_na-0.1.4}/test/test_ingest_json.py +0 -0
  48. {ogc_na-0.1.2 → ogc_na-0.1.4}/test/test_profile.py +0 -0
--- ogc_na-0.1.2/PKG-INFO
+++ ogc_na-0.1.4/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ogc_na
- Version: 0.1.2
+ Version: 0.1.4
  Summary: OGC Naming Authority tools
  Author-email: Rob Atkinson <ratkinson@ogc.org>, Piotr Zaborowski <pzaborowski@ogc.org>, Alejandro Villar <avillar@ogc.org>
  Project-URL: Homepage, https://github.com/opengeospatial/ogc-na-tools/
--- ogc_na-0.1.2/ogc/na/annotate_schema.py
+++ ogc_na-0.1.4/ogc/na/annotate_schema.py
@@ -116,16 +116,16 @@ The resulting context will be printed to the standard output.
  from __future__ import annotations
  import argparse
  import dataclasses
- import functools
  import json
  import logging
+ import re
  import sys
  from pathlib import Path
- from typing import Any, AnyStr
+ from typing import Any, AnyStr, Callable
  from urllib.parse import urlparse, urljoin
  import yaml
  import requests
- from ogc.na.util import is_url, merge_dicts, load_yaml
+ from ogc.na.util import is_url, merge_dicts, load_yaml, LRUCache

  try:
      from yaml import CLoader as YamlLoader, CDumper as YamlDumper
@@ -137,8 +137,11 @@ logger = logging.getLogger(__name__)
  ANNOTATION_CONTEXT = 'x-jsonld-context'
  ANNOTATION_ID = 'x-jsonld-id'
  ANNOTATION_TYPE = 'x-jsonld-type'
+ ANNOTATION_PREFIXES = 'x-jsonld-prefixes'
  REF_ROOT_MARKER = '$_ROOT_/'

+ context_term_cache = LRUCache(maxsize=20)
+

  @dataclasses.dataclass
  class AnnotatedSchema:
@@ -218,63 +221,95 @@ def resolve_ref(ref: str, fn_from: str | Path | None = None, url_from: str | Non
      return ref, None


- @functools.lru_cache(maxsize=20)
- def read_context_terms(file: Path | str = None, url: str = None) -> dict[str, str]:
+ def read_context_terms(ctx: Path | str | dict) -> tuple[dict[str, str], dict[str, str], dict[str, str]]:
      """
      Reads all the terms from a JSON-LD context document.

-     :param file: file path to load
-     :param url: URL to load
+     :param ctx: file path (Path), URL (str) or dictionary (dict) to load
      :return: a dictionary with term -> URI mappings
      """
+
+     cached = context_term_cache.get(ctx)
+     if cached:
+         return cached
+
      context: dict[str, Any] | None = None

-     if file:
-         with open(file) as f:
+     if isinstance(ctx, Path):
+         with open(ctx) as f:
              context = json.load(f).get('@context')
-     elif url:
-         r = requests.get(url)
+     elif isinstance(ctx, str):
+         r = requests.get(ctx)
          r.raise_for_status()
          context = r.json().get('@context')
+     elif ctx:
+         context = ctx.get('@context')

      if not context:
-         return {}
+         return {}, {}, {}

-     result: dict[str, str] = {}
-     pending: dict[str, list] = {}
+     result: dict[str, str | tuple[str, str]] = {}
+     types: dict[str, str | tuple[str, str]] = {}

      vocab = context.get('@vocab')

+     def expand_uri(uri: str) -> str | tuple[str, str] | None:
+         if not uri:
+             return None
+
+         if ':' in uri:
+             # either URI or prefix:suffix
+             pref, suf = uri.split(':', 1)
+             if suf.startswith('//') or pref == 'urn':
+                 # assume full URI
+                 return uri
+             else:
+                 # prefix:suffix -> add to pending for expansion
+                 return pref, suf
+         elif vocab:
+             # append term_val to vocab to get URI
+             return f"{vocab}{term_id}"
+         else:
+             return uri
+
      for term, term_val in context.items():
          if not term.startswith("@"):
              # assume term
+             term_type = None
              if isinstance(term_val, str):
                  term_id = term_val
              elif isinstance(term_val, dict):
                  term_id = term_val.get('@id')
+                 term_type = term_val.get('@type')
              else:
                  term_id = None

-             if term_id:
-                 if ':' in term_id:
-                     # either URI or prefix:suffix
-                     pref, suf = term_id.split(':', 1)
-                     if suf.startswith('//'):
-                         # assume URI -> add to result
-                         result[term] = term_id
-                     else:
-                         # prefix:suffix -> add to pending for expansion
-                         pending[term] = [pref, suf]
-                 elif vocab:
-                     # append term_val to vocab to get URI
-                     result[term] = f"{vocab}{term_id}"
+             expanded_id = expand_uri(term_id)
+             if expanded_id:
+                 result[term] = expanded_id
+             expanded_type = expand_uri(term_type)
+             if expanded_type:
+                 types[term] = expanded_type

-     for term, term_val in pending.items():
-         pref, suf = term_val
-         if pref in result:
-             result[term] = f"{result[pref]}{suf}"
+     prefixes = {}

-     return result
+     def expand_result(d: dict[str, str | tuple[str, str]]) -> dict[str, str]:
+         r = {}
+         for term, term_val in d.items():
+             if isinstance(term_val, str):
+                 r[term] = term_val
+             else:
+                 pref, suf = term_val
+                 if pref in result:
+                     r[term] = f"{result[pref]}{suf}"
+                     prefixes[pref] = result[pref]
+         return r
+
+     expanded_types = expand_result(types)
+     expanded_terms = expand_result(result)
+
+     context_term_cache[ctx] = expanded_terms, prefixes, expanded_types
+     return expanded_terms, prefixes, expanded_types


  class SchemaAnnotator:
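The hunk above replaces the `functools.lru_cache`-decorated `read_context_terms(file=..., url=...)` with a single-argument version that accepts a `Path`, a URL string or an inline dict, and now returns three mappings (terms, prefixes and types) cached in the module-level `context_term_cache`. A minimal sketch of the new return value, traced by hand from the code above using the same vocabulary as the test data further down (not output captured from the package):

    terms, prefixes, types = read_context_terms({'@context': {
        'prop': 'http://example.com/props/',
        'propA': 'prop:a',
    }})
    # terms    == {'prop': 'http://example.com/props/', 'propA': 'http://example.com/props/a'}
    # prefixes == {'prop': 'http://example.com/props/'}
    # types    == {}
    # Note: a dict argument bypasses the LRU cache, since dicts are not Hashable.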
@@ -287,7 +322,8 @@ class SchemaAnnotator:
      """

      def __init__(self, fn: Path | str | None = None, url: str | None = None,
-                  follow_refs: bool = True, ref_root: Path | str | None = None):
+                  follow_refs: bool = True, ref_root: Path | str | None = None,
+                  context: str | Path | dict | None = None):
          """
          :param fn: file path to load (root schema)
          :param url: URL to load (root schema)
@@ -297,6 +333,7 @@ class SchemaAnnotator:
          self.bundled_schema = None
          self.ref_root = Path(ref_root) if ref_root else None
          self._follow_refs = follow_refs
+         self._provided_context = context

          self._process_schema(fn, url)

@@ -309,19 +346,29 @@ class SchemaAnnotator:

          base_url = schema.get('$id', base_url)

-         if not context_fn:
-             terms = {}
-         elif base_url:
-             context_fn = urljoin(base_url, context_fn)
-             terms = read_context_terms(url=context_fn)
-         else:
-             context_fn = Path(fn).parent / context_fn
-             terms = read_context_terms(file=context_fn)
+         terms = {}
+         prefixes = {}
+         types = {}
+
+         if context_fn != self._provided_context or not (isinstance(context_fn, Path)
+                                                         and isinstance(self._provided_context, Path)
+                                                         and self._provided_context.resolve() == context_fn.resolve()):
+             # Only load the provided context if it's different from the schema-referenced one
+             terms, prefixes, types = read_context_terms(self._provided_context)
+
+         if context_fn:
+             if base_url:
+                 context_fn = urljoin(base_url, str(context_fn))
+             else:
+                 context_fn = Path(fn).parent / context_fn
+
+             for e in zip((terms, prefixes, types), read_context_terms(context_fn)):
+                 e[0].update(e[1])

          def process_properties(obj: dict):
              properties: dict[str, dict] = obj.get('properties') if obj else None
-             if not properties:
-                 return
+             if not isinstance(properties, dict):
+                 raise ValueError('"properties" must be a dictionary')

              empty_properties = []
              for prop, prop_value in properties.items():
@@ -330,7 +377,9 @@ class SchemaAnnotator:
                      continue
                  if prop in terms:
                      prop_value[ANNOTATION_ID] = terms[prop]
-                 if '$ref' in prop_value:
+                 if prop in types:
+                     prop_value[ANNOTATION_TYPE] = types[prop]
+                 if '$ref' in prop_value and self._follow_refs:

                      ref_fn, ref_url = resolve_ref(prop_value['$ref'], fn, url, base_url)
                      ref = ref_fn or ref_url
@@ -346,13 +395,28 @@ class SchemaAnnotator:

              properties.update({p: {ANNOTATION_ID: terms[p]} for p in empty_properties if p in terms})

-         schema_type = schema.get('type')
+         def process_subschema(subschema):
+
+             schema_type = subschema.get('type')
+             if not schema_type and 'properties' in subschema:
+                 schema_type = 'object'
+
+             if schema_type == 'object':
+                 process_properties(subschema)
+             elif schema_type == 'array':
+                 for k in ('prefixItems', 'items', 'contains'):
+                     process_properties(subschema.get(k))

-         if schema_type == 'object':
-             process_properties(schema)
-         elif schema_type == 'array':
-             for k in ('prefixItems', 'items', 'contains'):
-                 process_properties(schema.get(k))
+             for defs_prop in ('$defs', 'definitions'):
+                 defs_value = subschema.get(defs_prop)
+                 if isinstance(defs_value, dict):
+                     for defs_entry in defs_value.values():
+                         process_subschema(defs_entry)
+
+         process_subschema(schema)
+
+         if prefixes:
+             schema[ANNOTATION_PREFIXES] = prefixes

          self.schemas[fn or url] = AnnotatedSchema(
              source=fn or url,
@@ -389,18 +453,43 @@ class ContextBuilder:

          base_url = schema.get('$id', base_url)

+         prefixes = schema.get(ANNOTATION_PREFIXES, {}).items()
+         rev_prefixes = {v: k for k, v in prefixes}
+
+         def compact_uri(uri: str) -> str:
+             if uri.startswith('@'):
+                 # JSON-LD keyword
+                 return uri
+             parts = urlparse(uri)
+             if parts.fragment:
+                 pref, suf = uri.rsplit('#', 1)
+                 pref += '#'
+             elif len(parts.path) > 1:
+                 pref, suf = uri.rsplit('/', 1)
+                 pref += '/'
+             else:
+                 return uri
+
+             if pref in rev_prefixes:
+                 return f"{rev_prefixes[pref]}:{suf}"
+             else:
+                 return uri
+
          own_context = {}

+         if prefixes:
+             own_context.update(prefixes)
+
          def read_properties(where: dict):
              if not isinstance(where, dict):
                  return
              for prop, prop_val in where.get('properties', {}).items():
                  if isinstance(prop_val, dict) and ANNOTATION_ID in prop_val:
                      prop_context = {
-                         '@id': prop_val[ANNOTATION_ID]
+                         '@id': compact_uri(prop_val[ANNOTATION_ID])
                      }
                      if ANNOTATION_TYPE in prop_val:
-                         prop_context['@type'] = prop_val[ANNOTATION_TYPE]
+                         prop_context['@type'] = compact_uri(prop_val[ANNOTATION_TYPE])

                      if '$ref' in prop_val:
                          ref_fn, ref_url = resolve_ref(prop_val['$ref'], fn, url, base_url)
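`ContextBuilder` now emits the prefix map from `x-jsonld-prefixes` into the generated context and compacts annotated URIs back to `prefix:suffix` form. Since `compact_uri` is a local closure, here is a standalone sketch of its compaction rule (illustration only, not the library API):

    rev_prefixes = {'http://example.com/props/': 'prop'}   # inverted x-jsonld-prefixes map
    uri = 'http://example.com/props/a'
    pref, suf = uri.rsplit('/', 1)                         # no '#' fragment, so split on the last '/'
    assert f"{rev_prefixes[pref + '/']}:{suf}" == 'prop:a'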
@@ -459,6 +548,7 @@ def dump_annotated_schemas(annotator: SchemaAnnotator, subdir: Path | str = 'ann
      :param annotator: a `SchemaAnnotator` with the annotated schemas to read
      :param subdir: a name for the mirror directory
      :param root_dir: root directory for computing relative paths to schemas
+     :param output_fn_transform: optional callable to transform the output path
      """
      wd = (Path(root_dir) if root_dir else Path()).resolve()
      subdir = subdir if isinstance(subdir, Path) else Path(subdir)
--- ogc_na-0.1.2/ogc/na/ingest_json.py
+++ ogc_na-0.1.4/ogc/na/ingest_json.py
@@ -50,6 +50,7 @@ from rdflib.namespace import Namespace, DefinedNamespace
  from ogc.na import util, profile
  from ogc.na.domain_config import UpliftConfigurationEntry, DomainConfiguration
  from ogc.na.provenance import ProvenanceMetadata, FileProvenanceMetadata, generate_provenance
+ from ogc.na.input_filters import apply_input_filter

  logger = logging.getLogger(__name__)

@@ -393,7 +394,8 @@ def process_file(input_fn: str | Path,
          If False, no Turtle output will be generated.
      :param context_fn: YAML context filename. If None, will be autodetected:
          1. From a file with the same name but yml/yaml extension (test.json -> test.yml)
-         2. From a _json-context.yml/_json-context.yaml file in the same directory
+         2. From the domain_cfg
+         3. From a _json-context.yml/_json-context.yaml file in the same directory
      :param domain_cfg: domain configuration with uplift definition locations
      :param base: base URI for JSON-LD
      :param provenance_base_uri: base URI for provenance resources
@@ -436,8 +438,15 @@ def process_file(input_fn: str | Path,
      if not contexts:
          raise MissingContextException('No context file provided and one could not be discovered automatically')

-     with open(input_fn, 'r') as j:
-         input_data = json.load(j)
+     # Apply input filter of first context only (if any)
+     input_filters = contexts[0].get('input-filter')
+     if input_filters:
+         if not isinstance(input_filters, dict):
+             raise ValueError('input-filter must be an object')
+         input_data = apply_input_filter(input_fn, input_filters)
+     else:
+         with open(input_fn, 'r') as j:
+             input_data = json.load(j)

      provenance_metadata: ProvenanceMetadata | None = None
      if provenance_base_uri is not False:
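With this change, `process_file` consults the first applicable uplift context for an `input-filter` entry and delegates reading to `apply_input_filter` instead of assuming JSON input. A sketch of how that entry might look inside an uplift context YAML file (only the `input-filter` key and the CSV options come from this diff; the rest of the context document is omitted):

    # hypothetical uplift context fragment
    input-filter:
      csv:
        rows: dict
        trim-values: true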
@@ -635,7 +644,10 @@ def process(input_files: str | Path | Sequence[str | Path],
      logger.info("Input files: %s", input_files)
      remaining_fn: deque = deque()
      for input_file in input_files:
-         remaining_fn.extend(input_file.split(','))
+         if isinstance(input_file, str):
+             remaining_fn.extend(input_file.split(','))
+         else:
+             remaining_fn.append(input_file)
      while remaining_fn:
          fn = str(remaining_fn.popleft())

@@ -645,9 +657,6 @@ def process(input_files: str | Path | Sequence[str | Path],
              remaining_fn.extend(filenames_from_context(fn, domain_config=domain_cfg) or [])
              continue

-         if not re.match(r'.*\.json-?(ld)?$', fn):
-             logger.debug('File %s does not match, skipping', fn)
-             continue
          logger.info('File %s matches, processing', fn)
          try:
              result.append(process_file(
--- /dev/null
+++ ogc_na-0.1.4/ogc/na/input_filters/__init__.py
@@ -0,0 +1,25 @@
+ from __future__ import annotations
+
+ from importlib import import_module
+ from io import BytesIO
+ from pathlib import Path
+ from typing import Any, IO, TextIO
+
+
+ def apply_input_filter(stream: IO | bytes | str | Path, filters: dict[str, dict]) -> dict[str, Any] | list:
+     filter_name, filter_conf = filters.popitem()
+     try:
+         filter_mod = import_module(f"ogc.na.input_filters.{filter_name}")
+     except ImportError:
+         raise ValueError(f'Cannot find input filter with name "{filter_name}"')
+
+     content: bytes | None = None
+     if isinstance(stream, Path) or isinstance(stream, str):
+         with open(stream, 'rb') as f:
+             content = f.read()
+     elif isinstance(stream, TextIO):
+         content = stream.read().encode('utf-8')
+     else:
+         content = stream.read()
+
+     return filter_mod.apply_filter(content, filter_conf)
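The dispatcher resolves the single key of the `filters` mapping to a module under `ogc.na.input_filters` and hands it the file content as bytes. A minimal usage sketch ('data.csv' is a hypothetical input file):

    from ogc.na.input_filters import apply_input_filter

    # popitem() consumes the entry, so pass a fresh dict on each call
    rows = apply_input_filter('data.csv', {'csv': {'rows': 'dict', 'trim-values': True}})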
--- /dev/null
+++ ogc_na-0.1.4/ogc/na/input_filters/csv.py
@@ -0,0 +1,71 @@
+ """
+ CSV Input filter for ingest_json.
+
+ Returns CSV rows as a list. Values will always be strings (no type inference or coercion is performed).
+
+ Configuration values:
+
+ * `rows` (default: `dict`): type of elements in the result list:
+   * `dict`: elements will be dictionaries, with the keys taken from the `header-row`.
+   * `list`: each resulting row will be an array of values.
+ * `header-row` (default: `0`): if `rows` is `dict`, the (0-based) index of the header row. All rows before the
+   header row will be skipped.
+ * `skip-rows` (default: `0`): number of rows to skip at the beginning of the file (apart from the header and pre-header
+   ones if `rows` is `dict`).
+ * `delimiter` (default: `,`): field separator character
+ * `quotechar` (default: `"`): char used to quote (enclose) field values
+ * `skip-empty-rows` (default: `True`): whether to omit empty rows (i.e., those with no values) from the result
+ * `trim-values` (default: `False`): whether to apply `.strip()` to the resulting values
+ """
+ from __future__ import annotations
+
+ import csv
+ from io import BytesIO, TextIOWrapper, StringIO
+ from typing import IO, Any
+
+ from ogc.na import util
+
+ DEFAULT_CONF = {
+     'rows': 'dict',
+     'header-row': 0,
+     'skip-rows': 0,
+     'delimiter': ',',
+     'quotechar': '"',
+     'skip-empty-rows': True,
+     'trim-values': False,
+ }
+
+
+ def apply_filter(content: bytes, conf: dict[str, Any] | None) -> dict[str, Any] | list:
+     conf = util.deep_update(DEFAULT_CONF, conf) if conf else DEFAULT_CONF
+
+     textio = StringIO(content.decode('utf-8'))
+     reader = csv.reader(textio, delimiter=conf['delimiter'], quotechar=conf['quotechar'])
+
+     headers = None
+     if conf['rows'] == 'dict':
+         header_row = max(conf['header-row'], 0)
+         # Skip to header row
+         for i in range(header_row):
+             next(reader, None)
+         headers = next(reader, [])
+         if not headers:
+             return []
+
+     # Skip requested rows
+     for i in range(conf['skip-rows']):
+         next(reader, None)
+
+     result = []
+     for row in reader:
+         if not row and conf['skip-empty-rows']:
+             # skip empty rows
+             continue
+         if conf['trim-values']:
+             row = [v.strip() for v in row]
+         if conf['rows'] == 'list':
+             result.append(row)
+         else:
+             result.append(dict(zip(headers, row)))
+
+     return result
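The filter can also be driven directly, as the new unit tests further down do. A one-row sketch of the default `dict` mode (expected output derived from the code above):

    from ogc.na.input_filters import csv as csv_filter

    content = b'"Year","Score","Title"\n1968,86,"Greetings"\n'
    rows = csv_filter.apply_filter(content, {'rows': 'dict'})
    assert rows == [{'Year': '1968', 'Score': '86', 'Title': 'Greetings'}]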
--- ogc_na-0.1.2/ogc/na/profile.py
+++ ogc_na-0.1.4/ogc/na/profile.py
@@ -202,7 +202,7 @@ class ProfileRegistry:
          if isinstance(src, str) and src.startswith('sparql:'):
              endpoint = src[len('sparql:'):]
              logger.info("Fetching profiles from SPARQL endpoint %s", endpoint)
-             assert util.isurl(endpoint)
+             assert util.is_url(endpoint, http_only=True)
              s = g.query(PROFILES_QUERY.replace('__SERVICE__', f"SERVICE <{endpoint}>")).graph
              util.copy_triples(s, g)
          else:
--- ogc_na-0.1.2/ogc/na/util.py
+++ ogc_na-0.1.4/ogc/na/util.py
@@ -7,7 +7,8 @@ import os.path
  import shlex
  from glob import glob
  from pathlib import Path
- from typing import Optional, Union, Any
+ from time import time
+ from typing import Optional, Union, Any, Mapping, Hashable

  import requests
  import rfc3987
@@ -18,6 +19,7 @@ from urllib.parse import urlparse
  from ogc.na.validation import ValidationReport

  import yaml
+
  try:
      from yaml import CLoader as YamlLoader, CSafeLoader as SafeYamlLoader, CDumper as YamlDumper
  except ImportError:
@@ -105,12 +107,12 @@ def validate(g: Graph, shacl_graph: Graph, extra: Optional[Graph] = None) -> Val
                               advanced=True))


- def isurl(url: str, http_only: bool = False) -> bool:
+ def is_url(url: str, http_only: bool = False) -> bool:
      """
      Checks whether a string is a valid URL.

      :param url: the input string
-     :param http_only: whether to only accept HTTP and HTTPS URL's as valid
+     :param http_only: whether to only accept HTTP and HTTPS URLs as valid
      :return: `True` if this is a valid URL, otherwise `False`
      """
      if not url:
@@ -159,7 +161,7 @@ def dump_yaml(content: Any, filename: str | Path | None = None,

      :param content: content to convert to YAML.
      :param filename: optional filename to dump the content into. If None, string content will be returned.
-     :param kwargs: other args to pass to yaml.dump
+     :param kwargs: other args to pass to `yaml.dump()`
      """
      kwargs.setdefault('sort_keys', False)
      if filename:
@@ -188,14 +190,6 @@ def merge_dicts(src: dict, dst: dict) -> dict:
      return dst


- def is_url(s: str) -> bool:
-     try:
-         url = urlparse(s)
-         return bool(url.scheme and url.netloc)
-     except ValueError:
-         return False
-
-
  def glob_list_split(s: str, exclude_dirs: bool = True, recursive: bool = False) -> list[str]:
      result = []
      for e in shlex.split(s):
@@ -206,3 +200,42 @@ def glob_list_split(s: str, exclude_dirs: bool = True, recursive: bool = False)
          if not exclude_dirs or os.path.isfile(fn):
              result.append(fn)
      return result
+
+
+ class LRUCache:
+
+     def __init__(self, maxsize: int = 10):
+         self._cache: dict[Hashable, Any] = {}
+         self._last_access: dict[Hashable, float] = {}
+         self._maxsize = maxsize
+
+     def __contains__(self, item):
+         return item in self._cache
+
+     def __len__(self):
+         return len(self._cache)
+
+     def get(self, key, default=None):
+         if not isinstance(key, Hashable):
+             return default
+         return self._cache.get(key, default)
+
+     def __setitem__(self, key, value):
+         if not isinstance(key, Hashable):
+             return
+         if len(self._cache) >= self._maxsize and key not in self._cache:
+             key_to_remove = min(self._last_access, key=self._last_access.get)
+             del self._cache[key_to_remove]
+             del self._last_access[key_to_remove]
+         self._cache[key] = value
+         self._last_access[key] = time()
+
+
+ def deep_update(orig_dict: dict, with_dict: dict, replace: bool = False) -> dict:
+     dest = orig_dict if replace else {**orig_dict}
+     for k, v in with_dict.items():
+         if isinstance(v, Mapping):
+             dest[k] = deep_update(orig_dict.get(k, {}), with_dict, replace)
+         else:
+             dest[k] = v
+     return dest
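`LRUCache` evicts by least-recent write: `_last_access` is only refreshed in `__setitem__`, not in `get()`, and unhashable keys are silently skipped rather than cached. A small sketch of the eviction behaviour, derived from the code above:

    from ogc.na.util import LRUCache

    cache = LRUCache(maxsize=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3                  # evicts 'a', the oldest write
    assert cache.get('a') is None
    assert cache.get('b') == 2 and cache.get('c') == 3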
--- ogc_na-0.1.2/ogc_na.egg-info/PKG-INFO
+++ ogc_na-0.1.4/ogc_na.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ogc-na
- Version: 0.1.2
+ Version: 0.1.4
  Summary: OGC Naming Authority tools
  Author-email: Rob Atkinson <ratkinson@ogc.org>, Piotr Zaborowski <pzaborowski@ogc.org>, Alejandro Villar <avillar@ogc.org>
  Project-URL: Homepage, https://github.com/opengeospatial/ogc-na-tools/
--- ogc_na-0.1.2/ogc_na.egg-info/SOURCES.txt
+++ ogc_na-0.1.4/ogc_na.egg-info/SOURCES.txt
@@ -20,6 +20,8 @@ ogc/na/provenance.py
  ogc/na/update_vocabs.py
  ogc/na/util.py
  ogc/na/validation.py
+ ogc/na/input_filters/__init__.py
+ ogc/na/input_filters/csv.py
  ogc_na.egg-info/PKG-INFO
  ogc_na.egg-info/SOURCES.txt
  ogc_na.egg-info/dependency_links.txt
@@ -30,8 +32,14 @@ rdf/domaincfg.vocab.ttl
  test/__init__.py
  test/test_annotate_schema.py
  test/test_ingest_json.py
+ test/test_input_filters_csv.py
  test/test_profile.py
  test/data/empty.ttl
+ test/data/headers.csv
+ test/data/no-headers.csv
  test/data/profile_tree.ttl
  test/data/profile_tree_cyclic.ttl
+ test/data/sample-context.jsonld
+ test/data/sample-schema-prop-c.yml
+ test/data/sample-schema.yml
  test/data/uplift_context_valid.yml
--- /dev/null
+++ ogc_na-0.1.4/test/data/headers.csv
@@ -0,0 +1,89 @@
+ "Year","Score","Title"
+ 1968,86,"Greetings"
+ 1970,17,"Bloody Mama"
+ 1970,73,"Hi,Mom!"
+ 1971,40,"Born to Win"
+ 1973,98,"Mean Streets"
+ 1973,88,"Bang the Drum Slowly"
+ 1974,97,"The Godfather,Part II"
+ 1976,41,"The Last Tycoon"
+ 1976,99,"Taxi Driver"
+ 1977,47,"1900"
+ 1977,67,"New York,New York"
+ 1978,93,"The Deer Hunter"
+ 1980,97,"Raging Bull"
+ 1981,75,"True Confessions"
+ 1983,90,"The King of Comedy"
+ 1984,89,"Once Upon a Time in America"
+ 1984,60,"Falling in Love"
+ 1985,98,"Brazil"
+ 1986,65,"The Mission"
+ 1987,100,"Dear America: Letters Home From Vietnam"
+ 1987,80,"The Untouchables"
+ 1987,78,"Angel Heart"
+ 1988,96,"Midnight Run"
+ 1989,64,"Jacknife"
+ 1989,47,"We're No Angels"
+ 1990,88,"Awakenings"
+ 1990,29,"Stanley & Iris"
+ 1990,96,"Goodfellas"
+ 1991,76,"Cape Fear"
+ 1991,69,"Mistress"
+ 1991,65,"Guilty by Suspicion"
+ 1991,71,"Backdraft"
+ 1992,87,"Thunderheart"
+ 1992,67,"Night and the City"
+ 1993,75,"This Boy's Life"
+ 1993,78,"Mad Dog and Glory"
+ 1993,96,"A Bronx Tale"
+ 1994,39,"Mary Shelley's Frankenstein"
+ 1995,80,"Casino"
+ 1995,86,"Heat"
+ 1996,74,"Sleepers"
+ 1996,38,"The Fan"
+ 1996,80,"Marvin's Room"
+ 1997,85,"Wag the Dog"
+ 1997,87,"Jackie Brown"
+ 1997,72,"Cop Land"
+ 1998,68,"Ronin"
+ 1998,38,"Great Expectations"
+ 1999,69,"Analyze This"
+ 1999,43,"Flawless"
+ 2000,43,"The Adventures of Rocky & Bullwinkle"
+ 2000,84,"Meet the Parents"
+ 2000,41,"Men of Honor"
+ 2001,73,"The Score"
+ 2001,33,"15 Minutes"
+ 2002,48,"City by the Sea"
+ 2002,27,"Analyze That"
+ 2003,4,"Godsend"
+ 2004,35,"Shark Tale"
+ 2004,38,"Meet the Fockers"
+ 2005,4,"The Bridge of San Luis Rey"
+ 2005,46,"Rent"
+ 2005,13,"Hide and Seek"
+ 2006,54,"The Good Shepherd"
+ 2007,21,"Arthur and the Invisibles"
+ 2007,76,"Captain Shakespeare"
+ 2008,19,"Righteous Kill"
+ 2008,51,"What Just Happened?"
+ 2009,46,"Everybody's Fine"
+ 2010,72,"Machete"
+ 2010,10,"Little Fockers"
+ 2010,50,"Stone"
+ 2011,25,"Killer Elite"
+ 2011,7,"New Year's Eve"
+ 2011,70,"Limitless"
+ 2012,92,"Silver Linings Playbook"
+ 2012,51,"Being Flynn"
+ 2012,29,"Red Lights"
+ 2013,46,"Last Vegas"
+ 2013,7,"The Big Wedding"
+ 2013,29,"Grudge Match"
+ 2013,11,"Killing Season"
+ 2014,9,"The Bag Man"
+ 2015,60,"Joy"
+ 2015,26,"Heist"
+ 2015,61,"The Intern"
+ 2016,11,"Dirty Grandpa"
+
--- /dev/null
+++ ogc_na-0.1.4/test/data/no-headers.csv
@@ -0,0 +1,6 @@
+ John,Doe,120 jefferson st.,Riverside, NJ, 08075
+ Jack,McGinnis,220 hobo Av.,Phila, PA,09119
+ "John ""Da Man""",Repici,120 Jefferson St.,Riverside, NJ,08075
+ Stephen,Tyler,"7452 Terrace ""At the Plaza"" road",SomeTown,SD, 91234
+ ,Blankman,,SomeTown, SD, 00298
+ "Joan ""the bone"", Anne",Jet,"9th, at Terrace plc",Desert City,CO,00123
--- /dev/null
+++ ogc_na-0.1.4/test/data/sample-context.jsonld
@@ -0,0 +1,13 @@
+ {
+   "@context": {
+     "prop": "http://example.com/props/",
+     "propA": "prop:a",
+     "propB": {
+       "@id": "prop:b",
+       "@context": {
+         "propD": "prop:d-inner"
+       }
+     },
+     "propD": "prop:d"
+   }
+ }
--- /dev/null
+++ ogc_na-0.1.4/test/data/sample-schema-prop-c.yml
@@ -0,0 +1,7 @@
+ $schema: https://json-schema.org/draft/2020-12/schema
+ description: Sensor, Observation, Sample, and Actuator (SOSA)
+ x-jsonld-context: sample-context.jsonld
+ type: object
+ properties:
+   propA:
+     type: object
--- /dev/null
+++ ogc_na-0.1.4/test/data/sample-schema.yml
@@ -0,0 +1,12 @@
+ $schema: https://json-schema.org/draft/2020-12/schema
+ description: Sensor, Observation, Sample, and Actuator (SOSA)
+ x-jsonld-context: sample-context.jsonld
+ type: object
+ properties:
+   propA: {}
+   propB:
+     type: object
+   propD: {}
+   propC:
+     $ref: sample-schema-prop-c.yml
+   propD: {}
--- /dev/null
+++ ogc_na-0.1.4/test/test_annotate_schema.py
@@ -0,0 +1,108 @@
+ import unittest
+ from pathlib import Path
+
+ from rich import json
+
+ from ogc.na import annotate_schema
+ from ogc.na.annotate_schema import SchemaAnnotator
+
+ THIS_DIR = Path(__file__).parent
+ DATA_DIR = THIS_DIR / 'data'
+
+
+ def deep_get(dct, *keys):
+     for key in keys:
+         dct = dct.get(key)
+         if dct is None:
+             return None
+     return dct
+
+
+ class AnnotateSchemaTest(unittest.TestCase):
+
+     def test_resolve_ref_url_full(self):
+         ref = 'http://www.example.com/path/to/ref'
+         self.assertEqual(annotate_schema.resolve_ref(ref), (None, ref))
+
+     def test_resolve_ref_url_relative(self):
+         ref = '/path/to/ref'
+         base_url = 'http://www.example.com/base/url'
+         self.assertEqual(annotate_schema.resolve_ref(ref, base_url=base_url),
+                          (None, 'http://www.example.com/path/to/ref'))
+
+         ref = 'relative/ref'
+         self.assertEqual(annotate_schema.resolve_ref(ref, base_url=base_url),
+                          (None, 'http://www.example.com/base/relative/ref'))
+
+         ref = '../relative/ref'
+         self.assertEqual(annotate_schema.resolve_ref(ref, base_url=base_url),
+                          (None, 'http://www.example.com/relative/ref'))
+
+     def test_resolve_ref_filename(self):
+         ref = '/tmp/relative/test'
+         fn_from = '/var/lib/from.yml'
+
+         self.assertEqual(annotate_schema.resolve_ref(ref, fn_from),
+                          (Path(ref), None))
+
+         ref = 'child/ref'
+         self.assertEqual(annotate_schema.resolve_ref(ref, fn_from),
+                          (Path(fn_from).parent / ref, None))
+
+         ref = '../child/ref2'
+         result = annotate_schema.resolve_ref(ref, fn_from)
+         self.assertEqual(result[0].resolve(), Path(fn_from).parent.joinpath(ref).resolve(), None)
+         self.assertIsNone(result[1])
+
+     def test_resolve_ref_root(self):
+         ref = f'{annotate_schema.REF_ROOT_MARKER}tmp/relative/test'
+         ref_root = Path('/var/lib/root')
+
+         self.assertEqual(annotate_schema.resolve_ref(ref, ref_root=ref_root),
+                          (ref_root / 'tmp/relative/test', None))
+
+         self.assertEqual(annotate_schema.resolve_ref(ref, ref_root=None),
+                          (Path() / 'tmp/relative/test', None))
+
+     def test_annotate_follow_refs(self):
+         annotator = SchemaAnnotator(fn=DATA_DIR / 'sample-schema.yml', follow_refs=True)
+         schemas = annotator.schemas
+         self.assertEqual({*schemas.keys()},
+                          {Path(DATA_DIR / 'sample-schema.yml'), Path(DATA_DIR / 'sample-schema-prop-c.yml')})
+         full = schemas[Path(DATA_DIR / 'sample-schema.yml')].schema
+         prop_c = schemas[Path(DATA_DIR / 'sample-schema-prop-c.yml')].schema
+
+         self.assertEqual(deep_get(full, 'properties', 'propA', 'x-jsonld-id'), 'http://example.com/props/a')
+         self.assertEqual(deep_get(full, 'properties', 'propB', 'x-jsonld-id'), 'http://example.com/props/b')
+         self.assertEqual(deep_get(full, 'properties', 'propC', 'x-jsonld-id'), None)
+         self.assertEqual(deep_get(full, 'properties', 'propD', 'x-jsonld-id'), 'http://example.com/props/d')
+
+         self.assertEqual(deep_get(prop_c, 'properties', 'propA', 'x-jsonld-id'), 'http://example.com/props/a')
+
+     def test_annotate_no_follow_refs(self):
+         annotator = SchemaAnnotator(fn=DATA_DIR / 'sample-schema.yml', follow_refs=False)
+         schemas = annotator.schemas
+         self.assertEqual({*schemas.keys()}, {Path(DATA_DIR / 'sample-schema.yml')})
+         full = schemas[Path(DATA_DIR / 'sample-schema.yml')].schema
+
+         self.assertEqual(deep_get(full, 'properties', 'propA', 'x-jsonld-id'), 'http://example.com/props/a')
+         self.assertEqual(deep_get(full, 'properties', 'propB', 'x-jsonld-id'), 'http://example.com/props/b')
+         self.assertEqual(deep_get(full, 'properties', 'propC', 'x-jsonld-id'), None)
+         self.assertEqual(deep_get(full, 'properties', 'propD', 'x-jsonld-id'), 'http://example.com/props/d')
+
+     def test_annotate_provided_context(self):
+         annotator = SchemaAnnotator(fn=DATA_DIR / 'sample-schema.yml',
+                                     follow_refs=False,
+                                     context={
+                                         '@context': {
+                                             'another': 'http://example.net/another/',
+                                             'propA': 'another:a',
+                                             'propC': 'another:c'
+                                         }
+                                     })
+         schemas = annotator.schemas
+         self.assertEqual({*schemas.keys()}, {Path(DATA_DIR / 'sample-schema.yml')})
+         full = schemas[Path(DATA_DIR / 'sample-schema.yml')].schema
+
+         self.assertEqual(deep_get(full, 'properties', 'propA', 'x-jsonld-id'), 'http://example.com/props/a')
+         self.assertEqual(deep_get(full, 'properties', 'propC', 'x-jsonld-id'), 'http://example.net/another/c')
--- /dev/null
+++ ogc_na-0.1.4/test/test_input_filters_csv.py
@@ -0,0 +1,86 @@
+ #!/usr/bin/env python3
+
+ import unittest
+ from io import BytesIO
+ from pathlib import Path
+
+ from ogc.na.input_filters import csv
+
+ THIS_DIR = Path(__file__).parent
+ DATA_DIR = THIS_DIR / 'data'
+
+ with open(DATA_DIR / 'headers.csv', 'rb') as f:
+     WITH_HEADERS = f.read()
+ with open(DATA_DIR / 'no-headers.csv', 'rb') as f:
+     NO_HEADERS = f.read()
+
+
+ class InputFiltersCSVTest(unittest.TestCase):
+
+     def test_rows_objects(self):
+         cfg = {
+             'rows': 'dict',
+             'skip-rows': 0,
+             'header-row': 0,
+         }
+         rows = csv.apply_filter(WITH_HEADERS, cfg)
+         self.assertEqual(len(rows), 87)
+         self.assertEqual(rows[0], {
+             'Year': '1968',
+             'Score': '86',
+             'Title': 'Greetings',
+         })
+         self.assertEqual(rows[10], {
+             'Year': '1977',
+             'Score': '67',
+             'Title': 'New York,New York',
+         })
+
+         cfg['skip-rows'] = 3
+         rows = csv.apply_filter(WITH_HEADERS, cfg)
+         self.assertEqual(len(rows), 84)
+         self.assertEqual(rows[0], {
+             'Year': '1971',
+             'Score': '40',
+             'Title': 'Born to Win',
+         })
+         self.assertEqual(rows[7], {
+             'Year': '1977',
+             'Score': '67',
+             'Title': 'New York,New York',
+         })
+
+         cfg['skip-rows'] = 0
+         cfg['header-row'] = 2
+         rows = csv.apply_filter(WITH_HEADERS, cfg)
+         self.assertEqual(len(rows), 85)
+         self.assertEqual(rows[0], {
+             '1970': '1970',
+             '17': '73',
+             'Bloody Mama': 'Hi,Mom!',
+         })
+
+     def test_rows_lists(self):
+         cfg = {
+             'rows': 'list',
+             'skip-rows': 0,
+         }
+         rows = csv.apply_filter(WITH_HEADERS, cfg)
+         self.assertEqual(len(rows), 88)
+         self.assertEqual(rows[0], ['Year', 'Score', 'Title'])
+         self.assertEqual(rows[10], ['1977', '47', '1900'])
+         cfg['skip-rows'] = 1
+         rows = csv.apply_filter(WITH_HEADERS, cfg)
+         self.assertEqual(len(rows), 87)
+         self.assertEqual(rows[0], ['1968', '86', "Greetings"])
+         self.assertEqual(rows[3], ['1971', '40', "Born to Win"])
+         cfg['header-row'] = 2  # should have no effect
+         self.assertEqual(rows, csv.apply_filter(WITH_HEADERS, cfg))
+
+         cfg['skip-rows'] = 0
+         rows = csv.apply_filter(NO_HEADERS, cfg)
+         self.assertEqual(len(rows), 6)
+         self.assertEqual(rows[0], ['John', 'Doe', '120 jefferson st.', 'Riverside', ' NJ', ' 08075'])
+         cfg['trim-values'] = True
+         rows = csv.apply_filter(NO_HEADERS, cfg)
+         self.assertEqual(rows[0], ['John', 'Doe', '120 jefferson st.', 'Riverside', 'NJ', '08075'])
--- ogc_na-0.1.2/test/test_annotate_schema.py
+++ /dev/null
@@ -1,51 +0,0 @@
- import unittest
- from pathlib import Path
-
- from ogc.na import annotate_schema
-
-
- class AnnotateSchemaTest(unittest.TestCase):
-
-     def test_resolve_ref_url_full(self):
-         ref = 'http://www.example.com/path/to/ref'
-         self.assertEqual(annotate_schema.resolve_ref(ref), (None, ref))
-
-     def test_resolve_ref_url_relative(self):
-         ref = '/path/to/ref'
-         base_url = 'http://www.example.com/base/url'
-         self.assertEqual(annotate_schema.resolve_ref(ref, base_url=base_url),
-                          (None, 'http://www.example.com/path/to/ref'))
-
-         ref = 'relative/ref'
-         self.assertEqual(annotate_schema.resolve_ref(ref, base_url=base_url),
-                          (None, 'http://www.example.com/base/relative/ref'))
-
-         ref = '../relative/ref'
-         self.assertEqual(annotate_schema.resolve_ref(ref, base_url=base_url),
-                          (None, 'http://www.example.com/relative/ref'))
-
-     def test_resolve_ref_filename(self):
-         ref = '/tmp/relative/test'
-         fn_from = '/var/lib/from.yml'
-
-         self.assertEqual(annotate_schema.resolve_ref(ref, fn_from),
-                          (Path(ref), None))
-
-         ref = 'child/ref'
-         self.assertEqual(annotate_schema.resolve_ref(ref, fn_from),
-                          (Path(fn_from).parent / ref, None))
-
-         ref = '../child/ref2'
-         result = annotate_schema.resolve_ref(ref, fn_from)
-         self.assertEqual(result[0].resolve(), Path(fn_from).parent.joinpath(ref).resolve(), None)
-         self.assertIsNone(result[1])
-
-     def test_resolve_ref_root(self):
-         ref = f'{annotate_schema.REF_ROOT_MARKER}tmp/relative/test'
-         ref_root = Path('/var/lib/root')
-
-         self.assertEqual(annotate_schema.resolve_ref(ref, ref_root=ref_root),
-                          (ref_root / 'tmp/relative/test', None))
-
-         self.assertEqual(annotate_schema.resolve_ref(ref, ref_root=None),
-                          (Path() / 'tmp/relative/test', None))