ogc-na 0.4.3-py3-none-any.whl → 0.5.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ogc/na/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '0.4.3'
- __version_tuple__ = version_tuple = (0, 4, 3)
+ __version__ = version = '0.5.4'
+ __version_tuple__ = version_tuple = (0, 5, 4)

  __commit_id__ = commit_id = None
ogc/na/annotate_schema.py CHANGED
@@ -135,13 +135,14 @@ import requests_cache

  from ogc.na.exceptions import ContextLoadError, SchemaLoadError
  from ogc.na.util import is_url, load_yaml, LRUCache, dump_yaml, \
- merge_contexts, merge_dicts, dict_contains, JSON_LD_KEYWORDS
+ merge_contexts, merge_dicts, dict_contains, JSON_LD_KEYWORDS, UNDEFINED, prune_context

  logger = logging.getLogger(__name__)

  ANNOTATION_PREFIX = 'x-jsonld-'
  ANNOTATION_CONTEXT = f'{ANNOTATION_PREFIX}context'
  ANNOTATION_ID = f'{ANNOTATION_PREFIX}id'
+ ANNOTATION_REVERSE = f'{ANNOTATION_PREFIX}reverse'
  ANNOTATION_PREFIXES = f'{ANNOTATION_PREFIX}prefixes'
  ANNOTATION_EXTRA_TERMS = f'{ANNOTATION_PREFIX}extra-terms'
  ANNOTATION_BASE = f'{ANNOTATION_PREFIX}base'
@@ -151,13 +152,6 @@ ANNOTATION_IGNORE_EXPAND = [ANNOTATION_CONTEXT, ANNOTATION_EXTRA_TERMS, ANNOTATI

  CURIE_TERMS = '@id', '@type', '@index'

- class Undefined:
-
- def __bool__(self):
- return False
-
- UNDEFINED = Undefined()
-
  context_term_cache = LRUCache(maxsize=20)
  requests_session = requests_cache.CachedSession('ogc.na.annotate_schema', backend='memory', expire_after=180)

@@ -180,6 +174,10 @@ class ReferencedSchema:
  is_json: bool = False
  anchors: dict[str, Any] = dataclasses.field(default_factory=dict)

+ @property
+ def full_ref(self):
+ return f"{self.location}#{self.fragment}" if self.fragment else self.location
+

  @dataclasses.dataclass
  class ResolvedContext:
@@ -187,10 +185,17 @@ class ResolvedContext:
  prefixes: dict[str, str] = dataclasses.field(default_factory=dict)


+ @dataclasses.dataclass
+ class SchemaLevelEntry:
+ subschema: dict[str, Any]
+ from_schema: ReferencedSchema
+ parent_prop_schema: ReferencedSchema = None
+
+
  class SchemaResolver:

  def __init__(self, working_directory=Path()):
- self.working_directory = working_directory.resolve()
+ self.working_directory = working_directory.absolute()
  self._schema_cache: dict[str | Path, Any] = {}

  @staticmethod
@@ -198,6 +203,8 @@ class SchemaResolver:
  ref = re.sub('^#', '', ref)
  if anchors and ref in anchors:
  return anchors[ref]
+ if not ref.startswith('/'):
+ raise ValueError(f'Invalid anchor reference: #{ref}')
  return jsonpointer.resolve_pointer(schema, ref)

  @staticmethod
@@ -243,15 +250,15 @@ class SchemaResolver:

  if isinstance(location, Path):
  if location.is_absolute():
- location = location.resolve()
+ location = location.absolute()
  elif not from_schema:
- location = self.working_directory.joinpath(location).resolve()
+ location = self.working_directory.joinpath(location).absolute()
  elif from_schema.full_contents.get('$id'):
  location = urljoin(from_schema.full_contents['$id'], str(location))
  elif not isinstance(from_schema.location, Path):
  location = urljoin(from_schema.location, str(location))
  else:
- location = from_schema.location.resolve().parent.joinpath(location).resolve()
+ location = from_schema.location.absolute().parent.joinpath(location).absolute()

  if location is None:
  raise ValueError(f'Unexpected ref type {type(ref).__name__}')
@@ -259,11 +266,11 @@ class SchemaResolver:
  return location, fragment

  def resolve_schema(self, ref: str | Path, from_schema: ReferencedSchema | None = None,
- force_contents: dict | str | None = None) -> ReferencedSchema | None:
+ force_contents: dict | str | None = None, return_none_on_loop=True) -> ReferencedSchema | None:
  chain = from_schema.chain + [from_schema] if from_schema else []
  try:
  schema_source, fragment = self.resolve_ref(ref, from_schema)
- if from_schema:
+ if from_schema and return_none_on_loop:
  for ancestor in from_schema.chain:
  if (not schema_source or ancestor.location == schema_source) and ancestor.fragment == fragment:
  return None
@@ -280,7 +287,6 @@ class SchemaResolver:
  ref=ref,
  is_json=from_schema.is_json,
  anchors=from_schema.anchors)
-
  if force_contents:
  is_json = False
  if isinstance(force_contents, str):
@@ -290,6 +296,8 @@ class SchemaResolver:
  raise SchemaLoadError('Error loading schema from string contents') from e
  else:
  contents = force_contents
+ elif from_schema and schema_source == from_schema.location:
+ contents, is_json = from_schema.full_contents, from_schema.is_json
  else:
  contents, is_json = self.load_contents(schema_source)
  anchors = SchemaResolver._find_anchors(contents)
@@ -375,7 +383,7 @@ def resolve_ref(ref: str, fn_from: str | Path | None = None, url_from: str | Non
  return None, urljoin(base_url, ref)
  else:
  fn_from = fn_from if isinstance(fn_from, Path) else Path(fn_from)
- ref = (fn_from.resolve().parent / ref).resolve()
+ ref = (fn_from.absolute().parent / ref).absolute()
  return ref, None


@@ -402,11 +410,6 @@ def resolve_context(ctx: Path | str | dict | list, expand_uris=True) -> Resolved
  prefix_uri = term_val if isinstance(term_val, str) else term_val.get('@id')
  prefixes[prefix] = prefix_uri
  return f"{prefix_uri}{localpart}"
- elif '@vocab' in c and vocab is UNDEFINED:
- # look for @vocab unless it has been overridden (e.g. set to null) somewhere down the chain
- vocab = c['@vocab']
- if isinstance(vocab, str):
- return f"{c['@vocab']}{curie}"

  return curie

@@ -523,7 +526,7 @@ class SchemaAnnotator:
  if default_context and (context_fn != default_context
  or not (isinstance(context_fn, Path)
  and isinstance(default_context, Path)
- and default_context.resolve() == context_fn.resolve())):
+ and default_context.absolute() == context_fn.absolute())):
  # Only load the provided context if it's different from the schema-referenced one
  resolved_default_context = resolve_context(default_context)
  context, prefixes = attrgetter('context', 'prefixes')(resolved_default_context)
@@ -538,29 +541,16 @@ class SchemaAnnotator:
  updated_refs: set[int] = set()

  def find_prop_context(prop, context_stack) -> dict | None:
- vocab = UNDEFINED
  for ctx in reversed(context_stack):
- if vocab is UNDEFINED and '@vocab' in ctx:
- vocab = ctx.get('@vocab')
  if prop in ctx:
  prop_ctx = ctx[prop]
  if isinstance(prop_ctx, str):
- if vocab and ':' not in prop_ctx and prop_ctx not in JSON_LD_KEYWORDS:
- prop_ctx = f"{vocab}{prop_ctx}"
  return {'@id': prop_ctx}
- elif '@id' not in prop_ctx and '@reverse' not in prop_ctx and not vocab:
+ elif '@id' not in prop_ctx and '@reverse' not in prop_ctx:
  raise ValueError(f'Missing @id for property {prop} in context {json.dumps(ctx, indent=2)}')
  else:
  result = {k: v for k, v in prop_ctx.items() if k in JSON_LD_KEYWORDS}
- if vocab:
- prop_id = result.get('@id')
- if not prop_id:
- result['@id'] = f"{vocab}{prop}"
- elif ':' not in prop_id and prop_id not in JSON_LD_KEYWORDS:
- result['@id'] = f"{vocab}{prop_id}"
  return result
- elif isinstance(vocab, str):
- return {'@id': f"{ctx['@vocab']}{prop}"}

  def process_properties(obj: dict, context_stack: list[dict[str, Any]],
  from_schema: ReferencedSchema, level) -> Iterable[str]:
@@ -664,7 +654,9 @@ class SchemaAnnotator:
  schema_type = 'object'

  if schema_type == 'object':
- used_terms.update(process_properties(subschema, context_stack, from_schema, level + 1))
+ new_terms = process_properties(subschema, context_stack, from_schema, level + 1)
+ if not in_defs:
+ used_terms.update(new_terms)
  elif schema_type == 'array':
  for k in ('prefixItems', 'items', 'contains'):
  new_terms = process_subschema(subschema.get(k), context_stack, from_schema, level + 1,
@@ -762,103 +754,92 @@ class ContextBuilder:
  # store processed $defs and definitions to avoid parsing them twice
  processed_refs = set()

- def read_properties(subschema: dict, from_schema: ReferencedSchema,
- onto_context: dict, schema_path: list[str]) -> dict | None:
- if schema_path:
- schema_path_str = '/' + '/'.join(schema_path)
- else:
- schema_path_str = ''
- if not isinstance(subschema, dict):
- return None
- if subschema.get('type', 'object') != 'object':
- return None
- for prop, prop_val in subschema.get('properties', {}).items():
- full_property_path = schema_path + [prop]
- full_property_path_str = f"{schema_path_str}/{prop}"
- self.visited_properties.setdefault(full_property_path_str, None)
- if from_schema == root_schema:
- self._missed_properties.setdefault(full_property_path_str, True)
- if not isinstance(prop_val, dict):
- continue
- prop_context = {'@context': {}}
- for term, term_val in prop_val.items():
- if term == ANNOTATION_BASE:
- prop_context.setdefault('@context', {})['@base'] = term_val
- elif term.startswith(ANNOTATION_PREFIX) and term not in ANNOTATION_IGNORE_EXPAND:
- if term == ANNOTATION_ID:
- self.visited_properties[full_property_path_str] = term_val
- self._missed_properties[full_property_path_str] = False
- prop_context['@' + term[len(ANNOTATION_PREFIX):]] = term_val
-
- if isinstance(prop_context.get('@id'), str) or isinstance(prop_context.get('@reverse'), str):
- prop_id_value = prop_context.get('@id', prop_context.get('@reverse'))
- self.visited_properties[full_property_path_str] = prop_id_value
- self._missed_properties[full_property_path_str] = False
- if prop_id_value in ('@nest', '@graph'):
- merge_contexts(onto_context, process_subschema(prop_val, from_schema, full_property_path))
- else:
- merge_contexts(prop_context['@context'],
- process_subschema(prop_val, from_schema, full_property_path))
- if prop not in onto_context or isinstance(onto_context[prop], str):
- onto_context[prop] = prop_context
- else:
- merge_contexts(onto_context[prop], prop_context)
- else:
- merge_contexts(onto_context, process_subschema(prop_val, from_schema, full_property_path))
+ PropertyContexts = dict[str, list[dict[str, Any]]]

  imported_prefixes: dict[str | Path, dict[str, str]] = {}
  imported_extra_terms: dict[str | Path, dict[str, str]] = {}

- cached_schema_contexts = {}
+ cached_schema_entries = {}

- def process_subschema(subschema: dict, from_schema: ReferencedSchema,
- schema_path: list[str]) -> dict:
+ pending_subschemas = deque()

- onto_context = {}
+ def merge_property_contexts(existing: dict[str, list[SchemaLevelEntry]],
+ new: dict[str, list[SchemaLevelEntry]]):
+ if new:
+ for prop, entries in new.items():
+ existing.setdefault(prop, []).extend(entries)
+ return existing

- if not isinstance(subschema, dict):
- return {}
+ def process_subschema_level(subschema: dict, from_schema: ReferencedSchema,
+ schema_path: list[str], is_vocab=False,
+ local_refs_only=False) -> dict[str, list[SchemaLevelEntry]]:

- for key in (ANNOTATION_BASE, ANNOTATION_VOCAB):
- top_level_value = subschema.get(key)
- if top_level_value:
- onto_context[f"@{key[len(ANNOTATION_PREFIX):]}"] = top_level_value
+ level_entries: dict[str, list[SchemaLevelEntry]] = {}

- read_properties(subschema, from_schema, onto_context, schema_path)
+ if not isinstance(subschema, dict):
+ return {}

  if '$ref' in subschema:
  ref = subschema['$ref']
- ref_path_str = f"{from_schema.location}{ref}"
- processed_refs.add(ref_path_str)
- referenced_schema = self.schema_resolver.resolve_schema(ref, from_schema)
- if referenced_schema:
- ref_ctx = copy.deepcopy(cached_schema_contexts.get(ref_path_str))
- if ref_ctx is None:
- ref_ctx = process_subschema(referenced_schema.subschema, referenced_schema, schema_path)
- merge_contexts(onto_context, ref_ctx)
+ if not local_refs_only or ref.startswith('#'):
+ ref_path_str = f"{from_schema.location}{ref}"
+ referenced_schema = self.schema_resolver.resolve_schema(ref, from_schema)
+ if referenced_schema and not referenced_schema.full_ref == from_schema.full_ref:
+ full_ref = referenced_schema.full_ref
+ if full_ref in cached_schema_entries:
+ ref_entries = cached_schema_entries[ref_path_str]
+ else:
+ print('resolving', full_ref)
+ ref_entries = process_subschema_level(
+ referenced_schema.subschema,
+ referenced_schema, schema_path,
+ is_vocab=is_vocab, local_refs_only=local_refs_only)
+ cached_schema_entries[ref_path_str] = ref_entries
+ merge_property_contexts(
+ level_entries,
+ ref_entries
+ )

  for i in ('allOf', 'anyOf', 'oneOf'):
  l = subschema.get(i)
  if isinstance(l, list):
  for idx, sub_subschema in enumerate(l):
- merge_contexts(onto_context, process_subschema(sub_subschema, from_schema, schema_path))
+ merge_property_contexts(
+ level_entries,
+ process_subschema_level(sub_subschema, from_schema,
+ schema_path, is_vocab=is_vocab))

  for i in ('prefixItems', 'items', 'contains', 'then', 'else', 'additionalProperties'):
  l = subschema.get(i)
  if isinstance(l, dict):
- merge_contexts(onto_context, process_subschema(l, from_schema, schema_path))
+ merge_property_contexts(
+ level_entries,
+ process_subschema_level(l, from_schema,
+ schema_path, is_vocab=is_vocab))

  for pp_k, pp in subschema.get('patternProperties', {}).items():
  if isinstance(pp, dict):
- merge_contexts(onto_context, process_subschema(pp, from_schema, schema_path + [pp_k]))
+ merge_property_contexts(
+ level_entries,
+ process_subschema_level(pp, from_schema,
+ schema_path + [pp_k],
+ is_vocab=is_vocab))

  if ANNOTATION_EXTRA_TERMS in subschema:
  for extra_term, extra_term_context in subschema[ANNOTATION_EXTRA_TERMS].items():
- if extra_term not in onto_context:
+ if extra_term not in level_entries:
  if isinstance(extra_term_context, dict):
- extra_term_context = {f"@{k[len(ANNOTATION_PREFIX):]}": v
- for k, v in extra_term_context.items()}
- onto_context[extra_term] = extra_term_context
+ level_entries.setdefault(extra_term, []).insert(
+ 0,
+ SchemaLevelEntry(subschema=extra_term_context,
+ from_schema=from_schema)
+ )
+
+ if subschema_properties := subschema.get('properties'):
+ for prop_name, prop_schema in subschema_properties.items():
+ level_entries.setdefault(prop_name, []).append(
+ SchemaLevelEntry(subschema=prop_schema, from_schema=from_schema)
+ )

  if from_schema:
  current_ref = f"{from_schema.location}{from_schema.ref}"
@@ -877,10 +858,68 @@ class ContextBuilder:
  if isinstance(sub_prefixes, dict):
  prefixes.update({k: v for k, v in sub_prefixes.items() if k not in prefixes})

- cached_schema_contexts[f"{from_schema.location}#{from_schema.fragment}"] = onto_context
+ cached_schema_entries[f"{from_schema.location}#{from_schema.fragment}"] = level_entries
+ return level_entries
+
+ def process_subschema(subschema: dict, onto_context: dict, from_schema: ReferencedSchema,
+ property_path: list, is_vocab=False, local_refs_only=False) -> dict:
+
+ if not isinstance(subschema, dict):
+ return {}
+
+ for key in (ANNOTATION_BASE, ANNOTATION_VOCAB):
+ top_level_value = subschema.get(key)
+ if top_level_value:
+ onto_context[f"@{key[len(ANNOTATION_PREFIX):]}"] = top_level_value
+ is_vocab = is_vocab or bool(onto_context.get('@vocab'))
+
+ level_props = process_subschema_level(subschema, from_schema, property_path,
+ is_vocab=is_vocab, local_refs_only=local_refs_only)
+ for prop_name, prop_entries in level_props.items():
+ prop_context = {}
+ for prop_entry in prop_entries:
+ if isinstance(prop_entry.subschema, dict):
+ prop_context.update({'@' + k[len(ANNOTATION_PREFIX):]: v
+ for k, v in prop_entry.subschema.items()
+ if k.startswith(ANNOTATION_PREFIX)})
+
+ new_prop_path = property_path + [prop_name]
+
+ if '@id' in prop_context:
+ prop_uri = prop_context['@id']
+ visited_str = prop_uri
+ elif '@reverse' in prop_context:
+ prop_uri = prop_context['@reverse']
+ visited_str = '^' + prop_uri
+ elif is_vocab:
+ prop_uri = prop_name
+ visited_str = '@vocab'
+ else:
+ # Allow bubbling of local subproperties
+ pending_subschemas.extend(
+ (entry.subschema, onto_context,
+ entry.from_schema, new_prop_path, is_vocab, True)
+ for entry in prop_entries
+ )
+ continue
+
+ onto_context[prop_name] = prop_context
+
+ if prop_uri:
+ prop_path_str = '/'.join(new_prop_path)
+ pending_subschemas.extend(
+ (entry.subschema, prop_context.setdefault('@context', {}),
+ entry.from_schema, new_prop_path, is_vocab, False)
+ for entry in prop_entries
+ )
+ self.visited_properties[prop_path_str] = visited_str
+ self._missed_properties[prop_path_str] = False
+
  return onto_context

- merge_contexts(own_context, process_subschema(root_schema.subschema, root_schema, []))
+ pending_subschemas.append((root_schema.subschema, own_context, root_schema, [], False, False))
+ while pending_subschemas:
+ process_subschema(*pending_subschemas.popleft())

  for imported_et in imported_extra_terms.values():
  for term, v in imported_et.items():
@@ -900,6 +939,8 @@ class ContextBuilder:
  else:
  del prefixes[prefix]

+ prune_context(own_context)
+
  if compact:

  def compact_uri(uri: str) -> str:
@@ -992,11 +1033,11 @@ def dump_annotated_schema(schema: AnnotatedSchema, subdir: Path | str = 'annotat
  :param root_dir: root directory for computing relative paths to schemas
  :param output_fn_transform: optional callable to transform the output path
  """
- wd = (Path(root_dir) if root_dir else Path()).resolve()
+ wd = (Path(root_dir) if root_dir else Path()).absolute()
  subdir = subdir if isinstance(subdir, Path) else Path(subdir)
  path = schema.source
  if isinstance(path, Path):
- output_fn = path.resolve().relative_to(wd)
+ output_fn = path.absolute().relative_to(wd)
  else:
  parsed = urlparse(str(path))
  output_fn = parsed.path
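
Note: this file consistently swaps Path.resolve() for Path.absolute() when building working directories and output paths. A minimal standard-library sketch of the difference (the paths are placeholders, not taken from the package):

    from pathlib import Path

    p = Path('schemas/../context.jsonld')
    # absolute() only prepends the current working directory;
    # '..' segments and symlinks are kept as-is.
    print(p.absolute())  # e.g. /work/schemas/../context.jsonld
    # resolve() additionally normalizes '..' and follows symlinks.
    print(p.resolve())   # e.g. /work/context.jsonld
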
ogc/na/download.py CHANGED
@@ -20,6 +20,7 @@ def download_file(url: str,
  dest: str | Path,
  object_diff: bool = True,
  ignore_diff_errors: bool = True):
+ logger.info('Downloading %s to %s', url, dest)
  if not isinstance(dest, Path):
  dest = Path(dest)
  r = requests.get(url)
@@ -30,12 +31,15 @@ def download_file(url: str,
  newcontent = util.load_yaml(content=r.content)
  oldcontent = util.load_yaml(filename=dest)
  overwrite = newcontent != oldcontent
+ if overwrite:
+ logger.info('Contents have changed, existing file will be overwritten')
  except Exception as e:
  if ignore_diff_errors:
  logger.warning('Error when loading content for diff: %s', str(e))
  else:
  raise
  if overwrite:
+ logger.info('Saving %s', dest)
  dest.parent.mkdir(parents=True, exist_ok=True)
  with open(dest, 'wb') as f:
  f.write(r.content)
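
Note: the new logger.info() calls in download_file() are only visible if the caller configures logging. A minimal usage sketch, assuming a YAML file so the object_diff comparison applies (URL and destination are placeholders):

    import logging
    from ogc.na.download import download_file

    logging.basicConfig(level=logging.INFO)  # surfaces the new 'Downloading'/'Saving' messages
    download_file('https://example.com/catalog.yaml', 'cache/catalog.yaml')
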
ogc/na/util.py CHANGED
@@ -28,6 +28,15 @@ try:
  except ImportError:
  from yaml import Loader as YamlLoader, SafeLoader as SafeYamlLoader, Dumper as YamlDumper

+
+ class _Undefined:
+
+ def __bool__(self):
+ return False
+
+
+ UNDEFINED = _Undefined()
+
  JSON_LD_KEYWORDS = {
  '@base',
  '@container',
@@ -329,6 +338,8 @@ def merge_contexts(a: dict, b: dict, fix_nest=True) -> dict[str, Any]:
  a[term] = va
  if isinstance(vb, str):
  vb = {'@id': vb}
+ if vb and isinstance(vb.get('@id'), _Undefined) and '@id' in va:
+ vb['@id'] = va['@id']
  if vb:
  for vb_term, vb_term_val in vb.items():
  if vb_term != '@context':
@@ -368,6 +379,19 @@ def merge_contexts(a: dict, b: dict, fix_nest=True) -> dict[str, Any]:
  return a


+ def prune_context(c: Any):
+ if isinstance(c, list):
+ for entry in c:
+ prune_context(entry)
+ elif isinstance(c, dict):
+ for k in list(c.keys()):
+ v = c[k]
+ if k == '@id' and isinstance(v, _Undefined):
+ del c[k]
+ else:
+ prune_context(v)
+
+
  def dict_contains(greater: dict, smaller: dict):
  for k, v in smaller.items():
  if k not in greater:
@@ -408,4 +432,4 @@ def _main():


  if __name__ == '__main__':
- _main()
+ _main()
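
Note: the falsy UNDEFINED sentinel now lives in ogc.na.util, and the new prune_context() strips '@id' entries that are still set to it. A small sketch of the apparent behaviour (the context dict is invented for illustration):

    from ogc.na.util import UNDEFINED, prune_context

    assert not UNDEFINED  # falsy sentinel, distinct from None
    ctx = {'name': {'@id': UNDEFINED, '@type': '@id'},
           'title': {'@id': 'dct:title'}}
    prune_context(ctx)
    # '@id' values left as the sentinel are dropped:
    # {'name': {'@type': '@id'}, 'title': {'@id': 'dct:title'}}
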
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ogc_na
- Version: 0.4.3
+ Version: 0.5.4
  Summary: OGC Naming Authority tools
  Author-email: Rob Atkinson <ratkinson@ogc.org>, Piotr Zaborowski <pzaborowski@ogc.org>, Alejandro Villar <avillar@ogc.org>
  License-Expression: Apache-2.0
@@ -1,8 +1,8 @@
  ogc/na/__init__.py,sha256=uzcNiJ3uKFNJ1HBfKxIwgAy2HMUFsLAe5RkrUg8ncac,464
- ogc/na/_version.py,sha256=bmI9ViMEsJ1Rjce-6ExwiNh2B7sZKTyBkze4k8NsTrU,704
- ogc/na/annotate_schema.py,sha256=MYXYzmFDua0DjOxccmGTAFHDS3T7U01jNv-u001ytj4,44584
+ ogc/na/_version.py,sha256=OrfVZdCDQ-QC6dUnxdROooJjwvLfeDMedTBstpAdSBU,704
+ ogc/na/annotate_schema.py,sha256=r-4gZgXD6L7ay2e41dqwFqOsVXiqEFEHomDhlN4Zoug,45938
  ogc/na/domain_config.py,sha256=ORzITa1rTrD1MQdpWYrIVW5SwSa9lJd3hnyHIxNgiIU,13947
- ogc/na/download.py,sha256=2afrLyl4WsAlxkCgXsl47fs9mNKfDmhVpeT2iwNSoq0,3354
+ ogc/na/download.py,sha256=PmmCRMQm5ikNAttqbmieOLc5r0MgsGxJyRjVGjnueR4,3558
  ogc/na/exceptions.py,sha256=cwvnq79ih90T9lfwJww0zOx_QwuICaUvlo3Mc8m8ouA,85
  ogc/na/gsp.py,sha256=KGa2G9i8kPefYTHNPUDoXnNyF7Tiwt8K__Ew_Qa7eeg,6048
  ogc/na/ingest_json.py,sha256=tCqQLxudnI7aIG9XslGm0tdvbfIY68HPrtgYfKtWO4A,37889
@@ -10,14 +10,14 @@ ogc/na/models.py,sha256=nGV8EALtXvmBtkUbu0FA4KOgwNUqQGWIDuMo7UGOKP8,652
  ogc/na/profile.py,sha256=T7nesbm7azF2ijF60UenJnQQKjIgJlnJ3pUbGT5nYgM,16511
  ogc/na/provenance.py,sha256=BXiyF6zuRhCz7s_6-m8VtM1DduVo1sA6_2xCLxSM0qQ,6365
  ogc/na/update_vocabs.py,sha256=9um_Qn3Si6yQ20qLYsFhiaXcxA2ryzduvYprNb252-U,21370
- ogc/na/util.py,sha256=Ztju3g1YuguUDbk4n2RJfCrl_IIzNAj7linfy24T6VA,12067
+ ogc/na/util.py,sha256=xUIIMjqQZvwlxIwsV2WL1vkC7Y-S0F685ap0gH1TryQ,12607
  ogc/na/validation.py,sha256=5xjHH55NZKM8HtUk8XgVzm8W5ZlZY00u_qsWfXK_8dM,3732
  ogc/na/input_filters/__init__.py,sha256=AhE7n_yECwxFKwOM3Jc0ft96TtF5i_Z-fHrS4HYOjaE,1179
  ogc/na/input_filters/csv.py,sha256=nFfB1XQF_QApcGGzMqEvzD_b3pBtCtsfUECsZ9UGE6s,2616
  ogc/na/input_filters/xlsx.py,sha256=X9EpFgC9WwHQD8iUJRGdaDYfgiLKjXPdhTVhDmNPAQ0,2730
  ogc/na/input_filters/xml.py,sha256=9qYjp_w5JLInFM48zB15IYH9eTafjp1Aqd_8kfuW3aA,2074
- ogc_na-0.4.3.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- ogc_na-0.4.3.dist-info/METADATA,sha256=kdSKsp_WMZhKj6lKJAg85TnyPf7sXzHVvfuGRij2bPA,3917
- ogc_na-0.4.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ogc_na-0.4.3.dist-info/top_level.txt,sha256=Kvy3KhzcIhNPT4_nZuJCmS946ptRr_MDyU4IIhZJhCY,4
- ogc_na-0.4.3.dist-info/RECORD,,
+ ogc_na-0.5.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ ogc_na-0.5.4.dist-info/METADATA,sha256=-_s_Wn13oRr6Yzzn6Hkou2hgTkjnahLkV1_91ZhgexE,3917
+ ogc_na-0.5.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ogc_na-0.5.4.dist-info/top_level.txt,sha256=Kvy3KhzcIhNPT4_nZuJCmS946ptRr_MDyU4IIhZJhCY,4
+ ogc_na-0.5.4.dist-info/RECORD,,