udata 10.8.3.dev37185__py2.py3-none-any.whl → 10.8.3.dev37212__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of udata might be problematic.

Files changed (26)
  1. udata/core/organization/models.py +14 -9
  2. udata/harvest/backends/dcat.py +153 -162
  3. udata/harvest/tests/test_dcat_backend.py +1 -1
  4. udata/settings.py +1 -1
  5. udata/static/chunks/{10.471164b2a9fe15614797.js → 10.8ca60413647062717b1e.js} +3 -3
  6. udata/static/chunks/{10.471164b2a9fe15614797.js.map → 10.8ca60413647062717b1e.js.map} +1 -1
  7. udata/static/chunks/{11.51d706fb9521c16976bc.js → 11.b6f741fcc366abfad9c4.js} +3 -3
  8. udata/static/chunks/{11.51d706fb9521c16976bc.js.map → 11.b6f741fcc366abfad9c4.js.map} +1 -1
  9. udata/static/chunks/{13.f29411b06be1883356a3.js → 13.2d06442dd9a05d9777b5.js} +2 -2
  10. udata/static/chunks/{13.f29411b06be1883356a3.js.map → 13.2d06442dd9a05d9777b5.js.map} +1 -1
  11. udata/static/chunks/{17.3bd0340930d4a314ce9c.js → 17.e8e4caaad5cb0cc0bacc.js} +2 -2
  12. udata/static/chunks/{17.3bd0340930d4a314ce9c.js.map → 17.e8e4caaad5cb0cc0bacc.js.map} +1 -1
  13. udata/static/chunks/{19.8da42e8359d72afc2618.js → 19.f03a102365af4315f9db.js} +3 -3
  14. udata/static/chunks/{19.8da42e8359d72afc2618.js.map → 19.f03a102365af4315f9db.js.map} +1 -1
  15. udata/static/chunks/{8.54e44b102164ae5e7a67.js → 8.778091d55cd8ea39af6b.js} +2 -2
  16. udata/static/chunks/{8.54e44b102164ae5e7a67.js.map → 8.778091d55cd8ea39af6b.js.map} +1 -1
  17. udata/static/chunks/{9.07515e5187f475bce828.js → 9.033d7e190ca9e226a5d0.js} +3 -3
  18. udata/static/chunks/{9.07515e5187f475bce828.js.map → 9.033d7e190ca9e226a5d0.js.map} +1 -1
  19. udata/static/common.js +1 -1
  20. udata/static/common.js.map +1 -1
  21. {udata-10.8.3.dev37185.dist-info → udata-10.8.3.dev37212.dist-info}/METADATA +4 -1
  22. {udata-10.8.3.dev37185.dist-info → udata-10.8.3.dev37212.dist-info}/RECORD +26 -26
  23. {udata-10.8.3.dev37185.dist-info → udata-10.8.3.dev37212.dist-info}/LICENSE +0 -0
  24. {udata-10.8.3.dev37185.dist-info → udata-10.8.3.dev37212.dist-info}/WHEEL +0 -0
  25. {udata-10.8.3.dev37185.dist-info → udata-10.8.3.dev37212.dist-info}/entry_points.txt +0 -0
  26. {udata-10.8.3.dev37185.dist-info → udata-10.8.3.dev37212.dist-info}/top_level.txt +0 -0
udata/core/organization/models.py CHANGED
@@ -188,6 +188,10 @@ class Organization(
     after_delete = Signal()
     on_delete = Signal()
 
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.compute_aggregate_metrics = True
+
     @classmethod
     def pre_save(cls, sender, document, **kwargs):
         cls.before_save.send(document)
@@ -307,15 +311,16 @@ class Organization(
         from udata.models import Dataset, Follow, Reuse
 
         self.metrics["datasets"] = Dataset.objects(organization=self).visible().count()
-        self.metrics["datasets_by_months"] = get_stock_metrics(
-            Dataset.objects(organization=self).visible(), date_label="created_at_internal"
-        )
-        self.metrics["datasets_followers_by_months"] = get_stock_metrics(
-            Follow.objects(following__in=Dataset.objects(organization=self)), date_label="since"
-        )
-        self.metrics["datasets_reuses_by_months"] = get_stock_metrics(
-            Reuse.objects(datasets__in=Dataset.objects(organization=self)).visible()
-        )
+        if self.compute_aggregate_metrics:
+            self.metrics["datasets_by_months"] = get_stock_metrics(
+                Dataset.objects(organization=self).visible(), date_label="created_at_internal"
+            )
+            self.metrics["datasets_followers_by_months"] = get_stock_metrics(
+                Follow.objects(following__in=Dataset.objects(organization=self)), date_label="since"
+            )
+            self.metrics["datasets_reuses_by_months"] = get_stock_metrics(
+                Reuse.objects(datasets__in=Dataset.objects(organization=self)).visible()
+            )
 
         self.save(signal_kwargs={"ignores": ["post_save"]})
 
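Taken together, the two hunks above make the monthly aggregate series opt-out: `count_datasets()` still refreshes the cheap `datasets` count unconditionally, but the `*_by_months` metrics are only recomputed when `compute_aggregate_metrics` is set (it defaults to True in `__init__`, so regular callers are unaffected). A minimal standalone sketch of the pattern, with stub values in place of the real MongoDB queries:

class Org:
    def __init__(self):
        self.compute_aggregate_metrics = True  # default: unchanged behaviour
        self.metrics = {}

    def count_datasets(self):
        self.metrics["datasets"] = 42  # cheap count, always refreshed
        if self.compute_aggregate_metrics:
            # stands in for the three get_stock_metrics() aggregations
            self.metrics["datasets_by_months"] = {"2024-01": 42}

org = Org()
org.compute_aggregate_metrics = False  # e.g. while a harvest is running
org.count_datasets()
assert "datasets_by_months" not in org.metrics
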
udata/harvest/backends/dcat.py CHANGED
@@ -1,11 +1,12 @@
 import logging
 from datetime import date
-from typing import Generator
+from typing import ClassVar, Generator
 
 import lxml.etree as ET
 from flask import current_app
 from rdflib import Graph
 from rdflib.namespace import RDF
+from typing_extensions import override
 
 from udata.core.dataservices.rdf import dataservice_from_rdf
 from udata.core.dataset.rdf import dataset_from_rdf
@@ -55,9 +56,6 @@ URIS_TO_REPLACE = {
 }
 
 
-SAFE_PARSER = ET.XMLParser(resolve_entities=False)
-
-
 def extract_graph(source, target, node, specs):
     for p, o in source.predicate_objects(node):
         target.add((node, p, o))
@@ -68,6 +66,10 @@ def extract_graph(source, target, node, specs):
 class DcatBackend(BaseBackend):
     display_name = "DCAT"
 
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.organizations_to_update = set()
+
     def inner_harvest(self):
         fmt = self.get_format()
         self.job.data = {"format": fmt}
@@ -78,6 +80,10 @@ class DcatBackend(BaseBackend):
             self.process_one_datasets_page(page_number, page)
             pages.append((page_number, page))
 
+        for org in self.organizations_to_update:
+            org.compute_aggregate_metrics = True
+            org.count_datasets()
+
         # We do a second pass to have all datasets in memory and attach datasets
         # to dataservices. It could be better to be one pass of graph walking and
         # then one pass of attaching datasets to dataservices.
@@ -219,7 +225,11 @@ class DcatBackend(BaseBackend):
 
         dataset = self.get_dataset(item.remote_id)
         remote_url_prefix = self.get_extra_config_value("remote_url_prefix")
-        return dataset_from_rdf(page, dataset, node=node, remote_url_prefix=remote_url_prefix)
+        dataset = dataset_from_rdf(page, dataset, node=node, remote_url_prefix=remote_url_prefix)
+        if dataset.organization:
+            dataset.organization.compute_aggregate_metrics = False
+            self.organizations_to_update.add(dataset.organization)
+        return dataset
 
     def inner_process_dataservice(self, item: HarvestItem, page_number: int, page: Graph, node):
         item.kwargs["page_number"] = page_number
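
Together with the `inner_harvest` hunk above, this batches metric computation during a harvest: each processed dataset turns its organization's `compute_aggregate_metrics` off and records the organization in `organizations_to_update` (a set, so each organization appears once however many of its datasets are harvested), and the aggregates are recomputed once per organization after all pages are processed. A self-contained sketch of the deduplication effect (`StubOrg` is illustrative, not udata's model):

class StubOrg:
    def __init__(self):
        self.compute_aggregate_metrics = True
        self.recomputations = 0

    def count_datasets(self):
        if self.compute_aggregate_metrics:
            self.recomputations += 1  # stands in for the heavy aggregations

org = StubOrg()
organizations_to_update = set()

for _ in range(100):  # 100 harvested datasets from a single publisher
    org.compute_aggregate_metrics = False
    organizations_to_update.add(org)  # the set deduplicates

for o in organizations_to_update:  # end of inner_harvest
    o.compute_aggregate_metrics = True
    o.count_datasets()

assert org.recomputations == 1  # one recomputation instead of 100
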
@@ -240,104 +250,165 @@ class DcatBackend(BaseBackend):
                 return node
         raise ValueError(f"Unable to find dataset with DCT.identifier:{item.remote_id}")
 
-    def next_record_if_should_continue(self, start, search_results):
-        next_record = int(search_results.attrib["nextRecord"])
-        matched_count = int(search_results.attrib["numberOfRecordsMatched"])
-        returned_count = int(search_results.attrib["numberOfRecordsReturned"])
 
-        # Break conditions copied gratefully from
-        # noqa https://github.com/geonetwork/core-geonetwork/blob/main/harvesters/src/main/java/org/fao/geonet/kernel/harvest/harvester/csw/Harvester.java#L338-L369
-        break_conditions = (
-            # standard CSW: A value of 0 means all records have been returned.
-            next_record == 0,
-            # Misbehaving CSW server returning a next record > matched count
-            next_record > matched_count,
-            # No results returned already
-            returned_count == 0,
-            # Current next record is lower than previous one
-            next_record < start,
-            # Enough items have been harvested already
-            self.max_items and len(self.job.items) >= self.max_items,
-        )
+class CswDcatBackend(DcatBackend):
+    """
+    CSW harvester fetching records as DCAT.
+    The parsing of items is then the same as for the DcatBackend.
+    """
 
-        if any(break_conditions):
-            return None
-        else:
-            return next_record
+    display_name = "CSW-DCAT"
 
+    # CSW_REQUEST is based on:
+    # - Request syntax from spec [1] and example requests [1] [2].
+    # - Sort settings to ensure stable paging [3].
+    # - Filter settings to only retrieve record types currently mapped in udata.
+    #
+    # If you modify the request, make sure:
+    # - `typeNames` and `outputSchema` are consistent. You'll likely want to keep "gmd:MD_Metadata",
+    #   since "csw:Record" contains less information.
+    # - `typeNames` and namespaces in `csw:Query` (`Filter`, `SortBy`, ...) are consistent, although
+    #   they are ignored on some servers [4] [5].
+    # - It works on real catalogs! Not many servers implement the whole spec.
+    #
+    # References:
+    # [1] OpenGIS Catalogue Services Specification 2.0.2 – ISO Metadata Application Profile: Corrigendum
+    #     https://portal.ogc.org/files/80534
+    # [2] GeoNetwork - CSW test requests
+    #     https://github.com/geonetwork/core-geonetwork/tree/3.10.4/web/src/main/webapp/xml/csw/test
+    # [3] Udata - Support csw dcat harvest
+    #     https://github.com/opendatateam/udata/pull/2800#discussion_r1129053500
+    # [4] GeoNetwork - GetRecords ignores namespaces for Filter/SortBy fields
+    #     https://github.com/geonetwork/core-geonetwork/blob/3.10.4/csw-server/src/main/java/org/fao/geonet/kernel/csw/services/getrecords/FieldMapper.java#L92
+    # [5] GeoNetwork - GetRecords ignores `typeNames`
+    #     https://github.com/geonetwork/core-geonetwork/blob/3.10.4/csw-server/src/main/java/org/fao/geonet/kernel/csw/services/getrecords/CatalogSearcher.java#L194
+    CSW_REQUEST: ClassVar[str] = """
+        <csw:GetRecords xmlns:apiso="http://www.opengis.net/cat/csw/apiso/1.0"
+                        xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
+                        xmlns:ogc="http://www.opengis.net/ogc"
+                        service="CSW" version="2.0.2" outputFormat="application/xml"
+                        resultType="results" startPosition="{start}" maxRecords="25"
+                        outputSchema="{output_schema}">
+            <csw:Query typeNames="gmd:MD_Metadata">
+                <csw:ElementSetName>full</csw:ElementSetName>
+                <csw:Constraint version="1.1.0">
+                    <ogc:Filter>
+                        <ogc:Or>
+                            <ogc:PropertyIsEqualTo>
+                                <ogc:PropertyName>apiso:type</ogc:PropertyName>
+                                <ogc:Literal>dataset</ogc:Literal>
+                            </ogc:PropertyIsEqualTo>
+                            <ogc:PropertyIsEqualTo>
+                                <ogc:PropertyName>apiso:type</ogc:PropertyName>
+                                <ogc:Literal>nonGeographicDataset</ogc:Literal>
+                            </ogc:PropertyIsEqualTo>
+                            <ogc:PropertyIsEqualTo>
+                                <ogc:PropertyName>apiso:type</ogc:PropertyName>
+                                <ogc:Literal>series</ogc:Literal>
+                            </ogc:PropertyIsEqualTo>
+                            <ogc:PropertyIsEqualTo>
+                                <ogc:PropertyName>apiso:type</ogc:PropertyName>
+                                <ogc:Literal>service</ogc:Literal>
+                            </ogc:PropertyIsEqualTo>
+                        </ogc:Or>
+                    </ogc:Filter>
+                </csw:Constraint>
+                <ogc:SortBy>
+                    <ogc:SortProperty>
+                        <ogc:PropertyName>apiso:identifier</ogc:PropertyName>
+                        <ogc:SortOrder>ASC</ogc:SortOrder>
+                    </ogc:SortProperty>
+                </ogc:SortBy>
+            </csw:Query>
+        </csw:GetRecords>
+    """
 
-class CswDcatBackend(DcatBackend):
-    display_name = "CSW-DCAT"
+    CSW_OUTPUT_SCHEMA = "http://www.w3.org/ns/dcat#"
 
-    DCAT_SCHEMA = "http://www.w3.org/ns/dcat#"
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.xml_parser = ET.XMLParser(resolve_entities=False)
 
     def walk_graph(self, url: str, fmt: str) -> Generator[tuple[int, Graph], None, None]:
         """
         Yield all RDF pages as `Graph` from the source
         """
-        body = """<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
-                                  xmlns:gmd="http://www.isotc211.org/2005/gmd"
-                                  service="CSW" version="2.0.2" resultType="results"
-                                  startPosition="{start}" maxPosition="200"
-                                  outputSchema="{schema}">
-                    <csw:Query typeNames="gmd:MD_Metadata">
-                        <csw:ElementSetName>full</csw:ElementSetName>
-                        <ogc:SortBy xmlns:ogc="http://www.opengis.net/ogc">
-                            <ogc:SortProperty>
-                                <ogc:PropertyName>identifier</ogc:PropertyName>
-                                <ogc:SortOrder>ASC</ogc:SortOrder>
-                            </ogc:SortProperty>
-                        </ogc:SortBy>
-                    </csw:Query>
-                </csw:GetRecords>"""
-        headers = {"Content-Type": "application/xml"}
-
         page_number = 0
         start = 1
 
-        response = self.post(
-            url, data=body.format(start=start, schema=self.DCAT_SCHEMA), headers=headers
-        )
-        response.raise_for_status()
-        content = response.content
-        tree = ET.fromstring(content, parser=SAFE_PARSER)
-        if tree.tag == "{" + OWS_NAMESPACE + "}ExceptionReport":
-            raise ValueError(f"Failed to query CSW:\n{content}")
-        while tree is not None:
+        while True:
+            data = self.CSW_REQUEST.format(output_schema=self.CSW_OUTPUT_SCHEMA, start=start)
+            response = self.post(url, data=data, headers={"Content-Type": "application/xml"})
+            response.raise_for_status()
+
+            content = response.content
+            tree = ET.fromstring(content, parser=self.xml_parser)
+            if tree.tag == "{" + OWS_NAMESPACE + "}ExceptionReport":
+                raise ValueError(f"Failed to query CSW:\n{content}")
+
             search_results = tree.find("csw:SearchResults", {"csw": CSW_NAMESPACE})
-            if search_results is None:
+            if not search_results:
                 log.error(f"No search results found for {url} on page {page_number}")
-                break
-            for child in search_results:
+                return
+
+            for result in search_results:
                 subgraph = Graph(namespace_manager=namespace_manager)
-                subgraph.parse(data=ET.tostring(child), format=fmt)
+                doc = ET.tostring(self.as_dcat(result))
+                subgraph.parse(data=doc, format=fmt)
+
+                if not subgraph.subjects(
+                    RDF.type, [DCAT.Dataset, DCAT.DatasetSeries, DCAT.DataService]
+                ):
+                    raise ValueError("Failed to fetch CSW content")
 
                 yield page_number, subgraph
+
                 if self.has_reached_max_items():
                     return
 
-            next_record = self.next_record_if_should_continue(start, search_results)
-            if not next_record:
-                break
-
-            start = next_record
             page_number += 1
+            start = self.next_position(start, search_results)
+            if not start:
+                return
 
-            tree = ET.fromstring(
-                self.post(
-                    url, data=body.format(start=start, schema=self.DCAT_SCHEMA), headers=headers
-                ).content,
-                parser=SAFE_PARSER,
-            )
+    def as_dcat(self, tree: ET._Element) -> ET._Element:
+        """
+        Return the input tree as a DCAT tree.
+        For CswDcatBackend, this method return the incoming tree as-is, since it's already DCAT.
+        For subclasses of CswDcatBackend, this method should convert the incoming tree to DCAT.
+        """
+        return tree
 
+    def next_position(self, start: int, search_results: ET._Element) -> int | None:
+        next_record = int(search_results.attrib["nextRecord"])
+        matched_count = int(search_results.attrib["numberOfRecordsMatched"])
+        returned_count = int(search_results.attrib["numberOfRecordsReturned"])
 
-class CswIso19139DcatBackend(DcatBackend):
+        # Break conditions copied gratefully from
+        # noqa https://github.com/geonetwork/core-geonetwork/blob/main/harvesters/src/main/java/org/fao/geonet/kernel/harvest/harvester/csw/Harvester.java#L338-L369
+        should_break = (
+            # A value of 0 means all records have been returned (standard CSW)
+            (next_record == 0)
+            # Misbehaving CSW server returning a next record > matched count
+            or (next_record > matched_count)
+            # No results returned already
+            or (returned_count == 0)
+            # Current next record is lower than previous one
+            or (next_record < start)
+            # Enough items have been harvested already
+            or self.has_reached_max_items()
+        )
+        return None if should_break else next_record
+
+
+class CswIso19139DcatBackend(CswDcatBackend):
     """
-    An harvester that takes CSW ISO 19139 as input and transforms it to DCAT using SEMIC GeoDCAT-AP XSLT.
+    CSW harvester fetching records as ISO-19139 and using XSLT to convert them to DCAT.
     The parsing of items is then the same as for the DcatBackend.
     """
 
     display_name = "CSW-ISO-19139"
+
     extra_configs = (
         HarvestExtraConfig(
             _("Remote URL prefix"),
@@ -347,94 +418,14 @@ class CswIso19139DcatBackend(DcatBackend):
         ),
     )
 
-    ISO_SCHEMA = "http://www.isotc211.org/2005/gmd"
-
-    def walk_graph(self, url: str, fmt: str) -> Generator[tuple[int, Graph], None, None]:
-        """
-        Yield all RDF pages as `Graph` from the source
-
-        Parse CSW graph querying ISO schema.
-        Use SEMIC GeoDCAT-AP XSLT to map it to a correct version.
-        See https://github.com/SEMICeu/iso-19139-to-dcat-ap for more information on the XSLT.
-        """
-        # Load XSLT
-        xsl_url = current_app.config["HARVEST_ISO19139_XSL_URL"]
-        xsl = ET.fromstring(self.get(xsl_url).content, parser=SAFE_PARSER)
-        transform = ET.XSLT(xsl)
-
-        # Start querying and parsing graph
-        # Filter on dataset or serie records
-        body = """<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
-                                  xmlns:gmd="http://www.isotc211.org/2005/gmd"
-                                  service="CSW" version="2.0.2" resultType="results"
-                                  startPosition="{start}" maxPosition="10"
-                                  outputSchema="{schema}">
-                    <csw:Query typeNames="csw:Record">
-                        <csw:ElementSetName>full</csw:ElementSetName>
-                        <csw:Constraint version="1.1.0">
-                            <ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
-                                <ogc:Or xmlns:ogc="http://www.opengis.net/ogc">
-                                    <ogc:PropertyIsEqualTo>
-                                        <ogc:PropertyName>dc:type</ogc:PropertyName>
-                                        <ogc:Literal>dataset</ogc:Literal>
-                                    </ogc:PropertyIsEqualTo>
-                                    <ogc:PropertyIsEqualTo>
-                                        <ogc:PropertyName>dc:type</ogc:PropertyName>
-                                        <ogc:Literal>service</ogc:Literal>
-                                    </ogc:PropertyIsEqualTo>
-                                    <ogc:PropertyIsEqualTo>
-                                        <ogc:PropertyName>dc:type</ogc:PropertyName>
-                                        <ogc:Literal>series</ogc:Literal>
-                                    </ogc:PropertyIsEqualTo>
-                                </ogc:Or>
-                            </ogc:Filter>
-                        </csw:Constraint>
-                    </csw:Query>
-                </csw:GetRecords>"""
-        headers = {"Content-Type": "application/xml"}
+    CSW_OUTPUT_SCHEMA = "http://www.isotc211.org/2005/gmd"
 
-        page_number = 0
-        start = 1
-
-        response = self.post(
-            url, data=body.format(start=start, schema=self.ISO_SCHEMA), headers=headers
-        )
-        response.raise_for_status()
-
-        tree_before_transform = ET.fromstring(response.content, parser=SAFE_PARSER)
-        # Disabling CoupledResourceLookUp to prevent failure on xlink:href
-        # https://github.com/SEMICeu/iso-19139-to-dcat-ap/blob/master/documentation/HowTo.md#parameter-coupledresourcelookup
-        tree = transform(tree_before_transform, CoupledResourceLookUp="'disabled'")
-
-        while tree:
-            # We query the tree before the transformation because the XSLT remove the search results
-            # infos (useful for pagination)
-            search_results = tree_before_transform.find("csw:SearchResults", {"csw": CSW_NAMESPACE})
-            if search_results is None:
-                log.error(f"No search results found for {url} on page {page_number}")
-                break
-
-            subgraph = Graph(namespace_manager=namespace_manager)
-            subgraph.parse(ET.tostring(tree), format=fmt)
-
-            if not subgraph.subjects(RDF.type, DCAT.Dataset):
-                raise ValueError("Failed to fetch CSW content")
-
-            yield page_number, subgraph
-            if self.has_reached_max_items():
-                return
-
-            next_record = self.next_record_if_should_continue(start, search_results)
-            if not next_record:
-                break
-
-            start = next_record
-            page_number += 1
-
-            response = self.post(
-                url, data=body.format(start=start, schema=self.ISO_SCHEMA), headers=headers
-            )
-            response.raise_for_status()
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        xslt_url = current_app.config["HARVEST_ISO19139_XSLT_URL"]
+        xslt = ET.fromstring(self.get(xslt_url).content, parser=self.xml_parser)
+        self.transform = ET.XSLT(xslt)
 
-        tree_before_transform = ET.fromstring(response.content, parser=SAFE_PARSER)
-        tree = transform(tree_before_transform, CoupledResourceLookUp="'disabled'")
+    @override
+    def as_dcat(self, tree: ET._Element) -> ET._Element:
+        return self.transform(tree, CoupledResourceLookUp="'disabled'")
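
The net effect of the dcat.py changes: `walk_graph` is now a template method on `CswDcatBackend` that owns paging, safe XML parsing, and error handling, while subclasses only swap the requested `CSW_OUTPUT_SCHEMA` and the per-record `as_dcat` hook, as `CswIso19139DcatBackend` does with the SEMIC XSLT. A toy illustration of the same hook structure (the classes and tags below are illustrative, not udata's API):

import lxml.etree as ET

class Walker:
    def walk(self, pages):
        # Shared driver: parse each page, convert via the hook, re-serialize.
        for page_number, raw in enumerate(pages):
            yield page_number, ET.tostring(self.as_dcat(ET.fromstring(raw)))

    def as_dcat(self, tree):
        return tree  # records already arrive as DCAT: pass-through

class IsoWalker(Walker):
    def as_dcat(self, tree):
        tree.tag = "dcat"  # stands in for the ISO-19139 to DCAT XSLT
        return tree

print(list(IsoWalker().walk(["<gmd/>"])))  # [(0, b'<dcat/>')]
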
udata/harvest/tests/test_dcat_backend.py CHANGED
@@ -899,7 +899,7 @@ class CswIso19139DcatBackendTest:
         with open(os.path.join(CSW_DCAT_FILES_DIR, "XSLT.xml"), "r") as f:
             xslt = f.read()
         url = mock_csw_pagination(rmock, "geonetwork/srv/eng/csw.rdf", "geonetwork-iso-page-{}.xml")
-        rmock.get(current_app.config.get("HARVEST_ISO19139_XSL_URL"), text=xslt)
+        rmock.get(current_app.config.get("HARVEST_ISO19139_XSLT_URL"), text=xslt)
         org = OrganizationFactory()
         source = HarvestSourceFactory(
             backend="csw-iso-19139",
udata/settings.py CHANGED
@@ -283,7 +283,7 @@ class Defaults(object):
     HARVEST_GRAPHS_S3_BUCKET = None  # If the catalog is bigger than `HARVEST_MAX_CATALOG_SIZE_IN_MONGO` store the graph inside S3 instead of MongoDB
     HARVEST_GRAPHS_S3_FILENAME_PREFIX = ""  # Useful to store the graphs inside a subfolder of the bucket. For example by setting `HARVEST_GRAPHS_S3_FILENAME_PREFIX = 'graphs/'`
 
-    HARVEST_ISO19139_XSL_URL = "https://raw.githubusercontent.com/SEMICeu/iso-19139-to-dcat-ap/refs/heads/geodcat-ap-2.0.0/iso-19139-to-dcat-ap.xsl"
+    HARVEST_ISO19139_XSLT_URL = "https://raw.githubusercontent.com/SEMICeu/iso-19139-to-dcat-ap/refs/heads/geodcat-ap-2.0.0/iso-19139-to-dcat-ap.xsl"
 
     # S3 connection details
     S3_URL = None
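
Deployments that override the old key must rename it; the default value itself is unchanged, so the SEMIC XSLT is still fetched from the same URL. A minimal override sketch, assuming the usual udata mechanism of a local settings file referenced by the UDATA_SETTINGS environment variable (the mirror URL is illustrative):

# local_settings.py, pointed to by UDATA_SETTINGS
# Renamed: HARVEST_ISO19139_XSL_URL -> HARVEST_ISO19139_XSLT_URL
HARVEST_ISO19139_XSLT_URL = "https://example.org/mirror/iso-19139-to-dcat-ap.xsl"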