invenio-vocabularies 9.0.0__py2.py3-none-any.whl → 9.1.0__py2.py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- invenio_vocabularies/__init__.py +1 -1
- invenio_vocabularies/contrib/names/datastreams.py +11 -4
- invenio_vocabularies/contrib/names/schema.py +5 -3
- invenio_vocabularies/datastreams/datastreams.py +52 -4
- invenio_vocabularies/datastreams/tasks.py +82 -4
- invenio_vocabularies/datastreams/writers.py +26 -9
- invenio_vocabularies/jobs.py +28 -0
- invenio_vocabularies/services/tasks.py +3 -1
- {invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/METADATA +6 -2
- {invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/RECORD +15 -15
- {invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/WHEEL +0 -0
- {invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/entry_points.txt +0 -0
- {invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/licenses/AUTHORS.rst +0 -0
- {invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/licenses/LICENSE +0 -0
- {invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/top_level.txt +0 -0
invenio_vocabularies/__init__.py
CHANGED

invenio_vocabularies/contrib/names/datastreams.py
CHANGED
@@ -69,12 +69,11 @@ class OrcidDataSyncReader(BaseReader):
         date_format_no_millis = "%Y-%m-%d %H:%M:%S"

         if self.since:
-            last_sync = datetime.
+            last_sync = datetime.strptime(self.since, date_format)
         else:
             last_sync = datetime.now() - timedelta(
                 **current_app.config["VOCABULARIES_ORCID_SYNC_SINCE"]
             )
-
         try:
             content = io.TextIOWrapper(fileobj, encoding="utf-8")
             csv_reader = csv.DictReader(content)
@@ -95,7 +94,10 @@ class OrcidDataSyncReader(BaseReader):

                 if last_modified_date < last_sync:
                     current_app.logger.debug(
-                        f"Skipping ORCiD {orcid}
+                        f"Skipping ORCiD {orcid}: last modified {last_modified_date} is older than cutoff {last_sync}"
+                    )
+                    current_app.logger.info(
+                        "Reached cutoff date. No more recent records to process."
                     )
                     break
                 current_app.logger.debug(f"Yielding ORCiD {orcid} for sync.")
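Note: the two hunks above tighten the reader's cutoff handling: the "since" argument is now parsed with datetime.strptime, and records older than the cutoff stop the iteration with clearer log messages. A minimal, standalone sketch of the same comparison, with hypothetical format strings and dates (not taken from the package):

    from datetime import datetime

    # Assumed example values; the package derives date_format elsewhere.
    date_format = "%Y-%m-%d %H:%M:%S.%f"
    last_sync = datetime.strptime("2025-01-01 00:00:00.000000", date_format)
    last_modified_date = datetime.strptime("2024-12-31 23:59:59.000000", date_format)
    if last_modified_date < last_sync:
        print("Skipping: record is older than the cutoff")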
@@ -285,7 +287,12 @@ class OrcidTransformer(BaseTransformer):
                     f"Family name not found in ORCiD entry for ORCiD ID: {orcid_id}."
                 )
             )
-
+        full_name = " ".join(
+            p.strip()
+            for p in (given_names, family_name)
+            if isinstance(p, str) and p.strip()
+        )
+        if not self._is_valid_name(full_name):
             errors.append(
                 TransformerError(
                     f"Invalid characters in name for ORCiD ID: {orcid_id}."
invenio_vocabularies/contrib/names/schema.py
CHANGED

@@ -80,18 +80,20 @@ class NameSchema(BaseVocabularySchema, ModePIDFieldVocabularyMixin):

     @validates_schema
     def validate_affiliations(self, data, **kwargs):
-        """Validate
+        """Validate and return distinct affiliations."""
         affiliations = data.get("affiliations", [])
         seen_names = set()
         for affiliation in affiliations:
             name = affiliation.get("name")
             if not affiliation.get("id") and name:
                 if name in seen_names:
-
-                    raise ValidationError({"affiliations": messages})
+                    affiliations.remove(affiliation)
                 else:
                     seen_names.add(name)

+        if affiliations:
+            data["affiliations"] = affiliations
+
     @post_load
     def update_name(self, data, **kwargs):
         """Update names for person.
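Note: with the schema change above, duplicate affiliation names (for affiliations without an "id") are now dropped instead of raising a ValidationError. An equivalent standalone sketch of that behaviour, using hypothetical example data and rebuilding the list rather than mutating it in place:

    affiliations = [{"name": "CERN"}, {"name": "CERN"}, {"id": "cern01"}]
    seen_names = set()
    deduped = []
    for affiliation in affiliations:
        name = affiliation.get("name")
        if not affiliation.get("id") and name:
            if name in seen_names:
                continue  # duplicate by name: skip instead of raising
            seen_names.add(name)
        deduped.append(affiliation)
    print(deduped)  # [{'name': 'CERN'}, {'id': 'cern01'}]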
invenio_vocabularies/datastreams/datastreams.py
CHANGED

@@ -9,6 +9,11 @@
 """Base data stream."""

 from flask import current_app
+from invenio_access.permissions import system_identity, system_user_id
+from invenio_access.utils import get_identity
+from invenio_accounts.proxies import current_datastore
+from invenio_jobs.logging.jobs import EMPTY_JOB_CTX, job_context
+from invenio_jobs.proxies import current_runs_service

 from .errors import ReaderError, TransformerError, WriterError

@@ -78,6 +83,14 @@ class DataStream:
     def process_batch(self, batch):
         """Process a batch of entries."""
         current_app.logger.info(f"Processing batch of size: {len(batch)}")
+        if job_context.get() is not EMPTY_JOB_CTX:
+            run_id = job_context.get()["run_id"]
+            current_runs_service.add_total_entries(
+                system_identity,
+                run_id=run_id,
+                job_id=job_context.get()["job_id"],
+                total_entries=len(batch),
+            )
         transformed_entries = []
         transformed_entries_with_errors = []
         for stream_entry in batch:
@@ -168,12 +181,36 @@ class DataStream:

         return stream_entry

+    def _prepare_async_context(self):
+        """Prepare the async context for writers."""
+        job_ctx = job_context.get()
+        run_id = job_ctx.get("run_id")
+        identity_id = job_ctx.get("identity_id")
+        job_id = job_ctx.get("job_id")
+        # System user needs to be handled separately because it doesn't exist in the database
+        if identity_id == system_user_id:
+            identity = system_identity
+        else:
+            user = current_datastore.get_user(identity_id)
+            if user is None:
+                raise ValueError(f"User with ID:{identity_id} not found.")
+            identity = get_identity(user)
+
+        subtask_run = current_runs_service.create_subtask_run(
+            identity, parent_run_id=run_id, job_id=job_id
+        )
+        return str(subtask_run.id)
+
     def write(self, stream_entry, *args, **kwargs):
-        """
+        """Write a single stream entry."""
         current_app.logger.debug(f"Writing entry: {stream_entry.entry}")
         for writer in self._writers:
             try:
-                writer.
+                if writer.is_async and job_context.get() is not EMPTY_JOB_CTX:
+                    subtask_run_id = self._prepare_async_context()
+                    writer.write(stream_entry, subtask_run_id=subtask_run_id)
+                else:
+                    writer.write(stream_entry)
             except WriterError as err:
                 current_app.logger.error(f"Writer error: {str(err)}")
                 stream_entry.errors.append(f"{writer.__class__.__name__}: {str(err)}")
@@ -181,10 +218,21 @@ class DataStream:
         return stream_entry

     def batch_write(self, stream_entries, *args, **kwargs):
-        """
+        """Write a batch of stream entries."""
         current_app.logger.debug(f"Batch writing entries: {len(stream_entries)}")
         for writer in self._writers:
-
+            try:
+                if writer.is_async and job_context.get() is not EMPTY_JOB_CTX:
+                    subtask_run_id = self._prepare_async_context()
+                    yield from writer.write_many(
+                        stream_entries, subtask_run_id=subtask_run_id
+                    )
+                else:
+                    yield from writer.write_many(stream_entries)
+            except WriterError as err:
+                current_app.logger.error(f"Writer error: {str(err)}")
+                for entry in stream_entries:
+                    entry.errors.append(f"{writer.__class__.__name__}: {str(err)}")

     def total(self, *args, **kwargs):
         """The total of entries obtained from the origin."""
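Note: the DataStream changes above dispatch on a per-writer "is_async" flag and only create a subtask run when a job context is active. A simplified, package-independent sketch of that dispatch decision (the helper name and its parameters are hypothetical):

    def dispatch_write(writer, stream_entry, in_job_context, prepare_subtask_run):
        """Pass a subtask run id only to async writers running inside a job."""
        if getattr(writer, "is_async", False) and in_job_context:
            return writer.write(stream_entry, subtask_run_id=prepare_subtask_run())
        return writer.write(stream_entry)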
invenio_vocabularies/datastreams/tasks.py
CHANGED

@@ -9,29 +9,107 @@
 """Data Streams Celery tasks."""

 from celery import shared_task
+from flask import current_app
+from invenio_access.permissions import system_identity
+from invenio_jobs.logging.jobs import EMPTY_JOB_CTX, job_context
+from invenio_jobs.proxies import current_runs_service

 from ..datastreams import StreamEntry
 from ..datastreams.factories import WriterFactory


 @shared_task(ignore_result=True)
-def write_entry(writer_config, entry):
+def write_entry(writer_config, entry, subtask_run_id=None):
     """Write an entry.

     :param writer: writer configuration as accepted by the WriterFactory.
     :param entry: dictionary, StreamEntry is not serializable.
     """
+    job_ctx = job_context.get()
+    job_id = job_ctx.get("job_id", None) if job_ctx is not EMPTY_JOB_CTX else None
+    if subtask_run_id and job_id:
+        subtask_run = current_runs_service.get(
+            system_identity, job_id=job_id, run_id=subtask_run_id
+        )
+        current_runs_service.start_processing_subtask(
+            system_identity, subtask_run.id, job_id=job_id
+        )
+
     writer = WriterFactory.create(config=writer_config)
-
+    try:
+        processed_stream_entry = writer.write(StreamEntry(entry))
+        errored_entries_count = 1 if processed_stream_entry.errors else 0
+        inserted_count = 1 if processed_stream_entry.op_type == "create" else 0
+        updated_count = 1 if processed_stream_entry.op_type == "update" else 0
+        if subtask_run_id and job_id:
+            current_runs_service.finalize_subtask(
+                system_identity,
+                subtask_run_id,
+                job_id,
+                success=True if not processed_stream_entry.errors else False,
+                errored_entries_count=errored_entries_count,
+                inserted_entries_count=inserted_count,
+                updated_entries_count=updated_count,
+            )
+    except Exception as exc:
+        current_app.logger.error(f"Error writing entry {entry}: {exc}")
+        if subtask_run_id and job_id:
+            current_runs_service.finalize_subtask(
+                system_identity,
+                subtask_run_id,
+                job_id,
+                success=False,
+                errored_entries_count=1,
+            )


 @shared_task(ignore_result=True)
-def write_many_entry(writer_config, entries):
+def write_many_entry(writer_config, entries, subtask_run_id=None):
     """Write many entries.

     :param writer: writer configuration as accepted by the WriterFactory.
     :param entry: list of dictionaries, StreamEntry is not serializable.
     """
+    job_ctx = job_context.get()
+    job_id = job_ctx.get("job_id", None) if job_ctx is not EMPTY_JOB_CTX else None
+    if subtask_run_id and job_id:
+        subtask_run = current_runs_service.get(
+            system_identity, job_id=job_id, run_id=subtask_run_id
+        )
+        current_runs_service.start_processing_subtask(
+            system_identity, subtask_run.id, job_id=job_id
+        )
     writer = WriterFactory.create(config=writer_config)
     stream_entries = [StreamEntry(entry) for entry in entries]
-
+    try:
+        processed_stream_entries = writer.write_many(stream_entries)
+        errored_entries_count = sum(
+            1 for entry in processed_stream_entries if entry.errors
+        )
+        inserted_count = sum(
+            1 for entry in processed_stream_entries if entry.op_type == "create"
+        )
+        updated_count = sum(
+            1 for entry in processed_stream_entries if entry.op_type == "update"
+        )
+        if subtask_run_id and job_id:
+            current_runs_service.finalize_subtask(
+                system_identity,
+                subtask_run_id,
+                job_id,
+                success=True,
+                errored_entries_count=errored_entries_count,
+                inserted_entries_count=inserted_count,
+                updated_entries_count=updated_count,
+            )
+    except Exception as exc:
+        current_app.logger.error(
+            f"Error writing entries {entries}: {exc}. The errored entries count might be incorrect as an entire batch might have failed"
+        )
+        if subtask_run_id and job_id:
+            current_runs_service.finalize_subtask(
+                system_identity,
+                subtask_run_id,
+                job_id,
+                success=False,
+            )
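Note: write_many_entry above tallies inserted, updated, and errored entries from the processed results before reporting them for the subtask run. A standalone sketch of the same counting, using a hypothetical stand-in for StreamEntry:

    class DemoEntry:  # hypothetical stand-in for a processed StreamEntry
        def __init__(self, op_type, errors=None):
            self.op_type = op_type
            self.errors = errors or []

    results = [DemoEntry("create"), DemoEntry("update"), DemoEntry("create", ["boom"])]
    inserted = sum(1 for e in results if e.op_type == "create")
    updated = sum(1 for e in results if e.op_type == "update")
    errored = sum(1 for e in results if e.errors)
    print(inserted, updated, errored)  # 2 1 1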
invenio_vocabularies/datastreams/writers.py
CHANGED

@@ -28,6 +28,8 @@ from .tasks import write_entry, write_many_entry
 class BaseWriter(ABC):
     """Base writer."""

+    is_async = False
+
     def __init__(self, *args, **kwargs):
         """Base initialization logic."""
         # Add any base initialization here if needed
@@ -91,7 +93,9 @@ class ServiceWriter(BaseWriter):
         current = self._resolve(vocab_id)
         updated = dict(current.to_dict(), **entry)
         current_app.logger.debug(f"Updating entry with ID: {vocab_id}")
-        return StreamEntry(
+        return StreamEntry(
+            self._service.update(self._identity, vocab_id, updated), op_type="update"
+        )

     def write(self, stream_entry, *args, **kwargs):
         """Writes the input entry using a given service."""
@@ -102,7 +106,9 @@ class ServiceWriter(BaseWriter):
         if self._insert:
             try:
                 current_app.logger.debug("Inserting entry.")
-                return StreamEntry(
+                return StreamEntry(
+                    self._service.create(self._identity, entry), op_type="create"
+                )
             except PIDAlreadyExists:
                 if not self._update:
                     raise WriterError([f"Vocabulary entry already exists: {entry}"])
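Note: both ServiceWriter hunks above now tag the returned StreamEntry with an op_type ("create" or "update") so downstream tasks can count outcomes. A minimal sketch of such a carrier object (a hypothetical class, not the package's StreamEntry):

    class DemoStreamEntry:
        def __init__(self, entry, op_type=None, errors=None):
            self.entry = entry
            self.op_type = op_type
            self.errors = errors or []

    result = DemoStreamEntry({"id": "abc"}, op_type="create")
    print(result.op_type)  # create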
@@ -182,6 +188,8 @@ class YamlWriter(BaseWriter):
 class AsyncWriter(BaseWriter):
     """Writes the entries asynchronously (celery task)."""

+    is_async = True
+
     def __init__(self, writer, *args, **kwargs):
         """Constructor.

@@ -190,16 +198,25 @@ class AsyncWriter(BaseWriter):
         super().__init__(*args, **kwargs)
         self._writer = writer

-    def write(self, stream_entry, *args, **kwargs):
-        """Launches a celery task to write an entry."""
-
+    def write(self, stream_entry, subtask_run_id=None, *args, **kwargs):
+        """Launches a celery task to write an entry with a delay."""
+        # Add some delay to avoid processing the tasks too fast
+        write_entry.apply_async(
+            args=(self._writer, stream_entry.entry, subtask_run_id), countdown=1
+        )

         return stream_entry

-    def write_many(self, stream_entries, *args, **kwargs):
-        """Launches a celery task to write
-
-
+    def write_many(self, stream_entries, subtask_run_id=None, *args, **kwargs):
+        """Launches a celery task to write entries with a delay."""
+        # Add some delay to avoid processing the tasks too fast
+        write_many_entry.apply_async(
+            args=(
+                self._writer,
+                [stream_entry.entry for stream_entry in stream_entries],
+                subtask_run_id,
+            ),
+            countdown=1,
         )

         return stream_entries
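Note: AsyncWriter above switches to Celery's apply_async with a one-second countdown so write tasks are not dispatched too quickly, and forwards the subtask_run_id to the task. A small sketch of the same Celery pattern with a hypothetical task name (a configured broker is needed to actually execute it):

    from celery import shared_task

    @shared_task(ignore_result=True)
    def demo_write(payload, subtask_run_id=None):
        print(payload, subtask_run_id)

    # demo_write.apply_async(args=({"id": 1}, "run-123"), countdown=1)  # runs ~1s later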
invenio_vocabularies/jobs.py
CHANGED
@@ -8,6 +8,9 @@

 """Jobs module."""

+from datetime import datetime, timedelta
+
+from flask import current_app
 from invenio_i18n import lazy_gettext as _
 from invenio_jobs.jobs import JobType

@@ -177,3 +180,28 @@ class ImportORCIDJob(ProcessDataStreamJob):
         # It is the responsibility of the reader to handle it or ignore it
         reader["args"] = {**reader.get("args", {}), "since": str(since)}
         return task_args
+
+    @classmethod
+    def _build_task_arguments(cls, job_obj, since=None, custom_args=None, **kwargs):
+        """Build dict of arguments injected on task execution.
+
+        :param job_obj (Job): the Job object.
+        :param since (datetime): last time the job was executed.
+        :param custom_args (dict): when provided, takes precedence over any other
+            provided argument.
+        :return: a dict of arguments to be injected on task execution.
+        """
+        if custom_args:
+            return custom_args
+
+        if since is None:
+            """We set since to a time in the past defined by the VOCABULARIES_ORCID_SYNC_SINCE."""
+
+            since = datetime.now() - timedelta(
+                **current_app.config["VOCABULARIES_ORCID_SYNC_SINCE"]
+            )
+        """
+        Otherwise, since is already specified as a datetime with a timezone (see PredefinedArgsSchema) or we have never
+        run the job before so there is no logical value.
+        """
+        return {**cls.build_task_arguments(job_obj, since=since, **kwargs)}
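Note: _build_task_arguments above falls back to a "since" value computed from the VOCABULARIES_ORCID_SYNC_SINCE config, which is unpacked into timedelta. A standalone sketch of that computation with an assumed config shape (the value here is hypothetical):

    from datetime import datetime, timedelta

    VOCABULARIES_ORCID_SYNC_SINCE = {"days": 1}  # assumed: keyword args for timedelta
    since = datetime.now() - timedelta(**VOCABULARIES_ORCID_SYNC_SINCE)
    print(since)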
invenio_vocabularies/services/tasks.py
CHANGED

@@ -30,7 +30,9 @@ def process_datastream(config):
         for err in result.errors:
             current_app.logger.error(err)
             entries_with_errors += 1
+
     if entries_with_errors:
         raise TaskExecutionPartialError(
-            message=f"Task execution succeeded with {entries_with_errors} entries with errors."
+            message=f"Task execution succeeded with {entries_with_errors} entries with errors.",
+            errored_entries_count=entries_with_errors,
         )
{invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/METADATA
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: invenio-vocabularies
-Version: 9.0.0
+Version: 9.1.0
 Summary: Invenio module for managing vocabularies.
 Home-page: https://github.com/inveniosoftware/invenio-vocabularies
 Author: CERN
@@ -15,7 +15,7 @@ License-File: AUTHORS.rst
 Requires-Dist: invenio-i18n<4.0.0,>=3.0.0
 Requires-Dist: invenio-records-resources<9.0.0,>=8.0.0
 Requires-Dist: invenio-administration<5.0.0,>=4.0.0
-Requires-Dist: invenio-jobs<
+Requires-Dist: invenio-jobs<7.0.0,>=6.0.0
 Requires-Dist: lxml>=4.5.0
 Requires-Dist: pycountry>=24.0.0
 Requires-Dist: PyYAML>=5.4.1
@@ -89,6 +89,10 @@ Invenio module for managing vocabularies, based on Invenio-Records and Invenio-R
 Changes
 =======

+Version v9.1.0 (released 2025-09-05)
+
+- installation: bump major version of invenio-jobs
+
 Version v9.0.0 (released 2025-09-05)

 - setup: bump major version of invenio-jobs
{invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/RECORD
RENAMED

@@ -1,10 +1,10 @@
-invenio_vocabularies/__init__.py,sha256=
+invenio_vocabularies/__init__.py,sha256=XwPV7VV-dIH6JCpnyDYCJ4sZcPPW9K2z4q1Wf0873Uk,434
 invenio_vocabularies/cli.py,sha256=CpXTTIn2GTpUqNfLEMlRAp3JWst8ZjHVxoGYdhuuv_4,5959
 invenio_vocabularies/config.py,sha256=v6fTdeQXfUl0LavI2Xslql7mv3DVuIuGTqe2z3H3S7o,6942
 invenio_vocabularies/ext.py,sha256=GujJ4UARd4Fxf4z7zznRk9JAgHamZuYCOdrKU5czg00,5987
 invenio_vocabularies/factories.py,sha256=DM4jRUYu-so1jHRzhbNoNTnWKpYTZeRJhSP273giYws,6400
 invenio_vocabularies/fixtures.py,sha256=iEPkWf_ZjdP2D9r2sLdIlPoR8Rq2m5cnoFwywUGHneg,1696
-invenio_vocabularies/jobs.py,sha256=
+invenio_vocabularies/jobs.py,sha256=71_peishXz0E1a1PhjeLQ6vjVmLFZ0TSIzvUtmG3-7I,7220
 invenio_vocabularies/proxies.py,sha256=k7cTUgWfnCoYIuNqAj_VFi1zBN33KNNclRSVnBkObEM,711
 invenio_vocabularies/views.py,sha256=PNJ5nvc3O7ASwNe56xmqy5YaU9n3UYF3W2JwvtE_kYs,1561
 invenio_vocabularies/webpack.py,sha256=FkM8TxXClmaJcD8YsQq5Mai56nYVJh_1IYTMEHb3c1M,1891
@@ -175,13 +175,13 @@ invenio_vocabularies/contrib/names/__init__.py,sha256=QwPEMnNyjt9LVeBBdFbVQfIxOn
 invenio_vocabularies/contrib/names/api.py,sha256=sEPn_jFX3gyoxgbdEUSIvOoPCUI8pocI6qCZO6mzCgQ,300
 invenio_vocabularies/contrib/names/components.py,sha256=PyYD1lOhmsuNoyDwM_huxkeo7kWd44vkEbJk9gqbDrM,769
 invenio_vocabularies/contrib/names/config.py,sha256=62jh4MP-CygnBpnRBVaCoGySHDEwhBSG1MnlUBumthw,2046
-invenio_vocabularies/contrib/names/datastreams.py,sha256=
+invenio_vocabularies/contrib/names/datastreams.py,sha256=IjqBPW1Nq3IlCKuSrXFkItOqWOQm3D9x16_KzvQE5Vk,17282
 invenio_vocabularies/contrib/names/models.py,sha256=SYdtDDG-y5Wq_d06YhiVO5n8gfxPW_mx-tECsIcv5H8,308
 invenio_vocabularies/contrib/names/names.py,sha256=jej3gkBgOJpKwp5RmWk1AP678WkMb0VqCpzbTHLTyEc,2675
 invenio_vocabularies/contrib/names/permissions.py,sha256=5xrpYsA3oQUJ5lJpF7wjRAFiW-pM6_yP1k9zllbRwnQ,844
 invenio_vocabularies/contrib/names/resources.py,sha256=Z8XqLKfFKE69zdTTvcTDmpEZ6wqiqjIH5tp0LzXTSwQ,1588
 invenio_vocabularies/contrib/names/s3client.py,sha256=9n_Moet-XCQe0NEt1lYzRk_vorcNoCnmylZPKTbaqLA,1342
-invenio_vocabularies/contrib/names/schema.py,sha256=
+invenio_vocabularies/contrib/names/schema.py,sha256=RX9uteii6ww0MkfarncV1giZKhxx5BczXfPhMVgid1I,3794
 invenio_vocabularies/contrib/names/services.py,sha256=ntcGUTM0ZsKnRTxIKvZhKrRuup6Tjv965PATCaJR6Cc,2127
 invenio_vocabularies/contrib/names/jsonschemas/__init__.py,sha256=pdDZdyoxqWbAQ6ngiclhYoDUsGKgRDRPXlIDy0U5Jzg,241
 invenio_vocabularies/contrib/names/jsonschemas/names/name-v1.0.0.json,sha256=WlIroNhE9o6oh1Cd13ymBPXuXDOs0NYfjLGtAH417YI,1574
@@ -222,13 +222,13 @@ invenio_vocabularies/contrib/subjects/mesh/datastreams.py,sha256=6W6bgQ7P_31kf3e
 invenio_vocabularies/contrib/subjects/nvs/__init__.py,sha256=H7joYEPU0lzGN_p72i9FElSRoaQTYHM7PvOn-ZPHyzM,245
 invenio_vocabularies/contrib/subjects/nvs/datastreams.py,sha256=PQsHKat9kAeZYAT_gEkVHLcZJLCwlc6TTwfMxewgVek,4273
 invenio_vocabularies/datastreams/__init__.py,sha256=VPefh6k4Q3eYxKIW8I5zXUGucntp7VHxaOR5Vhgkfmg,412
-invenio_vocabularies/datastreams/datastreams.py,sha256=
+invenio_vocabularies/datastreams/datastreams.py,sha256=oFYCyLceC8dEUbR0Drb7yQRrfH7abhSchyPgBZ6WMZI,9637
 invenio_vocabularies/datastreams/errors.py,sha256=IDUZ3gNtYGrhcOgApHCms1gNNJTyJzoMPmG5JtIeYNU,678
 invenio_vocabularies/datastreams/factories.py,sha256=kuuN4Zt7Xw58rwf0M03djqcdZOZRWgJdLK16-HmID24,2213
 invenio_vocabularies/datastreams/readers.py,sha256=rDYnYASYgPvp9OH0mtlGHsOEJGzDNcnSbyD8arXkmKE,14803
-invenio_vocabularies/datastreams/tasks.py,sha256=
+invenio_vocabularies/datastreams/tasks.py,sha256=l-DcbW68jY0ww8u7pBDQsIvSDQRH7R-dAUOY0H7npX4,4466
 invenio_vocabularies/datastreams/transformers.py,sha256=PJFbmRSj3dpJ95NzONAIns5ksztshd99JOp_FLQAlJM,4133
-invenio_vocabularies/datastreams/writers.py,sha256=
+invenio_vocabularies/datastreams/writers.py,sha256=NgIdow2Q_Hr2j_49fcs2fmGccPvLqQzDJ9JaQeph8aw,7815
 invenio_vocabularies/datastreams/xml.py,sha256=HFa-lfxj7kFrr2IjeN1jxSLDfcvpBwO9nZLZF2-BryE,997
 invenio_vocabularies/records/__init__.py,sha256=Uj7O6fYdAtLOkLXUGSAYPADBB7aqP4yVs9b6OAjA158,243
 invenio_vocabularies/records/api.py,sha256=Lynt6Sz4BVN1orh0zgJ5ljhnUobEtcq8c22PmSeUo2U,1494
@@ -262,7 +262,7 @@ invenio_vocabularies/services/querystr.py,sha256=OrNUR_QAcQ_T-EiL3H1Jvzz9gK2ZB5F
 invenio_vocabularies/services/results.py,sha256=6LZIpzWSbt9wpRNWgjA1uIM4RFooOYTkHcp5-PnIJdU,3767
 invenio_vocabularies/services/schema.py,sha256=mwIBFylpQlWw1M6h_axc-z4Yd7X3Z1S0PxJOlZGpfrQ,4634
 invenio_vocabularies/services/service.py,sha256=2LrtXa6gFXhHSWn4R3Kl7Nj2f2mFmxzjgAF3K3Q3LGM,6749
-invenio_vocabularies/services/tasks.py,sha256=
+invenio_vocabularies/services/tasks.py,sha256=_kg7k0ESK-YWn8De-xTr6mkJnrjdTcydRcWUOHdKfAU,1245
 invenio_vocabularies/services/custom_fields/__init__.py,sha256=Wi7v8VUUTi16TCpMNKZ1zK3zWO2UjWBEnRMJjNm2foI,426
 invenio_vocabularies/services/custom_fields/subject.py,sha256=XTHnOddoSZsDHqGQq-hns46-WaLn88fqz57K--YLHg4,2243
 invenio_vocabularies/services/custom_fields/vocabulary.py,sha256=oQwI8Aoi2Nr9k3eWKnde5H7RXc7qdlATSeI6coy8UR0,3020
@@ -328,10 +328,10 @@ invenio_vocabularies/translations/zh_CN/LC_MESSAGES/messages.mo,sha256=AEoPrwqBX
 invenio_vocabularies/translations/zh_CN/LC_MESSAGES/messages.po,sha256=sP0-MvZo4F4wIG4JVLebCAOxh1tFIkfM2bLaKL-B148,8024
 invenio_vocabularies/translations/zh_TW/LC_MESSAGES/messages.mo,sha256=p2PFN_PL1m7miZ6qyoHFmBi6UUF3qhuz1sfCqTxm5fY,599
 invenio_vocabularies/translations/zh_TW/LC_MESSAGES/messages.po,sha256=vO8YRTkTqKE6gepSW_N3uNKYp3Me1rqnYaONFQGJOoY,7519
-invenio_vocabularies-9.
-invenio_vocabularies-9.
-invenio_vocabularies-9.
-invenio_vocabularies-9.
-invenio_vocabularies-9.
-invenio_vocabularies-9.
-invenio_vocabularies-9.
+invenio_vocabularies-9.1.0.dist-info/licenses/AUTHORS.rst,sha256=8d0p_WWE1r9DavvzMDi2D4YIGBHiMYcN3LYxqQOj8sY,291
+invenio_vocabularies-9.1.0.dist-info/licenses/LICENSE,sha256=UvI8pR8jGWqe0sTkb_hRG6eIrozzWwWzyCGEpuXX4KE,1062
+invenio_vocabularies-9.1.0.dist-info/METADATA,sha256=6bBS7XM2SClCorCpGrJ1CWEgFZxd0HbCtLYIuZCW1Yw,15270
+invenio_vocabularies-9.1.0.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+invenio_vocabularies-9.1.0.dist-info/entry_points.txt,sha256=lget4Ekno9VRTq36oowWReIJA99165gOp0BOiiET60Y,3179
+invenio_vocabularies-9.1.0.dist-info/top_level.txt,sha256=x1gRNbaODF_bCD0SBLM3nVOFPGi06cmGX5X94WKrFKk,21
+invenio_vocabularies-9.1.0.dist-info/RECORD,,
{invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/WHEEL
RENAMED
File without changes

{invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/entry_points.txt
RENAMED
File without changes

{invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/licenses/AUTHORS.rst
RENAMED
File without changes

{invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/licenses/LICENSE
RENAMED
File without changes

{invenio_vocabularies-9.0.0.dist-info → invenio_vocabularies-9.1.0.dist-info}/top_level.txt
RENAMED
File without changes