invenio-vocabularies 4.4.0__py2.py3-none-any.whl → 5.0.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of invenio-vocabularies might be problematic; see the release's page on the package registry for more details.

Files changed (36)
  1. invenio_vocabularies/__init__.py +1 -1
  2. invenio_vocabularies/assets/semantic-ui/js/invenio_vocabularies/package.json +0 -6
  3. invenio_vocabularies/config.py +7 -1
  4. invenio_vocabularies/contrib/affiliations/affiliations.py +1 -0
  5. invenio_vocabularies/contrib/affiliations/config.py +12 -1
  6. invenio_vocabularies/contrib/affiliations/mappings/os-v1/affiliations/affiliation-v2.0.0.json +171 -0
  7. invenio_vocabularies/contrib/affiliations/mappings/os-v2/affiliations/affiliation-v2.0.0.json +171 -0
  8. invenio_vocabularies/contrib/common/ror/datastreams.py +34 -32
  9. invenio_vocabularies/contrib/funders/config.py +3 -1
  10. invenio_vocabularies/contrib/funders/funders.py +1 -0
  11. invenio_vocabularies/contrib/funders/mappings/os-v1/funders/funder-v2.0.0.json +140 -0
  12. invenio_vocabularies/contrib/funders/mappings/os-v2/funders/funder-v2.0.0.json +140 -0
  13. invenio_vocabularies/contrib/names/config.py +5 -3
  14. invenio_vocabularies/contrib/names/mappings/os-v1/names/name-v2.0.0.json +150 -0
  15. invenio_vocabularies/contrib/names/mappings/os-v2/names/name-v2.0.0.json +150 -0
  16. invenio_vocabularies/contrib/names/names.py +1 -0
  17. invenio_vocabularies/contrib/subjects/config.py +9 -3
  18. invenio_vocabularies/contrib/subjects/datastreams.py +55 -0
  19. invenio_vocabularies/contrib/subjects/jsonschemas/subjects/subject-v1.0.0.json +12 -0
  20. invenio_vocabularies/contrib/subjects/mappings/os-v1/subjects/subject-v1.0.0.json +18 -0
  21. invenio_vocabularies/contrib/subjects/mappings/os-v2/subjects/subject-v1.0.0.json +18 -0
  22. invenio_vocabularies/contrib/subjects/mappings/v7/subjects/subject-v1.0.0.json +18 -0
  23. invenio_vocabularies/contrib/subjects/mesh/datastreams.py +43 -0
  24. invenio_vocabularies/contrib/subjects/schema.py +20 -2
  25. invenio_vocabularies/factories.py +13 -0
  26. invenio_vocabularies/services/config.py +1 -1
  27. invenio_vocabularies/services/service.py +1 -1
  28. invenio_vocabularies/translations/messages.pot +95 -48
  29. invenio_vocabularies/webpack.py +1 -1
  30. {invenio_vocabularies-4.4.0.dist-info → invenio_vocabularies-5.0.0.dist-info}/METADATA +5 -1
  31. {invenio_vocabularies-4.4.0.dist-info → invenio_vocabularies-5.0.0.dist-info}/RECORD +36 -28
  32. {invenio_vocabularies-4.4.0.dist-info → invenio_vocabularies-5.0.0.dist-info}/AUTHORS.rst +0 -0
  33. {invenio_vocabularies-4.4.0.dist-info → invenio_vocabularies-5.0.0.dist-info}/LICENSE +0 -0
  34. {invenio_vocabularies-4.4.0.dist-info → invenio_vocabularies-5.0.0.dist-info}/WHEEL +0 -0
  35. {invenio_vocabularies-4.4.0.dist-info → invenio_vocabularies-5.0.0.dist-info}/entry_points.txt +0 -0
  36. {invenio_vocabularies-4.4.0.dist-info → invenio_vocabularies-5.0.0.dist-info}/top_level.txt +0 -0
@@ -10,6 +10,6 @@
10
10
 
11
11
  from .ext import InvenioVocabularies
12
12
 
13
- __version__ = "4.4.0"
13
+ __version__ = "5.0.0"
14
14
 
15
15
  __all__ = ("__version__", "InvenioVocabularies")
@@ -13,7 +13,6 @@
13
13
  "coveralls": "^3.0.0",
14
14
  "enzyme": "^3.10.0",
15
15
  "enzyme-adapter-react-16": "^1.15.0",
16
- "enzyme-to-json": "^3.4.0",
17
16
  "expect": "^26.0.0",
18
17
  "lodash": "^4.17.0",
19
18
  "luxon": "^1.23.0",
@@ -22,10 +21,5 @@
22
21
  "react-scripts": "^5.0.1",
23
22
  "semantic-ui-react": "^2.1.0",
24
23
  "react-overridable": "^0.0.3"
25
- },
26
- "jest": {
27
- "snapshotSerializers": [
28
- "enzyme-to-json/serializer"
29
- ]
30
24
  }
31
25
  }
@@ -2,6 +2,7 @@
2
2
  #
3
3
  # Copyright (C) 2020-2024 CERN.
4
4
  # Copyright (C) 2021 Northwestern University.
5
+ # Copyright (C) 2024 University of Münster.
5
6
  #
6
7
  # Invenio-Vocabularies is free software; you can redistribute it and/or
7
8
  # modify it under the terms of the MIT License; see LICENSE file for more
@@ -104,7 +105,11 @@ VOCABULARIES_NAMES_SCHEMES = {
104
105
  }
105
106
  """Names allowed identifier schemes."""
106
107
 
107
- # configure CUSTOM_VOCABULARY_TYPES to differentiate output. Is used in VocabulariesServiceConfig
108
+ VOCABULARIES_SUBJECTS_SCHEMES = {
109
+ "gnd": {"label": _("GND"), "validator": idutils.is_gnd, "datacite": "GND"},
110
+ }
111
+ """Subjects allowed identifier schemes."""
112
+
108
113
  VOCABULARIES_CUSTOM_VOCABULARY_TYPES = [
109
114
  "names",
110
115
  "affiliations",
@@ -112,6 +117,7 @@ VOCABULARIES_CUSTOM_VOCABULARY_TYPES = [
112
117
  "funders",
113
118
  "subjects",
114
119
  ]
120
+ """List of custom vocabulary types."""
115
121
 
116
122
  VOCABULARIES_DATASTREAM_READERS = {
117
123
  "csv": CSVReader,
@@ -34,6 +34,7 @@ record_type = RecordTypeFactory(
34
34
  },
35
35
  schema_version="1.0.0",
36
36
  schema_path="local://affiliations/affiliation-v1.0.0.json",
37
+ index_name="affiliations-affiliation-v2.0.0",
37
38
  record_dumper=SearchDumper(
38
39
  model_fields={"pid": ("id", str)},
39
40
  extensions=[
@@ -9,6 +9,7 @@
9
9
  """Vocabulary affiliations configuration."""
10
10
 
11
11
  from flask import current_app
12
+ from invenio_i18n import get_locale
12
13
  from invenio_i18n import lazy_gettext as _
13
14
  from invenio_records_resources.services import SearchOptions
14
15
  from invenio_records_resources.services.records.components import DataComponent
@@ -20,13 +21,23 @@ from ...services.components import PIDComponent
20
21
  affiliation_schemes = LocalProxy(
21
22
  lambda: current_app.config["VOCABULARIES_AFFILIATION_SCHEMES"]
22
23
  )
24
+ localized_title = LocalProxy(lambda: f"title.{get_locale()}^20")
23
25
 
24
26
 
25
27
  class AffiliationsSearchOptions(SearchOptions):
26
28
  """Search options."""
27
29
 
28
30
  suggest_parser_cls = SuggestQueryParser.factory(
29
- fields=["name^100", "acronym^50", "title.*^20", "id^20", "aliases^20"],
31
+ fields=[
32
+ "name^100",
33
+ "acronym.keyword^100",
34
+ "acronym^40",
35
+ localized_title,
36
+ "id^20",
37
+ "aliases^20",
38
+ ],
39
+ type="most_fields", # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html#multi-match-types
40
+ fuzziness="AUTO", # https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#fuzziness
30
41
  )
31
42
 
32
43
  sort_default = "bestmatch"
@@ -0,0 +1,171 @@
1
+ {
2
+ "settings": {
3
+ "analysis": {
4
+ "char_filter": {
5
+ "strip_special_chars": {
6
+ "type": "pattern_replace",
7
+ "pattern": "[\\p{Punct}\\p{S}]",
8
+ "replacement": ""
9
+ }
10
+ },
11
+ "analyzer": {
12
+ "accent_edge_analyzer": {
13
+ "tokenizer": "standard",
14
+ "type": "custom",
15
+ "char_filter": ["strip_special_chars"],
16
+ "filter": [
17
+ "lowercase",
18
+ "asciifolding",
19
+ "edgegrams"
20
+ ]
21
+ },
22
+ "accent_analyzer": {
23
+ "tokenizer": "standard",
24
+ "type": "custom",
25
+ "char_filter": ["strip_special_chars"],
26
+ "filter": [
27
+ "lowercase",
28
+ "asciifolding"
29
+ ]
30
+ }
31
+ },
32
+ "normalizer": {
33
+ "accent_normalizer": {
34
+ "type": "custom",
35
+ "char_filter": ["strip_special_chars"],
36
+ "filter": [
37
+ "lowercase",
38
+ "asciifolding"
39
+ ]
40
+ }
41
+ },
42
+ "filter": {
43
+ "lowercase": {
44
+ "type": "lowercase",
45
+ "preserve_original": true
46
+ },
47
+ "asciifolding": {
48
+ "type": "asciifolding",
49
+ "preserve_original": true
50
+ },
51
+ "edgegrams": {
52
+ "type": "edge_ngram",
53
+ "min_gram": 2,
54
+ "max_gram": 20
55
+ }
56
+ }
57
+ }
58
+ },
59
+ "mappings": {
60
+ "dynamic_templates": [
61
+ {
62
+ "i18n_title": {
63
+ "path_match": "title.*",
64
+ "match_mapping_type": "string",
65
+ "mapping": {
66
+ "type": "text",
67
+ "analyzer": "accent_edge_analyzer",
68
+ "search_analyzer": "accent_analyzer"
69
+ }
70
+ }
71
+ }
72
+ ],
73
+ "dynamic": "strict",
74
+ "properties": {
75
+ "$schema": {
76
+ "type": "keyword",
77
+ "index": "false"
78
+ },
79
+ "created": {
80
+ "type": "date"
81
+ },
82
+ "updated": {
83
+ "type": "date"
84
+ },
85
+ "indexed_at": {
86
+ "type": "date"
87
+ },
88
+ "uuid": {
89
+ "type": "keyword"
90
+ },
91
+ "version_id": {
92
+ "type": "integer"
93
+ },
94
+ "id": {
95
+ "type": "keyword"
96
+ },
97
+ "name_sort": {
98
+ "type": "keyword"
99
+ },
100
+ "name": {
101
+ "type": "text",
102
+ "analyzer": "accent_edge_analyzer",
103
+ "search_analyzer": "accent_analyzer",
104
+ "copy_to": "name_sort"
105
+ },
106
+ "acronym": {
107
+ "type": "text",
108
+ "analyzer": "accent_edge_analyzer",
109
+ "search_analyzer": "accent_analyzer",
110
+ "fields": {
111
+ "keyword": {
112
+ "type": "keyword",
113
+ "normalizer": "accent_normalizer"
114
+ }
115
+ }
116
+ },
117
+ "identifiers": {
118
+ "properties": {
119
+ "identifier": {
120
+ "type": "keyword"
121
+ },
122
+ "scheme": {
123
+ "type": "keyword"
124
+ }
125
+ }
126
+ },
127
+ "pid": {
128
+ "type": "object",
129
+ "properties": {
130
+ "pk": {
131
+ "type": "integer"
132
+ },
133
+ "pid_type": {
134
+ "type": "keyword"
135
+ },
136
+ "obj_type": {
137
+ "type": "keyword"
138
+ },
139
+ "status": {
140
+ "type": "keyword"
141
+ }
142
+ }
143
+ },
144
+ "title": {
145
+ "type": "object",
146
+ "dynamic": "true"
147
+ },
148
+ "tags": {
149
+ "type": "keyword"
150
+ },
151
+ "country": {
152
+ "type": "text"
153
+ },
154
+ "country_name": {
155
+ "type": "text"
156
+ },
157
+ "location_name": {
158
+ "type": "text"
159
+ },
160
+ "status": {
161
+ "type": "keyword"
162
+ },
163
+ "aliases": {
164
+ "type": "text"
165
+ },
166
+ "types": {
167
+ "type": "keyword"
168
+ }
169
+ }
170
+ }
171
+ }
@@ -0,0 +1,171 @@
1
+ {
2
+ "settings": {
3
+ "analysis": {
4
+ "char_filter": {
5
+ "strip_special_chars": {
6
+ "type": "pattern_replace",
7
+ "pattern": "[\\p{Punct}\\p{S}]",
8
+ "replacement": ""
9
+ }
10
+ },
11
+ "analyzer": {
12
+ "accent_edge_analyzer": {
13
+ "tokenizer": "standard",
14
+ "type": "custom",
15
+ "char_filter": ["strip_special_chars"],
16
+ "filter": [
17
+ "lowercase",
18
+ "asciifolding",
19
+ "edgegrams"
20
+ ]
21
+ },
22
+ "accent_analyzer": {
23
+ "tokenizer": "standard",
24
+ "type": "custom",
25
+ "char_filter": ["strip_special_chars"],
26
+ "filter": [
27
+ "lowercase",
28
+ "asciifolding"
29
+ ]
30
+ }
31
+ },
32
+ "normalizer": {
33
+ "accent_normalizer": {
34
+ "type": "custom",
35
+ "char_filter": ["strip_special_chars"],
36
+ "filter": [
37
+ "lowercase",
38
+ "asciifolding"
39
+ ]
40
+ }
41
+ },
42
+ "filter": {
43
+ "lowercase": {
44
+ "type": "lowercase",
45
+ "preserve_original": true
46
+ },
47
+ "asciifolding": {
48
+ "type": "asciifolding",
49
+ "preserve_original": true
50
+ },
51
+ "edgegrams": {
52
+ "type": "edge_ngram",
53
+ "min_gram": 2,
54
+ "max_gram": 20
55
+ }
56
+ }
57
+ }
58
+ },
59
+ "mappings": {
60
+ "dynamic_templates": [
61
+ {
62
+ "i18n_title": {
63
+ "path_match": "title.*",
64
+ "match_mapping_type": "string",
65
+ "mapping": {
66
+ "type": "text",
67
+ "analyzer": "accent_edge_analyzer",
68
+ "search_analyzer": "accent_analyzer"
69
+ }
70
+ }
71
+ }
72
+ ],
73
+ "dynamic": "strict",
74
+ "properties": {
75
+ "$schema": {
76
+ "type": "keyword",
77
+ "index": "false"
78
+ },
79
+ "created": {
80
+ "type": "date"
81
+ },
82
+ "updated": {
83
+ "type": "date"
84
+ },
85
+ "indexed_at": {
86
+ "type": "date"
87
+ },
88
+ "uuid": {
89
+ "type": "keyword"
90
+ },
91
+ "version_id": {
92
+ "type": "integer"
93
+ },
94
+ "id": {
95
+ "type": "keyword"
96
+ },
97
+ "name_sort": {
98
+ "type": "keyword"
99
+ },
100
+ "name": {
101
+ "type": "text",
102
+ "analyzer": "accent_edge_analyzer",
103
+ "search_analyzer": "accent_analyzer",
104
+ "copy_to": "name_sort"
105
+ },
106
+ "acronym": {
107
+ "type": "text",
108
+ "analyzer": "accent_edge_analyzer",
109
+ "search_analyzer": "accent_analyzer",
110
+ "fields": {
111
+ "keyword": {
112
+ "type": "keyword",
113
+ "normalizer": "accent_normalizer"
114
+ }
115
+ }
116
+ },
117
+ "identifiers": {
118
+ "properties": {
119
+ "identifier": {
120
+ "type": "keyword"
121
+ },
122
+ "scheme": {
123
+ "type": "keyword"
124
+ }
125
+ }
126
+ },
127
+ "pid": {
128
+ "type": "object",
129
+ "properties": {
130
+ "pk": {
131
+ "type": "integer"
132
+ },
133
+ "pid_type": {
134
+ "type": "keyword"
135
+ },
136
+ "obj_type": {
137
+ "type": "keyword"
138
+ },
139
+ "status": {
140
+ "type": "keyword"
141
+ }
142
+ }
143
+ },
144
+ "title": {
145
+ "type": "object",
146
+ "dynamic": "true"
147
+ },
148
+ "tags": {
149
+ "type": "keyword"
150
+ },
151
+ "country": {
152
+ "type": "text"
153
+ },
154
+ "country_name": {
155
+ "type": "text"
156
+ },
157
+ "location_name": {
158
+ "type": "text"
159
+ },
160
+ "status": {
161
+ "type": "keyword"
162
+ },
163
+ "aliases": {
164
+ "type": "text"
165
+ },
166
+ "types": {
167
+ "type": "keyword"
168
+ }
169
+ }
170
+ }
171
+ }
@@ -10,8 +10,8 @@
10
10
  """ROR-related Datastreams Readers/Writers/Transformers module."""
11
11
 
12
12
  import io
13
- from datetime import datetime
14
13
 
14
+ import arrow
15
15
  import requests
16
16
  from idutils import normalize_ror
17
17
 
@@ -33,6 +33,26 @@ class RORHTTPReader(BaseReader):
33
33
  "RORHTTPReader downloads one file and therefore does not iterate through items"
34
34
  )
35
35
 
36
+ def _get_last_dump_date(self, linksets):
37
+ """Get the last dump date."""
38
+ for linkset in linksets:
39
+ metadata_formats = linkset.get("describedby", [])
40
+ for format_link in metadata_formats:
41
+ if format_link.get("type") == "application/ld+json":
42
+ json_ld_reponse = requests.get(
43
+ format_link["href"],
44
+ headers={"Accept": format_link["type"]},
45
+ )
46
+ json_ld_reponse.raise_for_status()
47
+ json_ld_data = json_ld_reponse.json()
48
+
49
+ last_dump_date = arrow.get(json_ld_data["dateCreated"])
50
+ return last_dump_date
51
+ else:
52
+ raise ReaderError(
53
+ "Couldn't find JSON-LD in publisher's linkset to determine last dump date."
54
+ )
55
+
36
56
  def read(self, item=None, *args, **kwargs):
37
57
  """Reads the latest ROR data dump ZIP file from Zenodo and yields an in-memory binary stream of it."""
38
58
  if item:
@@ -54,39 +74,21 @@ class RORHTTPReader(BaseReader):
54
74
  headers={"Accept": "application/linkset+json"},
55
75
  )
56
76
  linkset_response.raise_for_status()
77
+ linksets = linkset_response.json()["linkset"]
57
78
 
58
79
  if self._since:
59
- for link in linkset_response.json()["linkset"]:
60
- if "type" in link and link["type"] == "application/ld+json":
61
- json_ld_reponse = requests.get(
62
- link["anchor"], headers={"Accept": link["type"]}
63
- )
64
- json_ld_reponse.raise_for_status()
65
-
66
- # TODO Update to use dateCreated once the field is added to InvenioRDM. (https://github.com/inveniosoftware/invenio-rdm-records/issues/1777)
67
- last_dump_date = json_ld_reponse.json()["datePublished"]
68
- if datetime.fromisoformat(last_dump_date) < datetime.fromisoformat(
69
- self._since
70
- ):
71
- return
72
- break
73
- else:
74
- raise ReaderError("Couldn't find json-ld in publisher's linkset.")
75
-
76
- # Extract the Landing page Link Set Object located as the first (index 0) item.
77
- landing_page_linkset = linkset_response.json()["linkset"][0]
78
-
79
- # Extract the URL of the only ZIP file linked to the record.
80
- landing_page_zip_items = [
81
- item
82
- for item in landing_page_linkset["item"]
83
- if item["type"] == "application/zip"
84
- ]
85
- if len(landing_page_zip_items) != 1:
86
- raise ReaderError(
87
- f"Expected 1 ZIP item but got {len(landing_page_zip_items)}"
88
- )
89
- file_url = landing_page_zip_items[0]["href"]
80
+ last_dump_date = self._get_last_dump_date(linksets)
81
+ if last_dump_date < arrow.get(self._since):
82
+ return
83
+
84
+ for linkset in linksets:
85
+ items = linkset.get("item", [])
86
+ zip_files = [item for item in items if item["type"] == "application/zip"]
87
+ if len(zip_files) == 1:
88
+ file_url = zip_files[0]["href"]
89
+ break
90
+ if len(zip_files) > 1:
91
+ raise ReaderError(f"Expected 1 ZIP item but got {len(zip_files)}")
90
92
 
91
93
  # Download the ZIP file and fully load the response bytes content in memory.
92
94
  # The bytes content are then wrapped by a BytesIO to be file-like object (as required by `zipfile.ZipFile`).
@@ -33,7 +33,9 @@ class FundersSearchOptions(SearchOptions):
33
33
  "identifiers.identifier^10",
34
34
  "acronym^10",
35
35
  "aliases^10",
36
- ]
36
+ ],
37
+ type="most_fields", # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html#multi-match-types
38
+ fuzziness="AUTO", # https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#fuzziness
37
39
  )
38
40
 
39
41
  sort_default = "bestmatch"
@@ -46,6 +46,7 @@ record_type = RecordTypeFactory(
46
46
  ),
47
47
  schema_version="1.0.0",
48
48
  schema_path="local://funders/funder-v1.0.0.json",
49
+ index_name="funders-funder-v2.0.0",
49
50
  # Service layer
50
51
  service_id="funders",
51
52
  service_schema=FunderSchema,
@@ -0,0 +1,140 @@
1
+ {
2
+ "settings": {
3
+ "analysis": {
4
+ "char_filter": {
5
+ "strip_special_chars": {
6
+ "type": "pattern_replace",
7
+ "pattern": "[\\p{Punct}\\p{S}]",
8
+ "replacement": ""
9
+ }
10
+ },
11
+ "analyzer": {
12
+ "accent_edge_analyzer": {
13
+ "tokenizer": "standard",
14
+ "type": "custom",
15
+ "char_filter": ["strip_special_chars"],
16
+ "filter": [
17
+ "lowercase",
18
+ "asciifolding",
19
+ "edgegrams"
20
+ ]
21
+ },
22
+ "accent_analyzer": {
23
+ "tokenizer": "standard",
24
+ "type": "custom",
25
+ "char_filter": ["strip_special_chars"],
26
+ "filter": [
27
+ "lowercase",
28
+ "asciifolding"
29
+ ]
30
+ }
31
+ },
32
+ "filter": {
33
+ "lowercase": {
34
+ "type": "lowercase",
35
+ "preserve_original": true
36
+ },
37
+ "asciifolding": {
38
+ "type": "asciifolding",
39
+ "preserve_original": true
40
+ },
41
+ "edgegrams": {
42
+ "type": "edge_ngram",
43
+ "min_gram": 2,
44
+ "max_gram": 20
45
+ }
46
+ }
47
+ }
48
+ },
49
+ "mappings": {
50
+ "dynamic_templates": [
51
+ {
52
+ "i18n_title": {
53
+ "path_match": "title.*",
54
+ "match_mapping_type": "string",
55
+ "mapping": {
56
+ "type": "text",
57
+ "analyzer": "accent_edge_analyzer",
58
+ "search_analyzer": "accent_analyzer"
59
+ }
60
+ }
61
+ }
62
+ ],
63
+ "dynamic": "strict",
64
+ "properties": {
65
+ "$schema": {
66
+ "type": "keyword",
67
+ "index": "false"
68
+ },
69
+ "created": {
70
+ "type": "date"
71
+ },
72
+ "updated": {
73
+ "type": "date"
74
+ },
75
+ "indexed_at": {
76
+ "type": "date"
77
+ },
78
+ "uuid": {
79
+ "type": "keyword"
80
+ },
81
+ "version_id": {
82
+ "type": "integer"
83
+ },
84
+ "identifiers": {
85
+ "properties": {
86
+ "identifier": {
87
+ "type": "keyword"
88
+ },
89
+ "scheme": {
90
+ "type": "keyword"
91
+ }
92
+ }
93
+ },
94
+ "name_sort": {
95
+ "type": "keyword"
96
+ },
97
+ "name": {
98
+ "type": "text",
99
+ "analyzer": "accent_edge_analyzer",
100
+ "search_analyzer": "accent_analyzer",
101
+ "copy_to": "name_sort"
102
+ },
103
+ "country": {
104
+ "type": "text"
105
+ },
106
+ "country_name": {
107
+ "type": "text"
108
+ },
109
+ "location_name": {
110
+ "type": "text"
111
+ },
112
+ "acronym": {
113
+ "type": "text",
114
+ "analyzer": "accent_edge_analyzer",
115
+ "search_analyzer": "accent_analyzer"
116
+ },
117
+ "status": {
118
+ "type": "keyword"
119
+ },
120
+ "aliases": {
121
+ "type": "text",
122
+ "analyzer": "accent_edge_analyzer",
123
+ "search_analyzer": "accent_analyzer"
124
+ },
125
+ "types": {
126
+ "type": "keyword"
127
+ },
128
+ "id": {
129
+ "type": "keyword"
130
+ },
131
+ "title": {
132
+ "type": "object",
133
+ "dynamic": "true"
134
+ },
135
+ "tags": {
136
+ "type": "keyword"
137
+ }
138
+ }
139
+ }
140
+ }