udata 10.3.1.dev34878__py2.py3-none-any.whl → 10.3.2__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Potentially problematic release.


This version of udata might be problematic. Click here for more details.

Files changed (49) hide show
  1. udata/__init__.py +1 -1
  2. udata/core/activity/api.py +1 -1
  3. udata/{frontend → core}/csv.py +1 -1
  4. udata/core/dataservices/csv.py +1 -1
  5. udata/core/dataset/api.py +11 -0
  6. udata/core/dataset/csv.py +1 -1
  7. udata/core/dataset/tasks.py +1 -2
  8. udata/core/discussions/csv.py +1 -1
  9. udata/core/organization/api.py +40 -0
  10. udata/core/organization/csv.py +1 -1
  11. udata/core/reuse/csv.py +1 -1
  12. udata/core/site/api.py +106 -1
  13. udata/core/tags/csv.py +1 -1
  14. udata/core/tags/views.py +1 -1
  15. udata/core/topic/models.py +1 -1
  16. udata/core/topic/parsers.py +4 -0
  17. udata/harvest/csv.py +1 -1
  18. udata/migrations/2025-04-24-topic-featured-default-false.py +15 -0
  19. udata/routing.py +3 -0
  20. udata/templates/mail/account_inactivity.html +15 -4
  21. udata/templates/mail/account_inactivity.txt +13 -4
  22. udata/tests/api/test_organizations_api.py +98 -1
  23. udata/tests/api/test_topics_api.py +13 -2
  24. udata/tests/apiv2/test_datasets.py +16 -0
  25. udata/tests/apiv2/test_topics.py +13 -2
  26. udata/tests/frontend/test_csv.py +1 -1
  27. udata/tests/site/test_site_csv_exports.py +464 -0
  28. udata/tests/test_tags.py +1 -1
  29. udata/translations/ar/LC_MESSAGES/udata.mo +0 -0
  30. udata/translations/ar/LC_MESSAGES/udata.po +38 -32
  31. udata/translations/de/LC_MESSAGES/udata.mo +0 -0
  32. udata/translations/de/LC_MESSAGES/udata.po +38 -32
  33. udata/translations/es/LC_MESSAGES/udata.mo +0 -0
  34. udata/translations/es/LC_MESSAGES/udata.po +38 -32
  35. udata/translations/fr/LC_MESSAGES/udata.mo +0 -0
  36. udata/translations/fr/LC_MESSAGES/udata.po +40 -34
  37. udata/translations/it/LC_MESSAGES/udata.mo +0 -0
  38. udata/translations/it/LC_MESSAGES/udata.po +38 -32
  39. udata/translations/pt/LC_MESSAGES/udata.mo +0 -0
  40. udata/translations/pt/LC_MESSAGES/udata.po +38 -32
  41. udata/translations/sr/LC_MESSAGES/udata.mo +0 -0
  42. udata/translations/sr/LC_MESSAGES/udata.po +38 -32
  43. udata/translations/udata.pot +43 -34
  44. {udata-10.3.1.dev34878.dist-info → udata-10.3.2.dist-info}/METADATA +10 -2
  45. {udata-10.3.1.dev34878.dist-info → udata-10.3.2.dist-info}/RECORD +49 -47
  46. {udata-10.3.1.dev34878.dist-info → udata-10.3.2.dist-info}/LICENSE +0 -0
  47. {udata-10.3.1.dev34878.dist-info → udata-10.3.2.dist-info}/WHEEL +0 -0
  48. {udata-10.3.1.dev34878.dist-info → udata-10.3.2.dist-info}/entry_points.txt +0 -0
  49. {udata-10.3.1.dev34878.dist-info → udata-10.3.2.dist-info}/top_level.txt +0 -0
udata/__init__.py CHANGED
@@ -4,5 +4,5 @@
4
4
  udata
5
5
  """
6
6
 
7
- __version__ = "10.3.1.dev"
7
+ __version__ = "10.3.2"
8
8
  __description__ = "Open data portal"
@@ -70,7 +70,7 @@ activity_parser.add_argument(
70
70
  )
71
71
 
72
72
 
73
- @api.route("/activity", endpoint="activity")
73
+ @api.route("/activity/", endpoint="activity")
74
74
  class SiteActivityAPI(API):
75
75
  @api.doc("activity")
76
76
  @api.expect(activity_parser)
@@ -240,7 +240,7 @@ def stream(queryset_or_adapter, basename=None):
240
240
  cls = _adapters.get(queryset_or_adapter._document)
241
241
  adapter = cls(queryset_or_adapter)
242
242
  else:
243
- raise ValueError("Unsupported object type")
243
+ raise ValueError(f"Unsupported object type {queryset_or_adapter}")
244
244
 
245
245
  timestamp = datetime.utcnow().strftime("%Y-%m-%d-%H-%M")
246
246
  headers = {
@@ -1,4 +1,4 @@
1
- from udata.frontend import csv
1
+ from udata.core import csv
2
2
 
3
3
  from .models import Dataservice
4
4
 
udata/core/dataset/api.py CHANGED
@@ -38,6 +38,7 @@ from udata.core.dataservices.models import Dataservice
38
38
  from udata.core.dataset.models import CHECKSUM_TYPES
39
39
  from udata.core.followers.api import FollowAPI
40
40
  from udata.core.organization.models import Organization
41
+ from udata.core.reuse.models import Reuse
41
42
  from udata.core.storages.api import handle_upload, upload_parser
42
43
  from udata.core.topic.models import Topic
43
44
  from udata.linkchecker.checker import check_resource
@@ -122,6 +123,7 @@ class DatasetApiParser(ModelApiParser):
122
123
  self.parser.add_argument("topic", type=str, location="args")
123
124
  self.parser.add_argument("credit", type=str, location="args")
124
125
  self.parser.add_argument("dataservice", type=str, location="args")
126
+ self.parser.add_argument("reuse", type=str, location="args")
125
127
  self.parser.add_argument(
126
128
  "archived",
127
129
  type=boolean,
@@ -200,6 +202,15 @@ class DatasetApiParser(ModelApiParser):
200
202
  pass
201
203
  else:
202
204
  datasets = datasets.filter(id__in=[d.id for d in dataservice.datasets])
205
+ if args.get("reuse"):
206
+ if not ObjectId.is_valid(args["reuse"]):
207
+ api.abort(400, "Reuse arg must be an identifier")
208
+ try:
209
+ reuse = Reuse.objects.get(id=args["reuse"])
210
+ except Reuse.DoesNotExist:
211
+ pass
212
+ else:
213
+ datasets = datasets.filter(id__in=[d.id for d in reuse.datasets])
203
214
  if args.get("archived") is not None:
204
215
  if current_user.is_anonymous:
205
216
  abort(401)
udata/core/dataset/csv.py CHANGED
@@ -1,8 +1,8 @@
1
1
  # for backwards compatibility (see https://github.com/opendatateam/udata/pull/3152)
2
2
  import json
3
3
 
4
+ from udata.core import csv
4
5
  from udata.core.discussions.csv import DiscussionCsvAdapter # noqa: F401
5
- from udata.frontend import csv
6
6
 
7
7
  from .models import Dataset, Resource
8
8
 
@@ -9,9 +9,8 @@ from mongoengine import ValidationError
9
9
 
10
10
  from udata import mail
11
11
  from udata import models as udata_models
12
- from udata.core import storages
12
+ from udata.core import csv, storages
13
13
  from udata.core.dataservices.models import Dataservice
14
- from udata.frontend import csv
15
14
  from udata.harvest.models import HarvestJob
16
15
  from udata.i18n import lazy_gettext as _
17
16
  from udata.models import Activity, Discussion, Follow, Organization, Topic, Transfer, db
@@ -1,4 +1,4 @@
1
- from udata.frontend import csv
1
+ from udata.core import csv
2
2
 
3
3
  from .models import Discussion
4
4
 
@@ -1,3 +1,4 @@
1
+ import itertools
1
2
  from datetime import datetime
2
3
 
3
4
  from flask import make_response, redirect, request, url_for
@@ -6,6 +7,7 @@ from mongoengine.queryset.visitor import Q
6
7
  from udata.api import API, api, errors
7
8
  from udata.api.parsers import ModelApiParser
8
9
  from udata.auth import admin_permission, current_user
10
+ from udata.core import csv
9
11
  from udata.core.badges import api as badges_api
10
12
  from udata.core.badges.fields import badge_fields
11
13
  from udata.core.contact_point.api import ContactPointApiParser
@@ -13,8 +15,10 @@ from udata.core.contact_point.api_fields import contact_point_page_fields
13
15
  from udata.core.dataservices.models import Dataservice
14
16
  from udata.core.dataset.api import DatasetApiParser
15
17
  from udata.core.dataset.api_fields import dataset_page_fields
18
+ from udata.core.dataset.csv import DatasetCsvAdapter, ResourcesCsvAdapter
16
19
  from udata.core.dataset.models import Dataset
17
20
  from udata.core.discussions.api import discussion_fields
21
+ from udata.core.discussions.csv import DiscussionCsvAdapter
18
22
  from udata.core.discussions.models import Discussion
19
23
  from udata.core.followers.api import FollowAPI
20
24
  from udata.core.reuse.models import Reuse
@@ -164,6 +168,42 @@ class OrganizationAPI(API):
164
168
  return "", 204
165
169
 
166
170
 
171
+ @ns.route("/<org:org>/datasets.csv", endpoint="organization_datasets_csv", doc=common_doc)
172
+ @api.response(404, "Organization not found")
173
+ @api.response(410, "Organization has been deleted")
174
+ class DatasetsCsvAPI(API):
175
+ def get(self, org):
176
+ datasets = Dataset.objects(organization=str(org.id)).visible()
177
+ adapter = DatasetCsvAdapter(datasets)
178
+ return csv.stream(adapter, "{0}-datasets".format(org.slug))
179
+
180
+
181
+ @ns.route("/<org:org>/discussions.csv", endpoint="organization_discussions_csv", doc=common_doc)
182
+ @api.response(404, "Organization not found")
183
+ @api.response(410, "Organization has been deleted")
184
+ class DiscussionsCsvAPI(API):
185
+ def get(self, org):
186
+ datasets = Dataset.objects.filter(organization=str(org.id))
187
+ discussions = [Discussion.objects.filter(subject=dataset) for dataset in datasets]
188
+ # Turns a list of lists into a flat list.
189
+ adapter = DiscussionCsvAdapter(itertools.chain(*discussions))
190
+ return csv.stream(adapter, "{0}-discussions".format(org.slug))
191
+
192
+
193
+ @ns.route(
194
+ "/<org:org>/datasets-resources.csv",
195
+ endpoint="organization_datasets_resources_csv",
196
+ doc=common_doc,
197
+ )
198
+ @api.response(404, "Organization not found")
199
+ @api.response(410, "Organization has been deleted")
200
+ class DatasetsResourcesCsvAPI(API):
201
+ def get(self, org):
202
+ datasets = Dataset.objects(organization=str(org.id)).visible()
203
+ adapter = ResourcesCsvAdapter(datasets)
204
+ return csv.stream(adapter, "{0}-datasets-resources".format(org.slug))
205
+
206
+
167
207
  @ns.route("/<org:org>/catalog", endpoint="organization_rdf", doc=common_doc)
168
208
  @api.response(404, "Organization not found")
169
209
  @api.response(410, "Organization has been deleted")
@@ -1,5 +1,5 @@
1
+ from udata.core import csv
1
2
  from udata.core.dataset.models import Dataset
2
- from udata.frontend import csv
3
3
 
4
4
  from .models import Organization
5
5
 
udata/core/reuse/csv.py CHANGED
@@ -1,4 +1,4 @@
1
- from udata.frontend import csv
1
+ from udata.core import csv
2
2
 
3
3
  from .models import Reuse
4
4
 
udata/core/site/api.py CHANGED
@@ -1,10 +1,23 @@
1
1
  from bson import ObjectId
2
- from flask import json, make_response, redirect, request, url_for
2
+ from flask import current_app, json, make_response, redirect, request, url_for
3
3
 
4
4
  from udata.api import API, api, fields
5
5
  from udata.auth import admin_permission
6
+ from udata.core import csv
7
+ from udata.core.dataservices.csv import DataserviceCsvAdapter
6
8
  from udata.core.dataservices.models import Dataservice
9
+ from udata.core.dataset.api import DatasetApiParser
7
10
  from udata.core.dataset.api_fields import dataset_fields
11
+ from udata.core.dataset.csv import ResourcesCsvAdapter
12
+ from udata.core.dataset.search import DatasetSearch
13
+ from udata.core.dataset.tasks import get_queryset as get_csv_queryset
14
+ from udata.core.organization.api import OrgApiParser
15
+ from udata.core.organization.csv import OrganizationCsvAdapter
16
+ from udata.core.organization.models import Organization
17
+ from udata.core.reuse.api import ReuseApiParser
18
+ from udata.core.reuse.csv import ReuseCsvAdapter
19
+ from udata.harvest.csv import HarvestSourceCsvAdapter
20
+ from udata.harvest.models import HarvestSource
8
21
  from udata.models import Dataset, Reuse
9
22
  from udata.rdf import CONTEXT, RDF_EXTENSIONS, graph_response, negociate_content
10
23
  from udata.utils import multi_to_dict
@@ -114,9 +127,101 @@ class SiteRdfCatalogFormat(API):
114
127
  return make_response(*graph_response(catalog, format))
115
128
 
116
129
 
130
+ @api.route("/site/datasets.csv", endpoint="site_datasets_csv")
131
+ class SiteDatasetsCsv(API):
132
+ def get(self):
133
+ # redirect to EXPORT_CSV dataset if feature is enabled and no filter is set
134
+ exported_models = current_app.config.get("EXPORT_CSV_MODELS", [])
135
+ if not request.args and "dataset" in exported_models:
136
+ return redirect(get_export_url("dataset"))
137
+ search_parser = DatasetSearch.as_request_parser(store_missing=False)
138
+ params = search_parser.parse_args()
139
+ params["facets"] = False
140
+ datasets = DatasetApiParser.parse_filters(get_csv_queryset(Dataset), params)
141
+ adapter = csv.get_adapter(Dataset)
142
+ return csv.stream(adapter(datasets), "datasets")
143
+
144
+
145
+ @api.route("/site/resources.csv", endpoint="site_datasets_resources_csv")
146
+ class SiteResourcesCsv(API):
147
+ def get(self):
148
+ # redirect to EXPORT_CSV dataset if feature is enabled and no filter is set
149
+ exported_models = current_app.config.get("EXPORT_CSV_MODELS", [])
150
+ if not request.args and "resource" in exported_models:
151
+ return redirect(get_export_url("resource"))
152
+ search_parser = DatasetSearch.as_request_parser(store_missing=False)
153
+ params = search_parser.parse_args()
154
+ params["facets"] = False
155
+ datasets = DatasetApiParser.parse_filters(get_csv_queryset(Dataset), params)
156
+ return csv.stream(ResourcesCsvAdapter(datasets), "resources")
157
+
158
+
159
+ @api.route("/site/organizations.csv", endpoint="site_organizations_csv")
160
+ class SiteOrganizationsCsv(API):
161
+ def get(self):
162
+ params = multi_to_dict(request.args)
163
+ # redirect to EXPORT_CSV dataset if feature is enabled and no filter is set
164
+ exported_models = current_app.config.get("EXPORT_CSV_MODELS", [])
165
+ if not params and "organization" in exported_models:
166
+ return redirect(get_export_url("organization"))
167
+ params["facets"] = False
168
+ organizations = OrgApiParser.parse_filters(get_csv_queryset(Organization), params)
169
+ return csv.stream(OrganizationCsvAdapter(organizations), "organizations")
170
+
171
+
172
+ @api.route("/site/reuses.csv", endpoint="site_reuses_csv")
173
+ class SiteReusesCsv(API):
174
+ def get(self):
175
+ params = multi_to_dict(request.args)
176
+ # redirect to EXPORT_CSV dataset if feature is enabled and no filter is set
177
+ exported_models = current_app.config.get("EXPORT_CSV_MODELS", [])
178
+ if not params and "reuse" in exported_models:
179
+ return redirect(get_export_url("reuse"))
180
+ params["facets"] = False
181
+ reuses = ReuseApiParser.parse_filters(get_csv_queryset(Reuse), params)
182
+ return csv.stream(ReuseCsvAdapter(reuses), "reuses")
183
+
184
+
185
+ @api.route("/site/dataservices.csv", endpoint="site_dataservices_csv")
186
+ class SiteDataservicesCsv(API):
187
+ def get(self):
188
+ params = multi_to_dict(request.args)
189
+ # redirect to EXPORT_CSV dataset if feature is enabled and no filter is set
190
+ exported_models = current_app.config.get("EXPORT_CSV_MODELS", [])
191
+ if not params and "dataservice" in exported_models:
192
+ return redirect(get_export_url("dataservice"))
193
+ params["facets"] = False
194
+ dataservices = Dataservice.apply_sort_filters(get_csv_queryset(Dataservice))
195
+ return csv.stream(DataserviceCsvAdapter(dataservices), "dataservices")
196
+
197
+
198
+ @api.route("/site/harvests.csv", endpoint="site_harvests_csv")
199
+ class SiteHarvestsCsv(API):
200
+ def get(self):
201
+ # redirect to EXPORT_CSV dataset if feature is enabled
202
+ exported_models = current_app.config.get("EXPORT_CSV_MODELS", [])
203
+ if "harvest" in exported_models:
204
+ return redirect(get_export_url("harvest"))
205
+ adapter = HarvestSourceCsvAdapter(get_csv_queryset(HarvestSource).order_by("created_at"))
206
+ return csv.stream(adapter, "harvest")
207
+
208
+
117
209
  @api.route("/site/context.jsonld", endpoint="site_jsonld_context")
118
210
  class SiteJsonLdContext(API):
119
211
  def get(self):
120
212
  response = make_response(json.dumps(CONTEXT))
121
213
  response.headers["Content-Type"] = "application/ld+json"
122
214
  return response
215
+
216
+
217
+ def get_export_url(model):
218
+ did = current_app.config["EXPORT_CSV_DATASET_ID"]
219
+ dataset = Dataset.objects.get_or_404(id=did)
220
+ resource = None
221
+ for r in dataset.resources:
222
+ if r.extras.get("csv-export:model", "") == model:
223
+ resource = r
224
+ break
225
+ if not resource:
226
+ api.abort(404)
227
+ return resource.url
udata/core/tags/csv.py CHANGED
@@ -1,4 +1,4 @@
1
- from udata.frontend import csv
1
+ from udata.core import csv
2
2
 
3
3
  from .models import Tag
4
4
 
udata/core/tags/views.py CHANGED
@@ -1,6 +1,6 @@
1
1
  import logging
2
2
 
3
- from udata.frontend import csv
3
+ from udata.core import csv
4
4
  from udata.i18n import I18nBlueprint
5
5
 
6
6
  from .csv import TagCsvAdapter
@@ -21,7 +21,7 @@ class Topic(db.Document, Owned, db.Datetimed):
21
21
  datasets = db.ListField(db.LazyReferenceField("Dataset", reverse_delete_rule=db.PULL))
22
22
  reuses = db.ListField(db.LazyReferenceField("Reuse", reverse_delete_rule=db.PULL))
23
23
 
24
- featured = db.BooleanField()
24
+ featured = db.BooleanField(default=False)
25
25
  private = db.BooleanField()
26
26
  extras = db.ExtrasField()
27
27
 
@@ -1,4 +1,5 @@
1
1
  from bson.objectid import ObjectId
2
+ from flask_restx.inputs import boolean
2
3
 
3
4
  from udata.api import api
4
5
  from udata.api.parsers import ModelApiParser
@@ -20,6 +21,7 @@ class TopicApiParser(ModelApiParser):
20
21
  self.parser.add_argument("granularity", type=str, location="args")
21
22
  self.parser.add_argument("organization", type=str, location="args")
22
23
  self.parser.add_argument("owner", type=str, location="args")
24
+ self.parser.add_argument("featured", type=boolean, location="args")
23
25
 
24
26
  @staticmethod
25
27
  def parse_filters(topics, args):
@@ -38,6 +40,8 @@ class TopicApiParser(ModelApiParser):
38
40
  topics = topics.filter(spatial__zones=args["geozone"])
39
41
  if args.get("granularity"):
40
42
  topics = topics.filter(spatial__granularity=args["granularity"])
43
+ if args.get("featured") is not None:
44
+ topics = topics.filter(featured=args["featured"])
41
45
  if args.get("organization"):
42
46
  if not ObjectId.is_valid(args["organization"]):
43
47
  api.abort(400, "Organization arg must be an identifier")
udata/harvest/csv.py CHANGED
@@ -1,4 +1,4 @@
1
- from udata.frontend import csv
1
+ from udata.core import csv
2
2
 
3
3
  from .models import HarvestSource
4
4
 
@@ -0,0 +1,15 @@
1
+ """
2
+ This migration updates Topic.featured to False when it is None.
3
+ """
4
+
5
+ import logging
6
+
7
+ from udata.models import Topic
8
+
9
+ log = logging.getLogger(__name__)
10
+
11
+
12
+ def migrate(db):
13
+ log.info("Processing topics...")
14
+ count = Topic.objects(featured__exists=False).update(featured=False)
15
+ log.info(f"\tConverted {count} topics from `featured=None` to `featured=False`")
udata/routing.py CHANGED
@@ -12,6 +12,7 @@ from udata.core.dataservices.models import Dataservice
12
12
  from udata.core.spatial.models import GeoZone
13
13
  from udata.i18n import ISO_639_1_CODES
14
14
  from udata.mongo import db
15
+ from udata.uris import endpoint_for
15
16
 
16
17
 
17
18
  class LazyRedirect(object):
@@ -247,3 +248,5 @@ def init_app(app):
247
248
  app.url_map.converters["territory"] = TerritoryConverter
248
249
  app.url_map.converters["contact_point"] = ContactPointConverter
249
250
  app.url_map.converters["report"] = ReportConverter
251
+
252
+ app.jinja_env.globals["endpoint_for"] = endpoint_for
@@ -4,17 +4,19 @@
4
4
  {% block body %}
5
5
  <p style="margin: 0;padding: 0;">
6
6
  {{ _(
7
- 'Your account (%(user_email)s) has been inactive for %(inactivity_years)d years or more.',
7
+ 'We have noticed that your account associated to (%(user_email)s) has been inactive for %(inactivity_years)d years or more'
8
+ ' on %(site)s, the open platform for public data.',
8
9
  user_email=user.email,
9
- inactivity_years=config.YEARS_OF_INACTIVITY_BEFORE_DELETION
10
+ inactivity_years=config.YEARS_OF_INACTIVITY_BEFORE_DELETION,
11
+ site=config.SITE_TITLE
10
12
  )
11
13
  }}
12
14
  </p>
13
15
  <br/>
14
16
  <p style="margin: 0;padding: 0;"><b>
15
17
  {{ _(
16
- 'If you want to keep your account, please log in with your account on %(site)s.',
17
- site=config.SITE_TITLE
18
+ 'If you want to keep your account, please log in with your account on %(home)s.',
19
+ home=endpoint_for("site.home_redirect", "api.site", _external=True)
18
20
  )
19
21
  }}
20
22
  </b></p>
@@ -26,4 +28,13 @@
26
28
  )
27
29
  }}
28
30
  </p>
31
+ <br/>
32
+ <p style="margin: 0;padding: 0;">
33
+ {{ _(
34
+ 'This account is not tied to your other administration accounts and '
35
+ 'you can always re-create an account on the %(site)s platform if necessary.',
36
+ site=config.SITE_TITLE
37
+ )
38
+ }}
39
+ </p>
29
40
  {% endblock %}
@@ -2,15 +2,17 @@
2
2
 
3
3
  {% block body %}
4
4
  {{ _(
5
- 'Your account (%(user_email)s) has been inactive for %(inactivity_years)d years or more.',
5
+ 'We have noticed that your account associated to (%(user_email)s) has been inactive for %(inactivity_years)d years or more'
6
+ ' on %(site)s, the open platform for public data.',
6
7
  user_email=user.email,
7
- inactivity_years=config.YEARS_OF_INACTIVITY_BEFORE_DELETION
8
+ inactivity_years=config.YEARS_OF_INACTIVITY_BEFORE_DELETION,
9
+ site=config.SITE_TITLE
8
10
  )
9
11
  }}
10
12
 
11
13
  {{ _(
12
- 'If you want to keep your account, please log in with your account on %(site)s.',
13
- site=config.SITE_TITLE
14
+ 'If you want to keep your account, please log in with your account on %(home)s.',
15
+ home=endpoint_for("site.home_redirect", "api.site", _external=True)
14
16
  )
15
17
  }}
16
18
 
@@ -19,4 +21,11 @@
19
21
  notify_delay=config.DAYS_BEFORE_ACCOUNT_INACTIVITY_NOTIFY_DELAY
20
22
  )
21
23
  }}
24
+ {{
25
+ _(
26
+ 'This account is not tied to your other administration accounts and '
27
+ 'you can always re-create an account on the %(site)s platform if necessary',
28
+ site=config.SITE_TITLE
29
+ )
30
+ }}
22
31
  {% endblock %}
@@ -1,12 +1,15 @@
1
1
  from datetime import datetime
2
+ from io import StringIO
2
3
 
3
4
  import pytest
4
5
  from flask import url_for
5
6
 
6
7
  import udata.core.organization.constants as org_constants
8
+ from udata.core import csv
7
9
  from udata.core.badges.factories import badge_factory
8
10
  from udata.core.badges.signals import on_badge_added, on_badge_removed
9
- from udata.core.dataset.factories import DatasetFactory
11
+ from udata.core.dataset.factories import DatasetFactory, ResourceFactory
12
+ from udata.core.discussions.factories import DiscussionFactory
10
13
  from udata.core.organization.factories import OrganizationFactory
11
14
  from udata.core.reuse.factories import ReuseFactory
12
15
  from udata.core.user.factories import AdminFactory, UserFactory
@@ -22,6 +25,7 @@ from udata.tests.helpers import (
22
25
  assert410,
23
26
  assert_emit,
24
27
  assert_not_emit,
28
+ assert_starts_with,
25
29
  assert_status,
26
30
  )
27
31
  from udata.utils import faker
@@ -967,3 +971,96 @@ class OrganizationContactPointsAPITest:
967
971
 
968
972
  assert response.json["data"][0]["name"] == data["name"]
969
973
  assert response.json["data"][0]["email"] == data["email"]
974
+
975
+
976
+ class OrganizationCsvExportsTest:
977
+ modules = []
978
+
979
+ def test_datasets_csv(self, api):
980
+ org = OrganizationFactory()
981
+ [DatasetFactory(organization=org, resources=[ResourceFactory()]) for _ in range(3)]
982
+
983
+ response = api.get(url_for("api.organization_datasets_csv", org=org))
984
+
985
+ assert200(response)
986
+ assert response.mimetype == "text/csv"
987
+ assert response.charset == "utf-8"
988
+
989
+ csvfile = StringIO(response.data.decode("utf-8"))
990
+ reader = csv.get_reader(csvfile)
991
+ header = next(reader)
992
+
993
+ assert header[0] == "id"
994
+ assert "title" in header
995
+ assert "url" in header
996
+ assert "description" in header
997
+ assert "created_at" in header
998
+ assert "last_modified" in header
999
+ assert "tags" in header
1000
+ assert "metric.reuses" in header
1001
+
1002
+ def test_resources_csv(self, api):
1003
+ org = OrganizationFactory()
1004
+ datasets = [
1005
+ DatasetFactory(organization=org, resources=[ResourceFactory(), ResourceFactory()])
1006
+ for _ in range(3)
1007
+ ]
1008
+ not_org_dataset = DatasetFactory(resources=[ResourceFactory()])
1009
+ hidden_dataset = DatasetFactory(private=True)
1010
+
1011
+ response = api.get(url_for("api.organization_datasets_resources_csv", org=org))
1012
+
1013
+ assert200(response)
1014
+ assert response.mimetype == "text/csv"
1015
+ assert response.charset == "utf-8"
1016
+
1017
+ csvfile = StringIO(response.data.decode("utf-8"))
1018
+ reader = csv.get_reader(csvfile)
1019
+ header = next(reader)
1020
+
1021
+ assert header[0] == "dataset.id"
1022
+ assert "dataset.title" in header
1023
+ assert "dataset.url" in header
1024
+ assert "title" in header
1025
+ assert "filetype" in header
1026
+ assert "url" in header
1027
+ assert "created_at" in header
1028
+ assert "modified" in header
1029
+ assert "downloads" in header
1030
+
1031
+ resource_id_index = header.index("id")
1032
+
1033
+ rows = list(reader)
1034
+ ids = [(row[0], row[resource_id_index]) for row in rows]
1035
+
1036
+ assert len(rows) == sum(len(d.resources) for d in datasets)
1037
+ for dataset in datasets:
1038
+ for resource in dataset.resources:
1039
+ assert (str(dataset.id), str(resource.id)) in ids
1040
+
1041
+ dataset_ids = set(row[0] for row in rows)
1042
+ assert str(hidden_dataset.id) not in dataset_ids
1043
+ assert str(not_org_dataset.id) not in dataset_ids
1044
+
1045
+ def test_discussions_csv_content_empty(self, api):
1046
+ organization = OrganizationFactory()
1047
+ response = api.get(url_for("api.organization_discussions_csv", org=organization))
1048
+ assert200(response)
1049
+
1050
+ assert response.data.decode("utf8") == (
1051
+ '"id";"user";"subject";"subject_class";"subject_id";"title";"size";"participants";'
1052
+ '"messages";"created";"closed";"closed_by";"closed_by_id";"closed_by_organization";'
1053
+ '"closed_by_organization_id"\r\n'
1054
+ )
1055
+
1056
+ def test_discussions_csv_content_filled(self, api):
1057
+ organization = OrganizationFactory()
1058
+ dataset = DatasetFactory(organization=organization)
1059
+ user = UserFactory(first_name="John", last_name="Snow")
1060
+ discussion = DiscussionFactory(subject=dataset, user=user)
1061
+ response = api.get(url_for("api.organization_discussions_csv", org=organization))
1062
+ assert200(response)
1063
+
1064
+ headers, data = response.data.decode("utf-8").strip().split("\r\n")
1065
+ expected = '"{discussion.id}";"{discussion.user}"'
1066
+ assert_starts_with(data, expected.format(discussion=discussion))
@@ -28,12 +28,13 @@ class TopicsAPITest(APITestCase):
28
28
  private_topic = TopicFactory(private=True)
29
29
  geozone_topic = TopicFactory(spatial=SpatialCoverageFactory(zones=[paca.id]))
30
30
  granularity_topic = TopicFactory(spatial=SpatialCoverageFactory(granularity="country"))
31
+ featured_topic = TopicFactory(featured=True)
31
32
  owner_topic = TopicFactory(owner=owner)
32
33
  org_topic = TopicFactory(organization=org)
33
34
 
34
35
  response = self.get(url_for("api.topics"))
35
36
  self.assert200(response)
36
- self.assertEqual(len(response.json["data"]), 7)
37
+ self.assertEqual(len(response.json["data"]), 8)
37
38
 
38
39
  response = self.get(url_for("api.topics", q="topic-for"))
39
40
  self.assert200(response)
@@ -69,7 +70,7 @@ class TopicsAPITest(APITestCase):
69
70
 
70
71
  response = self.get(url_for("api.topics", include_private="true"))
71
72
  self.assert200(response)
72
- self.assertEqual(len(response.json["data"]), 7)
73
+ self.assertEqual(len(response.json["data"]), 8)
73
74
  # we're not logged in, so the private topic does not appear
74
75
  self.assertNotIn(str(private_topic.id), [t["id"] for t in response.json["data"]])
75
76
 
@@ -83,6 +84,16 @@ class TopicsAPITest(APITestCase):
83
84
  self.assertEqual(len(response.json["data"]), 1)
84
85
  self.assertIn(str(granularity_topic.id), [t["id"] for t in response.json["data"]])
85
86
 
87
+ response = self.get(url_for("api.topics", featured="true"))
88
+ self.assert200(response)
89
+ self.assertEqual(len(response.json["data"]), 1)
90
+ self.assertIn(str(featured_topic.id), [t["id"] for t in response.json["data"]])
91
+
92
+ response = self.get(url_for("api.topics", featured="false"))
93
+ self.assert200(response)
94
+ self.assertEqual(len(response.json["data"]), 7)
95
+ self.assertNotIn(str(featured_topic.id), [t["id"] for t in response.json["data"]])
96
+
86
97
  response = self.get(url_for("api.topics", owner=owner.id))
87
98
  self.assert200(response)
88
99
  self.assertEqual(len(response.json["data"]), 1)
@@ -11,6 +11,7 @@ from udata.core.dataset.factories import (
11
11
  )
12
12
  from udata.core.dataset.models import ResourceMixin
13
13
  from udata.core.organization.factories import Member, OrganizationFactory
14
+ from udata.core.reuse.factories import ReuseFactory
14
15
  from udata.models import Dataset, db
15
16
  from udata.tests.api import APITestCase
16
17
  from udata.tests.helpers import assert_not_emit
@@ -43,6 +44,21 @@ class DatasetAPIV2Test(APITestCase):
43
44
  assert data["data"][1]["community_resources"]["total"] == 0
44
45
  assert data["data"][0]["community_resources"]["total"] == 0
45
46
 
47
+ def test_filter_by_reuse(self):
48
+ DatasetFactory(title="Dataset without reuse")
49
+
50
+ dataset_with_reuse = DatasetFactory(title="Dataset with reuse")
51
+ archived_dataset_with_reuse = DatasetFactory(
52
+ title="Dataset with reuse", archived=datetime(2022, 2, 22)
53
+ )
54
+ reuse = ReuseFactory(datasets=[dataset_with_reuse.id, archived_dataset_with_reuse.id])
55
+
56
+ response = self.get(url_for("apiv2.datasets", reuse=reuse.id))
57
+ self.assert200(response)
58
+ data = response.json
59
+ assert len(data["data"]) == 1
60
+ assert data["data"][0]["title"] == dataset_with_reuse.title
61
+
46
62
  def test_get_dataset(self):
47
63
  resources = [ResourceFactory() for _ in range(2)]
48
64
  dataset = DatasetFactory(resources=resources)