udata 10.3.2.dev34939__py2.py3-none-any.whl → 10.3.2.dev34982__py2.py3-none-any.whl
This diff compares the content of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only.
- udata/{frontend → core}/csv.py +1 -1
- udata/core/dataservices/csv.py +1 -1
- udata/core/dataset/api.py +11 -0
- udata/core/dataset/csv.py +1 -1
- udata/core/dataset/tasks.py +1 -2
- udata/core/discussions/csv.py +1 -1
- udata/core/organization/api.py +40 -0
- udata/core/organization/csv.py +1 -1
- udata/core/reuse/csv.py +1 -1
- udata/core/site/api.py +106 -1
- udata/core/tags/csv.py +1 -1
- udata/core/tags/views.py +1 -1
- udata/harvest/csv.py +1 -1
- udata/static/chunks/{10.8ca60413647062717b1e.js → 10.471164b2a9fe15614797.js} +3 -3
- udata/static/chunks/{10.8ca60413647062717b1e.js.map → 10.471164b2a9fe15614797.js.map} +1 -1
- udata/static/chunks/{11.b6f741fcc366abfad9c4.js → 11.83535504cd650ea08f65.js} +3 -3
- udata/static/chunks/{11.b6f741fcc366abfad9c4.js.map → 11.83535504cd650ea08f65.js.map} +1 -1
- udata/static/chunks/{13.2d06442dd9a05d9777b5.js → 13.d9c1735d14038b94c17e.js} +2 -2
- udata/static/chunks/{13.2d06442dd9a05d9777b5.js.map → 13.d9c1735d14038b94c17e.js.map} +1 -1
- udata/static/chunks/{17.e8e4caaad5cb0cc0bacc.js → 17.81c57c0dedf812e43013.js} +2 -2
- udata/static/chunks/{17.e8e4caaad5cb0cc0bacc.js.map → 17.81c57c0dedf812e43013.js.map} +1 -1
- udata/static/chunks/{19.f03a102365af4315f9db.js → 19.df16abde17a42033a7f8.js} +3 -3
- udata/static/chunks/{19.f03a102365af4315f9db.js.map → 19.df16abde17a42033a7f8.js.map} +1 -1
- udata/static/chunks/{8.778091d55cd8ea39af6b.js → 8.462bb3029de008497675.js} +2 -2
- udata/static/chunks/{8.778091d55cd8ea39af6b.js.map → 8.462bb3029de008497675.js.map} +1 -1
- udata/static/chunks/{9.033d7e190ca9e226a5d0.js → 9.07515e5187f475bce828.js} +3 -3
- udata/static/chunks/{9.033d7e190ca9e226a5d0.js.map → 9.07515e5187f475bce828.js.map} +1 -1
- udata/static/common.js +1 -1
- udata/static/common.js.map +1 -1
- udata/tests/api/test_organizations_api.py +98 -1
- udata/tests/apiv2/test_datasets.py +16 -0
- udata/tests/frontend/test_csv.py +1 -1
- udata/tests/site/test_site_csv_exports.py +464 -0
- udata/tests/test_tags.py +1 -1
- {udata-10.3.2.dev34939.dist-info → udata-10.3.2.dev34982.dist-info}/METADATA +3 -1
- {udata-10.3.2.dev34939.dist-info → udata-10.3.2.dev34982.dist-info}/RECORD +40 -39
- {udata-10.3.2.dev34939.dist-info → udata-10.3.2.dev34982.dist-info}/LICENSE +0 -0
- {udata-10.3.2.dev34939.dist-info → udata-10.3.2.dev34982.dist-info}/WHEEL +0 -0
- {udata-10.3.2.dev34939.dist-info → udata-10.3.2.dev34982.dist-info}/entry_points.txt +0 -0
- {udata-10.3.2.dev34939.dist-info → udata-10.3.2.dev34982.dist-info}/top_level.txt +0 -0
udata/tests/site/test_site_csv_exports.py
ADDED
@@ -0,0 +1,464 @@
+from datetime import datetime
+from io import StringIO
+
+import pytest
+from flask import url_for
+
+from udata.core import csv
+from udata.core.dataservices.factories import DataserviceFactory
+from udata.core.dataset import tasks as dataset_tasks
+from udata.core.dataset.factories import DatasetFactory, ResourceFactory
+from udata.core.organization.factories import OrganizationFactory
+from udata.core.reuse.factories import ReuseFactory
+from udata.harvest.models import HarvestSource
+from udata.tests.api import APITestCase
+
+
+class SiteCsvExportsTest(APITestCase):
+    modules = []
+
+    def test_datasets_csv(self):
+        self.app.config["EXPORT_CSV_MODELS"] = []
+        datasets = [DatasetFactory(resources=[ResourceFactory()]) for _ in range(5)]
+        archived_datasets = [DatasetFactory(archived=datetime.utcnow()) for _ in range(3)]
+        hidden_dataset = DatasetFactory(private=True)
+
+        response = self.get(url_for("api.site_datasets_csv"))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("created_at", header)
+        self.assertIn("last_modified", header)
+        self.assertIn("tags", header)
+        self.assertIn("metric.reuses", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        self.assertEqual(len(rows), len(datasets) + len(archived_datasets))
+        for dataset in datasets:
+            self.assertIn(str(dataset.id), ids)
+        self.assertNotIn(str(hidden_dataset.id), ids)
+
+    @pytest.mark.usefixtures("instance_path")
+    def test_datasets_csv_w_export_csv_feature(self):
+        # no export generated, 404
+        response = self.get(url_for("api.site_datasets_csv"))
+        self.assert404(response)
+
+        # generate the export
+        d = DatasetFactory()
+        self.app.config["EXPORT_CSV_DATASET_ID"] = d.id
+        dataset_tasks.export_csv()
+        response = self.get(url_for("api.site_datasets_csv"))
+        self.assertStatus(response, 302)
+        self.assertIn("export-dataset-", response.location)
+
+    def test_datasets_csv_with_filters(self):
+        """Should handle filtering but ignore paging"""
+        filtered_datasets = [
+            DatasetFactory(resources=[ResourceFactory()], tags=["selected"]) for _ in range(6)
+        ]
+        datasets = [DatasetFactory(resources=[ResourceFactory()]) for _ in range(3)]
+        hidden_dataset = DatasetFactory(private=True)
+
+        response = self.get(url_for("api.site_datasets_csv", tag="selected", page_size=3))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("created_at", header)
+        self.assertIn("last_modified", header)
+        self.assertIn("tags", header)
+        self.assertIn("metric.reuses", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        # Should ignore paging
+        self.assertEqual(len(rows), len(filtered_datasets))
+        # Should pass filter
+        for dataset in filtered_datasets:
+            self.assertIn(str(dataset.id), ids)
+        for dataset in datasets:
+            self.assertNotIn(str(dataset.id), ids)
+        self.assertNotIn(str(hidden_dataset.id), ids)
+
+    def test_resources_csv(self):
+        self.app.config["EXPORT_CSV_MODELS"] = []
+        datasets = [
+            DatasetFactory(resources=[ResourceFactory(), ResourceFactory()]) for _ in range(3)
+        ]
+        DatasetFactory()
+
+        response = self.get(url_for("api.site_datasets_resources_csv"))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "dataset.id")
+        self.assertIn("dataset.title", header)
+        self.assertIn("dataset.url", header)
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("filetype", header)
+        self.assertIn("url", header)
+        self.assertIn("created_at", header)
+        self.assertIn("modified", header)
+        self.assertIn("downloads", header)
+
+        resource_id_index = header.index("id")
+
+        rows = list(reader)
+        ids = [(row[0], row[resource_id_index]) for row in rows]
+
+        self.assertEqual(len(rows), sum(len(d.resources) for d in datasets))
+        for dataset in datasets:
+            for resource in dataset.resources:
+                self.assertIn((str(dataset.id), str(resource.id)), ids)
+
+    @pytest.mark.usefixtures("instance_path")
+    def test_resources_csv_w_export_csv_feature(self):
+        # no export generated, 404
+        response = self.get(url_for("api.site_datasets_resources_csv"))
+        self.assert404(response)
+
+        # generate the export
+        d = DatasetFactory()
+        self.app.config["EXPORT_CSV_DATASET_ID"] = d.id
+        dataset_tasks.export_csv()
+        response = self.get(url_for("api.site_datasets_resources_csv"))
+        self.assertStatus(response, 302)
+        self.assertIn("export-resource-", response.location)
+
+    def test_resources_csv_with_filters(self):
+        """Should handle filtering but ignore paging"""
+        filtered_datasets = [
+            DatasetFactory(resources=[ResourceFactory(), ResourceFactory()], tags=["selected"])
+            for _ in range(6)
+        ]
+        [DatasetFactory(resources=[ResourceFactory()]) for _ in range(3)]
+        DatasetFactory()
+
+        response = self.get(url_for("api.site_datasets_resources_csv", tag="selected", page_size=3))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "dataset.id")
+        self.assertIn("dataset.title", header)
+        self.assertIn("dataset.url", header)
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("filetype", header)
+        self.assertIn("url", header)
+        self.assertIn("created_at", header)
+        self.assertIn("modified", header)
+        self.assertIn("downloads", header)
+
+        resource_id_index = header.index("id")
+
+        rows = list(reader)
+        ids = [(row[0], row[resource_id_index]) for row in rows]
+
+        self.assertEqual(len(rows), sum(len(d.resources) for d in filtered_datasets))
+        for dataset in filtered_datasets:
+            for resource in dataset.resources:
+                self.assertIn((str(dataset.id), str(resource.id)), ids)
+
+    def test_organizations_csv(self):
+        self.app.config["EXPORT_CSV_MODELS"] = []
+        orgs = [OrganizationFactory() for _ in range(5)]
+        hidden_org = OrganizationFactory(deleted=datetime.utcnow())
+
+        response = self.get(url_for("api.site_organizations_csv"))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("name", header)
+        self.assertIn("description", header)
+        self.assertIn("created_at", header)
+        self.assertIn("last_modified", header)
+        self.assertIn("metric.datasets", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        self.assertEqual(len(rows), len(orgs))
+        for org in orgs:
+            self.assertIn(str(org.id), ids)
+        self.assertNotIn(str(hidden_org.id), ids)
+
+    @pytest.mark.usefixtures("instance_path")
+    def test_organizations_csv_w_export_csv_feature(self):
+        # no export generated, 404
+        response = self.get(url_for("api.site_organizations_csv"))
+        self.assert404(response)
+
+        # generate the export
+        d = DatasetFactory()
+        self.app.config["EXPORT_CSV_DATASET_ID"] = d.id
+        dataset_tasks.export_csv()
+        response = self.get(url_for("api.site_organizations_csv"))
+        self.assertStatus(response, 302)
+        self.assertIn("export-organization-", response.location)
+
+    def test_reuses_csv(self):
+        self.app.config["EXPORT_CSV_MODELS"] = []
+        reuses = [ReuseFactory(datasets=[DatasetFactory()]) for _ in range(5)]
+        archived_reuses = [ReuseFactory(archived=datetime.utcnow()) for _ in range(3)]
+        hidden_reuse = ReuseFactory(private=True)
+
+        response = self.get(url_for("api.site_reuses_csv"))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("created_at", header)
+        self.assertIn("last_modified", header)
+        self.assertIn("tags", header)
+        self.assertIn("metric.datasets", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        self.assertEqual(len(rows), len(reuses) + len(archived_reuses))
+        for reuse in reuses:
+            self.assertIn(str(reuse.id), ids)
+        self.assertNotIn(str(hidden_reuse.id), ids)
+
+    @pytest.mark.usefixtures("instance_path")
+    def test_reuses_csv_w_export_csv_feature(self):
+        # no export generated, 404
+        response = self.get(url_for("api.site_reuses_csv"))
+        self.assert404(response)
+
+        # generate the export
+        d = DatasetFactory()
+        self.app.config["EXPORT_CSV_DATASET_ID"] = d.id
+        dataset_tasks.export_csv()
+        response = self.get(url_for("api.site_reuses_csv"))
+        self.assertStatus(response, 302)
+        self.assertIn("export-reuse-", response.location)
+
+    def test_reuses_csv_with_filters(self):
+        """Should handle filtering but ignore paging or facets"""
+        filtered_reuses = [
+            ReuseFactory(datasets=[DatasetFactory()], tags=["selected"]) for _ in range(6)
+        ]
+        reuses = [ReuseFactory(datasets=[DatasetFactory()]) for _ in range(3)]
+        hidden_reuse = ReuseFactory(private=True)
+
+        response = self.get(url_for("api.site_reuses_csv", tag="selected", page_size=3))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("created_at", header)
+        self.assertIn("last_modified", header)
+        self.assertIn("tags", header)
+        self.assertIn("metric.datasets", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        # Should ignore paging
+        self.assertEqual(len(rows), len(filtered_reuses))
+        # Should pass filter
+        for reuse in filtered_reuses:
+            self.assertIn(str(reuse.id), ids)
+        for reuse in reuses:
+            self.assertNotIn(str(reuse.id), ids)
+        self.assertNotIn(str(hidden_reuse.id), ids)
+
+    def test_dataservices_csv(self):
+        self.app.config["EXPORT_CSV_MODELS"] = []
+        dataservices = [DataserviceFactory(datasets=[DatasetFactory()]) for _ in range(5)]
+        archived_dataservices = [
+            DataserviceFactory(archived_at=datetime.utcnow()) for _ in range(3)
+        ]
+        hidden_dataservice = DataserviceFactory(private=True)
+
+        response = self.get(url_for("api.site_dataservices_csv"))
+        print(response.json)
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("created_at", header)
+        self.assertIn("metadata_modified_at", header)
+        self.assertIn("tags", header)
+        self.assertIn("base_api_url", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        self.assertEqual(len(rows), len(dataservices) + len(archived_dataservices))
+        for dataservice in dataservices:
+            self.assertIn(str(dataservice.id), ids)
+        self.assertNotIn(str(hidden_dataservice.id), ids)
+
+    @pytest.mark.usefixtures("instance_path")
+    def test_dataservices_csv_w_export_csv_feature(self):
+        # no export generated, 404
+        response = self.get(url_for("api.site_dataservices_csv"))
+        self.assert404(response)
+
+        # generate the export
+        d = DatasetFactory()
+        self.app.config["EXPORT_CSV_DATASET_ID"] = d.id
+        dataset_tasks.export_csv()
+        response = self.get(url_for("api.site_dataservices_csv"))
+        self.assertStatus(response, 302)
+        self.assertIn("export-dataservice-", response.location)
+
+    def test_dataservices_csv_with_filters(self):
+        """Should handle filtering but ignore paging or facets"""
+        filtered_dataservices = [
+            DataserviceFactory(datasets=[DatasetFactory()], tags=["selected"]) for _ in range(6)
+        ]
+        dataservices = [DataserviceFactory(datasets=[DatasetFactory()]) for _ in range(3)]
+
+        response = self.get(url_for("api.site_dataservices_csv", tag="selected", page_size=3))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("title", header)
+        self.assertIn("description", header)
+        self.assertIn("created_at", header)
+        self.assertIn("metadata_modified_at", header)
+        self.assertIn("tags", header)
+        self.assertIn("base_api_url", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        # Should ignore paging
+        self.assertEqual(len(rows), len(filtered_dataservices))
+        # Should pass filter
+        for dataservice in filtered_dataservices:
+            self.assertIn(str(dataservice.id), ids)
+        for dataservice in dataservices:
+            self.assertNotIn(str(dataservice.id), ids)
+
+    def test_harvest_csv(self):
+        self.app.config["EXPORT_CSV_MODELS"] = []
+        organization = OrganizationFactory()
+        harvests = [
+            HarvestSource.objects.create(
+                backend="factory",
+                name="harvest",
+                url=f"https://example.com/{i}",
+                organization=organization,
+            )
+            for i in range(5)
+        ]
+        hidden_harvest = HarvestSource.objects.create(
+            backend="factory", url="https://example.com/deleted", deleted=datetime.utcnow()
+        )
+
+        response = self.get(url_for("api.site_harvests_csv"))
+
+        self.assert200(response)
+        self.assertEqual(response.mimetype, "text/csv")
+        self.assertEqual(response.charset, "utf-8")
+
+        csvfile = StringIO(response.data.decode("utf8"))
+        reader = csv.get_reader(csvfile)
+        header = next(reader)
+
+        self.assertEqual(header[0], "id")
+        self.assertIn("name", header)
+        self.assertIn("url", header)
+        self.assertIn("organization", header)
+        self.assertIn("organization_id", header)
+        self.assertIn("backend", header)
+        self.assertIn("created_at", header)
+        self.assertIn("validation", header)
+
+        rows = list(reader)
+        ids = [row[0] for row in rows]
+
+        self.assertEqual(len(rows), len(harvests))
+        for harvest in harvests:
+            self.assertIn(str(harvest.id), ids)
+        self.assertNotIn(str(hidden_harvest.id), ids)
+
+    @pytest.mark.usefixtures("instance_path")
+    def test_harvest_csv_w_export_csv_feature(self):
+        # no export generated, 404
+        response = self.get(url_for("api.site_harvests_csv"))
+        self.assert404(response)
+
+        # generate the export
+        d = DatasetFactory()
+        self.app.config["EXPORT_CSV_DATASET_ID"] = d.id
+        dataset_tasks.export_csv()
+        response = self.get(url_for("api.site_harvests_csv"))
+        self.assertStatus(response, 302)
+        self.assertIn("export-harvest-", response.location)
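Taken together, the tests above pin down a small contract for each site CSV endpoint: the response is text/csv in UTF-8, the first header column is id (dataset.id for the resources export), and once a pre-generated export exists the endpoint answers 302 with the export file in the Location header. The following minimal consumer sketch exercises that contract; the base URL and the /datasets.csv path are assumptions, since only the endpoint names (api.site_datasets_csv and friends) appear in this diff.

import csv
import io

import requests

BASE_URL = "https://example-udata-instance.org"  # hypothetical instance

# Follow redirects: with the pre-generated export feature enabled, the
# endpoint answers 302 and the Location header points at the export file.
response = requests.get(f"{BASE_URL}/datasets.csv", allow_redirects=True, timeout=30)
response.raise_for_status()
assert response.headers["Content-Type"].startswith("text/csv")

# Sniff the dialect rather than hard-coding a delimiter, since the exact
# CSV dialect udata writes is not visible in this diff.
dialect = csv.Sniffer().sniff(response.text[:4096])
reader = csv.reader(io.StringIO(response.text), dialect)

header = next(reader)
assert header[0] == "id"  # the same assertion the tests above make
dataset_ids = [row[0] for row in reader]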
udata/tests/test_tags.py
CHANGED
@@ -4,11 +4,11 @@ from io import StringIO
 import pytest
 from flask import url_for
 
+from udata.core import csv
 from udata.core.dataset.factories import DatasetFactory
 from udata.core.reuse.factories import ReuseFactory
 from udata.core.tags.models import Tag
 from udata.core.tags.tasks import count_tags
-from udata.frontend import csv
 from udata.tags import normalize, slug, tags_list
 from udata.tests.helpers import assert200
 
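For downstream plugins, the hunk above is the whole migration recipe: the csv helper module moved from udata.frontend to udata.core with the same API. A sketch of a backwards-compatible import follows; the fallback branch is an assumption for plugins that must support both releases, not something this diff requires.

try:
    from udata.core import csv  # from this release onwards
except ImportError:
    from udata.frontend import csv  # earlier udata releases

# Either way the helper API is unchanged, e.g.:
# reader = csv.get_reader(csvfile)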
{udata-10.3.2.dev34939.dist-info → udata-10.3.2.dev34982.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: udata
-Version: 10.3.2.dev34939
+Version: 10.3.2.dev34982
 Summary: Open data portal
 Home-page: https://github.com/opendatateam/udata
 Author: Opendata Team
@@ -143,6 +143,8 @@ It is collectively taken care of by members of the
 
 - feat(topics): add featured filter in API [#3301](https://github.com/opendatateam/udata/pull/3301)
 - Improve wording on account inactivity emails [#3304](https://github.com/opendatateam/udata/pull/3304)
+- Migrate CSV exports from udata-front to udata (requires adding redirection to Nginx) [#3306](https://github.com/opendatateam/udata/pull/3306)
+- Add reuse filter on datasets API [#3307](https://github.com/opendatateam/udata/pull/3307)
 
 ## 10.3.1 (2025-04-29)
 
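The changelog entry for #3306 notes that deployments must add an Nginx redirection from the legacy udata-front CSV URLs to the endpoints tested above. A post-deployment smoke check for that redirection, sketched in Python, follows; every path listed is an assumption, as the diff does not name the legacy routes.

import requests

BASE_URL = "https://example-udata-instance.org"  # hypothetical instance
LEGACY_PATHS = [  # hypothetical udata-front routes to be redirected
    "/datasets.csv",
    "/resources.csv",
    "/organizations.csv",
    "/reuses.csv",
    "/harvests.csv",
]

for path in LEGACY_PATHS:
    # Do not follow redirects: the point is to observe the redirect itself.
    resp = requests.get(f"{BASE_URL}{path}", allow_redirects=False, timeout=30)
    assert resp.status_code in (301, 302), f"{path} is not redirected"
    print(path, "->", resp.headers["Location"])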