django-gisserver 1.5.0__py3-none-any.whl → 2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.0.dist-info}/METADATA +14 -4
- django_gisserver-2.0.dist-info/RECORD +66 -0
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.0.dist-info}/WHEEL +1 -1
- gisserver/__init__.py +1 -1
- gisserver/compat.py +23 -0
- gisserver/conf.py +7 -0
- gisserver/db.py +56 -47
- gisserver/exceptions.py +26 -2
- gisserver/extensions/__init__.py +4 -0
- gisserver/{parsers/fes20 → extensions}/functions.py +10 -4
- gisserver/extensions/queries.py +261 -0
- gisserver/features.py +220 -156
- gisserver/geometries.py +32 -37
- gisserver/management/__init__.py +0 -0
- gisserver/management/commands/__init__.py +0 -0
- gisserver/management/commands/loadgeojson.py +291 -0
- gisserver/operations/base.py +122 -308
- gisserver/operations/wfs20.py +423 -337
- gisserver/output/__init__.py +9 -48
- gisserver/output/base.py +178 -139
- gisserver/output/csv.py +65 -74
- gisserver/output/geojson.py +34 -35
- gisserver/output/gml32.py +254 -246
- gisserver/output/iters.py +207 -0
- gisserver/output/results.py +52 -26
- gisserver/output/stored.py +143 -0
- gisserver/output/utils.py +75 -170
- gisserver/output/xmlschema.py +85 -46
- gisserver/parsers/__init__.py +10 -10
- gisserver/parsers/ast.py +320 -0
- gisserver/parsers/fes20/__init__.py +13 -27
- gisserver/parsers/fes20/expressions.py +82 -38
- gisserver/parsers/fes20/filters.py +111 -43
- gisserver/parsers/fes20/identifiers.py +44 -26
- gisserver/parsers/fes20/lookups.py +144 -0
- gisserver/parsers/fes20/operators.py +331 -127
- gisserver/parsers/fes20/sorting.py +104 -33
- gisserver/parsers/gml/__init__.py +12 -11
- gisserver/parsers/gml/base.py +5 -2
- gisserver/parsers/gml/geometries.py +69 -35
- gisserver/parsers/ows/__init__.py +25 -0
- gisserver/parsers/ows/kvp.py +190 -0
- gisserver/parsers/ows/requests.py +158 -0
- gisserver/parsers/query.py +175 -0
- gisserver/parsers/values.py +26 -0
- gisserver/parsers/wfs20/__init__.py +37 -0
- gisserver/parsers/wfs20/adhoc.py +245 -0
- gisserver/parsers/wfs20/base.py +143 -0
- gisserver/parsers/wfs20/projection.py +103 -0
- gisserver/parsers/wfs20/requests.py +482 -0
- gisserver/parsers/wfs20/stored.py +192 -0
- gisserver/parsers/xml.py +249 -0
- gisserver/projection.py +357 -0
- gisserver/static/gisserver/index.css +12 -1
- gisserver/templates/gisserver/index.html +1 -1
- gisserver/templates/gisserver/service_description.html +2 -2
- gisserver/templates/gisserver/wfs/2.0.0/get_capabilities.xml +9 -9
- gisserver/templates/gisserver/wfs/feature_field.html +2 -2
- gisserver/templatetags/gisserver_tags.py +20 -0
- gisserver/types.py +322 -259
- gisserver/views.py +198 -56
- django_gisserver-1.5.0.dist-info/RECORD +0 -54
- gisserver/parsers/base.py +0 -149
- gisserver/parsers/fes20/query.py +0 -285
- gisserver/parsers/tags.py +0 -102
- gisserver/queries/__init__.py +0 -37
- gisserver/queries/adhoc.py +0 -185
- gisserver/queries/base.py +0 -186
- gisserver/queries/projection.py +0 -240
- gisserver/queries/stored.py +0 -206
- gisserver/templates/gisserver/wfs/2.0.0/describe_stored_queries.xml +0 -20
- gisserver/templates/gisserver/wfs/2.0.0/list_stored_queries.xml +0 -14
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.0.dist-info}/LICENSE +0 -0
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.0.dist-info}/top_level.txt +0 -0
gisserver/output/iters.py
ADDED
@@ -0,0 +1,207 @@
+from __future__ import annotations
+
+import logging
+from collections.abc import Iterable
+from itertools import islice
+from typing import TypeVar
+
+from django.db import connections, models
+from lru import LRU
+
+M = TypeVar("M", bound=models.Model)
+
+DEFAULT_SQL_CHUNK_SIZE = 2000  # allow unit tests to alter this.
+
+logger = logging.getLogger(__name__)
+
+
+class CountingIterator(Iterable[M]):
+    """A simple iterator that counts how many results are given."""
+
+    def __init__(self, iterator: Iterable[M], max_results=0):
+        self._iterator = iterator
+        self._number_returned = 0
+        self._in_iterator = False
+        self._max_results = max_results
+        self._has_more = None
+
+    def __iter__(self):
+        # Count the number of returned items while reading them.
+        # Tried using map(itemgetter(0), zip(model_iter, count_iter)) but that's not faster.
+        self._in_iterator = True
+        try:
+            self._number_returned = 0
+            for instance in self._iterator:
+                if self._max_results and self._number_returned == self._max_results:
+                    self._has_more = True
+                    break
+                self._number_returned += 1
+                yield instance
+        finally:
+            if self._max_results and self._has_more is None:
+                self._has_more = False  # ignored the sentinel item
+            self._in_iterator = False
+
+    @property
+    def number_returned(self) -> int:
+        """Tell how many objects the iterator processed"""
+        if self._in_iterator:
+            raise RuntimeError("Can't read number of returned results during iteration")
+        return self._number_returned
+
+    @property
+    def has_more(self) -> bool | None:
+        return self._has_more
+
+
+class lru_dict(dict):
+    """A 'defaultdict' with LRU items for each value."""
+
+    def __init__(self, max_size):
+        super().__init__()
+        self.max_size = max_size
+
+    def __missing__(self, key):
+        logger.debug("Creating cache for prefetches of '%s'", key)
+        value = LRU(self.max_size)
+        self[key] = value
+        return value
+
+
+class ChunkedQuerySetIterator(Iterable[M]):
+    """An optimal strategy to perform ``prefetch_related()`` on large datasets.
+
+    It fetches data from the queryset in chunks,
+    and performs ``prefetch_related()`` behavior on each chunk.
+
+    Django's ``QuerySet.prefetch_related()`` works by loading the whole queryset into memory,
+    and performing an analysis of the related objects to fetch. When working on large datasets,
+    this is very inefficient as more memory is consumed. Instead, ``QuerySet.iterator()``
+    is preferred here as it returns instances while reading them. Nothing is stored in memory.
+    Hence, both approaches are fundamentally incompatible. This class performs a
+    mixed strategy: load a chunk, and perform prefetches for that particular batch.
+
+    As extra performance benefit, a local cache avoids prefetching the same records
+    again when the next chunk is analysed. It has a "least recently used" cache to avoid
+    flooding the caches when foreign keys constantly point to different unique objects.
+    """
+
+    def __init__(self, queryset: models.QuerySet, chunk_size=None, sql_chunk_size=None):
+        """
+        :param queryset: The queryset to iterate over, that has ``prefetch_related()`` data.
+        :param chunk_size: The size of each segment to analyse in-memory for related objects.
+        :param sql_chunk_size: The size of each segment to fetch from the database,
+            used when server-side cursors are not available. The default follows Django behavior.
+        """
+        self.queryset = queryset
+        self.sql_chunk_size = sql_chunk_size or DEFAULT_SQL_CHUNK_SIZE
+        self.chunk_size = chunk_size or self.sql_chunk_size
+        self._fk_caches = lru_dict(self.chunk_size // 2)
+        self._number_returned = 0
+        self._in_iterator = False
+
+    def __iter__(self):
+        # Using iter() ensures the ModelIterable is resumed with the next chunk.
+        self._number_returned = 0
+        self._in_iterator = True
+        try:
+            qs_iter = iter(self._get_queryset_iterator())
+
+            # Keep fetching chunks
+            while True:
+                instances = list(islice(qs_iter, self.chunk_size))
+                if not instances:
+                    break
+
+                # Perform prefetches on this chunk:
+                if self.queryset._prefetch_related_lookups:
+                    self._add_prefetches(instances)
+
+                # And return to parent loop
+                yield from instances
+                self._number_returned += len(instances)
+        finally:
+            self._in_iterator = False
+
+    def _get_queryset_iterator(self) -> Iterable:
+        """The body of queryset.iterator(), while circumventing prefetching."""
+        # The old code did return `self.queryset.iterator(chunk_size=self.sql_chunk_size)`
+        # However, Django 4 supports using prefetch_related() with iterator() in that scenario.
+        #
+        # This code is the core of Django's QuerySet.iterator() that only produces the
+        # old-style iteration, without any prefetches. Those are added by this class instead.
+        use_chunked_fetch = not connections[self.queryset.db].settings_dict.get(
+            "DISABLE_SERVER_SIDE_CURSORS"
+        )
+        iterable = self.queryset._iterable_class(
+            self.queryset, chunked_fetch=use_chunked_fetch, chunk_size=self.sql_chunk_size
+        )
+
+        yield from iterable
+
+    @property
+    def number_returned(self) -> int:
+        """Tell how many objects the iterator processed"""
+        if self._in_iterator:
+            raise RuntimeError("Can't read number of returned results during iteration")
+        return self._number_returned
+
+    def _add_prefetches(self, instances: list[M]):
+        """Merge the prefetched objects for this batch with the model instances."""
+        if self._fk_caches:
+            # Make sure prefetch_related_objects() doesn't have
+            # to fetch items again that infrequently changes.
+            all_restored = self._restore_caches(instances)
+            if all_restored:
+                logger.debug("Restored all prefetches from cache")
+                return
+
+        # Reuse the Django machinery for retrieving missing sub objects.
+        # and analyse the ForeignKey caches to allow faster prefetches next time
+        logger.debug("Perform additional prefetches for %d objects", len(instances))
+        models.prefetch_related_objects(instances, *self.queryset._prefetch_related_lookups)
+        self._persist_prefetch_cache(instances)
+
+    def _persist_prefetch_cache(self, instances):
+        """Store the prefetched data so it can be applied to the next batch"""
+        for instance in instances:
+            for lookup, obj in instance._state.fields_cache.items():
+                if obj is not None:
+                    cache = self._fk_caches[lookup]
+                    cache[obj.pk] = obj
+
+    def _restore_caches(self, instances) -> bool:
+        """Restore prefetched data to the new set of instances.
+        This avoids unneeded prefetching of the same ForeignKey relation.
+        """
+        if not instances:
+            return True
+        if not self._fk_caches:
+            return False
+
+        all_restored = True
+
+        for lookup, cache in self._fk_caches.items():
+            field = instances[0]._meta.get_field(lookup)
+            if not hasattr(field, "attname"):
+                logger.debug(
+                    "Unable to restore prefetches for '%s' (%s)", lookup, field.__class__.__name__
+                )
+                # Retrieving prefetches from ForeignObjectRel wouldn't work here.
+                # Let standard prefetch_related() take over.
+                all_restored = False
+                continue
+
+            logger.debug("Restoring prefetches for '%s'", lookup)
+            for instance in instances:
+                id_value = getattr(instance, field.attname)
+                if id_value is None:
+                    continue
+
+                obj = cache.get(id_value, None)
+                if obj is not None:
+                    instance._state.fields_cache[lookup] = obj
+                else:
+                    all_restored = False
+
+        return all_restored
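
The two iterator helpers above are plain Python classes, so they can also be exercised outside the WFS rendering path. A minimal usage sketch, assuming an existing Django project with a hypothetical Restaurant model whose "city" field is a ForeignKey (the model, queryset and numbers are illustrative, not part of django-gisserver):

# Hypothetical usage of the helpers added in gisserver/output/iters.py.
# "Restaurant" and its "city" ForeignKey are assumed to exist in the host project.
from gisserver.output.iters import ChunkedQuerySetIterator, CountingIterator


def export_restaurant_names(max_results: int = 1000) -> list[str]:
    queryset = Restaurant.objects.prefetch_related("city")

    # Resolve prefetches per chunk instead of loading the whole queryset into memory.
    chunked = ChunkedQuerySetIterator(queryset, chunk_size=500)

    # Count what is consumed, and stop once max_results items were yielded.
    counting = CountingIterator(iter(chunked), max_results=max_results)
    names = [restaurant.name for restaurant in counting]

    # The counters only become readable after iteration has finished;
    # reading them mid-iteration raises RuntimeError (see number_returned above).
    assert counting.number_returned == len(names)
    if counting.has_more:
        print("max_results truncated the stream; more rows are available")
    return names

Note that has_more stays None when no max_results cap was given, so callers should treat it as a tri-state value.
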
gisserver/output/results.py
CHANGED
@@ -20,10 +20,13 @@ from gisserver import conf
 from gisserver.features import FeatureType
 from gisserver.geometries import BoundingBox
 
-from .
+from .iters import ChunkedQuerySetIterator, CountingIterator
 
 if typing.TYPE_CHECKING:
-    from gisserver.
+    from gisserver.projection import FeatureProjection, QueryExpression
+
+
+CALCULATE = -9999999
 
 
 class SimpleFeatureCollection:
@@ -36,21 +39,29 @@
     def __init__(
         self,
         source_query: QueryExpression,
-
+        feature_types: list[FeatureType],
         queryset: models.QuerySet,
         start: int,
         stop: int,
+        number_matched: int | None = CALCULATE,
     ):
         self.source_query = source_query
-        self.
+        self.feature_types = feature_types
         self.queryset = queryset
         self.start = start
        self.stop = stop
+        self._number_matched = number_matched
 
         self._result_cache = None
         self._result_iterator = None
         self._has_more = None
 
+        # Tell that is a resultType=hits request.
+        # Typically, start and stop are 0. However, for resultType=hits with count,
+        # that does not apply. Instead, it detects whether the known amount is already provided.
+        # Detecting that queryset.none() is provided won't work, as that can be used by IdOperator too.
+        self._is_hits_request = number_matched is not None and number_matched != CALCULATE
+
     def __iter__(self) -> Iterable[models.Model]:
         """Iterate through all results.
 
@@ -58,7 +69,7 @@
         the results can either be cached first, or be streamed without caching.
         This picks the best-performance scenario in most cases.
         """
-        if self.
+        if self._is_hits_request:
            self._result_cache = []
 
         if self._result_cache is not None:
@@ -83,7 +94,7 @@
         if self._result_cache is not None:
             # In case the results were read already, reuse that.
             return iter(self._result_cache)
-        elif self.
+        elif self._is_hits_request:
             # resulttype=hits
             return iter([])
         else:
@@ -129,8 +140,7 @@
         if self._result_iterator is not None:
             raise RuntimeError("Results for feature collection are read twice.")
 
-        if self.
-            # resulttype=hits
+        if self._is_hits_request:
             self._result_cache = []
         else:
             # This still allows prefetch_related() to work,
@@ -171,8 +181,8 @@
     @cached_property
     def number_returned(self) -> int:
         """Return the number of results for this page."""
-        if self.
-            return 0
+        if self._is_hits_request:
+            return 0
         elif self._result_iterator is not None:
             # When requesting the data after the fact, results are counted.
             return self._result_iterator.number_returned
@@ -183,9 +193,19 @@
         self.fetch_results()
         return len(self._result_cache)
 
-    @
+    @property
    def number_matched(self) -> int:
         """Return the total number of matches across all pages."""
+        if self._is_hits_request:
+            if self.stop:
+                # resulttype=hits&COUNT=n should minimize how many are "matched".
+                return min(self._number_matched, self.stop - self.start)
+            else:
+                return self._number_matched
+        elif self._number_matched != CALCULATE:
+            # Return previously cached result
+            return self._number_matched
+
         if self._is_surely_last_page:
             # For resulttype=results, an expensive COUNT query can be avoided
             # when this is the first and only page or the last page.
@@ -197,17 +217,24 @@
             # Otherwise, it becomes SELECT COUNT(*) FROM (SELECT AsGML(..), ...)
             key: value
             for key, value in qs.query.annotations.items()
-            if not key.startswith("_as_")
+            if not key.startswith("_as_") and not key.startswith("_As")  # AsGML / AsEWKT
         }
         if clean_annotations != qs.query.annotations:
             qs = self.queryset.all()  # make a clone to allow editing
             qs.query.annotations = clean_annotations
 
-        return
+        # Calculate, cache and return
+        self._number_matched = qs.count()
+        return self._number_matched
 
     @property
     def _is_surely_last_page(self):
         """Return true when it's totally clear this is the last page."""
+        if self.start == self.stop == 0:
+            return True  # hits request without count
+        elif self._is_hits_request:
+            return False
+
         # Optimization to avoid making COUNT() queries when we can already know the answer.
         if self.stop == math.inf:
             return True  # Infinite page requested
@@ -226,27 +253,30 @@
         # For GeoJSON output, the iterator was read first, and `number_returned` is already filled in.
         # For GML output, the pagination details are requested first, and will fetch all data.
         # Hence, reading `number_returned` here can be quite an intensive operation.
-        page_size = self.stop - self.start
+        page_size = self.stop - self.start
         return page_size and (self.number_returned < page_size or self._has_more is False)
 
     @property
     def has_next(self):
-        if self.stop == math.inf:
+        if self.stop == math.inf or (self.start == self.stop == 0):
             return False
         elif self._has_more is not None:
             return self._has_more  # did page+1 record check, answer is known.
         elif self._is_surely_last_page:
             return False  # Fewer results than expected, answer is known.
 
-
-
+        if self._is_hits_request:
+            return self.stop <= self._number_matched
+        else:
+            # This will perform an slow COUNT() query...
+            return self.stop < self.number_matched
 
     @cached_property
     def projection(self) -> FeatureProjection:
         """Provide the projection to render these results with."""
         # Note this attribute would technically be part of the 'query' object,
-        # but since the projection needs to be
-        return self.source_query.get_projection(
+        # but since the projection needs to be calculated once, it's stored here for convenience.
+        return self.source_query.get_projection()
 
     def get_bounding_box(self) -> BoundingBox:
         """Determine bounding box of all items."""
@@ -255,11 +285,10 @@
         # Start with an obviously invalid bbox,
         # which corrects at the first extend_to_geometry call.
         bbox = BoundingBox(math.inf, math.inf, -math.inf, -math.inf)
-
-
-        ).child
+
+        # Allow the geometry to exist in a dotted relationship.
         for instance in self:
-            geometry_value =
+            geometry_value = self.projection.get_main_geometry_value(instance)
             if geometry_value is None:
                 continue
 
@@ -268,9 +297,6 @@
         return bbox
 
 
-CALCULATE = -9999999
-
-
 class FeatureCollection:
     """WFS object that holds the result type for GetFeature.
     This object type is defined in the WFS spec.

gisserver/output/stored.py
ADDED
@@ -0,0 +1,143 @@
+"""Outputting XML for the stored query logic."""
+
+from __future__ import annotations
+
+from io import StringIO
+from xml.etree.ElementTree import Element, tostring
+
+from gisserver.extensions.queries import QueryExpressionText, StoredQueryDescription
+from gisserver.output.utils import attr_escape, tag_escape
+from gisserver.parsers.xml import xmlns
+
+from .base import XmlOutputRenderer
+
+
+class ListStoredQueriesRenderer(XmlOutputRenderer):
+    """Rendering for the ``<wfs:ListStoredQueriesResponse>``."""
+
+    # XML Namespaces to include by default
+    xml_namespaces = {
+        xmlns.wfs20: "",
+        xmlns.xs: "xs",
+        xmlns.xsi: "xsi",
+    }
+
+    def __init__(self, operation, query_descriptions: list[StoredQueryDescription]):
+        """Take the list of stored queries to render."""
+        super().__init__(operation)
+        self.all_feature_types = operation.view.get_bound_feature_types()
+        self.query_descriptions = query_descriptions
+
+    def render_stream(self):
+        self.output = StringIO()
+        self.output.write(
+            '<?xml version="1.0" encoding="UTF-8"?>\n'
+            f"<ListStoredQueriesResponse"
+            f" {self.render_xmlns_attributes()}"
+            f' xsi:schemaLocation="http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">\n'
+        )
+        for query_description in self.query_descriptions:
+            self.write_query(query_description)
+
+        self.output.write("</ListStoredQueriesResponse>\n")
+        return self.output.getvalue()
+
+    def write_query(self, query_description: StoredQueryDescription):
+        self.output.write(
+            f' <StoredQuery id="{query_description.id}">\n'
+            f" <Title>{tag_escape(query_description.title)}</Title>\n"
+        )
+
+        for expression in query_description.expressions:
+            return_types = expression.return_feature_types or self.all_feature_types
+            for return_type in return_types:
+                feature_qname = self.feature_to_qname(return_type)
+                self.output.write(
+                    f" <ReturnFeatureType>{tag_escape(feature_qname)}</ReturnFeatureType>\n"
+                )
+
+        self.output.write(" </StoredQuery>\n")
+
+
+class DescribeStoredQueriesRenderer(XmlOutputRenderer):
+    """Rendering for the ``<wfs:DescribeStoredQueriesResponse>``."""
+
+    # XML Namespaces to include by default
+    xml_namespaces = {
+        xmlns.wfs20: "",
+        xmlns.xs: "xs",
+        xmlns.xsi: "xsi",
+    }
+
+    def __init__(self, operation, query_descriptions: list[StoredQueryDescription]):
+        """Take the list of stored queries to render."""
+        super().__init__(operation)
+        self.all_feature_types = operation.view.get_bound_feature_types()
+        self.query_descriptions = query_descriptions
+
+    def render_stream(self):
+        self.output = StringIO()
+        self.output.write(
+            '<?xml version="1.0" encoding="UTF-8"?>\n'
+            f"<DescribeStoredQueriesResponse"
+            f" {self.render_xmlns_attributes()}"
+            f' xsi:schemaLocation="http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">\n'
+        )
+
+        for query_description in self.query_descriptions:
+            self.write_description(query_description)
+
+        self.output.write("</DescribeStoredQueriesResponse>\n")
+        return self.output.getvalue()
+
+    def write_description(self, query_description: StoredQueryDescription):
+        """Write the stored query description."""
+        self.output.write(
+            f'<StoredQueryDescription id="{attr_escape(query_description.id)}">\n'
+            f" <Title>{tag_escape(query_description.title)}</Title>\n"
+            f" <Abstract>{tag_escape(query_description.abstract)}</Abstract>\n"
+        )
+
+        # Declare parameters
+        for name, xsd_type in query_description.parameters.items():
+            type_qname = self.to_qname(xsd_type)
+            self.output.write(f' <Parameter name="{attr_escape(name)}" type="{type_qname}"/>\n')
+
+        # The QueryExpressionText nodes allow code per return type.
+        for expression in query_description.expressions:
+            self.render_expression(expression)
+
+        self.output.write("</StoredQueryDescription>\n")
+
+    def render_expression(self, expression: QueryExpressionText):
+        """Render the 'QueryExpressionText' node (no body content for now)."""
+        is_private = "true" if expression.is_private else "false"
+        if expression.return_feature_types is None:
+            # for GetFeatureById
+            types = " ".join(self.feature_to_qname(ft) for ft in self.all_feature_types)
+        else:
+            types = " ".join(
+                self.feature_to_qname(return_type)
+                for return_type in expression.return_feature_types
+            )
+
+        if expression.is_private or not expression.implementation_text:
+            implementation_text = ""
+        elif isinstance(expression.implementation_text, Element):
+            # XML serialization (will recreate namespaces)
+            default_namespace = next(
+                (ns for ns, prefix in self.app_namespaces.items() if prefix == ""), None
+            )
+            implementation_text = tostring(
+                expression.implementation_text,
+                xml_declaration=False,
+                default_namespace=default_namespace,
+            )
+        else:
+            # Some raw content (e.g. language="python")
+            implementation_text = tag_escape(expression.implementation_text)
+
+        self.output.write(
+            f' <QueryExpressionText isPrivate="{is_private}" language="{expression.language}"'
+            f' returnFeatureTypes="{types}">{implementation_text}</QueryExpressionText>\n'
+        )
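
The renderers above back the standard WFS 2.0 ListStoredQueries and DescribeStoredQueries operations. A client-side sketch of consuming that output with only the standard library follows; the endpoint URL is a placeholder for wherever the WFS view is mounted in a host project:

# Client-side sketch: fetch the ListStoredQueriesResponse that the renderer above writes.
# The endpoint URL is a placeholder; adjust it to the URL your WFS view is served on.
from urllib.parse import urlencode
from urllib.request import urlopen
from xml.etree import ElementTree

WFS_NS = "http://www.opengis.net/wfs/2.0"


def list_stored_query_ids(endpoint: str = "https://example.org/wfs/") -> list[str]:
    params = urlencode({"SERVICE": "WFS", "VERSION": "2.0.0", "REQUEST": "ListStoredQueries"})
    with urlopen(f"{endpoint}?{params}") as response:
        tree = ElementTree.parse(response)
    # Each <StoredQuery id="..."> element carries the identifier that a GetFeature
    # request can reference via STOREDQUERY_ID (e.g. urn:ogc:def:query:OGC-WFS::GetFeatureById).
    return [node.attrib["id"] for node in tree.getroot().findall(f"{{{WFS_NS}}}StoredQuery")]

The same request pattern works for DescribeStoredQueries; the parameter declarations and returnFeatureTypes details then appear under each StoredQueryDescription element.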