django-gisserver 1.4.1__py3-none-any.whl → 2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {django_gisserver-1.4.1.dist-info → django_gisserver-2.0.dist-info}/METADATA +23 -13
- django_gisserver-2.0.dist-info/RECORD +66 -0
- {django_gisserver-1.4.1.dist-info → django_gisserver-2.0.dist-info}/WHEEL +1 -1
- gisserver/__init__.py +1 -1
- gisserver/compat.py +23 -0
- gisserver/conf.py +7 -0
- gisserver/db.py +63 -60
- gisserver/exceptions.py +47 -9
- gisserver/extensions/__init__.py +4 -0
- gisserver/{parsers/fes20 → extensions}/functions.py +11 -5
- gisserver/extensions/queries.py +261 -0
- gisserver/features.py +267 -240
- gisserver/geometries.py +34 -39
- gisserver/management/__init__.py +0 -0
- gisserver/management/commands/__init__.py +0 -0
- gisserver/management/commands/loadgeojson.py +291 -0
- gisserver/operations/base.py +129 -305
- gisserver/operations/wfs20.py +428 -336
- gisserver/output/__init__.py +10 -48
- gisserver/output/base.py +198 -143
- gisserver/output/csv.py +81 -85
- gisserver/output/geojson.py +63 -72
- gisserver/output/gml32.py +310 -281
- gisserver/output/iters.py +207 -0
- gisserver/output/results.py +71 -30
- gisserver/output/stored.py +143 -0
- gisserver/output/utils.py +75 -154
- gisserver/output/xmlschema.py +86 -47
- gisserver/parsers/__init__.py +10 -10
- gisserver/parsers/ast.py +320 -0
- gisserver/parsers/fes20/__init__.py +15 -11
- gisserver/parsers/fes20/expressions.py +89 -50
- gisserver/parsers/fes20/filters.py +111 -43
- gisserver/parsers/fes20/identifiers.py +44 -26
- gisserver/parsers/fes20/lookups.py +144 -0
- gisserver/parsers/fes20/operators.py +336 -128
- gisserver/parsers/fes20/sorting.py +107 -34
- gisserver/parsers/gml/__init__.py +12 -11
- gisserver/parsers/gml/base.py +6 -3
- gisserver/parsers/gml/geometries.py +69 -35
- gisserver/parsers/ows/__init__.py +25 -0
- gisserver/parsers/ows/kvp.py +190 -0
- gisserver/parsers/ows/requests.py +158 -0
- gisserver/parsers/query.py +175 -0
- gisserver/parsers/values.py +26 -0
- gisserver/parsers/wfs20/__init__.py +37 -0
- gisserver/parsers/wfs20/adhoc.py +245 -0
- gisserver/parsers/wfs20/base.py +143 -0
- gisserver/parsers/wfs20/projection.py +103 -0
- gisserver/parsers/wfs20/requests.py +482 -0
- gisserver/parsers/wfs20/stored.py +192 -0
- gisserver/parsers/xml.py +249 -0
- gisserver/projection.py +357 -0
- gisserver/static/gisserver/index.css +12 -1
- gisserver/templates/gisserver/index.html +1 -1
- gisserver/templates/gisserver/service_description.html +2 -2
- gisserver/templates/gisserver/wfs/2.0.0/get_capabilities.xml +11 -11
- gisserver/templates/gisserver/wfs/feature_field.html +2 -2
- gisserver/templatetags/gisserver_tags.py +20 -0
- gisserver/types.py +375 -258
- gisserver/views.py +206 -75
- django_gisserver-1.4.1.dist-info/RECORD +0 -53
- gisserver/parsers/base.py +0 -149
- gisserver/parsers/fes20/query.py +0 -275
- gisserver/parsers/tags.py +0 -102
- gisserver/queries/__init__.py +0 -34
- gisserver/queries/adhoc.py +0 -181
- gisserver/queries/base.py +0 -146
- gisserver/queries/stored.py +0 -205
- gisserver/templates/gisserver/wfs/2.0.0/describe_stored_queries.xml +0 -20
- gisserver/templates/gisserver/wfs/2.0.0/list_stored_queries.xml +0 -14
- {django_gisserver-1.4.1.dist-info → django_gisserver-2.0.dist-info}/LICENSE +0 -0
- {django_gisserver-1.4.1.dist-info → django_gisserver-2.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from collections.abc import Iterable
|
|
5
|
+
from itertools import islice
|
|
6
|
+
from typing import TypeVar
|
|
7
|
+
|
|
8
|
+
from django.db import connections, models
|
|
9
|
+
from lru import LRU
|
|
10
|
+
|
|
11
|
+
# Type variable for the model instances being iterated; bound to Django models.
M = TypeVar("M", bound=models.Model)

# Default database fetch size, mirroring Django's iterator() chunking.
DEFAULT_SQL_CHUNK_SIZE = 2000  # allow unit tests to alter this.

logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class CountingIterator(Iterable[M]):
    """A simple iterator that counts how many results are given."""

    def __init__(self, iterator: Iterable[M], max_results=0):
        """
        :param iterator: The source iterable whose items are passed through.
        :param max_results: When non-zero, stop yielding after this many items.
            Reading one extra "sentinel" item then reveals whether more exist.
        """
        self._iterator = iterator
        self._max_results = max_results
        self._number_returned = 0
        self._has_more = None
        self._in_iterator = False

    def __iter__(self):
        # Count the items as they stream through.
        # (map(itemgetter(0), zip(model_iter, count_iter)) was tried; not faster.)
        self._in_iterator = True
        self._number_returned = 0
        try:
            limit = self._max_results
            for item in self._iterator:
                if limit and self._number_returned == limit:
                    # Hit the sentinel item beyond the page: more results exist.
                    self._has_more = True
                    break
                self._number_returned += 1
                yield item
        finally:
            if self._max_results and self._has_more is None:
                self._has_more = False  # ignored the sentinel item
            self._in_iterator = False

    @property
    def number_returned(self) -> int:
        """Tell how many objects the iterator processed"""
        if self._in_iterator:
            raise RuntimeError("Can't read number of returned results during iteration")
        return self._number_returned

    @property
    def has_more(self) -> bool | None:
        """Whether the sentinel item was reached (``None`` when undetermined)."""
        return self._has_more
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class lru_dict(dict):
    """A 'defaultdict' with LRU items for each value."""

    def __init__(self, max_size):
        """:param max_size: Capacity for each per-key LRU cache."""
        super().__init__()
        self.max_size = max_size

    def __missing__(self, key):
        # Lazily create a bounded cache the first time a key is accessed.
        logger.debug("Creating cache for prefetches of '%s'", key)
        new_cache = LRU(self.max_size)
        self[key] = new_cache
        return new_cache
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class ChunkedQuerySetIterator(Iterable[M]):
    """An optimal strategy to perform ``prefetch_related()`` on large datasets.

    It fetches data from the queryset in chunks,
    and performs ``prefetch_related()`` behavior on each chunk.

    Django's ``QuerySet.prefetch_related()`` works by loading the whole queryset into memory,
    and performing an analysis of the related objects to fetch. When working on large datasets,
    this is very inefficient as more memory is consumed. Instead, ``QuerySet.iterator()``
    is preferred here as it returns instances while reading them. Nothing is stored in memory.
    Hence, both approaches are fundamentally incompatible. This class performs a
    mixed strategy: load a chunk, and perform prefetches for that particular batch.

    As extra performance benefit, a local cache avoids prefetching the same records
    again when the next chunk is analysed. It has a "least recently used" cache to avoid
    flooding the caches when foreign keys constantly point to different unique objects.
    """

    def __init__(self, queryset: models.QuerySet, chunk_size=None, sql_chunk_size=None):
        """
        :param queryset: The queryset to iterate over, that has ``prefetch_related()`` data.
        :param chunk_size: The size of each segment to analyse in-memory for related objects.
        :param sql_chunk_size: The size of each segment to fetch from the database,
            used when server-side cursors are not available. The default follows Django behavior.
        """
        self.queryset = queryset
        self.sql_chunk_size = sql_chunk_size or DEFAULT_SQL_CHUNK_SIZE
        self.chunk_size = chunk_size or self.sql_chunk_size
        # Per-lookup LRU caches; half the chunk size bounds memory per relation.
        self._fk_caches = lru_dict(self.chunk_size // 2)
        self._number_returned = 0
        self._in_iterator = False

    def __iter__(self):
        # Using iter() ensures the ModelIterable is resumed with the next chunk.
        self._number_returned = 0
        self._in_iterator = True
        try:
            qs_iter = iter(self._get_queryset_iterator())

            # Keep fetching chunks
            while True:
                instances = list(islice(qs_iter, self.chunk_size))
                if not instances:
                    break

                # Perform prefetches on this chunk:
                if self.queryset._prefetch_related_lookups:
                    self._add_prefetches(instances)

                # And return to parent loop
                yield from instances
                self._number_returned += len(instances)
        finally:
            self._in_iterator = False

    def _get_queryset_iterator(self) -> Iterable:
        """The body of queryset.iterator(), while circumventing prefetching."""
        # The old code did return `self.queryset.iterator(chunk_size=self.sql_chunk_size)`
        # However, Django 4 supports using prefetch_related() with iterator() in that scenario.
        #
        # This code is the core of Django's QuerySet.iterator() that only produces the
        # old-style iteration, without any prefetches. Those are added by this class instead.
        # NOTE(review): relies on Django private API `_iterable_class` — verify on upgrades.
        use_chunked_fetch = not connections[self.queryset.db].settings_dict.get(
            "DISABLE_SERVER_SIDE_CURSORS"
        )
        iterable = self.queryset._iterable_class(
            self.queryset, chunked_fetch=use_chunked_fetch, chunk_size=self.sql_chunk_size
        )

        yield from iterable

    @property
    def number_returned(self) -> int:
        """Tell how many objects the iterator processed"""
        if self._in_iterator:
            raise RuntimeError("Can't read number of returned results during iteration")
        return self._number_returned

    def _add_prefetches(self, instances: list[M]):
        """Merge the prefetched objects for this batch with the model instances."""
        if self._fk_caches:
            # Make sure prefetch_related_objects() doesn't have
            # to fetch items again that infrequently changes.
            all_restored = self._restore_caches(instances)
            if all_restored:
                logger.debug("Restored all prefetches from cache")
                return

        # Reuse the Django machinery for retrieving missing sub objects.
        # and analyse the ForeignKey caches to allow faster prefetches next time
        logger.debug("Perform additional prefetches for %d objects", len(instances))
        models.prefetch_related_objects(instances, *self.queryset._prefetch_related_lookups)
        self._persist_prefetch_cache(instances)

    def _persist_prefetch_cache(self, instances):
        """Store the prefetched data so it can be applied to the next batch"""
        for instance in instances:
            # `_state.fields_cache` holds the resolved ForeignKey objects per lookup.
            for lookup, obj in instance._state.fields_cache.items():
                if obj is not None:
                    cache = self._fk_caches[lookup]
                    cache[obj.pk] = obj

    def _restore_caches(self, instances) -> bool:
        """Restore prefetched data to the new set of instances.
        This avoids unneeded prefetching of the same ForeignKey relation.

        :returns: True when every instance's relations were satisfied from cache.
        """
        if not instances:
            return True
        if not self._fk_caches:
            return False

        all_restored = True

        for lookup, cache in self._fk_caches.items():
            field = instances[0]._meta.get_field(lookup)
            if not hasattr(field, "attname"):
                logger.debug(
                    "Unable to restore prefetches for '%s' (%s)", lookup, field.__class__.__name__
                )
                # Retrieving prefetches from ForeignObjectRel wouldn't work here.
                # Let standard prefetch_related() take over.
                all_restored = False
                continue

            logger.debug("Restoring prefetches for '%s'", lookup)
            for instance in instances:
                id_value = getattr(instance, field.attname)
                if id_value is None:
                    continue

                obj = cache.get(id_value, None)
                if obj is not None:
                    instance._state.fields_cache[lookup] = obj
                else:
                    # A single cache miss forces a full prefetch for this chunk.
                    all_restored = False

        return all_restored
|
gisserver/output/results.py
CHANGED
|
@@ -8,6 +8,7 @@ from __future__ import annotations
|
|
|
8
8
|
|
|
9
9
|
import math
|
|
10
10
|
import operator
|
|
11
|
+
import typing
|
|
11
12
|
from collections.abc import Iterable
|
|
12
13
|
from datetime import timezone
|
|
13
14
|
from functools import cached_property, reduce
|
|
@@ -19,7 +20,13 @@ from gisserver import conf
|
|
|
19
20
|
from gisserver.features import FeatureType
|
|
20
21
|
from gisserver.geometries import BoundingBox
|
|
21
22
|
|
|
22
|
-
from .
|
|
23
|
+
from .iters import ChunkedQuerySetIterator, CountingIterator
|
|
24
|
+
|
|
25
|
+
if typing.TYPE_CHECKING:
|
|
26
|
+
from gisserver.projection import FeatureProjection, QueryExpression
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
CALCULATE = -9999999
|
|
23
30
|
|
|
24
31
|
|
|
25
32
|
class SimpleFeatureCollection:
|
|
@@ -31,19 +38,30 @@ class SimpleFeatureCollection:
|
|
|
31
38
|
|
|
32
39
|
def __init__(
|
|
33
40
|
self,
|
|
34
|
-
|
|
41
|
+
source_query: QueryExpression,
|
|
42
|
+
feature_types: list[FeatureType],
|
|
35
43
|
queryset: models.QuerySet,
|
|
36
44
|
start: int,
|
|
37
45
|
stop: int,
|
|
46
|
+
number_matched: int | None = CALCULATE,
|
|
38
47
|
):
|
|
39
|
-
self.
|
|
48
|
+
self.source_query = source_query
|
|
49
|
+
self.feature_types = feature_types
|
|
40
50
|
self.queryset = queryset
|
|
41
51
|
self.start = start
|
|
42
52
|
self.stop = stop
|
|
53
|
+
self._number_matched = number_matched
|
|
54
|
+
|
|
43
55
|
self._result_cache = None
|
|
44
56
|
self._result_iterator = None
|
|
45
57
|
self._has_more = None
|
|
46
58
|
|
|
59
|
+
# Tell that is a resultType=hits request.
|
|
60
|
+
# Typically, start and stop are 0. However, for resultType=hits with count,
|
|
61
|
+
# that does not apply. Instead, it detects whether the known amount is already provided.
|
|
62
|
+
# Detecting that queryset.none() is provided won't work, as that can be used by IdOperator too.
|
|
63
|
+
self._is_hits_request = number_matched is not None and number_matched != CALCULATE
|
|
64
|
+
|
|
47
65
|
def __iter__(self) -> Iterable[models.Model]:
|
|
48
66
|
"""Iterate through all results.
|
|
49
67
|
|
|
@@ -51,7 +69,7 @@ class SimpleFeatureCollection:
|
|
|
51
69
|
the results can either be cached first, or be streamed without caching.
|
|
52
70
|
This picks the best-performance scenario in most cases.
|
|
53
71
|
"""
|
|
54
|
-
if self.
|
|
72
|
+
if self._is_hits_request:
|
|
55
73
|
self._result_cache = []
|
|
56
74
|
|
|
57
75
|
if self._result_cache is not None:
|
|
@@ -67,7 +85,7 @@ class SimpleFeatureCollection:
|
|
|
67
85
|
def iterator(self):
|
|
68
86
|
"""Explicitly request the results to be streamed.
|
|
69
87
|
|
|
70
|
-
This can be used by output formats that stream
|
|
88
|
+
This can be used by output formats that stream results, and don't
|
|
71
89
|
access `number_returned`. Note this is not compatible with prefetch_related().
|
|
72
90
|
"""
|
|
73
91
|
if self._result_iterator is not None:
|
|
@@ -76,7 +94,7 @@ class SimpleFeatureCollection:
|
|
|
76
94
|
if self._result_cache is not None:
|
|
77
95
|
# In case the results were read already, reuse that.
|
|
78
96
|
return iter(self._result_cache)
|
|
79
|
-
elif self.
|
|
97
|
+
elif self._is_hits_request:
|
|
80
98
|
# resulttype=hits
|
|
81
99
|
return iter([])
|
|
82
100
|
else:
|
|
@@ -122,8 +140,7 @@ class SimpleFeatureCollection:
|
|
|
122
140
|
if self._result_iterator is not None:
|
|
123
141
|
raise RuntimeError("Results for feature collection are read twice.")
|
|
124
142
|
|
|
125
|
-
if self.
|
|
126
|
-
# resulttype=hits
|
|
143
|
+
if self._is_hits_request:
|
|
127
144
|
self._result_cache = []
|
|
128
145
|
else:
|
|
129
146
|
# This still allows prefetch_related() to work,
|
|
@@ -164,21 +181,31 @@ class SimpleFeatureCollection:
|
|
|
164
181
|
@cached_property
|
|
165
182
|
def number_returned(self) -> int:
|
|
166
183
|
"""Return the number of results for this page."""
|
|
167
|
-
if self.
|
|
168
|
-
return 0
|
|
184
|
+
if self._is_hits_request:
|
|
185
|
+
return 0
|
|
169
186
|
elif self._result_iterator is not None:
|
|
170
187
|
# When requesting the data after the fact, results are counted.
|
|
171
188
|
return self._result_iterator.number_returned
|
|
172
189
|
else:
|
|
173
|
-
# Count by fetching all data. Otherwise the results are queried twice.
|
|
190
|
+
# Count by fetching all data. Otherwise, the results are queried twice.
|
|
174
191
|
# For GML/XML, it's not possible the stream the queryset results
|
|
175
192
|
# as the first tag needs to describe the number of results.
|
|
176
193
|
self.fetch_results()
|
|
177
194
|
return len(self._result_cache)
|
|
178
195
|
|
|
179
|
-
@
|
|
196
|
+
@property
|
|
180
197
|
def number_matched(self) -> int:
|
|
181
198
|
"""Return the total number of matches across all pages."""
|
|
199
|
+
if self._is_hits_request:
|
|
200
|
+
if self.stop:
|
|
201
|
+
# resulttype=hits&COUNT=n should minimize how many are "matched".
|
|
202
|
+
return min(self._number_matched, self.stop - self.start)
|
|
203
|
+
else:
|
|
204
|
+
return self._number_matched
|
|
205
|
+
elif self._number_matched != CALCULATE:
|
|
206
|
+
# Return previously cached result
|
|
207
|
+
return self._number_matched
|
|
208
|
+
|
|
182
209
|
if self._is_surely_last_page:
|
|
183
210
|
# For resulttype=results, an expensive COUNT query can be avoided
|
|
184
211
|
# when this is the first and only page or the last page.
|
|
@@ -187,20 +214,27 @@ class SimpleFeatureCollection:
|
|
|
187
214
|
qs = self.queryset
|
|
188
215
|
clean_annotations = {
|
|
189
216
|
# HACK: remove database optimizations from output renderer.
|
|
190
|
-
# Otherwise it becomes SELECT COUNT(*) FROM (SELECT AsGML(..), ...)
|
|
217
|
+
# Otherwise, it becomes SELECT COUNT(*) FROM (SELECT AsGML(..), ...)
|
|
191
218
|
key: value
|
|
192
219
|
for key, value in qs.query.annotations.items()
|
|
193
|
-
if not key.startswith("_as_")
|
|
220
|
+
if not key.startswith("_as_") and not key.startswith("_As") # AsGML / AsEWKT
|
|
194
221
|
}
|
|
195
222
|
if clean_annotations != qs.query.annotations:
|
|
196
223
|
qs = self.queryset.all() # make a clone to allow editing
|
|
197
224
|
qs.query.annotations = clean_annotations
|
|
198
225
|
|
|
199
|
-
return
|
|
226
|
+
# Calculate, cache and return
|
|
227
|
+
self._number_matched = qs.count()
|
|
228
|
+
return self._number_matched
|
|
200
229
|
|
|
201
230
|
@property
|
|
202
231
|
def _is_surely_last_page(self):
|
|
203
232
|
"""Return true when it's totally clear this is the last page."""
|
|
233
|
+
if self.start == self.stop == 0:
|
|
234
|
+
return True # hits request without count
|
|
235
|
+
elif self._is_hits_request:
|
|
236
|
+
return False
|
|
237
|
+
|
|
204
238
|
# Optimization to avoid making COUNT() queries when we can already know the answer.
|
|
205
239
|
if self.stop == math.inf:
|
|
206
240
|
return True # Infinite page requested
|
|
@@ -219,20 +253,30 @@ class SimpleFeatureCollection:
|
|
|
219
253
|
# For GeoJSON output, the iterator was read first, and `number_returned` is already filled in.
|
|
220
254
|
# For GML output, the pagination details are requested first, and will fetch all data.
|
|
221
255
|
# Hence, reading `number_returned` here can be quite an intensive operation.
|
|
222
|
-
page_size = self.stop - self.start
|
|
256
|
+
page_size = self.stop - self.start
|
|
223
257
|
return page_size and (self.number_returned < page_size or self._has_more is False)
|
|
224
258
|
|
|
225
259
|
@property
|
|
226
260
|
def has_next(self):
|
|
227
|
-
if self.stop == math.inf:
|
|
261
|
+
if self.stop == math.inf or (self.start == self.stop == 0):
|
|
228
262
|
return False
|
|
229
263
|
elif self._has_more is not None:
|
|
230
264
|
return self._has_more # did page+1 record check, answer is known.
|
|
231
265
|
elif self._is_surely_last_page:
|
|
232
|
-
return False #
|
|
266
|
+
return False # Fewer results than expected, answer is known.
|
|
233
267
|
|
|
234
|
-
|
|
235
|
-
|
|
268
|
+
if self._is_hits_request:
|
|
269
|
+
return self.stop <= self._number_matched
|
|
270
|
+
else:
|
|
271
|
+
# This will perform an slow COUNT() query...
|
|
272
|
+
return self.stop < self.number_matched
|
|
273
|
+
|
|
274
|
+
@cached_property
|
|
275
|
+
def projection(self) -> FeatureProjection:
|
|
276
|
+
"""Provide the projection to render these results with."""
|
|
277
|
+
# Note this attribute would technically be part of the 'query' object,
|
|
278
|
+
# but since the projection needs to be calculated once, it's stored here for convenience.
|
|
279
|
+
return self.source_query.get_projection()
|
|
236
280
|
|
|
237
281
|
def get_bounding_box(self) -> BoundingBox:
|
|
238
282
|
"""Determine bounding box of all items."""
|
|
@@ -241,22 +285,18 @@ class SimpleFeatureCollection:
|
|
|
241
285
|
# Start with an obviously invalid bbox,
|
|
242
286
|
# which corrects at the first extend_to_geometry call.
|
|
243
287
|
bbox = BoundingBox(math.inf, math.inf, -math.inf, -math.inf)
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
).child
|
|
288
|
+
|
|
289
|
+
# Allow the geometry to exist in a dotted relationship.
|
|
247
290
|
for instance in self:
|
|
248
|
-
|
|
249
|
-
if
|
|
291
|
+
geometry_value = self.projection.get_main_geometry_value(instance)
|
|
292
|
+
if geometry_value is None:
|
|
250
293
|
continue
|
|
251
294
|
|
|
252
|
-
bbox.extend_to_geometry(
|
|
295
|
+
bbox.extend_to_geometry(geometry_value)
|
|
253
296
|
|
|
254
297
|
return bbox
|
|
255
298
|
|
|
256
299
|
|
|
257
|
-
CALCULATE = -9999999
|
|
258
|
-
|
|
259
|
-
|
|
260
300
|
class FeatureCollection:
|
|
261
301
|
"""WFS object that holds the result type for GetFeature.
|
|
262
302
|
This object type is defined in the WFS spec.
|
|
@@ -270,6 +310,7 @@ class FeatureCollection:
|
|
|
270
310
|
previous: str | None = None,
|
|
271
311
|
):
|
|
272
312
|
"""
|
|
313
|
+
:param source_query: The query that generated this output.
|
|
273
314
|
:param results: All retrieved feature collections (one per FeatureType)
|
|
274
315
|
:param number_matched: Total number of features across all pages
|
|
275
316
|
:param next: URL of the next page
|
|
@@ -305,7 +346,7 @@ class FeatureCollection:
|
|
|
305
346
|
conf.GISSERVER_COUNT_NUMBER_MATCHED == 2 and self.results[0].start > 0
|
|
306
347
|
):
|
|
307
348
|
# Report "unknown" for either all pages, or the second page.
|
|
308
|
-
# Most clients don't need this metadata and thus we avoid a COUNT query.
|
|
349
|
+
# Most clients don't need this metadata, and thus we avoid a COUNT query.
|
|
309
350
|
return None
|
|
310
351
|
|
|
311
352
|
return sum(c.number_matched for c in self.results)
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
"""Outputting XML for the stored query logic."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from io import StringIO
|
|
6
|
+
from xml.etree.ElementTree import Element, tostring
|
|
7
|
+
|
|
8
|
+
from gisserver.extensions.queries import QueryExpressionText, StoredQueryDescription
|
|
9
|
+
from gisserver.output.utils import attr_escape, tag_escape
|
|
10
|
+
from gisserver.parsers.xml import xmlns
|
|
11
|
+
|
|
12
|
+
from .base import XmlOutputRenderer
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ListStoredQueriesRenderer(XmlOutputRenderer):
    """Rendering for the ``<wfs:ListStoredQueriesResponse>``."""

    # XML Namespaces to include by default
    xml_namespaces = {
        xmlns.wfs20: "",
        xmlns.xs: "xs",
        xmlns.xsi: "xsi",
    }

    def __init__(self, operation, query_descriptions: list[StoredQueryDescription]):
        """Take the list of stored queries to render.

        :param operation: The WFS operation that invoked this renderer.
        :param query_descriptions: The stored-query metadata to serialize.
        """
        super().__init__(operation)
        self.all_feature_types = operation.view.get_bound_feature_types()
        self.query_descriptions = query_descriptions

    def render_stream(self):
        """Render the complete ``<ListStoredQueriesResponse>`` document as a string."""
        self.output = StringIO()
        self.output.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n'
            f"<ListStoredQueriesResponse"
            f" {self.render_xmlns_attributes()}"
            f' xsi:schemaLocation="http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">\n'
        )
        for query_description in self.query_descriptions:
            self.write_query(query_description)

        self.output.write("</ListStoredQueriesResponse>\n")
        return self.output.getvalue()

    def write_query(self, query_description: StoredQueryDescription):
        """Write a single ``<StoredQuery>`` element."""
        self.output.write(
            # Escape the id: it is placed in an XML attribute
            # (consistent with DescribeStoredQueriesRenderer.write_description).
            f' <StoredQuery id="{attr_escape(query_description.id)}">\n'
            f" <Title>{tag_escape(query_description.title)}</Title>\n"
        )

        for expression in query_description.expressions:
            # No explicit return types means the query may return any feature type.
            return_types = expression.return_feature_types or self.all_feature_types
            for return_type in return_types:
                feature_qname = self.feature_to_qname(return_type)
                self.output.write(
                    f" <ReturnFeatureType>{tag_escape(feature_qname)}</ReturnFeatureType>\n"
                )

        self.output.write(" </StoredQuery>\n")
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class DescribeStoredQueriesRenderer(XmlOutputRenderer):
    """Rendering for the ``<wfs:DescribeStoredQueriesResponse>``."""

    # XML Namespaces to include by default
    xml_namespaces = {
        xmlns.wfs20: "",
        xmlns.xs: "xs",
        xmlns.xsi: "xsi",
    }

    def __init__(self, operation, query_descriptions: list[StoredQueryDescription]):
        """Take the list of stored queries to render.

        :param operation: The WFS operation that invoked this renderer.
        :param query_descriptions: The stored-query metadata to serialize.
        """
        super().__init__(operation)
        self.all_feature_types = operation.view.get_bound_feature_types()
        self.query_descriptions = query_descriptions

    def render_stream(self):
        """Render the complete ``<DescribeStoredQueriesResponse>`` document as a string."""
        self.output = StringIO()
        self.output.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n'
            f"<DescribeStoredQueriesResponse"
            f" {self.render_xmlns_attributes()}"
            f' xsi:schemaLocation="http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">\n'
        )

        for query_description in self.query_descriptions:
            self.write_description(query_description)

        self.output.write("</DescribeStoredQueriesResponse>\n")
        return self.output.getvalue()

    def write_description(self, query_description: StoredQueryDescription):
        """Write the stored query description."""
        self.output.write(
            f'<StoredQueryDescription id="{attr_escape(query_description.id)}">\n'
            f" <Title>{tag_escape(query_description.title)}</Title>\n"
            f" <Abstract>{tag_escape(query_description.abstract)}</Abstract>\n"
        )

        # Declare parameters
        for name, xsd_type in query_description.parameters.items():
            type_qname = self.to_qname(xsd_type)
            self.output.write(f' <Parameter name="{attr_escape(name)}" type="{type_qname}"/>\n')

        # The QueryExpressionText nodes allow code per return type.
        for expression in query_description.expressions:
            self.render_expression(expression)

        self.output.write("</StoredQueryDescription>\n")

    def render_expression(self, expression: QueryExpressionText):
        """Render the 'QueryExpressionText' node (no body content for now)."""
        is_private = "true" if expression.is_private else "false"
        if expression.return_feature_types is None:
            # for GetFeatureById
            types = " ".join(self.feature_to_qname(ft) for ft in self.all_feature_types)
        else:
            types = " ".join(
                self.feature_to_qname(return_type)
                for return_type in expression.return_feature_types
            )

        if expression.is_private or not expression.implementation_text:
            implementation_text = ""
        elif isinstance(expression.implementation_text, Element):
            # XML serialization (will recreate namespaces)
            default_namespace = next(
                (ns for ns, prefix in self.app_namespaces.items() if prefix == ""), None
            )
            implementation_text = tostring(
                expression.implementation_text,
                # BUG FIX: without encoding="unicode", tostring() returns bytes,
                # so the f-string below would emit a literal b'...' into the XML.
                encoding="unicode",
                xml_declaration=False,
                default_namespace=default_namespace,
            )
        else:
            # Some raw content (e.g. language="python")
            implementation_text = tag_escape(expression.implementation_text)

        self.output.write(
            f' <QueryExpressionText isPrivate="{is_private}" language="{expression.language}"'
            f' returnFeatureTypes="{types}">{implementation_text}</QueryExpressionText>\n'
        )
|