django-gisserver 1.5.0__py3-none-any.whl → 2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.1.dist-info}/METADATA +34 -8
- django_gisserver-2.1.dist-info/RECORD +68 -0
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.1.dist-info}/WHEEL +1 -1
- gisserver/__init__.py +1 -1
- gisserver/compat.py +23 -0
- gisserver/conf.py +7 -0
- gisserver/crs.py +401 -0
- gisserver/db.py +126 -51
- gisserver/exceptions.py +132 -4
- gisserver/extensions/__init__.py +4 -0
- gisserver/{parsers/fes20 → extensions}/functions.py +131 -31
- gisserver/extensions/queries.py +266 -0
- gisserver/features.py +253 -181
- gisserver/geometries.py +64 -311
- gisserver/management/__init__.py +0 -0
- gisserver/management/commands/__init__.py +0 -0
- gisserver/management/commands/loadgeojson.py +311 -0
- gisserver/operations/base.py +130 -312
- gisserver/operations/wfs20.py +399 -375
- gisserver/output/__init__.py +14 -49
- gisserver/output/base.py +198 -144
- gisserver/output/csv.py +78 -75
- gisserver/output/geojson.py +37 -37
- gisserver/output/gml32.py +287 -259
- gisserver/output/iters.py +207 -0
- gisserver/output/results.py +73 -61
- gisserver/output/stored.py +143 -0
- gisserver/output/utils.py +81 -169
- gisserver/output/xmlschema.py +85 -46
- gisserver/parsers/__init__.py +10 -10
- gisserver/parsers/ast.py +426 -0
- gisserver/parsers/fes20/__init__.py +89 -31
- gisserver/parsers/fes20/expressions.py +172 -58
- gisserver/parsers/fes20/filters.py +116 -45
- gisserver/parsers/fes20/identifiers.py +66 -28
- gisserver/parsers/fes20/lookups.py +146 -0
- gisserver/parsers/fes20/operators.py +417 -161
- gisserver/parsers/fes20/sorting.py +113 -34
- gisserver/parsers/gml/__init__.py +17 -25
- gisserver/parsers/gml/base.py +36 -15
- gisserver/parsers/gml/geometries.py +105 -44
- gisserver/parsers/ows/__init__.py +25 -0
- gisserver/parsers/ows/kvp.py +198 -0
- gisserver/parsers/ows/requests.py +160 -0
- gisserver/parsers/query.py +179 -0
- gisserver/parsers/values.py +87 -4
- gisserver/parsers/wfs20/__init__.py +39 -0
- gisserver/parsers/wfs20/adhoc.py +253 -0
- gisserver/parsers/wfs20/base.py +148 -0
- gisserver/parsers/wfs20/projection.py +103 -0
- gisserver/parsers/wfs20/requests.py +483 -0
- gisserver/parsers/wfs20/stored.py +193 -0
- gisserver/parsers/xml.py +261 -0
- gisserver/projection.py +367 -0
- gisserver/static/gisserver/index.css +20 -4
- gisserver/templates/gisserver/base.html +12 -0
- gisserver/templates/gisserver/index.html +9 -15
- gisserver/templates/gisserver/service_description.html +12 -6
- gisserver/templates/gisserver/wfs/2.0.0/get_capabilities.xml +9 -9
- gisserver/templates/gisserver/wfs/feature_field.html +3 -3
- gisserver/templates/gisserver/wfs/feature_type.html +35 -13
- gisserver/templatetags/gisserver_tags.py +20 -0
- gisserver/types.py +445 -313
- gisserver/views.py +227 -62
- django_gisserver-1.5.0.dist-info/RECORD +0 -54
- gisserver/parsers/base.py +0 -149
- gisserver/parsers/fes20/query.py +0 -285
- gisserver/parsers/tags.py +0 -102
- gisserver/queries/__init__.py +0 -37
- gisserver/queries/adhoc.py +0 -185
- gisserver/queries/base.py +0 -186
- gisserver/queries/projection.py +0 -240
- gisserver/queries/stored.py +0 -206
- gisserver/templates/gisserver/wfs/2.0.0/describe_stored_queries.xml +0 -20
- gisserver/templates/gisserver/wfs/2.0.0/list_stored_queries.xml +0 -14
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.1.dist-info/licenses}/LICENSE +0 -0
- {django_gisserver-1.5.0.dist-info → django_gisserver-2.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from collections.abc import Iterable
|
|
5
|
+
from itertools import islice
|
|
6
|
+
from typing import TypeVar
|
|
7
|
+
|
|
8
|
+
from django.db import connections, models
|
|
9
|
+
from lru import LRU
|
|
10
|
+
|
|
11
|
+
M = TypeVar("M", bound=models.Model)
|
|
12
|
+
|
|
13
|
+
DEFAULT_SQL_CHUNK_SIZE = 2000 # allow unit tests to alter this.
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class CountingIterator(Iterable[M]):
    """A simple iterator that counts how many results are given."""

    def __init__(self, iterator: Iterable[M], max_results=0):
        """
        :param iterator: The source iterable to wrap and count.
        :param max_results: When non-zero, stop after this many items;
            a remaining "sentinel" item then signals that more results exist.
        """
        self._iterator = iterator
        self._number_returned = 0
        self._in_iterator = False
        self._max_results = max_results
        self._has_more = None

    def __iter__(self):
        # Stream items to the caller while tracking how many were produced.
        # (Tried using map(itemgetter(0), zip(model_iter, count_iter)) but that's not faster.)
        self._in_iterator = True
        self._number_returned = 0
        try:
            limit = self._max_results
            for item in self._iterator:
                if limit and self._number_returned == limit:
                    # Hit the limit while an extra item was still available:
                    # that sentinel proves there is a next page.
                    self._has_more = True
                    break
                self._number_returned += 1
                yield item
        finally:
            if self._max_results and self._has_more is None:
                self._has_more = False  # ignored the sentinel item
            self._in_iterator = False

    @property
    def number_returned(self) -> int:
        """Tell how many objects the iterator processed"""
        if not self._in_iterator:
            return self._number_returned
        raise RuntimeError("Can't read number of returned results during iteration")

    @property
    def has_more(self) -> bool | None:
        # None means unknown (no max_results given, or iteration not finished).
        return self._has_more
class lru_dict(dict):
    """A 'defaultdict' with LRU items for each value."""

    def __init__(self, max_size):
        # Start empty; per-key LRU caches are created lazily in __missing__().
        super().__init__()
        self.max_size = max_size

    def __missing__(self, key):
        # Mirrors collections.defaultdict behavior: build and store on first access.
        logger.debug("Creating cache for prefetches of '%s'", key)
        cache = self[key] = LRU(self.max_size)
        return cache
|
|
71
|
+
class ChunkedQuerySetIterator(Iterable[M]):
    """An optimal strategy to perform ``prefetch_related()`` on large datasets.

    It fetches data from the queryset in chunks,
    and performs ``prefetch_related()`` behavior on each chunk.

    Django's ``QuerySet.prefetch_related()`` works by loading the whole queryset into memory,
    and performing an analysis of the related objects to fetch. When working on large datasets,
    this is very inefficient as more memory is consumed. Instead, ``QuerySet.iterator()``
    is preferred here as it returns instances while reading them. Nothing is stored in memory.
    Hence, both approaches are fundamentally incompatible. This class performs a
    mixed strategy: load a chunk, and perform prefetches for that particular batch.

    As extra performance benefit, a local cache avoids prefetching the same records
    again when the next chunk is analysed. It has a "least recently used" cache to avoid
    flooding the caches when foreign keys constantly point to different unique objects.
    """

    def __init__(self, queryset: models.QuerySet, chunk_size=None, sql_chunk_size=None):
        """
        :param queryset: The queryset to iterate over, that has ``prefetch_related()`` data.
        :param chunk_size: The size of each segment to analyse in-memory for related objects.
        :param sql_chunk_size: The size of each segment to fetch from the database,
            used when server-side cursors are not available. The default follows Django behavior.
        """
        self.queryset = queryset
        self.sql_chunk_size = sql_chunk_size or DEFAULT_SQL_CHUNK_SIZE
        # In-memory analysis chunk defaults to the SQL fetch size.
        self.chunk_size = chunk_size or self.sql_chunk_size
        # Per-lookup LRU caches of previously prefetched related objects.
        self._fk_caches = lru_dict(self.chunk_size // 2)
        self._number_returned = 0
        # Guards against reading number_returned mid-iteration.
        self._in_iterator = False

    def __iter__(self):
        # Using iter() ensures the ModelIterable is resumed with the next chunk.
        self._number_returned = 0
        self._in_iterator = True
        try:
            qs_iter = iter(self._get_queryset_iterator())

            # Keep fetching chunks
            while True:
                # islice() drains at most chunk_size items; an empty list means exhaustion.
                instances = list(islice(qs_iter, self.chunk_size))
                if not instances:
                    break

                # Perform prefetches on this chunk:
                if self.queryset._prefetch_related_lookups:
                    self._add_prefetches(instances)

                # And return to parent loop
                yield from instances
                self._number_returned += len(instances)
        finally:
            # Reset even when the consumer abandons the generator early.
            self._in_iterator = False

    def _get_queryset_iterator(self) -> Iterable:
        """The body of queryset.iterator(), while circumventing prefetching."""
        # The old code did return `self.queryset.iterator(chunk_size=self.sql_chunk_size)`
        # However, Django 4 supports using prefetch_related() with iterator() in that scenario.
        #
        # This code is the core of Django's QuerySet.iterator() that only produces the
        # old-style iteration, without any prefetches. Those are added by this class instead.
        # NOTE(review): relies on the private Django attributes `_iterable_class` and
        # `settings_dict` — verify on Django upgrades.
        use_chunked_fetch = not connections[self.queryset.db].settings_dict.get(
            "DISABLE_SERVER_SIDE_CURSORS"
        )
        iterable = self.queryset._iterable_class(
            self.queryset, chunked_fetch=use_chunked_fetch, chunk_size=self.sql_chunk_size
        )

        yield from iterable

    @property
    def number_returned(self) -> int:
        """Tell how many objects the iterator processed"""
        if self._in_iterator:
            raise RuntimeError("Can't read number of returned results during iteration")
        return self._number_returned

    def _add_prefetches(self, instances: list[M]) -> None:
        """Merge the prefetched objects for this batch with the model instances."""
        if self._fk_caches:
            # Make sure prefetch_related_objects() doesn't have
            # to fetch items again that infrequently changes.
            all_restored = self._restore_caches(instances)
            if all_restored:
                logger.debug("Restored all prefetches from cache")
                return

        # Reuse the Django machinery for retrieving missing sub objects.
        # and analyse the ForeignKey caches to allow faster prefetches next time
        logger.debug("Perform additional prefetches for %d objects", len(instances))
        models.prefetch_related_objects(instances, *self.queryset._prefetch_related_lookups)
        self._persist_prefetch_cache(instances)

    def _persist_prefetch_cache(self, instances) -> None:
        """Store the prefetched data so it can be applied to the next batch"""
        # NOTE(review): `_state.fields_cache` appears to hold forward relations only,
        # so reverse/many-to-many prefetches are presumably not cached here — confirm.
        for instance in instances:
            for lookup, obj in instance._state.fields_cache.items():
                if obj is not None:
                    cache = self._fk_caches[lookup]
                    cache[obj.pk] = obj

    def _restore_caches(self, instances) -> bool:
        """Restore prefetched data to the new set of instances.
        This avoids unneeded prefetching of the same ForeignKey relation.

        :return: True when every instance had all its relations restored
            from cache, so prefetch_related_objects() can be skipped entirely.
        """
        if not instances:
            return True
        if not self._fk_caches:
            return False

        all_restored = True

        for lookup, cache in self._fk_caches.items():
            # Inspect the field on the first instance; all share the same model class.
            field = instances[0]._meta.get_field(lookup)
            if not hasattr(field, "attname"):
                logger.debug(
                    "Unable to restore prefetches for '%s' (%s)", lookup, field.__class__.__name__
                )
                # Retrieving prefetches from ForeignObjectRel wouldn't work here.
                # Let standard prefetch_related() take over.
                all_restored = False
                continue

            logger.debug("Restoring prefetches for '%s'", lookup)
            for instance in instances:
                id_value = getattr(instance, field.attname)
                if id_value is None:
                    # NULL foreign key: nothing to restore for this instance.
                    continue

                obj = cache.get(id_value, None)
                if obj is not None:
                    instance._state.fields_cache[lookup] = obj
                else:
                    # Cache miss: this chunk still needs a real prefetch query.
                    all_restored = False

        return all_restored
gisserver/output/results.py
CHANGED
|
@@ -7,50 +7,60 @@ properties match the WFS 2.0 spec closely.
|
|
|
7
7
|
from __future__ import annotations
|
|
8
8
|
|
|
9
9
|
import math
|
|
10
|
-
import operator
|
|
11
10
|
import typing
|
|
12
11
|
from collections.abc import Iterable
|
|
13
12
|
from datetime import timezone
|
|
14
|
-
from functools import cached_property
|
|
13
|
+
from functools import cached_property
|
|
15
14
|
|
|
16
15
|
from django.db import models
|
|
17
16
|
from django.utils.timezone import now
|
|
18
17
|
|
|
19
18
|
from gisserver import conf
|
|
19
|
+
from gisserver.exceptions import wrap_filter_errors
|
|
20
20
|
from gisserver.features import FeatureType
|
|
21
|
-
from gisserver.geometries import BoundingBox
|
|
22
21
|
|
|
23
|
-
from .
|
|
22
|
+
from .iters import ChunkedQuerySetIterator, CountingIterator
|
|
24
23
|
|
|
25
24
|
if typing.TYPE_CHECKING:
|
|
26
|
-
from gisserver.
|
|
25
|
+
from gisserver.projection import FeatureProjection, QueryExpression
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
CALCULATE = -9999999
|
|
27
29
|
|
|
28
30
|
|
|
29
31
|
class SimpleFeatureCollection:
|
|
30
32
|
"""Wrapper to read a result set.
|
|
31
33
|
|
|
32
34
|
This object type is defined in the WFS spec.
|
|
33
|
-
It holds a collection of
|
|
35
|
+
It holds a collection of ``<wfs:member>`` objects.
|
|
34
36
|
"""
|
|
35
37
|
|
|
36
38
|
def __init__(
|
|
37
39
|
self,
|
|
38
40
|
source_query: QueryExpression,
|
|
39
|
-
|
|
41
|
+
feature_types: list[FeatureType],
|
|
40
42
|
queryset: models.QuerySet,
|
|
41
43
|
start: int,
|
|
42
44
|
stop: int,
|
|
45
|
+
number_matched: int | None = CALCULATE,
|
|
43
46
|
):
|
|
44
47
|
self.source_query = source_query
|
|
45
|
-
self.
|
|
48
|
+
self.feature_types = feature_types
|
|
46
49
|
self.queryset = queryset
|
|
47
50
|
self.start = start
|
|
48
51
|
self.stop = stop
|
|
52
|
+
self._number_matched = number_matched
|
|
49
53
|
|
|
50
54
|
self._result_cache = None
|
|
51
55
|
self._result_iterator = None
|
|
52
56
|
self._has_more = None
|
|
53
57
|
|
|
58
|
+
# Tell that is a resultType=hits request.
|
|
59
|
+
# Typically, start and stop are 0. However, for resultType=hits with count,
|
|
60
|
+
# that does not apply. Instead, it detects whether the known amount is already provided.
|
|
61
|
+
# Detecting that queryset.none() is provided won't work, as that can be used by IdOperator too.
|
|
62
|
+
self._is_hits_request = number_matched is not None and number_matched != CALCULATE
|
|
63
|
+
|
|
54
64
|
def __iter__(self) -> Iterable[models.Model]:
|
|
55
65
|
"""Iterate through all results.
|
|
56
66
|
|
|
@@ -58,7 +68,7 @@ class SimpleFeatureCollection:
|
|
|
58
68
|
the results can either be cached first, or be streamed without caching.
|
|
59
69
|
This picks the best-performance scenario in most cases.
|
|
60
70
|
"""
|
|
61
|
-
if self.
|
|
71
|
+
if self._is_hits_request:
|
|
62
72
|
self._result_cache = []
|
|
63
73
|
|
|
64
74
|
if self._result_cache is not None:
|
|
@@ -75,7 +85,8 @@ class SimpleFeatureCollection:
|
|
|
75
85
|
"""Explicitly request the results to be streamed.
|
|
76
86
|
|
|
77
87
|
This can be used by output formats that stream results, and don't
|
|
78
|
-
access
|
|
88
|
+
access :attr:`number_returned`.
|
|
89
|
+
Note this is not compatible with ``prefetch_related()``.
|
|
79
90
|
"""
|
|
80
91
|
if self._result_iterator is not None:
|
|
81
92
|
raise RuntimeError("Results for feature collection are read twice.")
|
|
@@ -83,7 +94,7 @@ class SimpleFeatureCollection:
|
|
|
83
94
|
if self._result_cache is not None:
|
|
84
95
|
# In case the results were read already, reuse that.
|
|
85
96
|
return iter(self._result_cache)
|
|
86
|
-
elif self.
|
|
97
|
+
elif self._is_hits_request:
|
|
87
98
|
# resulttype=hits
|
|
88
99
|
return iter([])
|
|
89
100
|
else:
|
|
@@ -115,12 +126,13 @@ class SimpleFeatureCollection:
|
|
|
115
126
|
return self.queryset[self.start : self.stop + (1 if add_sentinel else 0)]
|
|
116
127
|
|
|
117
128
|
def first(self):
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
129
|
+
with wrap_filter_errors(self.source_query):
|
|
130
|
+
try:
|
|
131
|
+
# Don't query a full page, return only one instance (for GetFeatureById)
|
|
132
|
+
# This also preserves the extra added annotations (like _as_gml_FIELD)
|
|
133
|
+
return self.queryset[self.start]
|
|
134
|
+
except IndexError:
|
|
135
|
+
return None
|
|
124
136
|
|
|
125
137
|
def fetch_results(self):
|
|
126
138
|
"""Forcefully read the results early."""
|
|
@@ -129,8 +141,7 @@ class SimpleFeatureCollection:
|
|
|
129
141
|
if self._result_iterator is not None:
|
|
130
142
|
raise RuntimeError("Results for feature collection are read twice.")
|
|
131
143
|
|
|
132
|
-
if self.
|
|
133
|
-
# resulttype=hits
|
|
144
|
+
if self._is_hits_request:
|
|
134
145
|
self._result_cache = []
|
|
135
146
|
else:
|
|
136
147
|
# This still allows prefetch_related() to work,
|
|
@@ -138,11 +149,15 @@ class SimpleFeatureCollection:
|
|
|
138
149
|
if self.stop == math.inf:
|
|
139
150
|
# Infinite page requested, see if start is still requested
|
|
140
151
|
qs = self.queryset[self.start :] if self.start else self.queryset.all()
|
|
141
|
-
|
|
152
|
+
|
|
153
|
+
with wrap_filter_errors(self.source_query):
|
|
154
|
+
self._result_cache = list(qs)
|
|
142
155
|
elif self._use_sentinel_record:
|
|
143
156
|
# No counting, but instead fetch an extra item as sentinel to see if there are more results.
|
|
144
157
|
qs = self.queryset[self.start : self.stop + 1]
|
|
145
|
-
|
|
158
|
+
|
|
159
|
+
with wrap_filter_errors(self.source_query):
|
|
160
|
+
page_results = list(qs)
|
|
146
161
|
|
|
147
162
|
# The stop + 1 sentinel allows checking if there is a next page.
|
|
148
163
|
# This means no COUNT() is needed to detect that.
|
|
@@ -157,7 +172,9 @@ class SimpleFeatureCollection:
|
|
|
157
172
|
# Fetch exactly the page size, no more is needed.
|
|
158
173
|
# Will use a COUNT on the total table, so it can be used to see if there are more pages.
|
|
159
174
|
qs = self.queryset[self.start : self.stop]
|
|
160
|
-
|
|
175
|
+
|
|
176
|
+
with wrap_filter_errors(self.source_query):
|
|
177
|
+
self._result_cache = list(qs)
|
|
161
178
|
|
|
162
179
|
@cached_property
|
|
163
180
|
def _use_sentinel_record(self) -> bool:
|
|
@@ -171,8 +188,8 @@ class SimpleFeatureCollection:
|
|
|
171
188
|
@cached_property
|
|
172
189
|
def number_returned(self) -> int:
|
|
173
190
|
"""Return the number of results for this page."""
|
|
174
|
-
if self.
|
|
175
|
-
return 0
|
|
191
|
+
if self._is_hits_request:
|
|
192
|
+
return 0
|
|
176
193
|
elif self._result_iterator is not None:
|
|
177
194
|
# When requesting the data after the fact, results are counted.
|
|
178
195
|
return self._result_iterator.number_returned
|
|
@@ -183,9 +200,19 @@ class SimpleFeatureCollection:
|
|
|
183
200
|
self.fetch_results()
|
|
184
201
|
return len(self._result_cache)
|
|
185
202
|
|
|
186
|
-
@
|
|
203
|
+
@property
|
|
187
204
|
def number_matched(self) -> int:
|
|
188
205
|
"""Return the total number of matches across all pages."""
|
|
206
|
+
if self._is_hits_request:
|
|
207
|
+
if self.stop:
|
|
208
|
+
# resulttype=hits&COUNT=n should minimize how many are "matched".
|
|
209
|
+
return min(self._number_matched, self.stop - self.start)
|
|
210
|
+
else:
|
|
211
|
+
return self._number_matched
|
|
212
|
+
elif self._number_matched != CALCULATE:
|
|
213
|
+
# Return previously cached result
|
|
214
|
+
return self._number_matched
|
|
215
|
+
|
|
189
216
|
if self._is_surely_last_page:
|
|
190
217
|
# For resulttype=results, an expensive COUNT query can be avoided
|
|
191
218
|
# when this is the first and only page or the last page.
|
|
@@ -197,17 +224,25 @@ class SimpleFeatureCollection:
|
|
|
197
224
|
# Otherwise, it becomes SELECT COUNT(*) FROM (SELECT AsGML(..), ...)
|
|
198
225
|
key: value
|
|
199
226
|
for key, value in qs.query.annotations.items()
|
|
200
|
-
if not key.startswith("_as_")
|
|
227
|
+
if not key.startswith("_as_") and not key.startswith("_As") # AsGML / AsEWKT
|
|
201
228
|
}
|
|
202
229
|
if clean_annotations != qs.query.annotations:
|
|
203
230
|
qs = self.queryset.all() # make a clone to allow editing
|
|
204
231
|
qs.query.annotations = clean_annotations
|
|
205
232
|
|
|
206
|
-
return
|
|
233
|
+
# Calculate, cache and return
|
|
234
|
+
with wrap_filter_errors(self.source_query):
|
|
235
|
+
self._number_matched = qs.count()
|
|
236
|
+
return self._number_matched
|
|
207
237
|
|
|
208
238
|
@property
|
|
209
239
|
def _is_surely_last_page(self):
|
|
210
240
|
"""Return true when it's totally clear this is the last page."""
|
|
241
|
+
if self.start == self.stop == 0:
|
|
242
|
+
return True # hits request without count
|
|
243
|
+
elif self._is_hits_request:
|
|
244
|
+
return False
|
|
245
|
+
|
|
211
246
|
# Optimization to avoid making COUNT() queries when we can already know the answer.
|
|
212
247
|
if self.stop == math.inf:
|
|
213
248
|
return True # Infinite page requested
|
|
@@ -226,54 +261,36 @@ class SimpleFeatureCollection:
|
|
|
226
261
|
# For GeoJSON output, the iterator was read first, and `number_returned` is already filled in.
|
|
227
262
|
# For GML output, the pagination details are requested first, and will fetch all data.
|
|
228
263
|
# Hence, reading `number_returned` here can be quite an intensive operation.
|
|
229
|
-
page_size = self.stop - self.start
|
|
264
|
+
page_size = self.stop - self.start
|
|
230
265
|
return page_size and (self.number_returned < page_size or self._has_more is False)
|
|
231
266
|
|
|
232
267
|
@property
|
|
233
268
|
def has_next(self):
|
|
234
|
-
if self.stop == math.inf:
|
|
269
|
+
if self.stop == math.inf or (self.start == self.stop == 0):
|
|
235
270
|
return False
|
|
236
271
|
elif self._has_more is not None:
|
|
237
272
|
return self._has_more # did page+1 record check, answer is known.
|
|
238
273
|
elif self._is_surely_last_page:
|
|
239
274
|
return False # Fewer results than expected, answer is known.
|
|
240
275
|
|
|
241
|
-
|
|
242
|
-
|
|
276
|
+
if self._is_hits_request:
|
|
277
|
+
return self.stop <= self._number_matched
|
|
278
|
+
else:
|
|
279
|
+
# This will perform an slow COUNT() query...
|
|
280
|
+
return self.stop < self.number_matched
|
|
243
281
|
|
|
244
282
|
@cached_property
|
|
245
283
|
def projection(self) -> FeatureProjection:
|
|
246
284
|
"""Provide the projection to render these results with."""
|
|
247
285
|
# Note this attribute would technically be part of the 'query' object,
|
|
248
|
-
# but since the projection needs to be
|
|
249
|
-
return self.source_query.get_projection(
|
|
250
|
-
|
|
251
|
-
def get_bounding_box(self) -> BoundingBox:
|
|
252
|
-
"""Determine bounding box of all items."""
|
|
253
|
-
self.fetch_results() # Avoid querying results twice
|
|
254
|
-
|
|
255
|
-
# Start with an obviously invalid bbox,
|
|
256
|
-
# which corrects at the first extend_to_geometry call.
|
|
257
|
-
bbox = BoundingBox(math.inf, math.inf, -math.inf, -math.inf)
|
|
258
|
-
geometry_field = self.feature_type.resolve_element(
|
|
259
|
-
self.feature_type.geometry_field.name
|
|
260
|
-
).child
|
|
261
|
-
for instance in self:
|
|
262
|
-
geometry_value = geometry_field.get_value(instance)
|
|
263
|
-
if geometry_value is None:
|
|
264
|
-
continue
|
|
265
|
-
|
|
266
|
-
bbox.extend_to_geometry(geometry_value)
|
|
267
|
-
|
|
268
|
-
return bbox
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
CALCULATE = -9999999
|
|
286
|
+
# but since the projection needs to be calculated once, it's stored here for convenience.
|
|
287
|
+
return self.source_query.get_projection()
|
|
272
288
|
|
|
273
289
|
|
|
274
290
|
class FeatureCollection:
|
|
275
|
-
"""WFS object that holds the result type for GetFeature
|
|
291
|
+
"""WFS object that holds the result type for ``GetFeature``.
|
|
276
292
|
This object type is defined in the WFS spec.
|
|
293
|
+
It holds a collection of :class:`SimpleFeatureCollection` results.
|
|
277
294
|
"""
|
|
278
295
|
|
|
279
296
|
def __init__(
|
|
@@ -297,11 +314,6 @@ class FeatureCollection:
|
|
|
297
314
|
self.date = now()
|
|
298
315
|
self.timestamp = self.date.astimezone(timezone.utc).isoformat()
|
|
299
316
|
|
|
300
|
-
def get_bounding_box(self) -> BoundingBox:
|
|
301
|
-
"""Determine bounding box of all items."""
|
|
302
|
-
# Combine the bounding box of all collections
|
|
303
|
-
return reduce(operator.add, [c.get_bounding_box() for c in self.results])
|
|
304
|
-
|
|
305
317
|
@cached_property
|
|
306
318
|
def number_returned(self) -> int:
|
|
307
319
|
"""Return the total number of returned features"""
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
"""Outputting XML for the stored query logic."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from io import StringIO
|
|
6
|
+
from xml.etree.ElementTree import Element, tostring
|
|
7
|
+
|
|
8
|
+
from gisserver.extensions.queries import QueryExpressionText, StoredQueryDescription
|
|
9
|
+
from gisserver.output.utils import attr_escape, tag_escape
|
|
10
|
+
from gisserver.parsers.xml import xmlns
|
|
11
|
+
|
|
12
|
+
from .base import XmlOutputRenderer
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ListStoredQueriesRenderer(XmlOutputRenderer):
    """Rendering for the ``<wfs:ListStoredQueriesResponse>``."""

    # XML Namespaces to include by default
    xml_namespaces = {
        xmlns.wfs20: "",
        xmlns.xs: "xs",
        xmlns.xsi: "xsi",
    }

    def __init__(self, operation, query_descriptions: list[StoredQueryDescription]):
        """Take the list of stored queries to render."""
        super().__init__(operation)
        self.all_feature_types = operation.view.get_bound_feature_types()
        self.query_descriptions = query_descriptions

    def render_stream(self):
        """Render the complete ``<ListStoredQueriesResponse>`` document as a string."""
        self.output = StringIO()
        self.output.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n'
            f"<ListStoredQueriesResponse"
            f" {self.render_xmlns_attributes()}"
            f' xsi:schemaLocation="http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">\n'
        )
        for query_description in self.query_descriptions:
            self.write_query(query_description)

        self.output.write("</ListStoredQueriesResponse>\n")
        return self.output.getvalue()

    def write_query(self, query_description: StoredQueryDescription):
        """Write a single ``<StoredQuery>`` entry with its return feature types."""
        # Escape the id like DescribeStoredQueriesRenderer does: it is placed
        # inside an XML attribute value. (For ordinary ids this is a no-op.)
        self.output.write(
            f'  <StoredQuery id="{attr_escape(query_description.id)}">\n'
            f"    <Title>{tag_escape(query_description.title)}</Title>\n"
        )

        for expression in query_description.expressions:
            # An expression without explicit return types applies to all feature types.
            return_types = expression.return_feature_types or self.all_feature_types
            for return_type in return_types:
                feature_qname = self.feature_to_qname(return_type)
                self.output.write(
                    f"    <ReturnFeatureType>{tag_escape(feature_qname)}</ReturnFeatureType>\n"
                )

        self.output.write("    </StoredQuery>\n")
+
|
|
62
|
+
class DescribeStoredQueriesRenderer(XmlOutputRenderer):
    """Rendering for the ``<wfs:DescribeStoredQueriesResponse>``."""

    # XML Namespaces to include by default
    xml_namespaces = {
        xmlns.wfs20: "",
        xmlns.xs: "xs",
        xmlns.xsi: "xsi",
    }

    def __init__(self, operation, query_descriptions: list[StoredQueryDescription]):
        """Take the list of stored queries to render."""
        super().__init__(operation)
        self.all_feature_types = operation.view.get_bound_feature_types()
        self.query_descriptions = query_descriptions

    def render_stream(self):
        """Render the complete ``<DescribeStoredQueriesResponse>`` document as a string."""
        self.output = StringIO()
        self.output.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n'
            f"<DescribeStoredQueriesResponse"
            f" {self.render_xmlns_attributes()}"
            f' xsi:schemaLocation="http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">\n'
        )

        for query_description in self.query_descriptions:
            self.write_description(query_description)

        self.output.write("</DescribeStoredQueriesResponse>\n")
        return self.output.getvalue()

    def write_description(self, query_description: StoredQueryDescription):
        """Write the stored query description."""
        self.output.write(
            f'<StoredQueryDescription id="{attr_escape(query_description.id)}">\n'
            f"  <Title>{tag_escape(query_description.title)}</Title>\n"
            f"  <Abstract>{tag_escape(query_description.abstract)}</Abstract>\n"
        )

        # Declare parameters
        for name, xsd_type in query_description.parameters.items():
            type_qname = self.to_qname(xsd_type)
            self.output.write(f'  <Parameter name="{attr_escape(name)}" type="{type_qname}"/>\n')

        # The QueryExpressionText nodes allow code per return type.
        for expression in query_description.expressions:
            self.render_expression(expression)

        self.output.write("</StoredQueryDescription>\n")

    def render_expression(self, expression: QueryExpressionText):
        """Render the 'QueryExpressionText' node (no body content for now)."""
        is_private = "true" if expression.is_private else "false"
        if expression.return_feature_types is None:
            # for GetFeatureById
            types = " ".join(self.feature_to_qname(ft) for ft in self.all_feature_types)
        else:
            types = " ".join(
                self.feature_to_qname(return_type)
                for return_type in expression.return_feature_types
            )

        if expression.is_private or not expression.implementation_text:
            # Private expressions never expose their implementation.
            implementation_text = ""
        elif isinstance(expression.implementation_text, Element):
            # XML serialization (will recreate namespaces)
            default_namespace = next(
                (ns for ns, prefix in self.app_namespaces.items() if prefix == ""), None
            )
            # encoding="unicode" makes tostring() return a str instead of bytes;
            # without it the f-string below would embed a b'...' repr in the XML.
            implementation_text = tostring(
                expression.implementation_text,
                encoding="unicode",
                xml_declaration=False,
                default_namespace=default_namespace,
            )
        else:
            # Some raw content (e.g. language="python")
            implementation_text = tag_escape(expression.implementation_text)

        self.output.write(
            f'  <QueryExpressionText isPrivate="{is_private}" language="{expression.language}"'
            f' returnFeatureTypes="{types}">{implementation_text}</QueryExpressionText>\n'
        )