ssb-sgis 1.3.5__py3-none-any.whl → 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -67,7 +67,7 @@ def to_geoseries(obj: Any, crs: Any | None = None) -> GeoSeries:
  # pandas objects
  index = obj.index
  else:
- # list
+ # list (has method 'index')
  index = None
  except AttributeError:
  index = None
@@ -38,8 +38,9 @@ def update_geometries(
  ) -> GeoDataFrame:
  """Puts geometries on top of each other rowwise.

- Since this operation is done rowwise, it's important to
- first sort the GeoDataFrame approriately. See example below.
+ IMPORTANT: Since this operation is done rowwise, meaning the top rows
+ are put on top of lower rows, it is important to first sort the
+ GeoDataFrame appropriately. See examples below.

  Args:
  gdf: The GeoDataFrame to be updated.
@@ -63,6 +64,22 @@ def update_geometries(

  Example:
  --------
+ Create some overlapping circles and update the geometries based on area.
+ Sorting the data small to large might be the appropriate choice if you want to keep all details
+ in the coverage and the data has no attributes that should be given priority over another.
+
+ >>> coords = [(0, 0), (0, 1), (1, 1), (1, 0)]
+ >>> buffers = [0.9, 1.3, 0.7, 1.1]
+ >>> circles = sg.to_gdf(coords)
+ >>> circles["geometry"] = circles["geometry"].buffer(buffers)
+ >>> updated_smallest_first = sg.update_geometries(sg.sort_small_first(circles))
+ >>> updated_largest_first = sg.update_geometries(sg.sort_large_first(circles))
+ >>> sg.explore(circles, updated_smallest_first, updated_largest_first, tiles=["dark"])
+
+ If you want to prioritize geometries based on attributes,
+
+ >>> circles["hva"] = ["skog", "elv", "bro", "tunnel"]
+
  Create two circles and get the overlap.

  >>> import sgis as sg
@@ -123,7 +140,7 @@ def update_geometries(
  )

  geom_col = copied._geometry_column_name
- index_mapper = {i: idx for i, idx in enumerate(copied.index)}
+ index_mapper = dict(enumerate(copied.index))
  copied = copied.reset_index(drop=True)

  left, right = rtree_runner.run(
@@ -828,16 +828,21 @@ def make_edge_coords_cols(gdf: GeoDataFrame) -> GeoDataFrame:
  Returns:
  A GeoDataFrame with new columns 'source_coords' and 'target_coords'
  """
+ if not gdf.index.is_unique:
+ index_mapper = dict(enumerate(gdf.index))
+ gdf = gdf.reset_index(drop=True)
+ else:
+ index_mapper = None
  try:
  gdf, endpoints = _prepare_make_edge_cols_simple(gdf)
  except ValueError:
  gdf, endpoints = _prepare_make_edge_cols(gdf)

- coords = [(geom.x, geom.y) for geom in endpoints.geometry]
- gdf["source_coords"], gdf["target_coords"] = (
- coords[0::2],
- coords[1::2],
- )
+ gdf["source_coords"] = endpoints.groupby(level=0).first()
+ gdf["target_coords"] = endpoints.groupby(level=0).last()
+
+ if index_mapper is not None:
+ gdf.index = gdf.index.map(index_mapper)

  return gdf

@@ -855,19 +860,22 @@ def make_edge_wkt_cols(gdf: GeoDataFrame) -> GeoDataFrame:
  Returns:
  A GeoDataFrame with new columns 'source_wkt' and 'target_wkt'
  """
+ if not gdf.index.is_unique:
+ index_mapper = dict(enumerate(gdf.index))
+ gdf = gdf.reset_index(drop=True)
+ else:
+ index_mapper = None
  try:
  gdf, endpoints = _prepare_make_edge_cols_simple(gdf)
  except ValueError:
  gdf, endpoints = _prepare_make_edge_cols(gdf)

- wkt_geom = [
- f"POINT ({x} {y})" for x, y in zip(endpoints.x, endpoints.y, strict=True)
- ]
- gdf["source_wkt"], gdf["target_wkt"] = (
- wkt_geom[0::2],
- wkt_geom[1::2],
- )
+ endpoints = endpoints.force_2d()
+ gdf["source_wkt"] = endpoints.groupby(level=0).first().to_wkt()
+ gdf["target_wkt"] = endpoints.groupby(level=0).last().to_wkt()

+ if index_mapper is not None:
+ gdf.index = gdf.index.map(index_mapper)
  return gdf
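
The new source/target columns above are built by taking the line boundaries, exploding them while keeping the original row index, and grouping back per row. A rough standalone sketch of that pattern (toy lines, not the sgis implementation itself):

    import geopandas as gpd
    from shapely.geometry import LineString

    # Two toy lines; the second has three vertices but still only two endpoints.
    lines = gpd.GeoDataFrame(
        geometry=[LineString([(0, 0), (1, 1)]), LineString([(2, 2), (3, 3), (4, 4)])]
    )

    # boundary -> the two endpoints per line; explode keeps the row index,
    # so groupby(level=0) can pick the first/last endpoint of each row.
    endpoints = lines.geometry.boundary.explode(ignore_index=False)
    lines["source_wkt"] = endpoints.groupby(level=0).first().to_wkt()
    lines["target_wkt"] = endpoints.groupby(level=0).last().to_wkt()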


@@ -889,18 +897,16 @@ def _prepare_make_edge_cols(lines: GeoDataFrame) -> tuple[GeoDataFrame, GeoDataF
  "Try using: to_single_geom_type(gdf, 'lines')."
  )

- geom_col = lines._geometry_column_name
-
  # some LineStrings are in fact rings and must be removed manually
  lines, _ = split_out_circles(lines)

- endpoints = lines[geom_col].boundary.explode(ignore_index=True)
+ endpoints = lines.geometry.boundary.explode(ignore_index=False)

  if len(lines) and len(endpoints) / len(lines) != 2:
  raise ValueError(
  "The lines should have only two endpoints each. "
  "Try splitting multilinestrings with explode.",
- lines[geom_col],
+ lines.geometry,
  )

  return lines, endpoints
@@ -910,7 +916,7 @@ def _prepare_make_edge_cols_simple(
  lines: GeoDataFrame,
  ) -> tuple[GeoDataFrame, GeoDataFrame]:
  """Faster version of _prepare_make_edge_cols."""
- endpoints = lines[lines._geometry_column_name].boundary.explode(ignore_index=True)
+ endpoints = lines.geometry.boundary.explode(ignore_index=False)

  if len(lines) and len(endpoints) / len(lines) != 2:
  raise ValueError(
@@ -5,12 +5,15 @@ import pandas as pd
  from geopandas import GeoDataFrame
  from geopandas import GeoSeries
  from shapely import distance
- from shapely import union_all
  from shapely.ops import nearest_points

+ from ..conf import _get_instance
+ from ..conf import config
+ from ..geopandas_tools.conversion import to_geoseries
  from ..geopandas_tools.geometry_types import get_geom_type
  from ..geopandas_tools.geometry_types import to_single_geom_type
  from ..geopandas_tools.polygon_operations import PolygonsAsRings
+ from ..geopandas_tools.runners import RTreeQueryRunner


  def snap_within_distance(
@@ -19,6 +22,8 @@ def snap_within_distance(
  max_distance: int | float,
  *,
  distance_col: str | None = None,
+ n_jobs: int = 1,
+ rtree_runner: RTreeQueryRunner | None = None,
  ) -> GeoDataFrame | GeoSeries:
  """Snaps points to nearest geometry if within given distance.

@@ -33,6 +38,9 @@ def snap_within_distance(
  distance_col: Name of column with the snap distance. Defaults to
  'snap_distance'. Set to None to not get any distance column. This will make
  the function a bit faster.
+ n_jobs: Number of workers.
+ rtree_runner: Optionally debug/manipulate the spatial indexing operations.
+ See the 'runners' module for example implementations.

  Returns:
  A GeoDataFrame or GeoSeries with the points snapped to the nearest point in the
@@ -80,22 +88,31 @@ def snap_within_distance(
  """
  to = _polygons_to_rings(to)

+ if not isinstance(to, GeoSeries):
+ to = to_geoseries(to)
+
  if not distance_col and not isinstance(points, GeoDataFrame):
  return _shapely_snap(
  points=points,
- to=to,
+ to=to.values,
  max_distance=max_distance,
+ rtree_runner=rtree_runner,
+ n_jobs=n_jobs,
  )
- elif not isinstance(points, GeoDataFrame):
- points = points.to_frame()

  copied = points.copy()

- copied.geometry = _shapely_snap(
- points=copied.geometry.values,
- to=to,
+ snapped = _shapely_snap(
+ points=copied.geometry,
+ to=to.values,
  max_distance=max_distance,
+ rtree_runner=rtree_runner,
+ n_jobs=n_jobs,
  )
+ if isinstance(copied, GeoSeries):
+ copied = snapped.to_frame("geometry")
+ else:
+ copied.geometry = snapped

  if distance_col:
  copied[distance_col] = copied.distance(points)
@@ -111,6 +128,8 @@ def snap_all(
  to: GeoDataFrame | GeoSeries,
  *,
  distance_col: str | None = None,
+ n_jobs: int = 1,
+ rtree_runner: RTreeQueryRunner | None = None,
  ) -> GeoDataFrame | GeoSeries:
  """Snaps points to the nearest geometry.

@@ -121,6 +140,9 @@ def snap_all(
  points: The GeoDataFrame of points to snap.
  to: The GeoDataFrame to snap to.
  distance_col: Name of column with the snap distance. Defaults to None.
+ n_jobs: Number of workers.
+ rtree_runner: Optionally debug/manipulate the spatial indexing operations.
+ See the 'runners' module for example implementations.

  Returns:
  A GeoDataFrame or GeoSeries with the points snapped to the nearest point in the
@@ -159,30 +181,15 @@ def snap_all(
  0 POINT (2.00000 2.00000) 2.828427
  1 POINT (2.00000 2.00000) 1.414214
  """
- to = _polygons_to_rings(to)
-
- if not isinstance(points, GeoDataFrame):
- return _shapely_snap(
- points=points,
- to=to,
- max_distance=None,
- )
-
- copied = points.copy()
-
- copied.geometry = _shapely_snap(
- points=copied.geometry.values,
- to=to,
+ return snap_within_distance(
+ points,
+ to,
  max_distance=None,
+ distance_col=distance_col,
+ rtree_runner=rtree_runner,
+ n_jobs=n_jobs,
  )

- if distance_col:
- copied[distance_col] = copied.distance(points)
- copied[distance_col] = np.where(
- copied[distance_col] == 0, pd.NA, copied[distance_col]
- )
- return copied
-

  def _polygons_to_rings(gdf: GeoDataFrame) -> GeoDataFrame:
  if get_geom_type(gdf) == "polygon":
@@ -197,17 +204,19 @@ def _polygons_to_rings(gdf: GeoDataFrame) -> GeoDataFrame:

  def _shapely_snap(
  points: np.ndarray | GeoSeries,
- to: GeoSeries | GeoDataFrame,
+ to: np.ndarray | GeoSeries,
+ *,
+ rtree_runner: RTreeQueryRunner | None,
+ n_jobs: int,
  max_distance: int | float | None = None,
  ) -> GeoSeries:
- try:
- unioned = union_all(to.geometry.values)
- except AttributeError:
- unioned = union_all(to)
+ if rtree_runner is None:
+ rtree_runner = _get_instance(config, "rtree_runner", n_jobs=n_jobs)

- nearest = nearest_points(points, unioned)[1]
+ nearest_indices = rtree_runner.run(points, to, method="nearest")
+ nearest = nearest_points(points, to[nearest_indices])[1]

- if not max_distance:
+ if max_distance is None:
  return nearest

  distances = distance(points, nearest)
@@ -219,6 +228,6 @@ def _shapely_snap(
  )

  if isinstance(points, GeoSeries):
- return GeoSeries(points, crs=points.crs, index=points.index, name=points.name)
+ return GeoSeries(snapped, crs=points.crs, index=points.index, name=points.name)

  return points.__class__(snapped)
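
The rewritten _shapely_snap above swaps the old union_all + nearest_points approach for an rtree 'nearest' query followed by a distance cutoff. A minimal sketch of that pattern with plain shapely (made-up geometries, no sgis runner):

    import numpy as np
    import shapely
    from shapely.ops import nearest_points

    points = shapely.points([(0, 0), (5, 5)])
    candidates = np.array([shapely.LineString([(0, 1), (2, 1)]), shapely.Point(4, 4)])

    # Index of the nearest candidate per point, then the nearest point on it.
    tree = shapely.STRtree(candidates)
    nearest_idx = tree.nearest(points)
    nearest = nearest_points(points, candidates[nearest_idx])[1]

    # Only snap points that lie within max_distance of their nearest candidate.
    max_distance = 1.5
    dists = shapely.distance(points, nearest)
    snapped = np.where(dists <= max_distance, nearest, points)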
@@ -177,10 +177,16 @@ def _strtree_query(
  ):
  tree = STRtree(arr2)
  func = getattr(tree, method)
- left, right = func(arr1, **kwargs)
+ results = func(arr1, **kwargs)
+ if results.ndim == 2:
+ left, right = results
+ else:
+ left = results
  if indices1 is not None:
  index_mapper1 = {i: x for i, x in enumerate(indices1)}
  left = np.array([index_mapper1[i] for i in left])
+ if results.ndim == 1:
+ return left
  if indices2 is not None:
  index_mapper2 = {i: x for i, x in enumerate(indices2)}
  right = np.array([index_mapper2[i] for i in right])
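
The new ndim checks handle the two result shapes the STRtree methods return. A quick illustration with small made-up inputs:

    import shapely

    pts = shapely.points([(0, 0), (2, 2)])
    tree = shapely.STRtree(shapely.points([(0, 0), (1, 1), (3, 3)]))

    # 'query' returns a (2, n_pairs) array: input indices on top, tree indices below.
    pairs = tree.query(pts, predicate="dwithin", distance=1.5)
    print(pairs.ndim)  # 2

    # 'nearest' returns a flat array with one tree index per input geometry.
    nearest = tree.nearest(pts)
    print(nearest.ndim)  # 1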
@@ -228,6 +234,11 @@ class RTreeQueryRunner(AbstractRunner):
  )
  for chunk in chunks
  )
+ results = np.concatenate(results)
+ if results.ndim == 2:
+ left, right = results
+ return left, right
+ return results
  left = np.concatenate([x[0] for x in results])
  right = np.concatenate([x[1] for x in results])
  return left, right
@@ -248,6 +259,11 @@ class RTreeQueryRunner(AbstractRunner):
  )
  for chunk in chunks
  )
+ results = np.concatenate(results)
+ if results.ndim == 2:
+ left, right = results
+ return left, right
+ return results
  left = np.concatenate([x[0] for x in results])
  right = np.concatenate([x[1] for x in results])
  return left, right
@@ -1,10 +1,12 @@
+ from collections.abc import Iterable
+
  import geopandas as gpd
  import numpy as np
  import pandas as pd
+ import shapely
  from geopandas import GeoDataFrame
  from igraph import Graph
  from pandas import DataFrame
- from shapely import shortest_line


  def _od_cost_matrix(
@@ -32,82 +34,43 @@ def _od_cost_matrix(
  )
  results = rowwise_df.merge(results, on=["origin", "destination"], how="left")

- results["wkt_ori"] = results["origin"].map(origins.geometry)
- results["wkt_des"] = results["destination"].map(destinations.geometry)
-
- results.loc[results.wkt_ori == results.wkt_des, weight] = 0
+ results["geom_ori"] = results["origin"].map(origins.geometry)
+ results["geom_des"] = results["destination"].map(destinations.geometry)

  # straight lines between origin and destination
  if lines:
- results["geometry"] = shortest_line(results["wkt_ori"], results["wkt_des"])
+ results["geometry"] = shapely.shortest_line(
+ results["geom_ori"], results["geom_des"]
+ )
  results = gpd.GeoDataFrame(results, geometry="geometry", crs=25833)

- results = results.drop(["wkt_ori", "wkt_des"], axis=1, errors="ignore")
+ results.loc[
+ shapely.to_wkb(results["geom_ori"]) == shapely.to_wkb(results["geom_des"]),
+ weight,
+ ] = 0

- return results.reset_index(drop=True)
+ return results.drop(["geom_ori", "geom_des"], axis=1, errors="ignore").reset_index(
+ drop=True
+ )


  def _get_od_df(
- graph: Graph, origins: GeoDataFrame, destinations: GeoDataFrame, weight_col: str
+ graph: Graph, origins: Iterable[str], destinations: Iterable[str], weight_col: str
  ) -> pd.DataFrame:
  distances: list[list[float]] = graph.distances(
  weights="weight",
  source=origins,
  target=destinations,
+ algorithm="dijkstra",
  )

- ori_idx, des_idx, costs = [], [], []
- for i, f_idx in enumerate(origins):
- for j, t_idx in enumerate(destinations):
- ori_idx.append(f_idx)
- des_idx.append(t_idx)
- costs.append(distances[i][j])
-
- return (
- pd.DataFrame(
- data={"origin": ori_idx, "destination": des_idx, weight_col: costs}
- )
- .replace([np.inf, -np.inf], np.nan)
- .reset_index(drop=True)
+ costs = np.array(
+ [distances[i][j] for j in range(len(destinations)) for i in range(len(origins))]
  )
+ costs[(costs == np.inf) | (costs == -np.inf)] = np.nan
+ ori_idx = np.array([x for _ in range(len(destinations)) for x in origins])
+ des_idx = np.array([x for x in destinations for _ in range(len(origins))])

-
- def _get_one_od_df(
- graph: Graph, origins: GeoDataFrame, destinations: GeoDataFrame, weight_col: str
- ) -> pd.DataFrame:
- distances: list[list[float]] = graph.distances(
- weights="weight",
- source=origins,
- target=destinations,
+ return pd.DataFrame(
+ data={"origin": ori_idx, "destination": des_idx, weight_col: costs}
  )
-
- ori_idx, des_idx, costs = [], [], []
- for i, f_idx in enumerate(origins):
- for j, t_idx in enumerate(destinations):
- ori_idx.append(f_idx)
- des_idx.append(t_idx)
- costs.append(distances[i][j])
-
- return (
- pd.DataFrame(
- data={"origin": ori_idx, "destination": des_idx, weight_col: costs}
- )
- .replace([np.inf, -np.inf], np.nan)
- .reset_index(drop=True)
- )
-
-
- # def _get_od_df(
- # graph: Graph,
- # origins: GeoDataFrame,
- # destinations: GeoDataFrame,
- # weight_col: str,
- # ) -> pd.DataFrame:
- # from ..parallel.parallel import Parallel
-
- # results: list[pd.DataFrame] = Parallel(40, backend="loky").map(
- # _get_one_od_df,
- # [origins[origins.index == i] for i in origins.index.unique()],
- # kwargs=dict(graph=graph, destinations=destinations, weight_col=weight_col),
- # )
- # return pd.concat(results, ignore_index=True)
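
The rewritten _get_od_df builds the long-format table with array operations instead of nested loops, pairing every origin with every destination in 'all origins per destination' order. A small worked example of that ordering with hypothetical ids and costs:

    import numpy as np
    import pandas as pd

    origins = ["o1", "o2"]
    destinations = ["d1", "d2", "d3"]
    # distances[i][j] = cost from origins[i] to destinations[j]; inf means unreachable.
    distances = np.array([[1.0, 2.0, np.inf],
                          [4.0, 5.0, 6.0]])

    costs = distances.ravel(order="F")    # column-major: o1-d1, o2-d1, o1-d2, ...
    costs[~np.isfinite(costs)] = np.nan   # unreachable pairs become missing
    od = pd.DataFrame({
        "origin": np.tile(origins, len(destinations)),
        "destination": np.repeat(destinations, len(origins)),
        "cost": costs,
    })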
@@ -1396,8 +1396,6 @@ class NetworkAnalysis:
  directed=self.rules.directed,
  )

- self._add_missing_vertices()
-
  self._graph_updated_count += 1

  self._update_wkts()
@@ -1438,6 +1436,7 @@ class NetworkAnalysis:
  weights = weights + weights_start

  if self.destinations is None:
+ edges, weights = self._add_fake_edges(edges, weights)
  edge_ids = self.network._create_edge_ids(edges, weights)
  return edges, weights, edge_ids

@@ -1450,10 +1449,29 @@ class NetworkAnalysis:
  edges = edges + edges_end
  weights = weights + weights_end

+ edges, weights = self._add_fake_edges(edges, weights)
  edge_ids = self.network._create_edge_ids(edges, weights)

  return edges, weights, edge_ids

+ def _add_fake_edges(self, edges, weights):
+ """Add edges for origins and destinations to not get error in igraph because of non-existing nodes."""
+ nodes = {x[0] for x in edges} | {x[1] for x in edges}
+
+ fake_edges = [
+ (idx, idx)
+ for idx in list(self.origins.gdf["temp_idx"])
+ + list(
+ self.destinations.gdf["temp_idx"]
+ if self.destinations is not None
+ else []
+ )
+ if idx not in nodes
+ ]
+ edges = edges + fake_edges
+ weights = weights + [0 for _ in fake_edges]
+ return edges, weights
+
  def _split_lines(self) -> None:
  if self.destinations is not None:
  points = pd.concat(
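
The new _add_fake_edges helper takes over from _add_missing_vertices: origin/destination ids that are not endpoints of any real edge get a zero-weight self-loop before the graph is built, so igraph knows the vertices exist and routing to them simply returns inf. A rough standalone sketch of the idea (toy edges and ids):

    import igraph

    edges = [("a", "b"), ("b", "c")]
    weights = [1.0, 2.0]
    point_ids = ["a", "d"]  # "d" got no connecting edge within the search tolerance

    # Zero-weight self-loop for every id missing from the edge list.
    nodes = {x for edge in edges for x in edge}
    fake_edges = [(idx, idx) for idx in point_ids if idx not in nodes]
    edges = edges + fake_edges
    weights = weights + [0.0] * len(fake_edges)

    graph = igraph.Graph.TupleList(edges, directed=True)
    graph.es["weight"] = weights
    print(graph.distances(weights="weight", source=["d"], target=["a"]))  # [[inf]]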
@@ -1497,31 +1515,6 @@ class NetworkAnalysis:
  ).drop("temp_idx__", axis=1)
  del self.network._not_splitted

- def _add_missing_vertices(self):
- """Adds the missing points.
-
- Nodes that had no nodes within the search_tolerance are added to the graph.
- To not get an error when running the distance calculation.
- """
- # TODO: either check if any() beforehand, or add fictional edges before
- # making the graph, to make things faster
- # (this method took 64.660 out of 500 seconds)
- self.graph.add_vertices(
- [
- idx
- for idx in self.origins.gdf["temp_idx"]
- if idx not in self.graph.vs["name"]
- ]
- )
- if self.destinations is not None:
- self.graph.add_vertices(
- [
- idx
- for idx in self.destinations.gdf["temp_idx"]
- if idx not in self.graph.vs["name"]
- ]
- )
-
  @staticmethod
  def _make_graph(
  edges: list[tuple[str, ...]] | np.ndarray[tuple[str, ...]],
@@ -1530,10 +1523,8 @@ class NetworkAnalysis:
  directed: bool,
  ) -> Graph:
  """Creates an igraph Graph from a list of edges and weights."""
- assert len(edges) == len(weights)
-
+ assert len(edges) == len(weights) == len(edge_ids)
  graph = igraph.Graph.TupleList(edges, directed=directed)
-
  graph.es["weight"] = weights
  graph.es["src_tgt_wt"] = edge_ids
  graph.es["edge_tuples"] = edges
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ssb-sgis
- Version: 1.3.5
+ Version: 1.3.6
  Summary: GIS functions used at Statistics Norway.
  Home-page: https://github.com/statisticsnorway/ssb-sgis
  License: MIT
@@ -7,17 +7,17 @@ sgis/geopandas_tools/bounds.py,sha256=Os9o4EZ1ax38reW-9cdJzgKHoX2il9lUEIZ7wc5CpB
  sgis/geopandas_tools/buffer_dissolve_explode.py,sha256=z9HvakazR_prXH862e8-gEe7UFbeI4rRTbUaBgPeMBk,19552
  sgis/geopandas_tools/centerlines.py,sha256=Q65Sx01SeAlulBEd9oaZkB2maBBNdLcJwAbTILg4SPU,11848
  sgis/geopandas_tools/cleaning.py,sha256=fST0xFztmyn-QUOAfvjZmu7aO_zPiolWK7gd7TR6ffI,24393
- sgis/geopandas_tools/conversion.py,sha256=iX954YEpobmn_R1Ecx_zDU1RlWq_67pfbzMXtwTH04I,24162
- sgis/geopandas_tools/duplicates.py,sha256=TDDM4u1n7SIkyJrOfl1Lno92AmUPqtXBHsj1IUKC0hI,14992
- sgis/geopandas_tools/general.py,sha256=DTQM8p-krgR3gA3miP931eCrxDCpF1ya7dRiMy3K_bs,44099
+ sgis/geopandas_tools/conversion.py,sha256=viZz9t1Gi-8nJ9GwZlCVvObXO52VTODkTZ_F3b0gLek,24183
+ sgis/geopandas_tools/duplicates.py,sha256=AU8J2X3sUuohRDsQOc9TA5K0k2KHDlGLbvb6eMSQvvc,15893
+ sgis/geopandas_tools/general.py,sha256=B4houRK2zDrlarhihQs4w6ITfh6UZHhUciB120bbniI,44447
  sgis/geopandas_tools/geocoding.py,sha256=sZjUW52ULhQWDLmU51C9_itBePkDuWkp8swvYaiYmJk,679
  sgis/geopandas_tools/geometry_types.py,sha256=ijQDbQaZPqPGjBl707H4yooNXpk21RXyatI7itnvqLk,7603
  sgis/geopandas_tools/neighbors.py,sha256=VZGOwwC3-C6KpwLQ3j0K5cOVInmckxIXoGMqPGkemk4,17606
  sgis/geopandas_tools/overlay.py,sha256=uR9troLM9amo-z5Bs6jhPtFbr0RrDFsW-WJpeuQ4WSQ,23522
- sgis/geopandas_tools/point_operations.py,sha256=JM4hvfIVxZaZdGNlGzcCurrKzkgC_b9hzbFYN42f9WY,6972
+ sgis/geopandas_tools/point_operations.py,sha256=R_39MoMIMsTB0mxmWibZxfUoUZUriuOoxjMNC8mFCds,7624
  sgis/geopandas_tools/polygon_operations.py,sha256=v-B9IgbFfm4dVHKPyzvmnNiqVCdtl9ddpCsQpZZ-9sU,49284
  sgis/geopandas_tools/polygons_as_rings.py,sha256=BX_GZS6F9I4NbEpiOlNBd7zywJjdfdJVi_MkeONBuiM,14941
- sgis/geopandas_tools/runners.py,sha256=J4lH0RXYDYTLVeQFgNv8gEY0E97QGIQ4zPW5vfoxgDU,12979
+ sgis/geopandas_tools/runners.py,sha256=ZVJnUoJAgc0K_UNIIBOwUWaQDCKCzebygGMu2Fdw6LA,13470
  sgis/geopandas_tools/sfilter.py,sha256=CZ_-c4t1CQCwJ7RHCKo1Na9u-aAg18xXnJAMiUqoaj8,10411
  sgis/geopandas_tools/utils.py,sha256=X0pRvB1tWgV_0BCrRS1HU9LtLGnZCpvVPxyqM9JGb0Y,1415
  sgis/helpers.py,sha256=4N6vFWQ3TYVzRHNcWY_fNa_GkFuaZB3vtCkkFND-qs0,9628
@@ -39,7 +39,7 @@ sgis/maps/tilesources.py,sha256=F4mFHxPwkiPJdVKzNkScTX6xbJAMIUtlTq4mQ83oguw,1746
  sgis/maps/wms.py,sha256=sCVpKxH1Rsd14GECW7BFh8yaWngpVWYvw9Yhuez1yW8,12482
  sgis/networkanalysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sgis/networkanalysis/_get_route.py,sha256=3m5xQdQqGtt51vcI5fcmYQAOQCeBtL6sorDoPxBNf84,7818
- sgis/networkanalysis/_od_cost_matrix.py,sha256=zkyPX7ObT996ahaFJ2oI0D0SqQWbWyfy_qLtXwValPg,3434
+ sgis/networkanalysis/_od_cost_matrix.py,sha256=u2UtgZ0GvvCE1PNvKpB3lM35RCcFKXqtC3NLYWX_jmA,2348
  sgis/networkanalysis/_points.py,sha256=128QEepBGAyI5XakMUVvNyOnJE0Ts7hvGiVUt1YTXiU,4439
  sgis/networkanalysis/_service_area.py,sha256=-xSdiZrZ3uqAhd4ZaBY8a8fMJjN09MPoYxlVMXm3UZk,5642
  sgis/networkanalysis/closing_network_holes.py,sha256=FYZ677nRwLmDkP6bQ1ssQ_h29RzAG463U4xmbu5ksfg,14572
@@ -47,7 +47,7 @@ sgis/networkanalysis/cutting_lines.py,sha256=ZQAt0cufaPeNAEqUzp-imu26AIL9S5-lw6X
  sgis/networkanalysis/directednetwork.py,sha256=Mrc2zHip4P5RNxnyffKm-xU832AVQeSHz-YZueAc0pM,11413
  sgis/networkanalysis/finding_isolated_networks.py,sha256=Wg4ILhm7uS9RLOGcL0WN8uZBMJYjdljJc8L5DU5nIPY,3754
  sgis/networkanalysis/network.py,sha256=zV9bAbVdTgTohg2o2RFGy2uhOJrd3Ma57hwIAStxMAQ,7847
- sgis/networkanalysis/networkanalysis.py,sha256=P2kqE1kBxabBnv4h6ort1IiO4W0XlsJn5DrT7IbeG5w,68651
+ sgis/networkanalysis/networkanalysis.py,sha256=5LFCapexrkO97ZyAV2-0lwEPHfVQrdQ4Q1A2eRQBOVo,68498
  sgis/networkanalysis/networkanalysisrules.py,sha256=na26iZTpnlwwIEEGswoqMH4W1ZOLN2GIvACZbxn8GPw,12958
  sgis/networkanalysis/nodes.py,sha256=atFSpqz-_uJHMrf6MC0zhrrcWIydRMFZrsaHC2xr1GU,3374
  sgis/networkanalysis/traveling_salesman.py,sha256=Jjo6bHY4KJ-eK0LycyTy0sWxZjgITs5MBllZ_G9FhTE,5655
@@ -61,7 +61,7 @@ sgis/raster/indices.py,sha256=efJmgfPg_VuSzXFosXV661IendF8CwPFWtMhyP4TMUg,222
  sgis/raster/regex.py,sha256=4idTJ9vFtsGtbxcjJrx2VrpJJuDMP3bLdqF93Vc_cmY,3752
  sgis/raster/sentinel_config.py,sha256=nySDqn2R8M6W8jguoBeSAK_zzbAsqmaI59i32446FwY,1268
  sgis/raster/zonal.py,sha256=D4Gyptw-yOLTCO41peIuYbY-DANsJCG19xXDlf1QAz4,2299
- ssb_sgis-1.3.5.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
- ssb_sgis-1.3.5.dist-info/METADATA,sha256=TbqucBYChWXB_Xw2eFf9PZwQoguEWeKGav18lFbhUfI,11495
- ssb_sgis-1.3.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- ssb_sgis-1.3.5.dist-info/RECORD,,
+ ssb_sgis-1.3.6.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
+ ssb_sgis-1.3.6.dist-info/METADATA,sha256=M-4XAxb5WFhaHhbzq4IIb2oE0Ww8gNhbJSqFKjoPnSs,11495
+ ssb_sgis-1.3.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ ssb_sgis-1.3.6.dist-info/RECORD,,