ssb-sgis 1.3.5__py3-none-any.whl → 1.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -67,7 +67,7 @@ def to_geoseries(obj: Any, crs: Any | None = None) -> GeoSeries:
  # pandas objects
  index = obj.index
  else:
- # list
+ # list (has method 'index')
  index = None
  except AttributeError:
  index = None
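
The updated comment points out that a plain list also exposes an `index` attribute (the built-in `list.index` method), so attribute access alone cannot distinguish pandas objects from lists. A minimal illustration of the distinction (not the package's code):

```python
import pandas as pd

obj = [1, 2, 3]
print(obj.index)             # <built-in method index of list object ...>, no AttributeError
print(pd.Series(obj).index)  # RangeIndex(start=0, stop=3, step=1)
```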
@@ -38,8 +38,9 @@ def update_geometries(
  ) -> GeoDataFrame:
  """Puts geometries on top of each other rowwise.

- Since this operation is done rowwise, it's important to
- first sort the GeoDataFrame approriately. See example below.
+ IMPORTANT: Since this operation is done rowwise, meaning the top rows
+ are put on top of the bottom rows, it is important to first sort the
+ GeoDataFrame appropriately. See examples below.

  Args:
  gdf: The GeoDataFrame to be updated.
@@ -63,6 +64,22 @@ def update_geometries(
  Example:
  --------
+ Create some overlapping circles and update the geometries based on area.
+ Sorting the data small to large might be the appropriate choice if you want
+ to keep all details in the coverage and no attribute should be given
+ priority over another.
+
+ >>> coords = [(0, 0), (0, 1), (1, 1), (1, 0)]
+ >>> buffers = [0.9, 1.3, 0.7, 1.1]
+ >>> circles = sg.to_gdf(coords)
+ >>> circles["geometry"] = circles["geometry"].buffer(buffers)
+ >>> updated_smallest_first = sg.update_geometries(sg.sort_small_first(circles))
+ >>> updated_largest_first = sg.update_geometries(sg.sort_large_first(circles))
+ >>> sg.explore(circles, updated_smallest_first, updated_largest_first, tiles=["dark"])
+
+ If you want to prioritize geometries based on attributes:
+
+ >>> circles["hva"] = ["skog", "elv", "bro", "tunnel"]
+
  Create two circles and get the overlap.

  >>> import sgis as sg
@@ -123,7 +140,7 @@ def update_geometries(
  )

  geom_col = copied._geometry_column_name
- index_mapper = {i: idx for i, idx in enumerate(copied.index)}
+ index_mapper = dict(enumerate(copied.index))
  copied = copied.reset_index(drop=True)

  left, right = rtree_runner.run(
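
The replacement is behaviorally identical: `dict(enumerate(...))` builds the same position-to-label mapping as the dict comprehension it replaces. A quick check with a toy index (not the package's data):

```python
import pandas as pd

index = pd.Index(["a", "b", "b"])
assert dict(enumerate(index)) == {i: idx for i, idx in enumerate(index)}
# {0: 'a', 1: 'b', 2: 'b'}: positions map back to the original labels
```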
@@ -688,8 +688,7 @@ def _split_lines_by_points_along_line(lines, points, splitted_col: str | None =
  relevant_lines.geometry = shapely.force_2d(relevant_lines.geometry)
  points.geometry = shapely.force_2d(points.geometry)

- # split the lines with buffer + difference, since shaply.split usually doesn't work
- # relevant_lines["_idx"] = range(len(relevant_lines))
+ # split the lines with tiny buffer + difference, since shapely.split usually doesn't work
  splitted = relevant_lines.overlay(points_buff, how="difference").explode(
  ignore_index=True
  )
@@ -703,8 +702,9 @@ def _split_lines_by_points_along_line(lines, points, splitted_col: str | None =
  if not len(splitted):
  return pd.concat([the_other_lines, circles], ignore_index=True)

- # the endpoints of the new lines are now sligtly off. Using get_k_nearest_neighbors
- # to get the exact snapped point coordinates, . This will map the sligtly
+ # the endpoints of the new lines are now slightly off because of the buffer.
+ # Using get_k_nearest_neighbors
+ # to get the exact snapped point coordinates. This will map the slightly
  # wrong line endpoints with the point the line was split by.

  points["point_coords"] = [(geom.x, geom.y) for geom in points.geometry]
@@ -721,7 +721,6 @@ def _split_lines_by_points_along_line(lines, points, splitted_col: str | None =
  lambda x: x["distance"] <= precision * 2
  ]

- # points = points.set_index("point_coords")
  points.index = points.geometry
  dists_source = get_nearest(splitted_source, points)
  dists_target = get_nearest(splitted_target, points)
@@ -828,16 +827,21 @@ def make_edge_coords_cols(gdf: GeoDataFrame) -> GeoDataFrame:
  Returns:
  A GeoDataFrame with new columns 'source_coords' and 'target_coords'
  """
+ if not gdf.index.is_unique:
+ index_mapper = dict(enumerate(gdf.index))
+ gdf = gdf.reset_index(drop=True)
+ else:
+ index_mapper = None
  try:
  gdf, endpoints = _prepare_make_edge_cols_simple(gdf)
  except ValueError:
  gdf, endpoints = _prepare_make_edge_cols(gdf)

- coords = [(geom.x, geom.y) for geom in endpoints.geometry]
- gdf["source_coords"], gdf["target_coords"] = (
- coords[0::2],
- coords[1::2],
- )
+ gdf["source_coords"] = endpoints.groupby(level=0).first()
+ gdf["target_coords"] = endpoints.groupby(level=0).last()
+
+ if index_mapper is not None:
+ gdf.index = gdf.index.map(index_mapper)

  return gdf

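Both make_edge_coords_cols here and make_edge_wkt_cols below now guard against non-unique indexes with the same reset-and-restore pattern. A self-contained sketch of that round trip, using a toy frame:

```python
import pandas as pd

df = pd.DataFrame({"value": [10, 20, 30]}, index=["a", "a", "b"])  # non-unique

index_mapper = dict(enumerate(df.index))  # {0: 'a', 1: 'a', 2: 'b'}
df = df.reset_index(drop=True)            # unique RangeIndex while working

# ... positional operations that require a unique index ...

df.index = df.index.map(index_mapper)     # original labels restored
```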
@@ -855,19 +859,22 @@ def make_edge_wkt_cols(gdf: GeoDataFrame) -> GeoDataFrame:
  Returns:
  A GeoDataFrame with new columns 'source_wkt' and 'target_wkt'
  """
+ if not gdf.index.is_unique:
+ index_mapper = dict(enumerate(gdf.index))
+ gdf = gdf.reset_index(drop=True)
+ else:
+ index_mapper = None
  try:
  gdf, endpoints = _prepare_make_edge_cols_simple(gdf)
  except ValueError:
  gdf, endpoints = _prepare_make_edge_cols(gdf)

- wkt_geom = [
- f"POINT ({x} {y})" for x, y in zip(endpoints.x, endpoints.y, strict=True)
- ]
- gdf["source_wkt"], gdf["target_wkt"] = (
- wkt_geom[0::2],
- wkt_geom[1::2],
- )
+ endpoints = endpoints.force_2d().to_wkt()
+ gdf["source_wkt"] = endpoints.groupby(level=0).first()
+ gdf["target_wkt"] = endpoints.groupby(level=0).last()

+ if index_mapper is not None:
+ gdf.index = gdf.index.map(index_mapper)
  return gdf


@@ -889,18 +896,16 @@ def _prepare_make_edge_cols(lines: GeoDataFrame) -> tuple[GeoDataFrame, GeoDataF
  "Try using: to_single_geom_type(gdf, 'lines')."
  )

- geom_col = lines._geometry_column_name
-
  # some LineStrings are in fact rings and must be removed manually
  lines, _ = split_out_circles(lines)

- endpoints = lines[geom_col].boundary.explode(ignore_index=True)
+ endpoints = lines.geometry.boundary.explode(ignore_index=False)

  if len(lines) and len(endpoints) / len(lines) != 2:
  raise ValueError(
  "The lines should have only two endpoints each. "
  "Try splitting multilinestrings with explode.",
- lines[geom_col],
+ lines.geometry,
  )

  return lines, endpoints
@@ -910,7 +915,7 @@ def _prepare_make_edge_cols_simple(
  lines: GeoDataFrame,
  ) -> tuple[GeoDataFrame, GeoDataFrame]:
  """Faster version of _prepare_make_edge_cols."""
- endpoints = lines[lines._geometry_column_name].boundary.explode(ignore_index=True)
+ endpoints = lines.geometry.boundary.explode(ignore_index=False)

  if len(lines) and len(endpoints) / len(lines) != 2:
  raise ValueError(
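
Switching to `explode(ignore_index=False)` is what makes the new `groupby(level=0)` assignments in make_edge_coords_cols and make_edge_wkt_cols work: both endpoints of a line keep the parent row's index label. A minimal geopandas sketch with toy lines (not the package's code):

```python
import geopandas as gpd
from shapely.geometry import LineString

lines = gpd.GeoDataFrame(
    geometry=[LineString([(0, 0), (1, 1)]), LineString([(2, 2), (3, 3)])]
)
# .boundary gives a MultiPoint of the two endpoints per line;
# ignore_index=False keeps the parent row label on both parts
endpoints = lines.geometry.boundary.explode(ignore_index=False)
source = endpoints.groupby(level=0).first()  # first endpoint per line
target = endpoints.groupby(level=0).last()   # last endpoint per line
```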
@@ -5,12 +5,15 @@ import pandas as pd
  from geopandas import GeoDataFrame
  from geopandas import GeoSeries
  from shapely import distance
- from shapely import union_all
  from shapely.ops import nearest_points

+ from ..conf import _get_instance
+ from ..conf import config
+ from ..geopandas_tools.conversion import to_geoseries
  from ..geopandas_tools.geometry_types import get_geom_type
  from ..geopandas_tools.geometry_types import to_single_geom_type
  from ..geopandas_tools.polygon_operations import PolygonsAsRings
+ from ..geopandas_tools.runners import RTreeQueryRunner


  def snap_within_distance(
@@ -19,6 +22,8 @@ def snap_within_distance(
  max_distance: int | float,
  *,
  distance_col: str | None = None,
+ n_jobs: int = 1,
+ rtree_runner: RTreeQueryRunner | None = None,
  ) -> GeoDataFrame | GeoSeries:
  """Snaps points to nearest geometry if within given distance.

@@ -33,6 +38,9 @@ def snap_within_distance(
  distance_col: Name of column with the snap distance. Defaults to
  'snap_distance'. Set to None to not get any distance column. This will make
  the function a bit faster.
+ n_jobs: Number of workers.
+ rtree_runner: Optional runner for debugging/manipulating the spatial
+ indexing operations. See the 'runners' module for example implementations.

  Returns:
  A GeoDataFrame or GeoSeries with the points snapped to the nearest point in the
@@ -80,22 +88,31 @@ def snap_within_distance(
  """
  to = _polygons_to_rings(to)

+ if not isinstance(to, GeoSeries):
+ to = to_geoseries(to)
+
  if not distance_col and not isinstance(points, GeoDataFrame):
  return _shapely_snap(
  points=points,
- to=to,
+ to=to.values,
  max_distance=max_distance,
+ rtree_runner=rtree_runner,
+ n_jobs=n_jobs,
  )
- elif not isinstance(points, GeoDataFrame):
- points = points.to_frame()

  copied = points.copy()

- copied.geometry = _shapely_snap(
- points=copied.geometry.values,
- to=to,
+ snapped = _shapely_snap(
+ points=copied.geometry,
+ to=to.values,
  max_distance=max_distance,
+ rtree_runner=rtree_runner,
+ n_jobs=n_jobs,
  )
+ if isinstance(copied, GeoSeries):
+ copied = snapped.to_frame("geometry")
+ else:
+ copied.geometry = snapped

  if distance_col:
  copied[distance_col] = copied.distance(points)
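
A hedged usage sketch of the extended signature, assuming snap_within_distance and to_gdf are exported at the top level as in the package's docstring examples (coordinates made up):

```python
import sgis as sg

points = sg.to_gdf([(0, 0), (1, 1)])
roads = sg.to_gdf([(2, 2), (3, 3)])

snapped = sg.snap_within_distance(
    points, roads, 10, distance_col="snap_distance", n_jobs=4
)
```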
@@ -111,6 +128,8 @@ def snap_all(
  to: GeoDataFrame | GeoSeries,
  *,
  distance_col: str | None = None,
+ n_jobs: int = 1,
+ rtree_runner: RTreeQueryRunner | None = None,
  ) -> GeoDataFrame | GeoSeries:
  """Snaps points to the nearest geometry.

@@ -121,6 +140,9 @@ def snap_all(
  points: The GeoDataFrame of points to snap.
  to: The GeoDataFrame to snap to.
  distance_col: Name of column with the snap distance. Defaults to None.
+ n_jobs: Number of workers.
+ rtree_runner: Optional runner for debugging/manipulating the spatial
+ indexing operations. See the 'runners' module for example implementations.

  Returns:
  A GeoDataFrame or GeoSeries with the points snapped to the nearest point in the
@@ -159,30 +181,15 @@ def snap_all(
  0 POINT (2.00000 2.00000) 2.828427
  1 POINT (2.00000 2.00000) 1.414214
  """
- to = _polygons_to_rings(to)
-
- if not isinstance(points, GeoDataFrame):
- return _shapely_snap(
- points=points,
- to=to,
- max_distance=None,
- )
-
- copied = points.copy()
-
- copied.geometry = _shapely_snap(
- points=copied.geometry.values,
- to=to,
+ return snap_within_distance(
+ points,
+ to,
  max_distance=None,
+ distance_col=distance_col,
+ rtree_runner=rtree_runner,
+ n_jobs=n_jobs,
  )

- if distance_col:
- copied[distance_col] = copied.distance(points)
- copied[distance_col] = np.where(
- copied[distance_col] == 0, pd.NA, copied[distance_col]
- )
- return copied
-

  def _polygons_to_rings(gdf: GeoDataFrame) -> GeoDataFrame:
  if get_geom_type(gdf) == "polygon":
@@ -197,17 +204,19 @@ def _polygons_to_rings(gdf: GeoDataFrame) -> GeoDataFrame:

  def _shapely_snap(
  points: np.ndarray | GeoSeries,
- to: GeoSeries | GeoDataFrame,
+ to: np.ndarray | GeoSeries,
+ *,
+ rtree_runner: RTreeQueryRunner | None,
+ n_jobs: int,
  max_distance: int | float | None = None,
  ) -> GeoSeries:
- try:
- unioned = union_all(to.geometry.values)
- except AttributeError:
- unioned = union_all(to)
+ if rtree_runner is None:
+ rtree_runner = _get_instance(config, "rtree_runner", n_jobs=n_jobs)

- nearest = nearest_points(points, unioned)[1]
+ nearest_indices = rtree_runner.run(points, to, method="nearest")
+ nearest = nearest_points(points, to[nearest_indices])[1]

- if not max_distance:
+ if max_distance is None:
  return nearest

  distances = distance(points, nearest)
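
The rewrite trades the old union-everything-then-measure approach for an r-tree nearest-neighbor query. A standalone shapely 2.x sketch of the same idea, without the package's RTreeQueryRunner wrapper:

```python
import shapely
from shapely.ops import nearest_points

points = shapely.points([(0, 0), (2, 2)])
targets = shapely.points([(0, 1), (5, 5), (2, 3)])

tree = shapely.STRtree(targets)
nearest_indices = tree.nearest(points)  # one target index per input point
snapped = nearest_points(points, targets[nearest_indices])[1]
```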
@@ -219,6 +228,6 @@ def _shapely_snap(
  )

  if isinstance(points, GeoSeries):
- return GeoSeries(points, crs=points.crs, index=points.index, name=points.name)
+ return GeoSeries(snapped, crs=points.crs, index=points.index, name=points.name)

  return points.__class__(snapped)
@@ -177,10 +177,16 @@ def _strtree_query(
  ):
  tree = STRtree(arr2)
  func = getattr(tree, method)
- left, right = func(arr1, **kwargs)
+ results = func(arr1, **kwargs)
+ if results.ndim == 2:
+ left, right = results
+ else:
+ left = results
  if indices1 is not None:
  index_mapper1 = {i: x for i, x in enumerate(indices1)}
  left = np.array([index_mapper1[i] for i in left])
+ if results.ndim == 1:
+ return left
  if indices2 is not None:
  index_mapper2 = {i: x for i, x in enumerate(indices2)}
  right = np.array([index_mapper2[i] for i in right])
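
The `ndim` checks distinguish the two STRtree return shapes: `query` yields a 2 x n array of (input, tree) index pairs, while `nearest` yields a 1-D array with one tree index per input geometry. A small demonstration:

```python
import shapely

tree = shapely.STRtree(shapely.points([(0, 0), (1, 1)]))

queried = tree.query(shapely.points([(0, 0)]), predicate="intersects")
print(queried.shape)  # (2, 1): row 0 = input indices, row 1 = tree indices

nearest = tree.nearest(shapely.points([(0.4, 0.4)]))
print(nearest.shape)  # (1,): one tree index per input geometry
```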
@@ -228,9 +234,11 @@ class RTreeQueryRunner(AbstractRunner):
  )
  for chunk in chunks
  )
- left = np.concatenate([x[0] for x in results])
- right = np.concatenate([x[1] for x in results])
- return left, right
+ results = np.concatenate(results)
+ if results.ndim == 2:
+ left, right = results
+ return left, right
+ return results
  elif (
  (self.n_jobs or 1) > 1
  and len(arr2) / self.n_jobs > 10_000
@@ -248,9 +256,11 @@ class RTreeQueryRunner(AbstractRunner):
  )
  for chunk in chunks
  )
- left = np.concatenate([x[0] for x in results])
- right = np.concatenate([x[1] for x in results])
- return left, right
+ results = np.concatenate(results)
+ if results.ndim == 2:
+ left, right = results
+ return left, right
+ return results

  return _strtree_query(arr1, arr2, method=method, **kwargs)

@@ -121,7 +121,7 @@ def read_geopandas(
  ).replace("==", "=")
  glob_func = _get_glob_func(file_system)
  suffix: str = Path(gcs_path).suffix
- paths = glob_func(str(Path(gcs_path) / expression / f"*{suffix}"))
+ paths = glob_func(_standardize_path(gcs_path) + f"/{expression}/*{suffix}")
  if paths:
  return _read_geopandas_from_iterable(
  paths,
@@ -256,7 +256,7 @@ def _read_pyarrow(path: str, file_system, mask=None, **kwargs) -> pyarrow.Table
  if not len(
  {
  x
- for x in glob_func(str(Path(path) / "**"))
+ for x in glob_func(str(_standardize_path(path) + "/**"))
  if not paths_are_equal(path, x)
  }
  ):
@@ -618,7 +618,7 @@ def _write_partitioned_geoparquet(
  as_partition_part(col, value)
  for col, value in zip(partition_cols, group, strict=True)
  )
- paths.append(Path(path) / partition_parts)
+ paths.append(_standardize_path(path) + f"/{partition_parts}")
  dfs.append(rows)

  def threaded_write(rows: DataFrame, path: str) -> None:
@@ -626,7 +626,9 @@ def _write_partitioned_geoparquet(
  this_basename = (uuid.uuid4().hex + "-{i}.parquet").replace("-{i}", "0")
  else:
  this_basename = basename_template.replace("-{i}", "0")
- for i, sibling_path in enumerate(sorted(glob_func(str(Path(path) / "**")))):
+ for i, sibling_path in enumerate(
+ sorted(glob_func(str(_standardize_path(path) + "/**")))
+ ):
  if paths_are_equal(sibling_path, path):
  continue
  if existing_data_behavior == "delete_matching":
@@ -638,7 +640,7 @@ def _write_partitioned_geoparquet(
  else:
  this_basename = basename_template.replace("-{i}", str(i + 1))

- out_path = str(Path(path) / this_basename)
+ out_path = str(_standardize_path(path) + "/" + this_basename)
  try:
  with file_system.open(out_path, mode="wb") as file:
  write_func(rows, file, schema=schema, **kwargs)
@@ -780,7 +782,7 @@ def _read_partitioned_parquet(
  glob_func = _get_glob_func(file_system)

  if child_paths is None:
- child_paths = list(glob_func(str(Path(path) / "**/*.parquet")))
+ child_paths = list(glob_func(str(_standardize_path(path) + "/**/*.parquet")))

  filters = _filters_to_expression(filters)

@@ -830,7 +832,7 @@ def get_child_paths(path, file_system) -> list[str]:
  glob_func = _get_glob_func(file_system)
  return [
  x
- for x in glob_func(str(Path(path) / "**/*.parquet"))
+ for x in glob_func(str(_standardize_path(path) + "/**/*.parquet"))
  if not paths_are_equal(x, path)
  ]

@@ -938,3 +940,8 @@ def _maybe_strip_prefix(path, file_system):
  if isinstance(file_system, GCSFileSystem) and path.startswith("gs://"):
  return path.replace("gs://", "")
  return path
+
+
+ def _standardize_path(path: str | Path) -> str:
+ """Make sure the delimiter is '/' and the path does not end with '/'."""
+ return str(path).replace("\\", "/").replace(r"\"", "/")
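
The helper replaces the `Path(...) / ...` joins above so that Windows-style backslashes never leak into bucket URLs. A reproducible demo of the problem and the fix, using `PureWindowsPath` so it behaves the same on any OS:

```python
from pathlib import PureWindowsPath

raw = str(PureWindowsPath("bucket") / "folder" / "file.parquet")
print(raw)                     # bucket\folder\file.parquet
print(raw.replace("\\", "/"))  # bucket/folder/file.parquet
```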
@@ -1,10 +1,12 @@
+ from collections.abc import Iterable
+
  import geopandas as gpd
  import numpy as np
  import pandas as pd
+ import shapely
  from geopandas import GeoDataFrame
  from igraph import Graph
  from pandas import DataFrame
- from shapely import shortest_line


  def _od_cost_matrix(
@@ -24,90 +26,49 @@ def _od_cost_matrix(
  # calculating all-to-all distances is much faster than looping rowwise,
  # so filtering to rowwise afterwards instead
  if rowwise:
- rowwise_df = DataFrame(
- {
- "origin": origins.index,
- "destination": destinations.index,
- }
+ keys = pd.MultiIndex.from_arrays(
+ [origins.index, destinations.index],
+ names=["origin", "destination"],
  )
- results = rowwise_df.merge(results, on=["origin", "destination"], how="left")
-
- results["wkt_ori"] = results["origin"].map(origins.geometry)
- results["wkt_des"] = results["destination"].map(destinations.geometry)
+ results = results.set_index(["origin", "destination"]).loc[keys].reset_index()

- results.loc[results.wkt_ori == results.wkt_des, weight] = 0
+ results["geom_ori"] = results["origin"].map(origins.geometry)
+ results["geom_des"] = results["destination"].map(destinations.geometry)

  # straight lines between origin and destination
  if lines:
- results["geometry"] = shortest_line(results["wkt_ori"], results["wkt_des"])
+ results["geometry"] = shapely.shortest_line(
+ results["geom_ori"], results["geom_des"]
+ )
  results = gpd.GeoDataFrame(results, geometry="geometry", crs=25833)

- results = results.drop(["wkt_ori", "wkt_des"], axis=1, errors="ignore")
+ results.loc[
+ shapely.to_wkb(results["geom_ori"]) == shapely.to_wkb(results["geom_des"]),
+ weight,
+ ] = 0

- return results.reset_index(drop=True)
+ return results.drop(["geom_ori", "geom_des"], axis=1, errors="ignore").reset_index(
+ drop=True
+ )


  def _get_od_df(
- graph: Graph, origins: GeoDataFrame, destinations: GeoDataFrame, weight_col: str
+ graph: Graph, origins: Iterable[str], destinations: Iterable[str], weight_col: str
  ) -> pd.DataFrame:
  distances: list[list[float]] = graph.distances(
  weights="weight",
  source=origins,
  target=destinations,
+ algorithm="dijkstra",
  )

- ori_idx, des_idx, costs = [], [], []
- for i, f_idx in enumerate(origins):
- for j, t_idx in enumerate(destinations):
- ori_idx.append(f_idx)
- des_idx.append(t_idx)
- costs.append(distances[i][j])
-
- return (
- pd.DataFrame(
- data={"origin": ori_idx, "destination": des_idx, weight_col: costs}
- )
- .replace([np.inf, -np.inf], np.nan)
- .reset_index(drop=True)
+ costs = np.array(
+ [distances[i][j] for j in range(len(destinations)) for i in range(len(origins))]
  )
+ costs[(costs == np.inf) | (costs == -np.inf)] = np.nan
+ ori_idx = np.array([x for _ in range(len(destinations)) for x in origins])
+ des_idx = np.array([x for x in destinations for _ in range(len(origins))])

-
- def _get_one_od_df(
- graph: Graph, origins: GeoDataFrame, destinations: GeoDataFrame, weight_col: str
- ) -> pd.DataFrame:
- distances: list[list[float]] = graph.distances(
- weights="weight",
- source=origins,
- target=destinations,
+ return pd.DataFrame(
+ data={"origin": ori_idx, "destination": des_idx, weight_col: costs}
  )
-
- ori_idx, des_idx, costs = [], [], []
- for i, f_idx in enumerate(origins):
- for j, t_idx in enumerate(destinations):
- ori_idx.append(f_idx)
- des_idx.append(t_idx)
- costs.append(distances[i][j])
-
- return (
- pd.DataFrame(
- data={"origin": ori_idx, "destination": des_idx, weight_col: costs}
- )
- .replace([np.inf, -np.inf], np.nan)
- .reset_index(drop=True)
- )
-
-
- # def _get_od_df(
- # graph: Graph,
- # origins: GeoDataFrame,
- # destinations: GeoDataFrame,
- # weight_col: str,
- # ) -> pd.DataFrame:
- # from ..parallel.parallel import Parallel
-
- # results: list[pd.DataFrame] = Parallel(40, backend="loky").map(
- # _get_one_od_df,
- # [origins[origins.index == i] for i in origins.index.unique()],
- # kwargs=dict(graph=graph, destinations=destinations, weight_col=weight_col),
- # )
- # return pd.concat(results, ignore_index=True)
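
The rowwise branch now selects origin-destination pairs with one vectorized `.loc` on a MultiIndex instead of a merge. A toy sketch of the pattern (made-up frame, not the package's data):

```python
import pandas as pd

results = pd.DataFrame(
    {"origin": [0, 0, 1, 1], "destination": [2, 3, 2, 3], "cost": [1.0, 2.0, 3.0, 4.0]}
)
keys = pd.MultiIndex.from_arrays([[0, 1], [3, 2]], names=["origin", "destination"])

rowwise = results.set_index(["origin", "destination"]).loc[keys].reset_index()
print(rowwise)  # only the pairs (0, 3) and (1, 2), in that order
```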
@@ -16,10 +16,7 @@ opposite directions.


  class Points:
- def __init__(
- self,
- points: GeoDataFrame,
- ) -> None:
+ def __init__(self, points: GeoDataFrame) -> None:
  self.gdf = points.copy()

  def _make_temp_idx(self, start: int) -> None:
@@ -79,6 +79,7 @@ def close_network_holes(
  gdf: GeoDataFrame,
  max_distance: int | float,
  max_angle: int,
+ *,
  hole_col: str | None = "hole",
  ) -> GeoDataFrame:
  """Fills network gaps with straight lines.
@@ -282,11 +283,13 @@ def _close_holes_all_lines(
  ) -> GeoSeries:
  k = min(len(nodes), 50)

+ n_dict = nodes.set_index("wkt")["n"]
+
  # make points for the deadends and the other endpoint of the deadend lines
- deadends_target = lines.loc[lines["n_target"] == 1].rename(
+ deadends_target = lines.loc[lines["target_wkt"].map(n_dict) == 1].rename(
  columns={"target_wkt": "wkt", "source_wkt": "wkt_other_end"}
  )
- deadends_source = lines.loc[lines["n_source"] == 1].rename(
+ deadends_source = lines.loc[lines["source_wkt"].map(n_dict) == 1].rename(
  columns={"source_wkt": "wkt", "target_wkt": "wkt_other_end"}
  )
  deadends = pd.concat([deadends_source, deadends_target], ignore_index=True)
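
`nodes.set_index("wkt")["n"]` builds a Series keyed by node wkt, and `Series.map` accepts another Series and looks values up by its index; this replaces the precomputed `n_source`/`n_target` columns dropped elsewhere in this release. A toy sketch:

```python
import pandas as pd

nodes = pd.DataFrame({"wkt": ["POINT (0 0)", "POINT (1 1)"], "n": [1, 3]})
n_dict = nodes.set_index("wkt")["n"]

lines = pd.DataFrame({"target_wkt": ["POINT (0 0)", "POINT (1 1)"]})
deadends = lines.loc[lines["target_wkt"].map(n_dict) == 1]  # n == 1 -> deadend
```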
@@ -349,12 +352,6 @@ def _close_holes_all_lines(
  to_idx = indices[condition]
  to_wkt = nodes.iloc[to_idx]["wkt"]

- # all_angles = all_angles + [
- # diff
- # for f, diff in zip(from_wkt, angles_difference[condition], strict=True)
- # if f not in new_sources
- # ]
-
  # now add the wkts to the lists of new sources and targets. If the source
  # is already added, the new wkts will not be added again
  new_targets = new_targets + [
@@ -57,9 +57,7 @@ def get_connected_components(gdf: GeoDataFrame) -> GeoDataFrame:

  gdf["connected"] = gdf.source.map(largest_component_dict).fillna(0)

- gdf = gdf.drop(
- ["source_wkt", "target_wkt", "source", "target", "n_source", "n_target"], axis=1
- )
+ gdf = gdf.drop(["source_wkt", "target_wkt", "source", "target"], axis=1)

  return gdf

@@ -120,8 +118,6 @@ def get_component_size(gdf: GeoDataFrame) -> GeoDataFrame:
  gdf["component_index"] = gdf["source"].map(mapper["component_index"])
  gdf["component_size"] = gdf["source"].map(mapper["component_size"])

- gdf = gdf.drop(
- ["source_wkt", "target_wkt", "source", "target", "n_source", "n_target"], axis=1
- )
+ gdf = gdf.drop(["source_wkt", "target_wkt", "source", "target"], axis=1)

  return gdf
@@ -36,14 +36,12 @@ class Network:
  raise TypeError(f"'lines' should be GeoDataFrame, got {type(gdf)}")

  if not len(gdf):
- raise ZeroLinesError
+ raise ZeroLinesError()

  self.gdf = self._prepare_network(gdf)

  self._make_node_ids()

- self._percent_bidirectional = self._check_percent_bidirectional()
-
  def _make_node_ids(self) -> None:
  """Gives the lines node ids and returns lines (edges) and nodes.

@@ -55,6 +53,7 @@ class Network:
  The lines must be singlepart linestrings.
  """
  self.gdf, self._nodes = make_node_ids(self.gdf)
+ self._percent_bidirectional = self._check_percent_bidirectional()

  @staticmethod
  def _prepare_network(gdf: GeoDataFrame) -> GeoDataFrame:
@@ -138,6 +137,8 @@ class Network:
  or any superfluous node-ids (meaning rows have been removed from the lines
  gdf).
  """
+ if not hasattr(self, "_nodes"):
+ return False
  new_or_missing = (~self.gdf.source.isin(self._nodes.node_id)) | (
  ~self.gdf.target.isin(self._nodes.node_id)
  )
@@ -20,6 +20,7 @@ from pandas import MultiIndex
  from shapely import force_2d

  from ..geopandas_tools.general import _push_geom_col
+ from ..geopandas_tools.sfilter import sfilter_inverse
  from ._get_route import _get_k_routes
  from ._get_route import _get_route
  from ._get_route import _get_route_frequencies
@@ -30,6 +31,8 @@ from ._service_area import _service_area
  from .cutting_lines import split_lines_by_nearest_point
  from .network import Network
  from .networkanalysisrules import NetworkAnalysisRules
+ from .nodes import _map_node_ids_from_wkt
+ from .nodes import make_node_ids


  class NetworkAnalysis:
@@ -1372,15 +1375,14 @@ class NetworkAnalysis:

  self.origins = Origins(origins)
  self.origins._make_temp_idx(
- start=max(self.network.nodes.node_id.astype(int)) + 1
+ start=max(self.network.nodes["node_id"].astype(int)) + 1
  )

  if destinations is not None:
  self.destinations = Destinations(destinations)
  self.destinations._make_temp_idx(
- start=max(self.origins.gdf.temp_idx.astype(int)) + 1
+ start=max(self.origins.gdf["temp_idx"].astype(int)) + 1
  )
-
  else:
  self.destinations = None

@@ -1396,8 +1398,6 @@ class NetworkAnalysis:
  directed=self.rules.directed,
  )

- self._add_missing_vertices()
-
  self._graph_updated_count += 1

  self._update_wkts()
@@ -1413,7 +1413,6 @@ class NetworkAnalysis:
  """
  if self.rules.split_lines:
  self._split_lines()
- self.network._make_node_ids()
  self.origins._make_temp_idx(
  start=max(self.network.nodes.node_id.astype(int)) + 1
  )
@@ -1428,6 +1427,7 @@ class NetworkAnalysis:

  self.network.gdf["src_tgt_wt"] = self.network._create_edge_ids(edges, weights)

+ # add edges between origins/destinations and the network nodes
  edges_start, weights_start = self.origins._get_edges_and_weights(
  nodes=self.network.nodes,
  rules=self.rules,
@@ -1438,6 +1438,7 @@ class NetworkAnalysis:
  weights = weights + weights_start

  if self.destinations is None:
+ edges, weights = self._add_fake_edges(edges, weights)
  edge_ids = self.network._create_edge_ids(edges, weights)
  return edges, weights, edge_ids

@@ -1450,10 +1451,29 @@ class NetworkAnalysis:
  edges = edges + edges_end
  weights = weights + weights_end

+ edges, weights = self._add_fake_edges(edges, weights)
  edge_ids = self.network._create_edge_ids(edges, weights)

  return edges, weights, edge_ids

+ def _add_fake_edges(self, edges, weights):
+ """Add self-loop edges for origins and destinations so igraph does not raise an error for non-existing nodes."""
+ nodes = {x[0] for x in edges} | {x[1] for x in edges}
+
+ fake_edges = [
+ (idx, idx)
+ for idx in list(self.origins.gdf["temp_idx"])
+ + list(
+ self.destinations.gdf["temp_idx"]
+ if self.destinations is not None
+ else []
+ )
+ if idx not in nodes
+ ]
+ edges = edges + fake_edges
+ weights = weights + [0 for _ in fake_edges]
+ return edges, weights
+

  def _split_lines(self) -> None:
  if self.destinations is not None:
  points = pd.concat(
1464
1484
 
1465
1485
  points = points.drop_duplicates(points.geometry.name)
1466
1486
 
1467
- self.network.gdf["meters_"] = self.network.gdf.length
1487
+ self.network.gdf["_meters2"] = self.network.gdf.length
1468
1488
 
1469
1489
  # create an id from before the split, used to revert the split later
1470
1490
  self.network.gdf["temp_idx__"] = range(len(self.network.gdf))
@@ -1477,50 +1497,53 @@ class NetworkAnalysis:
1477
1497
  )
1478
1498
 
1479
1499
  # save the unsplit lines for later
1480
- splitted = lines.loc[lines["splitted"] == 1, "temp_idx__"]
1500
+ splitted = lines.loc[lines["splitted"] == 1]
1481
1501
  self.network._not_splitted = self.network.gdf.loc[
1482
- self.network.gdf["temp_idx__"].isin(splitted)
1502
+ lambda x: x["temp_idx__"].isin(splitted["temp_idx__"])
1483
1503
  ]
1484
1504
 
1505
+ new_lines, new_nodes = make_node_ids(splitted)
1506
+ new_nodes = sfilter_inverse(new_nodes, self.network.nodes.buffer(1e-5))
1507
+ new_nodes["node_id"] = (
1508
+ new_nodes["node_id"].astype(int) + len(self.network.nodes) + 1
1509
+ ).astype(str)
1510
+ self.network._new_node_ids = list(new_nodes["node_id"])
1511
+
1485
1512
  # adjust weight to new length
1486
- lines[self.rules.weight] = lines[self.rules.weight] * (
1487
- lines.length / lines["meters_"]
1513
+ new_lines[self.rules.weight] = new_lines[self.rules.weight] * (
1514
+ new_lines.length / new_lines["_meters2"]
1515
+ )
1516
+ self.network._nodes = pd.concat(
1517
+ [self.network._nodes, new_nodes],
1518
+ ignore_index=True,
1488
1519
  )
1489
1520
 
1521
+ lines = pd.concat(
1522
+ [
1523
+ self.network.gdf.loc[
1524
+ lambda x: ~x["temp_idx__"].isin(splitted["temp_idx__"])
1525
+ ],
1526
+ new_lines,
1527
+ ],
1528
+ ignore_index=True,
1529
+ )
1530
+
1531
+ lines = _map_node_ids_from_wkt(lines, self.network._nodes)
1532
+
1490
1533
  self.network.gdf = lines
1491
1534
 
1492
1535
  def _unsplit_network(self):
1493
1536
  """Remove the splitted lines and add the unsplitted ones."""
1537
+ if not hasattr(self.network, "_not_splitted"):
1538
+ return
1494
1539
  lines = self.network.gdf.loc[self.network.gdf["splitted"] != 1]
1495
1540
  self.network.gdf = pd.concat(
1496
1541
  [lines, self.network._not_splitted], ignore_index=True
1497
1542
  ).drop("temp_idx__", axis=1)
1498
- del self.network._not_splitted
1499
-
1500
- def _add_missing_vertices(self):
1501
- """Adds the missing points.
1502
-
1503
- Nodes that had no nodes within the search_tolerance are added to the graph.
1504
- To not get an error when running the distance calculation.
1505
- """
1506
- # TODO: either check if any() beforehand, or add fictional edges before
1507
- # making the graph, to make things faster
1508
- # (this method took 64.660 out of 500 seconds)
1509
- self.graph.add_vertices(
1510
- [
1511
- idx
1512
- for idx in self.origins.gdf["temp_idx"]
1513
- if idx not in self.graph.vs["name"]
1514
- ]
1515
- )
1516
- if self.destinations is not None:
1517
- self.graph.add_vertices(
1518
- [
1519
- idx
1520
- for idx in self.destinations.gdf["temp_idx"]
1521
- if idx not in self.graph.vs["name"]
1522
- ]
1523
- )
1543
+ self.network._nodes = self.network._nodes[
1544
+ lambda x: ~x["node_id"].isin(self.network._new_node_ids)
1545
+ ]
1546
+ del self.network._not_splitted, self.network._new_node_ids
1524
1547
 
1525
1548
  @staticmethod
1526
1549
  def _make_graph(
@@ -1530,10 +1553,8 @@ class NetworkAnalysis:
  directed: bool,
  ) -> Graph:
  """Creates an igraph Graph from a list of edges and weights."""
- assert len(edges) == len(weights)
-
+ assert len(edges) == len(weights) == len(edge_ids)
  graph = igraph.Graph.TupleList(edges, directed=directed)
-
  graph.es["weight"] = weights
  graph.es["src_tgt_wt"] = edge_ids
  graph.es["edge_tuples"] = edges
@@ -1566,7 +1587,7 @@ class NetworkAnalysis:
  for points in ["origins", "destinations"]:
  if self[points] is None:
  continue
- if points not in self.wkts:
+ if not hasattr(self, points) or self[points] is None:
  return False
  if self._points_have_changed(self[points].gdf, what=points):
  return False
@@ -1596,8 +1617,6 @@ class NetworkAnalysis:
  """
  self.wkts = {}

- self.wkts["network"] = self.network.gdf.geometry.to_wkt().values
-
  if not hasattr(self, "origins"):
  return

@@ -261,10 +261,10 @@ class NetworkAnalysisRules:
  @staticmethod
  def _check_for_nans(df: GeoDataFrame, col: str) -> None:
  """Raise ValueError if there are any NaNs."""
- if all(df[col].isna()):
+ if df[col].isna().all():
  raise ValueError(f"All values in the {col!r} column are NaN.")

- nans = sum(df[col].isna())
+ nans = df[col].isna().sum()
  if nans:
  raise ValueError(
  f"{nans} rows have missing values in the {col!r} column. "
274
  @staticmethod
  def _check_for_negative_values(df: GeoDataFrame, col: str) -> None:
  """Raise ValueError if there are any negative values."""
- negative = sum(df[col] < 0)
+ negative = (df[col] < 0).sum()
  if negative:
  raise ValueError(
  f"{negative} negative values found in the {col!r} column. Fill these "
47
  gdf = make_edge_coords_cols(gdf)
  geomcol1, geomcol2, geomcol_final = "source_coords", "target_coords", "coords"

- # remove identical lines in opposite directions
+ # remove identical lines in opposite directions in order to get n==1 for deadends
  gdf["meters_"] = gdf.length.astype(str)
-
  sources = gdf[[geomcol1, geomcol2, "meters_"]].rename(
  columns={geomcol1: geomcol_final, geomcol2: "temp"}
  )
  targets = gdf[[geomcol1, geomcol2, "meters_"]].rename(
  columns={geomcol2: geomcol_final, geomcol1: "temp"}
  )
-
  nodes = (
  pd.concat([sources, targets], axis=0, ignore_index=True)
  .drop_duplicates([geomcol_final, "temp", "meters_"])
@@ -66,22 +64,11 @@ def make_node_ids(
  gdf = gdf.drop("meters_", axis=1)

  nodes["n"] = nodes.assign(n=1).groupby(geomcol_final)["n"].transform("sum")
-
  nodes = nodes.drop_duplicates(subset=[geomcol_final]).reset_index(drop=True)
-
  nodes["node_id"] = nodes.index
  nodes["node_id"] = nodes["node_id"].astype(str)

- id_dict = {
- geom: node_id
- for geom, node_id in zip(nodes[geomcol_final], nodes["node_id"], strict=True)
- }
- gdf["source"] = gdf[geomcol1].map(id_dict)
- gdf["target"] = gdf[geomcol2].map(id_dict)
-
- n_dict = {geom: n for geom, n in zip(nodes[geomcol_final], nodes["n"], strict=True)}
- gdf["n_source"] = gdf[geomcol1].map(n_dict)
- gdf["n_target"] = gdf[geomcol2].map(n_dict)
+ gdf = _map_node_ids_from_wkt(gdf, nodes, wkt=wkt)

  if wkt:
  nodes["geometry"] = gpd.GeoSeries.from_wkt(nodes[geomcol_final], crs=gdf.crs)
@@ -95,3 +82,17 @@ def make_node_ids(
  gdf = _push_geom_col(gdf)

  return gdf, nodes
+
+
+ def _map_node_ids_from_wkt(lines, nodes, wkt: bool = True) -> GeoDataFrame:
+ if wkt:
+ geomcol1, geomcol2, geomcol_final = "source_wkt", "target_wkt", "wkt"
+ else:
+ geomcol1, geomcol2, geomcol_final = "source_coords", "target_coords", "coords"
+ id_dict = {
+ geom: node_id
+ for geom, node_id in zip(nodes[geomcol_final], nodes["node_id"], strict=True)
+ }
+ lines["source"] = lines[geomcol1].map(id_dict)
+ lines["target"] = lines[geomcol2].map(id_dict)
+ return lines
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ssb-sgis
- Version: 1.3.5
+ Version: 1.3.8
  Summary: GIS functions used at Statistics Norway.
  Home-page: https://github.com/statisticsnorway/ssb-sgis
  License: MIT
@@ -7,23 +7,23 @@ sgis/geopandas_tools/bounds.py,sha256=Os9o4EZ1ax38reW-9cdJzgKHoX2il9lUEIZ7wc5CpB
  sgis/geopandas_tools/buffer_dissolve_explode.py,sha256=z9HvakazR_prXH862e8-gEe7UFbeI4rRTbUaBgPeMBk,19552
  sgis/geopandas_tools/centerlines.py,sha256=Q65Sx01SeAlulBEd9oaZkB2maBBNdLcJwAbTILg4SPU,11848
  sgis/geopandas_tools/cleaning.py,sha256=fST0xFztmyn-QUOAfvjZmu7aO_zPiolWK7gd7TR6ffI,24393
- sgis/geopandas_tools/conversion.py,sha256=iX954YEpobmn_R1Ecx_zDU1RlWq_67pfbzMXtwTH04I,24162
- sgis/geopandas_tools/duplicates.py,sha256=TDDM4u1n7SIkyJrOfl1Lno92AmUPqtXBHsj1IUKC0hI,14992
- sgis/geopandas_tools/general.py,sha256=DTQM8p-krgR3gA3miP931eCrxDCpF1ya7dRiMy3K_bs,44099
+ sgis/geopandas_tools/conversion.py,sha256=viZz9t1Gi-8nJ9GwZlCVvObXO52VTODkTZ_F3b0gLek,24183
+ sgis/geopandas_tools/duplicates.py,sha256=AU8J2X3sUuohRDsQOc9TA5K0k2KHDlGLbvb6eMSQvvc,15893
+ sgis/geopandas_tools/general.py,sha256=W8R0_EXGZXDnTl36iGVtF3YTe_BvdrR2Jzy1TZo5o9A,44363
  sgis/geopandas_tools/geocoding.py,sha256=sZjUW52ULhQWDLmU51C9_itBePkDuWkp8swvYaiYmJk,679
  sgis/geopandas_tools/geometry_types.py,sha256=ijQDbQaZPqPGjBl707H4yooNXpk21RXyatI7itnvqLk,7603
  sgis/geopandas_tools/neighbors.py,sha256=VZGOwwC3-C6KpwLQ3j0K5cOVInmckxIXoGMqPGkemk4,17606
  sgis/geopandas_tools/overlay.py,sha256=uR9troLM9amo-z5Bs6jhPtFbr0RrDFsW-WJpeuQ4WSQ,23522
- sgis/geopandas_tools/point_operations.py,sha256=JM4hvfIVxZaZdGNlGzcCurrKzkgC_b9hzbFYN42f9WY,6972
+ sgis/geopandas_tools/point_operations.py,sha256=R_39MoMIMsTB0mxmWibZxfUoUZUriuOoxjMNC8mFCds,7624
  sgis/geopandas_tools/polygon_operations.py,sha256=v-B9IgbFfm4dVHKPyzvmnNiqVCdtl9ddpCsQpZZ-9sU,49284
  sgis/geopandas_tools/polygons_as_rings.py,sha256=BX_GZS6F9I4NbEpiOlNBd7zywJjdfdJVi_MkeONBuiM,14941
- sgis/geopandas_tools/runners.py,sha256=J4lH0RXYDYTLVeQFgNv8gEY0E97QGIQ4zPW5vfoxgDU,12979
+ sgis/geopandas_tools/runners.py,sha256=m2AgT1GTSACPzGeP0cuaItnpN-77bpTPcXHwmaOqNNQ,13170
  sgis/geopandas_tools/sfilter.py,sha256=CZ_-c4t1CQCwJ7RHCKo1Na9u-aAg18xXnJAMiUqoaj8,10411
  sgis/geopandas_tools/utils.py,sha256=X0pRvB1tWgV_0BCrRS1HU9LtLGnZCpvVPxyqM9JGb0Y,1415
  sgis/helpers.py,sha256=4N6vFWQ3TYVzRHNcWY_fNa_GkFuaZB3vtCkkFND-qs0,9628
  sgis/io/__init__.py,sha256=uyBr20YDqB2bQttrd5q1JuGOvX32A-MSvS7Wmw5f5qg,177
  sgis/io/_is_dapla.py,sha256=wmfkSe98IrLhUg3dtXZusV6OVC8VlY1kbc5EQDf3P-Q,358
- sgis/io/dapla_functions.py,sha256=EmliBKnGm8XELY450BMTYfRtYw2WRMR2SBUxhYP5huw,31896
+ sgis/io/dapla_functions.py,sha256=_arLbkfdC18hUi5chNdKyE6JIEPCEBeVjjblBjLnLx8,32195
  sgis/io/opener.py,sha256=3D65XF0dHLigZfzB5uAG-3T_VXVu4wJXZUMzAkqHypc,844
  sgis/io/read_parquet.py,sha256=FvZYv1rLkUlrSaUY6QW6E1yntmntTeQuZ9ZRgCDO4IM,3776
  sgis/maps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -39,17 +39,17 @@ sgis/maps/tilesources.py,sha256=F4mFHxPwkiPJdVKzNkScTX6xbJAMIUtlTq4mQ83oguw,1746
  sgis/maps/wms.py,sha256=sCVpKxH1Rsd14GECW7BFh8yaWngpVWYvw9Yhuez1yW8,12482
  sgis/networkanalysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sgis/networkanalysis/_get_route.py,sha256=3m5xQdQqGtt51vcI5fcmYQAOQCeBtL6sorDoPxBNf84,7818
- sgis/networkanalysis/_od_cost_matrix.py,sha256=zkyPX7ObT996ahaFJ2oI0D0SqQWbWyfy_qLtXwValPg,3434
- sgis/networkanalysis/_points.py,sha256=128QEepBGAyI5XakMUVvNyOnJE0Ts7hvGiVUt1YTXiU,4439
+ sgis/networkanalysis/_od_cost_matrix.py,sha256=DZDQGZN_OGkYC35fpS_WoXoZuQ3zoZnmtArXl5eGc70,2333
+ sgis/networkanalysis/_points.py,sha256=iG5whhXAo5NcjLNaVgjvNfl6oSm9SOoVmUeMPB3uNWQ,4416
  sgis/networkanalysis/_service_area.py,sha256=-xSdiZrZ3uqAhd4ZaBY8a8fMJjN09MPoYxlVMXm3UZk,5642
- sgis/networkanalysis/closing_network_holes.py,sha256=FYZ677nRwLmDkP6bQ1ssQ_h29RzAG463U4xmbu5ksfg,14572
+ sgis/networkanalysis/closing_network_holes.py,sha256=7mQmqvcx4kpu0WGlGS8PH4j4dDFsm19FOm-z0rYD2nk,14455
  sgis/networkanalysis/cutting_lines.py,sha256=ZQAt0cufaPeNAEqUzp-imu26AIL9S5-lw6Xifa8RoWk,9818
  sgis/networkanalysis/directednetwork.py,sha256=Mrc2zHip4P5RNxnyffKm-xU832AVQeSHz-YZueAc0pM,11413
- sgis/networkanalysis/finding_isolated_networks.py,sha256=Wg4ILhm7uS9RLOGcL0WN8uZBMJYjdljJc8L5DU5nIPY,3754
- sgis/networkanalysis/network.py,sha256=zV9bAbVdTgTohg2o2RFGy2uhOJrd3Ma57hwIAStxMAQ,7847
- sgis/networkanalysis/networkanalysis.py,sha256=P2kqE1kBxabBnv4h6ort1IiO4W0XlsJn5DrT7IbeG5w,68651
- sgis/networkanalysis/networkanalysisrules.py,sha256=na26iZTpnlwwIEEGswoqMH4W1ZOLN2GIvACZbxn8GPw,12958
- sgis/networkanalysis/nodes.py,sha256=atFSpqz-_uJHMrf6MC0zhrrcWIydRMFZrsaHC2xr1GU,3374
+ sgis/networkanalysis/finding_isolated_networks.py,sha256=FDel7zF03eBpicwZnfK2-e9IXYaZ4KVleaTnUvgW-4k,3678
+ sgis/networkanalysis/network.py,sha256=13yoArGOX1J_nCLTxh7tQjDTxj545RDYFYc_11m3FuM,7913
+ sgis/networkanalysis/networkanalysis.py,sha256=gy69ym4mBkQJAq6U27mB9Vkvq71ZuOI1Fr0deggG0oY,69641
+ sgis/networkanalysis/networkanalysisrules.py,sha256=k1f-DNEmJtiP_TkCCDclsnRjGTkBqAkj-MYQMUAt_Yc,12963
+ sgis/networkanalysis/nodes.py,sha256=h7j3vCFdTsuiVEkmB26ebe44HiFLKOlQhghJ5NV0cU4,3562
  sgis/networkanalysis/traveling_salesman.py,sha256=Jjo6bHY4KJ-eK0LycyTy0sWxZjgITs5MBllZ_G9FhTE,5655
  sgis/parallel/__init__.py,sha256=fw_Fl3IJk1bKzrRBhZIoOpznJqwd09NVHJJFj2ZLeIU,32
  sgis/parallel/parallel.py,sha256=3Nq7cgvkVmg-2TEyNCEiqBshYOHt6Qf6xg6rrcnzf78,39394
@@ -61,7 +61,7 @@ sgis/raster/indices.py,sha256=efJmgfPg_VuSzXFosXV661IendF8CwPFWtMhyP4TMUg,222
  sgis/raster/regex.py,sha256=4idTJ9vFtsGtbxcjJrx2VrpJJuDMP3bLdqF93Vc_cmY,3752
  sgis/raster/sentinel_config.py,sha256=nySDqn2R8M6W8jguoBeSAK_zzbAsqmaI59i32446FwY,1268
  sgis/raster/zonal.py,sha256=D4Gyptw-yOLTCO41peIuYbY-DANsJCG19xXDlf1QAz4,2299
- ssb_sgis-1.3.5.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
- ssb_sgis-1.3.5.dist-info/METADATA,sha256=TbqucBYChWXB_Xw2eFf9PZwQoguEWeKGav18lFbhUfI,11495
- ssb_sgis-1.3.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- ssb_sgis-1.3.5.dist-info/RECORD,,
+ ssb_sgis-1.3.8.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
+ ssb_sgis-1.3.8.dist-info/METADATA,sha256=qWEIyFwB_LmGabBeT9zQUq34Ne7FWXzsYoRQL_RgOLE,11495
+ ssb_sgis-1.3.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ ssb_sgis-1.3.8.dist-info/RECORD,,