ssb-sgis 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

Files changed (60)
  1. sgis/__init__.py +107 -121
  2. sgis/exceptions.py +5 -3
  3. sgis/geopandas_tools/__init__.py +1 -0
  4. sgis/geopandas_tools/bounds.py +86 -47
  5. sgis/geopandas_tools/buffer_dissolve_explode.py +62 -39
  6. sgis/geopandas_tools/centerlines.py +53 -44
  7. sgis/geopandas_tools/cleaning.py +87 -104
  8. sgis/geopandas_tools/conversion.py +164 -107
  9. sgis/geopandas_tools/duplicates.py +33 -19
  10. sgis/geopandas_tools/general.py +84 -52
  11. sgis/geopandas_tools/geometry_types.py +24 -10
  12. sgis/geopandas_tools/neighbors.py +23 -11
  13. sgis/geopandas_tools/overlay.py +136 -53
  14. sgis/geopandas_tools/point_operations.py +11 -10
  15. sgis/geopandas_tools/polygon_operations.py +53 -61
  16. sgis/geopandas_tools/polygons_as_rings.py +121 -78
  17. sgis/geopandas_tools/sfilter.py +17 -17
  18. sgis/helpers.py +116 -58
  19. sgis/io/dapla_functions.py +32 -23
  20. sgis/io/opener.py +13 -6
  21. sgis/io/read_parquet.py +2 -2
  22. sgis/maps/examine.py +55 -28
  23. sgis/maps/explore.py +471 -112
  24. sgis/maps/httpserver.py +12 -12
  25. sgis/maps/legend.py +285 -134
  26. sgis/maps/map.py +248 -129
  27. sgis/maps/maps.py +123 -119
  28. sgis/maps/thematicmap.py +260 -94
  29. sgis/maps/tilesources.py +3 -8
  30. sgis/networkanalysis/_get_route.py +5 -4
  31. sgis/networkanalysis/_od_cost_matrix.py +44 -1
  32. sgis/networkanalysis/_points.py +10 -4
  33. sgis/networkanalysis/_service_area.py +5 -2
  34. sgis/networkanalysis/closing_network_holes.py +22 -64
  35. sgis/networkanalysis/cutting_lines.py +58 -46
  36. sgis/networkanalysis/directednetwork.py +16 -8
  37. sgis/networkanalysis/finding_isolated_networks.py +6 -5
  38. sgis/networkanalysis/network.py +15 -13
  39. sgis/networkanalysis/networkanalysis.py +79 -61
  40. sgis/networkanalysis/networkanalysisrules.py +21 -17
  41. sgis/networkanalysis/nodes.py +2 -3
  42. sgis/networkanalysis/traveling_salesman.py +6 -3
  43. sgis/parallel/parallel.py +372 -142
  44. sgis/raster/base.py +9 -3
  45. sgis/raster/cube.py +331 -213
  46. sgis/raster/cubebase.py +15 -29
  47. sgis/raster/image_collection.py +2560 -0
  48. sgis/raster/indices.py +17 -12
  49. sgis/raster/raster.py +356 -275
  50. sgis/raster/sentinel_config.py +104 -0
  51. sgis/raster/zonal.py +38 -14
  52. {ssb_sgis-1.0.1.dist-info → ssb_sgis-1.0.3.dist-info}/LICENSE +1 -1
  53. {ssb_sgis-1.0.1.dist-info → ssb_sgis-1.0.3.dist-info}/METADATA +87 -16
  54. ssb_sgis-1.0.3.dist-info/RECORD +61 -0
  55. {ssb_sgis-1.0.1.dist-info → ssb_sgis-1.0.3.dist-info}/WHEEL +1 -1
  56. sgis/raster/bands.py +0 -48
  57. sgis/raster/gradient.py +0 -78
  58. sgis/raster/methods_as_functions.py +0 -124
  59. sgis/raster/torchgeo.py +0 -150
  60. ssb_sgis-1.0.1.dist-info/RECORD +0 -63
sgis/geopandas_tools/buffer_dissolve_explode.py

@@ -14,15 +14,19 @@ for the following:
  - The buff function returns a GeoDataFrame, the geopandas method returns a GeoSeries.
  """

- from typing import Callable
+ from collections.abc import Callable
+ from collections.abc import Sequence

  import numpy as np
  import pandas as pd
- from geopandas import GeoDataFrame, GeoSeries
+ from geopandas import GeoDataFrame
+ from geopandas import GeoSeries

- from .general import merge_geometries, parallel_unary_union
+ from .general import _merge_geometries
+ from .general import _parallel_unary_union
  from .geometry_types import make_all_singlepart
- from .polygon_operations import get_cluster_mapper, get_grouped_centroids
+ from .polygon_operations import get_cluster_mapper
+ from .polygon_operations import get_grouped_centroids


  def _decide_ignore_index(kwargs: dict) -> tuple[dict, bool]:
@@ -48,6 +52,7 @@ def buffdissexp(
  copy: bool = True,
  grid_size: float | int | None = None,
  n_jobs: int = 1,
+ join_style: int | str = "round",
  **dissolve_kwargs,
  ) -> GeoDataFrame:
  """Buffers and dissolves overlapping geometries.
@@ -65,6 +70,9 @@ def buffdissexp(
  index_parts: If False (default), the index after dissolve is respected. If
  True, an integer index level is added during explode.
  copy: Whether to copy the GeoDataFrame before buffering. Defaults to True.
+ grid_size: Rounding of the coordinates. Defaults to None.
+ n_jobs: Number of threads to use. Defaults to 1.
+ join_style: Buffer join style.
  **dissolve_kwargs: additional keyword arguments passed to geopandas' dissolve.

  Returns:
@@ -79,6 +87,7 @@ def buffdissexp(
  copy=copy,
  grid_size=grid_size,
  n_jobs=n_jobs,
+ join_style=join_style,
  **dissolve_kwargs,
  )

@@ -93,6 +102,7 @@ def buffdiss(
  resolution: int = 50,
  copy: bool = True,
  n_jobs: int = 1,
+ join_style: int | str = "round",
  **dissolve_kwargs,
  ) -> GeoDataFrame:
  """Buffers and dissolves geometries.
@@ -108,14 +118,16 @@ def buffdiss(
  the geometry by
  resolution: The number of segments used to approximate a quarter circle.
  Here defaults to 50, as opposed to the default 16 in geopandas.
+ join_style: Buffer join style.
  copy: Whether to copy the GeoDataFrame before buffering. Defaults to True.
+ n_jobs: Number of threads to use. Defaults to 1.
  **dissolve_kwargs: additional keyword arguments passed to geopandas' dissolve.

  Returns:
  A buffered GeoDataFrame where geometries are dissolved.

- Examples
- --------
+ Examples:
+ ---------
  Create some random points.

  >>> import sgis as sg
@@ -162,12 +174,20 @@ def buffdiss(
  1 b MULTIPOLYGON (((258404.858 6647830.931, 258404... 0.687635
  2 d MULTIPOLYGON (((258180.258 6647935.731, 258179... 0.580157
  """
- buffered = buff(gdf, distance, resolution=resolution, copy=copy)
+ buffered = buff(
+ gdf, distance, resolution=resolution, copy=copy, join_style=join_style
+ )

  return _dissolve(buffered, n_jobs=n_jobs, **dissolve_kwargs)


- def _dissolve(gdf, aggfunc="first", grid_size=None, n_jobs=1, **dissolve_kwargs):
+ def _dissolve(
+ gdf: GeoDataFrame,
+ aggfunc: str = "first",
+ grid_size: None | float = None,
+ n_jobs: int = 1,
+ **dissolve_kwargs,
+ ) -> GeoDataFrame:

  if not len(gdf):
  return gdf
@@ -220,7 +240,7 @@ def _dissolve(gdf, aggfunc="first", grid_size=None, n_jobs=1, **dissolve_kwargs)

  if n_jobs > 1:
  try:
- agged = parallel_unary_union(
+ agged = _parallel_unary_union(
  many_hits, n_jobs=n_jobs, by=by, grid_size=grid_size, **dissolve_kwargs
  )
  dissolved[geom_col] = agged
@@ -230,7 +250,7 @@ def _dissolve(gdf, aggfunc="first", grid_size=None, n_jobs=1, **dissolve_kwargs)
  raise e

  geoms_agged = many_hits.groupby(by, **dissolve_kwargs)[geom_col].agg(
- lambda x: merge_geometries(x, grid_size=grid_size)
+ lambda x: _merge_geometries(x, grid_size=grid_size)
  )

  if not dissolve_kwargs.get("as_index"):
@@ -248,13 +268,13 @@ def _dissolve(gdf, aggfunc="first", grid_size=None, n_jobs=1, **dissolve_kwargs)

  def diss(
  gdf: GeoDataFrame,
- by=None,
- aggfunc="first",
+ by: str | Sequence[str] | None = None,
+ aggfunc: str | Callable | dict[str, str | Callable] = "first",
  as_index: bool = True,
  grid_size: float | int | None = None,
  n_jobs: int = 1,
  **dissolve_kwargs,
- ):
+ ) -> GeoDataFrame:
  """Dissolves geometries.

  It takes a GeoDataFrame and dissolves and fixes geometries.
@@ -265,6 +285,8 @@ def diss(
  aggfunc: How to aggregate the non-geometry colums not in "by".
  as_index: Whether the 'by' columns should be returned as index. Defaults to
  True to be consistent with geopandas.
+ grid_size: Rounding of the coordinates. Defaults to None.
+ n_jobs: Number of threads to use. Defaults to 1.
  **dissolve_kwargs: additional keyword arguments passed to geopandas' dissolve.

  Returns:
@@ -292,14 +314,14 @@ def diss(

  def dissexp(
  gdf: GeoDataFrame,
- by=None,
- aggfunc="first",
+ by: str | Sequence[str] | None = None,
+ aggfunc: str | Callable | dict[str, str | Callable] = "first",
  as_index: bool = True,
  index_parts: bool = False,
  grid_size: float | int | None = None,
  n_jobs: int = 1,
  **dissolve_kwargs,
- ):
+ ) -> GeoDataFrame:
  """Dissolves overlapping geometries.

  It takes a GeoDataFrame and dissolves, fixes and explodes geometries.
@@ -312,6 +334,8 @@ def dissexp(
  True to be consistent with geopandas.
  index_parts: If False (default), the index after dissolve is respected. If
  True, an integer index level is added during explode.
+ grid_size: Rounding of the coordinates. Defaults to None.
+ n_jobs: Number of threads to use. Defaults to 1.
  **dissolve_kwargs: additional keyword arguments passed to geopandas' dissolve.

  Returns:
@@ -334,7 +358,7 @@ def dissexp(


  def dissexp_by_cluster(
- gdf: GeoDataFrame, predicate=None, n_jobs: int = 1, **dissolve_kwargs
+ gdf: GeoDataFrame, predicate: str | None = None, n_jobs: int = 1, **dissolve_kwargs
  ) -> GeoDataFrame:
  """Dissolves overlapping geometries through clustering with sjoin and networkx.

@@ -348,6 +372,8 @@ def dissexp_by_cluster(

  Args:
  gdf: the GeoDataFrame that will be dissolved and exploded.
+ predicate: Spatial predicate to use.
+ n_jobs: Number of threads to use. Defaults to 1.
  **dissolve_kwargs: Keyword arguments passed to geopandas' dissolve.

  Returns:
@@ -373,6 +399,8 @@ def diss_by_cluster(

  Args:
  gdf: the GeoDataFrame that will be dissolved and exploded.
+ predicate: Spatial predicate to use.
+ n_jobs: Number of threads to use. Defaults to 1.
  **dissolve_kwargs: Keyword arguments passed to geopandas' dissolve.

  Returns:
@@ -386,27 +414,10 @@ def diss_by_cluster(
  def _run_func_by_cluster(
  func: Callable,
  gdf: GeoDataFrame,
- predicate=None,
+ predicate: str | None = None,
  n_jobs: int = 1,
  **dissolve_kwargs,
  ) -> GeoDataFrame:
- """Dissolves overlapping geometries through clustering with sjoin and networkx.
-
- Works exactly like dissexp, but, before dissolving, the geometries are divided
- into clusters based on overlap (uses the function sgis.get_polygon_clusters).
- The geometries are then dissolved based on this column (and optionally other
- columns).
-
- This might be many times faster than a regular dissexp, if there are many
- non-overlapping geometries.
-
- Args:
- gdf: the GeoDataFrame that will be dissolved and exploded.
- **dissolve_kwargs: Keyword arguments passed to geopandas' dissolve.
-
- Returns:
- A GeoDataFrame where overlapping geometries are dissolved.
- """
  is_geoseries = isinstance(gdf, GeoSeries)

  by = dissolve_kwargs.pop("by", [])
@@ -458,6 +469,7 @@ def buffdissexp_by_cluster(
  resolution: int = 50,
  copy: bool = True,
  n_jobs: int = 1,
+ join_style: int | str = "round",
  **dissolve_kwargs,
  ) -> GeoDataFrame:
  """Buffers and dissolves overlapping geometries.
@@ -476,13 +488,21 @@ def buffdissexp_by_cluster(
  the geometry by
  resolution: The number of segments used to approximate a quarter circle.
  Here defaults to 50, as opposed to the default 16 in geopandas.
+ join_style: Buffer join style.
  copy: Whether to copy the GeoDataFrame before buffering. Defaults to True.
+ n_jobs: int = 1,
  **dissolve_kwargs: additional keyword arguments passed to geopandas' dissolve.

  Returns:
  A buffered GeoDataFrame where overlapping geometries are dissolved.
  """
- buffered = buff(gdf, distance, resolution=resolution, copy=copy)
+ buffered = buff(
+ gdf,
+ distance,
+ resolution=resolution,
+ copy=copy,
+ join_style=join_style,
+ )
  return dissexp_by_cluster(buffered, n_jobs=n_jobs, **dissolve_kwargs)


@@ -491,6 +511,7 @@ def buff(
  distance: int | float,
  resolution: int = 50,
  copy: bool = True,
+ join_style: int | str = "round",
  **buffer_kwargs,
  ) -> GeoDataFrame:
  """Buffers a GeoDataFrame with high resolution and returns a new GeoDataFrame.
@@ -501,21 +522,23 @@ def buff(
  the geometry by
  resolution: The number of segments used to approximate a quarter circle.
  Here defaults to 50, as opposed to the default 16 in geopandas.
+ join_style: Buffer join style.
  copy: Whether to copy the GeoDataFrame before buffering. Defaults to True.
  **buffer_kwargs: additional keyword arguments passed to geopandas' buffer.

  Returns:
  A buffered GeoDataFrame.
  """
-
  if isinstance(gdf, GeoSeries):
- return gdf.buffer(distance, resolution=resolution, **buffer_kwargs).make_valid()
+ return gdf.buffer(
+ distance, resolution=resolution, join_style=join_style, **buffer_kwargs
+ ).make_valid()

  if copy:
  gdf = gdf.copy()

  gdf[gdf._geometry_column_name] = gdf.buffer(
- distance, resolution=resolution, **buffer_kwargs
+ distance, resolution=resolution, join_style=join_style, **buffer_kwargs
  ).make_valid()

  return gdf
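
The hunks above thread a new join_style keyword from buff, buffdiss, buffdissexp and buffdissexp_by_cluster down to geopandas' buffer. A minimal sketch of how the new keyword might be used, assuming the functions are exposed at the package top level as in the docstring examples; the geometry, CRS and buffer distance below are made up for illustration:

import geopandas as gpd
from shapely.geometry import LineString

import sgis as sg

# An L-shaped line; the corner is where join_style becomes visible.
gdf = gpd.GeoDataFrame(
    {"geometry": [LineString([(0, 0), (10, 0), (10, 10)])]}, crs=25833
)

# join_style is forwarded to geopandas' buffer: "round" (the default,
# i.e. the old behaviour), "mitre" or "bevel".
sharp = sg.buffdissexp(gdf, 2, join_style="mitre")
rounded = sg.buffdissexp(gdf, 2)

# The mitred buffer keeps a sharp outer corner, so it is slightly larger.
print(sharp.area.sum() > rounded.area.sum())  # True
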
sgis/geopandas_tools/centerlines.py

@@ -1,51 +1,56 @@
  import functools
+ import itertools
  import warnings

  import numpy as np
  import pandas as pd
  import shapely
- from geopandas import GeoDataFrame, GeoSeries
- from geopandas.array import GeometryArray
+ from geopandas import GeoDataFrame
+ from geopandas import GeoSeries
  from numpy.typing import NDArray
- from shapely import (
- STRtree,
- distance,
- extract_unique_points,
- get_parts,
- get_rings,
- line_merge,
- make_valid,
- segmentize,
- unary_union,
- voronoi_polygons,
- )
+ from shapely import STRtree
+ from shapely import distance
+ from shapely import extract_unique_points
+ from shapely import get_rings
+ from shapely import line_merge
+ from shapely import make_valid
+ from shapely import segmentize
+ from shapely import unary_union
+ from shapely import voronoi_polygons
  from shapely.errors import GEOSException
  from shapely.geometry import LineString
  from shapely.ops import nearest_points

- from ..maps.maps import explore, explore_locals
+ from ..maps.maps import explore
  from ..networkanalysis.traveling_salesman import traveling_salesman_problem
- from .conversion import to_gdf, to_geoseries
- from .general import clean_geoms, make_lines_between_points, sort_long_first
+ from .conversion import to_gdf
+ from .conversion import to_geoseries
+ from .general import clean_geoms
+ from .general import make_lines_between_points
+ from .general import sort_long_first
  from .geometry_types import make_all_singlepart
- from .sfilter import sfilter_inverse, sfilter_split
-
+ from .sfilter import sfilter_inverse
+ from .sfilter import sfilter_split

  warnings.simplefilter(action="ignore", category=FutureWarning)


- def get_traveling_salesman_lines(df, return_to_start=False):
+ def get_traveling_salesman_lines(
+ df: GeoDataFrame, return_to_start: bool = False
+ ) -> list[LineString]:
  path = traveling_salesman_problem(df, return_to_start=return_to_start)

  try:
- return [LineString([p1, p2]) for p1, p2 in zip(path[:-1], path[1:])]
+ return [LineString([p1, p2]) for p1, p2 in itertools.pairwise(path)]
  except IndexError as e:
  if len(path) == 1:
  return path
  raise e


- def remove_longest_if_not_intersecting(centerlines, geoms):
+ def _remove_longest_if_not_intersecting(
+ centerlines: GeoDataFrame, geoms: GeoDataFrame
+ ) -> GeoDataFrame:
  centerlines = sort_long_first(make_all_singlepart(centerlines))

  has_only_one_line = centerlines.groupby(level=0).size() == 1
@@ -83,8 +88,7 @@ def get_rough_centerlines(
  complext polygons like (buffered) road networks.

  """
-
- PRECISION = 0.01
+ precision = 0.01

  if not len(gdf):
  return gdf
@@ -96,12 +100,12 @@ def get_rough_centerlines(

  segmentized: GeoSeries = segmentize(geoms, max_segment_length=max_segment_length)

- points: GeoSeries = get_points_in_polygons(segmentized, PRECISION)
+ points: GeoSeries = _get_points_in_polygons(segmentized, precision)

  has_no_points = geoms.loc[(~geoms.index.isin(points.index))]

- more_points: GeoSeries = get_points_in_polygons(
- has_no_points.buffer(PRECISION), PRECISION
+ more_points: GeoSeries = _get_points_in_polygons(
+ has_no_points.buffer(precision), precision
  )

  # Geometries that have no lines inside, might be perfect circles.
@@ -131,7 +135,7 @@ def get_rough_centerlines(
  ]

  # make sure to include the endpoints
- endpoints = get_approximate_polygon_endpoints(segmentized)
+ endpoints = _get_approximate_polygon_endpoints(segmentized)

  geoms = geoms.loc[~geoms.index.isin(still_has_no_points.index)]

@@ -148,7 +152,7 @@ def get_rough_centerlines(
  # keep lines 90 percent intersecting the polygon
  length_now = end_to_end.length
  end_to_end = (
- end_to_end.intersection(geoms.buffer(PRECISION))
+ end_to_end.intersection(geoms.buffer(precision))
  .dropna()
  .loc[lambda x: x.length > length_now * 0.9]
  )
@@ -157,7 +161,7 @@ def get_rough_centerlines(
  to_be_erased = points.index.isin(end_to_end.index)

  dont_intersect = sfilter_inverse(
- points.iloc[to_be_erased], end_to_end.buffer(PRECISION, cap_style=2)
+ points.iloc[to_be_erased], end_to_end.buffer(precision, cap_style=2)
  )

  points = (
@@ -184,7 +188,7 @@ def get_rough_centerlines(

  explore(points=to_gdf(points, 25833), gdf=gdf)

- remove_longest = functools.partial(remove_longest_if_not_intersecting, geoms=geoms)
+ remove_longest = functools.partial(_remove_longest_if_not_intersecting, geoms=geoms)

  centerlines = GeoSeries(
  points.groupby(level=0).apply(get_traveling_salesman_lines).explode()
@@ -235,7 +239,7 @@ def get_rough_centerlines(
  return centerlines


- def get_points_in_polygons(geometries: GeoSeries, precision: float) -> GeoSeries:
+ def _get_points_in_polygons(geometries: GeoSeries, precision: float) -> GeoSeries:
  # voronoi can cause problems if coordinates are nearly identical
  # buffering solves it
  try:
@@ -267,7 +271,7 @@ def get_points_in_polygons(geometries: GeoSeries, precision: float) -> GeoSeries
  return pd.concat([within_polygons, not_within_but_relevant]).centroid


- def get_approximate_polygon_endpoints(geoms: GeoSeries) -> GeoSeries:
+ def _get_approximate_polygon_endpoints(geoms: GeoSeries) -> GeoSeries:
  out_geoms = []

  are_thin = geoms.buffer(-1e-2).is_empty
@@ -332,7 +336,7 @@ def get_approximate_polygon_endpoints(geoms: GeoSeries) -> GeoSeries:

  out_geoms.append(nearest_geom_points)

- lines_around_geometries = multipoints_to_line_segments(
+ lines_around_geometries = _multipoints_to_line_segments(
  extract_unique_points(rectangles)
  )

@@ -370,7 +374,7 @@ def get_approximate_polygon_endpoints(geoms: GeoSeries) -> GeoSeries:
  return pd.concat(out_geoms)


- def multipoints_to_line_segments(
+ def _multipoints_to_line_segments(
  multipoints: GeoSeries | GeoDataFrame, to_next: bool = True, cycle: bool = True
  ) -> GeoSeries | GeoDataFrame:
  if not len(multipoints):
@@ -384,13 +388,13 @@ def multipoints_to_line_segments(
  for i in range(multipoints.index.nlevels)
  ]
  multipoints.index = pd.MultiIndex.from_arrays(
- [list(range(len(multipoints)))] + index,
- names=["range_idx"] + multipoints.index.names,
+ [list(range(len(multipoints))), *index],
+ names=["range_idx", *multipoints.index.names],
  )
  else:
  multipoints.index = pd.MultiIndex.from_arrays(
  [np.arange(0, len(multipoints)), multipoints.index],
- names=["range_idx"] + [multipoints.index.name],
+ names=["range_idx", multipoints.index.name],
  )

  try:
@@ -402,15 +406,17 @@ def multipoints_to_line_segments(

  if to_next:
  shift = -1
- filt = lambda x: ~x.index.get_level_values(0).duplicated(keep="first")
+ keep = "first"
  else:
  shift = 1
- filt = lambda x: ~x.index.get_level_values(0).duplicated(keep="last")
+ keep = "last"

  point_df["next"] = point_df.groupby(level=0)["geometry"].shift(shift)

  if cycle:
- first_points = point_df.loc[filt, "geometry"]
+ first_points: GeoSeries = point_df.loc[
+ lambda x: ~x.index.get_level_values(0).duplicated(keep=keep), "geometry"
+ ]
  is_last_point = point_df["next"].isna()

  point_df.loc[is_last_point, "next"] = first_points
@@ -419,7 +425,8 @@ def multipoints_to_line_segments(
  point_df = point_df[point_df["next"].notna()]

  point_df["geometry"] = [
- LineString([x1, x2]) for x1, x2 in zip(point_df["geometry"], point_df["next"])
+ LineString([x1, x2])
+ for x1, x2 in zip(point_df["geometry"], point_df["next"], strict=False)
  ]
  if isinstance(multipoints.index, pd.MultiIndex):
  point_df.index = point_df.index.droplevel(0)
@@ -431,7 +438,9 @@ def multipoints_to_line_segments(
  return GeoSeries(point_df["geometry"], crs=crs)


- def get_line_segments(lines, extract_unique: bool = False, cycle=False) -> GeoDataFrame:
+ def get_line_segments(
+ lines: GeoDataFrame | GeoSeries, extract_unique: bool = False, cycle=False
+ ) -> GeoDataFrame:
  try:
  assert lines.index.is_unique
  except AttributeError:
@@ -445,4 +454,4 @@ def get_line_segments(lines, extract_unique: bool = False, cycle=False) -> GeoDa
  coords, indices = shapely.get_coordinates(lines, return_index=True)
  points = GeoSeries(shapely.points(coords), index=indices)

- return multipoints_to_line_segments(points, cycle=cycle)
+ return _multipoints_to_line_segments(points, cycle=cycle)
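
One small readability change in the centerlines hunks is replacing zip(path[:-1], path[1:]) with itertools.pairwise(path) (available on Python 3.10+) in get_traveling_salesman_lines. The two spellings produce the same consecutive point pairs; a quick sketch with made-up points:

import itertools

from shapely.geometry import LineString, Point

path = [Point(0, 0), Point(1, 1), Point(2, 0)]

# Old: pair each point with its successor by slicing the list twice.
old = [LineString([p1, p2]) for p1, p2 in zip(path[:-1], path[1:])]

# New: itertools.pairwise yields the same consecutive pairs without copying.
new = [LineString([p1, p2]) for p1, p2 in itertools.pairwise(path)]

assert [line.wkt for line in old] == [line.wkt for line in new]
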