ssb-sgis 1.3.6__py3-none-any.whl → 1.3.8__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the versions as they appear in their respective public registries.
@@ -688,8 +688,7 @@ def _split_lines_by_points_along_line(lines, points, splitted_col: str | None =
     relevant_lines.geometry = shapely.force_2d(relevant_lines.geometry)
     points.geometry = shapely.force_2d(points.geometry)
 
-    # split the lines with buffer + difference, since shapely.split usually doesn't work
-    # relevant_lines["_idx"] = range(len(relevant_lines))
+    # split the lines with tiny buffer + difference, since shapely.split usually doesn't work
     splitted = relevant_lines.overlay(points_buff, how="difference").explode(
         ignore_index=True
     )
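
The trick the comment describes, spelled out: shapely's split only cuts where a point lies exactly on the line, so the points are instead buffered by a tiny amount and the buffers are erased from the lines with an overlay difference. A minimal sketch on toy data (not sgis code):

```python
# "Tiny buffer + difference" splitting of a line at two points.
import geopandas as gpd
from shapely.geometry import LineString, Point

lines = gpd.GeoDataFrame(geometry=[LineString([(0, 0), (10, 0)])])
points = gpd.GeoDataFrame(geometry=[Point(4, 0), Point(7, 0)])

# erase a tiny buffer around each split point from the lines
points_buff = points.assign(geometry=points.buffer(1e-6))
splitted = lines.overlay(points_buff, how="difference").explode(ignore_index=True)
print(len(splitted))  # 3 segments; endpoints are ~1e-6 off the split points
```

The snapped-coordinates step in the next hunk exists precisely because these new endpoints are off by the buffer distance.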
@@ -703,8 +702,9 @@ def _split_lines_by_points_along_line(lines, points, splitted_col: str | None =
     if not len(splitted):
         return pd.concat([the_other_lines, circles], ignore_index=True)
 
-    # the endpoints of the new lines are now slightly off. Using get_k_nearest_neighbors
-    # to get the exact snapped point coordinates. This will map the slightly
+    # the endpoints of the new lines are now slightly off because of the buffer.
+    # Using get_k_nearest_neighbors
+    # to get the exact snapped point coordinates. This will map the slightly
     # wrong line endpoints with the point the line was split by.
 
     points["point_coords"] = [(geom.x, geom.y) for geom in points.geometry]
@@ -721,7 +721,6 @@ def _split_lines_by_points_along_line(lines, points, splitted_col: str | None =
         lambda x: x["distance"] <= precision * 2
     ]
 
-    # points = points.set_index("point_coords")
     points.index = points.geometry
     dists_source = get_nearest(splitted_source, points)
     dists_target = get_nearest(splitted_target, points)
@@ -870,9 +869,9 @@ def make_edge_wkt_cols(gdf: GeoDataFrame) -> GeoDataFrame:
     except ValueError:
         gdf, endpoints = _prepare_make_edge_cols(gdf)
 
-    endpoints = endpoints.force_2d()
-    gdf["source_wkt"] = endpoints.groupby(level=0).first().to_wkt()
-    gdf["target_wkt"] = endpoints.groupby(level=0).last().to_wkt()
+    endpoints = endpoints.force_2d().to_wkt()
+    gdf["source_wkt"] = endpoints.groupby(level=0).first()
+    gdf["target_wkt"] = endpoints.groupby(level=0).last()
 
     if index_mapper is not None:
         gdf.index = gdf.index.map(index_mapper)
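
The change converts the endpoints to WKT once and then takes the first and last entry per index label, rather than converting twice after each groupby. A small sketch of the pattern, assuming the layout the function relies on (each line's two boundary points share the line's index):

```python
# First/last endpoint per line via groupby on the index level.
import geopandas as gpd
from shapely.geometry import LineString

lines = gpd.GeoDataFrame(
    geometry=[LineString([(0, 0), (1, 1)]), LineString([(2, 2), (3, 3)])]
)
# boundary yields both endpoints; explode repeats each line's index label
endpoints = lines.geometry.boundary.explode(index_parts=False)

wkt = endpoints.to_wkt()                   # convert once...
source_wkt = wkt.groupby(level=0).first()  # ...then take first endpoint
target_wkt = wkt.groupby(level=0).last()   # ...and last endpoint per line
print(source_wkt.tolist())  # ['POINT (0 0)', 'POINT (2 2)']
```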
@@ -239,9 +239,6 @@ class RTreeQueryRunner(AbstractRunner):
                 left, right = results
                 return left, right
             return results
-            left = np.concatenate([x[0] for x in results])
-            right = np.concatenate([x[1] for x in results])
-            return left, right
         elif (
             (self.n_jobs or 1) > 1
             and len(arr2) / self.n_jobs > 10_000
@@ -264,9 +261,6 @@ class RTreeQueryRunner(AbstractRunner):
                 left, right = results
                 return left, right
             return results
-            left = np.concatenate([x[0] for x in results])
-            right = np.concatenate([x[1] for x in results])
-            return left, right
 
         return _strtree_query(arr1, arr2, method=method, **kwargs)
 
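In both hunks the deleted lines were unreachable, sitting directly after an unconditional `return results`, so this is pure dead-code removal. For context, merging chunked spatial-index queries like that would also have needed an offset on the chunk-local input indices. A hypothetical sketch of such a merge (an illustration only, not the library's implementation; `predicate="dwithin"` assumes Shapely 2.0+ with GEOS 3.10+):

```python
# Merging per-chunk STRtree.query results into global index arrays.
import numpy as np
from shapely import STRtree, points

rng = np.random.default_rng(0)
tree = STRtree(points(rng.random((100, 2))))
chunks = np.array_split(points(rng.random((1_000, 2))), 4)
results = [tree.query(c, predicate="dwithin", distance=0.05) for c in chunks]

lefts, rights, offset = [], [], 0
for chunk, (left_local, right_local) in zip(chunks, results):
    lefts.append(left_local + offset)  # shift chunk-local positions to global
    rights.append(right_local)         # tree indices are already global
    offset += len(chunk)
left = np.concatenate(lefts)
right = np.concatenate(rights)
print(left.shape == right.shape)  # True
```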
@@ -121,7 +121,7 @@ def read_geopandas(
     ).replace("==", "=")
     glob_func = _get_glob_func(file_system)
     suffix: str = Path(gcs_path).suffix
-    paths = glob_func(str(Path(gcs_path) / expression / f"*{suffix}"))
+    paths = glob_func(_standardize_path(gcs_path) + f"/{expression}/*{suffix}")
     if paths:
         return _read_geopandas_from_iterable(
             paths,
@@ -256,7 +256,7 @@ def _read_pyarrow(path: str, file_system, mask=None, **kwargs) -> pyarrow.Table
     if not len(
         {
             x
-            for x in glob_func(str(Path(path) / "**"))
+            for x in glob_func(str(_standardize_path(path) + "/**"))
             if not paths_are_equal(path, x)
         }
     ):
@@ -618,7 +618,7 @@ def _write_partitioned_geoparquet(
             as_partition_part(col, value)
             for col, value in zip(partition_cols, group, strict=True)
         )
-        paths.append(Path(path) / partition_parts)
+        paths.append(_standardize_path(path) + f"/{partition_parts}")
         dfs.append(rows)
 
     def threaded_write(rows: DataFrame, path: str) -> None:
@@ -626,7 +626,9 @@ def _write_partitioned_geoparquet(
             this_basename = (uuid.uuid4().hex + "-{i}.parquet").replace("-{i}", "0")
         else:
             this_basename = basename_template.replace("-{i}", "0")
-        for i, sibling_path in enumerate(sorted(glob_func(str(Path(path) / "**")))):
+        for i, sibling_path in enumerate(
+            sorted(glob_func(str(_standardize_path(path) + "/**")))
+        ):
             if paths_are_equal(sibling_path, path):
                 continue
             if existing_data_behavior == "delete_matching":
@@ -638,7 +640,7 @@ def _write_partitioned_geoparquet(
         else:
             this_basename = basename_template.replace("-{i}", str(i + 1))
 
-        out_path = str(Path(path) / this_basename)
+        out_path = str(_standardize_path(path) + "/" + this_basename)
         try:
             with file_system.open(out_path, mode="wb") as file:
                 write_func(rows, file, schema=schema, **kwargs)
@@ -780,7 +782,7 @@ def _read_partitioned_parquet(
     glob_func = _get_glob_func(file_system)
 
     if child_paths is None:
-        child_paths = list(glob_func(str(Path(path) / "**/*.parquet")))
+        child_paths = list(glob_func(str(_standardize_path(path) + "/**/*.parquet")))
 
     filters = _filters_to_expression(filters)
 
@@ -830,7 +832,7 @@ def get_child_paths(path, file_system) -> list[str]:
     glob_func = _get_glob_func(file_system)
     return [
         x
-        for x in glob_func(str(Path(path) / "**/*.parquet"))
+        for x in glob_func(str(_standardize_path(path) + "/**/*.parquet"))
         if not paths_are_equal(x, path)
     ]
 
@@ -938,3 +940,8 @@ def _maybe_strip_prefix(path, file_system):
     if isinstance(file_system, GCSFileSystem) and path.startswith("gs://"):
         return path.replace("gs://", "")
     return path
+
+
+def _standardize_path(path: str | Path) -> str:
+    """Make sure delimiter is '/' and path ends without '/'."""
+    return str(path).replace("\\", "/").replace(r"\"", "/")
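
The helper's motivation (per its own docstring): `pathlib` joins use the host OS separator, so on Windows every `str(Path(...) / ...)` in the hunks above produced backslashes that GCS-style filesystems cannot glob. A sketch of the failure and a simplified stand-in for `_standardize_path` (the released helper additionally replaces a literal backslash-quote sequence):

```python
# Why pathlib joins break remote (GCS-style) glob patterns on Windows,
# demonstrated portably via PureWindowsPath.
from pathlib import PurePosixPath, PureWindowsPath


def standardize(path) -> str:
    """Simplified stand-in for _standardize_path: force '/' as delimiter."""
    return str(path).replace("\\", "/")


p = PureWindowsPath("bucket/folder") / "**/*.parquet"
print(str(p))          # bucket\folder\**\*.parquet -> unusable as a GCS glob
print(standardize(p))  # bucket/folder/**/*.parquet -> what glob_func expects

# On POSIX the join was already fine, which is why the bug is easy to miss:
print(str(PurePosixPath("bucket/folder") / "**/*.parquet"))
```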
@@ -26,13 +26,11 @@ def _od_cost_matrix(
     # calculating all-to-all distances is much faster than looping rowwise,
     # so filtering to rowwise afterwards instead
     if rowwise:
-        rowwise_df = DataFrame(
-            {
-                "origin": origins.index,
-                "destination": destinations.index,
-            }
+        keys = pd.MultiIndex.from_arrays(
+            [origins.index, destinations.index],
+            names=["origin", "destination"],
         )
-        results = rowwise_df.merge(results, on=["origin", "destination"], how="left")
+        results = results.set_index(["origin", "destination"]).loc[keys].reset_index()
 
     results["geom_ori"] = results["origin"].map(origins.geometry)
     results["geom_des"] = results["destination"].map(destinations.geometry)
@@ -16,10 +16,7 @@ opposite directions.
 
 
 class Points:
-    def __init__(
-        self,
-        points: GeoDataFrame,
-    ) -> None:
+    def __init__(self, points: GeoDataFrame) -> None:
         self.gdf = points.copy()
 
     def _make_temp_idx(self, start: int) -> None:
@@ -79,6 +79,7 @@ def close_network_holes(
     gdf: GeoDataFrame,
     max_distance: int | float,
     max_angle: int,
+    *,
     hole_col: str | None = "hole",
 ) -> GeoDataFrame:
     """Fills network gaps with straight lines.
@@ -282,11 +283,13 @@ def _close_holes_all_lines(
 ) -> GeoSeries:
     k = min(len(nodes), 50)
 
+    n_dict = nodes.set_index("wkt")["n"]
+
     # make points for the deadends and the other endpoint of the deadend lines
-    deadends_target = lines.loc[lines["n_target"] == 1].rename(
+    deadends_target = lines.loc[lines["target_wkt"].map(n_dict) == 1].rename(
         columns={"target_wkt": "wkt", "source_wkt": "wkt_other_end"}
     )
-    deadends_source = lines.loc[lines["n_source"] == 1].rename(
+    deadends_source = lines.loc[lines["source_wkt"].map(n_dict) == 1].rename(
         columns={"source_wkt": "wkt", "target_wkt": "wkt_other_end"}
     )
     deadends = pd.concat([deadends_source, deadends_target], ignore_index=True)
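
With the `n_source`/`n_target` columns dropped elsewhere in this release, the deadend test now looks node degree up on the fly: `n_dict` maps each node's WKT to its degree `n`, and an endpoint with degree 1 is a deadend. A toy reproduction of the lookup:

```python
# wkt -> node degree lookup used to find deadend endpoints.
import pandas as pd

nodes = pd.DataFrame(
    {"wkt": ["POINT (0 0)", "POINT (1 0)", "POINT (2 0)"], "n": [1, 2, 1]}
)
lines = pd.DataFrame(
    {
        "source_wkt": ["POINT (0 0)", "POINT (1 0)"],
        "target_wkt": ["POINT (1 0)", "POINT (2 0)"],
    }
)

n_dict = nodes.set_index("wkt")["n"]  # Series acting as a dict: wkt -> degree
deadends_source = lines.loc[lines["source_wkt"].map(n_dict) == 1]  # first line
deadends_target = lines.loc[lines["target_wkt"].map(n_dict) == 1]  # second line
```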
@@ -349,12 +352,6 @@ def _close_holes_all_lines(
         to_idx = indices[condition]
         to_wkt = nodes.iloc[to_idx]["wkt"]
 
-        # all_angles = all_angles + [
-        #     diff
-        #     for f, diff in zip(from_wkt, angles_difference[condition], strict=True)
-        #     if f not in new_sources
-        # ]
-
         # now add the wkts to the lists of new sources and targets. If the source
         # is already added, the new wkts will not be added again
         new_targets = new_targets + [
@@ -57,9 +57,7 @@ def get_connected_components(gdf: GeoDataFrame) -> GeoDataFrame:
 
     gdf["connected"] = gdf.source.map(largest_component_dict).fillna(0)
 
-    gdf = gdf.drop(
-        ["source_wkt", "target_wkt", "source", "target", "n_source", "n_target"], axis=1
-    )
+    gdf = gdf.drop(["source_wkt", "target_wkt", "source", "target"], axis=1)
 
     return gdf
 
@@ -120,8 +118,6 @@ def get_component_size(gdf: GeoDataFrame) -> GeoDataFrame:
     gdf["component_index"] = gdf["source"].map(mapper["component_index"])
     gdf["component_size"] = gdf["source"].map(mapper["component_size"])
 
-    gdf = gdf.drop(
-        ["source_wkt", "target_wkt", "source", "target", "n_source", "n_target"], axis=1
-    )
+    gdf = gdf.drop(["source_wkt", "target_wkt", "source", "target"], axis=1)
 
     return gdf
@@ -36,14 +36,12 @@ class Network:
             raise TypeError(f"'lines' should be GeoDataFrame, got {type(gdf)}")
 
         if not len(gdf):
-            raise ZeroLinesError
+            raise ZeroLinesError()
 
         self.gdf = self._prepare_network(gdf)
 
         self._make_node_ids()
 
-        self._percent_bidirectional = self._check_percent_bidirectional()
-
     def _make_node_ids(self) -> None:
         """Gives the lines node ids and returns lines (edges) and nodes.
@@ -55,6 +53,7 @@ class Network:
         The lines must be singlepart linestrings.
         """
         self.gdf, self._nodes = make_node_ids(self.gdf)
+        self._percent_bidirectional = self._check_percent_bidirectional()
 
     @staticmethod
     def _prepare_network(gdf: GeoDataFrame) -> GeoDataFrame:
@@ -138,6 +137,8 @@ class Network:
         or any superfluous node-ids (meaning rows have been removed from the lines
         gdf).
         """
+        if not hasattr(self, "_nodes"):
+            return False
         new_or_missing = (~self.gdf.source.isin(self._nodes.node_id)) | (
             ~self.gdf.target.isin(self._nodes.node_id)
         )
@@ -20,6 +20,7 @@ from pandas import MultiIndex
 from shapely import force_2d
 
 from ..geopandas_tools.general import _push_geom_col
+from ..geopandas_tools.sfilter import sfilter_inverse
 from ._get_route import _get_k_routes
 from ._get_route import _get_route
 from ._get_route import _get_route_frequencies
@@ -30,6 +31,8 @@ from ._service_area import _service_area
 from .cutting_lines import split_lines_by_nearest_point
 from .network import Network
 from .networkanalysisrules import NetworkAnalysisRules
+from .nodes import _map_node_ids_from_wkt
+from .nodes import make_node_ids
 
 
 class NetworkAnalysis:
@@ -1372,15 +1375,14 @@ class NetworkAnalysis:
 
         self.origins = Origins(origins)
         self.origins._make_temp_idx(
-            start=max(self.network.nodes.node_id.astype(int)) + 1
+            start=max(self.network.nodes["node_id"].astype(int)) + 1
         )
 
         if destinations is not None:
             self.destinations = Destinations(destinations)
             self.destinations._make_temp_idx(
-                start=max(self.origins.gdf.temp_idx.astype(int)) + 1
+                start=max(self.origins.gdf["temp_idx"].astype(int)) + 1
             )
-
         else:
             self.destinations = None
 
@@ -1411,7 +1413,6 @@ class NetworkAnalysis:
         """
         if self.rules.split_lines:
             self._split_lines()
-            self.network._make_node_ids()
             self.origins._make_temp_idx(
                 start=max(self.network.nodes.node_id.astype(int)) + 1
             )
@@ -1426,6 +1427,7 @@ class NetworkAnalysis:
 
         self.network.gdf["src_tgt_wt"] = self.network._create_edge_ids(edges, weights)
 
+        # add edges between origins+destinations and the network nodes
         edges_start, weights_start = self.origins._get_edges_and_weights(
             nodes=self.network.nodes,
             rules=self.rules,
@@ -1482,7 +1484,7 @@ class NetworkAnalysis:
 
         points = points.drop_duplicates(points.geometry.name)
 
-        self.network.gdf["meters_"] = self.network.gdf.length
+        self.network.gdf["_meters2"] = self.network.gdf.length
 
         # create an id from before the split, used to revert the split later
         self.network.gdf["temp_idx__"] = range(len(self.network.gdf))
@@ -1495,25 +1497,53 @@ class NetworkAnalysis:
         )
 
         # save the unsplit lines for later
-        splitted = lines.loc[lines["splitted"] == 1, "temp_idx__"]
+        splitted = lines.loc[lines["splitted"] == 1]
         self.network._not_splitted = self.network.gdf.loc[
-            self.network.gdf["temp_idx__"].isin(splitted)
+            lambda x: x["temp_idx__"].isin(splitted["temp_idx__"])
         ]
 
+        new_lines, new_nodes = make_node_ids(splitted)
+        new_nodes = sfilter_inverse(new_nodes, self.network.nodes.buffer(1e-5))
+        new_nodes["node_id"] = (
+            new_nodes["node_id"].astype(int) + len(self.network.nodes) + 1
+        ).astype(str)
+        self.network._new_node_ids = list(new_nodes["node_id"])
+
         # adjust weight to new length
-        lines[self.rules.weight] = lines[self.rules.weight] * (
-            lines.length / lines["meters_"]
+        new_lines[self.rules.weight] = new_lines[self.rules.weight] * (
+            new_lines.length / new_lines["_meters2"]
+        )
+        self.network._nodes = pd.concat(
+            [self.network._nodes, new_nodes],
+            ignore_index=True,
+        )
+
+        lines = pd.concat(
+            [
+                self.network.gdf.loc[
+                    lambda x: ~x["temp_idx__"].isin(splitted["temp_idx__"])
+                ],
+                new_lines,
+            ],
+            ignore_index=True,
         )
 
+        lines = _map_node_ids_from_wkt(lines, self.network._nodes)
+
         self.network.gdf = lines
 
     def _unsplit_network(self):
         """Remove the splitted lines and add the unsplitted ones."""
+        if not hasattr(self.network, "_not_splitted"):
+            return
         lines = self.network.gdf.loc[self.network.gdf["splitted"] != 1]
         self.network.gdf = pd.concat(
             [lines, self.network._not_splitted], ignore_index=True
         ).drop("temp_idx__", axis=1)
-        del self.network._not_splitted
+        self.network._nodes = self.network._nodes[
+            lambda x: ~x["node_id"].isin(self.network._new_node_ids)
+        ]
+        del self.network._not_splitted, self.network._new_node_ids
 
     @staticmethod
     def _make_graph(
@@ -1557,7 +1587,7 @@ class NetworkAnalysis:
         for points in ["origins", "destinations"]:
             if self[points] is None:
                 continue
-            if points not in self.wkts:
+            if not hasattr(self, points) or self[points] is None:
                 return False
             if self._points_have_changed(self[points].gdf, what=points):
                 return False
@@ -1587,8 +1617,6 @@ class NetworkAnalysis:
         """
         self.wkts = {}
 
-        self.wkts["network"] = self.network.gdf.geometry.to_wkt().values
-
         if not hasattr(self, "origins"):
             return
 
@@ -261,10 +261,10 @@ class NetworkAnalysisRules:
     @staticmethod
     def _check_for_nans(df: GeoDataFrame, col: str) -> None:
         """Remove NaNs and give warning if there are any."""
-        if all(df[col].isna()):
+        if df[col].isna().all():
             raise ValueError(f"All values in the {col!r} column are NaN.")
 
-        nans = sum(df[col].isna())
+        nans = df[col].isna().sum()
         if nans:
             raise ValueError(
                 f"{nans} rows have missing values in the {col!r} column. "
@@ -274,7 +274,7 @@ class NetworkAnalysisRules:
     @staticmethod
     def _check_for_negative_values(df: GeoDataFrame, col: str) -> None:
         """Remove negative values and give warning if there are any."""
-        negative = sum(df[col] < 0)
+        negative = (df[col] < 0).sum()
         if negative:
             raise ValueError(
                 f"{negative} negative values found in the {col!r} column. Fill these "
@@ -47,16 +47,14 @@ def make_node_ids(
     gdf = make_edge_coords_cols(gdf)
     geomcol1, geomcol2, geomcol_final = "source_coords", "target_coords", "coords"
 
-    # remove identical lines in opposite directions
+    # remove identical lines in opposite directions in order to get n==1 for deadends
    gdf["meters_"] = gdf.length.astype(str)
-
     sources = gdf[[geomcol1, geomcol2, "meters_"]].rename(
         columns={geomcol1: geomcol_final, geomcol2: "temp"}
     )
     targets = gdf[[geomcol1, geomcol2, "meters_"]].rename(
         columns={geomcol2: geomcol_final, geomcol1: "temp"}
     )
-
     nodes = (
         pd.concat([sources, targets], axis=0, ignore_index=True)
         .drop_duplicates([geomcol_final, "temp", "meters_"])
@@ -66,22 +64,11 @@ def make_node_ids(
     gdf = gdf.drop("meters_", axis=1)
 
     nodes["n"] = nodes.assign(n=1).groupby(geomcol_final)["n"].transform("sum")
-
     nodes = nodes.drop_duplicates(subset=[geomcol_final]).reset_index(drop=True)
-
     nodes["node_id"] = nodes.index
     nodes["node_id"] = nodes["node_id"].astype(str)
 
-    id_dict = {
-        geom: node_id
-        for geom, node_id in zip(nodes[geomcol_final], nodes["node_id"], strict=True)
-    }
-    gdf["source"] = gdf[geomcol1].map(id_dict)
-    gdf["target"] = gdf[geomcol2].map(id_dict)
-
-    n_dict = {geom: n for geom, n in zip(nodes[geomcol_final], nodes["n"], strict=True)}
-    gdf["n_source"] = gdf[geomcol1].map(n_dict)
-    gdf["n_target"] = gdf[geomcol2].map(n_dict)
+    gdf = _map_node_ids_from_wkt(gdf, nodes, wkt=wkt)
 
     if wkt:
         nodes["geometry"] = gpd.GeoSeries.from_wkt(nodes[geomcol_final], crs=gdf.crs)
@@ -95,3 +82,17 @@ def make_node_ids(
     gdf = _push_geom_col(gdf)
 
     return gdf, nodes
+
+
+def _map_node_ids_from_wkt(lines, nodes, wkt: bool = True) -> GeoDataFrame:
+    if wkt:
+        geomcol1, geomcol2, geomcol_final = "source_wkt", "target_wkt", "wkt"
+    else:
+        geomcol1, geomcol2, geomcol_final = "source_coords", "target_coords", "coords"
+    id_dict = {
+        geom: node_id
+        for geom, node_id in zip(nodes[geomcol_final], nodes["node_id"], strict=True)
+    }
+    lines["source"] = lines[geomcol1].map(id_dict)
+    lines["target"] = lines[geomcol2].map(id_dict)
+    return lines
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ssb-sgis
-Version: 1.3.6
+Version: 1.3.8
 Summary: GIS functions used at Statistics Norway.
 Home-page: https://github.com/statisticsnorway/ssb-sgis
 License: MIT
@@ -9,7 +9,7 @@ sgis/geopandas_tools/centerlines.py,sha256=Q65Sx01SeAlulBEd9oaZkB2maBBNdLcJwAbTI
 sgis/geopandas_tools/cleaning.py,sha256=fST0xFztmyn-QUOAfvjZmu7aO_zPiolWK7gd7TR6ffI,24393
 sgis/geopandas_tools/conversion.py,sha256=viZz9t1Gi-8nJ9GwZlCVvObXO52VTODkTZ_F3b0gLek,24183
 sgis/geopandas_tools/duplicates.py,sha256=AU8J2X3sUuohRDsQOc9TA5K0k2KHDlGLbvb6eMSQvvc,15893
-sgis/geopandas_tools/general.py,sha256=B4houRK2zDrlarhihQs4w6ITfh6UZHhUciB120bbniI,44447
+sgis/geopandas_tools/general.py,sha256=W8R0_EXGZXDnTl36iGVtF3YTe_BvdrR2Jzy1TZo5o9A,44363
 sgis/geopandas_tools/geocoding.py,sha256=sZjUW52ULhQWDLmU51C9_itBePkDuWkp8swvYaiYmJk,679
 sgis/geopandas_tools/geometry_types.py,sha256=ijQDbQaZPqPGjBl707H4yooNXpk21RXyatI7itnvqLk,7603
 sgis/geopandas_tools/neighbors.py,sha256=VZGOwwC3-C6KpwLQ3j0K5cOVInmckxIXoGMqPGkemk4,17606
@@ -17,13 +17,13 @@ sgis/geopandas_tools/overlay.py,sha256=uR9troLM9amo-z5Bs6jhPtFbr0RrDFsW-WJpeuQ4W
 sgis/geopandas_tools/point_operations.py,sha256=R_39MoMIMsTB0mxmWibZxfUoUZUriuOoxjMNC8mFCds,7624
 sgis/geopandas_tools/polygon_operations.py,sha256=v-B9IgbFfm4dVHKPyzvmnNiqVCdtl9ddpCsQpZZ-9sU,49284
 sgis/geopandas_tools/polygons_as_rings.py,sha256=BX_GZS6F9I4NbEpiOlNBd7zywJjdfdJVi_MkeONBuiM,14941
-sgis/geopandas_tools/runners.py,sha256=ZVJnUoJAgc0K_UNIIBOwUWaQDCKCzebygGMu2Fdw6LA,13470
+sgis/geopandas_tools/runners.py,sha256=m2AgT1GTSACPzGeP0cuaItnpN-77bpTPcXHwmaOqNNQ,13170
 sgis/geopandas_tools/sfilter.py,sha256=CZ_-c4t1CQCwJ7RHCKo1Na9u-aAg18xXnJAMiUqoaj8,10411
 sgis/geopandas_tools/utils.py,sha256=X0pRvB1tWgV_0BCrRS1HU9LtLGnZCpvVPxyqM9JGb0Y,1415
 sgis/helpers.py,sha256=4N6vFWQ3TYVzRHNcWY_fNa_GkFuaZB3vtCkkFND-qs0,9628
 sgis/io/__init__.py,sha256=uyBr20YDqB2bQttrd5q1JuGOvX32A-MSvS7Wmw5f5qg,177
 sgis/io/_is_dapla.py,sha256=wmfkSe98IrLhUg3dtXZusV6OVC8VlY1kbc5EQDf3P-Q,358
-sgis/io/dapla_functions.py,sha256=EmliBKnGm8XELY450BMTYfRtYw2WRMR2SBUxhYP5huw,31896
+sgis/io/dapla_functions.py,sha256=_arLbkfdC18hUi5chNdKyE6JIEPCEBeVjjblBjLnLx8,32195
 sgis/io/opener.py,sha256=3D65XF0dHLigZfzB5uAG-3T_VXVu4wJXZUMzAkqHypc,844
 sgis/io/read_parquet.py,sha256=FvZYv1rLkUlrSaUY6QW6E1yntmntTeQuZ9ZRgCDO4IM,3776
 sgis/maps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -39,17 +39,17 @@ sgis/maps/tilesources.py,sha256=F4mFHxPwkiPJdVKzNkScTX6xbJAMIUtlTq4mQ83oguw,1746
 sgis/maps/wms.py,sha256=sCVpKxH1Rsd14GECW7BFh8yaWngpVWYvw9Yhuez1yW8,12482
 sgis/networkanalysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sgis/networkanalysis/_get_route.py,sha256=3m5xQdQqGtt51vcI5fcmYQAOQCeBtL6sorDoPxBNf84,7818
-sgis/networkanalysis/_od_cost_matrix.py,sha256=u2UtgZ0GvvCE1PNvKpB3lM35RCcFKXqtC3NLYWX_jmA,2348
-sgis/networkanalysis/_points.py,sha256=128QEepBGAyI5XakMUVvNyOnJE0Ts7hvGiVUt1YTXiU,4439
+sgis/networkanalysis/_od_cost_matrix.py,sha256=DZDQGZN_OGkYC35fpS_WoXoZuQ3zoZnmtArXl5eGc70,2333
+sgis/networkanalysis/_points.py,sha256=iG5whhXAo5NcjLNaVgjvNfl6oSm9SOoVmUeMPB3uNWQ,4416
 sgis/networkanalysis/_service_area.py,sha256=-xSdiZrZ3uqAhd4ZaBY8a8fMJjN09MPoYxlVMXm3UZk,5642
-sgis/networkanalysis/closing_network_holes.py,sha256=FYZ677nRwLmDkP6bQ1ssQ_h29RzAG463U4xmbu5ksfg,14572
+sgis/networkanalysis/closing_network_holes.py,sha256=7mQmqvcx4kpu0WGlGS8PH4j4dDFsm19FOm-z0rYD2nk,14455
 sgis/networkanalysis/cutting_lines.py,sha256=ZQAt0cufaPeNAEqUzp-imu26AIL9S5-lw6Xifa8RoWk,9818
 sgis/networkanalysis/directednetwork.py,sha256=Mrc2zHip4P5RNxnyffKm-xU832AVQeSHz-YZueAc0pM,11413
-sgis/networkanalysis/finding_isolated_networks.py,sha256=Wg4ILhm7uS9RLOGcL0WN8uZBMJYjdljJc8L5DU5nIPY,3754
-sgis/networkanalysis/network.py,sha256=zV9bAbVdTgTohg2o2RFGy2uhOJrd3Ma57hwIAStxMAQ,7847
-sgis/networkanalysis/networkanalysis.py,sha256=5LFCapexrkO97ZyAV2-0lwEPHfVQrdQ4Q1A2eRQBOVo,68498
-sgis/networkanalysis/networkanalysisrules.py,sha256=na26iZTpnlwwIEEGswoqMH4W1ZOLN2GIvACZbxn8GPw,12958
-sgis/networkanalysis/nodes.py,sha256=atFSpqz-_uJHMrf6MC0zhrrcWIydRMFZrsaHC2xr1GU,3374
+sgis/networkanalysis/finding_isolated_networks.py,sha256=FDel7zF03eBpicwZnfK2-e9IXYaZ4KVleaTnUvgW-4k,3678
+sgis/networkanalysis/network.py,sha256=13yoArGOX1J_nCLTxh7tQjDTxj545RDYFYc_11m3FuM,7913
+sgis/networkanalysis/networkanalysis.py,sha256=gy69ym4mBkQJAq6U27mB9Vkvq71ZuOI1Fr0deggG0oY,69641
+sgis/networkanalysis/networkanalysisrules.py,sha256=k1f-DNEmJtiP_TkCCDclsnRjGTkBqAkj-MYQMUAt_Yc,12963
+sgis/networkanalysis/nodes.py,sha256=h7j3vCFdTsuiVEkmB26ebe44HiFLKOlQhghJ5NV0cU4,3562
 sgis/networkanalysis/traveling_salesman.py,sha256=Jjo6bHY4KJ-eK0LycyTy0sWxZjgITs5MBllZ_G9FhTE,5655
 sgis/parallel/__init__.py,sha256=fw_Fl3IJk1bKzrRBhZIoOpznJqwd09NVHJJFj2ZLeIU,32
 sgis/parallel/parallel.py,sha256=3Nq7cgvkVmg-2TEyNCEiqBshYOHt6Qf6xg6rrcnzf78,39394
@@ -61,7 +61,7 @@ sgis/raster/indices.py,sha256=efJmgfPg_VuSzXFosXV661IendF8CwPFWtMhyP4TMUg,222
 sgis/raster/regex.py,sha256=4idTJ9vFtsGtbxcjJrx2VrpJJuDMP3bLdqF93Vc_cmY,3752
 sgis/raster/sentinel_config.py,sha256=nySDqn2R8M6W8jguoBeSAK_zzbAsqmaI59i32446FwY,1268
 sgis/raster/zonal.py,sha256=D4Gyptw-yOLTCO41peIuYbY-DANsJCG19xXDlf1QAz4,2299
-ssb_sgis-1.3.6.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
-ssb_sgis-1.3.6.dist-info/METADATA,sha256=M-4XAxb5WFhaHhbzq4IIb2oE0Ww8gNhbJSqFKjoPnSs,11495
-ssb_sgis-1.3.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-ssb_sgis-1.3.6.dist-info/RECORD,,
+ssb_sgis-1.3.8.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
+ssb_sgis-1.3.8.dist-info/METADATA,sha256=qWEIyFwB_LmGabBeT9zQUq34Ne7FWXzsYoRQL_RgOLE,11495
+ssb_sgis-1.3.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ssb_sgis-1.3.8.dist-info/RECORD,,