ssb-sgis 0.3.13__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,6 +11,7 @@ version of the solution from GH 2792.
  import functools

  import dask
+ import dask.array as da
  import geopandas as gpd
  import joblib
  import numpy as np
@@ -49,6 +50,7 @@ def clean_overlay(
      how: str = "intersection",
      keep_geom_type: bool | None = None,
      geom_type: str | None = None,
+     predicate: str | None = "intersects",
      grid_size: float | None = None,
      n_jobs: int = 1,
      lsuffix: str = DEFAULT_LSUFFIX,
@@ -158,6 +160,7 @@ def clean_overlay(
          rsuffix=rsuffix,
          geom_type=geom_type,
          n_jobs=n_jobs,
+         predicate=predicate,
      ),
      geometry="geometry",
      crs=crs,
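The new `predicate` keyword is threaded from the public `clean_overlay` signature down to the STRtree query in `_shapely_pd_overlay` (next hunks). A minimal usage sketch, assuming `clean_overlay` is importable from the top-level sgis namespace; the sample frames are hypothetical, only the `predicate` argument itself comes from this diff:

    import geopandas as gpd
    import sgis
    from shapely.geometry import box

    left = gpd.GeoDataFrame({"a": [1]}, geometry=[box(0, 0, 2, 2)], crs=25833)
    right = gpd.GeoDataFrame({"b": [1]}, geometry=[box(1, 1, 3, 3)], crs=25833)

    # default candidate filter, same behaviour as before this change
    res = sgis.clean_overlay(left, right, how="intersection")

    # stricter candidate filter via the new keyword
    res = sgis.clean_overlay(left, right, predicate="within")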
@@ -222,6 +225,7 @@ def _shapely_pd_overlay(
      df2: DataFrame,
      how: str,
      grid_size: float = DEFAULT_GRID_SIZE,
+     predicate: str = "intersects",
      lsuffix=DEFAULT_LSUFFIX,
      rsuffix=DEFAULT_RSUFFIX,
      geom_type=None,
@@ -231,7 +235,7 @@ def _shapely_pd_overlay(
          return _no_intersections_return(df1, df2, how, lsuffix, rsuffix)

      tree = STRtree(df2.geometry.values)
-     left, right = tree.query(df1.geometry.values, predicate="intersects")
+     left, right = tree.query(df1.geometry.values, predicate=predicate)

      pairs = _get_intersects_pairs(df1, df2, left, right, rsuffix)
      assert pairs.geometry.notna().all()
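The `left, right` unpacking above relies on shapely 2's `STRtree.query`, which, given an array of geometries and a `predicate`, returns two aligned index arrays: positions in the query array and positions in the tree. A small self-contained illustration with made-up boxes:

    import numpy as np
    from shapely import STRtree
    from shapely.geometry import box

    df1_geoms = np.array([box(0, 0, 1, 1), box(5, 5, 6, 6)])
    df2_geoms = np.array([box(0.5, 0.5, 2, 2)])

    tree = STRtree(df2_geoms)
    left, right = tree.query(df1_geoms, predicate="intersects")
    # left == array([0]): index into df1_geoms
    # right == array([0]): index into df2_geoms; the box at index 1 has no match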
@@ -326,12 +330,25 @@ def _run_overlay_dask(arr1, arr2, func, n_jobs, grid_size):
              return func(arr1, arr2, grid_size=grid_size)
          except TypeError as e:
              raise TypeError(e, {type(x) for x in arr1}, {type(x) for x in arr2})
-     arr1 = dask.array.from_array(arr1, chunks=len(arr1) // n_jobs)
-     arr2 = dask.array.from_array(arr2, chunks=len(arr2) // n_jobs)
+     arr1 = da.from_array(arr1, chunks=len(arr1) // n_jobs)
+     arr2 = da.from_array(arr2, chunks=len(arr2) // n_jobs)
      res = arr1.map_blocks(func, arr2, grid_size=grid_size, dtype=float)
      return res.compute(scheduler="threads", optimize_graph=False, num_workers=n_jobs)


+ def _run_overlay_joblib_threading(arr1, arr2, func, n_jobs, grid_size):
+     if len(arr1) // n_jobs <= 1:
+         try:
+             return func(arr1, arr2, grid_size=grid_size)
+         except TypeError as e:
+             raise TypeError(e, {type(x) for x in arr1}, {type(x) for x in arr2})
+     with joblib.Parallel(n_jobs=n_jobs, backend="threading") as parallel:
+         return parallel(
+             joblib.delayed(func)(g1, g2, grid_size=grid_size)
+             for g1, g2 in zip(arr1, arr2, strict=True)
+         )
+
+
  def _intersection(pairs, grid_size, geom_type, n_jobs=1) -> GeoDataFrame:
      if not len(pairs):
          return pairs.drop(columns="geom_right")
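The new helper replaces the dask block mapping above with one `joblib.delayed` task per geometry pair on the threading backend, which avoids pickling geometries; shapely's GEOS calls release the GIL, so threads can overlap on heavy geometries. A rough sketch of the same pattern on made-up data (not the package's own code path):

    import joblib
    import numpy as np
    from shapely import intersection
    from shapely.geometry import box

    arr1 = np.array([box(i, 0, i + 2, 2) for i in range(8)])
    arr2 = np.array([box(i + 1, 1, i + 3, 3) for i in range(8)])

    with joblib.Parallel(n_jobs=4, backend="threading") as parallel:
        # one task per element pair, mirroring _run_overlay_joblib_threading
        res = parallel(
            joblib.delayed(intersection)(g1, g2, grid_size=None)
            for g1, g2 in zip(arr1, arr2, strict=True)
        )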
@@ -342,10 +359,8 @@ def _intersection(pairs, grid_size, geom_type, n_jobs=1) -> GeoDataFrame:
      arr2 = intersections["geom_right"].to_numpy()

      if n_jobs > 1 and len(arr1) / n_jobs > 10:
-         # dask_arr1 = dask.array.from_array(arr1, chunks=int(len(arr1) / n_jobs))
-         # dask_arr2 = dask.array.from_array(arr2, chunks=int(len(arr2) / n_jobs))
          try:
-             res = _run_overlay_dask(
+             res = _run_overlay_joblib_threading(
                  arr1,
                  arr2,
                  func=intersection,
@@ -359,12 +374,12 @@ def _intersection(pairs, grid_size, geom_type, n_jobs=1) -> GeoDataFrame:
              arr2 = make_valid_and_keep_geom_type(
                  arr2, geom_type=geom_type, n_jobs=n_jobs
              )
-             # dask_arr1 = dask.array.from_array(arr1, chunks=int(len(arr1) / n_jobs))
-             # dask_arr2 = dask.array.from_array(arr2, chunks=int(len(arr2) / n_jobs))
+             arr1 = arr1.loc[lambda x: x.index.isin(arr2.index)]
+             arr2 = arr2.loc[lambda x: x.index.isin(arr1.index)]

-             res = _run_overlay_dask(
-                 arr1,
-                 arr2,
+             res = _run_overlay_joblib_threading(
+                 arr1.to_numpy(),
+                 arr2.to_numpy(),
                  func=intersection,
                  n_jobs=n_jobs,
                  grid_size=grid_size,
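`make_valid_and_keep_geom_type` can now drop rows whose repaired geometry ends up with the wrong type (it returns an indexed GeoSeries, see the last hunks in this file), so both sides are re-aligned by mutual `isin` before retrying. A toy pandas illustration of that re-alignment, with made-up values:

    import pandas as pd

    left = pd.Series(["a", "b", "c"], index=[0, 1, 2])
    right = pd.Series(["x", "z"], index=[0, 2])  # row 1 was dropped

    left = left.loc[lambda x: x.index.isin(right.index)]
    right = right.loc[lambda x: x.index.isin(left.index)]
    # both now share the index [0, 2], keeping the pairs aligned elementwise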
@@ -379,18 +394,17 @@ def _intersection(pairs, grid_size, geom_type, n_jobs=1) -> GeoDataFrame:
                  grid_size=grid_size,
              )
          except GEOSException:
+             left = make_valid_and_keep_geom_type(
+                 intersections["geometry"].to_numpy(), geom_type, n_jobs=n_jobs
+             )
+             right = make_valid_and_keep_geom_type(
+                 intersections["geom_right"].to_numpy(), geom_type, n_jobs=n_jobs
+             )
+             left = left.loc[lambda x: x.index.isin(right.index)]
+             right = right.loc[lambda x: x.index.isin(left.index)]
+
              intersections["geometry"] = intersection(
-                 make_valid_and_keep_geom_type(
-                     intersections["geometry"].to_numpy(),
-                     geom_type=geom_type,
-                     n_jobs=n_jobs,
-                 ),
-                 make_valid_and_keep_geom_type(
-                     intersections["geom_right"].to_numpy(),
-                     geom_type=geom_type,
-                     n_jobs=n_jobs,
-                 ),
-                 grid_size=grid_size,
+                 left.to_numpy(), right.to_numpy(), grid_size=grid_size
              )

      return intersections.drop(columns="geom_right")
@@ -531,22 +545,70 @@ def _shapely_diffclip_left(pairs, df1, grid_size, geom_type, n_jobs):
      """Aggregate areas in right by unique values of left, then use those to clip
      areas out of left"""

-     aggfuncs = {
-         c: "first"
-         for c in df1.columns
-         if c not in ["_overlay_index_right", "geom_right"]
-     }
+     keep_cols = list(df1.columns.difference({"_overlay_index_right"})) + ["geom_right"]

-     # if n_jobs == 1:
      agg_geoms_partial = functools.partial(agg_geoms, grid_size=grid_size)
-     aggfuncs |= {"geom_right": agg_geoms_partial}

-     clip_left = pairs.groupby(level=0).agg(aggfuncs)
+     try:
+         only_one = pairs.groupby(level=0).transform("size") == 1
+         one_hit = pairs.loc[only_one, list(keep_cols)]
+         many_hits = pairs.loc[~only_one, list(keep_cols) + ["_overlay_index_right"]]
+         # keep "first" in the non-geom cols, aggregate only the geom col because of speed
+         many_hits_agged = many_hits.loc[
+             lambda x: ~x.index.duplicated(),
+             lambda x: x.columns.difference({"geom_right"}),
+         ]
+
+         index_mapper = {
+             i: x
+             for i, x in many_hits.groupby(level=0)["_overlay_index_right"]
+             .unique()
+             .apply(lambda j: tuple(sorted(j)))
+             .items()
+         }

-     # if n_jobs > 1:
-     #     clip_left["geom_right"] = parallel_unary_union(
-     #         pairs, level=0, n_jobs=n_jobs, grid_size=grid_size
-     #     )
+         many_hits_agged["_right_indices"] = index_mapper
+
+         inverse_index_mapper = pd.Series(
+             {
+                 x[0]: x
+                 for x in many_hits_agged.reset_index()
+                 .groupby("_right_indices")["index"]
+                 .unique()
+                 .apply(tuple)
+             }
+         ).explode()
+         inverse_index_mapper = pd.Series(
+             inverse_index_mapper.index, index=inverse_index_mapper.values
+         )
+
+         agger = (
+             pd.Series(index_mapper.values(), index=index_mapper.keys())
+             .drop_duplicates()
+             .explode()
+             .to_frame("_overlay_index_right")
+         )
+         agger["geom_right"] = agger["_overlay_index_right"].map(
+             {
+                 i: g
+                 for i, g in zip(
+                     many_hits["_overlay_index_right"], many_hits["geom_right"]
+                 )
+             }
+         )
+
+         agged = pd.Series(
+             {
+                 i: agg_geoms_partial(geoms)
+                 for i, geoms in agger.groupby(level=0)["geom_right"]
+             }
+         )
+         many_hits_agged["geom_right"] = inverse_index_mapper.map(agged)
+         many_hits_agged = many_hits_agged.drop(columns=["_right_indices"])
+
+         clip_left = pd.concat([one_hit, many_hits_agged])
+     except IndexError:
+         clip_left = pairs.loc[:, list(keep_cols)]

      assert clip_left["geometry"].notna().all()
      assert clip_left["geom_right"].notna().all()
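The rewrite above (and the matching one in `_shapely_diffclip_right` below) rests on one trick: rows whose group has a single member need no aggregation, and `groupby(...).transform("size")` broadcasts each group's size back onto the rows, so the two cases can be split cheaply. A minimal sketch of the fast-path split on made-up data:

    import pandas as pd

    pairs = pd.DataFrame({"geom_right": ["g1", "g2", "g3"]}, index=[0, 1, 1])

    sizes = pairs.groupby(level=0)["geom_right"].transform("size")
    one_hit = pairs[sizes == 1]   # index 0: kept as-is, no unary_union needed
    many_hits = pairs[sizes > 1]  # index 1: geometries must be dissolved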
@@ -565,23 +627,40 @@ def _shapely_diffclip_left(pairs, df1, grid_size, geom_type, n_jobs):
  def _shapely_diffclip_right(pairs, df1, df2, grid_size, rsuffix, geom_type, n_jobs):
      agg_geoms_partial = functools.partial(agg_geoms, grid_size=grid_size)

-     clip_right = (
-         pairs.rename(columns={"geometry": "geom_left", "geom_right": "geometry"})
-         .groupby(by="_overlay_index_right")
-         .agg(
-             {
-                 "geom_left": agg_geoms_partial,
-                 "geometry": "first",
-             }
+     pairs = pairs.rename(columns={"geometry": "geom_left", "geom_right": "geometry"})
+
+     try:
+         only_one = pairs.groupby("_overlay_index_right").transform("size") == 1
+         one_hit = pairs[only_one].set_index("_overlay_index_right")[
+             ["geom_left", "geometry"]
+         ]
+         many_hits = (
+             pairs[~only_one]
+             .groupby("_overlay_index_right")
+             .agg(
+                 {
+                     "geom_left": agg_geoms_partial,
+                     "geometry": "first",
+                 }
+             )
          )
-         .join(df2.drop(columns=["geometry"]))
-         .rename(
+         clip_right = (
+             pd.concat([one_hit, many_hits])
+             .join(df2.drop(columns=["geometry"]))
+             .rename(
+                 columns={
+                     c: f"{c}{rsuffix}" if c in df1.columns and c != "geometry" else c
+                     for c in df2.columns
+                 }
+             )
+         )
+     except IndexError:
+         clip_right = pairs.join(df2.drop(columns=["geometry"])).rename(
              columns={
                  c: f"{c}{rsuffix}" if c in df1.columns and c != "geometry" else c
                  for c in df2.columns
              }
          )
-     )

      assert clip_right["geometry"].notna().all()
      assert clip_right["geom_left"].notna().all()
@@ -599,12 +678,8 @@ def _shapely_diffclip_right(pairs, df1, df2, grid_size, rsuffix, geom_type, n_jo
  def _try_difference(left, right, grid_size, geom_type, n_jobs=1):
      """Try difference overlay, then make_valid and retry."""
      if n_jobs > 1 and len(left) / n_jobs > 10:
-         # dask_arr1 = dask.array.from_array(left, chunks=int(len(left) / n_jobs))
-         # dask_arr2 = dask.array.from_array(right, chunks=int(len(right) / n_jobs))
-         # dask_arr1 = make_valid_and_keep_geom_type(dask_arr1, geom_type=geom_type)
-         # dask_arr2 = make_valid_and_keep_geom_type(dask_arr2, geom_type=geom_type)
          try:
-             return _run_overlay_dask(
+             return _run_overlay_joblib_threading(
                  left,
                  right,
                  func=difference,
@@ -618,12 +693,12 @@ def _try_difference(left, right, grid_size, geom_type, n_jobs=1):
              right = make_valid_and_keep_geom_type(
                  right, geom_type=geom_type, n_jobs=n_jobs
              )
-             # dask_arr1 = dask.array.from_array(arr1, chunks=int(len(arr1) / n_jobs))
-             # dask_arr2 = dask.array.from_array(arr2, chunks=int(len(arr2) / n_jobs))
+             left = left.loc[lambda x: x.index.isin(right.index)]
+             right = right.loc[lambda x: x.index.isin(left.index)]

-             return _run_overlay_dask(
-                 left,
-                 right,
+             return _run_overlay_joblib_threading(
+                 left.to_numpy(),
+                 right.to_numpy(),
                  func=difference,
                  n_jobs=n_jobs,
                  grid_size=grid_size,
@@ -636,10 +711,14 @@ def _try_difference(left, right, grid_size, geom_type, n_jobs=1):
              grid_size=grid_size,
          )
      except GEOSException:
+         left = make_valid_and_keep_geom_type(left, geom_type, n_jobs=n_jobs)
+         right = make_valid_and_keep_geom_type(right, geom_type, n_jobs=n_jobs)
+         left = left.loc[lambda x: x.index.isin(right.index)]
+         right = right.loc[lambda x: x.index.isin(left.index)]
          try:
              return difference(
-                 make_valid_and_keep_geom_type(left, geom_type, n_jobs=n_jobs),
-                 make_valid_and_keep_geom_type(right, geom_type, n_jobs=n_jobs),
+                 left.to_numpy(),
+                 right.to_numpy(),
                  grid_size=grid_size,
              )
          except GEOSException as e:
@@ -648,7 +727,7 @@ def _try_difference(left, right, grid_size, geom_type, n_jobs=1):

  def make_valid_and_keep_geom_type(
      geoms: np.ndarray, geom_type: str, n_jobs
- ) -> np.ndarray:
+ ) -> GeoSeries:
      """Make GeometryCollections into (Multi)Polygons, (Multi)LineStrings or (Multi)Points.

      Because GeometryCollections might appear after dissolving (unary_union).
@@ -657,12 +736,13 @@ def make_valid_and_keep_geom_type(
      """
      geoms = GeoSeries(geoms)
      geoms.index = range(len(geoms))
-     geoms.loc[:] = make_valid(geoms.values)
+     geoms.loc[:] = make_valid(geoms.to_numpy())
      geoms = geoms.explode(index_parts=False).pipe(to_single_geom_type, geom_type)
-     return geoms.groupby(level=0).agg(unary_union).sort_index().values
+     only_one = geoms.groupby(level=0).transform("size") == 1
+     one_hit = geoms[only_one]
+     many_hits = geoms[~only_one].groupby(level=0).agg(unary_union)
+     return pd.concat([one_hit, many_hits]).sort_index()


  def agg_geoms(g, grid_size=None):
-     return (
-         make_valid(unary_union(g, grid_size=grid_size)) if len(g) > 1 else make_valid(g)
-     )
+     return make_valid(unary_union(g, grid_size=grid_size))
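`make_valid_and_keep_geom_type` now returns a GeoSeries whose index survives filtering, which is what lets callers detect dropped rows, and it reuses the same size==1 fast path so single geometries skip `unary_union`. A rough standalone sketch of the repair-then-filter idea; the type filter below is a crude stand-in for the package's `to_single_geom_type` helper:

    import geopandas as gpd
    import pandas as pd
    from shapely import make_valid, unary_union
    from shapely.geometry import Polygon

    # a self-intersecting "bow tie"; make_valid() splits it into parts
    bowtie = Polygon([(0, 0), (2, 2), (2, 0), (0, 2)])
    geoms = gpd.GeoSeries([bowtie])
    geoms.index = range(len(geoms))

    geoms.loc[:] = make_valid(geoms.to_numpy())
    geoms = geoms.explode(index_parts=False)
    geoms = geoms[geoms.geom_type.isin(["Polygon", "MultiPolygon"])]

    only_one = geoms.groupby(level=0).transform("size") == 1
    repaired = pd.concat(
        [geoms[only_one], geoms[~only_one].groupby(level=0).agg(unary_union)]
    ).sort_index()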
@@ -169,7 +169,7 @@ class PolygonsAsRings:

      if len(results) != len(self.rings):
          raise ValueError(
-             f"Different length of results. Got {len(results)} and n rings {len(self.rings)}"
+             f"Different length of results. Got {len(results)} and {len(self.rings)} original rings"
          )

      self.rings.loc[:] = results
@@ -1,5 +1,6 @@
  """Functions for reading and writing GeoDataFrames in Statistics Norway's GCS Dapla.
  """
+
  from pathlib import Path
  from typing import Optional

@@ -59,16 +60,6 @@ def read_geopandas(
          if pandas_fallback or not len(df):
              return df
          else:
-             try:
-                 raise e.__class__(e, gcs_path)
-             except Exception:
-                 print(gcs_path)
-                 raise e
-     except Exception as e:
-         try:
-             raise e.__class__(e, gcs_path)
-         except Exception:
-             print(gcs_path)
              raise e
      else:
          with file_system.open(gcs_path, mode="rb") as file:
@@ -127,6 +118,8 @@ def write_geopandas(
          dp.write_pandas(df, gcs_path, **kwargs)
          return

+     file_system = dp.FileClient.get_gcs_file_system()
+
      if ".parquet" in gcs_path or "prqt" in gcs_path:
          with file_system.open(gcs_path, mode="wb") as buffer:
              table = _geopandas_to_arrow(df, index=df.index, schema_version=None)
@@ -177,10 +170,10 @@ def check_files(
          within_minutes: Optionally include only files that were updated in the
              last n minutes.
      """
-     fs = dp.FileClient.get_gcs_file_system()
+     file_system = dp.FileClient.get_gcs_file_system()

      # (recursive doesn't work, so doing recursive search below)
-     info = fs.ls(folder, detail=True, recursive=True)
+     info = file_system.ls(folder, detail=True, recursive=True)

      if not info:
          return pd.DataFrame(columns=["kb", "mb", "name", "child", "path"])
@@ -232,7 +225,7 @@ def check_files(


  def get_files_in_subfolders(folderinfo: list[dict]) -> list[dict]:
-     fs = dp.FileClient.get_gcs_file_system()
+     file_system = dp.FileClient.get_gcs_file_system()

      if isinstance(folderinfo, (str, Path)):
          folderinfo = [folderinfo]
@@ -242,7 +235,7 @@ def get_files_in_subfolders(folderinfo: list[dict]) -> list[dict]:
      while folderinfo:
          new_folderinfo = []
          for m in folderinfo:
-             more_info = fs.ls(m, detail=True, recursive=True)
+             more_info = file_system.ls(m, detail=True, recursive=True)
              if not more_info:
                  continue

sgis/maps/explore.py CHANGED
@@ -3,6 +3,7 @@
  This module holds the Explore class, which is the basis for the explore, samplemap and
  clipmap functions from the 'maps' module.
  """
+
  import os
  import warnings
  from collections.abc import Iterable
@@ -31,6 +32,14 @@ from .map import Map
  from .tilesources import kartverket, xyz


+ try:
+     from torchgeo.datasets.geo import RasterDataset
+ except ImportError:
+
+     class RasterDataset:
+         """Placeholder"""
+
+
  # the geopandas._explore raises a deprecation warning. Ignoring for now.
  warnings.filterwarnings(
      action="ignore", category=matplotlib.MatplotlibDeprecationWarning
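The try/except import gives `RasterDataset` a harmless stand-in when torchgeo is not installed: nothing ever instantiates the placeholder, so every `isinstance(x, RasterDataset)` check in this module simply returns False and the raster branches are skipped without a hard dependency. A generic sketch of the same pattern (`handle` is a hypothetical name, not from this package):

    try:
        from torchgeo.datasets.geo import RasterDataset
    except ImportError:

        class RasterDataset:
            """Placeholder so isinstance() checks stay valid without torchgeo."""


    def handle(obj):
        # False for every object when torchgeo is missing
        if isinstance(obj, RasterDataset):
            ...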
@@ -138,7 +147,12 @@ def to_tile(tile: str | xyzservices.TileProvider, max_zoom: int) -> folium.TileL

  class Explore(Map):
      # class attribute that can be overridden locally
-     tiles = ("OpenStreetMap", "dark", "norge_i_bilder", "grunnkart")
+     tiles = (
+         "grunnkart",
+         "norge_i_bilder",
+         "dark",
+         "OpenStreetMap",
+     )

      def __init__(
          self,
@@ -151,7 +165,7 @@ class Explore(Map):
          browser: bool = False,
          prefer_canvas: bool = True,
          measure_control: bool = True,
-         geocoder: bool = True,
+         geocoder: bool = False,
          save=None,
          show: bool | Iterable[bool] | None = None,
          text: str | None = None,
@@ -181,6 +195,13 @@ class Explore(Map):
          else:
              show_was_none = False

+         self.raster_datasets = tuple(
+             raster_dataset_to_background_map(x)
+             for x in gdfs
+             if isinstance(x, RasterDataset)
+         )
+         self.tiles  # += self.raster_datasets
+
          super().__init__(*gdfs, column=column, show=show, **kwargs)

          if self.gdfs is None:
@@ -247,7 +268,7 @@ class Explore(Map):
      def explore(
          self, column: str | None = None, center=None, size=None, **kwargs
      ) -> None:
-         if not any(len(gdf) for gdf in self._gdfs):
+         if not any(len(gdf) for gdf in self._gdfs) and not len(self.raster_datasets):
              warnings.warn("None of the GeoDataFrames have rows.")
              return
          if column:
@@ -821,6 +842,11 @@ def _tooltip_popup(type, fields, gdf, **kwds):
          return folium.GeoJsonPopup(fields, **kwds)


+ def raster_dataset_to_background_map(dataset: RasterDataset):
+     crs = dataset.crs
+     bbox = dataset.bounds
+
+
  def _categorical_legend(m, title, categories, colors):
      """
      Add categorical legend to a map
sgis/maps/map.py CHANGED
@@ -2,6 +2,7 @@

  This module holds the Map class, which is the basis for the Explore class.
  """
+
  import warnings

  import matplotlib
@@ -23,6 +24,14 @@ from ..geopandas_tools.general import (
  )
  from ..helpers import get_object_name

+ try:
+     from torchgeo.datasets.geo import RasterDataset
+ except ImportError:
+
+     class RasterDataset:
+         """Placeholder"""
+
+
  # the geopandas._explore raises a deprecation warning. Ignoring for now.
  warnings.filterwarnings(
      action="ignore", category=matplotlib.MatplotlibDeprecationWarning
@@ -83,6 +92,7 @@ class Map:
          scheme: str = DEFAULT_SCHEME,
          **kwargs,
      ):
+
          gdfs, column, kwargs = self._separate_args(gdfs, column, kwargs)

          self._column = column
@@ -326,7 +336,9 @@ class Map:
              return obj
          raise TypeError

-     gdfs: tuple[GeoDataFrame] = ()
+     allowed_types = (GeoDataFrame, GeoSeries, Geometry, RasterDataset)
+
+     gdfs: tuple[GeoDataFrame | GeoSeries | Geometry | RasterDataset] = ()
      for arg in args:
          if isinstance(arg, str):
              if column is None:
@@ -335,13 +347,13 @@ class Map:
                  raise ValueError(
                      "Can specify at most one string as a positional argument."
                  )
-         elif isinstance(arg, (GeoDataFrame, GeoSeries, Geometry)):
+         elif isinstance(arg, allowed_types):
              gdfs = gdfs + (arg,)
          elif isinstance(arg, dict) or hasattr(arg, "__dict__"):
              # add dicts or classes with GeoDataFrames to kwargs
              more_gdfs = {}
              for key, value in as_dict(arg).items():
-                 if isinstance(value, (GeoDataFrame, GeoSeries, Geometry)):
+                 if isinstance(value, allowed_types):
                      more_gdfs[key] = value
                  elif isinstance(value, dict) or hasattr(value, "__dict__"):
                      try:
@@ -349,7 +361,7 @@ class Map:
                          more_gdfs |= {
                              k: v
                              for k, v in value.items()
-                             if isinstance(v, (GeoDataFrame, GeoSeries, Geometry))
+                             if isinstance(v, allowed_types)
                          }
                      except Exception:
                          # no need to raise here