tunned-geobr 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. tunned_geobr/__init__.py +38 -0
  2. tunned_geobr/constants.py +13 -0
  3. tunned_geobr/data/grid_state_correspondence_table.csv +140 -0
  4. tunned_geobr/list_geobr.py +39 -0
  5. tunned_geobr/lookup_muni.py +111 -0
  6. tunned_geobr/read_amazon.py +42 -0
  7. tunned_geobr/read_amazon_ibas.py +92 -0
  8. tunned_geobr/read_atlantic_forest_ibas.py +93 -0
  9. tunned_geobr/read_biomes.py +43 -0
  10. tunned_geobr/read_census_tract.py +97 -0
  11. tunned_geobr/read_climate_aggressiveness.py +74 -0
  12. tunned_geobr/read_comparable_areas.py +75 -0
  13. tunned_geobr/read_conservation_units.py +43 -0
  14. tunned_geobr/read_country.py +43 -0
  15. tunned_geobr/read_disaster_risk_area.py +47 -0
  16. tunned_geobr/read_geology.py +77 -0
  17. tunned_geobr/read_geomorphology.py +77 -0
  18. tunned_geobr/read_health_facilities.py +49 -0
  19. tunned_geobr/read_health_region.py +52 -0
  20. tunned_geobr/read_immediate_region.py +81 -0
  21. tunned_geobr/read_indigenous_land.py +44 -0
  22. tunned_geobr/read_intermediate_region.py +61 -0
  23. tunned_geobr/read_meso_region.py +78 -0
  24. tunned_geobr/read_metro_area.py +44 -0
  25. tunned_geobr/read_micro_region.py +78 -0
  26. tunned_geobr/read_mining_processes.py +76 -0
  27. tunned_geobr/read_municipal_seat.py +41 -0
  28. tunned_geobr/read_municipality.py +83 -0
  29. tunned_geobr/read_neighborhood.py +39 -0
  30. tunned_geobr/read_pedology.py +77 -0
  31. tunned_geobr/read_pop_arrangements.py +45 -0
  32. tunned_geobr/read_region.py +41 -0
  33. tunned_geobr/read_schools.py +44 -0
  34. tunned_geobr/read_semiarid.py +42 -0
  35. tunned_geobr/read_settlements.py +85 -0
  36. tunned_geobr/read_state.py +88 -0
  37. tunned_geobr/read_statistical_grid.py +127 -0
  38. tunned_geobr/read_urban_area.py +44 -0
  39. tunned_geobr/read_urban_concentrations.py +46 -0
  40. tunned_geobr/read_weighting_area.py +74 -0
  41. tunned_geobr/utils.py +326 -0
  42. tunned_geobr-0.1.0.dist-info/METADATA +103 -0
  43. tunned_geobr-0.1.0.dist-info/RECORD +46 -0
  44. tunned_geobr-0.1.0.dist-info/WHEEL +4 -0
  45. tunned_geobr-0.1.0.dist-info/entry_points.txt +4 -0
  46. tunned_geobr-0.1.0.dist-info/licenses/LICENSE.txt +2 -0
@@ -0,0 +1,97 @@
1
+ from cursed_geobr.utils import select_metadata, download_gpkg, test_options
2
+
3
+
4
def read_census_tract(
    code_tract, year=2010, zone="urban", simplified=True, verbose=False
):
    """Download shape files of census tracts of the Brazilian Population Census (Only years 2000 and 2010 are currently available).

    Parameters
    ----------
    code_tract: int
        The 7-digit code of a Municipality. If the two-digit code or a two-letter uppercase abbreviation of
        a state is passed, (e.g. 33 or "RJ") the function will load all census tracts of that state. If code_tract="all",
        all census tracts of the country are loaded.
    year : int, optional
        Year of the data, by default 2010
    zone: string, optional
        "urban" or "rural" census tracts come in separate files in the year 2000, by default urban
    simplified: boolean, by default True
        Data 'type', indicating whether the function returns the 'original' dataset
        with high resolution or a dataset with 'simplified' borders (Default)
    verbose : bool, optional
        by default False

    Returns
    -------
    gpd.GeoDataFrame
        Metadata and geopackage of selected states

    Raises
    ------
    Exception
        If parameters are not found or not well defined

    Example
    -------
    >>> from cursed_geobr import read_census_tract

    # Read rural census tracts for years before 2007
    >>> df = read_census_tract(code_tract=5201108, year=2000, zone='rural')

    # Read all census tracts of a state at a given year
    >>> df = read_census_tract(code_tract=53, year=2010) # or
    >>> df = read_census_tract(code_tract="DF", year=2010)

    # Read all census tracts of a municipality at a given year
    >>> df = read_census_tract(code_tract=5201108, year=2010)

    # Read all census tracts of the country at a given year
    >>> df = read_census_tract(code_tract="all", year=2010)

    """

    # Validate arguments before touching the metadata table.
    test_options(zone, "zone", allowed=["urban", "rural"])
    test_options(code_tract, "code_tract", not_allowed=[None])

    metadata = select_metadata("census_tract", year=year, simplified=simplified)

    # For year <= 2007, the code, eg. U11, comes with a trailing letter U for urban and
    # R for rural. So, this code checks if the trailing code letter is the same as
    # the argument zone.
    if year <= 2007:

        metadata = metadata[
            metadata["code"].apply(lambda x: x[0].lower() == zone[0].lower())
        ]
        # [R]12 == [r]ural

    if code_tract == "all":

        if verbose:
            print("Loading data for the whole country. This might take a few minutes.")

        # Whole-country download: no further metadata filtering needed.
        return download_gpkg(metadata)

    else:

        # Keep only metadata rows whose state prefix matches the first two
        # characters of code_tract — either a numeric state code (e.g. "12")
        # matching "code", or a two-letter UF (e.g. "RO") matching
        # "code_abbrev". The bare `1` is the positional `axis` argument of
        # DataFrame.apply (row-wise).
        metadata = metadata[
            metadata[["code", "code_abbrev"]].apply(
                lambda x: str(code_tract)[:2] in str(x["code"])
                or str(code_tract)[:2]  # if number e.g. 12
                in str(x["code_abbrev"]),  # if UF e.g. RO
                1,
            )
        ]

        gdf = download_gpkg(metadata)

        # Two characters means a state-level request: return the whole state.
        if len(str(code_tract)) == 2:
            return gdf

        # Otherwise it must be a 7-digit municipality code present in the data.
        # NOTE(review): the f-string query assumes code_tract is numeric here;
        # a string municipality code would produce an invalid query expression.
        elif code_tract in gdf["code_muni"].tolist():
            return gdf.query(f"code_muni == {code_tract}")

        else:
            raise Exception("Invalid Value to argument code_tract.")
@@ -0,0 +1,74 @@
1
+ import geopandas as gpd
2
+ import tempfile
3
+ import os
4
+ import requests
5
+ from zipfile import ZipFile
6
+ from io import BytesIO
7
+
8
def read_climate_aggressiveness(simplified=False):
    """Download climate aggressiveness potential data from IBGE.

    Downloads a zipped shapefile of climate aggressiveness potential areas
    published by IBGE (Brazilian Institute of Geography and Statistics),
    extracts it to a temporary directory, and loads it as a GeoDataFrame.
    The data classifies areas by climate aggressiveness derived from rainfall
    patterns and related factors.

    Parameters
    ----------
    simplified : boolean, by default False
        If True, returns a simplified version of the dataset with fewer columns

    Returns
    -------
    gpd.GeoDataFrame
        Geodataframe with climate aggressiveness potential data

    Raises
    ------
    Exception
        If the download fails or no shapefile is present in the archive.

    Example
    -------
    >>> from cursed_geobr import read_climate_aggressiveness

    # Read climate aggressiveness data
    >>> climate = read_climate_aggressiveness()
    """

    url = "https://geoftp.ibge.gov.br/informacoes_ambientais/climatologia/vetores/regionais/shapes_potencial_agressividade_climatica.zip"

    try:
        # Fetch the zipped shapefile from IBGE's public FTP mirror.
        response = requests.get(url)
        if response.status_code != 200:
            raise Exception("Failed to download data from IBGE")

        # Unpack into a throwaway directory that is removed on exit.
        with tempfile.TemporaryDirectory() as temp_dir:
            with ZipFile(BytesIO(response.content)) as zip_ref:
                zip_ref.extractall(temp_dir)

            # Locate the shapefile inside the extracted archive.
            shapefiles = [name for name in os.listdir(temp_dir) if name.endswith('.shp')]
            if not shapefiles:
                raise Exception("No shapefile found in the downloaded data")

            gdf = gpd.read_file(os.path.join(temp_dir, shapefiles[0]))

            if simplified:
                # Candidate columns for the trimmed-down view; the actual
                # dataset may not contain all of them, so keep only the
                # ones that exist (geometry is always retained).
                wanted = [
                    'POTENCIAL',  # Aggressiveness potential
                    'CLASSE',     # Class
                    'DESCRICAO',  # Description
                ]
                keep = ['geometry'] + [col for col in wanted if col in gdf.columns]
                gdf = gdf[keep]

    except Exception as e:
        # Surface any failure (network, archive, column selection) uniformly.
        raise Exception(f"Error downloading climate aggressiveness data: {str(e)}")

    return gdf
@@ -0,0 +1,75 @@
1
+ from cursed_geobr.utils import select_metadata, download_gpkg
2
+
3
+
4
def read_comparable_areas(
    start_year=1970, end_year=2010, simplified=True, verbose=False
):
    r"""Download spatial data of historically comparable municipalities

    This function downloads the shape file of minimum comparable area of
    municipalities, known in Portuguese as 'Areas minimas comparaveis (AMCs)'.
    The data is available for any combination of census years between 1872-2010.
    These data sets are generated based on the Stata code originally developed by
    \doi{10.1590/0101-416147182phe}{Philipp Ehrl}, and translated
    into `R` by the `geobr` team.

    Years available:
    1872,1900,1911,1920,1933,1940,1950,1960,1970,1980,1991,2000,2010

    Parameters
    ----------
    start_year : int, optional
        Initial census year of the comparison period, by default 1970.
        Must be one of the years listed above.
    end_year : int, optional
        Final census year of the comparison period, by default 2010.
        Must be one of the years listed above.
    simplified: boolean, by default True
        Data 'type', indicating whether the function returns the 'original' dataset
        with high resolution or a dataset with 'simplified' borders (Default)
    verbose : bool, optional
        by default False

    Returns
    -------
    gpd.GeoDataFrame
        Metadata and geopackage of selected states

    Raises
    ------
    ValueError
        If `start_year` or `end_year` is not an available census year.

    Example
    -------
    >>> from cursed_geobr import read_comparable_areas

    # Read comparable areas for a given period
    >>> df = read_comparable_areas(start_year=1970, end_year=2010)
    """

    years_available = [
        1872,
        1900,
        1911,
        1920,
        1933,
        1940,
        1950,
        1960,
        1970,
        1980,
        1991,
        2000,
        2010,
    ]

    if (start_year not in years_available) or (end_year not in years_available):
        # Note the trailing space: the two message parts are adjacent
        # string literals and would otherwise run together.
        raise ValueError(
            "Invalid `start_year` or `end_year`. "
            f"It must be one of the following: {years_available}"
        )

    metadata = select_metadata("amc", year=start_year, simplified=simplified)

    # Boolean indexing instead of DataFrame.query: `.str` accessors inside
    # query() require engine='python' and fail under the default numexpr
    # engine. The download path encodes the period as "<start>_<end>".
    metadata = metadata[
        metadata["download_path"].str.contains(f"{start_year}_{end_year}")
    ]

    gdf = download_gpkg(metadata)

    return gdf
@@ -0,0 +1,43 @@
1
+ from cursed_geobr.utils import select_metadata, download_gpkg
2
+
3
+
4
def read_conservation_units(date=201909, simplified=True, verbose=False):
    """ Download official data of Brazilian conservation units as an sf object.

    This data set covers the whole of Brazil and it includes the polygons of all conservation units present in Brazilian
    territory. The last update of the data was 09-2019. The original
    data comes from MMA and can be found at http://mapas.mma.gov.br/i3geo/datadownload.htm .

    Parameters
    ----------
    date : int, optional
        A date number in YYYYMM format, by default 201909
    simplified: boolean, by default True
        Data 'type', indicating whether the function returns the 'original' dataset
        with high resolution or a dataset with 'simplified' borders (Default)
    verbose : bool, optional
        by default False

    Returns
    -------
    gpd.GeoDataFrame
        Metadata and geopackage of selected states

    Raises
    ------
    Exception
        If parameters are not found or not well defined

    Example
    -------
    >>> from cursed_geobr import read_conservation_units

    # Read conservation units for a given date
    >>> df = read_conservation_units(date=201909)
    """

    # `date` plays the role of `year` in the shared metadata lookup.
    metadata = select_metadata("conservation_units", year=date, simplified=simplified)

    gdf = download_gpkg(metadata)

    return gdf
@@ -0,0 +1,43 @@
1
+ from cursed_geobr.utils import select_metadata, download_gpkg
2
+
3
+
4
def read_country(year=2010, simplified=True, verbose=False):
    """ Download shape file of Brazil as sf objects.

    Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000"
    and CRS(4674).

    Parameters
    ----------
    year : int, optional
        Year of the data, by default 2010
    simplified: boolean, by default True
        Data 'type', indicating whether the function returns the 'original' dataset
        with high resolution or a dataset with 'simplified' borders (Default)
    verbose : bool, optional
        by default False

    Returns
    -------
    gpd.GeoDataFrame
        Metadata and geopackage of selected states

    Raises
    ------
    Exception
        If parameters are not found or not well defined

    Example
    -------
    >>> from cursed_geobr import read_country

    # Read country boundary at a given year
    >>> df = read_country(year=2010)
    """

    metadata = select_metadata("country", year=year, simplified=simplified)

    gdf = download_gpkg(metadata)

    return gdf
@@ -0,0 +1,47 @@
1
+ from cursed_geobr.utils import select_metadata, download_gpkg
2
+
3
+
4
def read_disaster_risk_area(year=2010, simplified=True, verbose=False):
    """ Download official data of disaster risk areas as an sf object.

    This function reads the official data of disaster risk areas in Brazil. It specifically focuses
    on geodynamic and hydro-meteorological disasters capable of triggering landslides and floods. The
    data set covers the whole country. Each risk area polygon (known as 'BATER') has a unique code id (column 'geo_bater').
    The data set brings information on the extent to which the risk area polygons overlap with census
    tracts and block faces (column "acuracia") and the number of risk areas within each risk area (column 'num').
    Original data were generated by IBGE and CEMADEN. For more information about the methodology, see details
    at https://www.ibge.gov.br/geociencias/organizacao-do-territorio/tipologias-do-territorio/21538-populacao-em-areas-de-risco-no-brasil.html

    Parameters
    ----------
    year : int, optional
        Year of the data, by default 2010
    simplified: boolean, by default True
        Data 'type', indicating whether the function returns the 'original' dataset
        with high resolution or a dataset with 'simplified' borders (Default)
    verbose : bool, optional
        by default False

    Returns
    -------
    gpd.GeoDataFrame
        Metadata and geopackage of selected states

    Raises
    ------
    Exception
        If parameters are not found or not well defined

    Example
    -------
    >>> from cursed_geobr import read_disaster_risk_area

    # Read disaster risk areas at a given year
    >>> df = read_disaster_risk_area(year=2010)
    """

    metadata = select_metadata("disaster_risk_area", year=year, simplified=simplified)

    gdf = download_gpkg(metadata)

    return gdf
@@ -0,0 +1,77 @@
1
+ import geopandas as gpd
2
+ import tempfile
3
+ import os
4
+ import requests
5
+ from zipfile import ZipFile
6
+ from io import BytesIO
7
+
8
def read_geology(simplified=False):
    """Download official geology data from IBGE.

    This function downloads and processes geological data from IBGE (Brazilian Institute of Geography and Statistics).
    The data includes geological formations and units at 1:250,000 scale.
    Original source: IBGE

    Parameters
    ----------
    simplified : boolean, by default False
        If True, returns a simplified version of the dataset with fewer columns

    Returns
    -------
    gpd.GeoDataFrame
        Geodataframe with geological data

    Raises
    ------
    Exception
        If the download fails or no shapefile is present in the archive.

    Example
    -------
    >>> from cursed_geobr import read_geology

    # Read geology data
    >>> geology = read_geology()
    """

    url = "https://geoftp.ibge.gov.br/informacoes_ambientais/geologia/levantamento_geologico/vetores/escala_250_mil/versao_2023/geol_area.zip"

    try:
        # Download the zip file
        response = requests.get(url)
        if response.status_code != 200:
            raise Exception("Failed to download data from IBGE")

        # Create a temporary directory
        with tempfile.TemporaryDirectory() as temp_dir:
            # Extract zip content
            with ZipFile(BytesIO(response.content)) as zip_ref:
                zip_ref.extractall(temp_dir)

            # Find the shapefile
            shp_files = [f for f in os.listdir(temp_dir) if f.endswith('.shp')]
            if not shp_files:
                raise Exception("No shapefile found in the downloaded data")

            # Read the shapefile
            gdf = gpd.read_file(os.path.join(temp_dir, shp_files[0]))

            if simplified:
                # Keep only the most relevant columns
                # Note: These columns are based on typical geological data structure
                columns_to_keep = [
                    'geometry',
                    'SIGLA_UNID',  # Unit code
                    'NOME_UNIDA',  # Unit name
                    'HIERARQUIA',  # Hierarchy
                    'IDADE_MAX',   # Maximum age
                    'IDADE_MIN',   # Minimum age
                    'ERRO_MAX',    # Maximum error
                    'ERRO_MIN',    # Minimum error
                    'ORIGEM',      # Origin
                    'LITOTIPO1',   # Main lithotype
                    'LITOTIPO2',   # Secondary lithotype
                ]

                # Filter to columns that actually exist in the dataset —
                # indexing with a missing column would raise a KeyError.
                # This mirrors read_geomorphology/read_climate_aggressiveness.
                existing_columns = ['geometry'] + [col for col in columns_to_keep[1:] if col in gdf.columns]
                gdf = gdf[existing_columns]

    except Exception as e:
        raise Exception(f"Error downloading geology data: {str(e)}")

    return gdf
@@ -0,0 +1,77 @@
1
+ import geopandas as gpd
2
+ import tempfile
3
+ import os
4
+ import requests
5
+ from zipfile import ZipFile
6
+ from io import BytesIO
7
+
8
def read_geomorphology(simplified=False):
    """Download official geomorphology data from IBGE.

    Downloads a zipped shapefile of geomorphological units and features at
    1:250,000 scale from IBGE (Brazilian Institute of Geography and
    Statistics), extracts it to a temporary directory, and loads it as a
    GeoDataFrame.

    Parameters
    ----------
    simplified : boolean, by default False
        If True, returns a simplified version of the dataset with fewer columns

    Returns
    -------
    gpd.GeoDataFrame
        Geodataframe with geomorphological data

    Raises
    ------
    Exception
        If the download fails or no shapefile is present in the archive.

    Example
    -------
    >>> from cursed_geobr import read_geomorphology

    # Read geomorphology data
    >>> geomorph = read_geomorphology()
    """

    url = "https://geoftp.ibge.gov.br/informacoes_ambientais/geomorfologia/vetores/escala_250_mil/versao_2023/geom_area.zip"

    try:
        # Fetch the zipped shapefile from IBGE's public FTP mirror.
        response = requests.get(url)
        if response.status_code != 200:
            raise Exception("Failed to download data from IBGE")

        # Unpack into a throwaway directory that is removed on exit.
        with tempfile.TemporaryDirectory() as temp_dir:
            with ZipFile(BytesIO(response.content)) as zip_ref:
                zip_ref.extractall(temp_dir)

            # Locate the shapefile inside the extracted archive.
            shapefiles = [name for name in os.listdir(temp_dir) if name.endswith('.shp')]
            if not shapefiles:
                raise Exception("No shapefile found in the downloaded data")

            gdf = gpd.read_file(os.path.join(temp_dir, shapefiles[0]))

            if simplified:
                # Candidate columns for the trimmed-down view; keep only
                # the ones actually present (geometry always retained).
                wanted = [
                    'COMPART',    # Compartment
                    'MODELADO',   # Landform model
                    'MORFOEST',   # Morphostructure
                    'MORFOLOG',   # Morphology
                    'DECLIVIDA',  # Slope
                    'AMPLIT_A',   # Amplitude
                    'ORDEM_REL',  # Relief order
                ]
                keep = ['geometry'] + [col for col in wanted if col in gdf.columns]
                gdf = gdf[keep]

    except Exception as e:
        # Surface any failure (network, archive, column selection) uniformly.
        raise Exception(f"Error downloading geomorphology data: {str(e)}")

    return gdf
@@ -0,0 +1,49 @@
1
+ from cursed_geobr.utils import select_metadata, download_gpkg
2
+
3
+
4
def read_health_facilities(date=202303, verbose=False):
    """ Download geolocated data of health facilities as an sf object.

    Data comes from the National Registry of Healthcare facilities (Cadastro Nacional de Estabelecimentos de Saude - CNES),
    originally collected by the Brazilian Ministry of Health. The date of the last data update is
    registered in the database in the columns 'date_update' and 'year_update'. These data uses Geodetic reference
    system "SIRGAS2000" and CRS(4674). The coordinates of each facility was obtained by CNES
    and validated by means of space operations. These operations verify if the point is in the
    municipality, considering a radius of 5,000 meters. When the coordinate is not correct,
    further searches are done in other systems of the Ministry of Health and in web services
    like Google Maps. Finally, if the coordinates have been correctly obtained in this process,
    the coordinates of the municipal head office are used. The final source used is registered
    in the database in a specific column 'data_source'. Periodically the coordinates are revised
    with the objective of improving the quality of the data. More information
    available at http://dados.gov.br/dataset/cnes

    Parameters
    ----------
    date : Numeric. Date of the data in YYYYMM format. Defaults to `202303`,
        which was the latest data available by the time of this update.
    verbose : bool, optional
        by default False

    Returns
    -------
    gpd.GeoDataFrame
        Metadata and geopackage of selected states

    Raises
    ------
    Exception
        If parameters are not found or not well defined

    Example
    -------
    >>> from cursed_geobr import read_health_facilities

    # Read health facilities data
    >>> df = read_health_facilities()
    """

    # Point data is only published at full resolution, hence simplified=False.
    metadata = select_metadata("health_facilities", year=date, simplified=False)
    return download_gpkg(metadata)
@@ -0,0 +1,52 @@
1
+ from cursed_geobr.utils import select_metadata, download_gpkg
2
+
3
+
4
def read_health_region(year=2013, macro=False, simplified=True, verbose=False):
    """Download official data of Brazilian health regions as an sf object.

    Health regions are used to guide the regional and state planning of health services.
    Macro health regions, in particular, are used to guide the planning of high complexity
    health services. These services involve larger economics of scale and are concentrated in
    few municipalities because they are generally more technology intensive, costly and face
    shortages of specialized professionals. A macro region comprises one or more health regions.

    Parameters
    ----------
    year : int, optional
        Year of the data, by default 2013
    macro: If `False` (default), the function downloads health regions data.
        If `True`, the function downloads macro regions data.
    simplified: boolean, by default True
        Data 'type', indicating whether the function returns the 'original' dataset
        with high resolution or a dataset with 'simplified' borders (Default)
    verbose : bool, optional
        by default False

    Returns
    -------
    gpd.GeoDataFrame
        Metadata and geopackage of selected states

    Raises
    ------
    Exception
        If parameters are not found or not well defined

    Example
    -------
    >>> from cursed_geobr import read_health_region

    # Read health regions at a given year
    >>> df = read_health_region(year=2013)
    """

    # `macro` selects between the two published datasets.
    dataset = "health_region_macro" if macro else "health_region"
    metadata = select_metadata(dataset, year=year, simplified=simplified)
    return download_gpkg(metadata)