dsgrid-toolkit 0.3.3__cp313-cp313-win_amd64.whl
This diff shows the contents of publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- build_backend.py +93 -0
- dsgrid/__init__.py +22 -0
- dsgrid/api/__init__.py +0 -0
- dsgrid/api/api_manager.py +179 -0
- dsgrid/api/app.py +419 -0
- dsgrid/api/models.py +60 -0
- dsgrid/api/response_models.py +116 -0
- dsgrid/apps/__init__.py +0 -0
- dsgrid/apps/project_viewer/app.py +216 -0
- dsgrid/apps/registration_gui.py +444 -0
- dsgrid/chronify.py +32 -0
- dsgrid/cli/__init__.py +0 -0
- dsgrid/cli/common.py +120 -0
- dsgrid/cli/config.py +176 -0
- dsgrid/cli/download.py +13 -0
- dsgrid/cli/dsgrid.py +157 -0
- dsgrid/cli/dsgrid_admin.py +92 -0
- dsgrid/cli/install_notebooks.py +62 -0
- dsgrid/cli/query.py +729 -0
- dsgrid/cli/registry.py +1862 -0
- dsgrid/cloud/__init__.py +0 -0
- dsgrid/cloud/cloud_storage_interface.py +140 -0
- dsgrid/cloud/factory.py +31 -0
- dsgrid/cloud/fake_storage_interface.py +37 -0
- dsgrid/cloud/s3_storage_interface.py +156 -0
- dsgrid/common.py +36 -0
- dsgrid/config/__init__.py +0 -0
- dsgrid/config/annual_time_dimension_config.py +194 -0
- dsgrid/config/common.py +142 -0
- dsgrid/config/config_base.py +148 -0
- dsgrid/config/dataset_config.py +907 -0
- dsgrid/config/dataset_schema_handler_factory.py +46 -0
- dsgrid/config/date_time_dimension_config.py +136 -0
- dsgrid/config/dimension_config.py +54 -0
- dsgrid/config/dimension_config_factory.py +65 -0
- dsgrid/config/dimension_mapping_base.py +350 -0
- dsgrid/config/dimension_mappings_config.py +48 -0
- dsgrid/config/dimensions.py +1025 -0
- dsgrid/config/dimensions_config.py +71 -0
- dsgrid/config/file_schema.py +190 -0
- dsgrid/config/index_time_dimension_config.py +80 -0
- dsgrid/config/input_dataset_requirements.py +31 -0
- dsgrid/config/mapping_tables.py +209 -0
- dsgrid/config/noop_time_dimension_config.py +42 -0
- dsgrid/config/project_config.py +1462 -0
- dsgrid/config/registration_models.py +188 -0
- dsgrid/config/representative_period_time_dimension_config.py +194 -0
- dsgrid/config/simple_models.py +49 -0
- dsgrid/config/supplemental_dimension.py +29 -0
- dsgrid/config/time_dimension_base_config.py +192 -0
- dsgrid/data_models.py +155 -0
- dsgrid/dataset/__init__.py +0 -0
- dsgrid/dataset/dataset.py +123 -0
- dsgrid/dataset/dataset_expression_handler.py +86 -0
- dsgrid/dataset/dataset_mapping_manager.py +121 -0
- dsgrid/dataset/dataset_schema_handler_base.py +945 -0
- dsgrid/dataset/dataset_schema_handler_one_table.py +209 -0
- dsgrid/dataset/dataset_schema_handler_two_table.py +322 -0
- dsgrid/dataset/growth_rates.py +162 -0
- dsgrid/dataset/models.py +51 -0
- dsgrid/dataset/table_format_handler_base.py +257 -0
- dsgrid/dataset/table_format_handler_factory.py +17 -0
- dsgrid/dataset/unpivoted_table.py +121 -0
- dsgrid/dimension/__init__.py +0 -0
- dsgrid/dimension/base_models.py +230 -0
- dsgrid/dimension/dimension_filters.py +308 -0
- dsgrid/dimension/standard.py +252 -0
- dsgrid/dimension/time.py +352 -0
- dsgrid/dimension/time_utils.py +103 -0
- dsgrid/dsgrid_rc.py +88 -0
- dsgrid/exceptions.py +105 -0
- dsgrid/filesystem/__init__.py +0 -0
- dsgrid/filesystem/cloud_filesystem.py +32 -0
- dsgrid/filesystem/factory.py +32 -0
- dsgrid/filesystem/filesystem_interface.py +136 -0
- dsgrid/filesystem/local_filesystem.py +74 -0
- dsgrid/filesystem/s3_filesystem.py +118 -0
- dsgrid/loggers.py +132 -0
- dsgrid/minimal_patterns.cp313-win_amd64.pyd +0 -0
- dsgrid/notebooks/connect_to_dsgrid_registry.ipynb +949 -0
- dsgrid/notebooks/registration.ipynb +48 -0
- dsgrid/notebooks/start_notebook.sh +11 -0
- dsgrid/project.py +451 -0
- dsgrid/query/__init__.py +0 -0
- dsgrid/query/dataset_mapping_plan.py +142 -0
- dsgrid/query/derived_dataset.py +388 -0
- dsgrid/query/models.py +728 -0
- dsgrid/query/query_context.py +287 -0
- dsgrid/query/query_submitter.py +994 -0
- dsgrid/query/report_factory.py +19 -0
- dsgrid/query/report_peak_load.py +70 -0
- dsgrid/query/reports_base.py +20 -0
- dsgrid/registry/__init__.py +0 -0
- dsgrid/registry/bulk_register.py +165 -0
- dsgrid/registry/common.py +287 -0
- dsgrid/registry/config_update_checker_base.py +63 -0
- dsgrid/registry/data_store_factory.py +34 -0
- dsgrid/registry/data_store_interface.py +74 -0
- dsgrid/registry/dataset_config_generator.py +158 -0
- dsgrid/registry/dataset_registry_manager.py +950 -0
- dsgrid/registry/dataset_update_checker.py +16 -0
- dsgrid/registry/dimension_mapping_registry_manager.py +575 -0
- dsgrid/registry/dimension_mapping_update_checker.py +16 -0
- dsgrid/registry/dimension_registry_manager.py +413 -0
- dsgrid/registry/dimension_update_checker.py +16 -0
- dsgrid/registry/duckdb_data_store.py +207 -0
- dsgrid/registry/filesystem_data_store.py +150 -0
- dsgrid/registry/filter_registry_manager.py +123 -0
- dsgrid/registry/project_config_generator.py +57 -0
- dsgrid/registry/project_registry_manager.py +1623 -0
- dsgrid/registry/project_update_checker.py +48 -0
- dsgrid/registry/registration_context.py +223 -0
- dsgrid/registry/registry_auto_updater.py +316 -0
- dsgrid/registry/registry_database.py +667 -0
- dsgrid/registry/registry_interface.py +446 -0
- dsgrid/registry/registry_manager.py +558 -0
- dsgrid/registry/registry_manager_base.py +367 -0
- dsgrid/registry/versioning.py +92 -0
- dsgrid/rust_ext/__init__.py +14 -0
- dsgrid/rust_ext/find_minimal_patterns.py +129 -0
- dsgrid/spark/__init__.py +0 -0
- dsgrid/spark/functions.py +589 -0
- dsgrid/spark/types.py +110 -0
- dsgrid/tests/__init__.py +0 -0
- dsgrid/tests/common.py +140 -0
- dsgrid/tests/make_us_data_registry.py +265 -0
- dsgrid/tests/register_derived_datasets.py +103 -0
- dsgrid/tests/utils.py +25 -0
- dsgrid/time/__init__.py +0 -0
- dsgrid/time/time_conversions.py +80 -0
- dsgrid/time/types.py +67 -0
- dsgrid/units/__init__.py +0 -0
- dsgrid/units/constants.py +113 -0
- dsgrid/units/convert.py +71 -0
- dsgrid/units/energy.py +145 -0
- dsgrid/units/power.py +87 -0
- dsgrid/utils/__init__.py +0 -0
- dsgrid/utils/dataset.py +830 -0
- dsgrid/utils/files.py +179 -0
- dsgrid/utils/filters.py +125 -0
- dsgrid/utils/id_remappings.py +100 -0
- dsgrid/utils/py_expression_eval/LICENSE +19 -0
- dsgrid/utils/py_expression_eval/README.md +8 -0
- dsgrid/utils/py_expression_eval/__init__.py +847 -0
- dsgrid/utils/py_expression_eval/tests.py +283 -0
- dsgrid/utils/run_command.py +70 -0
- dsgrid/utils/scratch_dir_context.py +65 -0
- dsgrid/utils/spark.py +918 -0
- dsgrid/utils/spark_partition.py +98 -0
- dsgrid/utils/timing.py +239 -0
- dsgrid/utils/utilities.py +221 -0
- dsgrid/utils/versioning.py +36 -0
- dsgrid_toolkit-0.3.3.dist-info/METADATA +193 -0
- dsgrid_toolkit-0.3.3.dist-info/RECORD +157 -0
- dsgrid_toolkit-0.3.3.dist-info/WHEEL +4 -0
- dsgrid_toolkit-0.3.3.dist-info/entry_points.txt +4 -0
- dsgrid_toolkit-0.3.3.dist-info/licenses/LICENSE +29 -0
dsgrid/registry/filesystem_data_store.py
@@ -0,0 +1,150 @@
+import logging
+from pathlib import Path
+from typing import Self
+
+from dsgrid.registry.data_store_interface import DataStoreInterface
+from dsgrid.spark.functions import coalesce
+from dsgrid.spark.types import DataFrame
+from dsgrid.utils.files import delete_if_exists
+from dsgrid.utils.spark import read_dataframe, write_dataframe, write_dataframe_and_auto_partition
+
+
+TABLE_FILENAME = "table.parquet"
+LOOKUP_TABLE_FILENAME = "lookup_table.parquet"
+MISSING_ASSOCIATIONS_TABLE_FILENAME = "missing_associations_table.parquet"
+# We used to write these filenames. Keep support for old registries, for now.
+ALT_TABLE_FILENAME = "load_data.parquet"
+ALT_LOOKUP_TABLE_FILENAME = "load_data_lookup.parquet"
+
+logger = logging.getLogger(__name__)
+
+
+class FilesystemDataStore(DataStoreInterface):
+    """Data store that stores tables in Parquet files on the local or remote filesystem."""
+
+    @classmethod
+    def create(cls, base_path: Path) -> Self:
+        base_path.mkdir(exist_ok=True)
+        return cls(base_path)
+
+    @classmethod
+    def load(cls, base_path: Path) -> Self:
+        if not base_path.exists():
+            msg = f"Base path {base_path} does not exist. Cannot load FilesystemDataStore."
+            raise FileNotFoundError(msg)
+        return cls(base_path)
+
+    def read_table(self, dataset_id: str, version: str) -> DataFrame:
+        filename = self._table_filename(dataset_id, version)
+        if not filename.exists():
+            filename = self._alt_table_filename(dataset_id, version)
+            if not filename.exists():
+                msg = f"Table does not exist for dataset {dataset_id}, version {version} at {filename.parent}."
+                raise FileNotFoundError(msg)
+        return read_dataframe(filename)
+
+    def replace_table(self, df: DataFrame, dataset_id: str, version: str) -> None:
+        filename = self._get_existing_table_filename(dataset_id, version)
+        if filename is None:
+            self.write_table(df, dataset_id, version)
+            return
+        self._replace_table(df, filename)
+
+    def read_lookup_table(self, dataset_id: str, version: str) -> DataFrame:
+        filename = self._get_existing_lookup_table_filename(dataset_id, version)
+        if filename is None:
+            msg = f"Table does not exist for dataset {dataset_id}, version {version}."
+            raise FileNotFoundError(msg)
+        return read_dataframe(filename)
+
+    def replace_lookup_table(self, df: DataFrame, dataset_id: str, version: str) -> None:
+        filename = self._get_existing_lookup_table_filename(dataset_id, version)
+        if filename is None:
+            self.write_lookup_table(df, dataset_id, version)
+            return
+        self._replace_table(df, filename)
+
+    def read_missing_associations_tables(
+        self, dataset_id: str, version: str
+    ) -> dict[str, DataFrame]:
+        assoc_dir = self._missing_associations_dir(dataset_id, version)
+        if not assoc_dir.exists():
+            return {}
+        return {x.stem: read_dataframe(x) for x in assoc_dir.iterdir()}
+
+    def write_table(
+        self, df: DataFrame, dataset_id: str, version: str, overwrite: bool = False
+    ) -> None:
+        filename = self._table_filename(dataset_id, version)
+        filename.parent.mkdir(parents=True, exist_ok=True)
+        write_dataframe_and_auto_partition(df, filename)
+
+    def write_lookup_table(
+        self, df: DataFrame, dataset_id: str, version: str, overwrite: bool = False
+    ) -> None:
+        filename = self._lookup_table_filename(dataset_id, version)
+        filename.parent.mkdir(parents=True, exist_ok=True)
+        write_dataframe(coalesce(df, 1), filename, overwrite=overwrite)
+
+    def write_missing_associations_tables(
+        self, dfs: dict[str, DataFrame], dataset_id: str, version: str, overwrite: bool = False
+    ) -> None:
+        for name, df in dfs.items():
+            filename = self._missing_associations_table_filename(name, dataset_id, version)
+            filename.parent.mkdir(parents=True, exist_ok=True)
+            write_dataframe_and_auto_partition(df, filename)
+
+    def remove_tables(self, dataset_id: str, version: str) -> None:
+        delete_if_exists(self._base_dir(dataset_id, version))
+
+    @property
+    def _data_dir(self) -> Path:
+        return self.base_path / "data"
+
+    def _base_dir(self, dataset_id: str, version: str) -> Path:
+        return self._data_dir / dataset_id / version
+
+    def _lookup_table_filename(self, dataset_id: str, version: str) -> Path:
+        return self._data_dir / dataset_id / version / LOOKUP_TABLE_FILENAME
+
+    def _missing_associations_dir(self, dataset_id: str, version: str) -> Path:
+        return self._data_dir / dataset_id / version / "missing_associations"
+
+    def _missing_associations_table_filename(
+        self, name: str, dataset_id: str, version: str
+    ) -> Path:
+        return self._missing_associations_dir(dataset_id, version) / f"{name}.parquet"
+
+    def _table_filename(self, dataset_id: str, version: str) -> Path:
+        return self._data_dir / dataset_id / version / TABLE_FILENAME
+
+    def _alt_lookup_table_filename(self, dataset_id: str, version: str) -> Path:
+        return self._data_dir / dataset_id / version / ALT_LOOKUP_TABLE_FILENAME
+
+    def _alt_table_filename(self, dataset_id: str, version: str) -> Path:
+        return self._data_dir / dataset_id / version / ALT_TABLE_FILENAME
+
+    def _get_existing_lookup_table_filename(self, dataset_id: str, version: str) -> Path | None:
+        filename = self._lookup_table_filename(dataset_id, version)
+        if filename.exists():
+            return filename
+        alt_filename = self._alt_lookup_table_filename(dataset_id, version)
+        if alt_filename.exists():
+            return alt_filename
+        return None
+
+    def _get_existing_table_filename(self, dataset_id: str, version: str) -> Path | None:
+        filename = self._table_filename(dataset_id, version)
+        if filename.exists():
+            return filename
+        alt_filename = self._alt_table_filename(dataset_id, version)
+        if alt_filename.exists():
+            return alt_filename
+        return None
+
+    @staticmethod
+    def _replace_table(df: DataFrame, filename: Path) -> None:
+        tmp_name = filename.parent / f"{filename.stem}_tmp.parquet"
+        write_dataframe(df, tmp_name)
+        delete_if_exists(filename)
+        tmp_name.rename(filename)
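
Note: the layout is simple: each dataset version lives at <base_path>/data/<dataset_id>/<version>/table.parquet, with optional lookup and missing-association tables alongside it, and the legacy load_data.parquet names remain readable. A minimal usage sketch, assuming the inherited constructor stores base_path (as the _data_dir property implies), that a Spark-compatible session is already configured for the dsgrid.utils.spark helpers, and with a hypothetical dataset ID and version:

from pathlib import Path

from dsgrid.registry.filesystem_data_store import FilesystemDataStore

store = FilesystemDataStore.create(Path("/tmp/dsgrid-store"))

# Writes /tmp/dsgrid-store/data/my_dataset/1.0.0/table.parquet;
# `df` is assumed to be an existing Spark DataFrame.
store.write_table(df, "my_dataset", "1.0.0")

# Falls back to the legacy load_data.parquet name if table.parquet is absent.
df2 = store.read_table("my_dataset", "1.0.0")

# replace_table writes table_tmp.parquet first, deletes the old file, then
# renames, so an interrupted replace leaves either the old or the new table.
store.replace_table(df2, "my_dataset", "1.0.0")
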
dsgrid/registry/filter_registry_manager.py
@@ -0,0 +1,123 @@
+import logging
+
+from sqlalchemy import Connection
+
+from dsgrid.config.simple_models import RegistrySimpleModel
+from dsgrid.config.dataset_schema_handler_factory import make_dataset_schema_handler
+from dsgrid.spark.functions import is_dataframe_empty
+from dsgrid.utils.timing import track_timing, timer_stats_collector
+from .registry_manager import RegistryManager
+
+
+logger = logging.getLogger(__name__)
+
+
+class FilterRegistryManager(RegistryManager):
+    """Specialized RegistryManager that performs filtering operations."""
+
+    @track_timing(timer_stats_collector)
+    def filter(self, simple_model: RegistrySimpleModel, conn: Connection | None = None):
+        """Filter the registry as described by simple_model.
+
+        Parameters
+        ----------
+        simple_model : RegistrySimpleModel
+            Filter all configs and data according to this model.
+        """
+        if conn is None:
+            with self.project_manager.db.engine.begin() as conn:
+                self._filter(conn, simple_model)
+        else:
+            self._filter(conn, simple_model)
+
+    def _filter(self, conn: Connection, simple_model: RegistrySimpleModel):
+        project_ids_to_keep = {x.project_id for x in simple_model.projects}
+        to_remove = [
+            x for x in self._project_mgr.list_ids(conn=conn) if x not in project_ids_to_keep
+        ]
+        for project_id in to_remove:
+            self._project_mgr.remove(project_id, conn=conn)
+
+        dataset_ids_to_keep = {x.dataset_id for x in simple_model.datasets}
+        dataset_ids_to_remove = set(self._dataset_mgr.list_ids(conn=conn)) - dataset_ids_to_keep
+        for dataset_id in dataset_ids_to_remove:
+            self._dataset_mgr.remove(dataset_id, conn=conn)
+
+        modified_dims = set()
+        modified_dim_records = {}
+
+        def handle_dimension(simple_dim, dim):
+            records = dim.get_records_dataframe()
+            df = records.filter(records.id.isin(simple_dim.record_ids))
+            filtered_records = [x.asDict() for x in df.collect()]
+            modified_dims.add(dim.model.dimension_id)
+            modified_dim_records[dim.model.dimension_id] = {
+                x.id for x in df.select("id").distinct().collect()
+            }
+            return filtered_records
+
+        logger.info("Filter project dimensions")
+        for project in simple_model.projects:
+            changed_project = False
+            project_config = self._project_mgr.get_by_id(project.project_id, conn=conn)
+            indices_to_remove = []
+            for i, dataset in enumerate(project_config.model.datasets):
+                if dataset.dataset_id in dataset_ids_to_remove:
+                    indices_to_remove.append(i)
+            for index in reversed(indices_to_remove):
+                project_config.model.datasets.pop(index)
+                changed_project = True
+            for simple_dim in project.dimensions.base_dimensions:
+                for dim in project_config.list_base_dimensions(
+                    dimension_type=simple_dim.dimension_type
+                ):
+                    dim.model.records = handle_dimension(simple_dim, dim)
+                    self.dimension_manager.db.replace(conn, dim.model)
+
+            for simple_dim in project.dimensions.supplemental_dimensions:
+                for dim in project_config.list_supplemental_dimensions(simple_dim.dimension_type):
+                    if dim.model.name == simple_dim.dimension_name:
+                        dim.model.records = handle_dimension(simple_dim, dim)
+                        self.dimension_manager.db.replace(conn, dim.model)
+            if changed_project:
+                self.project_manager.db.replace(conn, project_config.model)
+
+        logger.info("Filter dataset dimensions")
+        for dataset in simple_model.datasets:
+            logger.info("Filter dataset %s", dataset.dataset_id)
+            dataset_config = self._dataset_mgr.get_by_id(dataset.dataset_id, conn=conn)
+            for simple_dim in dataset.dimensions:
+                dim = dataset_config.get_dimension(simple_dim.dimension_type)
+                dim.model.records = handle_dimension(simple_dim, dim)
+                self.dimension_manager.db.replace(conn, dim.model)
+            handler = make_dataset_schema_handler(
+                conn,
+                dataset_config,
+                self._dimension_mgr,
+                self._dimension_mapping_mgr,
+                store=self._data_store,
+            )
+            handler.filter_data(dataset.dimensions, self._data_store)
+
+        logger.info("Filter dimension mapping records")
+        for mapping in self._dimension_mapping_mgr.iter_configs():
+            records = None
+            changed = False
+            from_id = mapping.model.from_dimension.dimension_id
+            to_id = mapping.model.to_dimension.dimension_id
+            if from_id in modified_dims or to_id in modified_dims:
+                records = mapping.get_records_dataframe()
+                if from_id in modified_dims:
+                    records = records.filter(records.from_id.isin(modified_dim_records[from_id]))
+                    changed = True
+                if to_id in modified_dims:
+                    records = records.filter(records.to_id.isin(modified_dim_records[to_id]))
+                    changed = True
+
+            # TODO: probably need to remove a dimension mapping if it is empty
+            if records is not None and changed and not is_dataframe_empty(records):
+                mapping.model.records = [x.asDict() for x in records.collect()]
+                self.dimension_mapping_manager.db.replace(conn, mapping.model)
+                logger.info(
+                    "Filtered dimension mapping records from ID %s", mapping.model.mapping_id
+                )
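
Note: the final pass above is the subtle part. Every dimension whose records were trimmed is remembered in modified_dims and modified_dim_records, and any mapping whose from or to side touches one of those dimensions has its records restricted to the surviving IDs. A self-contained sketch of that propagation pattern, using plain Python collections in place of Spark DataFrames and registry models:

# Schematic analogue of the propagation above; names and values are
# illustrative, not dsgrid objects.
dims = {"county": ["01001", "01003", "06037"], "sector": ["com", "res"]}
keep = {"county": {"06037"}}  # record_ids requested by the simple model

modified_dim_records = {}
for dim_id, record_ids in keep.items():
    dims[dim_id] = [r for r in dims[dim_id] if r in record_ids]
    modified_dim_records[dim_id] = set(dims[dim_id])

# Mapping records are (from_id, to_id) pairs; each modified side must survive.
mapping = [("01001", "com"), ("06037", "com"), ("06037", "res")]
from_dim, to_dim = "county", "sector"
filtered = [
    (f, t)
    for f, t in mapping
    if (from_dim not in modified_dim_records or f in modified_dim_records[from_dim])
    and (to_dim not in modified_dim_records or t in modified_dim_records[to_dim])
]
assert filtered == [("06037", "com"), ("06037", "res")]
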
dsgrid/registry/project_config_generator.py
@@ -0,0 +1,57 @@
+import logging
+from pathlib import Path
+from typing import Iterable
+
+from chronify.utils.path_utils import check_overwrite
+
+from dsgrid.dimension.time import TimeDimensionType
+from dsgrid.exceptions import DSGInvalidParameter
+from dsgrid.utils.files import dump_data
+from dsgrid.config.project_config import make_unvalidated_project_config
+
+
+logger = logging.getLogger(__name__)
+
+
+def generate_project_config(
+    project_id: str,
+    dataset_ids: Iterable[str],
+    metric_types: Iterable[str],
+    name: str | None = None,
+    description: str | None = None,
+    time_type: TimeDimensionType = TimeDimensionType.DATETIME,
+    output_directory: Path | None = None,
+    overwrite: bool = False,
+):
+    """Generate project config files and filesystem skeleton."""
+    if not metric_types:
+        msg = "At least one metric type must be passed"
+        raise DSGInvalidParameter(msg)
+    output_dir = (output_directory or Path()) / project_id
+    check_overwrite(output_dir, overwrite)
+    output_dir.mkdir()
+    project_dir = output_dir / "project"
+    project_dir.mkdir()
+    project_file = project_dir / "project.json5"
+    datasets_dir = output_dir / "datasets"
+    datasets_dir.mkdir()
+    (datasets_dir / "historical").mkdir()
+    (datasets_dir / "modeled").mkdir()
+    dimensions_dir = project_dir / "dimensions"
+    dimensions_dir.mkdir()
+    (dimensions_dir / "subset").mkdir()
+    (dimensions_dir / "supplemental").mkdir()
+    (project_dir / "dimension_mappings").mkdir()
+
+    config = make_unvalidated_project_config(
+        project_id,
+        dataset_ids,
+        metric_types,
+        name=name,
+        description=description,
+        time_type=time_type,
+    )
+    dump_data(config, project_file, indent=2)
+    logger.info(
+        "Created project directory structure at %s with config file %s", output_dir, project_file
+    )