clickzetta-semantic-model-generator 1.0.1__tar.gz → 1.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/PKG-INFO +5 -5
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/pyproject.toml +5 -5
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/clickzetta_utils/clickzetta_connector.py +91 -33
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/clickzetta_utils/env_vars.py +7 -2
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/generate_model.py +588 -224
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/llm/dashscope_client.py +4 -2
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/llm/enrichment.py +144 -57
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/llm/progress_tracker.py +16 -15
- clickzetta_semantic_model_generator-1.0.3/semantic_model_generator/relationships/__init__.py +15 -0
- clickzetta_semantic_model_generator-1.0.3/semantic_model_generator/relationships/discovery.py +202 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/clickzetta_connector_test.py +3 -7
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/generate_model_classification_test.py +12 -2
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/llm_enrichment_test.py +152 -46
- clickzetta_semantic_model_generator-1.0.3/semantic_model_generator/tests/relationship_discovery_test.py +114 -0
- clickzetta_semantic_model_generator-1.0.3/semantic_model_generator/tests/relationships_filters_test.py +361 -0
- clickzetta_semantic_model_generator-1.0.3/semantic_model_generator/validate/keywords.py +457 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/validate/schema.py +4 -2
- clickzetta_semantic_model_generator-1.0.1/semantic_model_generator/tests/relationships_filters_test.py +0 -225
- clickzetta_semantic_model_generator-1.0.1/semantic_model_generator/validate/keywords.py +0 -57
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/LICENSE +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/README.md +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/__init__.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/clickzetta_utils/utils.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/data_processing/__init__.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/data_processing/cte_utils.py +1 -1
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/data_processing/cte_utils_test.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/data_processing/data_types.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/data_processing/proto_utils.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/llm/__init__.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/output_models/.keep +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/protos/semantic_model.proto +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/protos/semantic_model_pb2.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/protos/semantic_model_pb2.pyi +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/cte_utils_test.py +1 -1
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/samples/validate_yamls.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/utils_test.py +1 -1
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/validate_model_test.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/tests/yaml_to_semantic_model_test.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/validate/context_length.py +0 -0
- {clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/semantic_model_generator/validate_model.py +0 -0
{clickzetta_semantic_model_generator-1.0.1 → clickzetta_semantic_model_generator-1.0.3}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.3
|
2
2
|
Name: clickzetta-semantic-model-generator
|
3
|
-
Version: 1.0.1
|
3
|
+
Version: 1.0.3
|
4
4
|
Summary: Curate a Semantic Model for ClickZetta Lakehouse
|
5
5
|
License: Apache Software License; BSD License
|
6
6
|
Author: qililiang
|
@@ -13,12 +13,12 @@ Classifier: Programming Language :: Python :: 3.10
|
|
13
13
|
Classifier: Programming Language :: Python :: 3.11
|
14
14
|
Provides-Extra: looker
|
15
15
|
Requires-Dist: PyYAML (>=6.0.1,<7.0.0)
|
16
|
-
Requires-Dist: clickzetta-connector-python (==0.8.92)
|
17
|
-
Requires-Dist: clickzetta-zettapark-python (==0.1.3)
|
16
|
+
Requires-Dist: clickzetta-connector-python (>=0.8.92)
|
17
|
+
Requires-Dist: clickzetta-zettapark-python (>=0.1.3)
|
18
18
|
Requires-Dist: dashscope (>=1.22.2,<2.0.0)
|
19
19
|
Requires-Dist: loguru (>=0.7.2,<0.8.0)
|
20
20
|
Requires-Dist: looker-sdk (>=24.14.0,<25.0.0) ; extra == "looker"
|
21
|
-
Requires-Dist: numpy (>=1.26.4,<
|
21
|
+
Requires-Dist: numpy (>=1.26.4,<3.0.0)
|
22
22
|
Requires-Dist: pandas (>=2.0.1,<3.0.0)
|
23
23
|
Requires-Dist: protobuf (==5.26.1)
|
24
24
|
Requires-Dist: pyarrow (==14.0.2)
|
@@ -31,7 +31,7 @@ Requires-Dist: streamlit (==1.36.0)
|
|
31
31
|
Requires-Dist: streamlit-extras (==0.4.0)
|
32
32
|
Requires-Dist: strictyaml (>=1.7.3,<2.0.0)
|
33
33
|
Requires-Dist: tqdm (>=4.66.5,<5.0.0)
|
34
|
-
Requires-Dist: urllib3 (>=1.26.19,<
|
34
|
+
Requires-Dist: urllib3 (>=1.26.19,<3.0.0)
|
35
35
|
Description-Content-Type: text/markdown
|
36
36
|
|
37
37
|
# semantic-model-generator
|
@@ -1,6 +1,6 @@
|
|
1
1
|
[tool.poetry]
|
2
2
|
name = "clickzetta-semantic-model-generator"
|
3
|
-
version = "1.0.1"
|
3
|
+
version = "1.0.3"
|
4
4
|
description = "Curate a Semantic Model for ClickZetta Lakehouse"
|
5
5
|
authors = ["qililiang <qililiang@clickzetta.com>"]
|
6
6
|
license = "Apache Software License; BSD License"
|
@@ -11,7 +11,7 @@ packages = [{include = "semantic_model_generator"}]
|
|
11
11
|
python = ">=3.9,<3.9.7 || >3.9.7,<3.12"
|
12
12
|
pandas = "^2.0.1"
|
13
13
|
loguru = "^0.7.2"
|
14
|
-
clickzetta-connector-python = "0.8.92"
|
14
|
+
clickzetta-connector-python = ">=0.8.92"
|
15
15
|
protobuf = "5.26.1"
|
16
16
|
pydantic = ">=2.8.2,<3.0.0"
|
17
17
|
PyYAML = "^6.0.1"
|
@@ -22,11 +22,11 @@ sqlglot = "25.10.0"
|
|
22
22
|
strictyaml = "^1.7.3"
|
23
23
|
streamlit = "1.36.0"
|
24
24
|
streamlit-extras = "0.4.0"
|
25
|
-
numpy = "
|
25
|
+
numpy = ">=1.26.4,<3.0.0"
|
26
26
|
python-dotenv = "^1.0.1"
|
27
|
-
urllib3 = "
|
27
|
+
urllib3 = ">=1.26.19,<3.0.0"
|
28
28
|
requests = "^2.32.3"
|
29
|
-
clickzetta-zettapark-python = "0.1.3"
|
29
|
+
clickzetta-zettapark-python = ">=0.1.3"
|
30
30
|
dashscope = "^1.22.2"
|
31
31
|
|
32
32
|
# Optional dependencies for functionality such as partner semantic model support.
|
@@ -4,7 +4,6 @@ import concurrent.futures
|
|
4
4
|
import re
|
5
5
|
from collections import defaultdict
|
6
6
|
from contextlib import contextmanager
|
7
|
-
from types import SimpleNamespace
|
8
7
|
from typing import Any, Dict, Generator, List, Optional, TypeVar, Union
|
9
8
|
|
10
9
|
import pandas as pd
|
@@ -12,10 +11,7 @@ from clickzetta.zettapark.session import Session
|
|
12
11
|
from loguru import logger
|
13
12
|
|
14
13
|
from semantic_model_generator.clickzetta_utils import env_vars
|
15
|
-
from semantic_model_generator.clickzetta_utils.utils import
|
16
|
-
clickzetta_connection,
|
17
|
-
create_session,
|
18
|
-
)
|
14
|
+
from semantic_model_generator.clickzetta_utils.utils import create_session
|
19
15
|
from semantic_model_generator.data_processing.data_types import Column, Table
|
20
16
|
|
21
17
|
ConnectionType = TypeVar("ConnectionType", bound=Session)
|
@@ -115,7 +111,9 @@ class ClickzettaCursor:
|
|
115
111
|
def execute(self, query: str) -> "ClickzettaCursor":
|
116
112
|
self._df = _execute_query_to_pandas(self._session, query)
|
117
113
|
columns = [] if self._df is None else list(self._df.columns)
|
118
|
-
self.description = [
|
114
|
+
self.description = [
|
115
|
+
(col, None, None, None, None, None, None) for col in columns
|
116
|
+
]
|
119
117
|
return self
|
120
118
|
|
121
119
|
def fetchone(self) -> Optional[tuple[Any, ...]]:
|
@@ -159,7 +157,11 @@ def _quote_identifier(name: str) -> str:
|
|
159
157
|
|
160
158
|
def _qualify_table(workspace: str, schema_name: str, table_name: str) -> str:
|
161
159
|
return ".".join(
|
162
|
-
[
|
160
|
+
[
|
161
|
+
_quote_identifier(workspace),
|
162
|
+
_quote_identifier(schema_name),
|
163
|
+
_quote_identifier(table_name),
|
164
|
+
]
|
163
165
|
)
|
164
166
|
|
165
167
|
|
@@ -214,12 +216,18 @@ def _fetch_distinct_values(
|
|
214
216
|
column_name: str,
|
215
217
|
ndv: int,
|
216
218
|
) -> Optional[List[str]]:
|
217
|
-
workspace_part =
|
218
|
-
|
219
|
+
workspace_part = (
|
220
|
+
_sanitize_identifier(workspace, workspace).upper() if workspace else ""
|
221
|
+
)
|
222
|
+
schema_part = (
|
223
|
+
_sanitize_identifier(schema_name, schema_name).upper() if schema_name else ""
|
224
|
+
)
|
219
225
|
table_part = _sanitize_identifier(table_name, table_name).upper()
|
220
226
|
column_part = _sanitize_identifier(column_name, column_name).upper()
|
221
227
|
|
222
|
-
qualified_parts = [
|
228
|
+
qualified_parts = [
|
229
|
+
part for part in (workspace_part, schema_part, table_part) if part
|
230
|
+
]
|
223
231
|
qualified_table = ".".join(qualified_parts)
|
224
232
|
|
225
233
|
query = f"SELECT DISTINCT {column_part} FROM {qualified_table} LIMIT {ndv}"
|
@@ -257,7 +265,6 @@ def _get_column_representation(
|
|
257
265
|
else:
|
258
266
|
column_datatype = str(column_datatype_raw)
|
259
267
|
column_datatype = _normalize_column_type(column_datatype)
|
260
|
-
normalized_type = column_datatype.split("(")[0].strip()
|
261
268
|
column_values = (
|
262
269
|
_fetch_distinct_values(
|
263
270
|
session=session,
|
@@ -351,7 +358,14 @@ def _catalog_category(session: Session, workspace: str) -> str:
|
|
351
358
|
return "UNKNOWN"
|
352
359
|
|
353
360
|
df.columns = [str(col).upper() for col in df.columns]
|
354
|
-
name_col = next(
|
361
|
+
name_col = next(
|
362
|
+
(
|
363
|
+
col
|
364
|
+
for col in ("WORKSPACE_NAME", "NAME", "CATALOG_NAME")
|
365
|
+
if col in df.columns
|
366
|
+
),
|
367
|
+
None,
|
368
|
+
)
|
355
369
|
category_col = next((col for col in ("CATEGORY",) if col in df.columns), None)
|
356
370
|
if not name_col or not category_col:
|
357
371
|
_CATALOG_CATEGORY_CACHE[workspace_upper] = "UNKNOWN"
|
@@ -408,7 +422,9 @@ ORDER BY kc.ordinal_position
|
|
408
422
|
if result is not None:
|
409
423
|
return result
|
410
424
|
except Exception:
|
411
|
-
logger.debug(
|
425
|
+
logger.debug(
|
426
|
+
"Primary key lookup via sys.information_schema failed; falling back."
|
427
|
+
)
|
412
428
|
|
413
429
|
fallback_query = f"""
|
414
430
|
SELECT kc.column_name
|
@@ -423,7 +439,13 @@ ORDER BY kc.ordinal_position
|
|
423
439
|
if result is not None:
|
424
440
|
return result
|
425
441
|
except Exception as exc:
|
426
|
-
logger.warning(
|
442
|
+
logger.warning(
|
443
|
+
"Primary key lookup failed for {}.{}.{}: {}",
|
444
|
+
workspace,
|
445
|
+
schema_name,
|
446
|
+
table_name,
|
447
|
+
exc,
|
448
|
+
)
|
427
449
|
return None
|
428
450
|
|
429
451
|
|
@@ -432,9 +454,7 @@ def _build_information_schema_query(
|
|
432
454
|
table_schema: Optional[str],
|
433
455
|
table_names: Optional[List[str]],
|
434
456
|
) -> str:
|
435
|
-
where_conditions: List[str] = [
|
436
|
-
"1=1"
|
437
|
-
]
|
457
|
+
where_conditions: List[str] = ["1=1"]
|
438
458
|
if table_schema:
|
439
459
|
where_conditions.append(f"upper(t.table_schema) = '{table_schema.upper()}'")
|
440
460
|
if table_names:
|
@@ -442,7 +462,6 @@ def _build_information_schema_query(
|
|
442
462
|
where_conditions.append(f"upper(t.table_name) IN ({formatted_names})")
|
443
463
|
|
444
464
|
where_clause = " AND ".join(where_conditions)
|
445
|
-
base = "information_schema"
|
446
465
|
return f"""
|
447
466
|
SELECT
|
448
467
|
t.table_schema AS {_TABLE_SCHEMA_COL},
|
@@ -474,27 +493,48 @@ def _fetch_columns_via_show(
|
|
474
493
|
schema = table_schema.upper() if table_schema else ""
|
475
494
|
|
476
495
|
for table_name in table_names:
|
477
|
-
qualified_parts = [
|
496
|
+
qualified_parts = [
|
497
|
+
part for part in (catalog, schema, table_name.upper()) if part
|
498
|
+
]
|
478
499
|
qualified_table = ".".join(qualified_parts)
|
479
500
|
query = f"SHOW COLUMNS IN {qualified_table}"
|
480
501
|
try:
|
481
502
|
df = session.sql(query).to_pandas()
|
482
503
|
except Exception as exc:
|
483
|
-
logger.debug(
|
504
|
+
logger.debug(
|
505
|
+
"SHOW COLUMNS fallback failed for {}: {}", qualified_table, exc
|
506
|
+
)
|
484
507
|
continue
|
485
508
|
if df.empty:
|
486
509
|
continue
|
487
510
|
df.columns = [str(col).upper() for col in df.columns]
|
488
|
-
schema_col = next(
|
489
|
-
|
490
|
-
|
491
|
-
|
492
|
-
|
511
|
+
schema_col = next(
|
512
|
+
(col for col in ("TABLE_SCHEMA", "SCHEMA_NAME") if col in df.columns), None
|
513
|
+
)
|
514
|
+
table_col = next(
|
515
|
+
(col for col in ("TABLE_NAME", "NAME") if col in df.columns), None
|
516
|
+
)
|
517
|
+
column_col = next(
|
518
|
+
(
|
519
|
+
col
|
520
|
+
for col in ("COLUMN_NAME", "NAME")
|
521
|
+
if col in df.columns and col != table_col
|
522
|
+
),
|
523
|
+
None,
|
524
|
+
)
|
525
|
+
datatype_col = next(
|
526
|
+
(col for col in ("DATA_TYPE", "TYPE") if col in df.columns), None
|
527
|
+
)
|
528
|
+
comment_col = next(
|
529
|
+
(col for col in ("COMMENT", "COLUMN_COMMENT") if col in df.columns), None
|
530
|
+
)
|
493
531
|
|
494
532
|
normalized = pd.DataFrame()
|
495
533
|
normalized[_TABLE_SCHEMA_COL] = df[schema_col] if schema_col else table_schema
|
496
534
|
normalized[_TABLE_NAME_COL] = df[table_col] if table_col else table_name
|
497
|
-
normalized[_COLUMN_NAME_COL] =
|
535
|
+
normalized[_COLUMN_NAME_COL] = (
|
536
|
+
df[column_col] if column_col else df.index.astype(str)
|
537
|
+
)
|
498
538
|
normalized[_DATATYPE_COL] = df[datatype_col] if datatype_col else ""
|
499
539
|
normalized[_COLUMN_COMMENT_ALIAS] = df[comment_col] if comment_col else ""
|
500
540
|
normalized[_TABLE_COMMENT_COL] = ""
|
@@ -552,6 +592,7 @@ def get_valid_schemas_tables_columns_df(
|
|
552
592
|
if _TABLE_SCHEMA_COL in result.columns:
|
553
593
|
result[_TABLE_SCHEMA_COL] = result[_TABLE_SCHEMA_COL].astype(str).str.upper()
|
554
594
|
if _IS_PRIMARY_KEY_COL in result.columns:
|
595
|
+
|
555
596
|
def _normalize_pk(value: Any) -> bool:
|
556
597
|
if isinstance(value, bool):
|
557
598
|
return value
|
@@ -617,10 +658,11 @@ def fetch_tables_views_in_schema(
|
|
617
658
|
workspace_upper = workspace.upper()
|
618
659
|
schema_upper = schema.upper()
|
619
660
|
|
620
|
-
target = ""
|
621
661
|
try:
|
622
662
|
if workspace_upper and schema_upper:
|
623
|
-
df = session.sql(
|
663
|
+
df = session.sql(
|
664
|
+
f"SHOW TABLES IN {workspace_upper}.{schema_upper}"
|
665
|
+
).to_pandas()
|
624
666
|
else:
|
625
667
|
df = session.sql("SHOW TABLES").to_pandas()
|
626
668
|
except Exception as exc: # pragma: no cover
|
@@ -634,17 +676,27 @@ def fetch_tables_views_in_schema(
|
|
634
676
|
df.columns = [str(col).upper() for col in df.columns]
|
635
677
|
name_column = "TABLE_NAME" if "TABLE_NAME" in df.columns else df.columns[0]
|
636
678
|
schema_column = next(
|
637
|
-
(
|
679
|
+
(
|
680
|
+
col
|
681
|
+
for col in ("SCHEMA_NAME", "TABLE_SCHEMA", "NAMESPACE")
|
682
|
+
if col in df.columns
|
683
|
+
),
|
638
684
|
None,
|
639
685
|
)
|
640
686
|
catalog_column = next(
|
641
|
-
(
|
687
|
+
(
|
688
|
+
col
|
689
|
+
for col in ("CATALOG_NAME", "WORKSPACE_NAME", "TABLE_CATALOG")
|
690
|
+
if col in df.columns
|
691
|
+
),
|
642
692
|
None,
|
643
693
|
)
|
644
694
|
|
645
695
|
results: List[str] = []
|
646
696
|
for _, row in df.iterrows():
|
647
|
-
if _value_is_true(row.get("IS_VIEW")) and not _value_is_true(
|
697
|
+
if _value_is_true(row.get("IS_VIEW")) and not _value_is_true(
|
698
|
+
row.get("IS_MATERIALIZED_VIEW")
|
699
|
+
):
|
648
700
|
continue
|
649
701
|
if _value_is_true(row.get("IS_EXTERNAL")):
|
650
702
|
continue
|
@@ -756,7 +808,11 @@ def fetch_yaml_names_in_stage(
|
|
756
808
|
if stage.lower().startswith("volume:user://"):
|
757
809
|
volume_body = stage[len("volume:") :]
|
758
810
|
# Normalize relative directory
|
759
|
-
relative =
|
811
|
+
relative = (
|
812
|
+
volume_body[len("user://") :]
|
813
|
+
if volume_body.startswith("user://")
|
814
|
+
else volume_body
|
815
|
+
)
|
760
816
|
relative = relative.lstrip("~/")
|
761
817
|
relative = relative.strip("/")
|
762
818
|
|
@@ -842,7 +898,9 @@ def create_table_in_schema(
|
|
842
898
|
table_fqn: str,
|
843
899
|
columns_schema: Dict[str, str],
|
844
900
|
) -> bool:
|
845
|
-
fields = ", ".join(
|
901
|
+
fields = ", ".join(
|
902
|
+
f"{_quote_identifier(name)} {dtype}" for name, dtype in columns_schema.items()
|
903
|
+
)
|
846
904
|
query = f"CREATE TABLE IF NOT EXISTS {table_fqn} ({fields})"
|
847
905
|
try:
|
848
906
|
session.sql(query).collect()
|
@@ -20,7 +20,9 @@ _CONFIG_PATHS = [
|
|
20
20
|
_ACTIVE_CONFIG_PATH: Optional[str] = None
|
21
21
|
|
22
22
|
|
23
|
-
def _load_config_from_file() ->
|
23
|
+
def _load_config_from_file() -> (
|
24
|
+
Tuple[Optional[Dict[str, str]], Dict[str, Dict[str, str]]]
|
25
|
+
):
|
24
26
|
global _ACTIVE_CONFIG_PATH
|
25
27
|
_ACTIVE_CONFIG_PATH = None
|
26
28
|
for path in _CONFIG_PATHS:
|
@@ -91,7 +93,10 @@ def _deep_lookup(mapping: Any, key: str) -> Optional[Any]:
|
|
91
93
|
if isinstance(current, dict):
|
92
94
|
for candidate_key, candidate_value in current.items():
|
93
95
|
candidate_key_str = str(candidate_key).lower()
|
94
|
-
if candidate_key_str == normalized_key and candidate_value not in (
|
96
|
+
if candidate_key_str == normalized_key and candidate_value not in (
|
97
|
+
None,
|
98
|
+
"",
|
99
|
+
):
|
95
100
|
return candidate_value
|
96
101
|
if isinstance(candidate_value, (dict, list)):
|
97
102
|
queue.append(candidate_value)
|