dragon-ml-toolbox 10.2.0__py3-none-any.whl → 14.2.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- {dragon_ml_toolbox-10.2.0.dist-info → dragon_ml_toolbox-14.2.0.dist-info}/METADATA +38 -63
- dragon_ml_toolbox-14.2.0.dist-info/RECORD +48 -0
- {dragon_ml_toolbox-10.2.0.dist-info → dragon_ml_toolbox-14.2.0.dist-info}/licenses/LICENSE +1 -1
- {dragon_ml_toolbox-10.2.0.dist-info → dragon_ml_toolbox-14.2.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +11 -0
- ml_tools/ETL_cleaning.py +72 -34
- ml_tools/ETL_engineering.py +506 -70
- ml_tools/GUI_tools.py +2 -1
- ml_tools/MICE_imputation.py +212 -7
- ml_tools/ML_callbacks.py +73 -40
- ml_tools/ML_datasetmaster.py +267 -284
- ml_tools/ML_evaluation.py +119 -58
- ml_tools/ML_evaluation_multi.py +107 -32
- ml_tools/ML_inference.py +15 -5
- ml_tools/ML_models.py +234 -170
- ml_tools/ML_models_advanced.py +323 -0
- ml_tools/ML_optimization.py +321 -97
- ml_tools/ML_scaler.py +10 -5
- ml_tools/ML_trainer.py +585 -40
- ml_tools/ML_utilities.py +528 -0
- ml_tools/ML_vision_datasetmaster.py +1315 -0
- ml_tools/ML_vision_evaluation.py +260 -0
- ml_tools/ML_vision_inference.py +428 -0
- ml_tools/ML_vision_models.py +627 -0
- ml_tools/ML_vision_transformers.py +58 -0
- ml_tools/PSO_optimization.py +10 -7
- ml_tools/RNN_forecast.py +2 -0
- ml_tools/SQL.py +22 -9
- ml_tools/VIF_factor.py +4 -3
- ml_tools/_ML_vision_recipe.py +88 -0
- ml_tools/__init__.py +1 -0
- ml_tools/_logger.py +0 -2
- ml_tools/_schema.py +96 -0
- ml_tools/constants.py +79 -0
- ml_tools/custom_logger.py +164 -16
- ml_tools/data_exploration.py +1092 -109
- ml_tools/ensemble_evaluation.py +48 -1
- ml_tools/ensemble_inference.py +6 -7
- ml_tools/ensemble_learning.py +4 -3
- ml_tools/handle_excel.py +1 -0
- ml_tools/keys.py +80 -0
- ml_tools/math_utilities.py +259 -0
- ml_tools/optimization_tools.py +198 -24
- ml_tools/path_manager.py +144 -45
- ml_tools/serde.py +192 -0
- ml_tools/utilities.py +287 -227
- dragon_ml_toolbox-10.2.0.dist-info/RECORD +0 -36
- {dragon_ml_toolbox-10.2.0.dist-info → dragon_ml_toolbox-14.2.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-10.2.0.dist-info → dragon_ml_toolbox-14.2.0.dist-info}/top_level.txt +0 -0
ml_tools/PSO_optimization.py
CHANGED
@@ -4,20 +4,23 @@ import xgboost as xgb
 import lightgbm as lgb
 from typing import Literal, Union, Tuple, Dict, Optional
 from copy import deepcopy
-from .utilities import (
-    threshold_binary_values,
-    threshold_binary_values_batch,
-    deserialize_object)
-from .path_manager import sanitize_filename, make_fullpath, list_files_by_extension
 import torch
 from tqdm import trange
+from contextlib import nullcontext
+
+from .serde import deserialize_object
+from .math_utilities import threshold_binary_values, threshold_binary_values_batch
+from .path_manager import sanitize_filename, make_fullpath, list_files_by_extension
 from ._logger import _LOGGER
 from .keys import EnsembleKeys
 from ._script_info import _script_info
 from .SQL import DatabaseManager
-from contextlib import nullcontext
 from .optimization_tools import _save_result
 
+"""
+DEPRECATED
+"""
+
 
 __all__ = [
     "ObjectiveFunction",
@@ -47,7 +50,7 @@ class ObjectiveFunction():
         self.binary_features = binary_features
         self.is_hybrid = False if binary_features <= 0 else True
         self.use_noise = add_noise
-        self._artifact = deserialize_object(trained_model_path, verbose=False
+        self._artifact = deserialize_object(trained_model_path, verbose=False)
         self.model = self._get_from_artifact(EnsembleKeys.MODEL)
         self.feature_names: Optional[list[str]] = self._get_from_artifact(EnsembleKeys.FEATURES) # type: ignore
         self.target_name: Optional[str] = self._get_from_artifact(EnsembleKeys.TARGET) # type: ignore
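Two things happen in this file: the module is flagged DEPRECATED, and the helpers it pulled from ml_tools.utilities now live in the dedicated serde and math_utilities modules. A minimal migration sketch for downstream code (the artifact filename below is hypothetical; the call pattern matches ObjectiveFunction above):

    # Old (<= 10.x) import locations:
    # from ml_tools.utilities import deserialize_object, threshold_binary_values

    # New (14.x) locations, matching the imports in the diff above:
    from ml_tools.serde import deserialize_object
    from ml_tools.math_utilities import threshold_binary_values, threshold_binary_values_batch

    # "model.joblib" is a hypothetical artifact path for illustration.
    artifact = deserialize_object("model.joblib", verbose=False)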
ml_tools/RNN_forecast.py
CHANGED
ml_tools/SQL.py
CHANGED
@@ -2,9 +2,10 @@ import sqlite3
 import pandas as pd
 from pathlib import Path
 from typing import Union, Dict, Any, Optional, List, Literal
+
 from ._logger import _LOGGER
 from ._script_info import _script_info
-from .path_manager import make_fullpath
+from .path_manager import make_fullpath, sanitize_filename
 
 
 __all__ = [
@@ -94,11 +95,13 @@ class DatabaseManager:
         if not self.cursor:
             _LOGGER.error("Database connection is not open.")
             raise sqlite3.Error()
+
+        sanitized_table_name = sanitize_filename(table_name)
 
         columns_def = ", ".join([f'"{col_name}" {col_type}' for col_name, col_type in schema.items()])
         exists_clause = "IF NOT EXISTS" if if_not_exists else ""
 
-        query = f"CREATE TABLE {exists_clause} {
+        query = f"CREATE TABLE {exists_clause} {sanitized_table_name} ({columns_def})"
 
         _LOGGER.info(f"➡️ Executing: {query}")
         self.cursor.execute(query)
@@ -118,12 +121,14 @@ class DatabaseManager:
         if not self.cursor:
             _LOGGER.error("Database connection is not open.")
             raise sqlite3.Error()
+
+        sanitized_table_name = sanitize_filename(table_name)
 
         columns = ', '.join(f'"{k}"' for k in data.keys())
         placeholders = ', '.join(['?'] * len(data))
         values = list(data.values())
 
-        query = f
+        query = f'INSERT INTO "{sanitized_table_name}" ({columns}) VALUES ({placeholders})'
 
         self.cursor.execute(query, values)
 
@@ -185,6 +190,8 @@ class DatabaseManager:
         if not data:
             _LOGGER.warning("'insert_many' called with empty data list. No action taken.")
             return
+
+        sanitized_table_name = sanitize_filename(table_name)
 
         # Assume all dicts have the same keys as the first one
         first_row = data[0]
@@ -194,10 +201,10 @@ class DatabaseManager:
         # Create a list of tuples, where each tuple is a row of values
         values_to_insert = [list(row.values()) for row in data]
 
-        query = f
+        query = f'INSERT INTO "{sanitized_table_name}" ({columns}) VALUES ({placeholders})'
 
         self.cursor.executemany(query, values_to_insert)
-        _LOGGER.info(f"➡️ Bulk inserted {len(values_to_insert)} rows into '{
+        _LOGGER.info(f"➡️ Bulk inserted {len(values_to_insert)} rows into '{sanitized_table_name}'.")
 
     def insert_from_dataframe(self, table_name: str, df: pd.DataFrame, if_exists: Literal['fail', 'replace', 'append'] = 'append'):
         """
@@ -218,9 +225,11 @@ class DatabaseManager:
         if not self.conn:
             _LOGGER.error("Database connection is not open.")
             raise sqlite3.Error()
+
+        sanitized_table_name = sanitize_filename(table_name)
 
         df.to_sql(
-
+            sanitized_table_name,
             self.conn,
             if_exists=if_exists,
             index=False # Typically, we don't want to save the DataFrame index
@@ -246,9 +255,11 @@ class DatabaseManager:
         if not self.conn:
             _LOGGER.error("Database connection is not open.")
             raise sqlite3.Error()
+
+        sanitized_table_name = sanitize_filename(table_name)
 
         # PRAGMA is a special SQL command in SQLite for database metadata
-        return pd.read_sql_query(f'PRAGMA table_info("{
+        return pd.read_sql_query(f'PRAGMA table_info("{sanitized_table_name}");', self.conn)
 
     def create_index(self, table_name: str, column_name: str, unique: bool = False):
         """
@@ -267,11 +278,13 @@ class DatabaseManager:
         if not self.cursor:
             _LOGGER.error("Database connection is not open.")
             raise sqlite3.Error()
+
+        sanitized_table_name = sanitize_filename(table_name)
 
-        index_name = f"idx_{
+        index_name = f"idx_{sanitized_table_name}_{column_name}"
         unique_clause = "UNIQUE" if unique else ""
 
-        query = f
+        query = f'CREATE {unique_clause} INDEX IF NOT EXISTS "{index_name}" ON "{sanitized_table_name}" ("{column_name}")'
 
         _LOGGER.info(f"➡️ Executing: {query}")
         self.cursor.execute(query)
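The recurring change in this file: every method that interpolates table_name into SQL now routes it through sanitize_filename() first. A minimal usage sketch, assuming DatabaseManager works as a context manager over a SQLite file; the constructor argument and the create_table() name are assumptions, while insert_many() and create_index() appear by name in the diff above:

    from ml_tools.SQL import DatabaseManager

    # Hypothetical sketch: constructor argument, context-manager usage, and
    # the create_table() name are assumptions; insert_many() and
    # create_index() are named in the diff. Table names are sanitized
    # internally before being interpolated into SQL.
    with DatabaseManager("experiments.db") as db:
        db.create_table("pso_results",
                        schema={"id": "INTEGER PRIMARY KEY", "score": "REAL"},
                        if_not_exists=True)
        db.insert_many("pso_results", data=[{"id": 1, "score": 0.93},
                                            {"id": 2, "score": 0.88}])
        db.create_index("pso_results", "score")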
ml_tools/VIF_factor.py
CHANGED
@@ -1,4 +1,3 @@
-
 import pandas as pd
 import numpy as np
 import matplotlib.pyplot as plt
@@ -7,11 +6,13 @@ from statsmodels.stats.outliers_influence import variance_inflation_factor
 from statsmodels.tools.tools import add_constant
 import warnings
 from pathlib import Path
-
+
+from .utilities import yield_dataframes_from_dir, save_dataframe_filename
 from .path_manager import sanitize_filename, make_fullpath
 from ._logger import _LOGGER
 from ._script_info import _script_info
 
+
 __all__ = [
     "compute_vif",
     "drop_vif_based",
@@ -228,7 +229,7 @@ def compute_vif_multi(input_directory: Union[str, Path],
     result_df, dropped_cols = drop_vif_based(df=df, vif_df=vif_dataframe)
 
     if len(dropped_cols) > 0:
-
+        save_dataframe_filename(df=result_df, save_dir=output_dataset_path, filename=new_filename)
 
 
 def info():
ml_tools/_ML_vision_recipe.py
ADDED
@@ -0,0 +1,88 @@
+import json
+import torch
+from torchvision import transforms
+from typing import Dict, Any, List, Callable, Union
+from pathlib import Path
+
+from .ML_vision_transformers import TRANSFORM_REGISTRY
+from ._logger import _LOGGER
+from .keys import VisionTransformRecipeKeys
+from .path_manager import make_fullpath
+
+
+def save_recipe(recipe: Dict[str, Any], filepath: Path) -> None:
+    """
+    Saves a transform recipe dictionary to a JSON file.
+
+    Args:
+        recipe (Dict[str, Any]): The recipe dictionary to save.
+        filepath (str): The path to the output .json file.
+    """
+    final_filepath = filepath.with_suffix(".json")
+
+    try:
+        with open(final_filepath, 'w') as f:
+            json.dump(recipe, f, indent=4)
+        _LOGGER.info(f"Transform recipe saved as '{final_filepath.name}'.")
+    except Exception as e:
+        _LOGGER.error(f"Failed to save recipe to '{final_filepath}': {e}")
+        raise
+
+
+def load_recipe_and_build_transform(filepath: Union[str,Path]) -> transforms.Compose:
+    """
+    Loads a transform recipe from a .json file and reconstructs the
+    torchvision.transforms.Compose pipeline.
+
+    Args:
+        filepath (str): Path to the saved transform recipe .json file.
+
+    Returns:
+        transforms.Compose: The reconstructed transformation pipeline.
+
+    Raises:
+        ValueError: If a transform name in the recipe is not found in
+            torchvision.transforms or the custom TRANSFORM_REGISTRY.
+    """
+    # validate filepath
+    final_filepath = make_fullpath(filepath, enforce="file")
+
+    try:
+        with open(final_filepath, 'r') as f:
+            recipe = json.load(f)
+    except Exception as e:
+        _LOGGER.error(f"Failed to load recipe from '{final_filepath}': {e}")
+        raise
+
+    pipeline_steps: List[Callable] = []
+
+    if VisionTransformRecipeKeys.PIPELINE not in recipe:
+        _LOGGER.error("Recipe file is invalid: missing 'pipeline' key.")
+        raise ValueError("Invalid recipe format.")
+
+    for step in recipe[VisionTransformRecipeKeys.PIPELINE]:
+        t_name = step[VisionTransformRecipeKeys.NAME]
+        t_kwargs = step[VisionTransformRecipeKeys.KWARGS]
+
+        transform_class: Any = None
+
+        # 1. Check standard torchvision transforms
+        if hasattr(transforms, t_name):
+            transform_class = getattr(transforms, t_name)
+        # 2. Check custom transforms
+        elif t_name in TRANSFORM_REGISTRY:
+            transform_class = TRANSFORM_REGISTRY[t_name]
+        # 3. Not found
+        else:
+            _LOGGER.error(f"Unknown transform '{t_name}' in recipe. Not found in torchvision.transforms or TRANSFORM_REGISTRY.")
+            raise ValueError(f"Unknown transform name: {t_name}")
+
+        # Instantiate the transform
+        try:
+            pipeline_steps.append(transform_class(**t_kwargs))
+        except Exception as e:
+            _LOGGER.error(f"Failed to instantiate transform '{t_name}' with kwargs {t_kwargs}: {e}")
+            raise
+
+    _LOGGER.info(f"Successfully loaded and built transform pipeline from '{final_filepath.name}'.")
+    return transforms.Compose(pipeline_steps)
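The recipe on disk is a JSON object holding a pipeline list of steps. The "pipeline" key is confirmed by the error message above; the "name"/"kwargs" spellings below are assumptions, since the canonical strings live in ml_tools.keys.VisionTransformRecipeKeys. A hypothetical round-trip, importing straight from the private module added in this diff:

    from pathlib import Path
    from ml_tools._ML_vision_recipe import save_recipe, load_recipe_and_build_transform

    # Hypothetical recipe; the "name"/"kwargs" key spellings are assumptions.
    # Resize, ToTensor, and Normalize are standard torchvision transforms,
    # resolved via hasattr(transforms, t_name) above.
    recipe = {
        "pipeline": [
            {"name": "Resize", "kwargs": {"size": [224, 224]}},
            {"name": "ToTensor", "kwargs": {}},
            {"name": "Normalize", "kwargs": {"mean": [0.485, 0.456, 0.406],
                                             "std": [0.229, 0.224, 0.225]}},
        ]
    }

    save_recipe(recipe, Path("recipe"))  # written as recipe.json (suffix is forced)
    transform = load_recipe_and_build_transform("recipe.json")  # transforms.Compose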
ml_tools/__init__.py
CHANGED
ml_tools/_logger.py
CHANGED
ml_tools/_schema.py
ADDED
@@ -0,0 +1,96 @@
+from typing import NamedTuple, Tuple, Optional, Dict, Union
+from pathlib import Path
+
+from .custom_logger import save_list_strings
+from .keys import DatasetKeys
+from ._logger import _LOGGER
+
+
+class FeatureSchema(NamedTuple):
+    """Holds the final, definitive schema for the model pipeline."""
+
+    # The final, ordered list of all feature names
+    feature_names: Tuple[str, ...]
+
+    # List of all continuous feature names
+    continuous_feature_names: Tuple[str, ...]
+
+    # List of all categorical feature names
+    categorical_feature_names: Tuple[str, ...]
+
+    # Map of {column_index: cardinality} for categorical features
+    categorical_index_map: Optional[Dict[int, int]]
+
+    # Map string-to-int category values (e.g., {'color': {'red': 0, 'blue': 1}})
+    categorical_mappings: Optional[Dict[str, Dict[str, int]]]
+
+    def _save_helper(self, artifact: Tuple[str, ...], directory: Union[str,Path], filename: str, verbose: bool):
+        to_save = list(artifact)
+
+        # empty check
+        if not to_save:
+            _LOGGER.warning(f"Skipping save for '{filename}': The feature list is empty.")
+            return
+
+        save_list_strings(list_strings=to_save,
+                          directory=directory,
+                          filename=filename,
+                          verbose=verbose)
+
+    def save_all_features(self, directory: Union[str,Path], verbose: bool=True):
+        """
+        Saves all feature names to a text file.
+
+        Args:
+            directory: The directory where the file will be saved.
+            verbose: If True, prints a confirmation message upon saving.
+        """
+        self._save_helper(artifact=self.feature_names,
+                          directory=directory,
+                          filename=DatasetKeys.FEATURE_NAMES,
+                          verbose=verbose)
+
+    def save_continuous_features(self, directory: Union[str,Path], verbose: bool=True):
+        """
+        Saves continuous feature names to a text file.
+
+        Args:
+            directory: The directory where the file will be saved.
+            verbose: If True, prints a confirmation message upon saving.
+        """
+        self._save_helper(artifact=self.continuous_feature_names,
+                          directory=directory,
+                          filename=DatasetKeys.CONTINUOUS_NAMES,
+                          verbose=verbose)
+
+    def save_categorical_features(self, directory: Union[str,Path], verbose: bool=True):
+        """
+        Saves categorical feature names to a text file.
+
+        Args:
+            directory: The directory where the file will be saved.
+            verbose: If True, prints a confirmation message upon saving.
+        """
+        self._save_helper(artifact=self.categorical_feature_names,
+                          directory=directory,
+                          filename=DatasetKeys.CATEGORICAL_NAMES,
+                          verbose=verbose)
+
+    def save_artifacts(self, directory: Union[str,Path]):
+        """
+        Saves feature names, categorical feature names, continuous feature names to separate text files.
+        """
+        self.save_all_features(directory=directory, verbose=True)
+        self.save_continuous_features(directory=directory, verbose=True)
+        self.save_categorical_features(directory=directory, verbose=True)
+
+    def __repr__(self) -> str:
+        """Returns a concise representation of the schema's contents."""
+        total = len(self.feature_names)
+        cont = len(self.continuous_feature_names)
+        cat = len(self.categorical_feature_names)
+        index_map = self.categorical_index_map is not None
+        cat_map = self.categorical_mappings is not None
+        return (
+            f"<FeatureSchema(total={total}, continuous={cont}, categorical={cat}, index_map={index_map}, categorical_map={cat_map})>"
+        )
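FeatureSchema is a plain NamedTuple, so it can be constructed directly. A small sketch with invented column names:

    from ml_tools._schema import FeatureSchema

    # Hypothetical schema: two continuous features plus one categorical
    # feature at column index 2 with cardinality 3.
    schema = FeatureSchema(
        feature_names=("age", "income", "color"),
        continuous_feature_names=("age", "income"),
        categorical_feature_names=("color",),
        categorical_index_map={2: 3},
        categorical_mappings={"color": {"red": 0, "green": 1, "blue": 2}},
    )

    print(schema)  # <FeatureSchema(total=3, continuous=2, categorical=1, index_map=True, categorical_map=True)>
    schema.save_artifacts("schema_artifacts")  # writes the three feature-name lists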
ml_tools/constants.py
ADDED
@@ -0,0 +1,79 @@
+CHEMICAL_ELEMENTS = [
+    "Hydrogen", "Helium", "Lithium", "Beryllium", "Boron", "Carbon", "Nitrogen", "Oxygen", "Fluorine", "Neon",
+    "Sodium", "Magnesium", "Aluminum", "Silicon", "Phosphorus", "Sulfur", "Chlorine", "Argon",
+    "Potassium", "Calcium", "Scandium", "Titanium", "Vanadium", "Chromium", "Manganese", "Iron", "Cobalt", "Nickel", "Copper", "Zinc",
+    "Gallium", "Germanium", "Arsenic", "Selenium", "Bromine", "Krypton",
+    "Rubidium", "Strontium", "Yttrium", "Zirconium", "Niobium", "Molybdenum", "Technetium", "Ruthenium", "Rhodium", "Palladium", "Silver", "Cadmium",
+    "Indium", "Tin", "Antimony", "Tellurium", "Iodine", "Xenon",
+    "Cesium", "Barium", "Lanthanum", "Cerium", "Praseodymium", "Neodymium", "Promethium", "Samarium", "Europium", "Gadolinium", "Terbium", "Dysprosium", "Holmium", "Erbium", "Thulium", "Ytterbium", "Lutetium",
+    "Hafnium", "Tantalum", "Tungsten", "Rhenium", "Osmium", "Iridium", "Platinum", "Gold", "Mercury",
+    "Thallium", "Lead", "Bismuth", "Polonium", "Astatine", "Radon",
+    "Francium", "Radium", "Actinium", "Thorium", "Protactinium", "Uranium", "Neptunium", "Plutonium", "Americium", "Curium", "Berkelium", "Californium", "Einsteinium", "Fermium", "Mendelevium", "Nobelium", "Lawrencium",
+    "Rutherfordium", "Dubnium", "Seaborgium", "Bohrium", "Hassium", "Meitnerium", "Darmstadtium", "Roentgenium", "Copernicium", "Nihonium", "Flerovium", "Moscovium", "Livermorium", "Tennessine", "Oganesson"
+]
+
+CHEMICAL_ELEMENT_SYMBOLS = [
+    "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne",
+    "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar",
+    "K", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn",
+    "Ga", "Ge", "As", "Se", "Br", "Kr",
+    "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd",
+    "In", "Sn", "Sb", "Te", "I", "Xe",
+    "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm", "Yb", "Lu",
+    "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg",
+    "Tl", "Pb", "Bi", "Po", "At", "Rn",
+    "Fr", "Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr",
+    "Rf", "Db", "Sg", "Bh", "Hs", "Mt", "Ds", "Rg", "Cn", "Nh", "Fl", "Mc", "Lv", "Ts", "Og"
+]
+
+# --- Physics & Chemistry ---
+
+# Speed of light in vacuum (m/s)
+SPEED_OF_LIGHT = 299792458.0
+
+# Planck constant (J·s)
+PLANCK_CONSTANT = 6.62607015e-34
+
+# Avogadro's number (mol⁻¹)
+AVOGADRO_NUMBER = 6.02214076e23
+
+# Universal gas constant (J/(mol·K))
+UNIVERSAL_GAS_CONSTANT = 8.314462618
+
+# Boltzmann constant (J/K)
+BOLTZMANN_CONSTANT = 1.380649e-23
+
+# Gravitational constant (m³·kg⁻¹·s⁻²)
+GRAVITATIONAL_CONSTANT = 6.67430e-11
+
+# Standard acceleration of gravity on Earth (m/s²)
+STANDARD_GRAVITY = 9.80665
+
+# Elementary charge (C)
+ELEMENTARY_CHARGE = 1.602176634e-19
+
+# Electron mass (kg)
+ELECTRON_MASS_KG = 9.1093837015e-31
+
+# Proton mass (kg)
+PROTON_MASS_KG = 1.67262192369e-27
+
+# Absolute zero (in Celsius)
+ABSOLUTE_ZERO_CELSIUS = -273.15
+
+# --- Astronomy ---
+
+# Astronomical Unit, the mean Earth-Sun distance (km)
+ASTRONOMICAL_UNIT_KM = 149597870.7
+
+# Light-year (km)
+LIGHT_YEAR_KM = 9460730472580.8
+
+# Earth's equatorial radius (km)
+EARTH_RADIUS_KM = 6378.137
+
+# Mass of the Earth (kg)
+EARTH_MASS_KG = 5.9722e24
+
+# Mass of the Sun (kg)
+SUN_MASS_KG = 1.98847e30