omnigenome-0.3.0a0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of omnigenome might be problematic.

Files changed (85)
  1. omnigenome/__init__.py +281 -0
  2. omnigenome/auto/__init__.py +3 -0
  3. omnigenome/auto/auto_bench/__init__.py +12 -0
  4. omnigenome/auto/auto_bench/auto_bench.py +484 -0
  5. omnigenome/auto/auto_bench/auto_bench_cli.py +230 -0
  6. omnigenome/auto/auto_bench/auto_bench_config.py +216 -0
  7. omnigenome/auto/auto_bench/config_check.py +34 -0
  8. omnigenome/auto/auto_train/__init__.py +13 -0
  9. omnigenome/auto/auto_train/auto_train.py +430 -0
  10. omnigenome/auto/auto_train/auto_train_cli.py +222 -0
  11. omnigenome/auto/bench_hub/__init__.py +12 -0
  12. omnigenome/auto/bench_hub/bench_hub.py +25 -0
  13. omnigenome/cli/__init__.py +13 -0
  14. omnigenome/cli/commands/__init__.py +13 -0
  15. omnigenome/cli/commands/base.py +83 -0
  16. omnigenome/cli/commands/bench/__init__.py +13 -0
  17. omnigenome/cli/commands/bench/bench_cli.py +202 -0
  18. omnigenome/cli/commands/rna/__init__.py +13 -0
  19. omnigenome/cli/commands/rna/rna_design.py +178 -0
  20. omnigenome/cli/omnigenome_cli.py +128 -0
  21. omnigenome/src/__init__.py +12 -0
  22. omnigenome/src/abc/__init__.py +12 -0
  23. omnigenome/src/abc/abstract_dataset.py +622 -0
  24. omnigenome/src/abc/abstract_metric.py +114 -0
  25. omnigenome/src/abc/abstract_model.py +689 -0
  26. omnigenome/src/abc/abstract_tokenizer.py +267 -0
  27. omnigenome/src/dataset/__init__.py +16 -0
  28. omnigenome/src/dataset/omni_dataset.py +435 -0
  29. omnigenome/src/lora/__init__.py +13 -0
  30. omnigenome/src/lora/lora_model.py +294 -0
  31. omnigenome/src/metric/__init__.py +15 -0
  32. omnigenome/src/metric/classification_metric.py +184 -0
  33. omnigenome/src/metric/metric.py +199 -0
  34. omnigenome/src/metric/ranking_metric.py +142 -0
  35. omnigenome/src/metric/regression_metric.py +191 -0
  36. omnigenome/src/misc/__init__.py +3 -0
  37. omnigenome/src/misc/utils.py +439 -0
  38. omnigenome/src/model/__init__.py +19 -0
  39. omnigenome/src/model/augmentation/__init__.py +12 -0
  40. omnigenome/src/model/augmentation/model.py +219 -0
  41. omnigenome/src/model/classification/__init__.py +12 -0
  42. omnigenome/src/model/classification/model.py +642 -0
  43. omnigenome/src/model/embedding/__init__.py +12 -0
  44. omnigenome/src/model/embedding/model.py +263 -0
  45. omnigenome/src/model/mlm/__init__.py +12 -0
  46. omnigenome/src/model/mlm/model.py +177 -0
  47. omnigenome/src/model/module_utils.py +232 -0
  48. omnigenome/src/model/regression/__init__.py +12 -0
  49. omnigenome/src/model/regression/model.py +786 -0
  50. omnigenome/src/model/regression/resnet.py +483 -0
  51. omnigenome/src/model/rna_design/__init__.py +12 -0
  52. omnigenome/src/model/rna_design/model.py +426 -0
  53. omnigenome/src/model/seq2seq/__init__.py +12 -0
  54. omnigenome/src/model/seq2seq/model.py +44 -0
  55. omnigenome/src/tokenizer/__init__.py +16 -0
  56. omnigenome/src/tokenizer/bpe_tokenizer.py +226 -0
  57. omnigenome/src/tokenizer/kmers_tokenizer.py +247 -0
  58. omnigenome/src/tokenizer/single_nucleotide_tokenizer.py +249 -0
  59. omnigenome/src/trainer/__init__.py +14 -0
  60. omnigenome/src/trainer/accelerate_trainer.py +739 -0
  61. omnigenome/src/trainer/hf_trainer.py +75 -0
  62. omnigenome/src/trainer/trainer.py +579 -0
  63. omnigenome/utility/__init__.py +3 -0
  64. omnigenome/utility/dataset_hub/__init__.py +13 -0
  65. omnigenome/utility/dataset_hub/dataset_hub.py +178 -0
  66. omnigenome/utility/ensemble.py +324 -0
  67. omnigenome/utility/hub_utils.py +517 -0
  68. omnigenome/utility/model_hub/__init__.py +12 -0
  69. omnigenome/utility/model_hub/model_hub.py +231 -0
  70. omnigenome/utility/pipeline_hub/__init__.py +12 -0
  71. omnigenome/utility/pipeline_hub/pipeline.py +483 -0
  72. omnigenome/utility/pipeline_hub/pipeline_hub.py +129 -0
  73. omnigenome-0.3.0a0.dist-info/METADATA +224 -0
  74. omnigenome-0.3.0a0.dist-info/RECORD +85 -0
  75. omnigenome-0.3.0a0.dist-info/WHEEL +5 -0
  76. omnigenome-0.3.0a0.dist-info/entry_points.txt +3 -0
  77. omnigenome-0.3.0a0.dist-info/licenses/LICENSE +201 -0
  78. omnigenome-0.3.0a0.dist-info/top_level.txt +2 -0
  79. tests/__init__.py +9 -0
  80. tests/conftest.py +160 -0
  81. tests/test_dataset_patterns.py +291 -0
  82. tests/test_examples_syntax.py +83 -0
  83. tests/test_model_loading.py +183 -0
  84. tests/test_rna_functions.py +255 -0
  85. tests/test_training_patterns.py +302 -0
omnigenome/auto/auto_bench/auto_bench_cli.py
@@ -0,0 +1,230 @@
+ # -*- coding: utf-8 -*-
+ # file: auto_bench_cli.py
+ # time: 19:18 05/02/2025
+ # author: YANG, HENG <hy345@exeter.ac.uk> (杨恒)
+ # Homepage: https://yangheng95.github.io
+ # github: https://github.com/yangheng95
+ # huggingface: https://huggingface.co/yangheng
+ # google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en
+ # Copyright (C) 2019-2025. All Rights Reserved.
+
+ import argparse
+ import os
+ import platform
+ import sys
+ import time
+
+ from typing import Optional
+ from omnigenome import AutoBench
+ from omnigenome.src.misc.utils import fprint
+
+
+ def bench_command(args: Optional[list] = None):
+     """
+     Entry point for the OmniGenome benchmark command-line interface.
+
+     This function parses command-line arguments, initializes the AutoBench,
+     and runs the evaluation.
+
+     :param args: A list of command-line arguments. If None, `sys.argv` is used.
+     """
+
+     parser = create_parser()
+     parsed_args = parser.parse_args(args)
+
+     model_path = parsed_args.model
+     fprint(f"\n>> Starting evaluation for model: {model_path}")
+
+     # Special handling for multimolecule models
+     if "multimolecule" in model_path:
+         from multimolecule import RnaTokenizer, AutoModelForTokenPrediction
+
+         tokenizer = RnaTokenizer.from_pretrained(model_path)
+         model = AutoModelForTokenPrediction.from_pretrained(
+             model_path, trust_remote_code=True
+         ).base_model
+     else:
+         tokenizer = parsed_args.tokenizer
+         model = model_path
+
+     # Initialize benchmark
+     autobench = AutoBench(
+         benchmark=parsed_args.benchmark,
+         model_name_or_path=model,
+         tokenizer=tokenizer,
+         overwrite=parsed_args.overwrite,
+         trainer=parsed_args.trainer,
+     )
+
+     # Run evaluation
+     autobench.run(**vars(parsed_args))
+
+
+ def create_parser() -> argparse.ArgumentParser:
+     """
+     Creates the argument parser for the benchmark CLI.
+
+     :return: An `argparse.ArgumentParser` instance.
+     """
+     parser = argparse.ArgumentParser(
+         description="Genomic Foundation Model Benchmark Suite (Single Model)",
+         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+     )
+     # Core arguments
+     parser.add_argument(
+         "-b",
+         "--benchmark",
+         type=str,
+         default="RGB",
+         # choices=["RGB", "PGB", "GUE", "GB", "BEACON"],
+         help="Name or path of the benchmark root directory (e.g. RGB, PGB, GUE, GB, BEACON).",
+     )
+     parser.add_argument(
+         "-t",
+         "--tokenizer",
+         type=str,
+         default=None,
+         help="Path to the tokenizer to use (HF tokenizer ID or local path).",
+     )
+
+     parser.add_argument(
+         "-m",
+         "--model",
+         type=str,
+         required=True,
+         help="Path to the model to evaluate (HF model ID or local path).",
+     )
+
+     # Optional arguments
+     parser.add_argument(
+         "--overwrite",
+         action="store_true",
+         default=False,
+         help="Overwrite existing benchmark results; otherwise resume from the benchmark checkpoint.",
+     )
+     parser.add_argument(
+         "--bs_scale",
+         type=int,
+         default=1,
+         help="Batch size scale factor. To increase GPU memory utilization, set to 2 or 4, etc.",
+     )
+     parser.add_argument(
+         "--trainer",
+         type=str,
+         default="accelerate",
+         choices=["native", "accelerate", "hf_trainer"],
+         help="Trainer to use for training.\n"
+         "Use 'accelerate' for distributed training; "
+         "run 'accelerate config' to customize its behavior.\n"
+         "Use 'hf_trainer' for the Hugging Face Trainer.\n"
+         "Use 'native' for a plain PyTorch training loop.\n",
+     )
+     parser.add_argument(
+         "--autocast",
+         type=str,
+         default="fp16",
+         choices=["fp16", "fp32", "bf16", "fp8", "no"],
+         help="Automatic mixed precision training mode.",
+     )
+
+     return parser
+
+
+ def run_bench():
+     """
+     Wrapper function to run the benchmark command.
+
+     This function sets up logging, constructs the command to execute
+     (potentially with `accelerate launch`), and runs it.
+     """
+     fprint("Running benchmark, this may take a while, please be patient...")
+     fprint("You can find the logs in the 'autobench_logs' directory.")
+     fprint("You can find the metrics in the 'autobench_evaluations' directory.")
+     fprint(
+         "If you don't intend to use accelerate, please add '--trainer native' to the command."
+     )
+     fprint(
+         "If you want to alter accelerate's behavior, please refer to the 'accelerate config' command."
+     )
+     fprint("If you encounter any issues, please report them on the GitHub repository.")
+     os.makedirs("autobench_logs", exist_ok=True)
+     time_str = time.strftime("%Y-%m-%d-%H-%M-%S")
+     log_file = f"autobench_logs/AutoBench-{time_str}.log"
+     from pathlib import Path
+
+     try:
+         mixed_precision = sys.argv[sys.argv.index("--autocast") + 1].lower()
+     except ValueError:
+         mixed_precision = "fp16"
+     file_path = Path(__file__).resolve()
+     if (
+         "--trainer" in sys.argv
+         and sys.argv[sys.argv.index("--trainer") + 1].lower() == "native"
+     ):
+         cmd_base = f'python "{file_path}" ' + " ".join(sys.argv[1:])
+     else:
+         cmd_base = (
+             f'accelerate launch --mixed_precision "{mixed_precision}" "{file_path}" '
+             + " ".join(sys.argv[1:])
+         )
+
+     # Use platform-specific tee commands:
+     if platform.system() == "Windows":
+         # On Windows, use PowerShell's tee-object.
+         # The command below launches PowerShell and passes the tee-object command.
+         # try:
+         #     cmd = f"{cmd_base} 2>&1 | powershell -Command Get-Content {log_file} -Wait"
+         # except Exception as e:
+         #     fprint(f"The log file cannot be saved due to Error: {e}")
+         #     fprint(
+         #         "If commands are not allowed in PowerShell, "
+         #         "please run 'Set-ExecutionPolicy RemoteSigned' in PowerShell with Admin."
+         #     )
+         cmd = f"{cmd_base} 2>&1"
+     else:
+         # On Unix-like systems, use the standard tee command.
+         cmd = f"{cmd_base} 2>&1 | tee '{log_file}'"
+
+     # Execute the command.
+     sys.exit(os.system(cmd))
+
+     # # Regex matching tqdm progress-bar lines (adjust to the actual output)
+     # tqdm_pattern = re.compile(r'^.*\d+%\|.*\|\s+\d+/\d+\s+\[.*\]\s*$')
+     #
+     # last_tqdm_line = ''
+     #
+     # with open(log_file, 'w', encoding='utf-8') as log_file:
+     #     # Run the command and capture its output stream
+     #     proc = subprocess.Popen(
+     #         cmd_base,
+     #         shell=True,
+     #         stdout=subprocess.PIPE,
+     #         stderr=subprocess.STDOUT,
+     #         bufsize=1,
+     #         universal_newlines=True
+     #     )
+     #
+     #     # Process the output stream in real time
+     #     for line in proc.stdout:
+     #         line = line.rstrip()  # strip the trailing newline
+     #         if tqdm_pattern.match(line):
+     #             # Remember the latest tqdm line
+     #             last_tqdm_line = line + '\n'  # the newline has to be added manually
+     #             # Show the progress bar in place (overwrite mode)
+     #             sys.stdout.write('\r' + line)
+     #             sys.stdout.flush()
+     #         else:
+     #             # Write to the log and print normally
+     #             log_file.write(line + '\n')
+     #             print(line)
+     #
+     #     # After the command finishes, write the last tqdm progress bar
+     #     if last_tqdm_line:
+     #         log_file.write(last_tqdm_line)
+     #         sys.stdout.write('\n')  # a final newline so the bar is not overwritten
+     #
+     #     sys.exit(proc.returncode)
+
+
+ if __name__ == "__main__":
+     bench_command()
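
Taken together, bench_command and create_parser form a thin argparse front end around AutoBench: parse the flags, resolve the tokenizer/model pair, and hand everything to AutoBench.run. The sketch below shows one way this entry point could be exercised with an explicit argument list instead of sys.argv; it is a minimal illustration based only on the code above, and the model identifier is a placeholder, not a value shipped with the package.

    # Illustrative usage sketch (placeholder model ID, not part of the package)
    from omnigenome.auto.auto_bench.auto_bench_cli import bench_command

    bench_command([
        "--model", "your-org/your-genomic-model",  # placeholder HF model ID or local path
        "--benchmark", "RGB",                      # benchmark root; "RGB" is the default
        "--trainer", "native",                     # plain PyTorch training loop
        "--autocast", "fp16",
    ])

Calling bench_command with a list like this bypasses the run_bench wrapper, so no accelerate launch process or tee-based log file is involved.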
omnigenome/auto/auto_bench/auto_bench_config.py
@@ -0,0 +1,216 @@
+ # -*- coding: utf-8 -*-
+ # file: auto_bench_config.py
+ # time: 14:58 29/04/2024
+ # author: YANG, HENG <hy345@exeter.ac.uk> (杨恒)
+ # github: https://github.com/yangheng95
+ # huggingface: https://huggingface.co/yangheng
+ # google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en
+ # Copyright (C) 2019-2024. All Rights Reserved.
+ from argparse import Namespace
+
+ from transformers import PretrainedConfig
+
+ from .config_check import config_check
+
+
+ class AutoBenchConfig(PretrainedConfig):
+     """
+     A configuration class for AutoBench, extending `transformers.PretrainedConfig`.
+
+     This class holds the configuration parameters for a benchmark run. It behaves
+     like a dictionary and also tracks how many times each parameter is accessed.
+     """
+
+     def __init__(self, args=None, **kwargs):
+         """
+         Initializes the AutoBenchConfig.
+
+         :param args: A dictionary or `argparse.Namespace` of parameters.
+         :param kwargs: Additional keyword arguments for `PretrainedConfig`.
+         """
+         if not args:
+             args = {}
+         super().__init__(**kwargs)
+
+         if isinstance(args, Namespace):
+             self.args = vars(args)
+             self.args_call_count = {arg: 0 for arg in vars(args)}
+         else:
+             self.args = args
+             self.args_call_count = {arg: 0 for arg in args}
+
+     def __getattribute__(self, arg_name):
+         """
+         Get the value of an argument and increment its call count.
+
+         :param arg_name: The name of the argument.
+         :return: The value of the argument.
+         """
+         if arg_name == "args" or arg_name == "args_call_count":
+             return super().__getattribute__(arg_name)
+         try:
+             value = super().__getattribute__("args")[arg_name]
+             args_call_count = super().__getattribute__("args_call_count")
+             args_call_count[arg_name] += 1
+             super().__setattr__("args_call_count", args_call_count)
+             return value
+
+         except Exception as e:
+             return super().__getattribute__(arg_name)
+
+     def __setattr__(self, arg_name, value):
+         """
+         Set the value of an argument and add it to the argument dict and call count dict.
+
+         :param arg_name: The name of the argument.
+         :param value: The value of the argument.
+         """
+         if arg_name == "args" or arg_name == "args_call_count":
+             super().__setattr__(arg_name, value)
+             return
+         try:
+             args = super().__getattribute__("args")
+             args[arg_name] = value
+             super().__setattr__("args", args)
+             args_call_count = super().__getattribute__("args_call_count")
+
+             if arg_name in args_call_count:
+                 super().__setattr__("args_call_count", args_call_count)
+
+             else:
+                 args_call_count[arg_name] = 0
+                 super().__setattr__("args_call_count", args_call_count)
+
+         except Exception as e:
+             super().__setattr__(arg_name, value)
+
+     def get(self, key, default=None):
+         """
+         Get the value of a key from the parameter dict. If the key is found, increment its call frequency.
+         :param key: The key to look for in the parameter dict.
+         :param default: The default value to return if the key is not found.
+         :return: The value of the key in the parameter dict, or the default value if the key is not found.
+         """
+         if key in self.args_call_count:
+             self.args_call_count[key] += 1
+         return self.args.get(key, default)
+
+     def update(self, *args, **kwargs):
+         """
+         Update the parameter dict with the given arguments and keyword arguments, and check if the updated configuration is valid.
+         :param args: Positional arguments to update the parameter dict.
+         :param kwargs: Keyword arguments to update the parameter dict.
+         """
+         self.args.update(*args, **kwargs)
+         config_check(self.args)
+
+     def pop(self, *args):
+         """
+         Pop a value from the parameter dict.
+         :param args: Arguments to pop from the parameter dict.
+         :return: The value popped from the parameter dict.
+         """
+         return self.args.pop(*args)
+
+     def keys(self):
+         """
+         Get a list of all keys in the parameter dict.
+         :return: A list of all keys in the parameter dict.
+         """
+         return self.args.keys()
+
+     def values(self):
+         """
+         Get a list of all values in the parameter dict.
+         :return: A list of all values in the parameter dict.
+         """
+         return self.args.values()
+
+     def items(self):
+         """
+         Get a list of all key-value pairs in the parameter dict.
+         :return: A list of all key-value pairs in the parameter dict.
+         """
+         return self.args.items()
+
+     def __str__(self):
+         """
+         Get a string representation of the parameter dict.
+         :return: A string representation of the parameter dict.
+         """
+         return str(self.args)
+
+     def __repr__(self):
+         """
+         Return a detailed string representation of the configuration,
+         including all parameters and the frequency of their access.
+         """
+         param_list = []
+         for key, value in self.args.items():
+             count = self.args_call_count.get(key, 0)
+             param_list.append(f"{key}={value!r} (accessed {count} times)")
+         params_str = ", ".join(param_list)
+         return f"{self.__class__.__name__}({params_str})"
+
+     def __len__(self):
+         """
+         Return the number of items in the parameter dict.
+         """
+         return len(self.args)
+
+     def __iter__(self):
+         """
+         Return an iterator over the keys of the parameter dict.
+         """
+         return iter(self.args)
+
+     def __contains__(self, item):
+         """
+         Check if the given item is in the parameter dict.
+         :param item: The item to check.
+         :return: True if the item is in the parameter dict, False otherwise.
+         """
+         return item in self.args
+
+     def __getitem__(self, item):
+         """
+         Get the value of a key from the parameter dict.
+         :param item: The key to look for in the parameter dict.
+         :return: The value of the key in the parameter dict.
+         """
+         return self.args[item]
+
+     def __setitem__(self, key, value):
+         """
+         Set the value of a key in the parameter dict. Also set the call frequency of the key to 0 and check if the updated
+         configuration is valid.
+         :param key: The key to set the value for in the parameter dict.
+         :param value: The value to set for the key in the parameter dict.
+         """
+         self.args[key] = value
+         self.args_call_count[key] = 0
+         config_check(self.args)
+
+     def __delitem__(self, key):
+         """
+         Delete a key-value pair from the parameter dict and check if the updated configuration is valid.
+         :param key: The key to delete from the parameter dict.
+         """
+         del self.args[key]
+         config_check(self.args)
+
+     def __eq__(self, other):
+         """
+         Check if the parameter dict is equal to another object.
+         :param other: The other object to compare with the parameter dict.
+         :return: True if the parameter dict is equal to the other object, False otherwise.
+         """
+         return self.args == other
+
+     def __ne__(self, other):
+         """
+         Check if the parameter dict is not equal to another object.
+         :param other: The other object to compare with the parameter dict.
+         :return: True if the parameter dict is not equal to the other object, False otherwise.
+         """
+         return self.args != other
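
Because AutoBenchConfig routes attribute and item access through its internal args dict, it can be used like a plain dictionary while silently recording how often each parameter is read. The following sketch is illustrative only; the parameter names are arbitrary examples rather than a schema defined by the package.

    # Illustrative sketch of the dict-like interface and access counting
    from omnigenome.auto.auto_bench.auto_bench_config import AutoBenchConfig

    config = AutoBenchConfig({"learning_rate": 2e-5, "epochs": 3})
    lr = config.learning_rate               # attribute access is served from config.args
    lr = config.get("learning_rate")        # .get() also bumps the access counter
    config["batch_size"] = 16               # item assignment runs config_check(...)
    print("epochs" in config, len(config))  # dict-style membership and length
    print(repr(config))                     # each parameter with its access count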
omnigenome/auto/auto_bench/config_check.py
@@ -0,0 +1,34 @@
+ # -*- coding: utf-8 -*-
+ # file: config_check.py
+ # time: 02/11/2022 17:05
+ # author: YANG, HENG <hy345@exeter.ac.uk> (杨恒)
+ # github: https://github.com/yangheng95
+ # GScholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en
+ # ResearchGate: https://www.researchgate.net/profile/Heng-Yang-17/research
+ # Copyright (C) 2022. All Rights Reserved.
+
+ one_shot_messages = set()
+
+
+ def config_check(args):
+     """
+     Performs a basic check on the configuration arguments.
+
+     This function can be expanded to include more complex validation logic
+     for the benchmark configuration.
+
+     :param args: A dictionary of configuration arguments.
+     :raises RuntimeError: If a configuration check fails.
+     """
+     try:
+         if "use_amp" in args:
+             assert args["use_amp"] in {True, False}
+         # if "patience" in args:
+         #     assert args["patience"] > 0
+
+     except AssertionError as e:
+         raise RuntimeError(
+             "Exception: {}. Some parameters are not valid, please see the main example.".format(
+                 e
+             )
+         )
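
At the moment config_check only validates the optional use_amp entry, raising a RuntimeError when it is present but not a proper boolean. A short illustration (the other keys are arbitrary examples):

    from omnigenome.auto.auto_bench.config_check import config_check

    config_check({"epochs": 3})           # no "use_amp" key, passes silently
    config_check({"use_amp": True})       # boolean value, passes

    try:
        config_check({"use_amp": "yes"})  # not in {True, False}
    except RuntimeError as err:
        print(err)                        # "... Some parameters are not valid ..."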
omnigenome/auto/auto_train/__init__.py
@@ -0,0 +1,13 @@
+ # -*- coding: utf-8 -*-
+ # file: __init__.py
+ # time: 15:36 13/02/2025
+ # author: YANG, HENG <hy345@exeter.ac.uk> (杨恒)
+ # Homepage: https://yangheng95.github.io
+ # github: https://github.com/yangheng95
+ # huggingface: https://huggingface.co/yangheng
+ # google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en
+ # Copyright (C) 2019-2025. All Rights Reserved.
+ """
+ This package contains modules for automated training of models.
+ """
+