mdo-lib 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. mdo_lib-0.1.0/LICENSE +21 -0
  2. mdo_lib-0.1.0/PKG-INFO +252 -0
  3. mdo_lib-0.1.0/README.md +209 -0
  4. mdo_lib-0.1.0/mdo_lib/__init__.py +45 -0
  5. mdo_lib-0.1.0/mdo_lib/core/__init__.py +18 -0
  6. mdo_lib-0.1.0/mdo_lib/core/constraint.py +29 -0
  7. mdo_lib-0.1.0/mdo_lib/core/evaluator.py +60 -0
  8. mdo_lib-0.1.0/mdo_lib/core/objective.py +27 -0
  9. mdo_lib-0.1.0/mdo_lib/core/parameter.py +41 -0
  10. mdo_lib-0.1.0/mdo_lib/core/problem.py +24 -0
  11. mdo_lib-0.1.0/mdo_lib/core/result.py +39 -0
  12. mdo_lib-0.1.0/mdo_lib/doe/__init__.py +13 -0
  13. mdo_lib-0.1.0/mdo_lib/doe/base.py +22 -0
  14. mdo_lib-0.1.0/mdo_lib/doe/central_composite.py +61 -0
  15. mdo_lib-0.1.0/mdo_lib/doe/full_factorial.py +39 -0
  16. mdo_lib-0.1.0/mdo_lib/doe/latin_hypercube.py +43 -0
  17. mdo_lib-0.1.0/mdo_lib/examples/__init__.py +13 -0
  18. mdo_lib-0.1.0/mdo_lib/examples/basic_optimization.py +51 -0
  19. mdo_lib-0.1.0/mdo_lib/examples/reliability_analysis.py +61 -0
  20. mdo_lib-0.1.0/mdo_lib/examples/sensitivity_analysis.py +62 -0
  21. mdo_lib-0.1.0/mdo_lib/examples/surrogate_modeling.py +58 -0
  22. mdo_lib-0.1.0/mdo_lib/optimization/__init__.py +15 -0
  23. mdo_lib-0.1.0/mdo_lib/optimization/base.py +22 -0
  24. mdo_lib-0.1.0/mdo_lib/optimization/bayesian.py +117 -0
  25. mdo_lib-0.1.0/mdo_lib/optimization/genetic.py +184 -0
  26. mdo_lib-0.1.0/mdo_lib/optimization/gradient.py +88 -0
  27. mdo_lib-0.1.0/mdo_lib/optimization/pso.py +132 -0
  28. mdo_lib-0.1.0/mdo_lib/reliability/__init__.py +13 -0
  29. mdo_lib-0.1.0/mdo_lib/reliability/base.py +22 -0
  30. mdo_lib-0.1.0/mdo_lib/reliability/form.py +90 -0
  31. mdo_lib-0.1.0/mdo_lib/reliability/monte_carlo.py +73 -0
  32. mdo_lib-0.1.0/mdo_lib/reliability/sorm.py +103 -0
  33. mdo_lib-0.1.0/mdo_lib/sensitivity/__init__.py +13 -0
  34. mdo_lib-0.1.0/mdo_lib/sensitivity/base.py +22 -0
  35. mdo_lib-0.1.0/mdo_lib/sensitivity/fast.py +67 -0
  36. mdo_lib-0.1.0/mdo_lib/sensitivity/morris.py +74 -0
  37. mdo_lib-0.1.0/mdo_lib/sensitivity/sobol.py +79 -0
  38. mdo_lib-0.1.0/mdo_lib/surrogate/__init__.py +13 -0
  39. mdo_lib-0.1.0/mdo_lib/surrogate/base.py +39 -0
  40. mdo_lib-0.1.0/mdo_lib/surrogate/kriging.py +116 -0
  41. mdo_lib-0.1.0/mdo_lib/surrogate/polynomial.py +56 -0
  42. mdo_lib-0.1.0/mdo_lib/surrogate/rbf.py +74 -0
  43. mdo_lib-0.1.0/mdo_lib/tests/__init__.py +1 -0
  44. mdo_lib-0.1.0/mdo_lib/uncertainty/__init__.py +11 -0
  45. mdo_lib-0.1.0/mdo_lib/uncertainty/base.py +24 -0
  46. mdo_lib-0.1.0/mdo_lib/uncertainty/reliability_based.py +114 -0
  47. mdo_lib-0.1.0/mdo_lib/uncertainty/robust.py +93 -0
  48. mdo_lib-0.1.0/mdo_lib/utils/__init__.py +14 -0
  49. mdo_lib-0.1.0/mdo_lib/utils/decorators.py +40 -0
  50. mdo_lib-0.1.0/mdo_lib/utils/parallel.py +23 -0
  51. mdo_lib-0.1.0/mdo_lib/utils/visualization.py +110 -0
  52. mdo_lib-0.1.0/mdo_lib.egg-info/PKG-INFO +252 -0
  53. mdo_lib-0.1.0/mdo_lib.egg-info/SOURCES.txt +57 -0
  54. mdo_lib-0.1.0/mdo_lib.egg-info/dependency_links.txt +1 -0
  55. mdo_lib-0.1.0/mdo_lib.egg-info/requires.txt +5 -0
  56. mdo_lib-0.1.0/mdo_lib.egg-info/top_level.txt +1 -0
  57. mdo_lib-0.1.0/pyproject.toml +30 -0
  58. mdo_lib-0.1.0/setup.cfg +4 -0
  59. mdo_lib-0.1.0/setup.py +31 -0
mdo_lib-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 MDO Contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
mdo_lib-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,252 @@
1
+ Metadata-Version: 2.1
2
+ Name: mdo-lib
3
+ Version: 0.1.0
4
+ Summary: Multi-Disciplinary Optimization library
5
+ Home-page: https://github.com/yourusername/mdo-lib
6
+ Author: Your Name
7
+ Author-email: Your Name <your.email@example.com>
8
+ License: MIT License
9
+
10
+ Copyright (c) 2026 MDO Contributors
11
+
12
+ Permission is hereby granted, free of charge, to any person obtaining a copy
13
+ of this software and associated documentation files (the "Software"), to deal
14
+ in the Software without restriction, including without limitation the rights
15
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16
+ copies of the Software, and to permit persons to whom the Software is
17
+ furnished to do so, subject to the following conditions:
18
+
19
+ The above copyright notice and this permission notice shall be included in all
20
+ copies or substantial portions of the Software.
21
+
22
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28
+ SOFTWARE.
29
+
30
+ Project-URL: Homepage, https://github.com/yourusername/mdo-lib
31
+ Project-URL: Bug Tracker, https://github.com/yourusername/mdo-lib/issues
32
+ Classifier: Programming Language :: Python :: 3
33
+ Classifier: License :: OSI Approved :: MIT License
34
+ Classifier: Operating System :: OS Independent
35
+ Requires-Python: >=3.7
36
+ Description-Content-Type: text/markdown
37
+ License-File: LICENSE
38
+ Requires-Dist: numpy
39
+ Requires-Dist: scipy
40
+ Requires-Dist: pandas
41
+ Requires-Dist: scikit-learn
42
+ Requires-Dist: matplotlib
43
+
44
+ # MDO - Multi-Disciplinary Optimization Library
45
+
46
+ MDO is a comprehensive Python library for optimization, surrogate modeling, sensitivity analysis, reliability analysis, and uncertainty optimization.
47
+
48
+ ## Features
49
+
50
+ - **Parameter Management**: Define and manage optimization parameters with bounds and constraints
51
+ - **Design of Experiments (DOE)**: Generate samples using various DOE methods
52
+ - **Surrogate Modeling**: Build surrogate models for efficient function approximation
53
+ - **Sensitivity Analysis**: Analyze the impact of parameters on objectives
54
+ - **Optimization Algorithms**: Implement various optimization algorithms
55
+ - **Reliability Analysis**: Assess the reliability of designs under uncertainty
56
+ - **Uncertainty Optimization**: Optimize designs considering parameter uncertainty
57
+
58
+ ## Installation
59
+
60
+ ```bash
61
+ pip install .
62
+ ```
63
+
64
+ ## Dependencies
65
+
66
+ - numpy
67
+ - scipy
68
+ - pandas
69
+ - scikit-learn (for surrogate models)
70
+ - matplotlib (for visualization)
71
+
72
+ ## Usage Examples
73
+
74
+ ### Basic Optimization
75
+
76
+ ```python
77
+ from mdo import Problem, Parameter, Objective, Constraint
78
+ from mdo.optimization import GeneticAlgorithm
79
+
80
+ # Define parameters
81
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
82
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
83
+
84
+ # Define objective function
85
+ def objective_function(x):
86
+ return (x[0] - 0.5)**2 + (x[1] - 0.5)**2
87
+
88
+ obj = Objective('f', 'minimize')
89
+ obj.evaluate = objective_function
90
+
91
+ # Define constraint function
92
+ def constraint_function(x):
93
+ return x[0] + x[1] - 1.0
94
+
95
+ con = Constraint('g', 'inequality', upper_bound=0.0)
96
+ con.evaluate = constraint_function
97
+
98
+ # Create problem
99
+ problem = Problem([x1, x2], [obj], [con])
100
+
101
+ # Create optimizer
102
+ optimizer = GeneticAlgorithm(problem)
103
+
104
+ # Run optimization
105
+ result = optimizer.optimize()
106
+
107
+ print("Best point:", result.sample.values)
108
+ print("Objective value:", result.objectives[0])
109
+ ```
110
+
111
+ ### Surrogate Modeling
112
+
113
+ ```python
114
+ import numpy as np
+ from mdo import Problem, Parameter, Objective
115
+ from mdo.doe import LatinHypercube
116
+ from mdo.surrogate import Kriging
117
+ from mdo.core import Evaluator
118
+
119
+ # Define parameters and objective function
120
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
121
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
122
+
123
+ def objective_function(x):
124
+ return np.sin(2 * np.pi * x[0]) * np.cos(2 * np.pi * x[1])
125
+
126
+ obj = Objective('f', 'minimize')
127
+ obj.evaluate = objective_function
128
+
129
+ # Create problem
130
+ problem = Problem([x1, x2], [obj])
131
+
132
+ # Generate samples
133
+ doe = LatinHypercube(problem, n_samples=50)
134
+ samples = doe.generate()
135
+
136
+ # Evaluate samples
137
+ evaluator = Evaluator(problem)
138
+ results = evaluator.evaluate(samples)
139
+
140
+ # Extract X and y
141
+ X = [sample.values for sample in samples]
142
+ y = [result.objectives[0] for result in results]
143
+
144
+ # Train surrogate model
145
+ model = Kriging()
146
+ model.fit(X, y)
147
+
148
+ # Predict
149
+ print(model.predict([[0.25, 0.25]]))
150
+ ```
151
+
152
+ ### Sensitivity Analysis
153
+
154
+ ```python
155
+ from mdo import Problem, Parameter, Objective
156
+ from mdo.doe import LatinHypercube
157
+ from mdo.surrogate import Kriging
158
+ from mdo.sensitivity import SobolIndices
159
+ from mdo.core import Evaluator
160
+
161
+ # Define parameters and objective function
162
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
163
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
164
+ x3 = Parameter('x3', 0.5, bounds=[0, 1])
165
+
166
+ def objective_function(x):
167
+ return (x[0] - 0.5)**2 + 2*(x[1] - 0.5)**2 + 3*(x[2] - 0.5)**2
168
+
169
+ obj = Objective('f', 'minimize')
170
+ obj.evaluate = objective_function
171
+
172
+ # Create problem
173
+ problem = Problem([x1, x2, x3], [obj])
174
+
175
+ # Generate samples and evaluate
176
+ doe = LatinHypercube(problem, n_samples=100)
177
+ samples = doe.generate()
178
+ evaluator = Evaluator(problem)
179
+ results = evaluator.evaluate(samples)
180
+
181
+ # Train surrogate model
182
+ X = [sample.values for sample in samples]
183
+ y = [result.objectives[0] for result in results]
184
+ model = Kriging()
185
+ model.fit(X, y)
186
+
187
+ # Perform sensitivity analysis
188
+ sobol = SobolIndices(model, problem)
189
+ results = sobol.analyze()
190
+ print(results)
191
+ ```
192
+
193
+ ### Reliability Analysis
194
+
195
+ ```python
196
+ from mdo import Problem, Parameter, Objective
197
+ from mdo.doe import LatinHypercube
198
+ from mdo.surrogate import Kriging
199
+ from mdo.reliability import MonteCarlo
200
+ from mdo.core import Evaluator
201
+
202
+ # Define parameters and limit state function
203
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
204
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
205
+
206
+ def limit_state_function(x):
207
+ return (x[0] - 0.7)**2 + (x[1] - 0.7)**2 - 0.1
208
+
209
+ obj = Objective('g', 'minimize')
210
+ obj.evaluate = limit_state_function
211
+
212
+ # Create problem
213
+ problem = Problem([x1, x2], [obj])
214
+
215
+ # Generate samples and evaluate
216
+ doe = LatinHypercube(problem, n_samples=50)
217
+ samples = doe.generate()
218
+ evaluator = Evaluator(problem)
219
+ results = evaluator.evaluate(samples)
220
+
221
+ # Train surrogate model
222
+ X = [sample.values for sample in samples]
223
+ y = [result.objectives[0] for result in results]
224
+ model = Kriging()
225
+ model.fit(X, y)
226
+
227
+ # Perform reliability analysis
228
+ monte_carlo = MonteCarlo(problem, model, n_samples=10000)
229
+ results = monte_carlo.analyze()
230
+ print(results)
231
+ ```
232
+
233
+ ## Modules
234
+
235
+ - **core**: Core functionality for parameter management, problem definition, and evaluation
236
+ - **doe**: Design of Experiments methods
237
+ - **surrogate**: Surrogate models for function approximation
238
+ - **sensitivity**: Sensitivity analysis methods
239
+ - **optimization**: Optimization algorithms
240
+ - **reliability**: Reliability analysis methods
241
+ - **uncertainty**: Uncertainty optimization methods
242
+ - **utils**: Utility functions for parallel computing and visualization
243
+ - **examples**: Usage examples
244
+ - **tests**: Test cases
245
+
246
+ ## Contributing
247
+
248
+ Contributions are welcome! Please feel free to submit a Pull Request.
249
+
250
+ ## License
251
+
252
+ MIT License
@@ -0,0 +1,209 @@
1
+ # MDO - Multi-Disciplinary Optimization Library
2
+
3
+ MDO is a comprehensive Python library for optimization, surrogate modeling, sensitivity analysis, reliability analysis, and uncertainty optimization.
4
+
5
+ ## Features
6
+
7
+ - **Parameter Management**: Define and manage optimization parameters with bounds and constraints
8
+ - **Design of Experiments (DOE)**: Generate samples using various DOE methods
9
+ - **Surrogate Modeling**: Build surrogate models for efficient function approximation
10
+ - **Sensitivity Analysis**: Analyze the impact of parameters on objectives
11
+ - **Optimization Algorithms**: Implement various optimization algorithms
12
+ - **Reliability Analysis**: Assess the reliability of designs under uncertainty
13
+ - **Uncertainty Optimization**: Optimize designs considering parameter uncertainty
14
+
15
+ ## Installation
16
+
17
+ ```bash
18
+ pip install .
19
+ ```
20
+
21
+ ## Dependencies
22
+
23
+ - numpy
24
+ - scipy
25
+ - pandas
26
+ - scikit-learn (for surrogate models)
27
+ - matplotlib (for visualization)
28
+
29
+ ## Usage Examples
30
+
31
+ ### Basic Optimization
32
+
33
+ ```python
34
+ from mdo import Problem, Parameter, Objective, Constraint
35
+ from mdo.optimization import GeneticAlgorithm
36
+
37
+ # Define parameters
38
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
39
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
40
+
41
+ # Define objective function
42
+ def objective_function(x):
43
+ return (x[0] - 0.5)**2 + (x[1] - 0.5)**2
44
+
45
+ obj = Objective('f', 'minimize')
46
+ obj.evaluate = objective_function
47
+
48
+ # Define constraint function
49
+ def constraint_function(x):
50
+ return x[0] + x[1] - 1.0
51
+
52
+ con = Constraint('g', 'inequality', upper_bound=0.0)
53
+ con.evaluate = constraint_function
54
+
55
+ # Create problem
56
+ problem = Problem([x1, x2], [obj], [con])
57
+
58
+ # Create optimizer
59
+ optimizer = GeneticAlgorithm(problem)
60
+
61
+ # Run optimization
62
+ result = optimizer.optimize()
63
+
64
+ print("Best point:", result.sample.values)
65
+ print("Objective value:", result.objectives[0])
66
+ ```
67
+
68
+ ### Surrogate Modeling
69
+
70
+ ```python
71
+ import numpy as np
+ from mdo import Problem, Parameter, Objective
72
+ from mdo.doe import LatinHypercube
73
+ from mdo.surrogate import Kriging
74
+ from mdo.core import Evaluator
75
+
76
+ # Define parameters and objective function
77
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
78
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
79
+
80
+ def objective_function(x):
81
+ return np.sin(2 * np.pi * x[0]) * np.cos(2 * np.pi * x[1])
82
+
83
+ obj = Objective('f', 'minimize')
84
+ obj.evaluate = objective_function
85
+
86
+ # Create problem
87
+ problem = Problem([x1, x2], [obj])
88
+
89
+ # Generate samples
90
+ doe = LatinHypercube(problem, n_samples=50)
91
+ samples = doe.generate()
92
+
93
+ # Evaluate samples
94
+ evaluator = Evaluator(problem)
95
+ results = evaluator.evaluate(samples)
96
+
97
+ # Extract X and y
98
+ X = [sample.values for sample in samples]
99
+ y = [result.objectives[0] for result in results]
100
+
101
+ # Train surrogate model
102
+ model = Kriging()
103
+ model.fit(X, y)
104
+
105
+ # Predict
106
+ print(model.predict([[0.25, 0.25]]))
107
+ ```
108
+
109
+ ### Sensitivity Analysis
110
+
111
+ ```python
112
+ from mdo import Problem, Parameter, Objective
113
+ from mdo.doe import LatinHypercube
114
+ from mdo.surrogate import Kriging
115
+ from mdo.sensitivity import SobolIndices
116
+ from mdo.core import Evaluator
117
+
118
+ # Define parameters and objective function
119
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
120
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
121
+ x3 = Parameter('x3', 0.5, bounds=[0, 1])
122
+
123
+ def objective_function(x):
124
+ return (x[0] - 0.5)**2 + 2*(x[1] - 0.5)**2 + 3*(x[2] - 0.5)**2
125
+
126
+ obj = Objective('f', 'minimize')
127
+ obj.evaluate = objective_function
128
+
129
+ # Create problem
130
+ problem = Problem([x1, x2, x3], [obj])
131
+
132
+ # Generate samples and evaluate
133
+ doe = LatinHypercube(problem, n_samples=100)
134
+ samples = doe.generate()
135
+ evaluator = Evaluator(problem)
136
+ results = evaluator.evaluate(samples)
137
+
138
+ # Train surrogate model
139
+ X = [sample.values for sample in samples]
140
+ y = [result.objectives[0] for result in results]
141
+ model = Kriging()
142
+ model.fit(X, y)
143
+
144
+ # Perform sensitivity analysis
145
+ sobol = SobolIndices(model, problem)
146
+ results = sobol.analyze()
147
+ print(results)
148
+ ```
149
+
150
+ ### Reliability Analysis
151
+
152
+ ```python
153
+ from mdo import Problem, Parameter, Objective
154
+ from mdo.doe import LatinHypercube
155
+ from mdo.surrogate import Kriging
156
+ from mdo.reliability import MonteCarlo
157
+ from mdo.core import Evaluator
158
+
159
+ # Define parameters and limit state function
160
+ x1 = Parameter('x1', 0.5, bounds=[0, 1])
161
+ x2 = Parameter('x2', 0.5, bounds=[0, 1])
162
+
163
+ def limit_state_function(x):
164
+ return (x[0] - 0.7)**2 + (x[1] - 0.7)**2 - 0.1
165
+
166
+ obj = Objective('g', 'minimize')
167
+ obj.evaluate = limit_state_function
168
+
169
+ # Create problem
170
+ problem = Problem([x1, x2], [obj])
171
+
172
+ # Generate samples and evaluate
173
+ doe = LatinHypercube(problem, n_samples=50)
174
+ samples = doe.generate()
175
+ evaluator = Evaluator(problem)
176
+ results = evaluator.evaluate(samples)
177
+
178
+ # Train surrogate model
179
+ X = [sample.values for sample in samples]
180
+ y = [result.objectives[0] for result in results]
181
+ model = Kriging()
182
+ model.fit(X, y)
183
+
184
+ # Perform reliability analysis
185
+ monte_carlo = MonteCarlo(problem, model, n_samples=10000)
186
+ results = monte_carlo.analyze()
187
+ print(results)
188
+ ```
189
+
190
+ ## Modules
191
+
192
+ - **core**: Core functionality for parameter management, problem definition, and evaluation
193
+ - **doe**: Design of Experiments methods
194
+ - **surrogate**: Surrogate models for function approximation
195
+ - **sensitivity**: Sensitivity analysis methods
196
+ - **optimization**: Optimization algorithms
197
+ - **reliability**: Reliability analysis methods
198
+ - **uncertainty**: Uncertainty optimization methods
199
+ - **utils**: Utility functions for parallel computing and visualization
200
+ - **examples**: Usage examples
201
+ - **tests**: Test cases
202
+
203
+ ## Contributing
204
+
205
+ Contributions are welcome! Please feel free to submit a Pull Request.
206
+
207
+ ## License
208
+
209
+ MIT License
@@ -0,0 +1,45 @@
1
+ """MDO (Multi-Disciplinary Optimization) library"""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ from .core import Problem, Parameter, Objective, Constraint, Evaluator, Sample, Result
6
+ from .doe import DOE, FullFactorial, LatinHypercube, CentralComposite
7
+ from .surrogate import SurrogateModel, PolynomialRegression, Kriging, RBF
8
+ from .sensitivity import SensitivityAnalysis, SobolIndices, MorrisMethod, FAST
9
+ from .optimization import Optimizer, GradientDescent, GeneticAlgorithm, ParticleSwarmOptimization, BayesianOptimization
10
+ from .reliability import ReliabilityAnalysis, MonteCarlo, FORM, SORM
11
+ from .uncertainty import UncertaintyOptimizer, RobustOptimization, ReliabilityBasedOptimization
12
+
13
+ __all__ = [
14
+ "Problem",
15
+ "Parameter",
16
+ "Objective",
17
+ "Constraint",
18
+ "Evaluator",
19
+ "Sample",
20
+ "Result",
21
+ "DOE",
22
+ "FullFactorial",
23
+ "LatinHypercube",
24
+ "CentralComposite",
25
+ "SurrogateModel",
26
+ "PolynomialRegression",
27
+ "Kriging",
28
+ "RBF",
29
+ "SensitivityAnalysis",
30
+ "SobolIndices",
31
+ "MorrisMethod",
32
+ "FAST",
33
+ "Optimizer",
34
+ "GradientDescent",
35
+ "GeneticAlgorithm",
36
+ "ParticleSwarmOptimization",
37
+ "BayesianOptimization",
38
+ "ReliabilityAnalysis",
39
+ "MonteCarlo",
40
+ "FORM",
41
+ "SORM",
42
+ "UncertaintyOptimizer",
43
+ "RobustOptimization",
44
+ "ReliabilityBasedOptimization",
45
+ ]
@@ -0,0 +1,18 @@
1
+ """Core module for MDO library"""
2
+
3
+ from .parameter import Parameter
4
+ from .objective import Objective
5
+ from .constraint import Constraint
6
+ from .problem import Problem
7
+ from .evaluator import Evaluator
8
+ from .result import Sample, Result
9
+
10
+ __all__ = [
11
+ "Parameter",
12
+ "Objective",
13
+ "Constraint",
14
+ "Problem",
15
+ "Evaluator",
16
+ "Sample",
17
+ "Result",
18
+ ]
@@ -0,0 +1,29 @@
1
"""Constraint module"""

class Constraint:
    """A single constraint of an optimization problem.

    Instances hold only metadata (name, type, bounds); the actual
    constraint function is supplied by subclassing, or by assigning a
    callable to ``evaluate`` (as the package examples do).
    """

    def __init__(self, name, type='inequality', upper_bound=0.0, lower_bound=None):
        """Create a constraint definition.

        Args:
            name (str): Constraint name.
            type (str, optional): Constraint type, 'inequality' or
                'equality'. Defaults to 'inequality'.
            upper_bound (float, optional): Upper bound used by inequality
                constraints. Defaults to 0.0.
            lower_bound (float, optional): Lower bound used by equality
                constraints. Defaults to None.
        """
        self.name = name
        self.type = type
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def evaluate(self, x):
        """Compute the constraint value at the design point *x*.

        Args:
            x (list): Design variables.

        Returns:
            float: Constraint value.

        Raises:
            NotImplementedError: Always; a subclass (or an assigned
                callable) must provide the implementation.
        """
        raise NotImplementedError("Subclass must implement evaluate method")
@@ -0,0 +1,60 @@
1
"""Sample evaluation module"""

from concurrent.futures import ThreadPoolExecutor

class Evaluator:
    """Evaluates samples against a problem's objectives and constraints."""

    def __init__(self, problem, parallel=False):
        """Initialize an evaluator.

        Args:
            problem (Problem): Problem whose objectives and constraints
                are evaluated.
            parallel (bool, optional): Whether to evaluate samples
                concurrently using a thread pool. Defaults to False.
        """
        self.problem = problem
        self.parallel = parallel

    def evaluate(self, samples):
        """Evaluate a batch of samples.

        Args:
            samples (list): List of Sample objects.

        Returns:
            list: One Result per sample, in the same order as *samples*
                (``executor.map`` preserves input order, matching the
                serial path).
        """
        # NOTE: the original imported Result here but never used it;
        # the import lives in _evaluate_sample, where it is needed.
        if self.parallel:
            with ThreadPoolExecutor() as executor:
                return list(executor.map(self._evaluate_sample, samples))
        return [self._evaluate_sample(sample) for sample in samples]

    def _evaluate_sample(self, sample):
        """Evaluate a single sample.

        Args:
            sample (Sample): Sample to evaluate.

        Returns:
            Result: Objective and constraint values for *sample*.
        """
        # Imported lazily to avoid a circular import with .result.
        from .result import Result

        objectives = [obj.evaluate(sample.values)
                      for obj in self.problem.objectives]
        constraints = [con.evaluate(sample.values)
                       for con in self.problem.constraints]
        return Result(sample, objectives, constraints)
@@ -0,0 +1,27 @@
1
"""Objective function module"""

class Objective:
    """A single objective (goal) of an optimization problem.

    Instances carry only metadata (name, sense, weight); the objective
    function itself comes from subclassing, or from assigning a callable
    to ``evaluate`` (as the package examples do).
    """

    def __init__(self, name, sense='minimize', weight=1.0):
        """Create an objective definition.

        Args:
            name (str): Objective name.
            sense (str, optional): Optimization sense, 'minimize' or
                'maximize'. Defaults to 'minimize'.
            weight (float, optional): Relative weight of this objective.
                Defaults to 1.0.
        """
        self.weight = weight
        self.sense = sense
        self.name = name

    def evaluate(self, x):
        """Compute the objective value at the design point *x*.

        Args:
            x (list): Design variables.

        Returns:
            float: Objective value.

        Raises:
            NotImplementedError: Always; a subclass (or an assigned
                callable) must provide the implementation.
        """
        raise NotImplementedError("Subclass must implement evaluate method")
@@ -0,0 +1,41 @@
1
"""Parameter management module"""

class Parameter:
    """Parameter class for optimization problems"""

    def __init__(self, name, value, bounds=None, is_continuous=True, is_integer=False):
        """Initialize a parameter

        Args:
            name (str): Parameter name
            value (float): Initial value
            bounds (tuple, optional): (lower, upper) bounds. Defaults to None.
            is_continuous (bool, optional): Whether the parameter is continuous. Defaults to True.
            is_integer (bool, optional): Whether the parameter is integer. Defaults to False.
        """
        self.name = name
        self.value = value
        self.bounds = bounds
        self.is_continuous = is_continuous
        self.is_integer = is_integer

    def validate(self, value):
        """Validate a parameter value.

        A value is valid when it lies within ``bounds`` (if any) and,
        for integer parameters, represents a whole number.

        Args:
            value (float): Value to validate

        Returns:
            bool: True if valid, False otherwise
        """
        # Check if value is within bounds
        if self.bounds:
            lower, upper = self.bounds
            if value < lower or value > upper:
                return False

        # Integer parameters must hold a whole number.  The original
        # code called value.is_integer() directly, which raised
        # AttributeError for any type other than float; treat values
        # that cannot be interpreted as a whole number as invalid.
        if self.is_integer and not isinstance(value, int):
            try:
                if not float(value).is_integer():
                    return False
            except (TypeError, ValueError):
                return False

        return True