pydflt-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. pydflt-0.1.0/LICENSE.md +9 -0
  2. pydflt-0.1.0/PKG-INFO +82 -0
  3. pydflt-0.1.0/README.md +53 -0
  4. pydflt-0.1.0/pyproject.toml +95 -0
  5. pydflt-0.1.0/setup.cfg +4 -0
  6. pydflt-0.1.0/src/__init__.py +23 -0
  7. pydflt-0.1.0/src/abstract_models/__init__.py +4 -0
  8. pydflt-0.1.0/src/abstract_models/base.py +263 -0
  9. pydflt-0.1.0/src/abstract_models/cvxpy_diff.py +155 -0
  10. pydflt-0.1.0/src/abstract_models/grbpy.py +278 -0
  11. pydflt-0.1.0/src/abstract_models/grbpy_two_stage.py +148 -0
  12. pydflt-0.1.0/src/concrete_models/__init__.py +6 -0
  13. pydflt-0.1.0/src/concrete_models/cvxpy_knapsack.py +111 -0
  14. pydflt-0.1.0/src/concrete_models/grbpy_knapsack.py +180 -0
  15. pydflt-0.1.0/src/concrete_models/grbpy_shortest_path.py +205 -0
  16. pydflt-0.1.0/src/concrete_models/grbpy_tsp.py +120 -0
  17. pydflt-0.1.0/src/concrete_models/grbpy_two_stage_knapsack.py +186 -0
  18. pydflt-0.1.0/src/concrete_models/grbpy_two_stage_weighted_set_multi_cover.py +281 -0
  19. pydflt-0.1.0/src/dataset.py +121 -0
  20. pydflt-0.1.0/src/decision_makers/__init__.py +6 -0
  21. pydflt-0.1.0/src/decision_makers/base.py +793 -0
  22. pydflt-0.1.0/src/decision_makers/differentiable_decision_maker.py +367 -0
  23. pydflt-0.1.0/src/decision_makers/lancer_decision_maker.py +358 -0
  24. pydflt-0.1.0/src/decision_makers/sfge_decision_maker.py +204 -0
  25. pydflt-0.1.0/src/generate_data_functions/__init__.py +7 -0
  26. pydflt-0.1.0/src/generate_data_functions/generate_data_knapsack.py +53 -0
  27. pydflt-0.1.0/src/generate_data_functions/generate_data_shortest_path.py +45 -0
  28. pydflt-0.1.0/src/generate_data_functions/generate_data_tsp.py +46 -0
  29. pydflt-0.1.0/src/generate_data_functions/generate_data_two_stage_knapsack.py +49 -0
  30. pydflt-0.1.0/src/generate_data_functions/generate_data_wsmc.py +65 -0
  31. pydflt-0.1.0/src/logger.py +165 -0
  32. pydflt-0.1.0/src/noisifier.py +317 -0
  33. pydflt-0.1.0/src/predictors/__init__.py +9 -0
  34. pydflt-0.1.0/src/predictors/base.py +132 -0
  35. pydflt-0.1.0/src/predictors/multilayer_normal.py +166 -0
  36. pydflt-0.1.0/src/predictors/multilayer_perceptron.py +133 -0
  37. pydflt-0.1.0/src/predictors/multilayer_sample.py +80 -0
  38. pydflt-0.1.0/src/predictors/truncated_normal.py +148 -0
  39. pydflt-0.1.0/src/problem.py +496 -0
  40. pydflt-0.1.0/src/pydflt.egg-info/PKG-INFO +82 -0
  41. pydflt-0.1.0/src/pydflt.egg-info/SOURCES.txt +64 -0
  42. pydflt-0.1.0/src/pydflt.egg-info/dependency_links.txt +1 -0
  43. pydflt-0.1.0/src/pydflt.egg-info/requires.txt +19 -0
  44. pydflt-0.1.0/src/pydflt.egg-info/top_level.txt +13 -0
  45. pydflt-0.1.0/src/registries/__init__.py +0 -0
  46. pydflt-0.1.0/src/registries/data.py +160 -0
  47. pydflt-0.1.0/src/registries/decision_makers.py +112 -0
  48. pydflt-0.1.0/src/registries/models.py +150 -0
  49. pydflt-0.1.0/src/runner.py +258 -0
  50. pydflt-0.1.0/src/utils/__init__.py +0 -0
  51. pydflt-0.1.0/src/utils/experiments.py +72 -0
  52. pydflt-0.1.0/src/utils/load.py +84 -0
  53. pydflt-0.1.0/src/utils/optuna.py +111 -0
  54. pydflt-0.1.0/src/utils/reproducability.py +24 -0
  55. pydflt-0.1.0/tests/test_decision_maker_settings.py +61 -0
  56. pydflt-0.1.0/tests/test_decision_makers.py +187 -0
  57. pydflt-0.1.0/tests/test_knapsack.py +84 -0
  58. pydflt-0.1.0/tests/test_problem_class.py +109 -0
  59. pydflt-0.1.0/tests/test_quadratic_proxy.py +50 -0
  60. pydflt-0.1.0/tests/test_registries.py +105 -0
  61. pydflt-0.1.0/tests/test_runner.py +65 -0
  62. pydflt-0.1.0/tests/test_sfge_different_settings.py +182 -0
  63. pydflt-0.1.0/tests/test_shortest_path.py +72 -0
  64. pydflt-0.1.0/tests/test_tsp.py +87 -0
  65. pydflt-0.1.0/tests/test_two_stage_knapsack.py +87 -0
  66. pydflt-0.1.0/tests/test_wsmc.py +76 -0
pydflt-0.1.0/LICENSE.md ADDED
@@ -0,0 +1,9 @@
+ MIT License
+
+ Copyright (c) 2025 Noah Schutte, Kim van den Houten, Grigorii Veviurko
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
pydflt-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,82 @@
+ Metadata-Version: 2.4
+ Name: pydflt
+ Version: 0.1.0
+ Summary: PyDFLT
+ Author-email: Noah Schutte <N.J.Schutte@tudelft.nl>, Kim van den Houten <K.C.vandenhouten@tudelft.nl>, Grigorii Veviurko <G.Veviurko@tudelft.nl>
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE.md
+ Requires-Dist: torch>=2.7.1
+ Requires-Dist: cvxpy>=1.4.1
+ Requires-Dist: diffcp>=1.0.18
+ Requires-Dist: cvxpylayers>=0.1.6
+ Requires-Dist: numpy>=1.26.4
+ Requires-Dist: ortools>=9.12.4544
+ Requires-Dist: gurobipy>=11.0.3
+ Requires-Dist: pyepo~=0.3.9
+ Requires-Dist: pyyaml>=6.0.2
+ Requires-Dist: tomli>=2.0.1
+ Requires-Dist: wandb>=0.19.11
+ Requires-Dist: pandas>=2.2.3
+ Requires-Dist: matplotlib>=3.6.3
+ Requires-Dist: optuna>=4.2.1
+ Requires-Dist: optuna-dashboard>=0.19.0
+ Requires-Dist: scikit-learn>=1.6.1
+ Requires-Dist: scipy>=1.13.1
+ Requires-Dist: joblib>=1.4.2
+ Requires-Dist: ipywidgets>=8.1.7
+ Dynamic: license-file
+
+ [![CI](https://github.com/PyDFLT/PyDFLT/actions/workflows/CI.yml/badge.svg)](https://github.com/PyDFLT/PyDFLT/actions/workflows/CI.yml)
+
+ ![alt text](https://github.com/PyDFLT/PyDFLT/blob/main/images/logo.png?raw=true)
+
+
+ ## A Python-based Decision-Focused Learning Toolbox
+ **PyDFLT** is designed to help researchers apply and develop Decision-Focused Learning (DFL) tools in Python. It uses **CVXPYLayers** [1] for differentiable models and **PyEPO** [2] for models with a linear objective, and it provides implementations of **SFGE** [3] and **Lancer** [4]. To help with research, it supports Weights & Biases (https://wandb.ai/) and Optuna (https://optuna.org).
+ In the near future, we will publish PyDFLT on the Python Package Index, after which you can install it by running:
+
+ `pip install pydflt`
+
+ ### Documentation
+
+ Documentation can be found at https://pydflt.github.io/documentation.
+
+ ### Contributing
+ If you want to contribute, you can fork the repository and send a pull request. We use **uv** (https://github.com/astral-sh/uv) for installation and testing. Install uv [here](https://docs.astral.sh/uv/getting-started/installation/). To create the virtual environment:
+
+ `uv sync --all-extras --all-groups`
+
+ Note that your IDE might create the environment automatically, but it only installs the basic package dependencies. Make sure to run the command above to install all dependencies.
+
+ #### Before committing
+
+ We use **pre-commit** (https://pre-commit.com/) and **pytest** to ensure the code is consistent and functioning properly. Both are part of the dev dependencies and are therefore installed in the virtual environment. Before committing, make sure to run both:
+
+ `uv run pre-commit run --all-files`
+
+ `uv run pytest`
+
+ #### Documentation
+
+ We use **Sphinx** (https://www.sphinx-doc.org/en/master/) for the documentation. The Makefile in this directory can be used to build the documentation.
+
+ You can run `uv run make html --directory=docs` from the project root as well, which will build the documentation in exactly the same way as it will be displayed on the website.
+
+ Then open docs/build/html/api/src.html in a browser.
+
+
+ ### Using Weights & Biases
+ If you want to use Weights & Biases, either set an environment variable named `WANDB_KEY` with your key,
+ or create a `.env` file with `WANDB_KEY = 'your-key-here'`.
+
+
+ ### References
+
+ [1] Akshay Agrawal, Brandon Amos, Shane Barratt, Stephen Boyd, Steven Diamond, and J. Zico Kolter. Differentiable convex optimization layers. Advances in Neural Information Processing Systems, 32, 2019. doi:10.48550/arXiv.1910.12430.
+
+ [2] Bo Tang and Elias B. Khalil. PyEPO: a PyTorch-based end-to-end predict-then-optimize library for linear and integer programming. Mathematical Programming Computation, 16(3):297–335, 2024. doi:10.1007/s12532-024-00255-x.
+
+ [3] Mattia Silvestri, Senne Berden, Jayanta Mandi, Ali İrfan Mahmutoğulları, Maxime Mulamba, Allegra De Filippo, Tias Guns, and Michele Lombardi. Score function gradient estimation to widen the applicability of decision-focused learning. CoRR, abs/2307.05213, 2023. doi:10.48550/arXiv.2307.05213.
+
+ [4] Arman Zharmagambetov, Brandon Amos, Aaron Ferber, Taoan Huang, Bistra Dilkina, and Yuandong Tian. Landscape surrogate: Learning decision losses for mathematical optimization under partial information. Advances in Neural Information Processing Systems, 36:27332–27350, 2023. doi:10.48550/arXiv.2307.08964.
pydflt-0.1.0/README.md ADDED
@@ -0,0 +1,53 @@
+ [![CI](https://github.com/PyDFLT/PyDFLT/actions/workflows/CI.yml/badge.svg)](https://github.com/PyDFLT/PyDFLT/actions/workflows/CI.yml)
+
+ ![alt text](https://github.com/PyDFLT/PyDFLT/blob/main/images/logo.png?raw=true)
+
+
+ ## A Python-based Decision-Focused Learning Toolbox
+ **PyDFLT** is designed to help researchers apply and develop Decision-Focused Learning (DFL) tools in Python. It uses **CVXPYLayers** [1] for differentiable models and **PyEPO** [2] for models with a linear objective, and it provides implementations of **SFGE** [3] and **Lancer** [4]. To help with research, it supports Weights & Biases (https://wandb.ai/) and Optuna (https://optuna.org).
+ In the near future, we will publish PyDFLT on the Python Package Index, after which you can install it by running:
+
+ `pip install pydflt`
+
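+ To give a flavor of the differentiable-optimization building block mentioned above, here is a minimal, self-contained CVXPYLayers sketch. It is independent of PyDFLT's own API (the problem and all names below are illustrative): it projects a predicted vector onto the probability simplex and backpropagates a decision loss through the solver.
+
+ ```python
+ import cvxpy as cp
+ import torch
+ from cvxpylayers.torch import CvxpyLayer
+
+ # Differentiable projection onto the probability simplex.
+ n = 3
+ w = cp.Parameter(n)  # predicted parameters enter the problem here
+ x = cp.Variable(n)
+ problem = cp.Problem(cp.Minimize(cp.sum_squares(x - w)), [x >= 0, cp.sum(x) == 1])
+ layer = CvxpyLayer(problem, parameters=[w], variables=[x])
+
+ w_pred = torch.randn(n, requires_grad=True)
+ (x_star,) = layer(w_pred)  # solve inside the computation graph
+ true_values = torch.tensor([1.0, 2.0, 3.0])
+ decision_loss = -(true_values * x_star).sum()  # decision quality under the true parameters
+ decision_loss.backward()  # gradients flow through the solver back to w_pred
+ ```
+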
+ ### Documentation
+
+ Documentation can be found at https://pydflt.github.io/documentation.
+
+ ### Contributing
+ If you want to contribute, you can fork the repository and send a pull request. We use **uv** (https://github.com/astral-sh/uv) for installation and testing. Install uv [here](https://docs.astral.sh/uv/getting-started/installation/). To create the virtual environment:
+
+ `uv sync --all-extras --all-groups`
+
+ Note that your IDE might create the environment automatically, but it only installs the basic package dependencies. Make sure to run the command above to install all dependencies.
+
+ #### Before committing
+
+ We use **pre-commit** (https://pre-commit.com/) and **pytest** to ensure the code is consistent and functioning properly. Both are part of the dev dependencies and are therefore installed in the virtual environment. Before committing, make sure to run both:
+
+ `uv run pre-commit run --all-files`
+
+ `uv run pytest`
+
+ #### Documentation
+
+ We use **Sphinx** (https://www.sphinx-doc.org/en/master/) for the documentation. The Makefile in this directory can be used to build the documentation.
+
+ You can run `uv run make html --directory=docs` from the project root as well, which will build the documentation in exactly the same way as it will be displayed on the website.
+
+ Then open docs/build/html/api/src.html in a browser.
+
+
+ ### Using Weights & Biases
+ If you want to use Weights & Biases, either set an environment variable named `WANDB_KEY` with your key,
+ or create a `.env` file with `WANDB_KEY = 'your-key-here'`.
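+
+ A minimal sketch of how such a key could be picked up (the `load_wandb_key` helper below is illustrative, not part of PyDFLT):
+
+ ```python
+ import os
+
+ import wandb
+
+
+ def load_wandb_key(env_file: str = ".env") -> str | None:
+     """Return WANDB_KEY from the environment, falling back to a .env file."""
+     key = os.environ.get("WANDB_KEY")
+     if key is None and os.path.exists(env_file):
+         for line in open(env_file):
+             name, _, value = line.partition("=")
+             if name.strip() == "WANDB_KEY":
+                 key = value.strip().strip("'\"")
+     return key
+
+
+ key = load_wandb_key()
+ if key:
+     wandb.login(key=key)  # wandb.login accepts an API key directly
+ ```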
+
+
+ ### References
+
+ [1] Akshay Agrawal, Brandon Amos, Shane Barratt, Stephen Boyd, Steven Diamond, and J. Zico Kolter. Differentiable convex optimization layers. Advances in Neural Information Processing Systems, 32, 2019. doi:10.48550/arXiv.1910.12430.
+
+ [2] Bo Tang and Elias B. Khalil. PyEPO: a PyTorch-based end-to-end predict-then-optimize library for linear and integer programming. Mathematical Programming Computation, 16(3):297–335, 2024. doi:10.1007/s12532-024-00255-x.
+
+ [3] Mattia Silvestri, Senne Berden, Jayanta Mandi, Ali İrfan Mahmutoğulları, Maxime Mulamba, Allegra De Filippo, Tias Guns, and Michele Lombardi. Score function gradient estimation to widen the applicability of decision-focused learning. CoRR, abs/2307.05213, 2023. doi:10.48550/arXiv.2307.05213.
+
+ [4] Arman Zharmagambetov, Brandon Amos, Aaron Ferber, Taoan Huang, Bistra Dilkina, and Yuandong Tian. Landscape surrogate: Learning decision losses for mathematical optimization under partial information. Advances in Neural Information Processing Systems, 36:27332–27350, 2023. doi:10.48550/arXiv.2307.08964.
pydflt-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,95 @@
+ [project]
+ name = "pydflt"
+ version = "0.1.0"
+ description = "PyDFLT"
+ authors = [
+     { name = "Noah Schutte", email = "N.J.Schutte@tudelft.nl"},
+     { name = "Kim van den Houten", email = "K.C.vandenhouten@tudelft.nl"},
+     { name = "Grigorii Veviurko", email = "G.Veviurko@tudelft.nl"}
+ ]
+ readme = "README.md"
+ requires-python = ">=3.10"
+ dependencies = [
+     "torch>=2.7.1",
+     "cvxpy>=1.4.1",
+     "diffcp>=1.0.18",
+     "cvxpylayers>=0.1.6",
+     "numpy>=1.26.4",
+     "ortools>=9.12.4544",
+     "gurobipy>=11.0.3",
+     "pyepo~=0.3.9",
+     "pyyaml>=6.0.2",  # PyYAML is the same package
+     "tomli>=2.0.1",
+     "wandb>=0.19.11",
+     "pandas>=2.2.3",
+     "matplotlib>=3.6.3",
+     "optuna>=4.2.1",
+     "optuna-dashboard>=0.19.0",
+     "scikit-learn>=1.6.1",
+     "scipy>=1.13.1",
+     "joblib>=1.4.2",
+     "ipywidgets>=8.1.7"
+ ]
+
+ [dependency-groups]
+ dev = [
+     "pytest>=8.3.2",
+     "pytest-cov>=5.0.0",
+     "nbmake>=1.5.5",
+     "codecov>=2.1.13",
+     "pre-commit>=3.8.0",
+     "ruff>=0.12.4",
+ ]
+
+ docs = [
+     "sphinx>=7.4.7",
+     "nbsphinx>=0.9.5",
+     "numpydoc>=1.8.0",
+     "sphinx-immaterial>=0.12.2",
+     "myst-parser>=2.0.0",
+     "pypandoc-binary>=1.12",
+ ]
+
+ examples = [
+     "jupyter>=1.0.0",
+ ]
+
+ [tool.uv]
+ required-version = "0.6.16"
+ default-groups = ["dev", "docs"]
+
+ [tool.black]
+ line-length = 160
+
+ [tool.ruff]
+ line-length = 160
+ extend-include = ["*.ipynb"]
+
+
+ [tool.ruff.lint]
+ select = [
+     "E",  # pycodestyle errors
+     "I",  # isort
+     "W",  # pycodestyle warnings
+     "F",  # pyflakes
+     "C",  # flake8-comprehensions
+     "B",  # flake8-bugbear
+ ]
+ ignore = [
+     "C901",  # too complex
+ ]
+
+ [tool.ruff.lint.isort]
+ known-third-party = ["numpy", "wandb"]  # can cause issues with pre-commit otherwise
+
+ [tool.pytest.ini_options]
+ addopts = "--cov=. --cov-report=xml --nbmake"
+ testpaths = [
+     "tests",
+     "examples",
+ ]
+
+
+ [build-system]
+ requires = ["setuptools >= 61.0"]
+ build-backend = "setuptools.build_meta"
pydflt-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
pydflt-0.1.0/src/__init__.py ADDED
@@ -0,0 +1,23 @@
+ import torch
+
+ from src.registries.data import data_registry as data_registry
+ from src.registries.data import get_data as get_data
+ from src.registries.data import register_data as register_data
+ from src.registries.decision_makers import (
+     decision_maker_registry as decision_maker_registry,
+ )
+ from src.registries.decision_makers import make_decision_maker as make_decision_maker
+ from src.registries.decision_makers import (
+     register_decision_maker as register_decision_maker,
+ )
+ from src.registries.models import make_model as make_model
+ from src.registries.models import model_registry as model_registry
+ from src.registries.models import register_model as register_model
+
+ from .dataset import DFLDataset as DFLDataset
+ from .logger import Logger as Logger
+ from .noisifier import Noisifier as Noisifier
+ from .problem import Problem as Problem
+ from .runner import Runner as Runner
+
+ torch.set_default_dtype(torch.float32)
pydflt-0.1.0/src/abstract_models/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .base import OptimizationModel as OptimizationModel
+ from .cvxpy_diff import CVXPYDiffModel as CVXPYDiffModel
+ from .grbpy import GRBPYModel as GRBPYModel
+ from .grbpy_two_stage import GRBPYTwoStageModel as GRBPYTwoStageModel
pydflt-0.1.0/src/abstract_models/base.py ADDED
@@ -0,0 +1,263 @@
+ import copy
+ import inspect
+ from abc import ABC, abstractmethod
+ from typing import Literal
+
+ import numpy as np
+ import torch
+
+ MAX = "MAX"
+ MIN = "MIN"
+
+
+ class OptimizationModel(ABC):
+     """
+     Base class that specifies the interface for all optimization models.
+     To implement your own model, it is recommended to inherit from one of the children of this class,
+     e.g., CVXPYModel or GRBPYModel.
+
+     Attributes:
+         var_names (list[str]): Sorted list of decision variable names.
+         var_shapes (dict[str, tuple[int, ...]]): Dictionary mapping decision variable names to their shapes.
+         param_to_predict_names (list[str]): Sorted list of parameter names that need to be predicted.
+         param_to_predict_shapes (dict[str, tuple[int, ...]]): Dictionary mapping parameters to predict to their shapes.
+         extra_param_names (list[str]): Sorted list of additional parameter names that change per sample but are known.
+         extra_param_shapes (dict[str, tuple[int, ...]]): Dictionary mapping extra parameters to their shapes.
+         all_param_names (list[str]): Concatenation of `param_to_predict_names` and `extra_param_names`.
+         num_predictions (int): Total number of elements across all parameters to be predicted.
+         num_vars (int): Total number of elements across all decision variables.
+         num_params (int): Total number of elements across all parameters.
+         model_sense (str): Specifies whether the model minimizes ('MIN') or maximizes ('MAX').
+         init_arguments (dict[str, Any]): Stores the initial arguments used to create this model instance,
+             used for creating variants or copies.
+     """
+
+     def __init__(
+         self,
+         var_shapes: dict[str, tuple[int, ...]],
+         param_to_predict_shapes: dict[str, tuple[int, ...]],
+         model_sense: Literal["MIN", "MAX"],
+         extra_param_shapes: dict[str, tuple[int, ...]] | None = None,
+         num_scenarios: int = 1,
+     ) -> None:
+         """
+         Initializes the OptimizationModel.
+
+         Args:
+             var_shapes (dict[str, tuple[int, ...]]): A dictionary specifying the names and shapes of
+                 decision variables (e.g., {'decision': (10,)}).
+             param_to_predict_shapes (dict[str, tuple[int, ...]]): A dictionary specifying the names and shapes of
+                 parameters that must be provided prior to
+                 running optimization.
+             model_sense (str): Specifies whether the model minimizes ('MIN') or maximizes ('MAX').
+                 Must be either 'MIN' or 'MAX'.
+             extra_param_shapes (dict[str, tuple[int, ...]] | None): An optional dictionary specifying additional
+                 parameters that change from sample to sample
+                 but are known.
+             num_scenarios (int): The number of scenarios for multi-scenario models. Defaults to 1.
+         """
+
+         assert model_sense.upper() in [
+             MIN,
+             MAX,
+         ], f"model_sense must be {MIN} for minimization or {MAX} for maximization!"
+
+         # We store the init_arguments from the child class, so we can create model variants with the same parameters
+         self.init_arguments = {
+             name: getattr(self, name) for name in inspect.signature(type(self).__init__).parameters if name != "self" and hasattr(self, name)
+         }
+
+         # Parse and save input arguments
+         self.var_names = sorted(var_shapes.keys())
+         self.var_shapes = var_shapes
+
+         self.param_to_predict_names = sorted(param_to_predict_shapes.keys())
+         self.param_to_predict_shapes = param_to_predict_shapes
+         extra_param_shapes = extra_param_shapes or {}
+         self.extra_param_names = sorted(extra_param_shapes.keys())
+         self.extra_param_shapes = extra_param_shapes
+         self.all_param_names = self.param_to_predict_names + self.extra_param_names
+
+         self.num_predictions = np.sum([np.prod(self.param_to_predict_shapes[name]) for name in self.param_to_predict_names])
+         self.num_vars = sum([np.prod(shape) for key, shape in var_shapes.items()])
+         self.num_params = sum([np.prod(shape) for shape in self.param_to_predict_shapes.values()])
+
+         self.model_sense = model_sense
+
+     @property
+     def model_sense_int(self) -> int:
+         """
+         Returns the model sense as an integer: 1 for minimization ('MIN') and -1 for maximization ('MAX').
+
+         Returns:
+             int: 1 if model_sense is 'MIN', -1 if model_sense is 'MAX'.
+         """
+         if self.model_sense == MIN:
+             return 1
+         elif self.model_sense == MAX:
+             return -1
+         else:
+             raise NotImplementedError
+
+     @abstractmethod
+     def solve_batch(self, data_batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
+         """
+         Runs the optimization for a batch of data and computes the optimal decisions.
+         The `data_batch` must include all parameters specified in `param_to_predict_shapes`
+         and `extra_param_shapes`.
+
+         Args:
+             data_batch (dict[str, torch.Tensor]): A dictionary containing input data for the optimization,
+                 including predicted parameters and extra parameters.
+
+         Returns:
+             dict[str, torch.Tensor]: A dictionary containing the computed decision variables for the batch,
+                 matching the keys in `var_shapes`.
+         """
+         raise NotImplementedError("Subclasses must implement solve_batch method.")
+
+     @abstractmethod
+     def get_objective(
+         self,
+         data_batch: dict[str, torch.Tensor],
+         decisions_batch: dict[str, torch.Tensor],
+         predictions_batch: dict[str, torch.Tensor] | None = None,
+     ) -> torch.Tensor:
+         """
+         Computes the objective function value achieved by `decisions_batch` for the given `data_batch`.
+
+         Args:
+             data_batch (dict[str, torch.Tensor]): A dictionary containing input data for the optimization.
+             decisions_batch (dict[str, torch.Tensor]): A dictionary containing the decision variables.
+             predictions_batch (dict[str, torch.Tensor] | None): An optional dictionary containing the
+                 predictions for relevant parameters, if applicable.
+                 Defaults to None.
+
+         Returns:
+             torch.Tensor: A tensor representing the objective function value(s) for the batch.
+         """
+         raise NotImplementedError("Subclasses must implement get_objective method.")
+
+     def get_penalty(self, x_u: torch.Tensor, data_batch: dict[str, torch.Tensor]) -> torch.Tensor:
+         """
+         Computes the constraint violation (penalty) for a given point `x_u`.
+         This method is used internally by some algorithms for constraint handling.
+
+         Args:
+             x_u (torch.Tensor): The point (decision variables) for which to compute the penalty.
+             data_batch (dict[str, torch.Tensor]): A dictionary containing input data.
+
+         Returns:
+             torch.Tensor: A tensor representing the penalty value(s).
+         """
+         raise NotImplementedError
+
+     def get_reward_gradient(
+         self,
+         decisions_batch: dict[str, torch.Tensor],
+         data_batch: dict[str, torch.Tensor],
+     ) -> torch.Tensor:
+         """
+         Computes the gradient of the reward (objective) function with respect to the solution.
+
+         Args:
+             decisions_batch (dict[str, torch.Tensor]): A dictionary containing the decision variables.
+             data_batch (dict[str, torch.Tensor]): A dictionary containing input data.
+
+         Returns:
+             torch.Tensor: A tensor representing the gradient of the reward function.
+         """
+         raise NotImplementedError
+
+     def create_quadratic_variant(self) -> "OptimizationModel":
+         """
+         Creates a quadratic proxy (QP) variant of the problem.
+         The new model will have the same constraints as the original, but its objective will be
+         to minimize the squared Euclidean distance (L2 norm) between the decision variables `x`
+         and predicted target vector `w` (i.e., minimize ||x-w||^2_2).
+
+         Returns:
+             OptimizationModel: A new instance of the model representing the QP variant.
+         """
+         raise NotImplementedError
+
+     def evaluate(
+         self,
+         data_batch: dict[str, torch.Tensor],
+         decisions_batch: dict[str, torch.Tensor],
+         predictions_batch: dict[str, torch.Tensor] | None = None,
+         epsilon: float = 1e-5,
+         metrics: list[str] | None = None,
+     ) -> dict[str, np.ndarray]:
+         """
+         Evaluates a batch of decisions by computing various metrics such as objective value,
+         absolute regret, relative regret, and symmetric relative regret.
+
+         Args:
+             data_batch (dict[str, torch.Tensor]): A dictionary containing input data for the evaluation.
+                 Expected to include 'objective_optimal' if regret is to be computed.
+             decisions_batch (dict[str, torch.Tensor]): A dictionary containing the computed decision variables.
+             predictions_batch (dict[str, torch.Tensor] | None): An optional dictionary containing the
+                 predictions for relevant parameters. Defaults to None.
+             epsilon (float): A small value added to the denominator in relative regret calculations to prevent
+                 division by zero. Defaults to 1e-5.
+             metrics (list[str] | None): List of metrics on which to evaluate.
+
+         Returns:
+             dict[str, np.ndarray]: A dictionary containing evaluation metrics, including:
+                 - 'objective': The objective function value(s).
+                 - 'abs_regret' (optional): Absolute regret if 'objective_optimal' is in `data_batch`.
+                 - 'rel_regret' (optional): Relative regret if 'objective_optimal' is in `data_batch`.
+                 - 'sym_rel_regret' (optional): Symmetric relative regret if 'objective_optimal'
+                   is in `data_batch`.
+         """
+         if metrics is None:  # Check if metrics was not provided
+             metrics = ["abs_regret"]
+
+         eval_dict = {}
+         if "objective" in metrics or "abs_regret" in metrics or "rel_regret" in metrics or "sym_rel_regret" in metrics:
+             objectives = self.get_objective(data_batch, decisions_batch, predictions_batch)
+             if "objective" in metrics:
+                 eval_dict["objective"] = objectives.detach().cpu().numpy().astype(np.float32)
+             if "abs_regret" in metrics or "rel_regret" in metrics or "sym_rel_regret" in metrics:
+                 optimal_objectives = data_batch["objective_optimal"]
+                 regret = (objectives - optimal_objectives) * float(self.model_sense_int)
+                 if "abs_regret" in metrics:
+                     eval_dict["abs_regret"] = regret.cpu().detach().numpy().astype(np.float32)
+                 if "rel_regret" in metrics or "sym_rel_regret" in metrics:
+                     relative_regret = regret / optimal_objectives
+                     if "rel_regret" in metrics:
+                         eval_dict["rel_regret"] = relative_regret.cpu().detach().numpy().astype(np.float32)
+                     if "sym_rel_regret" in metrics:
+                         symmetric_relative_regret = regret / (abs(optimal_objectives) + abs(objectives) + epsilon)
+                         eval_dict["sym_rel_regret"] = symmetric_relative_regret.cpu().detach().numpy().astype(np.float32)
+
+         return eval_dict
+
+     def create_saa_variant(self, num_scenarios: int) -> "OptimizationModel":
+         """
+         Creates a Sample Average Approximation (SAA) variant of the optimization problem.
+         This version incorporates multiple scenarios for stochastic optimization.
+
+         Args:
+             num_scenarios (int): The number of scenarios to include in the SAA variant.
+
+         Returns:
+             OptimizationModel: A new instance of the model representing the SAA variant.
+         """
+         assert "num_scenarios" in self.init_arguments, "Concrete model needs to have a num_scenarios argument if you want to create a SAA variant."
+         assert all(
+             arg in self.init_arguments for arg in inspect.signature(type(self).__init__).parameters if arg != "self"
+         ), "Concrete model needs to have all attributes set to arguments if you want to create a SAA variant."
+         init_arguments = {key: item for key, item in self.init_arguments.items() if key != "num_scenarios"}
+
+         return self.__class__(**init_arguments, num_scenarios=num_scenarios)
+
+     def create_copy(self) -> "OptimizationModel":
+         """
+         Creates a shallow copy of the current model instance.
+
+         Returns:
+             OptimizationModel: A new instance of the same model with the same attributes.
+         """
+         return copy.copy(self)
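
To make the interface above concrete, here is a minimal sketch of a toy subclass. It is illustrative only and not part of the package (the package's concrete models, e.g. GRBPYModel, wrap an actual solver); it assumes the `src`-rooted import layout shown above. With independent 0/1 decisions and no coupling constraints, the optimal decision simply selects every item with a positive value.

```python
import torch

from src.abstract_models.base import OptimizationModel


class ToySelectionModel(OptimizationModel):
    def __init__(self, num_items: int = 3, num_scenarios: int = 1) -> None:
        # Set attributes before calling super().__init__ so that
        # init_arguments (used by create_saa_variant) can pick them up.
        self.num_items = num_items
        self.num_scenarios = num_scenarios
        super().__init__(
            var_shapes={"x": (num_items,)},
            param_to_predict_shapes={"value": (num_items,)},
            model_sense="MAX",
            num_scenarios=num_scenarios,
        )

    def solve_batch(self, data_batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        # Maximizing value * x with x in {0, 1} and no coupling constraints:
        # take every item whose value is positive.
        return {"x": (data_batch["value"] > 0).float()}

    def get_objective(
        self,
        data_batch: dict[str, torch.Tensor],
        decisions_batch: dict[str, torch.Tensor],
        predictions_batch: dict[str, torch.Tensor] | None = None,
    ) -> torch.Tensor:
        return (data_batch["value"] * decisions_batch["x"]).sum(dim=-1)


model = ToySelectionModel()
batch = {"value": torch.tensor([[1.0, -2.0, 3.0]])}
decisions = model.solve_batch(batch)  # selects items 0 and 2
print(model.evaluate(batch, decisions, metrics=["objective"]))  # {'objective': array([4.], dtype=float32)}
```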