desdeo 2.1.0__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- desdeo/api/models/nimbus.py +8 -4
- desdeo/api/routers/emo.py +75 -104
- desdeo/api/routers/generic.py +26 -58
- desdeo/api/routers/nimbus.py +108 -247
- desdeo/api/routers/problem.py +69 -56
- desdeo/api/routers/reference_point_method.py +29 -27
- desdeo/api/routers/session.py +15 -11
- desdeo/api/routers/user_authentication.py +27 -5
- desdeo/api/routers/utils.py +42 -37
- desdeo/api/routers/utopia.py +11 -12
- desdeo/api/tests/test_routes.py +6 -5
- desdeo/emo/__init__.py +2 -0
- desdeo/emo/operators/__init__.py +1 -1
- desdeo/emo/operators/generator.py +153 -2
- desdeo/emo/options/__init__.py +4 -0
- desdeo/emo/options/generator.py +24 -0
- desdeo/problem/__init__.py +12 -11
- desdeo/problem/evaluator.py +4 -5
- desdeo/problem/gurobipy_evaluator.py +37 -12
- desdeo/problem/infix_parser.py +1 -16
- desdeo/problem/json_parser.py +7 -11
- desdeo/problem/schema.py +6 -9
- desdeo/problem/utils.py +1 -1
- desdeo/tools/pyomo_solver_interfaces.py +1 -1
- {desdeo-2.1.0.dist-info → desdeo-2.2.0.dist-info}/METADATA +21 -12
- {desdeo-2.1.0.dist-info → desdeo-2.2.0.dist-info}/RECORD +28 -28
- {desdeo-2.1.0.dist-info → desdeo-2.2.0.dist-info}/WHEEL +1 -1
- {desdeo-2.1.0.dist-info → desdeo-2.2.0.dist-info}/licenses/LICENSE +0 -0
desdeo/api/routers/utopia.py
CHANGED
@@ -4,9 +4,8 @@ import json
 from typing import Annotated

 from fastapi import APIRouter, Depends
-from sqlmodel import
+from sqlmodel import select

-from desdeo.api.db import get_session
 from desdeo.api.models import (
     ForestProblemMetaData,
     NIMBUSFinalState,
@@ -14,33 +13,33 @@ from desdeo.api.models import (
     NIMBUSSaveState,
     ProblemMetaDataDB,
     StateDB,
-    User,
     UtopiaRequest,
     UtopiaResponse,
 )
-from desdeo.api.routers.
+from desdeo.api.routers.utils import SessionContext, get_session_context

 router = APIRouter(prefix="/utopia")


 @router.post("/")
-def get_utopia_data(
+def get_utopia_data(  # noqa: C901
     request: UtopiaRequest,
-
-    session: Annotated[Session, Depends(get_session)],
+    context: Annotated[SessionContext, Depends(get_session_context)],
 ) -> UtopiaResponse:
     """Request and receive the Utopia map corresponding to the decision variables sent.

     Args:
         request (UtopiaRequest): the set of decision variables and problem for which the utopia forest map is requested
-
-
-
+            for.
+        context (Annotated[SessionContext, Depends(get_session_context)]): the current session context
+
     Raises:
         HTTPException:
     Returns:
         UtopiaResponse: the map for the forest, to be rendered in frontend
     """
+    session = context.db_session
+
     empty_response = UtopiaResponse(is_utopia=False, map_name="", map_json={}, options={}, description="", years=[])

     state = session.exec(select(StateDB).where(StateDB.id == request.solution.state_id)).first()
@@ -105,9 +104,9 @@ def get_utopia_data(
         # The dict keys get converted to ints to strings when it's loaded from database
         try:
             treatments = forest_metadata.schedule_dict[key][str(decision_variables[key].index(1))]
-        except ValueError
+        except ValueError:
             # if the optimization didn't choose any decision alternative, it's safe to assume
-            #
+            # that nothing is being done at that forest stand
            treatments = forest_metadata.schedule_dict[key]["0"]
            # print(e)
         treatments_dict[key] = {forest_metadata.years[0]: 0, forest_metadata.years[1]: 0, forest_metadata.years[2]: 0}

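As a rough illustration of the dependency change applied across the routers in this release, the sketch below (not part of the diff) shows the pattern of injecting a single SessionContext instead of a raw database session. The endpoint name and response type are placeholders; only SessionContext, get_session_context, and the db_session attribute are taken from the diff above.

from typing import Annotated

from fastapi import APIRouter, Depends

from desdeo.api.routers.utils import SessionContext, get_session_context

router = APIRouter(prefix="/example")


@router.post("/")
def example_endpoint(
    context: Annotated[SessionContext, Depends(get_session_context)],
) -> dict:
    # the context bundles the database session (and, presumably, other per-request state)
    session = context.db_session
    # ... query with session.exec(...) as in get_utopia_data above ...
    return {"ok": True}
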
desdeo/api/tests/test_routes.py
CHANGED
@@ -110,6 +110,7 @@ def test_refresh(client: TestClient):
     response_refresh = client.post("/refresh")

     assert "access_token" in response_refresh.json()
+    assert "access_token" in response_refresh.cookies

     assert response_good.json()["access_token"] != response_refresh.json()["access_token"]

@@ -649,7 +650,7 @@ def test_nimbus_save_and_delete_save(client: TestClient):
     assert len(solve_result.saved_solutions) > 0

     # 4. Delete save
-    request: NIMBUSDeleteSaveRequest = NIMBUSDeleteSaveRequest(state_id=2, solution_index=1)
+    request: NIMBUSDeleteSaveRequest = NIMBUSDeleteSaveRequest(state_id=2, solution_index=1, problem_id=1)
     response = post_json(client, "/method/nimbus/delete_save", request.model_dump(), access_token)
     delete_save_result: NIMBUSDeleteSaveResponse = NIMBUSDeleteSaveResponse.model_validate(json.loads(response.content))

@@ -930,9 +931,9 @@ def test_preferred_solver(client: TestClient):
         response = post_json(client, "/method/nimbus/initialize", request.model_dump(), access_token)
         model = NIMBUSInitializationResponse.model_validate(response.json())
     except Exception as e:
-        print(e)
-        print("^ This outcome is expected since pyomo_cbc doesn't support nonlinear problems.")
-        print(" As that solver is what we set it to be in the start, we can verify that they actually get used.")
+        print(e)  # noqa: T201
+        print("^ This outcome is expected since pyomo_cbc doesn't support nonlinear problems.")  # noqa: T201
+        print(" As that solver is what we set it to be in the start, we can verify that they actually get used.")  # noqa: T201


 def test_get_available_solvers(client: TestClient):
@@ -1027,7 +1028,7 @@ def test_gdm_score_bands(client: TestClient):
     response = post_json(client=client, endpoint="/gdm/add_to_group", json=req, access_token=access_token)
     assert response.status_code == 200

-    access_token = login(client=client, username="dm", password="dm")
+    access_token = login(client=client, username="dm", password="dm")  # noqa: S106

     # Now we have a group, so let's get on with making stuff with gdm score bands.
     req = GDMScoreBandsInitializationRequest(

desdeo/emo/__init__.py
CHANGED
@@ -42,6 +42,7 @@ from .options.generator import (
     RandomGeneratorOptions,
     RandomIntegerGeneratorOptions,
     RandomMixedIntegerGeneratorOptions,
+    SeededHybridGeneratorOptions,
 )
 from .options.mutation import (
     BinaryFlipMutationOptions,
@@ -131,6 +132,7 @@ generator = SimpleNamespace(
     RandomGeneratorOptions=RandomGeneratorOptions,
     RandomIntegerGeneratorOptions=RandomIntegerGeneratorOptions,
     RandomMixedIntegerGeneratorOptions=RandomMixedIntegerGeneratorOptions,
+    SeededHybridGeneratorOptions=SeededHybridGeneratorOptions,
 )

 templates = SimpleNamespace(

desdeo/emo/operators/__init__.py
CHANGED
@@ -1 +1 @@
-
+"""Exports of the 'operators' module."""

desdeo/emo/operators/generator.py
CHANGED

@@ -383,7 +383,7 @@ class ArchiveGenerator(BaseGenerator):
         publisher: Publisher,
         verbosity: int,
         solutions: pl.DataFrame,
-        **kwargs,  # just to dump seed
+        **kwargs: dict,  # just to dump seed
     ):
         """Initialize the ArchiveGenerator class.

@@ -395,10 +395,11 @@ class ArchiveGenerator(BaseGenerator):
             verbosity (int): The verbosity level of the generator. A verbosity of 2 is needed if you want to maintain
                 an external archive. Otherwise, a verbosity of 1 is sufficient.
             solutions (pl.DataFrame): The decision variable vectors to use as the initial population.
+            kwargs (dict): Other keyword arguments to pass, e.g., a random seed.
         """
         super().__init__(problem, verbosity=verbosity, publisher=publisher)
         if not isinstance(solutions, pl.DataFrame):
-            raise
+            raise TypeError("The solutions must be a polars DataFrame.")
         if solutions.shape[0] == 0:
             raise ValueError("The solutions DataFrame is empty.")
         self.solutions = solutions
@@ -457,3 +458,153 @@ class ArchiveGenerator(BaseGenerator):

     def update(self, message) -> None:
         """Update the generator based on the message."""
+
+
+class SeededHybridGenerator(BaseGenerator):
+    """Generates an initial population using a mix of seeded, perturbed, and random solutions."""
+
+    def __init__(
+        self,
+        problem,
+        evaluator,
+        publisher,
+        verbosity,
+        seed: int,
+        n_points: int,
+        seed_solution: pl.DataFrame,
+        perturb_fraction: float = 0.2,
+        sigma: float = 0.02,
+        flip_prob: float = 0.1,
+    ):
+        """Initialize the seeded hybrid generator.
+
+        The generator always includes the provided seed solution in the initial
+        population, fills a fraction of the population with small perturbations
+        around the seed, and fills the remainder with randomly generated solutions.
+
+        Args:
+            problem (Problem): The optimization problem.
+            evaluator (EMOEvaluator): Evaluator used to compute objectives and constraints.
+            publisher (Publisher): Publisher used for emitting generator messages.
+            verbosity (int): Verbosity level of the generator.
+            seed (int): Seed used for random number generation.
+            n_points (int): Total size of the initial population.
+            seed_solution (pl.DataFrame): A single-row DataFrame containing a seed
+                decision variable vector.
+            perturb_fraction (float, optional): Fraction of the population generated
+                by perturbing the seed solution. Defaults to 0.2.
+            sigma (float, optional): Relative perturbation scale with respect to
+                variable ranges. Defaults to 0.02.
+            flip_prob (float, optional): Probability of flipping a binary variable
+                when perturbing the seed. Defaults to 0.1.
+
+        Raises:
+            TypeError: If ``seed_solution`` is not a polars DataFrame.
+            ValueError: If ``seed_solution`` does not contain exactly one row.
+            ValueError: If ``seed_solution`` columns do not match problem variables.
+            ValueError: If ``n_points`` is not positive.
+            ValueError: If ``perturb_fraction`` is outside ``[0, 1]``.
+            ValueError: If ``sigma`` is negative.
+            ValueError: If ``flip_prob`` is outside ``[0, 1]``.
+        """
+        super().__init__(problem, verbosity=verbosity, publisher=publisher)
+
+        if not isinstance(seed_solution, pl.DataFrame):
+            raise TypeError("seed_solution must be a polars DataFrame.")
+        if seed_solution.shape[0] != 1:
+            raise ValueError("seed_solution must have exactly one row.")
+        if set(seed_solution.columns) != set(self.variable_symbols):
+            raise ValueError("seed_solution columns must match problem variables.")
+
+        if n_points <= 0:
+            raise ValueError("n_points must be > 0.")
+        if not (0.0 <= perturb_fraction <= 1.0):
+            raise ValueError("perturb_fraction must be in [0, 1].")
+        if sigma < 0:
+            raise ValueError("sigma must be >= 0.")
+        if not (0.0 <= flip_prob <= 1.0):
+            raise ValueError("flip_prob must be in [0, 1].")
+
+        self.n_points = n_points
+        self.seed_solution = seed_solution
+        self.perturb_fraction = perturb_fraction
+        self.sigma = sigma
+        self.flip_prob = flip_prob
+
+        self.evaluator = evaluator
+        self.seed = seed
+        self.rng = np.random.default_rng(self.seed)
+
+        self.population = None
+        self.out = None
+
+    def _random_population(self, n: int) -> pl.DataFrame:
+        tmp = {}
+        for var in self.problem.variables:
+            if var.variable_type in [VariableTypeEnum.binary, VariableTypeEnum.integer]:
+                vals = self.rng.integers(var.lowerbound, var.upperbound, size=n, endpoint=True).astype(float)
+            else:
+                vals = self.rng.uniform(var.lowerbound, var.upperbound, size=n).astype(float)
+            tmp[var.symbol] = vals
+        return pl.DataFrame(tmp)
+
+    def _perturb_seed(self, n: int) -> pl.DataFrame:
+        # includes the exact seed as first row
+        seed_row = self.seed_solution.select(self.variable_symbols).to_dict(as_series=False)
+        seed_vals = {k: float(v[0]) for k, v in seed_row.items()}
+
+        rows = [seed_vals]  # ensure seed present
+        if n <= 1:
+            return pl.DataFrame(rows)
+
+        for _ in range(n - 1):
+            x = {}
+            for var in self.problem.variables:
+                lb, ub = float(var.lowerbound), float(var.upperbound)
+                r = ub - lb
+
+                v0 = seed_vals[var.symbol]
+
+                if var.variable_type == VariableTypeEnum.binary:
+                    v = 1.0 - v0 if self.rng.random() < self.flip_prob else v0
+                elif var.variable_type == VariableTypeEnum.integer:
+                    # scales integer noise
+                    step = max(1, round(self.sigma * r)) if r >= 1 else 0
+                    dv = self.rng.integers(-step, step + 1) if step > 0 else 0
+                    v = float(int(np.clip(round(v0 + dv), lb, ub)))
+                else:
+                    # continuous noise is proportional to range
+                    dv = self.rng.normal(0.0, self.sigma * r if r > 0 else 0.0)
+                    v = float(np.clip(v0 + dv, lb, ub))
+
+                x[var.symbol] = v
+            rows.append(x)
+
+        return pl.DataFrame(rows)
+
+    def do(self) -> tuple[pl.DataFrame, pl.DataFrame]:
+        """Generate a population.
+
+        Returns:
+            tuple[pl.DataFrame, pl.DataFrame]: the population.
+        """
+        if self.population is not None and self.out is not None:
+            self.notify()
+            return self.population, self.out
+
+        n_pert = max(1, round(self.perturb_fraction * self.n_points))
+        n_pert = min(n_pert, self.n_points)
+        n_rand = self.n_points - n_pert
+
+        pert = self._perturb_seed(n_pert)
+        rand = self._random_population(n_rand) if n_rand > 0 else pl.DataFrame({s: [] for s in self.variable_symbols})
+
+        self.population = pl.concat([pert, rand], how="vertical")
+
+        self.out = self.evaluator.evaluate(self.population)
+        self.notify()
+
+        return self.population, self.out
+
+    def update(self, message) -> None:
+        """Update the generator based on the message."""

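To make the intent of the new class concrete, here is a minimal usage sketch (not part of the diff). It relies only on the constructor signature shown above; the problem, evaluator, and publisher objects are assumed to come from the usual DESDEO EMO setup, and the variable symbols "x_1" and "x_2" are hypothetical.

import polars as pl

from desdeo.emo.operators.generator import SeededHybridGenerator


def build_seeded_generator(problem, evaluator, publisher) -> SeededHybridGenerator:
    """Build a seeded hybrid generator; problem, evaluator, and publisher come from the usual EMO setup."""
    # one known-good solution to seed the initial population with; columns must match the problem's variable symbols
    seed_solution = pl.DataFrame({"x_1": [0.5], "x_2": [1.0]})
    return SeededHybridGenerator(
        problem,
        evaluator,
        publisher,
        verbosity=1,
        seed=42,
        n_points=50,
        seed_solution=seed_solution,
        perturb_fraction=0.2,  # ~20% of the population are perturbed copies of the seed
        sigma=0.02,            # perturbation scale relative to each variable's range
        flip_prob=0.1,         # chance of flipping a binary variable when perturbing
    )

Calling do() on the resulting generator returns (population, outputs), with the exact seed as the first row, perturbed copies next, and random solutions filling the rest.
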
desdeo/emo/options/__init__.py
CHANGED
@@ -1,3 +1,5 @@
+"""Exports of the 'options' module."""
+
 from .crossover import (
     BlendAlphaCrossoverOptions,
     BoundedExponentialCrossoverOptions,
@@ -17,6 +19,7 @@ from .generator import (
     RandomGeneratorOptions,
     RandomIntegerGeneratorOptions,
     RandomMixedIntegerGeneratorOptions,
+    SeededHybridGeneratorOptions,
     generator_constructor,
 )
 from .mutation import (
@@ -105,4 +108,5 @@ __all__ = [  # noqa: RUF022
     "selection_constructor",
     "RepairOptions",
     "repair_constructor",
+    "SeededHybridGeneratorOptions",
 ]

desdeo/emo/options/generator.py
CHANGED
@@ -16,6 +16,7 @@ from desdeo.emo.operators.generator import (
     RandomGenerator,
     RandomIntegerGenerator,
     RandomMixedIntegerGenerator,
+    SeededHybridGenerator,
 )

 if TYPE_CHECKING:
@@ -85,6 +86,27 @@ class ArchiveGeneratorOptions(BaseModel):
     outputs: pl.DataFrame
     """The corresponding outputs of the initial solutions."""

+
+class SeededHybridGeneratorOptions(BaseGeneratorOptions):
+    """Options for the seeded hybrid generator."""
+
+    name: Literal["SeededHybridGenerator"] = Field(default="SeededHybridGenerator", frozen=True)
+    model_config = {"arbitrary_types_allowed": True, "use_attribute_docstrings": True}
+
+    seed_solution: pl.DataFrame
+    """A dataframe with a single row representing the solution seed. The columns
+    must match the symbols of the variables in the problem being solved.
+    """
+    perturb_fraction: float = Field(default=0.2, ge=0.0, le=1.0)
+    """The desired fraction of perturbed vs random solutions in the generated population."""
+
+    sigma: float = Field(default=0.02, ge=0.0)
+    """The relative perturbation scale with respect to variable ranges."""
+
+    flip_prob: float = Field(default=0.1, ge=0.0, le=1.0)
+    """The flipping probability when perturbing binary variables."""
+
+
 GeneratorOptions = (
     LHSGeneratorOptions
     | RandomBinaryGeneratorOptions
@@ -92,6 +114,7 @@ GeneratorOptions = (
     | RandomIntegerGeneratorOptions
     | RandomMixedIntegerGeneratorOptions
     | ArchiveGeneratorOptions
+    | SeededHybridGeneratorOptions
 )


@@ -123,6 +146,7 @@ def generator_constructor(
         "RandomIntegerGenerator": RandomIntegerGenerator,
         "RandomMixedIntegerGenerator": RandomMixedIntegerGenerator,
         "ArchiveGenerator": ArchiveGenerator,
+        "SeededHybridGenerator": SeededHybridGenerator,
     }
     options: dict = options.model_dump()
     name = options.pop("name")

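A small sketch (not part of the diff) of how the new options model slots into the existing options machinery. The variable symbols are hypothetical, any fields required by BaseGeneratorOptions are omitted here, and generator_constructor's full call signature is not shown because it is unchanged by this diff.

import polars as pl

from desdeo.emo.options.generator import SeededHybridGeneratorOptions

options = SeededHybridGeneratorOptions(
    # one row, columns matching the problem's variable symbols (hypothetical here)
    seed_solution=pl.DataFrame({"x_1": [0.5], "x_2": [1.0]}),
    perturb_fraction=0.2,
    sigma=0.02,
    flip_prob=0.1,
)

# options.name is frozen to "SeededHybridGenerator"; generator_constructor uses that key
# to pick the SeededHybridGenerator class from its lookup table shown above.
assert options.name == "SeededHybridGenerator"
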
desdeo/problem/__init__.py
CHANGED
@@ -7,35 +7,36 @@ __all__ = [
     "DiscreteRepresentation",
     "Evaluator",
     "ExtraFunction",
-    "flatten_variable_dict",
     "FormatEnum",
     "GurobipyEvaluator",
-    "get_nadir_dict",
-    "get_ideal_dict",
     "InfixExpressionParser",
     "MathParser",
-    "numpy_array_to_objective_dict",
-    "objective_dict_to_numpy_array",
     "Objective",
     "ObjectiveTypeEnum",
-    "Problem",
-    "PyomoEvaluator",
-    "SympyEvaluator",
-    "tensor_constant_from_dataframe",
     "PolarsEvaluator",
     "PolarsEvaluatorModesEnum",
+    "Problem",
+    "PyomoEvaluator",
     "ScalarizationFunction",
     "Simulator",
+    "SimulatorEvaluator",
+    "SympyEvaluator",
+    "Tensor",
     "TensorConstant",
     "TensorVariable",
     "Url",
-    "unflatten_variable_array",
     "Variable",
     "VariableDimensionEnum",
     "VariableDomainTypeEnum",
     "VariableType",
-    "Tensor",
     "VariableTypeEnum",
+    "flatten_variable_dict",
+    "get_ideal_dict",
+    "get_nadir_dict",
+    "numpy_array_to_objective_dict",
+    "objective_dict_to_numpy_array",
+    "tensor_constant_from_dataframe",
+    "unflatten_variable_array",
     "variable_dimension_enumerate",
 ]

desdeo/problem/evaluator.py
CHANGED
@@ -33,7 +33,7 @@ class PolarsEvaluatorModesEnum(str, Enum):
     mixed = "mixed"
     """Indicates that the problem has analytical and simulator and/or surrogate
     based objectives, constraints and extra functions. In this mode, the evaluator
-    only handles data-based and analytical functions. For data-
+    only handles data-based and analytical functions. For data-based objectives,
     it assumes that the variables are to be evaluated by finding the closest
     variables values in the data compare to the input, and evaluating the result
     to be the matching objective function values that match to the closest
@@ -74,7 +74,7 @@ def variable_dimension_enumerate(problem: Problem) -> VariableDimensionEnum:
     enum = VariableDimensionEnum.scalar
     for var in problem.variables:
         if isinstance(var, TensorVariable):
-            if len(var.shape) == 1 or len(var.shape) == 2 and not (var.shape[0] > 1 and var.shape[1] > 1):  # noqa: PLR2004
+            if len(var.shape) == 1 or (len(var.shape) == 2 and not (var.shape[0] > 1 and var.shape[1] > 1)):  # noqa: PLR2004
                 enum = VariableDimensionEnum.vector
             else:
                 return VariableDimensionEnum.tensor
@@ -187,7 +187,7 @@ class PolarsEvaluator:
                 f"Provided 'evaluator_mode' {evaluator_mode} not supported. Must be one of {PolarsEvaluatorModesEnum}."
             )

-    def _polars_init(self): # noqa: C901
+    def _polars_init(self):  # noqa: C901
         """Initialization of the evaluator for parser type 'polars'."""
         # If any constants are defined in problem, replace their symbol with the defined numerical
         # value in all the function expressions found in the Problem.
@@ -211,8 +211,7 @@ class PolarsEvaluator:
                 parsed_obj_funcs[f"{obj.symbol}"] = None
             else:
                 msg = (
-                    f"Incorrect objective-type {obj.objective_type} encountered. "
-                    f"Must be one of {ObjectiveTypeEnum}"
+                    f"Incorrect objective-type {obj.objective_type} encountered. Must be one of {ObjectiveTypeEnum}"
                 )
                 raise PolarsEvaluatorError(msg)

desdeo/problem/gurobipy_evaluator.py
CHANGED

@@ -1,8 +1,8 @@
 """Defines an evaluator compatible with the Problem JSON format and transforms it into a GurobipyModel."""

+import warnings
 from operator import eq as _eq
 from operator import le as _le
-import warnings

 import gurobipy as gp
 import numpy as np
@@ -18,8 +18,7 @@ from desdeo.problem.schema import (
     TensorConstant,
     TensorVariable,
     Variable,
-    VariableTypeEnum
-
+    VariableTypeEnum,
 )


@@ -125,8 +124,16 @@ class GurobipyEvaluator:

         elif isinstance(var, TensorVariable):
             # handle tensor variables, i.e., vectors etc..
-            lowerbounds =
-
+            lowerbounds = (
+                var.get_lowerbound_values()
+                if var.lowerbounds is not None
+                else np.full(var.shape, float("-inf")).tolist()
+            )
+            upperbounds = (
+                var.get_upperbound_values()
+                if var.upperbounds is not None
+                else np.full(var.shape, float("inf")).tolist()
+            )

             # figure out the variable type
             match var.variable_type:
@@ -143,13 +150,18 @@ class GurobipyEvaluator:
                     raise GurobipyEvaluatorError(msg)

             # add the variable to the model
-            gvar = self.model.addMVar(
+            gvar = self.model.addMVar(
+                shape=tuple(var.shape),
+                lb=np.array(lowerbounds),
+                ub=np.array(upperbounds),
+                vtype=domain,
+                name=var.symbol,
+            )
             # set the initial value, if one has been defined
             if var.initial_values is not None:
                 gvar.setAttr("Start", np.array(var.get_initial_values()))
             self.mvars[var.symbol] = gvar

-
         # update the model before returning, so that other expressions can reference the variables
         self.model.update()

@@ -413,8 +425,16 @@ class GurobipyEvaluator:
             gvar.setAttr("Start", var.initial_value)
         elif isinstance(var, TensorVariable):
             # handle tensor variables, i.e., vectors etc..
-            lowerbounds =
-
+            lowerbounds = (
+                var.get_lowerbound_values()
+                if var.lowerbounds is not None
+                else np.full(var.shape, float("-inf")).tolist()
+            )
+            upperbounds = (
+                var.get_upperbound_values()
+                if var.upperbounds is not None
+                else np.full(var.shape, float("inf")).tolist()
+            )

             # figure out the variable type
             match var.variable_type:
@@ -431,7 +451,13 @@ class GurobipyEvaluator:
                     raise GurobipyEvaluatorError(msg)

             # add the variable to the model
-            gvar = self.model.addMVar(
+            gvar = self.model.addMVar(
+                shape=tuple(var.shape),
+                lb=np.array(lowerbounds),
+                ub=np.array(upperbounds),
+                vtype=domain,
+                name=var.symbol,
+            )
             # set the initial value, if one has been defined
             if var.initial_values is not None:
                 gvar.setAttr("Start", np.array(var.get_initial_values()))
@@ -470,8 +496,7 @@ class GurobipyEvaluator:
             expression = self.constants[name]
         return expression

-
-    def get_values(self) -> dict[str, float | int | bool | list[float] | list[int]]:  # noqa: C901
+    def get_values(self) -> dict[str, float | int | bool | list[float] | list[int]]:
         """Get the values from the Gurobipy Model in a dict.

         The keys of the dict will be the symbols defined in the problem utilized to initialize the evaluator.

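The effect of the new bounds handling is that a TensorVariable with no explicit bounds now gets unbounded (-inf/+inf) limits before the variable is added to the Gurobi model. A standalone gurobipy sketch of the resulting call (not DESDEO code; the shape and name are made up):

import gurobipy as gp
import numpy as np
from gurobipy import GRB

model = gp.Model()
shape = (3,)

# missing bounds fall back to unbounded arrays, mirroring the ternaries added in the diff
lowerbounds = np.full(shape, float("-inf"))
upperbounds = np.full(shape, float("inf"))

x = model.addMVar(shape=shape, lb=lowerbounds, ub=upperbounds, vtype=GRB.CONTINUOUS, name="X")
model.update()
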
desdeo/problem/infix_parser.py
CHANGED
@@ -193,7 +193,7 @@ class InfixExpressionParser:
     def _is_number_or_variable(self, c):
         return isinstance(c, int | float) or (isinstance(c, str) and c not in self.reserved_symbols)

-    def _to_math_json(self, parsed: list | str) -> list:
+    def _to_math_json(self, parsed: list | str) -> list:
         """Converts a list of expressions into a MathJSON compliant format.

         The conversion happens recursively. Each list of recursed until a terminal character is reached.
@@ -324,18 +324,3 @@ class InfixExpressionParser:
         # simple expressions, like 'x_1', are parsed into just a string after removing any extra
         # brackets, so we add them back there in case it is needed
         return expr if isinstance(expr, list) else [expr]
-
-
-if __name__ == "__main__":
-    # Parse and convert
-    test = "(x_1 - c_2) ** 2 + x_2 ** 2 - 25"
-
-    ohh_no = "['Add', ['Negate', ['Square', ['Subtract', 'x_1', 8]]], ['Negate', ['Square', ['Add', 'x_2', 3]]], 7.7]"
-
-    to_json_parser = InfixExpressionParser(target="MathJSON")
-
-    parsed_expression = to_json_parser.parse(test)
-
-    print(f"Expresion:\n{test}")
-    print(f"Parsed:\n{to_json_parser._pre_parse(test)}")
-    print(f"MathJSON:\n{parsed_expression}")

desdeo/problem/json_parser.py
CHANGED
@@ -35,7 +35,7 @@ class MathParser:
     Currently only parses MathJSON to polars expressions. Pyomo WIP.
     """

-    def __init__(self, to_format: FormatEnum = "polars"):
+    def __init__(self, to_format: FormatEnum = "polars"):  # noqa: C901
         """Create a parser instance for parsing MathJSON notation into polars expressions.

         Args:
@@ -135,7 +135,7 @@ class MathParser:
             acc = acc.to_numpy()
             x = x.to_numpy()

-            if len(acc.shape) == 2 and len(x.shape) == 2:
+            if len(acc.shape) == 2 and len(x.shape) == 2:  # noqa: PLR2004
                 # Row vectors, just return the dot product, polars does not handle
                 # "column" vectors anyway
                 return pl.Series(values=np.einsum("ij,ij->i", acc, x, optimize=True))
@@ -373,7 +373,7 @@ class MathParser:
             hasattr(x, "is_indexed")
             and x.is_indexed()
             and x.dim() > 0
-            and (not hasattr(y, "is_indexed") or not y.is_indexed() or y.is_indexed() and y.dim() == 0)
+            and (not hasattr(y, "is_indexed") or not y.is_indexed() or (y.is_indexed() and y.dim() == 0))
         ):
             # x is a tensor, y is scalar
             expr = pyomo.Expression(
@@ -385,7 +385,7 @@ class MathParser:
             hasattr(y, "is_indexed")
             and y.is_indexed()
             and y.dim() > 0
-            and (not hasattr(x, "is_indexed") or not x.is_indexed() or x.is_indexed() and x.dim() == 0)
+            and (not hasattr(x, "is_indexed") or not x.is_indexed() or (x.is_indexed() and x.dim() == 0))
         ):
             # y is a tensor, x is scalar
             expr = pyomo.Expression(
@@ -534,16 +534,13 @@ class MathParser:
         def _sympy_matmul(*args):
             """Sympy matrix multiplication."""
             msg = (
-                "Matrix multiplication '@' has not been implemented for the Sympy parser yet."
-                " Feel free to contribute!"
+                "Matrix multiplication '@' has not been implemented for the Sympy parser yet. Feel free to contribute!"
             )
             raise NotImplementedError(msg)

         def _sympy_summation(summand):
             """Sympy matrix summation."""
-            msg =
-                "Matrix summation 'Sum' has not been implemented for the Sympy parser yet." " Feel free to contribute!"
-            )
+            msg = "Matrix summation 'Sum' has not been implemented for the Sympy parser yet. Feel free to contribute!"
             raise NotImplementedError(msg)

         def _sympy_random_access(*args):
@@ -627,8 +624,7 @@ class MathParser:

             return _sum(summand)
         msg = (
-            "Matrix summation 'Sum' has not been implemented for the Gurobipy parser yet."
-            " Feel free to contribute!"
+            "Matrix summation 'Sum' has not been implemented for the Gurobipy parser yet. Feel free to contribute!"
         )
         raise NotImplementedError(msg)

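For reference, the added parentheses in the two is_indexed conditions above do not change behaviour, since Python's `and` already binds tighter than `or`; they only make the intended grouping explicit. A quick check (not part of the diff):

a, b, c = True, True, False
assert (a or b and c) == (a or (b and c))   # `and` binds tighter, so this is how Python groups it
assert (a or b and c) != ((a or b) and c)   # grouping the other way would change the result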