ai-nk-cce 0.1.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_nk_cce-0.1.0.dist-info/METADATA +118 -0
- ai_nk_cce-0.1.0.dist-info/RECORD +46 -0
- ai_nk_cce-0.1.0.dist-info/WHEEL +4 -0
- api/__init__.py +0 -0
- api/mpcdf_vllm.py +94 -0
- evals/nk_model.py +277 -0
- model/README.md +64 -0
- model/config/dataset_conv_v1.yml +9 -0
- model/config/dataset_conv_v2_m2.yml +9 -0
- model/config/dataset_conv_v3_m2_assembl_nearest.yml +9 -0
- model/config/dataset_debug.yml +9 -0
- model/config/dataset_v4_int_format.yml +9 -0
- model/config/dataset_v5.yml +9 -0
- model/config/inference.yml +7 -0
- model/config/train.yml +24 -0
- model/config/train_debug.yml +19 -0
- model/config/train_from_checkpoint.yml +24 -0
- model/config/train_from_checkpoint_debug.yml +19 -0
- model/config/train_grpo.yml +30 -0
- model/config/train_grpo_debug.yml +30 -0
- model/config/train_grpo_debug_vllm.yml +32 -0
- model/config.py +54 -0
- model/dataset.py +324 -0
- model/inference.py +51 -0
- model/nk_assistant.py +207 -0
- model/parser.py +70 -0
- model/run_slurm.py +335 -0
- model/score.ipynb +596 -0
- model/scripts/template.slurm +54 -0
- model/scripts/template_rl.slurm +54 -0
- model/train.py +293 -0
- nk_model/__init__.py +0 -0
- nk_model/assembler.py +112 -0
- nk_model/biased_prediction_agent.py +389 -0
- nk_model/dataset.py +434 -0
- nk_model/enums.py +21 -0
- nk_model/landscape_cache.py +149 -0
- nk_model/models.py +172 -0
- nk_model/nk_landscape.py +498 -0
- simulation/hill_climber_simulation.py +211 -0
- simulation/hill_climber_vs_ai_simulation.py +132 -0
- simulation/landscape_selection.py +179 -0
- utils/__init__.py +0 -0
- utils/binary_conversion.py +128 -0
- utils/logging.py +33 -0
- utils/utils.py +51 -0
nk_model/models.py
ADDED
@@ -0,0 +1,172 @@
+import json
+from typing import Literal, Union
+
+import numpy as np
+from pydantic import (
+    BaseModel,
+    Field,
+    NonNegativeInt,
+    PositiveFloat,
+    PositiveInt,
+    field_validator,
+    model_validator,
+)
+
+from src.nk_model.enums import ConvolutionMethod, NeighborhoodMethod
+
+
+class NKParams(BaseModel):
+    n: PositiveInt
+    k: NonNegativeInt
+    power: PositiveFloat
+    max_val: PositiveFloat = Field(
+        default=1.0,
+        description="Maximum value for NK-landscape values. All values "
+        "will be scaled so that the maximum value equals this float. "
+        "Example: 1000.0 means the maximum value will be 1000.0.",
+    )
+    m: PositiveInt = Field(
+        description=(
+            "Number of convolutions to apply to the NK model landscape. "
+            "Default is m=n, which means no convolutions are applied."
+        ),
+    )
+    neighborhood: NeighborhoodMethod = Field(
+        default=NeighborhoodMethod.RANDOM,
+        description=(
+            "Method to determine the nearest neighbor for a given node in "
+            "the NK model."
+        ),
+    )
+    convolution: ConvolutionMethod = Field(
+        default=ConvolutionMethod.RANDOM,
+        description=(
+            "Method to determine the convolution method to apply to the NK "
+            "model landscape."
+        ),
+    )
+    payoff_type: Literal["int", "float"] = Field(
+        description=(
+            "Type for payoff values. If 'int', all payoffs will be integers. "
+            "If 'float', all payoffs will be floats."
+        ),
+    )
+
+    @field_validator("payoff_type", mode="before")
+    @classmethod
+    def validate_payoff_type(cls, v):
+        """Convert type objects to strings and validate."""
+        # Allow both type objects and strings
+        if v == int or v == "int":
+            return "int"
+        elif v == float or v == "float":
+            return "float"
+        else:
+            raise ValueError(
+                "payoff_type must be int, float, 'int', or 'float'"
+            )
+
+    @property
+    def payoff_type_class(self) -> type[int] | type[float]:
+        """Get the actual Python type for payoff_type."""
+        return int if self.payoff_type == "int" else float
+
+    @model_validator(mode="before")
+    @classmethod
+    def set_m_default(cls, data: dict) -> dict:
+        """Set m to n if not provided or None."""
+        if isinstance(data, dict):
+            data = data.copy()
+            # Set m to n if m is missing or None, and n is present
+            if ("m" not in data or data.get("m") is None) and "n" in data:
+                data["m"] = data["n"]
+        return data
+
+    @model_validator(mode="after")
+    def validate_and_set_defaults(self) -> "NKParams":
+        """Validate k and m."""
+        if self.k >= self.n:
+            raise ValueError("The value of k must be less than n.")
+
+        if self.m > self.n:
+            raise ValueError("The value of m must be less than or equal to n.")
+        return self
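A minimal sketch of how these validators behave (illustrative only; the `src.` import prefix mirrors the module's own imports and may need adjusting to the installed package layout):

    from pydantic import ValidationError

    from src.nk_model.models import NKParams

    # m is omitted, so the mode="before" validator copies n into m;
    # payoff_type accepts the builtin type and is normalized to the string "int".
    params = NKParams(n=6, k=2, power=1.0, payoff_type=int)
    assert params.m == 6
    assert params.payoff_type == "int"
    assert params.payoff_type_class is int

    # k >= n is rejected by the mode="after" validator.
    try:
        NKParams(n=4, k=4, power=1.0, payoff_type="float")
    except ValidationError as err:
        print(err)  # includes "The value of k must be less than n."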
+
+
+class Item(BaseModel):
+    """
+    Represents an item in an NK model landscape.
+
+    Attributes:
+        coordinates (np.ndarray):
+            Binary coordinates of the item (e.g., np.array([0, 1, 1, 0])).
+        payoff (Union[int, float]):
+            Payoff value. Type depends on NKParams.payoff_type.
+    """
+
+    # Allows np.ndarray as attribute
+    model_config = {"arbitrary_types_allowed": True}
+
+    coordinates: np.ndarray
+    payoff: Union[int, float] = Field(..., ge=0)
+
+    @field_validator("coordinates")
+    def _validate_state(cls, coordinates: np.ndarray):
+        assert coordinates.ndim == 1, "Coordinates must be a 1D array."
+        assert all(
+            x in [0, 1] for x in coordinates
+        ), "Coordinates must be binary."
+        return coordinates
+
+    def __lt__(self, other: "Item") -> bool:
+        "Compare two items based on their payoffs"
+        return self.payoff < other.payoff
+
+    def model_dump(self) -> dict:
+        """
+        Return a dictionary of the Item,
+        because arrays cannot be serialized to JSON.
+        """
+        return {
+            "coordinates": self.coordinates.tolist(),
+            "payoff": self.payoff,
+        }
+
+
+class NKLandscapeCache(BaseModel):
+    """
+    Pydantic model for caching NKLandscape data.
+
+    This model simplifies the serialization and deserialization
+    of cached landscape data by using Pydantic's built-in
+    serialization capabilities.
+    """
+
+    model_config = {"arbitrary_types_allowed": True}
+
+    params: NKParams
+    items: list[Item]
+
+    def model_dump_json(self) -> str:
+        """Serialize to JSON string."""
+        return json.dumps(
+            {
+                "params": self.params.model_dump(mode="json"),
+                "items": [item.model_dump() for item in self.items],
+            }
+        )
+
+    @classmethod
+    def model_validate_json(cls, json_str: str) -> "NKLandscapeCache":
+        """Deserialize from JSON string."""
+        data = json.loads(json_str)
+        return cls(
+            params=NKParams(**data["params"]),
+            items=[
+                Item(
+                    coordinates=np.array(item["coordinates"]),
+                    payoff=item["payoff"],
+                )
+                for item in data["items"]
+            ],
+        )
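The custom `model_dump` / `model_validate_json` pair exists because the raw `np.ndarray` coordinates are not JSON-serializable. A round-trip sketch (illustrative; same caveat about the import path):

    import numpy as np

    from src.nk_model.models import Item, NKLandscapeCache, NKParams

    params = NKParams(n=2, k=1, power=1.0, payoff_type="float")
    items = [
        Item(coordinates=np.array([0, 0]), payoff=0.25),
        Item(coordinates=np.array([1, 1]), payoff=1.0),
    ]

    cache = NKLandscapeCache(params=params, items=items)
    as_json = cache.model_dump_json()                 # arrays become plain lists
    restored = NKLandscapeCache.model_validate_json(as_json)

    assert restored.params == params
    assert np.array_equal(restored.items[1].coordinates, np.array([1, 1]))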
nk_model/nk_landscape.py
ADDED
@@ -0,0 +1,498 @@
+import itertools
+import random
+from math import ceil, floor
+from typing import Dict, List, Optional
+from uuid import uuid4
+
+import numpy as np
+
+from src.nk_model.assembler import assembler_v1_sym, ensemble_builder
+from src.nk_model.enums import ConvolutionMethod, NeighborhoodMethod
+from src.nk_model.landscape_cache import LandscapeCache
+from src.nk_model.models import Item, NKLandscapeCache, NKParams
+from src.utils.binary_conversion import binary_array_to_int
+
+
+class NKLandscape:
+    """
+    NK model fitness landscape class.
+
+    This class generates NK model landscapes, applies transformations
+    (normalization, power scaling, max_val scaling), and stores the
+    results as a list of Item objects.
+
+    The class always creates new landscapes in __init__ and provides
+    a class method from_cache() to load existing landscapes from cache.
+    """
+
+    # Class-level cache
+    _cache = LandscapeCache()
+
+    def __init__(self, params: NKParams):
+        """
+        Initialize NKLandscape with given parameters.
+
+        Always creates a new landscape and caches it with a generated UUID.
+
+        Args:
+            params: Parameters for the NK model containing:
+                - n: Number of components in the system
+                - k: Number of interactions per component
+                - m: Number of subcombinations to convolute
+                - power: Power scaling factor for payoffs
+                - max_val: Maximum value for scaled payoffs
+                - neighborhood: Method to determine neighbors
+                - convolution: Method for convolution
+
+        Attributes:
+            params: NKParams instance
+            items: List of Item objects with transformed payoffs
+            uuid: Unique identifier for this landscape
+            _payoff_lookup: Dictionary mapping coordinate tuples to payoffs
+        """
+        self.params = params
+        self.N = params.n
+        self.K = params.k
+        self.M = params.m
+
+        # Generate UUID for this landscape
+        self.uuid = str(uuid4())
+
+        # Create the landscape with transformed payoffs
+        self.items = self._create_landscape()
+
+        # Create lookup dictionary for O(1) payoff access
+        self._payoff_lookup = {
+            tuple(item.coordinates): item.payoff for item in self.items
+        }
+
+        # Cache the landscape
+        cache_data = NKLandscapeCache(params=self.params, items=self.items)
+        self._cache.save(self.uuid, cache_data)
+
+    @classmethod
+    def from_cache(cls, uuid: str) -> "NKLandscape":
+        """
+        Load a landscape from cache by UUID.
+
+        Args:
+            uuid: The UUID of the landscape to retrieve
+
+        Returns:
+            NKLandscape instance with cached data
+
+        Raises:
+            ValueError: If the landscape with the given UUID is not found
+        """
+        cached_data = cls._cache.get(uuid)
+        if cached_data is None:
+            raise ValueError(f"Landscape with uuid {uuid} not found in cache.")
+
+        # Create instance without calling __init__
+        instance = cls.__new__(cls)
+        instance.params = cached_data.params
+        instance.N = cached_data.params.n
+        instance.K = cached_data.params.k
+        instance.M = cached_data.params.m
+        instance.uuid = uuid
+        instance.items = cached_data.items
+
+        # Create lookup dictionary
+        instance._payoff_lookup = {
+            tuple(item.coordinates): item.payoff for item in instance.items
+        }
+
+        return instance
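A construction-and-reload sketch (illustrative; assumes the class-level LandscapeCache keeps the entry available within the same process, and uses the same `src.` import prefix as the module itself):

    from src.nk_model.models import NKParams
    from src.nk_model.nk_landscape import NKLandscape

    params = NKParams(n=4, k=1, power=1.0, max_val=100.0, payoff_type="int")

    landscape = NKLandscape(params)          # generates all 2**4 = 16 items and caches them
    reloaded = NKLandscape.from_cache(landscape.uuid)

    assert len(landscape.items) == 16
    assert reloaded.items[0].payoff == landscape.items[0].payoff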
+
+    @classmethod
+    def from_dict(
+        cls,
+        data: Dict[str, int | float],
+        k: Optional[int] = None,
+        m: Optional[int] = None,
+        power: float = 1.0,
+        neighborhood: NeighborhoodMethod = (
+            NeighborhoodMethod.RANDOM
+        ),
+        convolution: ConvolutionMethod = ConvolutionMethod.RANDOM,
+    ) -> "NKLandscape":
+        """
+        Load a landscape from dictionary data.
+
+        Args:
+            data: Dictionary mapping binary string locations
+                (e.g., '01101010') to payoff values (int or float).
+            n: Number of components. If None, derived from binary
+                string length.
+            k: Number of interactions per component. If None,
+                defaults to n-1 (ensuring k < n).
+            m: Number of subcombinations to convolute. If None,
+                defaults to n.
+            power: Power scaling factor for payoffs. Defaults to 1.0.
+            neighborhood: Method to determine neighbors.
+                Defaults to RANDOM.
+            convolution: Method for convolution. Defaults to RANDOM.
+            payoff_type: Type for payoff values ('int' or 'float').
+                If None, derived from data (int if all values are int,
+                else float).
+
+        Returns:
+            NKLandscape instance with data from dictionary.
+
+        Raises:
+            ValueError: If binary strings have inconsistent lengths
+                or if derived parameters are invalid.
+        """
+        if not data:
+            raise ValueError("Data dictionary cannot be empty.")
+
+        # Extract binary strings and values
+        binary_strings = list(data.keys())
+        values = list(data.values())
+
+        # Derive parameters from data
+        payoff_type = "float" if any(isinstance(v, float) for v in values) else "int"
+        n = len(binary_strings[0])
+        max_val = max(values)
+
+        # Set default parameters
+        k = k or max(0, n - 1)
+        m = m or n
+
+        for bs in binary_strings:
+            # Validate binary strings contain only '0' and '1'
+            if not all(bit in "01" for bit in bs):
+                raise ValueError(
+                    f"Binary string '{bs}' contains invalid characters. "
+                    + "Only '0' and '1' are allowed."
+                )
+            if len(bs) != n:
+                raise ValueError(
+                    f"Binary string '{bs}' has length {len(bs)}, "
+                    + f"but expected length {n}."
+                )
+
+        # Create NKParams
+        params = NKParams(
+            n=n,
+            k=k,
+            m=m,
+            power=power,
+            max_val=max_val,
+            neighborhood=neighborhood,
+            convolution=convolution,
+            payoff_type=payoff_type,
+        )
+
+        # Convert dictionary data to Item objects
+        items = []
+        for binary_str, payoff in data.items():
+            coordinates = np.array([int(bit) for bit in binary_str])
+            payoff_type_class = params.payoff_type_class
+            items.append(
+                Item(
+                    coordinates=coordinates,
+                    payoff=payoff_type_class(payoff),
+                )
+            )
+
+        # Create instance without calling __init__
+        instance = cls.__new__(cls)
+        instance.params = params
+        instance.N = params.n
+        instance.K = params.k
+        instance.M = params.m
+        instance.uuid = str(uuid4())
+        instance.items = items
+
+        # Create lookup dictionary
+        instance._payoff_lookup = {
+            tuple(item.coordinates): item.payoff for item in instance.items
+        }
+
+        return instance
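The docstring above also lists n and payoff_type, but neither is a parameter of from_dict: n is taken from the key length, payoff_type is inferred from the value types, max_val becomes the largest value in the dict, and k falls back to n-1. A sketch (illustrative; get_payoff is defined further down):

    import numpy as np

    from src.nk_model.nk_landscape import NKLandscape

    landscape = NKLandscape.from_dict({"00": 10, "01": 40, "10": 25, "11": 100})

    # Derived: n=2, k=1, m=2, payoff_type="int", max_val=100.0
    print(landscape.params.n, landscape.params.k, landscape.params.m)
    print(landscape.get_payoff(np.array([1, 1])))   # 100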
+
+    @classmethod
+    def clear_cache(cls) -> None:
+        """Clear the entire landscape cache."""
+        cls._cache.clear()
+
+    def get_payoff(self, state: np.ndarray) -> int | float:
+        """
+        Get the payoff for a given state.
+
+        Args:
+            state: Binary state vector of length N
+
+        Returns:
+            Payoff value for the given state
+
+        Raises:
+            AssertionError: If state length doesn't match N
+        """
+        assert (
+            len(state) == self.params.n
+        ), f"""State length ({len(state)})
+            must be equal to N ({self.params.n})."""
+        return self._payoff_lookup[tuple(state)]
+
+    def get_ordered_payoffs(self) -> List[int | float]:
+        """
+        Get the payoffs for all states in the landscape,
+        ordered by ascending coordinates.
+        """
+        return [
+            payoff
+            for _, payoff in sorted(
+                self._payoff_lookup.items(),
+                key=lambda x: binary_array_to_int(np.array(x[0])),
+            )
+        ]
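Continuing the from_dict sketch above, and assuming binary_array_to_int (from utils/binary_conversion.py) reads the coordinate array most-significant-bit first, the states are ordered 00, 01, 10, 11:

    print(landscape.get_ordered_payoffs())   # [10, 40, 25, 100]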
+
+    def _create_landscape(self) -> list[Item]:
+        """
+        Create the NK landscape with transformed payoffs.
+
+        This method applies the following transformations:
+        1. Generate raw NK model payoffs
+        2. Normalize (min=0, max=1)
+        3. Apply power scaling
+        4. Scale to max_val if specified
+
+        Returns:
+            List of Item objects with transformed payoffs
+        """
+        # Generate raw NK model items
+        raw_items = self._create_nk_model()
+
+        # Extract payoffs
+        payoffs = np.array([item.payoff for item in raw_items])
+
+        # Normalize payoffs
+        min_payoff = np.min(payoffs)
+        max_payoff = np.max(payoffs)
+        if max_payoff > min_payoff:
+            payoffs = (payoffs - min_payoff) / (max_payoff - min_payoff)
+        else:
+            payoffs = np.zeros_like(payoffs)
+
+        # Apply power scaling
+        scaled_payoffs = payoffs**self.params.power
+
+        # Scale to max_val if specified
+        if self.params.max_val is not None:
+            max_value = np.max(scaled_payoffs)
+            if max_value > 0:
+                scaled_payoffs = scaled_payoffs * (
+                    self.params.max_val / max_value
+                )
+
+        # Convert payoffs to the specified type
+        if self.params.payoff_type == "int":
+            scaled_payoffs = np.round(scaled_payoffs).astype(int)
+        else:
+            scaled_payoffs = scaled_payoffs.astype(float)
+
+        # Create items with transformed payoffs
+        payoff_type_class = self.params.payoff_type_class
+        return [
+            Item(
+                coordinates=item.coordinates, payoff=payoff_type_class(payoff)
+            )
+            for item, payoff in zip(raw_items, scaled_payoffs)
+        ]
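The same transformation chain, run by hand on three illustrative raw payoffs (standalone NumPy, not the class):

    import numpy as np

    raw = np.array([0.2, 0.5, 0.8])      # raw NK payoffs
    power, max_val = 2.0, 100.0

    norm = (raw - raw.min()) / (raw.max() - raw.min())   # [0.0, 0.5, 1.0]
    scaled = norm ** power                               # [0.0, 0.25, 1.0]
    scaled = scaled * (max_val / scaled.max())           # [0.0, 25.0, 100.0]
    print(np.round(scaled).astype(int))                  # [0 25 100] for payoff_type "int"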
+
+    def _create_nk_model(self) -> list[Item]:
+        """
+        Create the NK model by generating interaction matrices and
+        computing items list for all states.
+
+        Returns:
+            List of Item objects containing coordinates and raw payoff
+            values for all possible states in the NK model.
+        """
+        # Generate node interaction matrix
+        interaction_method_map = {
+            NeighborhoodMethod.RANDOM: (self._get_node_interaction_matrix),
+            NeighborhoodMethod.NEAREST: (
+                self._get_nearest_neighbor_interaction_matrix
+            ),
+            NeighborhoodMethod.RING: (self._get_ring_node_interaction_matrix),
+        }
+        node_interaction_matrix = interaction_method_map[
+            self.params.neighborhood
+        ]()
+
+        # Generate subcombination payoff matrix
+        base = np.random.uniform(
+            low=0,
+            high=1,
+            size=((self.M,) + (2,) * (self.K + 1)),
+        )
+        convolution_method_map = {
+            ConvolutionMethod.RANDOM: self._get_random_convolution_matrix,
+            ConvolutionMethod.SYMMETRIC: (
+                self._get_symmetric_convolution_matrix
+            ),
+        }
+        subcombination_payoff_matrix = convolution_method_map[
+            self.params.convolution
+        ](base=base)
+
+        # Compute items list
+        coordinates = itertools.product([0, 1], repeat=self.N)
+
+        def get_payoff(
+            coordinate: tuple[int, ...],
+            interaction_matrix: np.ndarray,
+            subcombination_matrix: np.ndarray,
+        ) -> float:
+            # Sum up the payoffs for all digits of the coordinates
+            return sum(
+                # For each dimension, get the interacting node IDs,
+                # extract their coordinates values, and look up the
+                # payoff from the subcombination matrix
+                subcombination_matrix[dimension][
+                    tuple(
+                        [
+                            coordinate[interact_node]
+                            for interact_node in interaction_matrix[dimension]
+                        ]
+                    )
+                ]
+                for dimension in range(len(coordinate))
+            )
+
+        return [
+            Item(
+                coordinates=np.array(coordinate),
+                payoff=get_payoff(
+                    coordinate=coordinate,
+                    interaction_matrix=node_interaction_matrix,
+                    subcombination_matrix=subcombination_payoff_matrix,
+                ),
+            )
+            for coordinate in coordinates
+        ]
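How a single state's raw payoff is read out of those two matrices, with hand-picked values (N=3, K=1; illustrative only, mirroring the inner get_payoff logic):

    import numpy as np

    coordinate = (1, 0, 1)

    # Row d lists the K+1 node indices whose bits index dimension d's table.
    interaction_matrix = np.array([[0, 1], [1, 2], [2, 0]])

    # One (2, 2) payoff table per dimension.
    subcombination_matrix = np.array([
        [[1, 2], [3, 4]],
        [[5, 6], [7, 8]],
        [[9, 10], [11, 12]],
    ])

    payoff = sum(
        subcombination_matrix[d][tuple(coordinate[j] for j in interaction_matrix[d])]
        for d in range(len(coordinate))
    )
    # dim 0 reads bits (c0, c1) = (1, 0) -> 3
    # dim 1 reads bits (c1, c2) = (0, 1) -> 6
    # dim 2 reads bits (c2, c0) = (1, 1) -> 12
    print(payoff)   # 21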
+
+    def _get_random_convolution_matrix(
+        self,
+        base: np.ndarray,
+    ) -> np.ndarray:
+        """
+        Apply random convolution method to create final subcombination
+        matrix.
+
+        Args:
+            base: Base matrix of shape (m, 2, 2, ..., 2).
+
+        Returns:
+            np.ndarray: A multidimensional array of shape
+            (n, 2, 2, ..., 2) containing random payoff values.
+        """
+        matrix = np.empty(((self.N,) + (2,) * (self.K + 1)), dtype=np.float64)
+        for i in range(self.N):
+            matrix[i] = base[np.random.randint(0, base.shape[0])]
+        return matrix
+
+    def _get_symmetric_convolution_matrix(
+        self,
+        base: np.ndarray,
+    ) -> np.ndarray:
+        """
+        Apply symmetric convolution method to create final subcombination
+        matrix.
+
+        Args:
+            base: Base matrix of shape (m, 2, 2, ..., 2).
+
+        Returns:
+            np.ndarray: A multidimensional array of shape
+            (n, 2, 2, ..., 2) containing random payoff values.
+        """
+        assert self.M == 2, "Symmetric convolution requires m=2"
+        assert self.N % 2 == 0, "Symmetric convolution requires n to be even"
+        ensemble = ensemble_builder(
+            assembler=assembler_v1_sym,
+            n=16,
+            max_len=self.N,
+            vectors=[[0], [1]],
+        )
+        filtered_ensemble = [
+            vector for vector in ensemble if len(vector) == self.N
+        ]
+        convolution_vector = random.choice(filtered_ensemble)
+        matrix = np.empty(((self.N,) + (2,) * (self.K + 1)), dtype=np.float64)
+        for i in range(self.N):
+            matrix[i] = base[convolution_vector[i]]
+        return matrix
+
+    def _get_node_interaction_matrix(self) -> np.ndarray:
+        """
+        Generate an interaction matrix for the NK model.
+        This matrix defines which nodes in our landscape interact
+        with each other (are considered neighbors).
+        The n rows of this matrix correspond to the n nodes in
+        our landscape.
+        The k+1 columns of this matrix correspond to the k+1
+        possible neighbors for each node.
+
+        Returns:
+            np.ndarray: An N x (K + 1) matrix where each row has
+            K + 1 indices that are the neighbors of the node.
+
+        Note:
+            This function ensures that each node interacts with K
+            other distinct nodes, creating a sparse interaction
+            matrix for the NK model landscape.
+        """
+        # Initialize interaction matrix with ones on the diagonal
+        interaction_matrix = np.zeros((self.N, self.K + 1), dtype=np.int64)
+
+        # Choose K non-zero entries for each row randomly
+        for row_index in range(self.N):
+            # Get indices of all columns except the diagonal element
+            indices = list(range(self.N))
+            indices.remove(row_index)
+
+            # Choose K non-zero entries for the row randomly
+            chosen_indices = np.random.choice(
+                indices, size=self.K, replace=False
+            )
+            chosen_indices = np.append(chosen_indices, row_index)
+            np.random.shuffle(chosen_indices)
+
+            interaction_matrix[row_index] = chosen_indices
+
+        return interaction_matrix
+
+    def _get_ring_node_interaction_matrix(self) -> np.ndarray:
+        """
+        Generate a ring interaction matrix for the NK model.
+        """
+        interaction_matrix = np.zeros((self.N, self.K + 1), dtype=np.int64)
+
+        for row_index in range(self.N):
+            indices = np.arange(2 * self.N)
+            indices = np.array(
+                [i for i in indices if i - row_index in range(0, self.K + 1)]
+            )
+            indices = indices % self.N
+            interaction_matrix[row_index] = indices
+        return interaction_matrix
+
+    def _get_nearest_neighbor_interaction_matrix(self) -> np.ndarray:
+        """
+        Generate a nearest neighbor interaction matrix for the NK model.
+        """
+        interaction_matrix = np.zeros((self.N, self.K + 1), dtype=np.int64)
+        for row_index in range(self.N):
+            indices = np.array(
+                range(
+                    row_index - floor((self.K + 1) / 2),
+                    row_index + ceil((self.K + 1) / 2),
+                )
+            )
+            indices = indices % self.N
+            interaction_matrix[row_index] = indices
+        return interaction_matrix