accelforge-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- accelforge/__init__.py +21 -0
- accelforge/_accelerated_imports.py +16 -0
- accelforge/_deprecate/_simanneal/evalmapping.py +271 -0
- accelforge/_deprecate/_simanneal/mapspaceglobals.py +298 -0
- accelforge/_deprecate/_simanneal/simanneal.py +666 -0
- accelforge/_deprecate/_simanneal/tracking.py +105 -0
- accelforge/_deprecate/_simanneal/wrappers.py +218 -0
- accelforge/_deprecate/_simanneal2/__init__.py +7 -0
- accelforge/_deprecate/_simanneal2/simanneal.py +493 -0
- accelforge/_deprecate/_simanneal2/tracking.py +116 -0
- accelforge/_deprecate/compatibility_util.py +181 -0
- accelforge/_deprecate/layerdeduplication/__init__.py +2 -0
- accelforge/_deprecate/layerdeduplication/group_similar_einsums.py +160 -0
- accelforge/_deprecate/layerdeduplication/grouped_einsums.py +84 -0
- accelforge/_deprecate/mapping_filter_tags/__init__.py +2 -0
- accelforge/_deprecate/mapping_filter_tags/ffmt.py +212 -0
- accelforge/_deprecate/mapping_filter_tags/onesplit.py +24 -0
- accelforge/_deprecate/mapping_filter_tags/util.py +24 -0
- accelforge/_deprecate/tags.py +69 -0
- accelforge/_deprecate/viz/__init__.py +0 -0
- accelforge/_deprecate/viz/interactive.py +159 -0
- accelforge/_deprecate/viz/reservationtree.py +307 -0
- accelforge/_deprecate/viz/ski_slope.py +88 -0
- accelforge/_version.py +15 -0
- accelforge/examples.py +39 -0
- accelforge/frontend/__init__.py +10 -0
- accelforge/frontend/_binding.py +129 -0
- accelforge/frontend/_workload_isl/__init__.py +2 -0
- accelforge/frontend/_workload_isl/_isl.py +149 -0
- accelforge/frontend/_workload_isl/_symbolic.py +141 -0
- accelforge/frontend/arch copy.py +1544 -0
- accelforge/frontend/arch.py +1642 -0
- accelforge/frontend/config.py +63 -0
- accelforge/frontend/mapper/__init__.py +5 -0
- accelforge/frontend/mapper/ffm.py +126 -0
- accelforge/frontend/mapper/mapper.py +7 -0
- accelforge/frontend/mapper/metrics.py +30 -0
- accelforge/frontend/mapping/__init__.py +1 -0
- accelforge/frontend/mapping/mapping.py +1736 -0
- accelforge/frontend/model.py +14 -0
- accelforge/frontend/renames.py +150 -0
- accelforge/frontend/spec copy.py +230 -0
- accelforge/frontend/spec.py +301 -0
- accelforge/frontend/variables.py +12 -0
- accelforge/frontend/workload.py +952 -0
- accelforge/mapper/FFM/__init__.py +9 -0
- accelforge/mapper/FFM/_join_pmappings/__init__.py +0 -0
- accelforge/mapper/FFM/_join_pmappings/compatibility.py +653 -0
- accelforge/mapper/FFM/_join_pmappings/compress_pmappings.py +140 -0
- accelforge/mapper/FFM/_join_pmappings/join_pmappings.py +703 -0
- accelforge/mapper/FFM/_join_pmappings/pmapping_dataframe.py +901 -0
- accelforge/mapper/FFM/_join_pmappings/pmapping_group.py +337 -0
- accelforge/mapper/FFM/_make_pmappings/contraints/__init__.py +0 -0
- accelforge/mapper/FFM/_make_pmappings/contraints/constraints.py +360 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/__init__.py +1 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_loops.py +373 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_pmapping_templates.py +463 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_reservations.py +95 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_storage_order.py +382 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_storages.py +155 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings.py +411 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/__init__.py +1 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/make_pmappings_from_templates.py +407 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/make_tile_shapes.py +1681 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/run_model.py +170 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/symbol_relations.py +174 -0
- accelforge/mapper/FFM/_make_pmappings/pmapper_job.py +282 -0
- accelforge/mapper/FFM/_pareto_df/df_convention.py +273 -0
- accelforge/mapper/FFM/_pareto_df/pareto copy.py +836 -0
- accelforge/mapper/FFM/_pareto_df/pareto.py +508 -0
- accelforge/mapper/FFM/data.py +61 -0
- accelforge/mapper/FFM/main copy.py +236 -0
- accelforge/mapper/FFM/main.py +208 -0
- accelforge/mapper/FFM/mappings.py +510 -0
- accelforge/mapper/FFM/pmappings.py +310 -0
- accelforge/mapper/__init__.py +4 -0
- accelforge/mapper.py +0 -0
- accelforge/model/__init__.py +1 -0
- accelforge/model/_looptree/__init__.py +0 -0
- accelforge/model/_looptree/accesses.py +335 -0
- accelforge/model/_looptree/capacity/__init__.py +1 -0
- accelforge/model/_looptree/capacity/aggregators.py +36 -0
- accelforge/model/_looptree/capacity/capacity.py +47 -0
- accelforge/model/_looptree/energy.py +150 -0
- accelforge/model/_looptree/equivalent_ranks.py +29 -0
- accelforge/model/_looptree/latency/__init__.py +1 -0
- accelforge/model/_looptree/latency/latency.py +98 -0
- accelforge/model/_looptree/latency/memory.py +120 -0
- accelforge/model/_looptree/latency/processors.py +92 -0
- accelforge/model/_looptree/mapping_utilities.py +71 -0
- accelforge/model/_looptree/reuse/__init__.py +4 -0
- accelforge/model/_looptree/reuse/isl/__init__.py +1 -0
- accelforge/model/_looptree/reuse/isl/des.py +59 -0
- accelforge/model/_looptree/reuse/isl/isl_functions.py +374 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/__init__.py +4 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/analyze_mapping.py +297 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/skews_from_mapping.py +236 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/tiling.py +685 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/types.py +188 -0
- accelforge/model/_looptree/reuse/isl/spatial.py +260 -0
- accelforge/model/_looptree/reuse/isl/temporal.py +182 -0
- accelforge/model/_looptree/reuse/symbolic/__init__.py +1 -0
- accelforge/model/_looptree/reuse/symbolic/symbolic copy 2.py +1346 -0
- accelforge/model/_looptree/reuse/symbolic/symbolic copy.py +1408 -0
- accelforge/model/_looptree/reuse/symbolic/symbolic.py +1396 -0
- accelforge/model/_looptree/run.py +122 -0
- accelforge/model/_looptree/types.py +26 -0
- accelforge/model/_looptree/visualization/__init__.py +0 -0
- accelforge/model/_looptree/visualization/occupancy.py +11 -0
- accelforge/model/main.py +222 -0
- accelforge/plotting/__init__.py +2 -0
- accelforge/plotting/mappings.py +219 -0
- accelforge/plotting/specs.py +57 -0
- accelforge/util/__init__.py +4 -0
- accelforge/util/_base_analysis_types.py +24 -0
- accelforge/util/_basetypes.py +1089 -0
- accelforge/util/_frozenset.py +36 -0
- accelforge/util/_isl.py +29 -0
- accelforge/util/_itertools.py +14 -0
- accelforge/util/_mathfuncs.py +57 -0
- accelforge/util/_parse_expressions.py +339 -0
- accelforge/util/_picklecache.py +32 -0
- accelforge/util/_setexpressions.py +268 -0
- accelforge/util/_sympy/__init__.py +0 -0
- accelforge/util/_sympy/broadcast_max.py +18 -0
- accelforge/util/_visualization.py +112 -0
- accelforge/util/_yaml.py +579 -0
- accelforge/util/parallel.py +193 -0
- accelforge-0.0.1.dist-info/METADATA +64 -0
- accelforge-0.0.1.dist-info/RECORD +258 -0
- accelforge-0.0.1.dist-info/WHEEL +5 -0
- accelforge-0.0.1.dist-info/licenses/LICENSE +19 -0
- accelforge-0.0.1.dist-info/top_level.txt +5 -0
- docs/_build/html/_sources/fastfusion.frontend.mapper.rst.txt +37 -0
- docs/_build/html/_sources/fastfusion.frontend.rst.txt +70 -0
- docs/_build/html/_sources/fastfusion.frontend.workload.rst.txt +21 -0
- docs/_build/html/_sources/fastfusion.mapper.FFM.rst.txt +37 -0
- docs/_build/html/_sources/fastfusion.mapper.rst.txt +18 -0
- docs/_build/html/_sources/fastfusion.rst.txt +20 -0
- docs/_build/html/_sources/fastfusion.util.rst.txt +21 -0
- docs/_build/html/_sources/index.rst.txt +87 -0
- docs/_build/html/_sources/modules.rst.txt +7 -0
- docs/_build/html/_sources/notes/citation.rst.txt +45 -0
- docs/_build/html/_sources/notes/definitions.rst.txt +43 -0
- docs/_build/html/_sources/notes/faqs.rst.txt +39 -0
- docs/_build/html/_sources/notes/modeling/accelerator_energy_latency.rst.txt +72 -0
- docs/_build/html/_sources/notes/modeling/component_energy_area.rst.txt +96 -0
- docs/_build/html/_sources/notes/modeling/mapping.rst.txt +100 -0
- docs/_build/html/_sources/notes/modeling.rst.txt +33 -0
- docs/_build/html/_sources/notes/parsing/arithmetic_parsing.rst.txt +136 -0
- docs/_build/html/_sources/notes/parsing/setexpressions.rst.txt +63 -0
- docs/_build/html/_sources/notes/parsing/yaml_parsing.rst.txt +176 -0
- docs/_build/html/_sources/notes/quickstart_and_installation.rst.txt +9 -0
- docs/_build/html/_sources/notes/spec/architecture.rst.txt +133 -0
- docs/_build/html/_sources/notes/spec/mapping.rst.txt +12 -0
- docs/_build/html/_sources/notes/spec/workload.rst.txt +83 -0
- docs/_build/html/_sources/notes/spec.rst.txt +36 -0
- docs/source/_ext/include_attrs.py +213 -0
- docs/source/_ext/include_docstring.py +364 -0
- docs/source/_ext/include_functions.py +154 -0
- docs/source/_ext/include_notebook.py +131 -0
- docs/source/_ext/include_yaml.py +119 -0
- docs/source/_ext/inherited_attributes.py +222 -0
- docs/source/_ext/paths.py +4 -0
- docs/source/conf.py +79 -0
- examples/arches/compute_in_memory/_include.yaml +74 -0
- examples/arches/compute_in_memory/_include_functions.py +229 -0
- examples/arches/compute_in_memory/_load_spec.py +57 -0
- examples/arches/compute_in_memory/components/c2c_multiplier.py +181 -0
- examples/arches/compute_in_memory/components/dac_c2c_r2r.py +605 -0
- examples/arches/compute_in_memory/components/misc.py +195 -0
- examples/arches/compute_in_memory/components/util/bit_functions.py +51 -0
- examples/arches/compute_in_memory/components/zero_comparator.py +92 -0
- examples/arches/compute_in_memory/isaac.yaml +233 -0
- examples/arches/compute_in_memory/memory_cells/ecram_demo.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/rram_example.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/rram_isaac_isca_2016.yaml +64 -0
- examples/arches/compute_in_memory/memory_cells/rram_neurosim_default.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/rram_raella_isca_2023.yaml +70 -0
- examples/arches/compute_in_memory/memory_cells/rram_wan_nature_2022.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_colonnade_jssc_2021.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_example.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_jia_jssc_2020.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_sinangil_jssc_2021.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_wang_vlsi_2022.yaml +63 -0
- examples/arches/compute_in_memory/wang_vlsi_2022.yaml +289 -0
- examples/arches/eyeriss.yaml +68 -0
- examples/arches/fanout_variations/at_glb.yaml +31 -0
- examples/arches/fanout_variations/at_glb_with_fanout_node.yaml +34 -0
- examples/arches/fanout_variations/at_mac.yaml +31 -0
- examples/arches/fanout_variations/at_mac_with_constraints.yaml +38 -0
- examples/arches/fanout_variations/at_mac_with_fanout_node.yaml +34 -0
- examples/arches/nvdla.yaml +47 -0
- examples/arches/simple.yaml +28 -0
- examples/arches/tpu_v4i.yaml +67 -0
- examples/mappings/unfused_matmuls_to_simple.yaml +33 -0
- examples/misc/component_annotated.yaml +33 -0
- examples/workloads/gpt3_6.7B.yaml +124 -0
- examples/workloads/matmuls.yaml +20 -0
- examples/workloads/mobilenet_28.yaml +81 -0
- examples/workloads/mobilenet_various_separate.yaml +106 -0
- examples/workloads/three_matmuls_annotated.yaml +59 -0
- notebooks/.ipynb_checkpoints/fastfusion_arch_study_michael-checkpoint.ipynb +359 -0
- notebooks/compute_in_memory/_scripts.py +339 -0
- notebooks/compute_in_memory/isaac.guide.ipynb +270 -0
- notebooks/compute_in_memory/wang_vlsi_2022.ipynb +602 -0
- notebooks/paths.py +4 -0
- notebooks/tutorials/.ipynb_checkpoints/1_FFM-checkpoint.ipynb +3110 -0
- notebooks/tutorials/FFM.ipynb +3498 -0
- notebooks/tutorials/_include.py +48 -0
- notebooks/tutorials/component_energy_area.ipynb +363 -0
- tests/Q_mapping.yaml +38 -0
- tests/__init__.py +0 -0
- tests/conv.mapping.yaml +27 -0
- tests/conv.workload.yaml +13 -0
- tests/conv_sym.mapping.yaml +43 -0
- tests/copy.mapping.yaml +35 -0
- tests/copy.workload.yaml +15 -0
- tests/distribuffers/__init__.py +0 -0
- tests/distribuffers/multicast/test_cases.yaml +482 -0
- tests/distribuffers/spec/binding/valid_bindings.yaml +97 -0
- tests/distribuffers/spec/distributed.yaml +100 -0
- tests/distribuffers/spec/logical_arch.yaml +32 -0
- tests/distribuffers/spec/physical_arch.yaml +69 -0
- tests/distribuffers/test_binding.py +48 -0
- tests/frontend/__init__.py +0 -0
- tests/frontend/test_mapping_viz.py +52 -0
- tests/mapper/__init__.py +0 -0
- tests/mapper/configs/conv1d/conv1d.mapping.yaml +31 -0
- tests/mapper/configs/conv1d/conv1d.workload.yaml +11 -0
- tests/mapper/configs/two_conv1d/two_conv1d.expected.yaml +38 -0
- tests/mapper/configs/two_conv1d/two_conv1d.mapping.yaml +54 -0
- tests/mapper/configs/two_conv1d/two_conv1d.workload.yaml +19 -0
- tests/mapper/test_mapping_to_isl.py +90 -0
- tests/mapper/test_spatial_reuse_analysis.py +67 -0
- tests/mapper/test_temporal_reuse_analysis.py +56 -0
- tests/mapper/util.py +58 -0
- tests/matmul.mapping.yaml +29 -0
- tests/matmul.workload.yaml +12 -0
- tests/matmul_spatial.mapping.yaml +44 -0
- tests/mha.renames.yaml +65 -0
- tests/mha.workload.yaml +67 -0
- tests/mha.yaml +59 -0
- tests/mha_full.workload.yaml +67 -0
- tests/mobilenet.workload.yaml +35 -0
- tests/mobilenet_long.workload.yaml +64 -0
- tests/pmappingcache.py +24 -0
- tests/processing_stage.arch.yaml +40 -0
- tests/snowcat.arch.yaml +36 -0
- tests/test_ffm_join_pmappings.py +106 -0
- tests/test_ffm_make_pmappings.py +82 -0
- tests/test_ffm_make_tile_shapes.py +49 -0
- tests/test_mapper.py +100 -0
- tests/test_model.py +37 -0
- tests/test_plotting.py +72 -0
- tests/test_processing_stage.py +46 -0
- tests/test_symbolic_model.py +248 -0
- tests/test_workload.py +141 -0

accelforge/_deprecate/_simanneal2/simanneal.py

@@ -0,0 +1,493 @@
+import inspect
+import os
+import random
+from typing import Callable, Generator
+from fastfusion import arch, util
+from fastfusion import Spec
+from fastfusion.frontend.mapper.metrics import Metrics
+from fastfusion.mapper.FFM.pmappings import MultiEinsumPmappings
+from fastfusion.mapper.FFM._join_pmappings.compress_pmappings import (
+    compress_einsum2pmappings,
+    decompress_pmappings,
+)
+from fastfusion.frontend.workload import EinsumName
+from fastfusion.frontend.mapping import Mapping
+from fastfusion.mapper.FFM import PmappingGroup
+from fastfusion.mapper.FFM._pareto_df.df_convention import (
+    MAPPING_COLUMN,
+    col2nameloop,
+)
+from fastfusion.mapper.FFM._join_pmappings.pmapping_group import PmappingDataframe
+from fastfusion.mapper.FFM._make_pmappings.make_pmappings import (
+    get_rank_variable_bounds_for_all_einsums,
+)
+from fastfusion._accelerated_imports import pd
+import joblib
+from fastfusion.mapper.FFM._join_pmappings.compatibility import Compatibility
+from fastfusion.mapper._simanneal2.tracking import EvaluationsScoreTracker
+
+# Simulated annealing algorithm
+# -----------------------------
+# Given:
+# - Pmappings for each Einsum
+
+# 1. Make a compatibility -> PmappingGroups dict for each Einsum
+# 2. While True:
+#    a. Randomly change a compatibility choice for one Einsum
+
+
+# Functions:
+# - Given compatibility choices & pmapping index numbers, return a score
+# - Given compatibility choices & pmapping index numbers, make sure all compatibilities
+#   & indices match
+
+
+class FailedMutation(Exception):
+    pass
+
+
+class MapspaceGlobals:
+    def __init__(
+        self,
+        einsum2sims: dict[EinsumName, list[PmappingGroup]],
+        resource2capacity: dict[str, int],
+        aliased_tensors: dict[str, set[str]],
+        objective_function: Callable[[pd.Series], float],
+        tracker: EvaluationsScoreTracker,
+    ) -> None:
+        self.einsum2sims: dict[EinsumName, list[PmappingGroup]] = einsum2sims
+        self.resource2capacity: dict[str, int] = resource2capacity
+        self.aliased_tensors: dict[str, set[str]] = aliased_tensors
+        self.objective_function: Callable[[pd.Series], float] = objective_function
+        self.tracker: EvaluationsScoreTracker = tracker
+
+
+class SimAnnealMapping:
+    def __init__(self, mapspace_globals: MapspaceGlobals) -> None:
+        # self.einsum2sim: dict[EinsumName, PmappingGroup] = {
+        #     e: random.choice(s) for e, s in mapspace_globals.einsum2sims.items()
+        # }
+        self.mapspace_globals: MapspaceGlobals = mapspace_globals
+        self.einsum2sim: dict[EinsumName, PmappingGroup] = {
+            e: random.choice(s) for e, s in mapspace_globals.einsum2sims.items()
+        }
+        self.einsum2index: dict[EinsumName, int] = {e: 0 for e in self.einsum2sim}
+        self.ensure_match(list(self.einsum2sim.keys())[0])
+        for e in self.einsum2sim:
+            self.randomize_index(e)
+        self._prev_score = None
+
+    def mutate(self) -> None:
+        # Pick a random einsum
+        e = random.choice(list(self.einsum2sim.keys()))
+
+        random.choice(
+            [
+                self.randomize_index,
+                self.randomize_sim,
+            ]
+        )(e)
+
+        self.ensure_match(e)
+
+    def randomize_index(self, e: EinsumName) -> None:
+        self._prev_score = None
+        self.einsum2index[e] = random.randint(0, 10000000000000)
+        self.mapspace_globals.tracker.add_evaluation(1, float("inf"))
+
+    def randomize_sim(self, e: EinsumName) -> None:
+        self.einsum2sim[e] = random.choice(self.mapspace_globals.einsum2sims[e])
+        self.randomize_index(e)
+
+    def _einsum_position_in_list(self, e: EinsumName) -> int:
+        return list(self.einsum2sim.keys()).index(e)
+
+    def ensure_match(
+        self,
+        lock_choice_for_einsum: EinsumName,
+    ) -> None:
+
+        new_einsum2sim: dict[EinsumName, PmappingGroup] = {}
+
+        # Grab all the compatibilities that match
+        for i, (e, s) in enumerate(list(self.einsum2sim.items())):
+            if e == lock_choice_for_einsum:
+                new_einsum2sim[e] = s
+                continue
+
+            following_tensors = self._einsum2tensors(range(i + 1, len(self.einsum2sim)))
+
+            to_check = [(s2, s) for s2 in new_einsum2sim.values()]
+
+            if i < self._einsum_position_in_list(lock_choice_for_einsum):
+                to_check.append((s, self.einsum2sim[lock_choice_for_einsum]))
+            else:
+                to_check.append((self.einsum2sim[lock_choice_for_einsum], s))
+
+            for left, right in to_check:
+                c = left.compatibility.clear_dead_tensors(
+                    right.compatibility.tensor_names
+                ).clear_tile_patterns_and_reservation_indices()
+                c2 = right.compatibility.clear_dead_tensors(
+                    left.compatibility.tensor_names
+                ).clear_tile_patterns_and_reservation_indices()
+                if c != c2:
+                    break
+
+                c = left.compatibility.clear_dead_tensors(
+                    following_tensors
+                ).clear_tile_patterns_and_reservation_indices()
+                c2 = right.compatibility.clear_dead_tensors(
+                    following_tensors
+                ).clear_tile_patterns_and_reservation_indices()
+
+                # Can't merge. I have more loops than the next, so my dataflow can't be
+                # carried through a LoopTree to where it's needed.
+                if c.n_loops > c2.n_loops:
+                    break
+
+            else:
+                new_einsum2sim[e] = s
+
+        # Grab compatibilities that don't match
+        def _matches(s: PmappingGroup, c: Compatibility) -> bool:
+            cs = s.compatibility.clear_dead_tensors(
+                c.tensor_names
+            ).clear_tile_patterns_and_reservation_indices()
+            cn = c.clear_dead_tensors(
+                s.compatibility.tensor_names
+            ).clear_tile_patterns_and_reservation_indices()
+            return cs == cn
+
+        for e, pmapping_groups in self.mapspace_globals.einsum2sims.items():
+            if e in new_einsum2sim:
+                continue
+
+            for s in new_einsum2sim.values():
+                pmapping_groups = [
+                    s2 for s2 in pmapping_groups if _matches(s2, s.compatibility)
+                ]
+
+            if not pmapping_groups:
+                # print(f"No compatible PmappingGroups found for {e}")
+                raise FailedMutation(f"No compatible PmappingGroups found for {e}")
+
+            new_einsum2sim[e] = random.choice(pmapping_groups)
+            self.randomize_index(e)
+
+        # pmapping_groups = self.mapspace_globals.einsum2sims[e]
+        # [s.compatibility for s in self.einsum2sim.values()]
+        # [s.compatibility for s in new_einsum2sim.values()]
+        # {e: s.compatibility for e, s in new_einsum2sim.items()}
+
+        assert len(new_einsum2sim) == len(self.einsum2sim)
+        assert set(new_einsum2sim.keys()) == set(self.einsum2sim.keys())
+        self.einsum2sim = {k: new_einsum2sim[k] for k in self.einsum2sim.keys()}
+
+    def _einsum2tensors(
+        self, e: EinsumName | int | Generator[EinsumName | int, None, None]
+    ) -> set[str]:
+        if isinstance(e, Generator) or isinstance(e, range):
+            return set.union(set(), *(self._einsum2tensors(i) for i in e))
+        if isinstance(e, int):
+            e = list(self.einsum2sim.keys())[e]
+        return self.einsum2sim[e].compatibility.tensor_names
+
+    def _access_index(self, e: EinsumName, index_override: int | None = None):
+        s = self.einsum2sim[e]
+        data = s.mappings.data
+        i = self.einsum2index[e] if index_override is None else index_override
+        i %= len(data)
+        return PmappingGroup(
+            compatibility=s.compatibility,
+            mappings=PmappingDataframe(data.iloc[i : i + 1]),
+        )
+
+    def get_score(self) -> float:
+        if self._prev_score is not None:
+            return self._prev_score
+
+        items: list[tuple[EinsumName, PmappingGroup]] = list(self.einsum2sim.items())
+        joined: PmappingGroup = items.pop(0)[1]
+        for i, (e, s) in enumerate(items):
+            right_tensors = self._einsum2tensors(i)
+            live_tensors = self._einsum2tensors(range(i + 1, len(items)))
+
+            joined.compatibility = joined.compatibility.clear_dead_tensors(
+                live_tensors | right_tensors
+            )
+
+            def _merge_next(
+                left: PmappingGroup,
+                right: PmappingGroup,
+                apply_resource_limit: bool = True,
+            ) -> PmappingGroup:
+                try:
+                    return left.merge_next(
+                        right,
+                        live_tensors=live_tensors,
+                        live_tensors_with_right=live_tensors | right_tensors,
+                        aliased_tensors=self.mapspace_globals.aliased_tensors,
+                        compatibility_joined=joined.compatibility.merge_next(
+                            s.compatibility, live_tensors
+                        ),
+                        resource2capacity=(
+                            self.mapspace_globals.resource2capacity
+                            if apply_resource_limit
+                            else None
+                        ),
+                        drop_valid_reservations=True,
+                        delay=False,
+                    )
+                except ValueError as err:
+                    # print(err)
+                    raise FailedMutation(f"No valid pmappings: {err}")
+
+            # Try to merge using the index we already have set
+            joined_new = _merge_next(joined, self._access_index(e))
+            if len(joined_new.mappings.data) == 1:
+                joined = joined_new
+                # print(' '.join(f'{k}={v}' for k, v in dict(joined.mappings.data.iloc[0]).items() if col2nameloop(k)))
+                continue
+            if len(joined_new.mappings.data) > 1:
+                raise ValueError(
+                    f"Got {len(joined_new.mappings.data)} pmappings for {e}"
+                )
+
+            # No valid pmappings! Merge all possible, then pick one
+            self.mapspace_globals.tracker.add_evaluation(1, float("inf"))
+            s = self.einsum2sim[e]
+            s.mappings.data["_INDEX"] = list(range(len(s.mappings.data)))
+            joined_new = _merge_next(
+                joined,
+                s,
+                apply_resource_limit=False,
+            )
+            s.mappings._data = s.mappings.data.drop(columns=["_INDEX"])
+            try:
+                i = random.choice(list(set(joined_new.mappings.data["_INDEX"])))
+            except IndexError:
+                raise FailedMutation(f"No valid pmappings for {e}")
+
+            # Now that we've picked, merge with the index we just set
+            joined_new = _merge_next(joined, self._access_index(e, i))
+
+            if len(joined_new.mappings.data) == 1:
+                # If it worked, set the index
+                self.einsum2index[e] = i
+                joined = joined_new
+                # print(' '.join(f'{k}={v}' for k, v in dict(joined.mappings.data.iloc[0]).items() if col2nameloop(k)))
+                continue
+
+            if len(joined_new.mappings.data) > 1:
+                raise ValueError(
+                    f"Got {len(joined_new.mappings.data)} pmappings for {e}"
+                )
+
+            raise FailedMutation(
+                f"Got {len(joined_new.mappings.data)} pmappings for {e}"
+            )
+
+        assert len(joined.mappings.data) == 1
+        score = self.mapspace_globals.objective_function(joined.mappings.data.iloc[0])
+        self.mapspace_globals.tracker.add_evaluation(0, score)
+        self._prev_score = score
+        return score
+
+    def copy(self) -> "SimAnnealMapping":
+        s = SimAnnealMapping(self.mapspace_globals)
+        s.einsum2sim = self.einsum2sim.copy()
+        s.einsum2index = self.einsum2index.copy()
+        s._prev_score = self._prev_score
+        return s
+
+
+def get_random_mapping(mapspace_globals: MapspaceGlobals) -> SimAnnealMapping:
+    while True:
+        try:
+            s = SimAnnealMapping(mapspace_globals)
+            s.get_score()
+            return s
+        except FailedMutation:
+            if mapspace_globals.tracker.finished():
+                return None
+            continue
+
+
+def join_pmappings(
+    pmapping_groups: dict[EinsumName, list[PmappingGroup]],
+    spec: Spec,
+    resource2capacity: dict[str, int],
+    tracker: EvaluationsScoreTracker,
+    pop_size_per_thread: int,
+) -> PmappingGroup:
+    objective = spec.mapper.ffm.metrics
+    if objective == Metrics.ENERGY:
+        objective_function = lambda x: x["Total<SEP>energy"]
+    elif objective == Metrics.LATENCY:
+        objective_function = lambda x: x["Total<SEP>latency"]
+    elif objective == (Metrics.ENERGY | Metrics.LATENCY):
+        objective_function = lambda x: x["Total<SEP>energy"] * x["Total<SEP>latency"]
+    else:
+        raise ValueError(f"Unknown objective {objective}")
+    # print(f'Resource2capacity: {resource2capacity}')
+    mapspace_globals = MapspaceGlobals(
+        einsum2sims=pmapping_groups,
+        resource2capacity=resource2capacity,
+        aliased_tensors=spec.workload.get_tensor_copies(),
+        objective_function=objective_function,
+        tracker=tracker,
+    )
+
+    mappings = []
+    while len(mappings) < pop_size_per_thread:
+        mappings.append(get_random_mapping(mapspace_globals))
+        if tracker.finished():
+            return
+    print(f"Completed making initial population of {len(mappings)} mappings")
+
+    i = 0
+    while True:
+        if i > 1e6:
+            break
+        i += 1
+        for i, m in enumerate(list(mappings)):
+            try:
+                new = m.copy()
+                new.mutate()
+                if new.get_score() < m.get_score():
+                    mappings[i] = new
+
+                # 0 evaluations because they've been accounted for in the mutation and
+                # score calculation functions
+                if tracker.finished():
+                    break
+            except FailedMutation:
+                continue
+
+    # else:
+    # for einsum_name, sim in simanneal_mapping.einsum2sim.items():
+    # print(f"Einsum {einsum_name}, index {simanneal_mapping.einsum2index[einsum_name]}")
+    # for c in sim.compatibility.tensors:
+    # print(f'\t{c}')
+
+    # df = sim.mappings.data.iloc[simanneal_mapping.einsum2index[einsum_name] % len(sim.mappings.data)]
+    # for s in sim.compatibility.symbols():
+    # print(f'\t{s} = {df[s]}')
+    # print(f"Iteration {i}: Score {new_score} (prev {prev_score})")
+    # raise ValueError("No valid mapping found")


+def get_n_tile_shapes(sim: PmappingGroup) -> int:
+    df = sim.mappings.data
+    symbols = sim.compatibility.symbols()
+    cols = [c for c in df.columns if c in symbols]
+    if not cols:
+        return 1
+    return len(df.groupby(cols).size())
+
+
+def join_pmappings(
+    spec: Spec,
+    pmappings: MultiEinsumPmappings,
+    max_evaluations: int = 1,
+    population_size=100,
+    score_target: float | None = None,
+) -> EvaluationsScoreTracker:
+    tracker = EvaluationsScoreTracker(
+        max_evaluations=max_evaluations / util.N_PARALLEL_PROCESSES,
+        stop_at_score=None,
+        print_period=1,
+    )
+
+    if score_target is not None:
+        tracker.multiply_score_by(1 / score_target)
+
+    pop_size_per_thread = population_size // util.N_PARALLEL_PROCESSES
+
+    # Multiply by the number of einsums
+    tracker.multiply_scale_by(len(pmappings.einsum2pmappings))
+
+    # Expected #pmappings before a Pareto-optimal one is found
+    # tracker.multiply_scale_by(pmappings._evaluated_pmappings_for_simanneal_baseline_compare() / pmappings.n_pareto_optimal_pmappings())
+    tracker.multiply_scale_by(
+        pmappings.n_evaluated_pmappings() / pmappings.n_pareto_optimal_pmappings()
+    )
+
+    # Normalize to the speed of the intra-Einsum pmapper
+    tracker.multiply_scale_by(1 / pmappings.n_evaluated_pmappings())
+
+    for einsum_name, einsum_pmappings in pmappings.einsum2pmappings.items():
+        total = sum(len(p.mappings.data) for p in einsum_pmappings)
+        n_compatibilities = len(einsum_pmappings)
+        print(
+            f"Einsum {einsum_name} has {total} pmappings with {n_compatibilities} compatibilities"
+        )
+        if total == 0:
+            raise ValueError(f"Einsum {einsum_name} has no pmappings")
+
+    print(f"TODO: Populate PmappingGroups with all permutations")
+
+    compressed, decompress_data = compress_einsum2pmappings(pmappings.einsum2pmappings)
+
+    permuted = {}
+    for einsum_name, einsum_sims in compressed.items():
+        for s in einsum_sims:
+            for c_perm, _ in s.compatibility.make_equivalent_permutations():
+                permuted.setdefault(einsum_name, []).append(
+                    PmappingGroup(
+                        compatibility=c_perm,
+                        mappings=s.mappings,
+                    )
+                )
+
+    tile_shapes = [
+        get_n_tile_shapes(s)
+        for pmapping_groups in compressed.values()
+        for s in pmapping_groups
+    ]
+
+    # average_tile_shapes = sum(tile_shapes) / len(tile_shapes)
+    # print(f"Average tile shapes: {average_tile_shapes}")
+    # tracker.multiply_scale_by(average_tile_shapes)
+
+    def parallel_join(
+        permuted: dict[EinsumName, list[PmappingGroup]],
+        spec: Spec,
+        resource2capacity: dict[str, int],
+        tracker: EvaluationsScoreTracker,
+        pop_size_per_thread: int,
+    ) -> EvaluationsScoreTracker:
+        join_pmappings(permuted, spec, resource2capacity, tracker, pop_size_per_thread)
+        return tracker
+
+    trackers = util.parallel(
+        joblib.delayed(parallel_join)(
+            permuted,
+            spec,
+            pmappings.resource2capacity,
+            tracker,
+            pop_size_per_thread,
+        )
+        for _ in range(util.N_PARALLEL_PROCESSES)
+    )
+
+    t0 = trackers[0]
+    for t in trackers[1:]:
+        t0.merge_with(t)
+
+    # for einsum_name in pmappings.einsum2pmappings:
+    # col = f"{einsum_name}<SEP>{MAPPING_COLUMN}"
+    # joined.data[col] = joined.data[col].apply(
+    # lambda x: pmappings.pmapping_objects[einsum_name][x]
+    # )
+
+    # rank_variable_bounds = get_rank_variable_bounds_for_all_einsums(spec)
+    # joined.data[f"Total<SEP>{MAPPING_COLUMN}"] = joined.data.apply(
+    # lambda row: MappingFromRow(row, spec, rank_variable_bounds), axis=1
+    # )
+    # # Fill nans with 0. We might get missing columns for some mapping entries if there
+    # # are energy entries for some pmappings but not others (e.g., one pmapping accesses
+    # # DRAM while another doesn't.)
+    # joined._data = joined.data.fillna(0)
+    return t0 # Mappings(spec, list(pmappings.einsum2pmappings.keys()), joined.data)
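Despite the `simanneal` name, the search above has no temperature schedule: the first `join_pmappings` builds a random population of `SimAnnealMapping` candidates and then repeatedly replaces a candidate with a mutated copy only when the copy scores strictly better, retrying whenever a mutation raises `FailedMutation`. The toy sketch below isolates that accept-if-better loop; the candidate dictionaries, `mutate`, and `score` here are illustrative stand-ins (not the accelforge/fastfusion API) for the per-Einsum compatibility and pmapping-index choices and for the objective computed over the joined pmapping dataframe.

```python
import random

# Toy stand-ins for the real state: a candidate fixes one integer choice per
# Einsum (the real code fixes a compatibility + pmapping index per Einsum),
# and score() is an arbitrary objective to minimize.
class FailedMutation(Exception):
    pass


def score(candidate: dict[str, int]) -> float:
    return sum((v - 3) ** 2 for v in candidate.values())


def mutate(candidate: dict[str, int]) -> dict[str, int]:
    new = dict(candidate)
    e = random.choice(list(new))    # pick a random Einsum
    new[e] = random.randint(0, 10)  # re-roll its choice
    if random.random() < 0.1:       # mimic an incompatible re-roll
        raise FailedMutation(f"no compatible choice for {e}")
    return new


einsums = ["Q", "K", "V"]
population = [{e: random.randint(0, 10) for e in einsums} for _ in range(8)]

for _ in range(1000):  # evaluation budget (the tracker plays this role)
    for i, m in enumerate(list(population)):
        try:
            new = mutate(m)
            if score(new) < score(m):  # keep the mutant only if it improves
                population[i] = new
        except FailedMutation:
            continue

print(min(score(m) for m in population))  # best objective found
```

In the real code, the budget is enforced by `EvaluationsScoreTracker.finished()` in addition to a fixed cap of 1e6 outer iterations.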
accelforge/_deprecate/_simanneal2/tracking.py

@@ -0,0 +1,116 @@
+import time
+
+
+class EvaluationsScoreTracker:
+    def __init__(
+        self, max_evaluations: int, stop_at_score: float, print_period: int = 10
+    ):
+        self.max_evaluations = max_evaluations
+        self.stop_at_score = stop_at_score
+        self.evaluations = 0
+        self.score = float("inf")
+        self.history = [(0, float("inf"))]
+        self._scale_by = 1
+        self._scale_score_by = 1
+        self.print_period = print_period
+        self.prev_print_time = None
+        self.print_stopped_text = False
+        self.n_mappings = {}
+        self.runtime = {}
+
+    def add_evaluation(self, n_evaluations: int, best_score: float):
+        self.evaluations += n_evaluations * self._scale_by
+        self.score = min(self.score, best_score * self._scale_score_by)
+        # Same score as before, remove the last entry
+        if len(self.history) > 2 and self.history[-2][1] == self.score:
+            self.history.pop(-1)
+        self.history.append((self.evaluations, self.score))
+
+        cur_time = time.time()
+        if (
+            self.prev_print_time is None
+            or cur_time - self.prev_print_time > self.print_period
+        ):
+            self.prev_print_time = cur_time
+            print(f"Evaluations: {self.evaluations}, Score: {self.score}")
+
+        if self.max_evaluations is not None and self.evaluations > self.max_evaluations:
+            self.clean_history()
+            if not self.print_stopped_text:
+                print(
+                    f"Stopping due to evaluations {self.evaluations} > {self.max_evaluations}"
+                )
+                self.print_stopped_text = True
+            return True
+        if self.stop_at_score is not None and self.score < self.stop_at_score:
+            self.clean_history()
+            if not self.print_stopped_text:
+                print(f"Stopping due to score {self.score} < {self.stop_at_score}")
+                self.print_stopped_text = True
+            return True
+        return False
+
+    def finished(self):
+        enough_evaluations = (
+            self.max_evaluations is not None and self.evaluations > self.max_evaluations
+        )
+        enough_score = (
+            self.stop_at_score is not None and self.score < self.stop_at_score
+        )
+        return enough_evaluations or enough_score
+
+    def multiply_scale_by(self, scale_by: float):
+        self._scale_by *= scale_by
+
+    def multiply_score_by(self, scale_by: float):
+        self._scale_score_by *= scale_by
+
+    def __repr__(self):
+        return f"Evaluations: {self.evaluations}, Score: {self.score}"
+
+    def __str__(self):
+        return f"Evaluations: {self.evaluations}, Score: {self.score}"
+
+    def clean_history(self):
+        keep_indices = [0]
+        for i in range(1, len(self.history) - 1):
+            if (
+                self.history[i][1] != self.history[i - 1][1]
+                or self.history[i][1] != self.history[i + 1][1]
+            ):
+                keep_indices.append(i)
+        keep_indices.append(len(self.history) - 1)
+        self.history = [self.history[i] for i in keep_indices]
+
+    def merge_with(self, other: "EvaluationsScoreTracker"):
+        self.score = min(self.score, other.score)
+        self.evaluations += other.evaluations
+
+        i, j = 1, 1
+        history = [(0, float("inf"))]
+        cur_score = float("inf")
+        cur_evaluations = 0
+        while i < len(self.history) or j < len(other.history):
+            # Grab whichever has the lowest evaluations
+            if i < len(self.history) and (
+                j == len(other.history) or self.history[i][0] < other.history[j][0]
+            ):
+                new_evaluations = self.history[i][0] - self.history[i - 1][0]
+                new_score = self.history[i][1]
+                cur_evaluations += new_evaluations
+                cur_score = min(cur_score, new_score)
+                history.append((cur_evaluations, cur_score))
+                i += 1
+            elif j < len(other.history):
+                new_evaluations = other.history[j][0] - other.history[j - 1][0]
+                new_score = other.history[j][1]
+                cur_evaluations += new_evaluations
+                cur_score = min(cur_score, new_score)
+                history.append((cur_evaluations, cur_score))
+                j += 1
+        self.history = history
+        self.clean_history()
+
+    def increase_all_evaluations(self, n_evaluations: int):
+        self.evaluations += n_evaluations
+        self.history = [(e + n_evaluations, s) for e, s in self.history]