accelforge 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- accelforge/__init__.py +21 -0
- accelforge/_accelerated_imports.py +16 -0
- accelforge/_deprecate/_simanneal/evalmapping.py +271 -0
- accelforge/_deprecate/_simanneal/mapspaceglobals.py +298 -0
- accelforge/_deprecate/_simanneal/simanneal.py +666 -0
- accelforge/_deprecate/_simanneal/tracking.py +105 -0
- accelforge/_deprecate/_simanneal/wrappers.py +218 -0
- accelforge/_deprecate/_simanneal2/__init__.py +7 -0
- accelforge/_deprecate/_simanneal2/simanneal.py +493 -0
- accelforge/_deprecate/_simanneal2/tracking.py +116 -0
- accelforge/_deprecate/compatibility_util.py +181 -0
- accelforge/_deprecate/layerdeduplication/__init__.py +2 -0
- accelforge/_deprecate/layerdeduplication/group_similar_einsums.py +160 -0
- accelforge/_deprecate/layerdeduplication/grouped_einsums.py +84 -0
- accelforge/_deprecate/mapping_filter_tags/__init__.py +2 -0
- accelforge/_deprecate/mapping_filter_tags/ffmt.py +212 -0
- accelforge/_deprecate/mapping_filter_tags/onesplit.py +24 -0
- accelforge/_deprecate/mapping_filter_tags/util.py +24 -0
- accelforge/_deprecate/tags.py +69 -0
- accelforge/_deprecate/viz/__init__.py +0 -0
- accelforge/_deprecate/viz/interactive.py +159 -0
- accelforge/_deprecate/viz/reservationtree.py +307 -0
- accelforge/_deprecate/viz/ski_slope.py +88 -0
- accelforge/_version.py +15 -0
- accelforge/examples.py +39 -0
- accelforge/frontend/__init__.py +10 -0
- accelforge/frontend/_binding.py +129 -0
- accelforge/frontend/_workload_isl/__init__.py +2 -0
- accelforge/frontend/_workload_isl/_isl.py +149 -0
- accelforge/frontend/_workload_isl/_symbolic.py +141 -0
- accelforge/frontend/arch copy.py +1544 -0
- accelforge/frontend/arch.py +1642 -0
- accelforge/frontend/config.py +63 -0
- accelforge/frontend/mapper/__init__.py +5 -0
- accelforge/frontend/mapper/ffm.py +126 -0
- accelforge/frontend/mapper/mapper.py +7 -0
- accelforge/frontend/mapper/metrics.py +30 -0
- accelforge/frontend/mapping/__init__.py +1 -0
- accelforge/frontend/mapping/mapping.py +1736 -0
- accelforge/frontend/model.py +14 -0
- accelforge/frontend/renames.py +150 -0
- accelforge/frontend/spec copy.py +230 -0
- accelforge/frontend/spec.py +301 -0
- accelforge/frontend/variables.py +12 -0
- accelforge/frontend/workload.py +952 -0
- accelforge/mapper/FFM/__init__.py +9 -0
- accelforge/mapper/FFM/_join_pmappings/__init__.py +0 -0
- accelforge/mapper/FFM/_join_pmappings/compatibility.py +653 -0
- accelforge/mapper/FFM/_join_pmappings/compress_pmappings.py +140 -0
- accelforge/mapper/FFM/_join_pmappings/join_pmappings.py +703 -0
- accelforge/mapper/FFM/_join_pmappings/pmapping_dataframe.py +901 -0
- accelforge/mapper/FFM/_join_pmappings/pmapping_group.py +337 -0
- accelforge/mapper/FFM/_make_pmappings/contraints/__init__.py +0 -0
- accelforge/mapper/FFM/_make_pmappings/contraints/constraints.py +360 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/__init__.py +1 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_loops.py +373 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_pmapping_templates.py +463 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_reservations.py +95 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_storage_order.py +382 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmapping_templates/make_storages.py +155 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings.py +411 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/__init__.py +1 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/make_pmappings_from_templates.py +407 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/make_tile_shapes.py +1681 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/run_model.py +170 -0
- accelforge/mapper/FFM/_make_pmappings/make_pmappings_from_templates/symbol_relations.py +174 -0
- accelforge/mapper/FFM/_make_pmappings/pmapper_job.py +282 -0
- accelforge/mapper/FFM/_pareto_df/df_convention.py +273 -0
- accelforge/mapper/FFM/_pareto_df/pareto copy.py +836 -0
- accelforge/mapper/FFM/_pareto_df/pareto.py +508 -0
- accelforge/mapper/FFM/data.py +61 -0
- accelforge/mapper/FFM/main copy.py +236 -0
- accelforge/mapper/FFM/main.py +208 -0
- accelforge/mapper/FFM/mappings.py +510 -0
- accelforge/mapper/FFM/pmappings.py +310 -0
- accelforge/mapper/__init__.py +4 -0
- accelforge/mapper.py +0 -0
- accelforge/model/__init__.py +1 -0
- accelforge/model/_looptree/__init__.py +0 -0
- accelforge/model/_looptree/accesses.py +335 -0
- accelforge/model/_looptree/capacity/__init__.py +1 -0
- accelforge/model/_looptree/capacity/aggregators.py +36 -0
- accelforge/model/_looptree/capacity/capacity.py +47 -0
- accelforge/model/_looptree/energy.py +150 -0
- accelforge/model/_looptree/equivalent_ranks.py +29 -0
- accelforge/model/_looptree/latency/__init__.py +1 -0
- accelforge/model/_looptree/latency/latency.py +98 -0
- accelforge/model/_looptree/latency/memory.py +120 -0
- accelforge/model/_looptree/latency/processors.py +92 -0
- accelforge/model/_looptree/mapping_utilities.py +71 -0
- accelforge/model/_looptree/reuse/__init__.py +4 -0
- accelforge/model/_looptree/reuse/isl/__init__.py +1 -0
- accelforge/model/_looptree/reuse/isl/des.py +59 -0
- accelforge/model/_looptree/reuse/isl/isl_functions.py +374 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/__init__.py +4 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/analyze_mapping.py +297 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/skews_from_mapping.py +236 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/tiling.py +685 -0
- accelforge/model/_looptree/reuse/isl/mapping_to_isl/types.py +188 -0
- accelforge/model/_looptree/reuse/isl/spatial.py +260 -0
- accelforge/model/_looptree/reuse/isl/temporal.py +182 -0
- accelforge/model/_looptree/reuse/symbolic/__init__.py +1 -0
- accelforge/model/_looptree/reuse/symbolic/symbolic copy 2.py +1346 -0
- accelforge/model/_looptree/reuse/symbolic/symbolic copy.py +1408 -0
- accelforge/model/_looptree/reuse/symbolic/symbolic.py +1396 -0
- accelforge/model/_looptree/run.py +122 -0
- accelforge/model/_looptree/types.py +26 -0
- accelforge/model/_looptree/visualization/__init__.py +0 -0
- accelforge/model/_looptree/visualization/occupancy.py +11 -0
- accelforge/model/main.py +222 -0
- accelforge/plotting/__init__.py +2 -0
- accelforge/plotting/mappings.py +219 -0
- accelforge/plotting/specs.py +57 -0
- accelforge/util/__init__.py +4 -0
- accelforge/util/_base_analysis_types.py +24 -0
- accelforge/util/_basetypes.py +1089 -0
- accelforge/util/_frozenset.py +36 -0
- accelforge/util/_isl.py +29 -0
- accelforge/util/_itertools.py +14 -0
- accelforge/util/_mathfuncs.py +57 -0
- accelforge/util/_parse_expressions.py +339 -0
- accelforge/util/_picklecache.py +32 -0
- accelforge/util/_setexpressions.py +268 -0
- accelforge/util/_sympy/__init__.py +0 -0
- accelforge/util/_sympy/broadcast_max.py +18 -0
- accelforge/util/_visualization.py +112 -0
- accelforge/util/_yaml.py +579 -0
- accelforge/util/parallel.py +193 -0
- accelforge-0.0.1.dist-info/METADATA +64 -0
- accelforge-0.0.1.dist-info/RECORD +258 -0
- accelforge-0.0.1.dist-info/WHEEL +5 -0
- accelforge-0.0.1.dist-info/licenses/LICENSE +19 -0
- accelforge-0.0.1.dist-info/top_level.txt +5 -0
- docs/_build/html/_sources/fastfusion.frontend.mapper.rst.txt +37 -0
- docs/_build/html/_sources/fastfusion.frontend.rst.txt +70 -0
- docs/_build/html/_sources/fastfusion.frontend.workload.rst.txt +21 -0
- docs/_build/html/_sources/fastfusion.mapper.FFM.rst.txt +37 -0
- docs/_build/html/_sources/fastfusion.mapper.rst.txt +18 -0
- docs/_build/html/_sources/fastfusion.rst.txt +20 -0
- docs/_build/html/_sources/fastfusion.util.rst.txt +21 -0
- docs/_build/html/_sources/index.rst.txt +87 -0
- docs/_build/html/_sources/modules.rst.txt +7 -0
- docs/_build/html/_sources/notes/citation.rst.txt +45 -0
- docs/_build/html/_sources/notes/definitions.rst.txt +43 -0
- docs/_build/html/_sources/notes/faqs.rst.txt +39 -0
- docs/_build/html/_sources/notes/modeling/accelerator_energy_latency.rst.txt +72 -0
- docs/_build/html/_sources/notes/modeling/component_energy_area.rst.txt +96 -0
- docs/_build/html/_sources/notes/modeling/mapping.rst.txt +100 -0
- docs/_build/html/_sources/notes/modeling.rst.txt +33 -0
- docs/_build/html/_sources/notes/parsing/arithmetic_parsing.rst.txt +136 -0
- docs/_build/html/_sources/notes/parsing/setexpressions.rst.txt +63 -0
- docs/_build/html/_sources/notes/parsing/yaml_parsing.rst.txt +176 -0
- docs/_build/html/_sources/notes/quickstart_and_installation.rst.txt +9 -0
- docs/_build/html/_sources/notes/spec/architecture.rst.txt +133 -0
- docs/_build/html/_sources/notes/spec/mapping.rst.txt +12 -0
- docs/_build/html/_sources/notes/spec/workload.rst.txt +83 -0
- docs/_build/html/_sources/notes/spec.rst.txt +36 -0
- docs/source/_ext/include_attrs.py +213 -0
- docs/source/_ext/include_docstring.py +364 -0
- docs/source/_ext/include_functions.py +154 -0
- docs/source/_ext/include_notebook.py +131 -0
- docs/source/_ext/include_yaml.py +119 -0
- docs/source/_ext/inherited_attributes.py +222 -0
- docs/source/_ext/paths.py +4 -0
- docs/source/conf.py +79 -0
- examples/arches/compute_in_memory/_include.yaml +74 -0
- examples/arches/compute_in_memory/_include_functions.py +229 -0
- examples/arches/compute_in_memory/_load_spec.py +57 -0
- examples/arches/compute_in_memory/components/c2c_multiplier.py +181 -0
- examples/arches/compute_in_memory/components/dac_c2c_r2r.py +605 -0
- examples/arches/compute_in_memory/components/misc.py +195 -0
- examples/arches/compute_in_memory/components/util/bit_functions.py +51 -0
- examples/arches/compute_in_memory/components/zero_comparator.py +92 -0
- examples/arches/compute_in_memory/isaac.yaml +233 -0
- examples/arches/compute_in_memory/memory_cells/ecram_demo.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/rram_example.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/rram_isaac_isca_2016.yaml +64 -0
- examples/arches/compute_in_memory/memory_cells/rram_neurosim_default.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/rram_raella_isca_2023.yaml +70 -0
- examples/arches/compute_in_memory/memory_cells/rram_wan_nature_2022.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_colonnade_jssc_2021.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_example.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_jia_jssc_2020.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_sinangil_jssc_2021.yaml +63 -0
- examples/arches/compute_in_memory/memory_cells/sram_wang_vlsi_2022.yaml +63 -0
- examples/arches/compute_in_memory/wang_vlsi_2022.yaml +289 -0
- examples/arches/eyeriss.yaml +68 -0
- examples/arches/fanout_variations/at_glb.yaml +31 -0
- examples/arches/fanout_variations/at_glb_with_fanout_node.yaml +34 -0
- examples/arches/fanout_variations/at_mac.yaml +31 -0
- examples/arches/fanout_variations/at_mac_with_constraints.yaml +38 -0
- examples/arches/fanout_variations/at_mac_with_fanout_node.yaml +34 -0
- examples/arches/nvdla.yaml +47 -0
- examples/arches/simple.yaml +28 -0
- examples/arches/tpu_v4i.yaml +67 -0
- examples/mappings/unfused_matmuls_to_simple.yaml +33 -0
- examples/misc/component_annotated.yaml +33 -0
- examples/workloads/gpt3_6.7B.yaml +124 -0
- examples/workloads/matmuls.yaml +20 -0
- examples/workloads/mobilenet_28.yaml +81 -0
- examples/workloads/mobilenet_various_separate.yaml +106 -0
- examples/workloads/three_matmuls_annotated.yaml +59 -0
- notebooks/.ipynb_checkpoints/fastfusion_arch_study_michael-checkpoint.ipynb +359 -0
- notebooks/compute_in_memory/_scripts.py +339 -0
- notebooks/compute_in_memory/isaac.guide.ipynb +270 -0
- notebooks/compute_in_memory/wang_vlsi_2022.ipynb +602 -0
- notebooks/paths.py +4 -0
- notebooks/tutorials/.ipynb_checkpoints/1_FFM-checkpoint.ipynb +3110 -0
- notebooks/tutorials/FFM.ipynb +3498 -0
- notebooks/tutorials/_include.py +48 -0
- notebooks/tutorials/component_energy_area.ipynb +363 -0
- tests/Q_mapping.yaml +38 -0
- tests/__init__.py +0 -0
- tests/conv.mapping.yaml +27 -0
- tests/conv.workload.yaml +13 -0
- tests/conv_sym.mapping.yaml +43 -0
- tests/copy.mapping.yaml +35 -0
- tests/copy.workload.yaml +15 -0
- tests/distribuffers/__init__.py +0 -0
- tests/distribuffers/multicast/test_cases.yaml +482 -0
- tests/distribuffers/spec/binding/valid_bindings.yaml +97 -0
- tests/distribuffers/spec/distributed.yaml +100 -0
- tests/distribuffers/spec/logical_arch.yaml +32 -0
- tests/distribuffers/spec/physical_arch.yaml +69 -0
- tests/distribuffers/test_binding.py +48 -0
- tests/frontend/__init__.py +0 -0
- tests/frontend/test_mapping_viz.py +52 -0
- tests/mapper/__init__.py +0 -0
- tests/mapper/configs/conv1d/conv1d.mapping.yaml +31 -0
- tests/mapper/configs/conv1d/conv1d.workload.yaml +11 -0
- tests/mapper/configs/two_conv1d/two_conv1d.expected.yaml +38 -0
- tests/mapper/configs/two_conv1d/two_conv1d.mapping.yaml +54 -0
- tests/mapper/configs/two_conv1d/two_conv1d.workload.yaml +19 -0
- tests/mapper/test_mapping_to_isl.py +90 -0
- tests/mapper/test_spatial_reuse_analysis.py +67 -0
- tests/mapper/test_temporal_reuse_analysis.py +56 -0
- tests/mapper/util.py +58 -0
- tests/matmul.mapping.yaml +29 -0
- tests/matmul.workload.yaml +12 -0
- tests/matmul_spatial.mapping.yaml +44 -0
- tests/mha.renames.yaml +65 -0
- tests/mha.workload.yaml +67 -0
- tests/mha.yaml +59 -0
- tests/mha_full.workload.yaml +67 -0
- tests/mobilenet.workload.yaml +35 -0
- tests/mobilenet_long.workload.yaml +64 -0
- tests/pmappingcache.py +24 -0
- tests/processing_stage.arch.yaml +40 -0
- tests/snowcat.arch.yaml +36 -0
- tests/test_ffm_join_pmappings.py +106 -0
- tests/test_ffm_make_pmappings.py +82 -0
- tests/test_ffm_make_tile_shapes.py +49 -0
- tests/test_mapper.py +100 -0
- tests/test_model.py +37 -0
- tests/test_plotting.py +72 -0
- tests/test_processing_stage.py +46 -0
- tests/test_symbolic_model.py +248 -0
- tests/test_workload.py +141 -0
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
@dataclass
class LoopTreeStatistics:
    """
    Results of evaluating a mapping with a LoopTree model run.
    """

    # Overall latency of the evaluated mapping (units depend on the model —
    # TODO confirm seconds vs. cycles against get_latency).
    latency: float
    # Overall energy of the evaluated mapping.
    energy: float
    # Action counts gathered from the reuse-analysis result (see gather_actions
    # usage below).
    actions: dict
    # Per-memory latency breakdown (the `mem_latency` value from get_latency).
    memory_latency: dict
    # Capacity usage keyed by hardware component name (abstract levels are
    # translated through the caller-supplied `bindings` dict).
    capacity_usage: dict
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def run_symbolic_model(mapping, workload, architecture):
    """
    Run the symbolic LoopTree model on a mapping.

    NOTE(review): this function appears unfinished — it ends in ``pass``,
    returns nothing, and references names (``Job``, ``bindings``) that are not
    defined in this scope, so calling it raises NameError. Prefer
    ``run_looptree_symbolic`` below, which performs the same analysis end to
    end. TODO confirm whether this stub should be removed.
    """
    from pytimeloop.looptree.reuse import analyze_reuse_and_add_reservations_to_mapping
    from pytimeloop.looptree.energy import gather_actions

    # NOTE(review): `Job` is not imported here (run_looptree_symbolic imports
    # it from accelforge.mapper.FFM._make_pmappings.pmapper_job) — NameError.
    job = Job.make_job(mapping=mapping, workload=workload, architecture=architecture)
    result = analyze_reuse_and_add_reservations_to_mapping(job)
    # NOTE(review): `bindings` is undefined in this scope, and `actions` is
    # computed but never used or returned.
    actions = gather_actions(result, bindings, use_name=True)
    pass
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def run_looptree(config_dir, paths, tmp_path, bindings, call_accelergy):
    """
    Run the ISL-based LoopTree model on a set of YAML configuration files.

    Parameters
    ----------
    config_dir:
        Directory containing the configuration files.
    paths:
        Paths of the configuration files, relative to ``config_dir``.
    tmp_path:
        Directory used for Accelergy output (``ERT.yaml``).
    bindings:
        Dict mapping abstract levels to hardware component names.
    call_accelergy:
        If True, run Accelergy first and re-load the spec with the generated
        energy reference table.

    Returns
    -------
    A ``LoopTreeStatistics`` with latency, energy, actions, per-memory
    latency, and per-component capacity usage.
    """
    import islpy as isl
    from bindings.config import Config
    from bindings.looptree import LooptreeModelApp, LooptreeWorkload
    from pytimeloop.file import gather_yaml_configs
    from pytimeloop.looptree.capacity import compute_capacity_usage
    from pytimeloop.looptree.reuse._isl.des import deserialize_looptree_output
    from pytimeloop.looptree.energy import gather_actions, compute_energy_from_actions
    from pytimeloop.looptree.latency import get_latency
    from pytimeloop.timeloopfe.v4fused import Spec
    from pytimeloop.timeloopfe.common.backend_calls import call_accelergy_verbose

    config_files = [str(config_dir / p) for p in paths]

    config = Config(gather_yaml_configs(config_dir, paths), "yaml")
    model_app = LooptreeModelApp(config)
    workload = LooptreeWorkload.parse_cfg(config.root["problem"])

    spec = Spec.from_yaml_files(config_files)
    if call_accelergy:
        if isinstance(tmp_path, Path):
            tmp_path = str(tmp_path)
        # Generate the energy reference table, then re-load the spec with it.
        call_accelergy_verbose(spec, tmp_path)
        spec = Spec.from_yaml_files(config_files + [str(Path(tmp_path) / "ERT.yaml")])

    model_output = deserialize_looptree_output(model_app.run(), isl.DEFAULT_CONTEXT)

    actions = gather_actions(model_output, bindings)
    energy = compute_energy_from_actions(actions, spec.ERT)

    latency, _comp_latency, mem_latency = get_latency(
        model_output, spec.mapping, workload, spec.arch, bindings
    )

    # Translate per-level capacity usage into per-component usage via `bindings`.
    level_capacity = compute_capacity_usage(
        spec.mapping.nodes, model_output.occupancy, workload
    )
    per_component_capacity = {
        component: level_capacity[level]
        for level, component in bindings.items()
        if level in level_capacity
    }

    return LoopTreeStatistics(
        latency, energy, actions, mem_latency, capacity_usage=per_component_capacity
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def run_looptree_symbolic(config_dir, paths, tmp_path, bindings, call_accelergy):
    """
    Run the symbolic LoopTree model on a set of YAML configuration files.

    Parameters
    ----------
    config_dir:
        Directory containing the configuration files.
    paths:
        Paths of the configuration files, relative to ``config_dir``.
    tmp_path:
        Directory used for Accelergy output (``ERT.yaml``).
    bindings:
        Dict mapping abstract levels to hardware component names.
    call_accelergy:
        If True, run Accelergy first and re-load the spec with the generated
        energy reference table.

    Returns
    -------
    A ``LoopTreeStatistics`` with latency, energy, actions, per-memory
    latency, and per-component capacity usage.
    """
    from bindings.config import Config
    from bindings.looptree import LooptreeWorkload, LooptreeWorkloadDependencyAnalyzer
    from pytimeloop.file import gather_yaml_configs
    from pytimeloop.looptree.capacity import compute_capacity_usage
    from pytimeloop.looptree.reuse import analyze_reuse_and_add_reservations_to_mapping
    from pytimeloop.looptree.energy import gather_actions, compute_energy_from_actions
    from pytimeloop.looptree.latency import get_latency
    from pytimeloop.timeloopfe.v4fused import Spec
    from pytimeloop.timeloopfe.common.backend_calls import call_accelergy_verbose
    from accelforge.mapper.FFM._make_pmappings.pmapper_job import Job

    config_files = [str(config_dir / p) for p in paths]

    config = Config(gather_yaml_configs(config_dir, paths), "yaml")
    workload = LooptreeWorkload.parse_cfg(config.root["problem"])
    # Kept for parity with the original code; the analyzer result is unused here.
    analyzer = LooptreeWorkloadDependencyAnalyzer(workload)

    spec = Spec.from_yaml_files(config_files)
    if call_accelergy:
        if isinstance(tmp_path, Path):
            tmp_path = str(tmp_path)
        # Generate the energy reference table, then re-load the spec with it.
        call_accelergy_verbose(spec, tmp_path)
        spec = Spec.from_yaml_files(config_files + [str(Path(tmp_path) / "ERT.yaml")])

    job = Job.make_job(mapping=spec.mapping, workload=workload, architecture=spec.arch)
    _tile_shapes, analysis = analyze_reuse_and_add_reservations_to_mapping(job)

    actions = gather_actions(analysis, bindings, use_name=True)
    energy = compute_energy_from_actions(actions, spec.ERT)

    latency, _comp_latency, mem_latency = get_latency(
        analysis, spec.mapping, workload, spec.arch, bindings
    )

    # Translate per-level capacity usage into per-component usage via `bindings`.
    level_capacity = compute_capacity_usage(
        spec.mapping.nodes, analysis.occupancy, workload
    )
    per_component_capacity = {
        component: level_capacity[level]
        for level, component in bindings.items()
        if level in level_capacity
    }

    return LoopTreeStatistics(
        latency, energy, actions, mem_latency, capacity_usage=per_component_capacity
    )
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Contains shared classes of analysis.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from typing import TypeAlias
|
|
7
|
+
|
|
8
|
+
from accelforge.frontend.mapping import TensorName
|
|
9
|
+
from accelforge.frontend.workload import EinsumName
|
|
10
|
+
|
|
11
|
+
ComponentName: TypeAlias = str
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass(eq=True, frozen=True)
class Buffet:
    """
    A logical buffer that stores a tensor, an einsum operating on it, and the
    level the buffer exists on in hardware.

    Frozen and equality-comparable, so instances are hashable and can be used
    as dictionary keys or set members.
    """

    tensor: TensorName
    "The tensor held by the buffet."
    einsum: EinsumName
    "An einsum operating on the tensor."
    level: ComponentName
    "The abstract hardware level the buffet resides in."
|
|
File without changes
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import matplotlib.pyplot as plt
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def plot_occupancy_graph(output: "IslReuseAnalysisOutput", workload):
    """
    Plot an occupancy graph for an ISL reuse-analysis output.

    NOTE(review): appears unfinished — the function builds the per-einsum
    rank-shape table but never uses `output`, draws nothing, and implicitly
    returns None. TODO confirm intended behavior.
    """
    # Shape of every output-space rank, grouped by einsum.
    einsum_rank_to_shape = {
        einsum: {
            rank: workload.get_rank_shape(rank)
            for rank in workload.einsum_ospace_dimensions(einsum)
        }
        for einsum in workload.einsum_id_to_name()
    }
|
accelforge/model/main.py
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
from copy import copy, deepcopy
|
|
2
|
+
from uuid import uuid4
|
|
3
|
+
|
|
4
|
+
import pandas as pd
|
|
5
|
+
|
|
6
|
+
from accelforge.frontend import arch
|
|
7
|
+
from accelforge.frontend.arch import Memory
|
|
8
|
+
from accelforge.frontend.renames import EinsumName
|
|
9
|
+
from accelforge.frontend.spec import Mapping, Spec
|
|
10
|
+
from accelforge.frontend.mapping import Compute, Split, Nested, NodeList, TensorHolder
|
|
11
|
+
from accelforge.frontend.workload import Workload
|
|
12
|
+
from accelforge.frontend._workload_isl._symbolic import (
|
|
13
|
+
get_stride_and_halo_of_einsum,
|
|
14
|
+
get_rank_variable_relevancy,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def evaluate_mapping(
    spec: Spec,
    flattened_arches: dict[tuple[EinsumName, str], list[arch.Leaf]] | None = None,
    parsed_specs: dict[EinsumName, Spec] | None = None,
):
    """
    Evaluate a mapping.

    Parameters
    ----------
    spec:
        The specification of architecture, workload, and mapping.
    flattened_arches:
        A dictionary of (EinsumName, Compute Name) to lists of architecture nodes.
        These contain the parsed and flattened architecture node for that
        particular Einsum and compute combination. If provided, then these will
        be used instead of re-parsing the architecture. Must be provided
        together with ``parsed_specs``.
    parsed_specs:
        A dictionary of Einsum names to parsed specifications. These contain the
        parsed specification for that particular Einsum. If provided, then these
        will be used instead of re-parsing the specification. Must be provided
        together with ``flattened_arches``.
    """
    # Local imports break an import cycle with the mapper package.
    from accelforge.mapper.FFM._join_pmappings.compatibility import Compatibility
    from accelforge.mapper.FFM._join_pmappings.pmapping_dataframe import (
        PmappingDataframe,
    )
    from accelforge.mapper.FFM._join_pmappings.pmapping_group import PmappingGroup
    from accelforge.mapper.FFM._join_pmappings.join_pmappings import (
        clean_compress_and_join_pmappings,
    )
    from accelforge.mapper.FFM.pmappings import MultiEinsumPmappings
    from accelforge.mapper.FFM._make_pmappings.make_pmappings import (
        get_rank_variable_bounds_for_all_einsums,
    )
    from accelforge.mapper.FFM._make_pmappings.make_pmappings_from_templates.run_model import (
        run_model,
    )
    from accelforge.mapper.FFM._make_pmappings.pmapper_job import Job

    # Bug fix: the check requires both-or-neither, but the old message said
    # "Provide either flattened_arches or parsed_specs, not both."
    assert (parsed_specs is not None) == (
        flattened_arches is not None
    ), "flattened_arches and parsed_specs must be provided together (or both omitted)."

    original_job = Job(
        metrics=spec.model.metrics,
        rank_variable_bounds=get_rank_variable_bounds_for_all_einsums(spec),
        spec=spec,
    )

    einsum2pmappings = {}
    pmapping_objects = {}
    einsum2jobs = {}
    assert not getattr(
        spec, "_parsed", False
    ), "Spec must not be parsed before evaluating a mapping"
    # Each pmapping covers one Einsum (the mapping is split at Split nodes).
    for pmapping in _split_mapping_to_pmappings(spec.mapping, spec.workload):
        einsum_name = pmapping.nodes[-1].einsum
        compute_name = pmapping.nodes[-1].component
        pmapping_id = uuid4()
        # Shallow copy: each pmapping gets its own Job carrying shared metrics/bounds.
        job = copy(original_job)

        if flattened_arches is not None:
            # Use the caller-supplied pre-parsed architecture/spec.
            flattened_arch = flattened_arches[(einsum_name, compute_name)]
            cur_spec = parsed_specs[einsum_name]
        else:
            # Re-parse for this Einsum (area is not needed for evaluation).
            cur_spec = spec.calculate_component_area_energy_latency_leak(
                einsum_name=einsum_name,
                area=False,
            )
            flattened_arch = cur_spec._get_flattened_architecture(
                compute_node=pmapping.nodes[-1].component
            )

        job.spec = cur_spec
        # Normalize the pmapping before modeling.
        pmapping.remove_reservations()
        pmapping.split_loop_with_multiple_rank_variables()
        pmapping.split_tensor_holders_with_multiple_tensors()
        _add_backing_to_tensor_holders(pmapping)

        job.mapping = pmapping
        job.einsum_name = pmapping.nodes[-1].einsum
        job.tensor_to_relevancy = {
            tensor: get_rank_variable_relevancy(
                job.spec.workload.einsums[job.einsum_name], tensor
            )
            for tensor in job.spec.workload.einsums[job.einsum_name].tensor_names
        }
        einsum2jobs[job.einsum_name] = job

        job.flattened_arch = flattened_arch
        job.memories_track_all = [
            m.name for m in flattened_arch if isinstance(m, Memory)
        ]

        job.stride_and_halo = get_stride_and_halo_of_einsum(
            job.einsum_name, cur_spec.workload
        )
        job.fusable_tensors = set(
            cur_spec.workload.tensor_names_used_in_multiple_einsums
            & set(job.tensor_to_relevancy)
        )

        _, df, _, _, _tensor2mapping = run_model(job)
        # Prefix per-Einsum columns with the Einsum name; "Total" columns are shared.
        new_df = {}
        for key, value in df.items():
            if "Total" in key:
                new_df[key] = value
            else:
                new_df[f"{job.einsum_name}<SEP>{key}"] = value
        df = new_df
        df[f"{job.einsum_name}<SEP>mapping"] = pmapping_id

        einsum = cur_spec.workload.einsums[job.einsum_name]
        rank_variable_to_ranks = {
            t.name: t.rank_variable2ranks for t in einsum.tensor_accesses
        }
        compatibility = Compatibility.from_mapping(
            job.mapping, einsum.tensor_names, rank_variable_to_ranks
        )

        # A single-row dataframe: we are evaluating one mapping, not searching.
        einsum2pmappings[job.einsum_name] = [
            PmappingGroup(
                compatibility,
                PmappingDataframe(pd.DataFrame(df, columns=df.keys(), index=[0]), 1, 1),
            )
        ]
        pmapping_objects[job.einsum_name] = {pmapping_id: job.mapping}

    m = MultiEinsumPmappings(
        einsum2pmappings,
        pmapping_objects,
        einsum2jobs,
        can_combine_multiple_runs=True,
        einsums_with_pmappings_generated=spec.workload.einsum_names,
        flattened_arches=flattened_arches,
        parsed_specs=parsed_specs,
    )

    return clean_compress_and_join_pmappings(spec, m)
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def _add_backing_to_tensor_holders(pmapping: Mapping):
    """
    Mark each TensorHolder as the backing store for the tensors it is the
    first (outermost) node to hold; subsequent holders of the same tensor are
    not backing.
    """
    already_backed = set()
    for holder in (n for n in pmapping.nodes if isinstance(n, TensorHolder)):
        first_seen = set(holder.tensors) - already_backed
        holder._backing = first_seen
        already_backed |= first_seen
|
+
|
|
168
|
+
|
|
169
|
+
def _split_mapping_to_pmappings(mapping: Mapping, workload: Workload):
    """
    A DFS-like algorithm to split a mapping into pmappings at Split nodes.

    DFS has to be modified because the tree has lists of nodes for nested nodes
    instead of links to children. Yields one Mapping per Compute leaf, whose
    nodes are the (deep-copied) path of nodes from the root down to that leaf.

    Bug fix: the previous version appended a Split segment's prefix to the
    shared path but never removed it (the pop happened only in the Compute
    branch), so any sibling segment processed after a nested-Split branch
    incorrectly inherited that branch's prefix. Each stack entry now records
    its prefix depth and the path is truncated to that depth before the
    segment is appended.
    """
    # Each entry: (segment node list, number of prefix segments above it).
    dfs_stack: list[tuple[NodeList, int]] = [(mapping.nodes, 0)]
    cur_pmapping = []

    while dfs_stack:
        # nodes_segment is a list of nested nodes with a Split or Compute at the end.
        nodes_segment, depth = dfs_stack.pop()
        assert isinstance(nodes_segment[-1], (Split, Compute))

        # Discard prefixes left over from sibling branches before descending.
        del cur_pmapping[depth:]
        cur_pmapping.append(nodes_segment[:-1])

        last_node = nodes_segment[-1]
        if isinstance(last_node, Split):
            for segment in last_node.nodes:
                assert isinstance(segment, Nested)
                dfs_stack.append((segment.nodes, depth + 1))
        else:
            assert isinstance(last_node, Compute)

            # Avoid shadowing the `mapping` parameter.
            pmapping = Mapping()
            pmapping.nodes = deepcopy(
                [n for ns in cur_pmapping for n in ns] + [last_node]
            )
            _remove_storage_of_unrelevant_tensors(pmapping, workload)
            yield pmapping
|
|
203
|
+
|
|
204
|
+
def _remove_storage_of_unrelevant_tensors(pmapping: Mapping, workload: Workload):
    """
    Remove tensors from Storage nodes that are not relevant to the Einsum being
    mapped; TensorHolder nodes left with no tensors are dropped entirely.
    """
    einsum = workload.einsums[pmapping.nodes[-1].einsum]
    relevant = {access.name for access in einsum.tensor_accesses}

    kept = []
    for node in pmapping.nodes:
        if not isinstance(node, TensorHolder):
            kept.append(node)
            continue
        # Filter in place so the node keeps only tensors this Einsum touches.
        node.tensors = [t for t in node.tensors if t in relevant]
        if node.tensors:
            kept.append(node)

    pmapping.nodes = kept
|
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
from collections.abc import Iterable, Sequence
|
|
2
|
+
|
|
3
|
+
import matplotlib.pyplot as plt
|
|
4
|
+
import pandas as pd
|
|
5
|
+
|
|
6
|
+
from accelforge.mapper.FFM import Mappings
|
|
7
|
+
from accelforge.mapper.FFM._pareto_df.df_convention import col2energy, col2action
|
|
8
|
+
from accelforge.util._base_analysis_types import VerboseActionKey
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def plot_latency_comparison(
    mappings: Iterable[Mappings] | Mappings,
    labels=None,
):
    """
    Plot latency comparison of multiple mappings.

    Parameters
    ----------
    mappings:
        A mapping to plot or an iterable of mappings to plot.
    labels:
        Labels to use for each Mapping class in `mappings`.
    """
    # Bug fix: this previously plotted the "Total<SEP>energy" column while
    # labeling the axis "Latency (s)". Plot the latency column to match the
    # label (column name follows the file's "Total<SEP>..." convention —
    # TODO confirm against df_convention).
    fig, ax = _plot_column_comparison(mappings, labels, "Total<SEP>latency")
    ax.set_ylabel("Latency (s)")
    return fig, ax
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def plot_action_breakdown(
    mappings: Iterable[Mappings] | Mappings,
    separate_by: Sequence[str],
    stack_by: Sequence[str] = None,
    labels: Iterable[str] = None,
):
    """
    Plot a breakdown of action counts as bars, one subplot per mapping.

    Parameters
    ----------
    mappings:
        A mapping to plot or an iterable of mappings to plot. Each mapping
        is drawn in its own subplot.
    labels:
        Labels to use for each Mapping class in `mappings`.
    separate_by:
        Elements from {"einsum", "tensor", "component", "action"}; each
        distinct combination becomes its own bar, grouped left to right.
    stack_by:
        Elements from {"einsum", "tensor", "component", "action"} selecting
        the segments of each stacked bar. Defaults to stacking by action.
    """
    stacking = ["action"] if stack_by is None else stack_by
    fig, axes = _plot_breakdown(
        mappings,
        labels,
        separate_by,
        stacking,
        "action",
        col2action,
    )
    first_axis = axes[0]
    first_axis.set_ylabel("Actions")
    return fig, axes
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def plot_energy_breakdown(
    mappings: Iterable[Mappings] | Mappings,
    separate_by: Sequence[str],
    stack_by: Sequence[str] = None,
    labels: Iterable[str] = None,
):
    """
    Plot an energy breakdown as bars, one subplot per mapping.

    Parameters
    ----------
    mappings:
        A mapping to plot or an iterable of mappings to plot. Each mapping
        is drawn in its own subplot.
    labels:
        Labels to use for each Mapping class in `mappings`.
    separate_by:
        Elements from {"einsum", "tensor", "component", "action"}; each
        distinct combination becomes its own bar, grouped left to right.
    stack_by:
        Elements from {"einsum", "tensor", "component", "action"} selecting
        the segments of each stacked bar.
    """
    fig, axes = _plot_breakdown(
        mappings,
        labels,
        separate_by,
        stack_by,
        "energy",
        col2energy,
    )
    first_axis = axes[0]
    first_axis.set_ylabel("Energy (pJ)")
    return fig, axes
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def _plot_breakdown(mappings, labels, separate_by, stack_by, col_keyword: str, keyer):
    """
    Shared implementation for the action/energy breakdown plots.

    Creates one subplot per dataframe row of each Mappings object and draws
    stacked bars whose separation/stacking is controlled by `separate_by` /
    `stack_by`. `col_keyword` selects the matching dataframe columns and
    `keyer` parses a column name into a key object.
    """
    mappings = [mappings] if isinstance(mappings, Mappings) else list(mappings)
    n_axes = sum(map(len, (m.data for m in mappings)))

    fig, axes = plt.subplots(1, n_axes, sharey=True)
    if n_axes == 1:
        axes = [axes]

    # BUG FIX: `labels + "-"` raised TypeError when labels was a list; append
    # the separator to each label instead.
    if labels is not None:
        labels = [f"{label}-" for label in labels]
    else:
        labels = [f"{i}-" for i in range(len(mappings))]
    if len(labels) != len(mappings):
        raise ValueError("`labels` must have one entry per Mappings object")

    if len(separate_by) == 0:
        raise ValueError("Missing categories by which to breakdown energy")

    idx = 0
    for label, df in zip(labels, (m.data for m in mappings)):
        colnames = [c for c in df.columns if col_keyword in c and "Total" not in c]
        bar_components = list(
            _get_bar_components(colnames, keyer, separate_by, stack_by)
        )

        for j, (_key, row) in enumerate(df.iterrows()):
            ax = axes[idx]
            idx += 1

            ax.set_title(f"{label}mapping{j}")

            # First pass: fix the bar order and the set of stack labels.
            bars = [name for name, _ in bar_components]
            stack_names = {
                stack_name
                for _, constituents in bar_components
                for stack_name, _ in constituents
            }
            label2heights = {s: [0] * len(bars) for s in stack_names}

            # Second pass: accumulate each stack segment's height.
            for bar_i, (_name, constituents) in enumerate(bar_components):
                for stack_name, subconstituents in constituents:
                    # BUG FIX: previously read df[colname].iloc[0], which
                    # plotted the first row's data in every subplot instead
                    # of the current row's.
                    label2heights[stack_name][bar_i] = sum(
                        row[colname] for colname in subconstituents
                    )

            # Use a distinct loop variable so the outer `label` is not
            # clobbered (the old code shadowed it, corrupting the titles of
            # later subplots). Track bottoms so segments actually stack
            # instead of overlapping.
            bottoms = [0] * len(bars)
            for stack_name, heights in label2heights.items():
                ax.bar(bars, height=heights, bottom=bottoms, label=stack_name)
                bottoms = [b + h for b, h in zip(bottoms, heights)]
            ax.tick_params(axis="x", rotation=90)

    for ax in axes:
        ax.legend()
    return fig, axes
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def plot_energy_comparison(mappings: Iterable[Mappings] | Mappings, labels=None):
    """
    Plot a bar chart comparing the total energy of multiple mappings.

    Parameters
    ----------
    mappings:
        A mapping to plot or an iterable of mappings to plot.
    labels:
        Labels to use for each Mapping class in `mappings`.
    """
    fig, ax = _plot_column_comparison(mappings, labels, "Total<SEP>energy")
    ax.set_ylabel("Energy (pJ)")
    return fig, ax
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def _plot_column_comparison(mappings, labels, colname):
    """
    Plot one bar per mapping row, with bar heights read from `colname`.

    Returns the created matplotlib figure and axes.
    """
    fig, ax = plt.subplots()

    mappings = [mappings] if isinstance(mappings, Mappings) else list(mappings)
    # BUG FIX: `labels + "-"` raised TypeError when labels was a list; append
    # the separator to each label instead.
    if labels is not None:
        labels = [f"{label}-" for label in labels]
    else:
        labels = [""] * len(mappings)
    if len(labels) != len(mappings):
        raise ValueError("`labels` must have one entry per Mappings object")

    for label, df in zip(labels, (m.data for m in mappings)):
        bars = [f"{label}mapping{i}" for i in range(len(df))]
        ax.bar(bars, df[colname])

    return fig, ax
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _get_bar_components(colnames, keyer, separate_by, stack_by=None):
    """
    Group dataframe column names into (bar label, [(stack label, columns)]).

    Parameters
    ----------
    colnames:
        Candidate dataframe column names.
    keyer:
        Callable mapping a column name to a key; only columns whose key is a
        VerboseActionKey are kept.
    separate_by:
        Key attributes (from {"einsum", "component", "tensor", "action"})
        used to separate bars.
    stack_by:
        Key attributes used to split each bar into stack segments; None or
        empty yields a single unnamed segment per bar.
    """
    if not stack_by:
        stack_by = []

    split_colnames = []
    for c in colnames:
        key = keyer(c)
        if not isinstance(key, VerboseActionKey):
            continue
        split_colnames.append([key.einsum, key.level, key.tensor, key.action, c])
    transposed_colnames = zip(*split_colnames)
    df = pd.DataFrame(
        {
            k: v
            for k, v in zip(
                ["einsum", "component", "tensor", "action", "colname"],
                transposed_colnames,
            )
        }
    )

    def as_label(group_key):
        # BUG FIX: some pandas versions return a scalar (not a 1-tuple) group
        # key for single-key grouping; joining a bare string would join its
        # characters ("Add" -> "A, d, d").
        if not isinstance(group_key, tuple):
            group_key = (group_key,)
        return ", ".join(group_key)

    result = []
    for group, subdf in df.groupby(by=separate_by):
        group = as_label(group)
        if not stack_by:
            result.append((group, [(None, subdf["colname"])]))
        else:
            finer_separation = []
            for subgroup, stack_df in subdf.groupby(by=stack_by):
                stack_df = stack_df.sort_values(by="colname")
                finer_separation.append((as_label(subgroup), stack_df["colname"]))
            result.append((group, finer_separation))
    return result
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
from collections.abc import Iterable
|
|
2
|
+
|
|
3
|
+
import matplotlib.axes as axes
|
|
4
|
+
import matplotlib.pyplot as plt
|
|
5
|
+
|
|
6
|
+
from accelforge.frontend.spec import Spec
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def plot_area(
    specs: Iterable[Spec], labels: Iterable[str] = None, ax: axes.Axes = None
):
    """
    Plot the per-component area of one or more specs as stacked bars.

    Parameters
    ----------
    specs:
        An iterable of specifications.
    labels:
        An iterable of the same length as `specs` to use as labels in the plot.
    ax:
        A matplotlib Axes to use. A new one is created by default.

    Returns
    -------
    The matplotlib Axes the plot was drawn on.
    """
    # BUG FIX: `specs` may be a one-shot iterable; materialize it so len()
    # and iteration below are both safe.
    specs = list(specs)
    if ax is None:
        _fig, ax = plt.subplots()
    ax.set_ylabel("Area (m^2)")

    if labels is None:
        labels = [f"spec-{i}" for i in range(len(specs))]
    else:
        labels = list(labels)
    if len(labels) != len(specs):
        raise ValueError("`labels` must have one entry per spec")

    component2color = {}
    for i, spec in enumerate(specs):
        heights = []
        colors = []
        names = []
        height = 0
        for component, area in spec.arch.per_component_total_area.items():
            height += area
            if component not in component2color:
                # Assign each component a stable color shared across specs.
                component2color[component] = plt.cm.tab10(len(component2color))
            heights.append(height)
            colors.append(component2color[component])
            names.append(component)

        # Draw the tallest (cumulative) bars first so each later, shorter bar
        # overlays the previous one, producing a stacked appearance.
        for bar_height, color, name in zip(
            reversed(heights), reversed(colors), reversed(names)
        ):
            ax.bar(i, height=bar_height, label=name, color=color)

    ax.set_xticks(range(len(specs)), labels)

    # BUG FIX: deduplicate legend entries — components repeat across specs,
    # so the naive legend listed every component once per spec.
    handles, names = ax.get_legend_handles_labels()
    unique = dict(zip(names, handles))
    ax.legend(unique.values(), unique.keys())
    return ax
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
@dataclass(frozen=True)
class ActionKey:
    """Hashable key identifying an action at a given level.

    Frozen so instances are hashable and usable as dict keys.
    """

    # Name of the level/component performing the action — presumably an
    # architecture component; confirm against callers.
    level: str
    # Name of the action itself.
    action: str
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass(frozen=True)
class VerboseActionKey(ActionKey):
    """ActionKey extended with the tensor and Einsum the action applies to."""

    # None for actions not associated with a specific tensor.
    tensor: str | None
    # Name of the Einsum the action belongs to.
    einsum: str
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class ActionCount:
    """Counts of an action: the overall total and the per-unit maximum."""

    # Total count across all units.
    total: Any
    # Largest count observed for any single unit.
    max_per_unit: Any

    @staticmethod
    def default():
        """Return a zero-initialized ActionCount."""
        return ActionCount(total=0, max_per_unit=0)
|