tritonparse 0.3.2.dev20251210071601__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tritonparse has been flagged as potentially problematic; see the advisory details accompanying this release listing.
- tritonparse/__init__.py +0 -0
- tritonparse/__main__.py +7 -0
- tritonparse/cli.py +110 -0
- tritonparse/common.py +409 -0
- tritonparse/context_manager.py +64 -0
- tritonparse/event_diff.py +122 -0
- tritonparse/extract_source_mappings.py +49 -0
- tritonparse/info/__init__.py +30 -0
- tritonparse/info/cli.py +121 -0
- tritonparse/info/kernel_query.py +209 -0
- tritonparse/info/parse_helper.py +70 -0
- tritonparse/ir_analysis.py +427 -0
- tritonparse/ir_parser.py +365 -0
- tritonparse/mapper.py +102 -0
- tritonparse/reproducer/__init__.py +0 -0
- tritonparse/reproducer/ast_analyzer.py +636 -0
- tritonparse/reproducer/cli.py +72 -0
- tritonparse/reproducer/consolidated_result.py +52 -0
- tritonparse/reproducer/function_extractor.py +228 -0
- tritonparse/reproducer/import_info.py +25 -0
- tritonparse/reproducer/import_parser.py +178 -0
- tritonparse/reproducer/import_resolver.py +151 -0
- tritonparse/reproducer/ingestion/ndjson.py +237 -0
- tritonparse/reproducer/multi_file_analyzer.py +824 -0
- tritonparse/reproducer/orchestrator.py +110 -0
- tritonparse/reproducer/placeholder_replacer.py +335 -0
- tritonparse/reproducer/templates/__init__.py +0 -0
- tritonparse/reproducer/templates/example.py +38 -0
- tritonparse/reproducer/templates/loader.py +59 -0
- tritonparse/reproducer/templates/tritonbench.py +106 -0
- tritonparse/reproducer/templates/utils.py +48 -0
- tritonparse/reproducer/tests/__init__.py +0 -0
- tritonparse/reproducer/tests/artifacts/__init__.py +5 -0
- tritonparse/reproducer/tests/artifacts/triton_fused_kernel.py +65 -0
- tritonparse/reproducer/tests/artifacts/triton_preprocess.py +16 -0
- tritonparse/reproducer/tests/artifacts/triton_utils.py +14 -0
- tritonparse/reproducer/tests/test_import_parser.py +164 -0
- tritonparse/reproducer/tests/test_import_resolver.py +88 -0
- tritonparse/reproducer/tests/test_multi_file_analyzer.py +118 -0
- tritonparse/reproducer/types.py +20 -0
- tritonparse/reproducer/utils.py +580 -0
- tritonparse/shared_vars.py +12 -0
- tritonparse/source_type.py +56 -0
- tritonparse/sourcemap_utils.py +96 -0
- tritonparse/structured_logging.py +1634 -0
- tritonparse/tools/__init__.py +0 -0
- tritonparse/tools/decompress_bin_ndjson.py +120 -0
- tritonparse/tools/disasm.py +81 -0
- tritonparse/tools/extract_irs.py +244 -0
- tritonparse/tools/format_fix.py +151 -0
- tritonparse/tools/load_tensor.py +76 -0
- tritonparse/tools/prettify_ndjson.py +334 -0
- tritonparse/tools/readme.md +37 -0
- tritonparse/tp_logger.py +9 -0
- tritonparse/trace_processor.py +367 -0
- tritonparse/utils.py +155 -0
- tritonparse-0.3.2.dev20251210071601.dist-info/METADATA +195 -0
- tritonparse-0.3.2.dev20251210071601.dist-info/RECORD +62 -0
- tritonparse-0.3.2.dev20251210071601.dist-info/WHEEL +5 -0
- tritonparse-0.3.2.dev20251210071601.dist-info/entry_points.txt +2 -0
- tritonparse-0.3.2.dev20251210071601.dist-info/licenses/LICENSE +29 -0
- tritonparse-0.3.2.dev20251210071601.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,427 @@
|
|
|
1
|
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
|
|
5
|
+
from .sourcemap_utils import load_ir_contents
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger("IRAnalysis")
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def process_amd_bufferop(ir_content: str, io_keys: list[str]) -> dict[str, int]:
    """
    Count how many lines of *ir_content* contain each op substring in *io_keys*.

    Returns a dict mapping "<op>_count" to the number of matching lines; every
    requested op is present in the result, defaulting to 0. A line that contains
    several different ops increments each of their counters, but a line matching
    the same op more than once still counts only once for that op.
    """
    # Seed every counter so callers always see a complete result.
    counters = {f"{prefix}_count": 0 for prefix in io_keys}
    if not ir_content:
        return counters
    for text_line in ir_content.split("\n"):
        for prefix in io_keys:
            if prefix in text_line:
                counters[f"{prefix}_count"] += 1
    return counters
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def process_amd_ttgir_bufferops(
    key: str,
    file_content: dict[str, str],
    file_path: dict[str, str],
) -> dict[str, int]:
    """
    Count memory-IO ops (tt.load/tt.store and AMD buffer ops) in a TTGIR file.

    The IR text is resolved via ``load_ir_contents`` from either the inlined
    content map or the on-disk path map.
    """
    # TODO: Add atomics
    ops_of_interest = [
        "tt.load",
        "tt.store",
        "amdgpu.buffer_load",
        "amdgpu.buffer_store",
    ]
    ir_text = load_ir_contents(key, file_content, file_path)
    return process_amd_bufferop(ir_text, ops_of_interest)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def process_amd_gcn_bufferops(
    key: str,
    file_content: dict[str, str],
    file_path: dict[str, str],
) -> dict[str, int]:
    """
    Count global/buffer load and store instructions in an AMD GCN assembly file.

    The IR text is resolved via ``load_ir_contents`` from either the inlined
    content map or the on-disk path map.
    """
    # TODO: Add atomics
    ops_of_interest = [
        "global_load",
        "global_store",
        "buffer_load",
        "buffer_store",
    ]
    ir_text = load_ir_contents(key, file_content, file_path)
    return process_amd_bufferop(ir_text, ops_of_interest)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def find_loop_bounds(ir_content: str) -> list[tuple[int, int]]:
    """
    Find the bounds of all scf.for loops in the IR content.
    These are the only candidates for Software Pipelining (SWP).

    A loop opens on the line containing 'scf.for' and closes on the line where
    the brace depth falls back to (or below) the depth recorded when the loop
    opened. Loops are reported in the order they close, so nested loops appear
    before the loops that enclose them.

    Args:
        ir_content: The IR content as a string.

    Returns:
        A list of (start_line, end_line) tuples for each scf.for loop found.
        Line numbers are 0-indexed.
    """
    if not ir_content:
        return []

    bounds: list[tuple[int, int]] = []
    # Stack of loops still open: (start_line, brace depth when the loop opened).
    open_loops: list[tuple[int, int]] = []
    depth = 0

    for idx, text in enumerate(ir_content.split("\n")):
        if "scf.for" in text:
            open_loops.append((idx, depth))

        # Net brace change contributed by this line.
        depth += text.count("{") - text.count("}")

        # Every loop whose opening depth has been reached again closes here.
        while open_loops and depth <= open_loops[-1][1]:
            start_line, _ = open_loops.pop()
            bounds.append((start_line, idx))

    return bounds
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def find_inner_loop_bounds(ir_content: str) -> list[tuple[int, int]]:
    """
    Find the bounds of inner scf.for loops (loops without nested loops inside).

    Inner loops are the primary candidates for Software Pipelining (SWP) as they
    represent the innermost computation that can be optimized.

    Args:
        ir_content: The IR content as a string.

    Returns:
        A list of (start_line, end_line) tuples for each inner scf.for loop
        found. Line numbers are 0-indexed.
    """
    all_loops = find_loop_bounds(ir_content)
    if not all_loops:
        return []

    def encloses_another(idx: int) -> bool:
        # True when some other loop lies strictly inside loop *idx*.
        outer_start, outer_end = all_loops[idx]
        return any(
            outer_start < inner_start and inner_end < outer_end
            for other_idx, (inner_start, inner_end) in enumerate(all_loops)
            if other_idx != idx
        )

    # Keep only the loops that contain no other loop.
    return [
        bounds for idx, bounds in enumerate(all_loops) if not encloses_another(idx)
    ]
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def find_loop_pipelining(
    ttir_content: str,
    ttgir_content: str,
    ttir_loop_start: int,
    ttir_loop_end: int,
    loop_index: int,
    ttir_to_ttgir_mapping: dict[str, dict],
    ttgir_to_source_mapping: dict[str, dict],
    python_source_content: str | None,
    python_source_start_line: int,
) -> dict[str, list[str]]:
    """
    Find pipelining information for a specific loop by identifying tt.load and tt.dot
    operations in TTIR and mapping them to their corresponding operations in the
    original Python source code.

    For each tt.load or tt.dot operation found in the TTIR loop, this function uses
    source mappings to find the corresponding operations in TTGIR, then maps them
    back to the original Python source code. Operations are categorized into three
    sections:
    - prologue: Operations that appear before the loop body
    - loop_body: Operations that appear within the loop body
    - epilogue: Operations that appear after the loop body

    Operations are merged together (both loads and dots) and sorted in program order
    within each section.

    Args:
        ttir_content: The TTIR content as a string.
        ttgir_content: The TTGIR content as a string.
        ttir_loop_start: The starting line number of the loop in TTIR (0-indexed).
        ttir_loop_end: The ending line number of the loop in TTIR (0-indexed).
        loop_index: Index of the TTGIR inner loop assumed to correspond to this
            TTIR loop (currently always 0; see the TODO below).
        ttir_to_ttgir_mapping: Source mapping from TTIR lines to TTGIR lines.
        ttgir_to_source_mapping: Source mapping from TTGIR lines to original
            Python source.
        python_source_content: The original Python source code content.
        python_source_start_line: Absolute line number in the original file at
            which the extracted Python source begins (used as an offset).

    Returns:
        A dictionary containing:
        - "prologue": List of Python source line strings in program order
        - "loop_body": List of Python source line strings in program order
        - "epilogue": List of Python source line strings in program order
    """
    empty_result: dict[str, list[str]] = {
        "prologue": [],
        "loop_body": [],
        "epilogue": [],
    }
    if not ttir_content or not ttgir_content:
        return empty_result

    ttir_lines = ttir_content.split("\n")
    ttgir_lines = ttgir_content.split("\n")
    python_lines = python_source_content.split("\n") if python_source_content else []

    def apply_trailing_space(op: str) -> str:
        """
        Add a trailing space to all ops to avoid false positives like
        warp_group_dot and warp_group_dot_wait.
        """
        return op + " "

    # Step 1: Find tt.load and tt.dot operations in the TTIR loop.
    ttir_pipeline_lines: list[int] = []
    pipeline_tt_ops = [apply_trailing_space(op) for op in ("tt.load", "tt.dot")]
    pipeline_ttgir_ops = [
        apply_trailing_space(op)
        for op in (
            "tt.load",
            "tt.dot",
            "async_copy_global_to_local",
            "warp_group_dot",
        )
    ]
    for line_idx in range(ttir_loop_start, min(ttir_loop_end + 1, len(ttir_lines))):
        line = ttir_lines[line_idx]
        if any(op in line for op in pipeline_tt_ops):
            ttir_pipeline_lines.append(line_idx)

    # Step 2: Find the corresponding loop in TTGIR using source mappings.
    ttgir_inner_loops = find_inner_loop_bounds(ttgir_content)

    # Guard against both "no loops at all" and "fewer loops than requested":
    # previously only the empty case was checked, so a loop_index beyond the
    # available TTGIR inner loops raised IndexError.
    if not ttgir_inner_loops or loop_index >= len(ttgir_inner_loops):
        return empty_result

    # TODO: Implement more sophisticated mapping logic to match TTIR loops to TTGIR loops
    ttgir_loop_start, ttgir_loop_end = ttgir_inner_loops[loop_index]

    # Step 3: Map TTIR operations to TTGIR operations using source mappings and
    # categorize them by their position relative to the TTGIR loop. Entries are
    # (ttgir_line_idx, source_line) so program order can be restored by sorting.
    prologue_ops: list[tuple[int, str]] = []
    loop_body_ops: list[tuple[int, str]] = []
    epilogue_ops: list[tuple[int, str]] = []

    for ttir_line in ttir_pipeline_lines:
        # Mapping keys are 1-indexed line numbers stored as strings.
        ttir_line_key = str(ttir_line + 1)
        if ttir_line_key not in ttir_to_ttgir_mapping:
            continue
        ttgir_lines_list = ttir_to_ttgir_mapping[ttir_line_key].get("ttgir_lines", [])

        # For each mapped TTGIR line, categorize it.
        for ttgir_line in ttgir_lines_list:
            ttgir_line_idx = ttgir_line - 1  # back to 0-indexed
            if ttgir_line_idx >= len(ttgir_lines):
                continue
            ttgir_source_line = ttgir_lines[ttgir_line_idx].strip()

            # Only keep mappings to the "compute" ops.
            if not any(op in ttgir_source_line for op in pipeline_ttgir_ops):
                continue

            # Map the TTGIR line back to Python source; fall back to the TTGIR
            # line text when no source mapping is available.
            ttgir_src_key = str(ttgir_line)
            python_source_line = ttgir_source_line
            if ttgir_src_key in ttgir_to_source_mapping:
                python_line_num = ttgir_to_source_mapping[ttgir_src_key].get("line")
                if python_line_num and python_lines:
                    # python_line_num is absolute in the original file while
                    # python_lines starts at python_source_start_line, so
                    # subtract the offset to index into python_lines.
                    python_line_idx = python_line_num - python_source_start_line
                    if 0 <= python_line_idx < len(python_lines):
                        python_source_line = python_lines[python_line_idx].strip()

            if ttgir_line_idx < ttgir_loop_start:
                prologue_ops.append((ttgir_line_idx, python_source_line))
            elif ttgir_line_idx <= ttgir_loop_end:
                loop_body_ops.append((ttgir_line_idx, python_source_line))
            else:
                epilogue_ops.append((ttgir_line_idx, python_source_line))

    # Step 4: Sort each section by TTGIR line number to maintain program order.
    prologue_ops.sort(key=lambda x: x[0])
    loop_body_ops.sort(key=lambda x: x[0])
    epilogue_ops.sort(key=lambda x: x[0])

    # Extract just the source lines (without line numbers).
    prologue_lines = [line for _, line in prologue_ops]
    loop_body_lines = [line for _, line in loop_body_ops]
    epilogue_lines = [line for _, line in epilogue_ops]

    # Log the pipelining results.
    logger.debug(
        f"Loop pipelining results (TTIR lines {ttir_loop_start}-{ttir_loop_end}):"
    )
    logger.debug(f"  Prologue ({len(prologue_lines)} ops):")
    for line in prologue_lines:
        logger.debug(f"    {line}")
    logger.debug(f"  Loop Body ({len(loop_body_lines)} ops):")
    for line in loop_body_lines:
        logger.debug(f"    {line}")
    logger.debug(f"  Epilogue ({len(epilogue_lines)} ops):")
    for line in epilogue_lines:
        logger.debug(f"    {line}")

    return {
        "prologue": prologue_lines,
        "loop_body": loop_body_lines,
        "epilogue": epilogue_lines,
    }
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
def generate_loop_schedule(
    ttir_key: str,
    ttgir_key: str,
    file_content: dict[str, str],
    file_path: dict[str, str],
    source_mappings: dict[str, dict],
    python_source_content: str | None,
    python_source_start_line: int,
) -> list[dict]:
    """
    Generate loop schedule information by finding inner scf.for loops in TTIR
    and analyzing their pipelining potential using source mappings.

    Only inner loops (loops without nested loops) are considered as they are
    the primary candidates for Software Pipelining (SWP).

    Args:
        ttir_key: Key for the TTIR file.
        ttgir_key: Key for the TTGIR file.
        file_content: Dictionary mapping file keys to content.
        file_path: Dictionary mapping file keys to file paths.
        source_mappings: Dictionary containing source mappings between IR stages.
        python_source_content: The original Python source code content.
        python_source_start_line: The starting line number of the Python source
            in the original file.

    Returns:
        A list with one pipelining dict per analyzed inner loop, each holding
        "prologue"/"loop_body"/"epilogue" lists of Python source lines.
    """
    ttir_content = load_ir_contents(ttir_key, file_content, file_path)
    ttgir_content = load_ir_contents(ttgir_key, file_content, file_path)

    # Mappings between IR stages: TTIR -> TTGIR and TTGIR -> Python source.
    ttir_to_ttgir = source_mappings.get("ttir", {})
    ttgir_to_source = source_mappings.get("ttgir", {})

    # Only inner loops (no nested loops inside) are SWP candidates.
    # TODO: Fix loop mapping with multiple loops.
    candidate_loops = find_inner_loop_bounds(ttir_content)[:1]

    return [
        find_loop_pipelining(
            ttir_content,
            ttgir_content,
            loop_start,
            loop_end,
            idx,
            ttir_to_ttgir,
            ttgir_to_source,
            python_source_content,
            python_source_start_line,
        )
        for idx, (loop_start, loop_end) in enumerate(candidate_loops)
    ]
|
|
373
|
+
|
|
374
|
+
|
|
375
|
+
def _generate_ir_analysis(entry: dict) -> dict:
    """
    Build the "ir_analysis" section for a single trace entry.

    Inspects the entry's payload for TTIR/TTGIR/AMDGCN IR files and derives:
    - "io_counts": AMD buffer-op counts (requires both .amdgcn and .ttgir files).
    - "loop_schedules": software-pipelining loop schedules (requires both
      .ttir and .ttgir files).

    Args:
        entry: A parsed trace entry; its "payload" dict (created if absent) is
            expected to carry "file_content", "file_path", "source_mappings",
            and optionally "python_source".

    Returns:
        The ir_analysis dictionary; empty when no IR files are present or no
        analysis produced results.
    """
    # Note: the original annotation declared `entry: str`, but the code mutates
    # it as a dict via setdefault; the annotation is corrected here.
    payload = entry.setdefault("payload", {})
    file_content = payload.get("file_content", {})
    file_path = payload.get("file_path", {})
    source_mappings = payload.get("source_mappings", {})

    # Find the IR file keys by extension.
    ttir_key = next((k for k in file_content if k.endswith(".ttir")), None)
    ttgir_key = next((k for k in file_content if k.endswith(".ttgir")), None)
    amdgcn_key = next((k for k in file_content if k.endswith(".amdgcn")), None)
    # Skip if no IR files found.
    if not (ttir_key or ttgir_key or amdgcn_key):
        logger.debug("No IR found")
        return {}

    ir_analysis: dict = {}

    if amdgcn_key and ttgir_key:
        # Add BufferOps information from both the TTGIR and the GCN assembly.
        ttgir_bufferops_info = process_amd_ttgir_bufferops(
            ttgir_key, file_content, file_path
        )
        gcn_bufferops_info = process_amd_gcn_bufferops(
            amdgcn_key, file_content, file_path
        )
        io_counts = {}
        if ttgir_bufferops_info:
            io_counts["amd_ttgir_bufferops_count"] = ttgir_bufferops_info
        if gcn_bufferops_info:
            io_counts["amd_gcn_bufferops_count"] = gcn_bufferops_info
        if io_counts:
            ir_analysis["io_counts"] = io_counts

    if ttir_key and ttgir_key:
        # Get Python source content and start line if available.
        python_source_content = None
        python_source_start_line = 1  # Default to 1 if not available
        python_source_info = payload.get("python_source")
        if python_source_info:
            python_source_content = python_source_info.get("code")
            python_source_start_line = python_source_info.get("start_line", 1)

        # Add loop schedule information.
        loop_schedule = generate_loop_schedule(
            ttir_key,
            ttgir_key,
            file_content,
            file_path,
            source_mappings,
            python_source_content,
            python_source_start_line,
        )
        if loop_schedule:
            ir_analysis["loop_schedules"] = loop_schedule

    return ir_analysis
|