tritonparse 0.2.4.dev20251001071529.tar.gz → 0.2.4.dev20251003071457.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tritonparse-0.2.4.dev20251001071529/tritonparse.egg-info → tritonparse-0.2.4.dev20251003071457}/PKG-INFO +1 -1
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/pyproject.toml +3 -0
- tritonparse-0.2.4.dev20251003071457/run.py +80 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/test_tritonparse.py +114 -15
- tritonparse-0.2.4.dev20251003071457/tritonparse/__main__.py +5 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/reproducer/cli.py +6 -2
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/reproducer/templates/example.py +84 -10
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/reproducer/utils.py +74 -11
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/structured_logging.py +69 -4
- tritonparse-0.2.4.dev20251003071457/tritonparse/tools/disasm.py +81 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457/tritonparse.egg-info}/PKG-INFO +1 -1
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse.egg-info/SOURCES.txt +3 -0
- tritonparse-0.2.4.dev20251003071457/tritonparse.egg-info/entry_points.txt +2 -0
- tritonparse-0.2.4.dev20251001071529/run.py +0 -48
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.ci/README.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.ci/install-project.sh +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.ci/install-triton-kernels.sh +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.ci/install-triton.sh +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.ci/run-tests.sh +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.ci/setup.sh +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.github/PAGES_SETUP.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.github/copilot-instructions.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.github/workflows/deploy-pages-standalone.yml +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.github/workflows/deploy-pages.yml +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.github/workflows/nightly-pypi.yml +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.github/workflows/test.yml +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/.gitignore +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/CHANGELOG.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/CODE_OF_CONDUCT.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/CONTRIBUTING.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/LICENSE +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/Makefile +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/README.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/__init__.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/docs/README.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/docs/screenshots/code-comparison.png +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/docs/screenshots/kernel-overview.png +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/setup.cfg +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/README.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/__init__.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/example_output/logs/dedicated_log_triton_trace_findhao_.ndjson +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/example_output/parsed_output/dedicated_log_triton_trace_findhao__mapped.ndjson.gz +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/example_output/parsed_output/f0_fc0_a0_cai-.ndjson.gz +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/example_output/parsed_output/log_file_list.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/example_output/parsed_output_complex/dedicated_log_triton_trace_findhao__mapped.ndjson.gz +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/example_output/parsed_output_complex/log_file_list.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/example_output/repro/repro_context_20250816192455.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tests/test_add.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/__init__.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/common.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/context_manager.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/event_diff.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/extract_source_mappings.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/ir_parser.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/mapper.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/reproducer/__init__.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/reproducer/ingestion/ndjson.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/reproducer/orchestrator.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/reproducer/templates/loader.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/shared_vars.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/source_type.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/sourcemap_utils.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/tools/__init__.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/tools/decompress_bin_ndjson.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/tools/format_fix.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/tools/load_tensor.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/tools/prettify_ndjson.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/tools/readme.md +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/tp_logger.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/trace_processor.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse/utils.py +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse.egg-info/dependency_links.txt +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse.egg-info/requires.txt +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/tritonparse.egg-info/top_level.txt +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/eslint.config.js +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/index.html +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/package-lock.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/package.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/postcss.config.js +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/public/dedicated_log_triton_trace_findhao__mapped.ndjson.gz +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/public/f0_fc0_a0_cai-.ndjson +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/public/favicon.ico +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/public/logo.svg +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/scripts/inline-html.js +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/App.css +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/App.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/assets/react.svg +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/ArgumentViewer.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/Callstack.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/CodeComparisonView.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/CodeViewer.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/CompilationInfo.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/CopyCodeButton.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/DataSourceSelector.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/DiffComparisonView.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/DiffViewer.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/ExternalLink.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/SingleCodeViewer.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/StackDiffViewer.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/ToggleSwitch.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/TritonIRs.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/components/WelcomeScreen.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/context/FileDiffSession.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/index.css +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/main.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/pages/CodeView.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/pages/FileDiffView.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/pages/KernelOverview.tsx +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/utils/dataLoader.ts +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/utils/fbDetection.ts +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/utils/safeImport.ts +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/utils/tensor.ts +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/src/vite-env.d.ts +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/tailwind.config.js +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/tsconfig.app.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/tsconfig.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/tsconfig.node.json +0 -0
- {tritonparse-0.2.4.dev20251001071529 → tritonparse-0.2.4.dev20251003071457}/website/vite.config.ts +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tritonparse
-Version: 0.2.4.dev20251001071529
+Version: 0.2.4.dev20251003071457
 Summary: TritonParse: A Compiler Tracer, Visualizer, and mini-Reproducer Generator for Triton Kernels
 Author-email: Yueming Hao <yhao@meta.com>
 License-Expression: BSD-3-Clause
run.py (new file)
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+
+import argparse
+from importlib.metadata import PackageNotFoundError, version
+
+from .reproducer.cli import _add_reproducer_args
+from .reproducer.orchestrator import reproduce
+from .utils import _add_parse_args, unified_parse
+
+
+def _get_package_version() -> str:
+    try:
+        return version("tritonparse")
+    except PackageNotFoundError:
+        return "0+unknown"
+
+
+def main():
+    pkg_version = _get_package_version()
+
+    parser = argparse.ArgumentParser(
+        prog="tritonparse",
+        description=(
+            "TritonParse: parse structured logs and generate minimal reproducers"
+        ),
+        epilog=(
+            "Examples:\n"
+            "  tritonparse parse /path/to/logs --out parsed_output\n"
+            "  tritonparse reproduce /path/to/trace.ndjson --line 1 --out-dir repro_output\n"
+        ),
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    parser.add_argument(
+        "--version",
+        action="version",
+        version=f"%(prog)s {pkg_version}",
+        help="Show program's version number and exit",
+    )
+
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    # parse subcommand
+    parse_parser = subparsers.add_parser(
+        "parse",
+        help="Parse triton structured logs",
+        conflict_handler="resolve",
+    )
+    _add_parse_args(parse_parser)
+    parse_parser.set_defaults(func="parse")
+
+    # reproduce subcommand
+    repro_parser = subparsers.add_parser(
+        "reproduce",
+        help="Build reproducer from trace file",
+    )
+    _add_reproducer_args(repro_parser)
+    repro_parser.set_defaults(func="reproduce")
+
+    args = parser.parse_args()
+
+    if args.func == "parse":
+        parse_args = {
+            k: v for k, v in vars(args).items() if k not in ["command", "func"]
+        }
+        unified_parse(**parse_args)
+    elif args.func == "reproduce":
+        reproduce(
+            input_path=args.input,
+            line_index=args.line,
+            out_dir=args.out_dir,
+            template=args.template,
+        )
+    else:
+        raise RuntimeError(f"Unknown command: {args.func}")
+
+
+if __name__ == "__main__":
+    # Do not add code here, it won't be run. Add them to the function called below.
+    main()  # pragma: no cover
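Note: the new entry point routes subcommands by tagging each subparser with set_defaults(func="...") and branching on that string tag rather than storing a callable. A minimal standalone sketch of the same argparse pattern, for readers unfamiliar with it (all names below are illustrative, not part of tritonparse):

import argparse


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="demo")
    subparsers = parser.add_subparsers(dest="command", required=True)

    parse_cmd = subparsers.add_parser("parse", help="parse logs")
    parse_cmd.add_argument("path")
    parse_cmd.set_defaults(func="parse")  # a string tag, not a callable

    repro_cmd = subparsers.add_parser("reproduce", help="build a reproducer")
    repro_cmd.add_argument("input")
    repro_cmd.add_argument("--line", type=int, default=1)
    repro_cmd.set_defaults(func="reproduce")
    return parser


def main(argv=None) -> None:
    args = build_parser().parse_args(argv)
    if args.func == "parse":
        # Forward everything except the routing keys, as run.py does.
        kwargs = {k: v for k, v in vars(args).items() if k not in ("command", "func")}
        print("would call unified_parse with", kwargs)
    elif args.func == "reproduce":
        print(f"would reproduce line {args.line} of {args.input}")


if __name__ == "__main__":
    main(["reproduce", "trace.ndjson", "--line", "2"])

An alternative is set_defaults(func=some_callable) followed by args.func(args); the string-tag variant used here keeps the dispatch explicit in main().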
tests/test_tritonparse.py
@@ -19,22 +19,20 @@ from typing import Any, Union
 
 import torch
 
-# @manual=//triton:triton
-import triton
+import triton  # @manual=//triton:triton
 
-# @manual=//triton:triton
-import triton.language as tl
+import triton.language as tl  # @manual=//triton:triton
 
 import tritonparse.structured_logging
+import tritonparse.tools.disasm
 import tritonparse.utils
 
-# @manual=//triton:triton
-from triton.compiler import ASTSource, IRSource
+from triton.compiler import ASTSource, IRSource  # @manual=//triton:triton
 
-# @manual=//triton:triton
-from triton.knobs import CompileTimes
+from triton.knobs import CompileTimes  # @manual=//triton:triton
 from tritonparse.common import is_fbcode
 from tritonparse.structured_logging import convert, extract_python_source_info
+from tritonparse.tools.disasm import is_nvdisasm_available
 
 HAS_TRITON_KERNELS = importlib.util.find_spec("triton_kernels") is not None
 
@@ -181,7 +179,7 @@ class TestTritonparseCUDA(unittest.TestCase):
 
     @unittest.skipUnless(torch.cuda.is_available(), "CUDA not available")
     def test_whole_workflow(self):
-        """Test unified_parse functionality"""
+        """Test unified_parse functionality including SASS extraction"""
 
         # Define a simple kernel directly in the test function
         @triton.jit
@@ -211,9 +209,18 @@ class TestTritonparseCUDA(unittest.TestCase):
         os.makedirs(temp_dir_logs, exist_ok=True)
         os.makedirs(temp_dir_parsed, exist_ok=True)
         print(f"Temporary directory: {temp_dir}")
+        nvdisasm_available = is_nvdisasm_available()
+        if nvdisasm_available:
+            print("✓ nvdisasm tool is available, enabling SASS dumping")
+        else:
+            print("⚠️ nvdisasm tool not available, SASS dumping will be disabled")
 
-        # Initialize logging
-        tritonparse.structured_logging.init(
+        # Initialize logging with conditional SASS dumping
+        tritonparse.structured_logging.init(
+            temp_dir_logs,
+            enable_trace_launch=True,
+            enable_sass_dump=nvdisasm_available,
+        )
 
         # Generate test data and run kernels
         torch.manual_seed(0)
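Note: judging from the API surface this test exercises, turning on SASS extraction in one's own tracing script would look roughly like the sketch below. is_nvdisasm_available and the enable_sass_dump keyword both appear in this diff; the log directory path is illustrative.

import tritonparse.structured_logging
from tritonparse.tools.disasm import is_nvdisasm_available

# Request SASS disassembly only when the CUDA nvdisasm binary is present;
# otherwise initialize structured logging as before.
tritonparse.structured_logging.init(
    "./logs",  # illustrative log directory
    enable_trace_launch=True,
    enable_sass_dump=is_nvdisasm_available(),
)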
@@ -237,8 +244,8 @@ class TestTritonparseCUDA(unittest.TestCase):
         print(f"Found {len(log_files)} log files in {temp_dir_logs}: {log_files}")
 
         def check_event_type_counts_in_logs(log_dir: str) -> dict:
-            """Count 'launch' and unique 'compilation' events in all log files"""
-            event_counts = {"launch": 0}
+            """Count 'launch' and unique 'compilation' events in all log files and verify SASS content"""
+            event_counts = {"launch": 0, "sass_found": False}
            # Track unique compilation hashes
            compilation_hashes = set()
 
@@ -267,6 +274,43 @@ class TestTritonparseCUDA(unittest.TestCase):
                        print(
                            f"  Line {line_num}: event_type = 'compilation' (unique hash: {compilation_hash[:8]}...)"
                        )
+
+                        # Check for SASS content in compilation events
+                        file_content = event_data.get("payload", {}).get(
+                            "file_content", {}
+                        )
+                        sass_files = [
+                            key
+                            for key in file_content.keys()
+                            if key.endswith(".sass")
+                        ]
+
+                        if sass_files and not event_counts["sass_found"]:
+                            event_counts["sass_found"] = True
+                            sass_content = file_content[sass_files[0]]
+                            print(f"✓ Found SASS file: {sass_files[0]}")
+                            print(
+                                f"  SASS content preview (first 200 chars): {sass_content[:200]}..."
+                            )
+
+                            # Verify SASS content looks like assembly
+                            assert (
+                                "Function:" in sass_content
+                            ), "SASS content should contain function declaration"
+                            # Basic check for NVIDIA GPU assembly patterns
+                            assert any(
+                                pattern in sass_content.lower()
+                                for pattern in [
+                                    "mov",
+                                    "add",
+                                    "mul",
+                                    "ld",
+                                    "st",
+                                    "lop",
+                                    "s2r",
+                                ]
+                            ), "SASS content should contain GPU assembly instructions"
+
                except (json.JSONDecodeError, KeyError, TypeError) as e:
                    print(f"  Line {line_num}: Error processing line - {e}")
 
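Note: the accessors above imply that a compilation event is an NDJSON object whose payload.file_content maps artifact file names to their text, with the disassembly stored under a key ending in ".sass". A small self-contained sketch of that scan over a hand-written event; the event shape is inferred from the test, not from a documented schema.

import json

# Hypothetical NDJSON line, shaped the way the test reads it.
line = json.dumps({
    "event_type": "compilation",
    "payload": {
        "file_content": {
            "kernel.ptx": "...",
            "kernel.sass": "Function: kernel\n  MOV R1, c[0x0][0x28] ...",
        }
    },
})

event = json.loads(line)
file_content = event.get("payload", {}).get("file_content", {})
sass_files = [k for k in file_content if k.endswith(".sass")]
assert sass_files, "no SASS artifact recorded for this compilation event"
print("found:", sass_files[0])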
@@ -277,7 +321,7 @@ class TestTritonparseCUDA(unittest.TestCase):
            )
            return event_counts
 
-        # Verify event counts
+        # Verify event counts and conditional SASS extraction
        event_counts = check_event_type_counts_in_logs(temp_dir_logs)
        assert (
            event_counts["compilation"] == 1
@@ -285,6 +329,16 @@ class TestTritonparseCUDA(unittest.TestCase):
        assert (
            event_counts["launch"] == 2
        ), f"Expected 2 'launch' events, found {event_counts['launch']}"
+
+        # Conditionally verify SASS content based on nvdisasm availability
+        if nvdisasm_available:
+            assert event_counts[
+                "sass_found"
+            ], "SASS content was not found in compilation events"
+            print("✓ Successfully verified SASS extraction functionality")
+        else:
+            print("⚠️ SASS verification skipped: nvdisasm not available")
+
        print(
            "✓ Verified correct event type counts: 1 unique compilation hash, 2 launch events"
        )
@@ -297,6 +351,49 @@ class TestTritonparseCUDA(unittest.TestCase):
            # Verify parsing output
            parsed_files = os.listdir(temp_dir_parsed)
            assert len(parsed_files) > 0, "No files found in parsed output directory"
+
+            # Verify that SASS is preserved in parsed output
+            ndjson_gz_files = [f for f in parsed_files if f.endswith(".ndjson.gz")]
+            assert (
+                len(ndjson_gz_files) > 0
+            ), "No .ndjson.gz files found in parsed output"
+
+            sass_found_in_parsed = False
+            for ndjson_gz_file in ndjson_gz_files:
+                ndjson_gz_path = os.path.join(temp_dir_parsed, ndjson_gz_file)
+                with gzip.open(ndjson_gz_path, "rt", encoding="utf-8") as f:
+                    for line in f:
+                        try:
+                            event_data = json.loads(line.strip())
+                            if event_data.get("event_type") == "compilation":
+                                file_content = event_data.get("payload", {}).get(
+                                    "file_content", {}
+                                )
+                                sass_files = [
+                                    key
+                                    for key in file_content.keys()
+                                    if key.endswith(".sass")
+                                ]
+                                if sass_files:
+                                    sass_found_in_parsed = True
+                                    print("✓ SASS content preserved in parsed output")
+                                    break
+                        except json.JSONDecodeError:
+                            continue
+
+                if sass_found_in_parsed:
+                    break
+
+            # Conditionally verify SASS content is preserved in parsed output
+            if nvdisasm_available:
+                assert (
+                    sass_found_in_parsed
+                ), "SASS content was not preserved in parsed output"
+            else:
+                print(
+                    "⚠️ SASS preservation verification skipped: nvdisasm not available"
+                )
+
        finally:
            # Clean up
            if should_keep_output():
@@ -745,7 +842,9 @@ class TestTritonparseCUDA(unittest.TestCase):
            f.write(kernel_src)
 
        # 3) Generate logs by running the kernel once
-        tritonparse.structured_logging.init(
+        tritonparse.structured_logging.init(
+            logs_dir, enable_trace_launch=True, enable_more_tensor_information=True
+        )
        try:
            if kernel_dir not in _sys.path:
                _sys.path.insert(0, kernel_dir)
tritonparse/reproducer/cli.py
@@ -5,9 +5,13 @@ def _add_reproducer_args(parser: argparse.ArgumentParser) -> None:
    """Add common arguments for the reproducer to a parser."""
    parser.add_argument("input", help="Path to the ndjson/ndjson.gz log file")
    parser.add_argument(
-        "--line
+        "--line",
        type=int,
-
+        default=1,
+        help=(
+            "The line number (1-based) of the launch event in the input file to reproduce. "
+            "Defaults to 1."
+        ),
    )
    parser.add_argument(
        "--out-dir",
tritonparse/reproducer/templates/example.py
@@ -1,6 +1,12 @@
+"""
+This file is automatically generated by TritonParse reproducer.
+It contains a smallest testing example for a Triton kernel.
+"""
+
 import hashlib
 import importlib
 import json
+import logging
 import sys
 from functools import lru_cache
 from pathlib import Path
@@ -139,25 +145,62 @@ def _create_arg_from_info(arg_info):
    elif arg_type == "tensor":
        if arg_info.get("blob_path"):
            return load_tensor(arg_info.get("blob_path"), arg_info.get("device"))
+
+        # Extract basic tensor properties
        dtype_str = arg_info.get("dtype")
        try:
            torch_dtype = getattr(torch, dtype_str.split(".")[-1])
        except AttributeError:
+            logging.error(f"Unsupported dtype: {dtype_str}. Defaulting to float32.")
            torch_dtype = torch.float32
 
        shape = arg_info.get("shape", [])
        device = arg_info.get("device", "cpu")
 
+        # Extract statistical information if available
+        mean = arg_info.get("mean")
+        std = arg_info.get("std")
+        min_val = arg_info.get("min")
+        max_val = arg_info.get("max")
+        has_stats = (
+            mean is not None
+            and std is not None
+            and min_val is not None
+            and max_val is not None
+        )
+
+        if arg_info.get("tensor_capture_error", False):
+            logging.error(
+                f"Error: Tensor '{arg_info.get('name', '')}' had capture error. Generating random tensor instead."
+            )
+
        # Use a dummy tensor to check properties of the dtype
        tensor_props = torch.empty(0, dtype=torch_dtype)
 
-        # Case 1: Floating point
+        # Case 1: Floating point types
        if tensor_props.is_floating_point():
-            if
-
-
+            if has_stats:
+                # Generate tensor with statistical properties matching original data
+                if std == 0 or min_val == max_val:
+                    # Constant tensor
+                    return torch.full(shape, mean, dtype=torch_dtype, device=device)
+                # Generate normal distribution with mean and std, then clamp to [min, max]
+                tensor = (
+                    torch.randn(shape, dtype=torch.float32, device=device) * std + mean
+                )
+                tensor = torch.clamp(tensor, min=min_val, max=max_val)
+                return tensor.to(torch_dtype)
            else:
-
+                # Fallback to original random generation
+                if torch_dtype in [torch.float8_e4m3fn, torch.float8_e5m2]:
+                    tmp = torch.rand(shape, dtype=torch.float32, device=device)
+                    return tmp.to(torch_dtype)
+                else:
+                    return torch.empty(
+                        shape, dtype=torch_dtype, device=device
+                    ).random_()
+
+        # Case 2: Integer types
        elif torch_dtype in [
            torch.int8,
            torch.int16,
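Note: every has_stats branch in this hunk and the ones below uses the same recipe: sample from N(mean, std), clamp to the recorded [min, max], then cast, rounding first for integer dtypes. A self-contained sketch of that recipe; the helper name is ours, not the package's.

import torch

def synth_like(shape, mean, std, min_val, max_val, dtype, device="cpu"):
    # Degenerate statistics mean the original tensor was constant.
    if std == 0 or min_val == max_val:
        fill = mean if dtype.is_floating_point else int(mean)
        return torch.full(shape, fill, dtype=dtype, device=device)
    # Sample in float32, then clamp to the observed range and cast.
    t = torch.randn(shape, dtype=torch.float32, device=device) * std + mean
    t = torch.clamp(t, min=min_val, max=max_val)
    if not dtype.is_floating_point:
        t = torch.round(t)
    return t.to(dtype)

x = synth_like([4, 4], mean=0.5, std=0.1, min_val=0.0, max_val=1.0, dtype=torch.float16)
print(x.mean().item(), x.min().item(), x.max().item())

This trades exactness for plausibility: the reproducer cannot recover the original tensor, but a clamped normal sample with matching moments usually exercises the same numeric paths as the traced run.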
@@ -166,9 +209,26 @@ def _create_arg_from_info(arg_info):
            torch.uint8,
            torch.bool,
        ]:
-
-
+            if has_stats and torch_dtype != torch.bool:
+                # Generate tensor with statistical properties, then round for integers
+                if std == 0 or min_val == max_val:
+                    # Constant tensor
+                    return torch.full(
+                        shape, int(mean), dtype=torch_dtype, device=device
+                    )
+                tensor = (
+                    torch.randn(shape, dtype=torch.float32, device=device) * std + mean
+                )
+                tensor = torch.clamp(tensor, min=min_val, max=max_val)
+                return torch.round(tensor).to(torch_dtype)
+            else:
+                # Fallback to original random generation
+                return torch.empty(shape, dtype=torch_dtype, device=device).random_()
+
+        # Case 3: Complex numbers need special handling
        elif tensor_props.is_complex():
+            # Complex types: fallback to original logic for now
+            # TODO: Could be improved to use statistical info if available
            float_dtype = (
                torch.float32 if torch_dtype == torch.complex64 else torch.float64
            )
@@ -176,10 +236,24 @@ def _create_arg_from_info(arg_info):
            imag_part = torch.rand(shape, dtype=float_dtype, device=device)
            return torch.complex(real_part, imag_part)
 
-        # Case
+        # Case 4: Handle other unsigned integers (like uint32) which fail with random_()
        elif "uint" in str(torch_dtype):
-
-
+            if has_stats:
+                # Generate tensor with statistical properties for unsigned integers
+                if std == 0 or min_val == max_val:
+                    return torch.full(
+                        shape, int(mean), dtype=torch_dtype, device=device
+                    )
+                tensor = (
+                    torch.randn(shape, dtype=torch.float32, device=device) * std + mean
+                )
+                tensor = torch.clamp(tensor, min=min_val, max=max_val)
+                return torch.round(tensor).to(torch_dtype)
+            else:
+                # Fallback to original random generation
+                return torch.randint(0, 1000, shape, dtype=torch_dtype, device=device)
+
+        # Case 5: If we don't know how to handle the type, raise an error
        else:
            raise NotImplementedError(
                f"Random data generation not implemented for dtype: {torch_dtype}"
tritonparse/reproducer/utils.py
@@ -84,25 +84,57 @@ def _create_arg_from_info(arg_info):
    elif arg_type == "tensor":
        if arg_info.get("blob_path"):
            return load_tensor(arg_info.get("blob_path"), arg_info.get("device"))
+
+        # Extract basic tensor properties
        dtype_str = arg_info.get("dtype")
        try:
            torch_dtype = getattr(torch, dtype_str.split(".")[-1])
        except AttributeError:
+            logger.error(f"Unsupported dtype: {dtype_str}. Defaulting to float32.")
            torch_dtype = torch.float32
 
        shape = arg_info.get("shape", [])
        device = arg_info.get("device", "cpu")
 
+        # Extract statistical information if available
+        mean = arg_info.get("mean")
+        std = arg_info.get("std")
+        min_val = arg_info.get("min")
+        max_val = arg_info.get("max")
+        has_stats = (
+            mean is not None
+            and std is not None
+            and min_val is not None
+            and max_val is not None
+        )
+
        # Use a dummy tensor to check properties of the dtype
        tensor_props = torch.empty(0, dtype=torch_dtype)
 
-        # Case 1: Floating point
+        # Case 1: Floating point types
        if tensor_props.is_floating_point():
-            if
-
-
+            if has_stats:
+                # Generate tensor with statistical properties matching original data
+                if std == 0 or min_val == max_val:
+                    # Constant tensor
+                    return torch.full(shape, mean, dtype=torch_dtype, device=device)
+                # Generate normal distribution with mean and std, then clamp to [min, max]
+                tensor = (
+                    torch.randn(shape, dtype=torch.float32, device=device) * std + mean
+                )
+                tensor = torch.clamp(tensor, min=min_val, max=max_val)
+                return tensor.to(torch_dtype)
            else:
-
+                # Fallback to original random generation
+                if torch_dtype in [torch.float8_e4m3fn, torch.float8_e5m2]:
+                    tmp = torch.rand(shape, dtype=torch.float32, device=device)
+                    return tmp.to(torch_dtype)
+                else:
+                    return torch.empty(
+                        shape, dtype=torch_dtype, device=device
+                    ).random_()
+
+        # Case 2: Integer types
        elif torch_dtype in [
            torch.int8,
            torch.int16,
@@ -111,9 +143,26 @@ def _create_arg_from_info(arg_info):
            torch.uint8,
            torch.bool,
        ]:
-
-
+            if has_stats and torch_dtype != torch.bool:
+                # Generate tensor with statistical properties, then round for integers
+                if std == 0 or min_val == max_val:
+                    # Constant tensor
+                    return torch.full(
+                        shape, int(mean), dtype=torch_dtype, device=device
+                    )
+                tensor = (
+                    torch.randn(shape, dtype=torch.float32, device=device) * std + mean
+                )
+                tensor = torch.clamp(tensor, min=min_val, max=max_val)
+                return torch.round(tensor).to(torch_dtype)
+            else:
+                # Fallback to original random generation
+                return torch.empty(shape, dtype=torch_dtype, device=device).random_()
+
+        # Case 3: Complex numbers need special handling
        elif tensor_props.is_complex():
+            # Complex types: fallback to original logic for now
+            # TODO: Could be improved to use statistical info if available
            float_dtype = (
                torch.float32 if torch_dtype == torch.complex64 else torch.float64
            )
@@ -121,14 +170,29 @@ def _create_arg_from_info(arg_info):
            imag_part = torch.rand(shape, dtype=float_dtype, device=device)
            return torch.complex(real_part, imag_part)
 
-        # Case
+        # Case 4: Handle other unsigned integers (like uint32) which fail with random_()
        elif "uint" in str(torch_dtype):
-
-
+            if has_stats:
+                # Generate tensor with statistical properties for unsigned integers
+                if std == 0 or min_val == max_val:
+                    return torch.full(
+                        shape, int(mean), dtype=torch_dtype, device=device
+                    )
+                tensor = (
+                    torch.randn(shape, dtype=torch.float32, device=device) * std + mean
+                )
+                tensor = torch.clamp(tensor, min=min_val, max=max_val)
+                return torch.round(tensor).to(torch_dtype)
+            else:
+                # Fallback to original random generation
+                return torch.randint(0, 1000, shape, dtype=torch_dtype, device=device)
+
+        # Case 5: If we don't know how to handle the type, raise an error
        else:
            raise NotImplementedError(
                f"Random data generation not implemented for dtype: {torch_dtype}"
            )
+
    elif arg_type == "triton_kernels.tensor.Tensor":
        if not TRITON_KERNELS_CUSTOM_TYPES:
            raise RuntimeError(
@@ -162,7 +226,6 @@ def _create_arg_from_info(arg_info):
            )
            Tensor, Storage, StridedLayout = _get_triton_tensor_types()
            return StridedLayout(shape=arg_info.get("initial_shape"))
-
    else:
        print(f"Warning: Unhandled argument type '{arg_type}'. Returning None.")
        return None