tritonparse 0.2.4.dev20251007071533__tar.gz → 0.2.4.dev20251008071501__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tritonparse might be problematic. Click here for more details.

Files changed (122) hide show
  1. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.gitignore +1 -0
  2. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/PKG-INFO +36 -31
  3. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/README.md +35 -30
  4. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/context_manager.py +14 -1
  5. tritonparse-0.2.4.dev20251008071501/tritonparse/reproducer/templates/example.py +387 -0
  6. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/structured_logging.py +320 -3
  7. tritonparse-0.2.4.dev20251008071501/tritonparse/tools/load_tensor.py +74 -0
  8. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse.egg-info/PKG-INFO +36 -31
  9. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse.egg-info/SOURCES.txt +0 -1
  10. tritonparse-0.2.4.dev20251007071533/.github/copilot-instructions.md +0 -47
  11. tritonparse-0.2.4.dev20251007071533/tritonparse/reproducer/templates/example.py +0 -320
  12. tritonparse-0.2.4.dev20251007071533/tritonparse/tools/load_tensor.py +0 -58
  13. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.ci/README.md +0 -0
  14. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.ci/install-project.sh +0 -0
  15. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.ci/install-triton-kernels.sh +0 -0
  16. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.ci/install-triton.sh +0 -0
  17. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.ci/run-tests.sh +0 -0
  18. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.ci/setup.sh +0 -0
  19. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.github/PAGES_SETUP.md +0 -0
  20. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.github/workflows/deploy-pages-standalone.yml +0 -0
  21. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.github/workflows/deploy-pages.yml +0 -0
  22. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.github/workflows/nightly-pypi.yml +0 -0
  23. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/.github/workflows/test.yml +0 -0
  24. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/CHANGELOG.md +0 -0
  25. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/CODE_OF_CONDUCT.md +0 -0
  26. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/CONTRIBUTING.md +0 -0
  27. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/LICENSE +0 -0
  28. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/Makefile +0 -0
  29. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/__init__.py +0 -0
  30. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/docs/README.md +0 -0
  31. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/docs/screenshots/code-comparison.png +0 -0
  32. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/docs/screenshots/kernel-overview.png +0 -0
  33. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/pyproject.toml +0 -0
  34. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/run.py +0 -0
  35. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/setup.cfg +0 -0
  36. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/README.md +0 -0
  37. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/__init__.py +0 -0
  38. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/example_output/logs/dedicated_log_triton_trace_findhao_.ndjson +0 -0
  39. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/example_output/parsed_output/dedicated_log_triton_trace_findhao__mapped.ndjson.gz +0 -0
  40. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/example_output/parsed_output/f0_fc0_a0_cai-.ndjson.gz +0 -0
  41. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/example_output/parsed_output/log_file_list.json +0 -0
  42. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/example_output/parsed_output_complex/dedicated_log_triton_trace_findhao__mapped.ndjson.gz +0 -0
  43. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/example_output/parsed_output_complex/log_file_list.json +0 -0
  44. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/example_output/repro/repro_context_20250816192455.json +0 -0
  45. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/test_add.py +0 -0
  46. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tests/test_tritonparse.py +0 -0
  47. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/__init__.py +0 -0
  48. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/__main__.py +0 -0
  49. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/cli.py +0 -0
  50. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/common.py +0 -0
  51. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/event_diff.py +0 -0
  52. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/extract_source_mappings.py +0 -0
  53. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/ir_parser.py +0 -0
  54. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/mapper.py +0 -0
  55. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/__init__.py +0 -0
  56. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/cli.py +0 -0
  57. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/ingestion/ndjson.py +0 -0
  58. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/orchestrator.py +0 -0
  59. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/placeholder_replacer.py +0 -0
  60. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/templates/__init__.py +0 -0
  61. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/templates/loader.py +0 -0
  62. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/reproducer/utils.py +0 -0
  63. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/shared_vars.py +0 -0
  64. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/source_type.py +0 -0
  65. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/sourcemap_utils.py +0 -0
  66. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/tools/__init__.py +0 -0
  67. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/tools/decompress_bin_ndjson.py +0 -0
  68. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/tools/disasm.py +0 -0
  69. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/tools/format_fix.py +0 -0
  70. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/tools/prettify_ndjson.py +0 -0
  71. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/tools/readme.md +0 -0
  72. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/tp_logger.py +0 -0
  73. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/trace_processor.py +0 -0
  74. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse/utils.py +0 -0
  75. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse.egg-info/dependency_links.txt +0 -0
  76. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse.egg-info/entry_points.txt +0 -0
  77. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse.egg-info/requires.txt +0 -0
  78. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/tritonparse.egg-info/top_level.txt +0 -0
  79. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/eslint.config.js +0 -0
  80. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/index.html +0 -0
  81. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/package-lock.json +0 -0
  82. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/package.json +0 -0
  83. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/postcss.config.js +0 -0
  84. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/public/dedicated_log_triton_trace_findhao__mapped.ndjson.gz +0 -0
  85. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/public/f0_fc0_a0_cai-.ndjson +0 -0
  86. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/public/favicon.ico +0 -0
  87. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/public/logo.svg +0 -0
  88. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/scripts/inline-html.js +0 -0
  89. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/App.css +0 -0
  90. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/App.tsx +0 -0
  91. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/assets/react.svg +0 -0
  92. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/ArgumentViewer.tsx +0 -0
  93. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/Callstack.tsx +0 -0
  94. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/CodeComparisonView.tsx +0 -0
  95. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/CodeViewer.tsx +0 -0
  96. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/CompilationInfo.tsx +0 -0
  97. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/CopyCodeButton.tsx +0 -0
  98. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/DataSourceSelector.tsx +0 -0
  99. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/DiffComparisonView.tsx +0 -0
  100. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/DiffViewer.tsx +0 -0
  101. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/ExternalLink.tsx +0 -0
  102. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/SingleCodeViewer.tsx +0 -0
  103. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/StackDiffViewer.tsx +0 -0
  104. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/ToggleSwitch.tsx +0 -0
  105. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/TritonIRs.tsx +0 -0
  106. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/components/WelcomeScreen.tsx +0 -0
  107. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/context/FileDiffSession.tsx +0 -0
  108. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/index.css +0 -0
  109. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/main.tsx +0 -0
  110. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/pages/CodeView.tsx +0 -0
  111. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/pages/FileDiffView.tsx +0 -0
  112. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/pages/KernelOverview.tsx +0 -0
  113. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/utils/dataLoader.ts +0 -0
  114. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/utils/fbDetection.ts +0 -0
  115. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/utils/safeImport.ts +0 -0
  116. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/utils/tensor.ts +0 -0
  117. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/src/vite-env.d.ts +0 -0
  118. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/tailwind.config.js +0 -0
  119. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/tsconfig.app.json +0 -0
  120. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/tsconfig.json +0 -0
  121. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/tsconfig.node.json +0 -0
  122. {tritonparse-0.2.4.dev20251007071533 → tritonparse-0.2.4.dev20251008071501}/website/vite.config.ts +0 -0
@@ -70,4 +70,5 @@ env.bak/
70
70
  venv.bak/
71
71
  *.mdc
72
72
  repro_output/
73
+ .github/copilot-instructions.md
73
74
  # end
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tritonparse
3
- Version: 0.2.4.dev20251007071533
3
+ Version: 0.2.4.dev20251008071501
4
4
  Summary: TritonParse: A Compiler Tracer, Visualizer, and mini-Reproducer Generator for Triton Kernels
5
5
  Author-email: Yueming Hao <yhao@meta.com>
6
6
  License-Expression: BSD-3-Clause
@@ -27,13 +27,22 @@ Dynamic: license-file
27
27
 
28
28
  ## ✨ Key Features
29
29
 
30
- - **🚀 Launch Difference Analysis** - Automatically detect and visualize variations in kernel launch parameters, helping you pinpoint performance bottlenecks and debug launch configurations.
31
- - **🔍 Interactive Visualization** - Explore Triton kernels with detailed metadata and stack traces
32
- - **📊 Multi-format IR Support** - View TTGIR, TTIR, LLIR, PTX, and AMDGCN in one place
33
- - **🔄 Side-by-side Comparison** - Compare IR stages with synchronized highlighting
34
- - **📝 Structured Logging** - Capture detailed compilation and launch events with source mapping
35
- - **🌐 Ready-to-use Interface** - No installation required, works in your browser
36
- - **🔒 Privacy-first** - All processing happens locally in your browser, no data uploaded
30
+ ### 🔍 Visualization & Analysis
31
+ - **🚀 Launch Difference Analysis** - Detect and visualize kernel launch parameter variations
32
+ - **📊 IR Code View** - Side-by-side IR viewing with synchronized highlighting and line mapping
33
+ - **🔄 File Diff View** - Compare kernels across different trace files side-by-side
34
+ - **📝 Multi-format IR Support** - View TTGIR, TTIR, LLIR, PTX, and AMDGCN
35
+ - **🎯 Interactive Code Views** - Click-to-highlight corresponding lines across IR stages
36
+
37
+ ### 📊 Structured Logging & Analysis
38
+ - **📝 Compilation & Launch Tracing** - Capture detailed events with source mapping
39
+ - **🔍 Stack Trace Integration** - Full Python stack traces for debugging
40
+ - **📈 Metadata Extraction** - Comprehensive kernel statistics
41
+
42
+ ### 🛠️ Developer Tools
43
+ - **🔧 Reproducer Generation** - Generate standalone Python scripts to reproduce kernels
44
+ - **🌐 Browser-based Interface** - No installation required, works in your browser
45
+ - **🔒 Privacy-first** - All processing happens locally, no data uploaded
37
46
 
38
47
  ## 🚀 Quick Start
39
48
 
@@ -41,22 +50,22 @@ Dynamic: license-file
41
50
 
42
51
  ```python
43
52
  import tritonparse.structured_logging
53
+ import tritonparse.utils
44
54
 
45
- # Initialize logging with launch tracing enabled
55
+ # Initialize logging
46
56
  tritonparse.structured_logging.init("./logs/", enable_trace_launch=True)
47
57
 
48
58
  # Your Triton/PyTorch code here
49
59
  # ... your kernels ...
50
60
 
51
61
  # Parse and generate trace files
52
- import tritonparse.utils
53
- tritonparse.utils.unified_parse("./logs/")
62
+ tritonparse.utils.unified_parse("./logs/", out="./parsed_output")
54
63
  ```
55
- The example terminal output is:
56
- ```bash
57
- tritonparse log file list: /tmp/tmp1gan7zky/log_file_list.json
58
- INFO:tritonparse:Copying parsed logs from /tmp/tmp1gan7zky to /scratch/findhao/tritonparse/tests/parsed_output
59
64
 
65
+ <details>
66
+ <summary>📝 Example output (click to expand)</summary>
67
+
68
+ ```bash
60
69
  ================================================================================
61
70
  📁 TRITONPARSE PARSING RESULTS
62
71
  ================================================================================
@@ -64,13 +73,13 @@ INFO:tritonparse:Copying parsed logs from /tmp/tmp1gan7zky to /scratch/findhao/t
64
73
  📊 Total files generated: 2
65
74
 
66
75
  📄 Generated files:
67
- --------------------------------------------------
68
76
  1. 📝 dedicated_log_triton_trace_findhao__mapped.ndjson.gz (7.2KB)
69
77
  2. 📝 log_file_list.json (181B)
70
78
  ================================================================================
71
79
  ✅ Parsing completed successfully!
72
80
  ================================================================================
73
81
  ```
82
+ </details>
74
83
 
75
84
  ### 2. Visualize Results
76
85
 
@@ -106,18 +115,13 @@ pip install triton
106
115
 
107
116
  | 📖 Guide | Description |
108
117
  |----------|-------------|
109
- | **[🏠 Wiki Home](https://github.com/meta-pytorch/tritonparse/wiki)** | Complete documentation and navigation |
110
- | **[📦 Installation Guide](https://github.com/meta-pytorch/tritonparse/wiki/01.-Installation)** | Detailed setup for all scenarios |
111
- | **[📋 Usage Guide](https://github.com/meta-pytorch/tritonparse/wiki/02.-Usage-Guide)** | Complete workflow and examples |
112
- | **[🌐 Web Interface Guide](https://github.com/meta-pytorch/tritonparse/wiki/03.-Web-Interface-Guide)** | Master the visualization interface |
113
- | **[🔧 Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** | Contributing and development setup |
114
- | **[ FAQ](https://github.com/meta-pytorch/tritonparse/wiki/06.-FAQ)** | Frequently asked questions |
115
-
116
- ## 🛠️ Tech Stack
117
-
118
- - **Frontend**: React 19, TypeScript, Vite, Tailwind CSS, Monaco Editor
119
- - **Backend**: Python with Triton integration, structured logging
120
- - **Deployment**: GitHub Pages, automatic deployment
118
+ | **[🏠 Wiki Home](https://github.com/meta-pytorch/tritonparse/wiki)** | Complete documentation and quick navigation |
119
+ | **[📦 Installation](https://github.com/meta-pytorch/tritonparse/wiki/01.-Installation)** | Setup guide for all scenarios |
120
+ | **[📋 Usage Guide](https://github.com/meta-pytorch/tritonparse/wiki/02.-Usage-Guide)** | Complete workflow, examples, and reproducer |
121
+ | **[🌐 Web Interface](https://github.com/meta-pytorch/tritonparse/wiki/03.-Web-Interface-Guide)** | Master the visualization interface |
122
+ | **[🔧 Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** | Contributing and architecture overview |
123
+ | **[📝 Code Formatting](https://github.com/meta-pytorch/tritonparse/wiki/05.-Code-Formatting)** | Formatting standards and tools |
124
+ | **[❓ FAQ](https://github.com/meta-pytorch/tritonparse/wiki/06.-FAQ)** | Quick answers and troubleshooting |
121
125
 
122
126
  ## 📊 Understanding Triton Compilation
123
127
 
@@ -130,9 +134,10 @@ Each stage can be inspected and compared to understand optimization transformati
130
134
  ## 🤝 Contributing
131
135
 
132
136
  We welcome contributions! Please see our **[Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** for:
133
- - Development setup
134
- - Code formatting standards
135
- - Pull request process
137
+ - Development setup and prerequisites
138
+ - Code formatting standards (**[Formatting Guide](https://github.com/meta-pytorch/tritonparse/wiki/05.-Code-Formatting)**)
139
+ - Pull request and code review process
140
+ - Testing guidelines
136
141
  - Architecture overview
137
142
 
138
143
  ## 📞 Support & Community
@@ -9,13 +9,22 @@
9
9
 
10
10
  ## ✨ Key Features
11
11
 
12
- - **🚀 Launch Difference Analysis** - Automatically detect and visualize variations in kernel launch parameters, helping you pinpoint performance bottlenecks and debug launch configurations.
13
- - **🔍 Interactive Visualization** - Explore Triton kernels with detailed metadata and stack traces
14
- - **📊 Multi-format IR Support** - View TTGIR, TTIR, LLIR, PTX, and AMDGCN in one place
15
- - **🔄 Side-by-side Comparison** - Compare IR stages with synchronized highlighting
16
- - **📝 Structured Logging** - Capture detailed compilation and launch events with source mapping
17
- - **🌐 Ready-to-use Interface** - No installation required, works in your browser
18
- - **🔒 Privacy-first** - All processing happens locally in your browser, no data uploaded
12
+ ### 🔍 Visualization & Analysis
13
+ - **🚀 Launch Difference Analysis** - Detect and visualize kernel launch parameter variations
14
+ - **📊 IR Code View** - Side-by-side IR viewing with synchronized highlighting and line mapping
15
+ - **🔄 File Diff View** - Compare kernels across different trace files side-by-side
16
+ - **📝 Multi-format IR Support** - View TTGIR, TTIR, LLIR, PTX, and AMDGCN
17
+ - **🎯 Interactive Code Views** - Click-to-highlight corresponding lines across IR stages
18
+
19
+ ### 📊 Structured Logging & Analysis
20
+ - **📝 Compilation & Launch Tracing** - Capture detailed events with source mapping
21
+ - **🔍 Stack Trace Integration** - Full Python stack traces for debugging
22
+ - **📈 Metadata Extraction** - Comprehensive kernel statistics
23
+
24
+ ### 🛠️ Developer Tools
25
+ - **🔧 Reproducer Generation** - Generate standalone Python scripts to reproduce kernels
26
+ - **🌐 Browser-based Interface** - No installation required, works in your browser
27
+ - **🔒 Privacy-first** - All processing happens locally, no data uploaded
19
28
 
20
29
  ## 🚀 Quick Start
21
30
 
@@ -23,22 +32,22 @@
23
32
 
24
33
  ```python
25
34
  import tritonparse.structured_logging
35
+ import tritonparse.utils
26
36
 
27
- # Initialize logging with launch tracing enabled
37
+ # Initialize logging
28
38
  tritonparse.structured_logging.init("./logs/", enable_trace_launch=True)
29
39
 
30
40
  # Your Triton/PyTorch code here
31
41
  # ... your kernels ...
32
42
 
33
43
  # Parse and generate trace files
34
- import tritonparse.utils
35
- tritonparse.utils.unified_parse("./logs/")
44
+ tritonparse.utils.unified_parse("./logs/", out="./parsed_output")
36
45
  ```
37
- The example terminal output is:
38
- ```bash
39
- tritonparse log file list: /tmp/tmp1gan7zky/log_file_list.json
40
- INFO:tritonparse:Copying parsed logs from /tmp/tmp1gan7zky to /scratch/findhao/tritonparse/tests/parsed_output
41
46
 
47
+ <details>
48
+ <summary>📝 Example output (click to expand)</summary>
49
+
50
+ ```bash
42
51
  ================================================================================
43
52
  📁 TRITONPARSE PARSING RESULTS
44
53
  ================================================================================
@@ -46,13 +55,13 @@ INFO:tritonparse:Copying parsed logs from /tmp/tmp1gan7zky to /scratch/findhao/t
46
55
  📊 Total files generated: 2
47
56
 
48
57
  📄 Generated files:
49
- --------------------------------------------------
50
58
  1. 📝 dedicated_log_triton_trace_findhao__mapped.ndjson.gz (7.2KB)
51
59
  2. 📝 log_file_list.json (181B)
52
60
  ================================================================================
53
61
  ✅ Parsing completed successfully!
54
62
  ================================================================================
55
63
  ```
64
+ </details>
56
65
 
57
66
  ### 2. Visualize Results
58
67
 
@@ -88,18 +97,13 @@ pip install triton
88
97
 
89
98
  | 📖 Guide | Description |
90
99
  |----------|-------------|
91
- | **[🏠 Wiki Home](https://github.com/meta-pytorch/tritonparse/wiki)** | Complete documentation and navigation |
92
- | **[📦 Installation Guide](https://github.com/meta-pytorch/tritonparse/wiki/01.-Installation)** | Detailed setup for all scenarios |
93
- | **[📋 Usage Guide](https://github.com/meta-pytorch/tritonparse/wiki/02.-Usage-Guide)** | Complete workflow and examples |
94
- | **[🌐 Web Interface Guide](https://github.com/meta-pytorch/tritonparse/wiki/03.-Web-Interface-Guide)** | Master the visualization interface |
95
- | **[🔧 Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** | Contributing and development setup |
96
- | **[ FAQ](https://github.com/meta-pytorch/tritonparse/wiki/06.-FAQ)** | Frequently asked questions |
97
-
98
- ## 🛠️ Tech Stack
99
-
100
- - **Frontend**: React 19, TypeScript, Vite, Tailwind CSS, Monaco Editor
101
- - **Backend**: Python with Triton integration, structured logging
102
- - **Deployment**: GitHub Pages, automatic deployment
100
+ | **[🏠 Wiki Home](https://github.com/meta-pytorch/tritonparse/wiki)** | Complete documentation and quick navigation |
101
+ | **[📦 Installation](https://github.com/meta-pytorch/tritonparse/wiki/01.-Installation)** | Setup guide for all scenarios |
102
+ | **[📋 Usage Guide](https://github.com/meta-pytorch/tritonparse/wiki/02.-Usage-Guide)** | Complete workflow, examples, and reproducer |
103
+ | **[🌐 Web Interface](https://github.com/meta-pytorch/tritonparse/wiki/03.-Web-Interface-Guide)** | Master the visualization interface |
104
+ | **[🔧 Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** | Contributing and architecture overview |
105
+ | **[📝 Code Formatting](https://github.com/meta-pytorch/tritonparse/wiki/05.-Code-Formatting)** | Formatting standards and tools |
106
+ | **[❓ FAQ](https://github.com/meta-pytorch/tritonparse/wiki/06.-FAQ)** | Quick answers and troubleshooting |
103
107
 
104
108
  ## 📊 Understanding Triton Compilation
105
109
 
@@ -112,9 +116,10 @@ Each stage can be inspected and compared to understand optimization transformati
112
116
  ## 🤝 Contributing
113
117
 
114
118
  We welcome contributions! Please see our **[Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** for:
115
- - Development setup
116
- - Code formatting standards
117
- - Pull request process
119
+ - Development setup and prerequisites
120
+ - Code formatting standards (**[Formatting Guide](https://github.com/meta-pytorch/tritonparse/wiki/05.-Code-Formatting)**)
121
+ - Pull request and code review process
122
+ - Testing guidelines
118
123
  - Architecture overview
119
124
 
120
125
  ## 📞 Support & Community
@@ -17,6 +17,8 @@ class TritonParseManager:
17
17
  self,
18
18
  enable_trace_launch=False,
19
19
  split_inductor_compilations=True,
20
+ enable_tensor_blob_storage=False,
21
+ tensor_storage_quota=None,
20
22
  **parse_kwargs,
21
23
  ):
22
24
  """
@@ -25,17 +27,28 @@ class TritonParseManager:
25
27
  Args:
26
28
  enable_trace_launch: Whether to enable trace launch
27
29
  split_inductor_compilations: Whether to split inductor compilations in the output
30
+ enable_tensor_blob_storage: Whether to enable tensor blob storage
31
+ tensor_storage_quota: Storage quota in bytes for tensor blobs (default: 100GB)
28
32
  **parse_kwargs: Additional keyword arguments to pass to unified_parse
29
33
  """
30
34
  self.enable_trace_launch = enable_trace_launch
31
35
  self.split_inductor_compilations = split_inductor_compilations
36
+ self.enable_tensor_blob_storage = enable_tensor_blob_storage
37
+ self.tensor_storage_quota = tensor_storage_quota
32
38
  self.parse_kwargs = parse_kwargs
33
39
  self.dir_path = None
34
40
  self.output_link = None
35
41
 
36
42
  def __enter__(self):
37
43
  self.dir_path = createUniqueTempDirectory()
38
- init(self.dir_path, enable_trace_launch=self.enable_trace_launch)
44
+ init_kwargs = {
45
+ "enable_trace_launch": self.enable_trace_launch,
46
+ "enable_tensor_blob_storage": self.enable_tensor_blob_storage,
47
+ }
48
+ if self.tensor_storage_quota is not None:
49
+ init_kwargs["tensor_storage_quota"] = self.tensor_storage_quota
50
+
51
+ init(self.dir_path, **init_kwargs)
39
52
  return self
40
53
 
41
54
  def __exit__(self, exc_type, exc_val, exc_tb):
@@ -0,0 +1,387 @@
1
+ """
2
+ This file is automatically generated by TritonParse reproducer.
3
+ It contains a smallest testing example for a Triton kernel.
4
+ """
5
+
6
import gzip
import hashlib
import importlib
import importlib.util
import io
import json
import logging
import sys
from functools import lru_cache
from pathlib import Path
from typing import Union

import torch
18
+
19
+ # {{KERNEL_SYSPATH_PLACEHOLDER}}
20
+
21
+ # {{KERNEL_IMPORT_PLACEHOLDER}}
22
+
23
# True when the optional 'triton_kernels' package (and its tensor submodule)
# is importable; gates construction of the custom argument types below.
TRITON_KERNELS_CUSTOM_TYPES = (
    importlib.util.find_spec("triton_kernels") is not None
    and importlib.util.find_spec("triton_kernels.tensor") is not None
)
27
+
28
+
29
+ @lru_cache(maxsize=1)
30
+ def _get_triton_tensor_types():
31
+ """
32
+ Import and cache Triton custom tensor types.
33
+
34
+ Returns:
35
+ tuple: (Tensor, Storage, StridedLayout) classes from triton_kernels.tensor.
36
+
37
+ Raises:
38
+ ImportError: If the optional module 'triton_kernels.tensor' is not available.
39
+ """
40
+ mod = importlib.import_module("triton_kernels.tensor")
41
+ return (
42
+ mod.Tensor,
43
+ mod.Storage,
44
+ mod.StridedLayout,
45
+ )
46
+
47
+
48
def load_tensor(
    tensor_file_path: Union[str, Path], device: Union[str, None] = None
) -> torch.Tensor:
    """
    Load a tensor from its file path and verify its integrity using the hash in the filename.

    Args:
        tensor_file_path (str | Path): Direct path to the tensor file. Supports both:
            - .bin.gz: gzip-compressed tensor (hash is of uncompressed data)
            - .bin: uncompressed tensor (for backward compatibility)
        device (str | None): Device to load the tensor to (e.g., 'cuda:0', 'cpu').
            If None, keeps the tensor on its original device.

    Returns:
        torch.Tensor: The loaded tensor (moved to the specified device if provided)

    Raises:
        FileNotFoundError: If the tensor file doesn't exist
        RuntimeError: If the file cannot be read/decompressed or the tensor
            cannot be deserialized
        ValueError: If the computed hash doesn't match the filename hash
    """
    blob_path = Path(tensor_file_path)

    if not blob_path.exists():
        raise FileNotFoundError(f"Tensor blob not found: {blob_path}")

    # Detect compression by file extension
    is_compressed = blob_path.name.endswith(".bin.gz")

    # Read file contents. The hash is always computed over the UNCOMPRESSED
    # bytes, so decompress before hashing.
    try:
        if is_compressed:
            # gzip.open closes both the gzip wrapper and the underlying file,
            # unlike wrapping an already-open file in GzipFile.
            with gzip.open(blob_path, "rb") as f:
                file_contents = f.read()
        else:
            with open(blob_path, "rb") as f:
                file_contents = f.read()
    except (OSError, gzip.BadGzipFile) as e:
        if is_compressed:
            raise RuntimeError(
                f"Failed to decompress gzip file {blob_path}: {str(e)}"
            ) from e
        raise RuntimeError(f"Failed to read file {blob_path}: {str(e)}") from e

    # Extract expected hash from filename:
    # abc123.bin.gz -> abc123 or abc123.bin -> abc123
    expected_hash = blob_path.name.removesuffix(".bin.gz" if is_compressed else ".bin")

    # Compute hash of uncompressed data
    computed_hash = hashlib.blake2b(file_contents).hexdigest()

    # Verify hash matches filename
    if computed_hash != expected_hash:
        raise ValueError(
            f"Hash verification failed: expected '{expected_hash}' but computed '{computed_hash}'"
        )

    try:
        # Load the tensor from the in-memory buffer
        return torch.load(io.BytesIO(file_contents), map_location=device)
    except Exception as e:
        raise RuntimeError(f"Failed to load tensor from {blob_path}: {str(e)}") from e
105
+
106
+
107
def create_args_from_json_file(json_path):
    """Read a reproducer JSON file and delegate to ``create_args_from_json``."""
    with open(json_path, "r") as fp:
        parsed = json.load(fp)
    return create_args_from_json(parsed)
111
+
112
+
113
def create_args_from_json(data):
    """
    Parse a reproducer launch description and build kernel grid and argument dictionary.

    Args:
        data (dict | list): Parsed JSON describing the kernel launch. A
            single-element list is unwrapped to its only dict element.

    Returns:
        tuple[list, dict]: Grid specification list and map of argument name to value.

    Note:
        On malformed input this prints an error and terminates the process with
        exit code 1, since this runs as a standalone reproducer script.
    """
    # Handle data format validation and extraction
    if isinstance(data, list):
        if len(data) != 1:
            print(
                f"Error: Expected single element list, got list with {len(data)} elements"
            )
            sys.exit(1)
        data = data[0]
    elif not isinstance(data, dict):
        print(f"Error: Expected list or dict, got {type(data)}")
        sys.exit(1)

    grid = data.get("grid", [])
    args_dict = {}
    extracted_args = data.get("extracted_args", {})

    # Materialize each recorded argument (tensors, scalars, custom types).
    for arg_name, arg_info in extracted_args.items():
        args_dict[arg_name] = _create_arg_from_info(arg_info)

    return grid, args_dict
143
+
144
+
145
+ def _apply_stride_and_offset(tensor, shape, stride, storage_offset):
146
+ """
147
+ Apply custom stride and storage offset to a tensor if needed.
148
+
149
+ Args:
150
+ tensor: The base contiguous tensor
151
+ shape: The desired shape
152
+ stride: The desired stride (or None for contiguous)
153
+ storage_offset: The desired storage offset
154
+
155
+ Returns:
156
+ torch.Tensor: The strided tensor view or original tensor if contiguous
157
+ """
158
+ if stride is None:
159
+ return tensor
160
+
161
+ # Calculate expected contiguous stride
162
+ expected_contiguous_stride = []
163
+ s = 1
164
+ for dim_size in reversed(shape):
165
+ expected_contiguous_stride.insert(0, s)
166
+ s *= dim_size
167
+
168
+ # If stride matches contiguous stride and no storage offset, return as-is
169
+ if tuple(stride) == tuple(expected_contiguous_stride) and storage_offset == 0:
170
+ return tensor
171
+
172
+ # Calculate required storage size
173
+ if len(shape) > 0 and len(stride) > 0:
174
+ max_offset = storage_offset
175
+ for dim_stride, dim_size in zip(stride, shape):
176
+ if dim_size > 0:
177
+ max_offset += dim_stride * (dim_size - 1)
178
+ storage_size = max_offset + 1
179
+ else:
180
+ storage_size = storage_offset + 1
181
+
182
+ # Create larger storage tensor and create strided view
183
+ storage_tensor = torch.empty(storage_size, dtype=tensor.dtype, device=tensor.device)
184
+
185
+ # Create strided view
186
+ strided_view = storage_tensor.as_strided(
187
+ size=shape, stride=stride, storage_offset=storage_offset
188
+ )
189
+
190
+ # Copy data from the base tensor into the strided layout
191
+ strided_view.copy_(tensor.flatten()[: strided_view.numel()].view(shape))
192
+
193
+ return strided_view
194
+
195
+
196
def _create_base_tensor(arg_info) -> torch.Tensor:
    """
    Build the base (contiguous) tensor described by a reproducer JSON entry.

    If 'blob_path' is present, the saved tensor is loaded from disk via
    load_tensor(). Otherwise a tensor of the recorded dtype/shape/device is
    synthesized: when mean/std/min/max statistics were captured, random data
    matching those statistics is generated; otherwise a dtype-appropriate
    random fallback is used.

    Args:
        arg_info (dict): JSON description with keys such as 'blob_path',
            'dtype', 'shape', 'device', 'mean', 'std', 'min', 'max'.

    Returns:
        torch.Tensor: The constructed tensor.

    Raises:
        NotImplementedError: For dtypes with no random-generation strategy.
    """
    if arg_info.get("blob_path"):
        return load_tensor(arg_info.get("blob_path"), arg_info.get("device"))

    # Extract basic tensor properties
    dtype_str = arg_info.get("dtype")
    try:
        torch_dtype = getattr(torch, dtype_str.split(".")[-1])
    except AttributeError:
        logging.error(f"Unsupported dtype: {dtype_str}. Defaulting to float32.")
        torch_dtype = torch.float32

    shape = arg_info.get("shape", [])
    device = arg_info.get("device", "cpu")

    # Extract statistical information if available
    mean = arg_info.get("mean")
    std = arg_info.get("std")
    min_val = arg_info.get("min")
    max_val = arg_info.get("max")
    # Statistics are only used when the capture recorded all four values.
    has_stats = (
        mean is not None
        and std is not None
        and min_val is not None
        and max_val is not None
    )

    if arg_info.get("tensor_capture_error", False):
        logging.error(
            f"Error: Tensor '{arg_info.get('name', '')}' had capture error. Generating random tensor instead."
        )

    # Use a dummy tensor to check properties of the dtype
    tensor_props = torch.empty(0, dtype=torch_dtype)

    # Case 1: Floating point types
    if tensor_props.is_floating_point():
        if has_stats:
            # Generate tensor with statistical properties matching original data
            if std == 0 or min_val == max_val:
                # Constant tensor
                return torch.full(shape, mean, dtype=torch_dtype, device=device)
            # Generate normal distribution with mean and std, then clamp to [min, max]
            tensor = torch.randn(shape, dtype=torch.float32, device=device) * std + mean
            tensor = torch.clamp(tensor, min=min_val, max=max_val)
            return tensor.to(torch_dtype)
        else:
            # Fallback to original random generation
            if torch_dtype in [torch.float8_e4m3fn, torch.float8_e5m2]:
                # float8 has no native RNG; sample in float32 then downcast.
                tmp = torch.rand(shape, dtype=torch.float32, device=device)
                return tmp.to(torch_dtype)
            else:
                # NOTE(review): random_() on a float tensor yields integer-valued
                # floats — presumably acceptable for reproducer inputs; confirm.
                return torch.empty(shape, dtype=torch_dtype, device=device).random_()

    # Case 2: Integer types
    elif torch_dtype in [
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.uint8,
        torch.bool,
    ]:
        if has_stats and torch_dtype != torch.bool:
            # Generate tensor with statistical properties, then round for integers
            if std == 0 or min_val == max_val:
                # Constant tensor
                return torch.full(shape, int(mean), dtype=torch_dtype, device=device)
            tensor = torch.randn(shape, dtype=torch.float32, device=device) * std + mean
            tensor = torch.clamp(tensor, min=min_val, max=max_val)
            return torch.round(tensor).to(torch_dtype)
        else:
            # Fallback to original random generation
            return torch.empty(shape, dtype=torch_dtype, device=device).random_()

    # Case 3: Complex numbers need special handling
    elif tensor_props.is_complex():
        # Complex types: fallback to original logic for now
        # TODO: Could be improved to use statistical info if available
        float_dtype = torch.float32 if torch_dtype == torch.complex64 else torch.float64
        real_part = torch.rand(shape, dtype=float_dtype, device=device)
        imag_part = torch.rand(shape, dtype=float_dtype, device=device)
        return torch.complex(real_part, imag_part)

    # Case 4: Handle other unsigned integers (like uint32) which fail with random_()
    elif "uint" in str(torch_dtype):
        if has_stats:
            # Generate tensor with statistical properties for unsigned integers
            if std == 0 or min_val == max_val:
                return torch.full(shape, int(mean), dtype=torch_dtype, device=device)
            tensor = torch.randn(shape, dtype=torch.float32, device=device) * std + mean
            tensor = torch.clamp(tensor, min=min_val, max=max_val)
            return torch.round(tensor).to(torch_dtype)
        else:
            # Fallback to original random generation
            return torch.randint(0, 1000, shape, dtype=torch_dtype, device=device)

    # Case 5: If we don't know how to handle the type, raise an error
    else:
        raise NotImplementedError(
            f"Random data generation not implemented for dtype: {torch_dtype}"
        )
298
+
299
+
300
def _create_tensor(arg_info) -> torch.Tensor:
    """Build a tensor from its JSON description, then apply any custom layout."""
    base = _create_base_tensor(arg_info)
    # Re-lay the base tensor if the capture recorded a non-contiguous layout.
    return _apply_stride_and_offset(
        base,
        arg_info.get("shape", []),
        arg_info.get("stride"),
        arg_info.get("storage_offset", 0),
    )
308
+
309
+
310
+ def _create_arg_from_info(arg_info):
311
+ """
312
+ Recursively construct a kernel argument from its JSON schema.
313
+
314
+ Args:
315
+ arg_info (dict): JSON object describing a single argument, including
316
+ fields like 'type', 'value', 'dtype', 'shape', 'device', etc.
317
+
318
+ Returns:
319
+ Any: The constructed Python object suitable for kernel invocation.
320
+
321
+ Raises:
322
+ RuntimeError: When required optional dependencies are missing.
323
+ NotImplementedError: When a dtype or type is not supported yet.
324
+ """
325
+ arg_type = arg_info.get("type")
326
+
327
+ if arg_type == "NoneType":
328
+ return None
329
+
330
+ if arg_type in ["int", "bool", "str", "float"]:
331
+ return arg_info.get("value")
332
+
333
+ elif arg_type == "tensor":
334
+ return _create_tensor(arg_info)
335
+
336
+ elif arg_type == "triton_kernels.tensor.Tensor":
337
+ if not TRITON_KERNELS_CUSTOM_TYPES:
338
+ raise RuntimeError(
339
+ "Optional dependency 'triton_kernels.tensor' is not installed; cannot construct Tensor."
340
+ )
341
+ Tensor, Storage, StridedLayout = _get_triton_tensor_types()
342
+ storage = _create_arg_from_info(arg_info.get("storage"))
343
+ dtype_str = arg_info.get("dtype")
344
+ torch_dtype = getattr(torch, dtype_str.split(".")[-1])
345
+ return Tensor(
346
+ storage=storage,
347
+ shape=arg_info.get("shape"),
348
+ shape_max=arg_info.get("shape_max"),
349
+ dtype=torch_dtype,
350
+ )
351
+
352
+ elif arg_type == "triton_kernels.tensor.Storage":
353
+ if not TRITON_KERNELS_CUSTOM_TYPES:
354
+ raise RuntimeError(
355
+ "Optional dependency 'triton_kernels.tensor' is not installed; cannot construct Storage."
356
+ )
357
+ Tensor, Storage, StridedLayout = _get_triton_tensor_types()
358
+ data = _create_arg_from_info(arg_info.get("data"))
359
+ layout = _create_arg_from_info(arg_info.get("layout"))
360
+ return Storage(data=data, layout=layout)
361
+
362
+ elif arg_type == "StridedLayout":
363
+ if not TRITON_KERNELS_CUSTOM_TYPES:
364
+ raise RuntimeError(
365
+ "Optional dependency 'triton_kernels.tensor' is not installed; cannot construct StridedLayout."
366
+ )
367
+ Tensor, Storage, StridedLayout = _get_triton_tensor_types()
368
+ return StridedLayout(shape=arg_info.get("initial_shape"))
369
+ else:
370
+ print(f"Warning: Unhandled argument type '{arg_type}'. Returning None.")
371
+ return None
372
+
373
+
374
if __name__ == "__main__":
    # The {{...}} placeholders below are substituted by the TritonParse
    # reproducer generator; do not edit them by hand.
    script_dir = Path(__file__).resolve().parent
    json_file = script_dir / "{{JSON_FILE_NAME_PLACEHOLDER}}"
    grid, args_dict = create_args_from_json_file(str(json_file))

    print("Generated kernel arguments dictionary:")
    for name, arg in args_dict.items():
        print(f"  {name}: {arg}")
    print(f"Grid: {grid}")

    # {{KERNEL_INVOCATION_PLACEHOLDER}}

    # Wait for the (generated) kernel launch above to finish before reporting.
    torch.cuda.synchronize()
    print("Kernel execution finished.")