tritonparse-0.1.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of tritonparse might be problematic.

Files changed (49)
  1. tritonparse-0.1.1/LICENSE +29 -0
  2. tritonparse-0.1.1/PKG-INFO +10 -0
  3. tritonparse-0.1.1/README.md +124 -0
  4. tritonparse-0.1.1/pyproject.toml +34 -0
  5. tritonparse-0.1.1/setup.cfg +4 -0
  6. tritonparse-0.1.1/tests/test_add.py +81 -0
  7. tritonparse-0.1.1/tests/test_add2.py +81 -0
  8. tritonparse-0.1.1/tests/test_reproducer.py +128 -0
  9. tritonparse-0.1.1/tests/test_tritonparse.py +598 -0
  10. tritonparse-0.1.1/tritonparse/__init__.py +0 -0
  11. tritonparse-0.1.1/tritonparse/common.py +409 -0
  12. tritonparse-0.1.1/tritonparse/event_diff.py +120 -0
  13. tritonparse-0.1.1/tritonparse/extract_source_mappings.py +49 -0
  14. tritonparse-0.1.1/tritonparse/ir_parser.py +220 -0
  15. tritonparse-0.1.1/tritonparse/mapper.py +100 -0
  16. tritonparse-0.1.1/tritonparse/reproducer/__init__.py +21 -0
  17. tritonparse-0.1.1/tritonparse/reproducer/__main__.py +81 -0
  18. tritonparse-0.1.1/tritonparse/reproducer/cli.py +37 -0
  19. tritonparse-0.1.1/tritonparse/reproducer/config.py +15 -0
  20. tritonparse-0.1.1/tritonparse/reproducer/factory.py +16 -0
  21. tritonparse-0.1.1/tritonparse/reproducer/ingestion/__init__.py +6 -0
  22. tritonparse-0.1.1/tritonparse/reproducer/ingestion/ndjson.py +165 -0
  23. tritonparse-0.1.1/tritonparse/reproducer/orchestrator.py +65 -0
  24. tritonparse-0.1.1/tritonparse/reproducer/param_generator.py +142 -0
  25. tritonparse-0.1.1/tritonparse/reproducer/prompts/__init__.py +1 -0
  26. tritonparse-0.1.1/tritonparse/reproducer/prompts/loader.py +18 -0
  27. tritonparse-0.1.1/tritonparse/reproducer/providers/__init__.py +1 -0
  28. tritonparse-0.1.1/tritonparse/reproducer/providers/base.py +14 -0
  29. tritonparse-0.1.1/tritonparse/reproducer/providers/gemini.py +47 -0
  30. tritonparse-0.1.1/tritonparse/reproducer/runtime/__init__.py +1 -0
  31. tritonparse-0.1.1/tritonparse/reproducer/runtime/executor.py +13 -0
  32. tritonparse-0.1.1/tritonparse/reproducer/utils/io.py +6 -0
  33. tritonparse-0.1.1/tritonparse/shared_vars.py +9 -0
  34. tritonparse-0.1.1/tritonparse/source_type.py +56 -0
  35. tritonparse-0.1.1/tritonparse/sourcemap_utils.py +72 -0
  36. tritonparse-0.1.1/tritonparse/structured_logging.py +1046 -0
  37. tritonparse-0.1.1/tritonparse/tools/__init__.py +0 -0
  38. tritonparse-0.1.1/tritonparse/tools/decompress_bin_ndjson.py +118 -0
  39. tritonparse-0.1.1/tritonparse/tools/format_fix.py +149 -0
  40. tritonparse-0.1.1/tritonparse/tools/load_tensor.py +58 -0
  41. tritonparse-0.1.1/tritonparse/tools/prettify_ndjson.py +315 -0
  42. tritonparse-0.1.1/tritonparse/tp_logger.py +9 -0
  43. tritonparse-0.1.1/tritonparse/trace_processor.py +331 -0
  44. tritonparse-0.1.1/tritonparse/utils.py +156 -0
  45. tritonparse-0.1.1/tritonparse.egg-info/PKG-INFO +10 -0
  46. tritonparse-0.1.1/tritonparse.egg-info/SOURCES.txt +47 -0
  47. tritonparse-0.1.1/tritonparse.egg-info/dependency_links.txt +1 -0
  48. tritonparse-0.1.1/tritonparse.egg-info/requires.txt +4 -0
  49. tritonparse-0.1.1/tritonparse.egg-info/top_level.txt +1 -0
tritonparse-0.1.1/LICENSE
@@ -0,0 +1,29 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2019, pytorch
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
tritonparse-0.1.1/PKG-INFO
@@ -0,0 +1,10 @@
+ Metadata-Version: 2.4
+ Name: tritonparse
+ Version: 0.1.1
+ Project-URL: Homepage, https://github.com/meta-pytorch/tritonparse
+ Requires-Python: >=3.10
+ License-File: LICENSE
+ Requires-Dist: triton>3.3.1
+ Provides-Extra: test
+ Requires-Dist: coverage>=7.0.0; extra == "test"
+ Dynamic: license-file
tritonparse-0.1.1/README.md
@@ -0,0 +1,124 @@
+ # TritonParse
+
+ [![License: BSD-3](https://img.shields.io/badge/License-BSD--3-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
+ [![GitHub Pages](https://img.shields.io/badge/GitHub%20Pages-Deploy-brightgreen)](https://meta-pytorch.org/tritonparse/)
+
+ **A comprehensive visualization and analysis tool for Triton IR files** — helping developers analyze, debug, and understand Triton kernel compilation processes.
+
+ 🌐 **[Try it online →](https://meta-pytorch.org/tritonparse/?json_url=https://meta-pytorch.org/tritonparse/dedicated_log_triton_trace_findhao__mapped.ndjson.gz)**
+
+ ## ✨ Key Features
+
+ - **🚀 Launch Difference Analysis** - Automatically detect and visualize variations in kernel launch parameters, helping you pinpoint performance bottlenecks and debug launch configurations
+ - **🔍 Interactive Visualization** - Explore Triton kernels with detailed metadata and stack traces
+ - **📊 Multi-format IR Support** - View TTGIR, TTIR, LLIR, PTX, and AMDGCN in one place
+ - **🔄 Side-by-side Comparison** - Compare IR stages with synchronized highlighting
+ - **📝 Structured Logging** - Capture detailed compilation and launch events with source mapping
+ - **🌐 Ready-to-use Interface** - No installation required, works in your browser
+ - **🔒 Privacy-first** - All processing happens locally in your browser, no data uploaded
+
+ ## 🚀 Quick Start
+
+ ### 1. Generate Traces
+
+ ```python
+ import tritonparse.structured_logging
+
+ # Initialize logging with launch tracing enabled
+ tritonparse.structured_logging.init("./logs/", enable_trace_launch=True)
+
+ # Your Triton/PyTorch code here
+ # ... your kernels ...
+
+ # Parse and generate trace files
+ import tritonparse.utils
+ tritonparse.utils.unified_parse("./logs/")
+ ```
+ Example terminal output:
+ ```bash
+ tritonparse log file list: /tmp/tmp1gan7zky/log_file_list.json
+ INFO:tritonparse:Copying parsed logs from /tmp/tmp1gan7zky to /scratch/findhao/tritonparse/tests/parsed_output
+
+ ================================================================================
+ 📁 TRITONPARSE PARSING RESULTS
+ ================================================================================
+ 📂 Parsed files directory: /scratch/findhao/tritonparse/tests/parsed_output
+ 📊 Total files generated: 2
+
+ 📄 Generated files:
+ --------------------------------------------------
+ 1. 📝 dedicated_log_triton_trace_findhao__mapped.ndjson.gz (7.2KB)
+ 2. 📝 log_file_list.json (181B)
+ ================================================================================
+ ✅ Parsing completed successfully!
+ ================================================================================
+ ```
+
+ ### 2. Visualize Results
+
+ **Visit [https://meta-pytorch.org/tritonparse/](https://meta-pytorch.org/tritonparse/?json_url=https://meta-pytorch.org/tritonparse/dedicated_log_triton_trace_findhao__mapped.ndjson.gz)** and open your local trace files (.ndjson.gz format).
+
+ > **🔒 Privacy Note**: Your trace files are processed entirely in your browser - nothing is uploaded to any server!
+
+ ## 🛠️ Installation
+
+ **For basic usage (trace generation):**
+ ```bash
+ git clone https://github.com/meta-pytorch/tritonparse.git
+ cd tritonparse
+ pip install -e .
+ ```
+
+ **Prerequisites:** Python ≥ 3.10, Triton ≥ 3.4.0, GPU required (NVIDIA/AMD)
+
+ TritonParse relies on new features in Triton. Please install the latest version of Triton:
+ ```bash
+ pip install triton
+ ```
+
+ ## 📚 Complete Documentation
+
+ | 📖 Guide | Description |
+ |----------|-------------|
+ | **[🏠 Wiki Home](https://github.com/meta-pytorch/tritonparse/wiki)** | Complete documentation and navigation |
+ | **[📦 Installation Guide](https://github.com/meta-pytorch/tritonparse/wiki/01.-Installation)** | Detailed setup for all scenarios |
+ | **[📋 Usage Guide](https://github.com/meta-pytorch/tritonparse/wiki/02.-Usage-Guide)** | Complete workflow and examples |
+ | **[🌐 Web Interface Guide](https://github.com/meta-pytorch/tritonparse/wiki/03.-Web-Interface-Guide)** | Master the visualization interface |
+ | **[🔧 Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** | Contributing and development setup |
+ | **[❓ FAQ](https://github.com/meta-pytorch/tritonparse/wiki/06.-FAQ)** | Frequently asked questions |
+
+ ## 🛠️ Tech Stack
+
+ - **Frontend**: React 19, TypeScript, Vite, Tailwind CSS, Monaco Editor
+ - **Backend**: Python with Triton integration, structured logging
+ - **Deployment**: GitHub Pages, automatic deployment
+
+ ## 📊 Understanding Triton Compilation
+
+ TritonParse visualizes the complete Triton compilation pipeline:
+
+ **Python Source** → **TTIR** → **TTGIR** → **LLIR** → **PTX/AMDGCN**
+
+ Each stage can be inspected and compared to understand optimization transformations.
+
+ ## 🤝 Contributing
+
+ We welcome contributions! Please see our **[Developer Guide](https://github.com/meta-pytorch/tritonparse/wiki/04.-Developer-Guide)** for:
+ - Development setup
+ - Code formatting standards
+ - Pull request process
+ - Architecture overview
+
+ ## 📞 Support & Community
+
+ - **🐛 Report Issues**: [GitHub Issues](https://github.com/meta-pytorch/tritonparse/issues)
+ - **💬 Discussions**: [GitHub Discussions](https://github.com/meta-pytorch/tritonparse/discussions)
+ - **📚 Documentation**: [TritonParse Wiki](https://github.com/meta-pytorch/tritonparse/wiki)
+
+ ## 📄 License
+
+ This project is licensed under the BSD 3-Clause License - see the [LICENSE](LICENSE) file for details.
+
+ ---
+
+ **✨ Ready to get started?** Visit our **[Installation Guide](https://github.com/meta-pytorch/tritonparse/wiki/01.-Installation)** or try the **[online tool](https://meta-pytorch.org/tritonparse/)** directly!
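
The compilation pipeline summarized in the README's "Understanding Triton Compilation" section can also be checked from plain Python, without the web UI. Below is a minimal sketch, assuming a CUDA build of a recent Triton release in which launching a `@triton.jit` kernel returns a handle whose `asm` dict exposes the per-stage artifacts; that handle API belongs to Triton itself, not to tritonparse:

```python
# Sketch: print the beginning of each IR stage for a trivial kernel.
# Assumes a CUDA device and a recent Triton where the launch returns a
# compiled-kernel handle with an `asm` dict keyed by stage name.
import torch
import triton
import triton.language as tl


@triton.jit
def copy_kernel(src_ptr, dst_ptr, n, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    tl.store(dst_ptr + offs, tl.load(src_ptr + offs, mask=mask), mask=mask)


src = torch.randn(1024, device="cuda")
dst = torch.empty_like(src)
handle = copy_kernel[(1,)](src, dst, src.numel(), BLOCK=1024)
for stage in ("ttir", "ttgir", "llir", "ptx"):
    print(f"=== {stage} ===")
    print(handle.asm[stage][:200])  # first 200 characters of each stage
```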
tritonparse-0.1.1/pyproject.toml
@@ -0,0 +1,34 @@
+ [build-system]
+ requires = ["setuptools>=40.8.0", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "tritonparse"
+ version = "0.1.1"
+ dependencies = [
+     "triton>3.3.1",
+ ]
+ requires-python = ">=3.10"
+
+ [project.optional-dependencies]
+ test = [
+     "coverage>=7.0.0",
+ ]
+
+
+ [tool.setuptools.packages.find]
+ include = ["tritonparse*"]
+
+ [tool.black]
+ line-length = 88
+ target-version = ["py310"]
+
+ [tool.ufmt]
+ formatter = "black"
+ sorter = "usort"
+
+ [tool.usort]
+ first_party_detection = false
+
+ [project.urls]
+ "Homepage" = "https://github.com/meta-pytorch/tritonparse"
tritonparse-0.1.1/setup.cfg
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
tritonparse-0.1.1/tests/test_add.py
@@ -0,0 +1,81 @@
+ """
+ Simple Triton kernel for tensor addition. This file is not included in unit tests.
+
+ Test Plan:
+ ```
+ TORCHINDUCTOR_FX_GRAPH_CACHE=0 TRITONPARSE_DEBUG=1 python tests/test_add.py
+ ```
+ """
+
+ import os
+
+ import torch
+ import triton
+ import triton.language as tl
+
+ import tritonparse.structured_logging
+ import tritonparse.utils
+
+ log_path = "./logs"
+ tritonparse.structured_logging.init(log_path, enable_trace_launch=True)
+
+ os.environ["TORCHINDUCTOR_FX_GRAPH_CACHE"] = "0"
+
+
+ @triton.jit
+ def add_kernel(
+     a_ptr,
+     b_ptr,
+     c_ptr,
+     n_elements,
+     BLOCK_SIZE: tl.constexpr,
+ ):
+     pid = tl.program_id(axis=0)
+     block_start = pid * BLOCK_SIZE
+     offsets = block_start + tl.arange(0, BLOCK_SIZE)
+     mask = offsets < n_elements
+
+     a = tl.load(a_ptr + offsets, mask=mask)
+     b = tl.load(b_ptr + offsets, mask=mask)
+     c = a + b
+     tl.store(c_ptr + offsets, c, mask=mask)
+
+
+ def tensor_add(a, b):
+     n_elements = a.numel()
+     c = torch.empty_like(a)
+     BLOCK_SIZE = 1024
+     grid = (triton.cdiv(n_elements, BLOCK_SIZE),)
+     add_kernel[grid](a, b, c, n_elements, BLOCK_SIZE)
+     return c
+
+
+ def simple_add(a, b):
+     return a + b
+
+
+ def test_tensor_add():
+     torch.manual_seed(0)
+     size = (1024, 1024)
+     a = torch.randn(size, device="cuda", dtype=torch.float32)
+     b = torch.randn(size, device="cuda", dtype=torch.float32)
+
+     # Test Triton kernel
+     c_triton = tensor_add(a, b)
+     c_triton.sum()
+     tensor_add(a, b)
+     print("Triton kernel executed successfully")
+
+     # Test torch.compile
+     compiled_add = torch.compile(simple_add)
+     c_compiled = compiled_add(a, b)
+     c_compiled.sum()
+     print("Torch compiled function executed successfully")
+
+
+ if __name__ == "__main__":
+     test_tensor_add()
+     # Use improved unified_parse with explicit output directory
+     tritonparse.utils.unified_parse(
+         source=log_path, out="./parsed_output", overwrite=True
+     )
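
After this test runs, `unified_parse` writes a compressed NDJSON trace into `./parsed_output` (the README's example output above shows a `*_mapped.ndjson.gz` file). A minimal sketch for peeking at the events in such a file; the exact file name varies per run, so the path below is a placeholder:

```python
# Sketch: list the event types recorded in a parsed tritonparse trace.
# "trace_mapped.ndjson.gz" is a placeholder; substitute the file name
# printed by unified_parse. Event records carry an "event_type" field
# such as "compilation" or "launch" (see tests/test_reproducer.py below).
import gzip
import json

with gzip.open("parsed_output/trace_mapped.ndjson.gz", "rt", encoding="utf-8") as f:
    for line in f:
        event = json.loads(line)
        print(event.get("event_type"))
```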
tritonparse-0.1.1/tests/test_add2.py
@@ -0,0 +1,81 @@
+ """
+ Simple Triton kernel for tensor addition. This file is not included in unit tests.
+
+ Test Plan:
+ ```
+ TORCHINDUCTOR_FX_GRAPH_CACHE=0 TRITONPARSE_DEBUG=1 python tests/test_add2.py
+ ```
+ """
+
+ import os
+
+ import torch
+ import triton
+ import triton.language as tl
+
+ import tritonparse.structured_logging
+ import tritonparse.utils
+
+ log_path = "./logs"
+ tritonparse.structured_logging.init(log_path, enable_trace_launch=True)
+
+ os.environ["TORCHINDUCTOR_FX_GRAPH_CACHE"] = "0"
+
+
+ @triton.jit
+ def add_kernel(
+     a_ptr,
+     b_ptr,
+     c_ptr,
+     n_elements,
+     BLOCK_SIZE: tl.constexpr,
+ ):
+     pid = tl.program_id(axis=0)
+     block_start = pid * BLOCK_SIZE
+     offsets = block_start + tl.arange(0, BLOCK_SIZE)
+     mask = offsets < n_elements
+
+     a = tl.load(a_ptr + offsets, mask=mask)
+     b = tl.load(b_ptr + offsets, mask=mask)
+     c = a + b
+     tl.store(c_ptr + offsets, c, mask=mask)
+
+
+ def tensor_add(a, b):
+     n_elements = a.numel()
+     c = torch.empty_like(a)
+     BLOCK_SIZE = 1024
+     grid = (triton.cdiv(n_elements, BLOCK_SIZE),)
+     add_kernel[grid](a, b, c, n_elements, BLOCK_SIZE)
+     return c
+
+
+ def simple_add(a, b):
+     return a + b
+
+
+ def test_tensor_add():
+     torch.manual_seed(0)
+     size = (1024, 1024)
+     a = torch.randn(size, device="cuda", dtype=torch.float32)
+     b = torch.randn(size, device="cuda", dtype=torch.float32)
+
+     # # Test Triton kernel
+     # c_triton = tensor_add(a, b)
+     # c_triton.sum()
+     # tensor_add(a, b)
+     # print("Triton kernel executed successfully")
+
+     # Test torch.compile
+     compiled_add = torch.compile(simple_add)
+     c_compiled = compiled_add(a, b)
+     c_compiled.sum()
+     print("Torch compiled function executed successfully")
+
+
+ if __name__ == "__main__":
+     test_tensor_add()
+     # Use improved unified_parse with explicit output directory
+     tritonparse.utils.unified_parse(
+         source=log_path, out="./parsed_output", overwrite=True
+     )
tritonparse-0.1.1/tests/test_reproducer.py
@@ -0,0 +1,128 @@
+ import json
+ from pathlib import Path
+
+ from tritonparse.reproducer import (
+     generate_allocation_snippet,
+     generate_from_ndjson,
+     generate_kwargs_dict,
+ )
+
+
+ class DummyProvider:
+     def generate_code(
+         self,
+         system_prompt: str,
+         user_prompt: str,
+         *,
+         temperature: float = 0.2,
+         max_tokens: int = 8192,
+         stop=None,
+         extra=None,
+     ) -> str:
+         # Return a tiny script that always runs successfully
+         return """
+ print("dummy ok")
+ """
+
+
+ def _write_minimal_ndjson(path: Path) -> None:
+     comp_event = {
+         "event_type": "compilation",
+         "payload": {
+             "metadata": {
+                 "hash": "h1",
+                 "num_warps": 4,
+                 "num_stages": 2,
+                 "arch": "sm_90",
+                 "backend_name": "ptx",
+                 "triton_version": "3.0.0",
+             },
+             "python_source": {
+                 "code": """
+ import triton
+ import triton.language as tl
+
+ @triton.jit
+ def kernel(X_ptr):
+     pass
+ """,
+             },
+         },
+     }
+     launch_event = {
+         "event_type": "launch",
+         "grid": [1],
+         "compilation_metadata": {
+             "hash": "h1",
+             "num_warps": 4,
+             "num_stages": 2,
+         },
+         "extracted_args": {
+             "X": {
+                 "type": "tensor",
+                 "shape": [8, 8],
+                 "dtype": "float32",
+                 "device": "cuda:0",
+                 "stride": [8, 1],
+                 "is_contiguous": True,
+                 "numel": 64,
+             },
+             "BLOCK": {"type": "constexpr", "value": 128},
+         },
+     }
+     with path.open("w", encoding="utf-8") as f:
+         f.write(json.dumps(comp_event) + "\n")
+         f.write(json.dumps(launch_event) + "\n")
+
+
+ def test_param_generator_snippet_basic(tmp_path):
+     # Make a pseudo bundle and verify snippet contains expected constructs
+     bundle = {
+         "tensor_args": {
+             "X": {
+                 "type": "tensor",
+                 "shape": [4, 4],
+                 "dtype": "float32",
+                 "device": "cuda:0",
+                 "stride": [4, 1],
+                 "is_contiguous": True,
+             }
+         },
+         "args": {
+             "X": {
+                 "type": "tensor",
+                 "shape": [4, 4],
+                 "dtype": "float32",
+                 "device": "cuda:0",
+                 "stride": [4, 1],
+                 "is_contiguous": True,
+             },
+             "N": {"type": "constexpr", "value": 4},
+         },
+     }
+     snippet = generate_allocation_snippet(bundle)
+     assert "torch.empty" in snippet
+     assert "device = 'cuda:0'" in snippet
+     kwargs = generate_kwargs_dict({"launch": {"kwargs": {"N": 4}}})
+     assert kwargs == {"N": 4}
+
+
+ def test_orchestrator_with_dummy_provider(tmp_path):
+     ndjson = tmp_path / "trace.ndjson"
+     _write_minimal_ndjson(ndjson)
+
+     out_py = tmp_path / "repro.py"
+     res = generate_from_ndjson(
+         str(ndjson),
+         provider=DummyProvider(),
+         launch_index=0,
+         out_py=str(out_py),
+         execute=True,
+         retries=0,
+         temperature=0.0,
+         max_tokens=256,
+     )
+     assert out_py.exists()
+     assert res.get("returncode", 0) == 0
+     # stdout should contain our dummy output
+     assert "dummy ok" in (res.get("stdout") or "")