gwc-pybundle 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pybundle/steps/mypy.py ADDED
@@ -0,0 +1,60 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess
4
+ import time
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from .base import StepResult
9
+ from ..context import BundleContext
10
+ from ..tools import which
11
+
12
+
13
+ def _has_mypy_config(root: Path) -> bool:
14
+ if (root / "mypy.ini").is_file():
15
+ return True
16
+ if (root / "setup.cfg").is_file():
17
+ return True
18
+ if (root / "pyproject.toml").is_file():
19
+ # we don't parse TOML here; presence is enough for v1
20
+ return True
21
+ return False
22
+
23
+
24
@dataclass
class MypyStep:
    """Run mypy against ``target`` and capture its output to ``outfile``.

    Missing tool / missing config produce a SKIP result with an explanatory
    log file; type findings are recorded but never fail the step.
    """

    name: str = "mypy"
    target: str = "pybundle"
    outfile: str = "logs/33_mypy.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        log_path = ctx.workdir / self.outfile
        log_path.parent.mkdir(parents=True, exist_ok=True)

        def skip(message: str, reason: str) -> StepResult:
            # Leave a note in the log so the bundle stays self-explanatory.
            log_path.write_text(message, encoding="utf-8")
            return StepResult(self.name, "SKIP", 0, reason)

        mypy = which("mypy")
        if not mypy:
            return skip(
                "mypy not found; skipping (pip install mypy)\n", "missing mypy"
            )
        if not _has_mypy_config(ctx.root):
            return skip(
                "no mypy config detected (mypy.ini/setup.cfg/pyproject.toml); skipping\n",
                "no config",
            )

        cmd = [mypy, "--exclude", "^artifacts/", self.target]
        completed = subprocess.run(
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )

        body = f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n"
        body += completed.stdout or ""
        if completed.stderr:
            body += "\n" + completed.stderr
        log_path.write_text(ctx.redact_text(body), encoding="utf-8")

        elapsed = int(time.time() - started)
        if completed.returncode == 0:
            note = ""
        else:
            note = f"exit={completed.returncode} (type findings)"
        # Type findings are diagnostic output, not a bundling failure.
        return StepResult(self.name, "PASS", elapsed, note)
@@ -0,0 +1,66 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess
4
+ import time
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from .base import StepResult
9
+ from ..context import BundleContext
10
+ from ..tools import which
11
+
12
+
13
+ def _has_tests(root: Path) -> bool:
14
+ # common conventions
15
+ if (root / "tests").is_dir():
16
+ return True
17
+ # sometimes tests are inside the package
18
+ # (don’t walk the whole tree; just check a couple likely paths)
19
+ for candidate in ["src/tests", "app/tests"]:
20
+ if (root / candidate).is_dir():
21
+ return True
22
+ # any */tests at depth 2 is also a common pattern
23
+ for p in root.glob("*/tests"):
24
+ if p.is_dir():
25
+ return True
26
+ return False
27
+
28
+
29
@dataclass
class PytestStep:
    """Run the project's tests with pytest, capturing output to a log file.

    Skips (rather than fails) when pytest is unavailable or no tests
    directory can be found; test failures are recorded but never fail
    the step itself.
    """

    name: str = "pytest"
    args: list[str] | None = None
    outfile: str = "logs/34_pytest_q.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        log_path = ctx.workdir / self.outfile
        log_path.parent.mkdir(parents=True, exist_ok=True)

        pytest_bin = which("pytest")
        if not pytest_bin:
            log_path.write_text(
                "pytest not found; skipping (pip install pytest)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing pytest")

        if not _has_tests(ctx.root):
            log_path.write_text(
                "no tests directory detected; skipping pytest\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "no tests")

        # Default to quiet output unless the caller supplied explicit args.
        cmd = [pytest_bin, *(self.args or ["-q"])]
        completed = subprocess.run(
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )

        pieces = [
            f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n",
            completed.stdout or "",
        ]
        if completed.stderr:
            pieces.append("\n" + completed.stderr)
        log_path.write_text(ctx.redact_text("".join(pieces)), encoding="utf-8")

        elapsed = int(time.time() - started)
        note = (
            ""
            if completed.returncode == 0
            else f"exit={completed.returncode} (test failures)"
        )
        # Failures are findings for the bundle, not a step error.
        return StepResult(self.name, "PASS", elapsed, note)
@@ -0,0 +1,161 @@
1
+ from __future__ import annotations
2
+
3
+ import platform
4
+ import sys
5
+ import time
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+
9
+ from .base import StepResult
10
+ from ..context import BundleContext
11
+ from ..tools import which
12
+
13
+
14
@dataclass
class ReproMarkdownStep:
    """Generate ``REPRO.md`` in the bundle workdir.

    The document summarizes: environment (OS/Python), which tools were
    detected on PATH, which steps ran, and what artifact files the bundle
    actually contains. It is assembled as a flat list of Markdown lines
    and written in one shot at the end.
    """

    name: str = "generate REPRO.md"
    outfile: str = "REPRO.md"

    def run(self, ctx: BundleContext) -> StepResult:
        start = time.time()
        repro = ctx.workdir / self.outfile

        # ---- tool detection ----
        tool_names = [
            "python",
            "pip",
            "git",
            "ruff",
            "mypy",
            "pytest",
            "rg",
            "zip",
            "tar",
        ]
        # Map each tool name to its resolved path (or None if not on PATH).
        detected = {t: which(t) for t in tool_names}

        # Prefer ctx.tools.python if you have it
        if getattr(ctx, "tools", None) and getattr(ctx.tools, "python", None):
            detected["python"] = ctx.tools.python

        # ---- file inventory (what actually exists) ----
        # NOTE(review): only *.txt files are inventoried here; logs in other
        # formats (e.g. .json) under logdir/metadir will not be listed.
        def list_txt(dirpath: Path) -> list[str]:
            if not dirpath.is_dir():
                return []
            return sorted(
                str(p.relative_to(ctx.workdir)) for p in dirpath.rglob("*.txt")
            )

        logs_list = list_txt(ctx.logdir)
        meta_list = list_txt(ctx.metadir)

        # Also include key top-level files if present
        top_files: list[str] = []
        # NOTE(review): loop variable `name` shadows the dataclass field
        # `self.name` within this scope (harmless, but easy to misread).
        for name in [
            "RUN_LOG.txt",
            "SUMMARY.json",
            "error_files_from_logs.txt",
            "error_refs_count.txt",
        ]:
            p = ctx.workdir / name
            if p.exists():
                top_files.append(name)

        # ---- step summary (best-effort, never crash) ----
        results = getattr(ctx, "results", [])
        ctx.results = results  # ensure it's set for future steps

        summary_lines: list[str] = []
        for r in results:
            note = f" ({r.note})" if getattr(r, "note", "") else ""
            summary_lines.append(f"- **{r.name}**: {r.status}{note}")

        # ---- environment ----
        pyver = sys.version.split()[0]
        plat = platform.platform()
        profile = ctx.profile_name
        # Timestamp is always UTC so bundles from different machines compare.
        utc_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

        # ---- build markdown ----
        def fmt_tool(t: str) -> str:
            path = detected.get(t)
            return f"- `{t}`: ✅ `{path}`" if path else f"- `{t}`: ❌ not found"

        md: list[str] = []
        md += ["# Reproduction Guide", ""]
        md += [
            "This bundle captures diagnostic outputs and the minimum relevant project context",
            "to reproduce issues reliably on another system.",
            "",
            "## Overview",
            f"- Profile: `{profile}`",
            f"- Generated (UTC): `{utc_now}`",
            f"- Project root: `{ctx.root}`",
            "",
            "## Environment Snapshot",
            f"- OS: `{plat}`",
            f"- Python: `{pyver}`",
            "",
            "## Tools Detected",
            *[fmt_tool(t) for t in tool_names],
            "",
        ]

        # Only emit the section when at least one step has been recorded.
        if summary_lines:
            md += ["## Steps Executed", *summary_lines, ""]

        md += [
            "## How to Reproduce",
            "",
            "From the project root:",
            "",
            "```bash",
            f"python -m pybundle run {profile}",
            "```",
            "",
            "Re-run individual tools (if installed):",
            "",
            "```bash",
            "python -m compileall .",
            "ruff check .",
            "ruff format --check .",
            "mypy .",
            "pytest -q",
            "```",
            "",
            "## Produced Artifacts",
            "",
        ]

        if top_files:
            md += ["### Top-level", *[f"- `{p}`" for p in top_files], ""]

        # Each of the next two sections always appears, with a "(none)"
        # placeholder when the directory listing came back empty.
        md += (
            ["### logs/", *(f"- `{p}`" for p in logs_list)]
            if logs_list
            else ["### logs/", "- (none)", ""]
        )
        md += (
            ["", "### meta/", *(f"- `{p}`" for p in meta_list)]
            if meta_list
            else ["", "### meta/", "- (none)"]
        )

        md += [
            "",
            "## Context Packs",
            "",
            "- `src/_error_refs/` – files directly referenced by tool output",
            "- `src/_error_context/` – related imports + pytest glue (conftest/__init__) + configs",
            "",
            "## Notes",
            "",
            "- Non-zero exits from linters/tests are recorded for diagnosis; bundle creation continues.",
            "- Missing tools typically produce SKIP logs rather than failing the bundle.",
            "",
        ]

        # Single write at the end: the document is either complete or absent.
        repro.write_text("\n".join(md) + "\n", encoding="utf-8")

        dur = int(time.time() - start)
        return StepResult(self.name, "PASS", dur, "")
@@ -0,0 +1,78 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess
4
+ import time
5
+ from dataclasses import dataclass
6
+
7
+ from .base import StepResult
8
+ from ..context import BundleContext
9
+ from ..tools import which
10
+
11
+
12
@dataclass
class RipgrepScanStep:
    """Run a single ripgrep pattern scan and save the output to ``outfile``.

    Always returns PASS — the scan collects information; it does not
    enforce any policy.
    """

    name: str
    pattern: str
    outfile: str
    target: str = "."  # directory or file
    extra_args: list[str] | None = None

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        report = ctx.workdir / self.outfile
        report.parent.mkdir(parents=True, exist_ok=True)

        rg = which("rg")
        if not rg:
            report.write_text(
                "rg (ripgrep) not found; skipping (install ripgrep)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing rg")

        # -n line numbers, --no-heading keeps it grep-like, -S smart case can be handy
        cmd = [
            rg,
            "-n",
            "--no-heading",
            "-S",
            *(self.extra_args or []),
            self.pattern,
            self.target,
        ]

        completed = subprocess.run(
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )
        body = f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n"
        body += completed.stdout or ""
        if completed.stderr:
            body += "\n" + completed.stderr
        report.write_text(ctx.redact_text(body), encoding="utf-8")

        # rg exit codes: 0 = matches found, 1 = no matches (not an error!),
        # 2 = actual error. Anything else gets an empty note.
        note_by_code = {1: "no matches", 2: "rg error (exit=2) recorded"}
        note = note_by_code.get(completed.returncode, "")

        # Always PASS; we're collecting info, not enforcing policy (yet).
        return StepResult(self.name, "PASS", int(time.time() - started), note)
56
+
57
+
58
def default_rg_steps(target: str = ".") -> list[RipgrepScanStep]:
    """Return the stock ripgrep scans: TODO markers, print calls, broad excepts."""
    specs = [
        ("rg TODO/FIXME/HACK", r"TODO|FIXME|HACK", "logs/40_rg_todos.txt"),
        ("rg print(", r"^\s*print\(", "logs/41_rg_prints.txt"),
        (
            "rg except patterns",
            r"except\s+Exception|except\s*:",
            "logs/42_rg_bare_excepts.txt",
        ),
    ]
    return [
        RipgrepScanStep(name=label, pattern=pat, outfile=path, target=target)
        for label, pat, path in specs
    ]
@@ -0,0 +1,158 @@
1
+ from __future__ import annotations
2
+ import json
3
+ import time
4
+ from dataclasses import dataclass
5
+ from pathlib import Path
6
+
7
+ from .base import StepResult
8
+ from ..context import BundleContext
9
+ from ..roadmap_scan import build_roadmap
10
+ from ..steps.copy_pack import DEFAULT_EXCLUDE_DIRS
11
+ from ..policy import AIContextPolicy
12
+
13
@dataclass
class RoadmapStep:
    """Build a project "roadmap": an entrypoint/dependency map of the source tree.

    Writes three artifacts under the bundle workdir:
      * ``out_json`` — the full graph as JSON
      * ``out_md``   — human-readable Markdown with a Mermaid diagram
      * ``meta/71_roadmap_summary.json`` — compact summary (languages, entrypoints, stats)

    NOTE(review): ``include`` and ``max_files`` are currently superseded by
    ``AIContextPolicy`` (which decides the include dirs and the file cap);
    they are kept for interface compatibility.
    """

    name: str = "roadmap (project map)"
    out_md: str = "meta/70_roadmap.md"
    out_json: str = "meta/70_roadmap.json"
    include: list[str] | None = None
    max_files: int = 20000
    policy: AIContextPolicy | None = None

    def run(self, ctx: BundleContext) -> StepResult:
        start = time.time()

        # The policy is the single source of truth for what gets scanned.
        # (Earlier drafts computed include_dirs by hand in two duplicated
        # branches; that code was dead because these policy-derived values
        # overwrote it unconditionally, so it has been removed.)
        policy = self.policy or AIContextPolicy()
        include_dirs = list(policy.include_dir_candidates(ctx.root))
        exclude_dirs = set(policy.exclude_dirs)

        graph = build_roadmap(
            root=ctx.root,
            include_dirs=include_dirs,
            exclude_dirs=exclude_dirs,
            max_files=policy.roadmap_max_files,
            # later: depth=policy.roadmap_depth
        )

        # Write JSON
        out_json_path = ctx.workdir / self.out_json
        out_json_path.parent.mkdir(parents=True, exist_ok=True)
        out_json_path.write_text(json.dumps(graph.to_dict(), indent=2), encoding="utf-8")

        # Write Markdown (with Mermaid)
        out_md_path = ctx.workdir / self.out_md
        out_md_path.parent.mkdir(parents=True, exist_ok=True)
        out_md_path.write_text(self._render_md(graph), encoding="utf-8")

        # Compact machine-readable summary next to the full graph.
        langs = sorted({n.lang for n in graph.nodes if getattr(n, "lang", None)})
        summary = {
            "languages": langs,
            "entrypoints": [ep.node for ep in graph.entrypoints[:50]],
            "stats": graph.stats,
        }
        (ctx.workdir / "meta" / "71_roadmap_summary.json").write_text(
            json.dumps(summary, indent=2),
            encoding="utf-8",
        )

        dur = int(time.time() - start)
        note = f"nodes={len(graph.nodes)} edges={len(graph.edges)} entrypoints={len(graph.entrypoints)}"
        return StepResult(self.name, "PASS", dur, note)

    def _render_md(self, graph) -> str:
        """Render the roadmap graph as Markdown with an embedded Mermaid diagram."""
        lines = []
        lines.append("# Project Roadmap")
        lines.append("")
        lines.append("## Entrypoints")
        if not graph.entrypoints:
            lines.append("- (none detected)")
        else:
            for ep in graph.entrypoints[:50]:
                lines.append(f"- `{ep.node}` — {ep.reason} (confidence {ep.confidence}/3)")
        lines.append("")
        lines.append("## High-level map")

        # Diagram size limits; prefer policy-configured values when available.
        depth = 2
        max_edges = 180
        try:
            if self.policy is not None:
                depth = self.policy.roadmap_mermaid_depth
                max_edges = self.policy.roadmap_mermaid_max_edges
        except Exception:
            # Policy objects lacking these attributes fall back to defaults.
            pass

        lines.append("```mermaid")
        lines.append("flowchart LR")
        lines.extend(self._render_mermaid_bfs(graph, max_depth=depth, max_edges=max_edges))
        lines.append("```")
        lines.append("")
        lines.append("## Stats")
        for k in sorted(graph.stats.keys()):
            lines.append(f"- **{k}**: {graph.stats[k]}")
        lines.append("")
        lines.append("## Notes")
        lines.append("- Destinations like `py:...`, `js:...`, `rs:...` are dependency specs (not resolved to paths yet).")
        lines.append("- This is designed to be deterministic and readable, not a perfect compiler-grade call graph.")
        lines.append("")
        return "\n".join(lines)

    def _render_mermaid_bfs(self, graph, max_depth: int = 2, max_edges: int = 180) -> list[str]:
        """Breadth-first walk from the entrypoints, emitting Mermaid edge lines.

        Expansion stops at ``max_depth`` hops from any entrypoint, and output
        is capped at ``max_edges`` edges so the diagram stays renderable.
        """
        from collections import deque

        # Adjacency list over the edge set.
        adj: dict[str, list[str]] = {}
        for e in graph.edges:
            adj.setdefault(e.src, []).append(e.dst)

        entry = [ep.node for ep in graph.entrypoints]
        if not entry:
            return [' A["(no entrypoints)"]']

        q = deque([(n, 0) for n in entry])
        seen_edges: set[tuple[str, str]] = set()
        shown: list[str] = []
        seen_nodes: set[str] = set(entry)

        while q and len(shown) < max_edges:
            node, depth = q.popleft()
            if depth >= max_depth:
                continue
            for dst in adj.get(node, []):
                key = (node, dst)
                if key in seen_edges:
                    continue
                seen_edges.add(key)
                shown.append(f' "{node}" --> "{dst}"')
                if dst not in seen_nodes:
                    seen_nodes.add(dst)
                    q.append((dst, depth + 1))
                if len(shown) >= max_edges:
                    break

        return shown or [' A["(no edges rendered)"]']
pybundle/steps/ruff.py ADDED
@@ -0,0 +1,111 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess
4
+ import time
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from .base import StepResult
9
+ from ..context import BundleContext
10
+ from ..tools import which
11
+
12
+
13
+ def _repo_has_py_files(root: Path) -> bool:
14
+ # Fast-ish heuristic: look for any .py file in top couple levels
15
+ # (Avoid walking deep trees; ruff itself can handle it.)
16
+ for p in root.rglob("*.py"):
17
+ # ignore common junk dirs
18
+ parts = set(p.parts)
19
+ if (
20
+ ".venv" in parts
21
+ or "__pycache__" in parts
22
+ or ".mypy_cache" in parts
23
+ or ".ruff_cache" in parts
24
+ ):
25
+ continue
26
+ if (
27
+ "node_modules" in parts
28
+ or "dist" in parts
29
+ or "build" in parts
30
+ or "artifacts" in parts
31
+ ):
32
+ continue
33
+ return True
34
+ return False
35
+
36
+
37
@dataclass
class RuffCheckStep:
    """Run ``ruff check`` on ``target`` and capture findings to ``outfile``.

    Skips when ruff is missing or no Python files exist; lint findings are
    recorded in the note but never fail the step.
    """

    name: str = "ruff check"
    target: str = "."
    outfile: str = "logs/31_ruff_check.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        report = ctx.workdir / self.outfile
        report.parent.mkdir(parents=True, exist_ok=True)

        ruff = which("ruff")
        if not ruff:
            report.write_text(
                "ruff not found; skipping (pip install ruff)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing ruff")

        if not _repo_has_py_files(ctx.root):
            report.write_text(
                "no .py files detected; skipping ruff check\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "no python files")

        cmd = [ruff, "check", self.target]
        completed = subprocess.run(
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )

        sections = [
            f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n",
            completed.stdout or "",
        ]
        if completed.stderr:
            sections.append("\n" + completed.stderr)
        report.write_text(ctx.redact_text("".join(sections)), encoding="utf-8")

        elapsed = int(time.time() - started)
        # Lint findings are valuable diagnostics; they don't fail the bundle.
        note = (
            ""
            if completed.returncode == 0
            else f"exit={completed.returncode} (lint findings)"
        )
        return StepResult(self.name, "PASS", elapsed, note)
74
+
75
+
76
@dataclass
class RuffFormatCheckStep:
    """Run ``ruff format --check`` and capture any drift report to ``outfile``.

    Skips when ruff is missing or no Python files exist; formatting drift
    is noted but never fails the step.
    """

    name: str = "ruff format --check"
    target: str = "."
    outfile: str = "logs/32_ruff_format_check.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        report = ctx.workdir / self.outfile
        report.parent.mkdir(parents=True, exist_ok=True)

        ruff = which("ruff")
        if not ruff:
            report.write_text(
                "ruff not found; skipping (pip install ruff)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing ruff")

        if not _repo_has_py_files(ctx.root):
            report.write_text(
                "no .py files detected; skipping ruff format check\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "no python files")

        cmd = [ruff, "format", "--check", self.target]
        completed = subprocess.run(
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )

        body = f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n"
        body += completed.stdout or ""
        if completed.stderr:
            body += "\n" + completed.stderr
        report.write_text(ctx.redact_text(body), encoding="utf-8")

        elapsed = int(time.time() - started)
        # Drift is a finding, not a step failure.
        note = (
            ""
            if completed.returncode == 0
            else f"exit={completed.returncode} (format drift)"
        )
        return StepResult(self.name, "PASS", elapsed, note)