@pennyfarthing/core 7.7.0 → 7.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +1 -1
- package/packages/core/dist/cli/commands/doctor.d.ts.map +1 -1
- package/packages/core/dist/cli/commands/doctor.js +114 -0
- package/packages/core/dist/cli/commands/doctor.js.map +1 -1
- package/pennyfarthing-dist/agents/sm-setup.md +37 -2
- package/pennyfarthing-dist/agents/sm.md +68 -22
- package/pennyfarthing-dist/agents/workflow-status-check.md +11 -1
- package/pennyfarthing-dist/commands/git-cleanup.md +43 -308
- package/pennyfarthing-dist/commands/solo.md +31 -0
- package/pennyfarthing-dist/guides/patterns/approval-gates-pattern.md +1 -1
- package/pennyfarthing-dist/personas/themes/gilligans-island.yaml +83 -83
- package/pennyfarthing-dist/personas/themes/the-expanse.yaml +11 -11
- package/pennyfarthing-dist/scripts/core/check-context.sh +3 -0
- package/pennyfarthing-dist/scripts/core/handoff-marker.sh +13 -2
- package/pennyfarthing-dist/scripts/core/prime.sh +3 -157
- package/pennyfarthing-dist/scripts/core/run.sh +9 -0
- package/pennyfarthing-dist/scripts/hooks/__pycache__/question_reflector_check.cpython-314.pyc +0 -0
- package/pennyfarthing-dist/scripts/hooks/question_reflector_check.py +117 -20
- package/pennyfarthing-dist/scripts/jira/README.md +10 -7
- package/pennyfarthing-dist/scripts/misc/add-short-names.sh +13 -0
- package/pennyfarthing-dist/scripts/misc/add_short_names.py +226 -0
- package/pennyfarthing-dist/scripts/misc/migrate-bmad-workflow.sh +6 -5
- package/pennyfarthing-dist/scripts/misc/migrate_bmad_workflow.py +319 -0
- package/pennyfarthing-dist/scripts/sprint/import-epic-to-future.sh +6 -5
- package/pennyfarthing-dist/scripts/sprint/import_epic_to_future.py +270 -0
- package/pennyfarthing-dist/scripts/test/ensure-swebench-data.sh +59 -0
- package/pennyfarthing-dist/scripts/theme/compute-theme-tiers.sh +8 -6
- package/pennyfarthing-dist/scripts/theme/compute_theme_tiers.py +402 -0
- package/pennyfarthing-dist/scripts/workflow/check.sh +3 -476
- package/pennyfarthing-dist/scripts/workflow/get-workflow-type.py +61 -0
- package/pennyfarthing-dist/scripts/workflow/get-workflow-type.sh +13 -0
- package/pennyfarthing-dist/skills/judge/SKILL.md +57 -0
- package/pennyfarthing-dist/skills/sprint/scripts/sync-epic-jira.sh +4 -22
- package/pennyfarthing-dist/workflows/git-cleanup/steps/step-01-analyze.md +83 -0
- package/pennyfarthing-dist/workflows/git-cleanup/steps/step-02-categorize.md +116 -0
- package/pennyfarthing-dist/workflows/git-cleanup/steps/step-03-execute.md +210 -0
- package/pennyfarthing-dist/workflows/git-cleanup/steps/step-04-verify.md +88 -0
- package/pennyfarthing-dist/workflows/git-cleanup/steps/step-05-complete.md +71 -0
- package/pennyfarthing-dist/workflows/git-cleanup.yaml +59 -0
- package/pennyfarthing-dist/scripts/hooks/question-reflector-check.mjs +0 -393
- package/pennyfarthing-dist/scripts/hooks/tests/question-reflector.test.mjs +0 -545
- package/pennyfarthing-dist/scripts/jira/jira-bidirectional-sync.mjs +0 -327
- package/pennyfarthing-dist/scripts/jira/jira-bidirectional-sync.test.mjs +0 -503
- package/pennyfarthing-dist/scripts/jira/jira-lib.mjs +0 -443
- package/pennyfarthing-dist/scripts/jira/jira-sync-story.mjs +0 -208
- package/pennyfarthing-dist/scripts/jira/jira-sync.mjs +0 -198
- package/pennyfarthing-dist/scripts/misc/add-short-names.mjs +0 -264
- package/pennyfarthing-dist/scripts/misc/migrate-bmad-workflow.mjs +0 -474
- package/pennyfarthing-dist/scripts/sprint/import-epic-to-future.mjs +0 -377
- package/pennyfarthing-dist/scripts/theme/compute-theme-tiers.js +0 -492
- /package/pennyfarthing-dist/guides/{AGENT-COORDINATION.md → agent-coordination.md} +0 -0
- /package/pennyfarthing-dist/guides/{HOOKS.md → hooks.md} +0 -0
- /package/pennyfarthing-dist/guides/{PROMPT-PATTERNS.md → prompt-patterns.md} +0 -0
- /package/pennyfarthing-dist/guides/{SESSION-ARTIFACTS.md → session-artifacts.md} +0 -0
- /package/pennyfarthing-dist/guides/{XML-TAGS.md → xml-tags.md} +0 -0
|
@@ -0,0 +1,319 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
migrate_bmad_workflow.py - Migrate BMAD workflows to Pennyfarthing format
|
|
4
|
+
|
|
5
|
+
Converts BMAD workflow format to Pennyfarthing stepped workflow format:
|
|
6
|
+
- Parses BMAD workflow.md with YAML frontmatter
|
|
7
|
+
- Extracts step files and their frontmatter
|
|
8
|
+
- Converts variable syntax ({var-name} -> {var_name})
|
|
9
|
+
- Generates workflow.yaml with Pennyfarthing schema
|
|
10
|
+
- Preserves tri-modal structure (steps-c, steps-v, steps-e)
|
|
11
|
+
|
|
12
|
+
Usage: python migrate_bmad_workflow.py [--dry-run] <source-dir> [target-dir]
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import argparse
|
|
16
|
+
import re
|
|
17
|
+
import shutil
|
|
18
|
+
import sys
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class Colors:
    """ANSI terminal escape codes used for colorized log output."""

    RED = '\033[31m'     # errors
    GREEN = '\033[32m'   # success
    YELLOW = '\033[33m'  # warnings
    BLUE = '\033[34m'    # info
    NC = '\033[0m'       # reset ("no color")
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def log_info(msg: str) -> None:
    """Print an informational message with a blue INFO prefix."""
    print(Colors.BLUE + "INFO" + Colors.NC + ": " + msg)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def log_success(msg: str) -> None:
    """Print a success message with a green SUCCESS prefix."""
    print(Colors.GREEN + "SUCCESS" + Colors.NC + ": " + msg)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def log_warn(msg: str) -> None:
    """Print a warning message with a yellow WARN prefix."""
    print(Colors.YELLOW + "WARN" + Colors.NC + ": " + msg)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def log_error(msg: str) -> None:
    """Print an error message with a red ERROR prefix to stderr."""
    print(Colors.RED + "ERROR" + Colors.NC + ": " + msg, file=sys.stderr)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def extract_frontmatter(content: str) -> tuple[dict, str]:
    """Split markdown *content* into (frontmatter dict, remaining text).

    Frontmatter is the block delimited by ``---`` lines at the very top of
    the document.  Only flat ``key: value`` pairs are recognised; surrounding
    quotes on the value are stripped.  When no (closed) frontmatter block is
    present, an empty dict and the untouched content are returned.
    """
    rows = content.split('\n')
    if not rows or rows[0].strip() != '---':
        return {}, content

    # Locate the closing '---' delimiter; bail out if it never appears.
    closing = next(
        (idx for idx, row in enumerate(rows[1:], 1) if row.strip() == '---'),
        -1,
    )
    if closing == -1:
        return {}, content

    key_value = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*):\s*(.*)$')
    meta: dict = {}
    for row in rows[1:closing]:
        hit = key_value.match(row)
        if hit:
            meta[hit.group(1)] = hit.group(2).strip().strip('"\'')

    return meta, '\n'.join(rows[closing + 1:])
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def convert_variables(text: str) -> str:
    """Rewrite BMAD ``{dash-style}`` placeholders as ``{underscore_style}``.

    The substitution is repeated until the text reaches a fixpoint, so every
    dashed placeholder is normalised no matter how many occur.
    """
    pattern = re.compile(r'\{([a-zA-Z0-9_]+)-([a-zA-Z0-9_-]+)\}')

    def underscored(match: re.Match) -> str:
        head, tail = match.group(1), match.group(2)
        return '{' + head + '_' + tail.replace('-', '_') + '}'

    current = text
    while True:
        updated = pattern.sub(underscored, current)
        if updated == current:
            return updated
        current = updated
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def is_trimodal(source_dir: Path) -> bool:
    """Return True when *source_dir* holds any tri-modal step directory.

    Tri-modal workflows split steps into create/validate/edit variants
    (steps-c, steps-v, steps-e).
    """
    for name in ('steps-c', 'steps-v', 'steps-e'):
        if (source_dir / name).exists():
            return True
    return False
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def get_step_directories(source_dir: Path) -> list[str]:
    """Return the step directory names present in *source_dir*.

    Tri-modal workflows yield whichever of steps-c/steps-v/steps-e exist;
    classic workflows yield ['steps']; anything else yields an empty list.
    """
    if is_trimodal(source_dir):
        present = []
        for name in ('steps-c', 'steps-v', 'steps-e'):
            if (source_dir / name).exists():
                present.append(name)
        return present
    if (source_dir / 'steps').exists():
        return ['steps']
    return []
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def get_supporting_directories(source_dir: Path) -> list[str]:
    """Return the supporting asset directories that exist under *source_dir*."""
    candidates = ('templates', 'data', 'assets')
    return [name for name in candidates if (source_dir / name).exists()]
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def get_root_template_files(source_dir: Path) -> list[str]:
    """Return names of root-level ``*-template*.md`` files in *source_dir*."""
    names = []
    for entry in source_dir.iterdir():
        if not entry.is_file():
            continue
        if entry.suffix == '.md' and '-template' in entry.name:
            names.append(entry.name)
    return names
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def copy_dir_with_transform(src_dir: Path, dest_dir: Path,
                            transform: callable, dry_run: bool) -> None:
    """Recursively copy *src_dir* to *dest_dir*, transforming file contents.

    Each regular file is read as text and passed through ``transform(content,
    path)`` before being written.  Files that cannot be decoded as text
    (e.g. binary assets under assets/) are copied verbatim via shutil
    instead of crashing the migration — the original read_text() raised
    UnicodeDecodeError on any binary file.  In dry-run mode nothing is
    written; files whose content would change are reported.
    """
    if not src_dir.exists():
        return

    if not dry_run:
        dest_dir.mkdir(parents=True, exist_ok=True)

    for entry in src_dir.iterdir():
        dest_path = dest_dir / entry.name

        if entry.is_dir():
            copy_dir_with_transform(entry, dest_path, transform, dry_run)
        elif entry.is_file():
            try:
                content = entry.read_text()
            except UnicodeDecodeError:
                # Binary file: no variable conversion possible; copy as-is.
                if not dry_run:
                    shutil.copy2(entry, dest_path)
                    log_success(f"Copied: {dest_path.name}")
                continue

            transformed = transform(content, entry)

            if dry_run:
                if content != transformed:
                    log_info(f"[DRY-RUN] Would convert variables in: {entry}")
            else:
                dest_path.write_text(transformed)
                log_success(f"Copied: {dest_path.name}")
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def generate_workflow_yaml(name: str, description: str, trimodal: bool) -> str:
    """Generate workflow.yaml content for the migrated workflow.

    *trimodal* switches between the single ./steps/ layout and the
    create/validate/edit (steps-c / steps-v / steps-e) layout.
    NOTE(review): the whitespace inside the template literals below is
    significant YAML indentation — edit with care.
    """
    # Normalise BMAD {dash-style} placeholders in the description.
    converted_desc = convert_variables(description)

    if trimodal:
        # Default step path points at the "create" mode; the modes block
        # exposes all three tri-modal directories.
        step_config = """ path: ./steps-c/
    pattern: step-*.md"""
        modes_config = """
  # Tri-modal workflow paths
  modes:
    create: ./steps-c/
    validate: ./steps-v/
    edit: ./steps-e/"""
    else:
        step_config = """ path: ./steps/
    pattern: step-*.md"""
        modes_config = ""

    # NOTE(review): `agent: architect` and the trigger lists are generic
    # defaults — the header comment tells users to customize them.
    return f"""# {name} Workflow - Migrated from BMAD format
# Generated by migrate_bmad_workflow.py

workflow:
  name: {name}
  description: {converted_desc}
  version: "1.0.0"
  type: stepped

  # Step configuration
  steps:
   {step_config}
{modes_config}
  # Variables available in step files
  variables:
    project_root: .
    planning_artifacts: ./artifacts
    output_file: artifacts/{name}.md

  # Agent assignment (customize as needed)
  agent: architect

  # Triggers - when to suggest this workflow
  triggers:
    types: [{name}]
    tags: [{name}, stepped]
"""
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def main() -> int:
    """CLI entry point: migrate a BMAD workflow directory to Pennyfarthing.

    Parses workflow.md frontmatter, writes workflow.yaml, then copies step,
    supporting, and root-level template files into the target directory with
    variable-syntax conversion.  Returns a process exit code (0 on success).
    """
    parser = argparse.ArgumentParser(
        description="Migrate BMAD workflows to Pennyfarthing stepped workflow format"
    )
    parser.add_argument('--dry-run', action='store_true',
                        help='Show what would be done without making changes')
    parser.add_argument('source_dir', type=Path,
                        help='Path to BMAD workflow directory (must contain workflow.md)')
    parser.add_argument('target_dir', type=Path, nargs='?', default=Path('.'),
                        help='Path to output directory (default: current directory)')
    args = parser.parse_args()

    source_dir = args.source_dir.resolve()
    target_dir = args.target_dir.resolve()
    dry_run = args.dry_run

    if not source_dir.exists():
        log_error(f"Source directory not found: {source_dir}")
        return 1

    workflow_md = source_dir / 'workflow.md'
    if not workflow_md.exists():
        log_error(f"workflow.md not found in {source_dir}")
        return 1

    log_info('BMAD Workflow Migration')
    log_info('=======================')
    log_info(f'Source: {source_dir}')
    log_info(f'Target: {target_dir}')

    if dry_run:
        log_warn('DRY-RUN MODE - No changes will be made')

    print('')

    # Parse workflow.md frontmatter for name/description.
    content = workflow_md.read_text()
    frontmatter, _ = extract_frontmatter(content)

    name = frontmatter.get('name')
    description = frontmatter.get('description', '')

    if not name:
        log_error('Could not extract workflow name from workflow.md')
        return 1

    log_info(f'Parsed workflow: {name}')
    log_info(f'Description: {description}')
    print('')

    # Create target directory (skipped in dry-run).
    if not dry_run:
        target_dir.mkdir(parents=True, exist_ok=True)

    # Generate workflow.yaml from the parsed metadata.
    log_info('Generating workflow.yaml')
    trimodal = is_trimodal(source_dir)
    workflow_yaml = generate_workflow_yaml(name, description, trimodal)

    if dry_run:
        log_info('[DRY-RUN] Would create: workflow.yaml')
    else:
        (target_dir / 'workflow.yaml').write_text(workflow_yaml)
        log_success('Created workflow.yaml')

    print('')

    # Variable conversion applies only to markdown files; everything else
    # passes through unchanged.
    def md_transform(content: str, filepath: Path) -> str:
        if filepath.suffix == '.md':
            return convert_variables(content)
        return content

    # Copy step directories.
    for dir_name in get_step_directories(source_dir):
        log_info(f'Processing step directory: {dir_name}')
        if dry_run:
            log_info(f'[DRY-RUN] Would copy and transform: {dir_name}/')
        copy_dir_with_transform(
            source_dir / dir_name,
            target_dir / dir_name,
            md_transform,
            dry_run
        )

    print('')

    # Copy supporting directories (templates, data, assets).
    for dir_name in get_supporting_directories(source_dir):
        log_info(f'Processing supporting directory: {dir_name}')
        if dry_run:
            log_info(f'[DRY-RUN] Would copy: {dir_name}/')
        copy_dir_with_transform(
            source_dir / dir_name,
            target_dir / dir_name,
            md_transform,
            dry_run
        )

    print('')

    # Copy root-level template files.
    # BUG FIX: these log lines previously printed the literal "(unknown)"
    # instead of interpolating the file being processed.
    for filename in get_root_template_files(source_dir):
        log_info(f'Processing root template file: {filename}')
        src_path = source_dir / filename
        dest_path = target_dir / filename

        if dry_run:
            log_info(f'[DRY-RUN] Would copy: {filename}')
        else:
            content = src_path.read_text()
            transformed = convert_variables(content)
            dest_path.write_text(transformed)
            log_success(f'Copied: {filename}')

    print('')

    if dry_run:
        log_info('DRY-RUN complete. No files were modified.')
    else:
        log_success('Migration complete!')
        log_info(f'Output directory: {target_dir}')

    return 0


if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -1,9 +1,10 @@
|
|
|
1
1
|
#!/usr/bin/env bash
# import-epic-to-future.sh - Import epics-and-stories workflow output to future.yaml
#
# Usage: ./scripts/sprint/import-epic-to-future.sh [--dry-run] <epics-md-file> [initiative-name]

# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail

# Resolve the directory this wrapper lives in so the Python implementation
# can be found regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Delegate to the Python implementation, forwarding all arguments.
exec python3 "$SCRIPT_DIR/import_epic_to_future.py" "$@"
|
|
@@ -0,0 +1,270 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
import_epic_to_future.py - Import epics-and-stories workflow output to future.yaml
|
|
4
|
+
|
|
5
|
+
Transforms the markdown output from the epics-and-stories workflow into
|
|
6
|
+
the YAML format used by sprint/future.yaml.
|
|
7
|
+
|
|
8
|
+
Usage: python import_epic_to_future.py [--dry-run] <epics-md-file> [initiative-name]
|
|
9
|
+
|
|
10
|
+
Example:
|
|
11
|
+
python import_epic_to_future.py docs/planning/reflector-epics-and-stories.md "Reflector Consolidation"
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import argparse
|
|
15
|
+
import re
|
|
16
|
+
import sys
|
|
17
|
+
from datetime import date
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class Colors:
    """ANSI terminal escape codes used for colorized log output."""

    RED = '\033[31m'     # errors
    GREEN = '\033[32m'   # success
    YELLOW = '\033[33m'  # dry-run banners
    BLUE = '\033[34m'    # info
    NC = '\033[0m'       # reset ("no color")
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def log_info(msg: str) -> None:
    """Print an informational message with a blue INFO prefix."""
    print(Colors.BLUE + "INFO" + Colors.NC + ": " + msg)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def log_success(msg: str) -> None:
    """Print a success message with a green SUCCESS prefix."""
    print(Colors.GREEN + "SUCCESS" + Colors.NC + ": " + msg)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def log_error(msg: str) -> None:
    """Print an error message with a red ERROR prefix to stderr."""
    print(Colors.RED + "ERROR" + Colors.NC + ": " + msg, file=sys.stderr)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def find_project_root() -> Path:
    """Walk upward from the CWD to the first directory containing .claude.

    Falls back to the current working directory when no marker directory
    is found anywhere up the tree.
    """
    probe = Path.cwd()
    while probe != probe.parent:
        if (probe / ".claude").is_dir():
            return probe
        probe = probe.parent
    return Path.cwd()
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def parse_epics_markdown(content: str) -> dict:
|
|
52
|
+
"""Parse epics markdown file."""
|
|
53
|
+
result = {
|
|
54
|
+
'title': '',
|
|
55
|
+
'description': '',
|
|
56
|
+
'total_points': 0,
|
|
57
|
+
'epic_title': '',
|
|
58
|
+
'epic_description': '',
|
|
59
|
+
'stories': [],
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
# Extract title from first # heading
|
|
63
|
+
title_match = re.search(r'^# (.+?)( - Epic Breakdown)?$', content, re.MULTILINE)
|
|
64
|
+
if title_match:
|
|
65
|
+
result['title'] = title_match.group(1).replace(' - Epics and Stories', '')
|
|
66
|
+
|
|
67
|
+
# Extract description from Overview section
|
|
68
|
+
overview_match = re.search(r'## Overview\s*\n\s*\n(.+?)(?=\n\n##|\n##)', content, re.DOTALL)
|
|
69
|
+
if overview_match:
|
|
70
|
+
result['description'] = overview_match.group(1).strip()
|
|
71
|
+
|
|
72
|
+
# Extract total points
|
|
73
|
+
points_match = re.search(r'\*\*Points:\*\*\s*(\d+)', content)
|
|
74
|
+
if points_match:
|
|
75
|
+
result['total_points'] = int(points_match.group(1))
|
|
76
|
+
else:
|
|
77
|
+
effort_match = re.search(r'Total Effort.*?(\d+)\s*story points', content, re.IGNORECASE)
|
|
78
|
+
if effort_match:
|
|
79
|
+
result['total_points'] = int(effort_match.group(1))
|
|
80
|
+
|
|
81
|
+
# Extract epic title
|
|
82
|
+
epic_title_match = re.search(r'^## Epic \d+:\s*(.+)$', content, re.MULTILINE)
|
|
83
|
+
if epic_title_match:
|
|
84
|
+
result['epic_title'] = epic_title_match.group(1)
|
|
85
|
+
|
|
86
|
+
# Extract epic description (User Outcome)
|
|
87
|
+
user_outcome_match = re.search(r'\*\*User Outcome:\*\*\s*(.+)', content)
|
|
88
|
+
if user_outcome_match:
|
|
89
|
+
result['epic_description'] = user_outcome_match.group(1)
|
|
90
|
+
|
|
91
|
+
# Parse stories
|
|
92
|
+
story_pattern = re.compile(r'^### Story \d+\.(\d+):\s*(.+)$', re.MULTILINE)
|
|
93
|
+
i_want_pattern = re.compile(r'^I want \*\*(.+?)\*\*,', re.MULTILINE)
|
|
94
|
+
|
|
95
|
+
story_positions = []
|
|
96
|
+
for match in story_pattern.finditer(content):
|
|
97
|
+
story_positions.append({
|
|
98
|
+
'num': int(match.group(1)),
|
|
99
|
+
'title': match.group(2),
|
|
100
|
+
'index': match.start(),
|
|
101
|
+
})
|
|
102
|
+
|
|
103
|
+
for i, story in enumerate(story_positions):
|
|
104
|
+
start_index = story['index']
|
|
105
|
+
end_index = story_positions[i + 1]['index'] if i < len(story_positions) - 1 else len(content)
|
|
106
|
+
story_content = content[start_index:end_index]
|
|
107
|
+
|
|
108
|
+
i_want_match = i_want_pattern.search(story_content)
|
|
109
|
+
description = i_want_match.group(1) if i_want_match else ''
|
|
110
|
+
|
|
111
|
+
result['stories'].append({
|
|
112
|
+
'num': story['num'],
|
|
113
|
+
'title': story['title'],
|
|
114
|
+
'description': description,
|
|
115
|
+
})
|
|
116
|
+
|
|
117
|
+
return result
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def get_next_epic_number(future_yaml_path: Path) -> int:
    """Determine the next free epic number recorded in future.yaml.

    Prefers the explicit "Next Available Epic Number: NN" comment; falls
    back to one past the highest ``epic-NN`` id found in the file, with 60
    as the floor when the file is missing or has no epic ids.
    """
    if not future_yaml_path.exists():
        return 60

    text = future_yaml_path.read_text()

    explicit = re.search(r'Next Available Epic Number:\s*(\d+)', text, re.IGNORECASE)
    if explicit:
        return int(explicit.group(1))

    highest = max(
        (int(m.group(1)) for m in re.finditer(r'epic-(\d+)', text)),
        default=59,
    )
    return highest + 1
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def generate_yaml(parsed: dict, epic_num: int, initiative_name: str, epics_file: str) -> str:
    """Render the future.yaml initiative block for the parsed epic.

    NOTE(review): the indentation inside the f-string templates below is
    significant YAML structure — edit with care.  Multi-line descriptions
    are interpolated without re-indenting continuation lines; presumably
    inputs are single-line — confirm against the workflow output.
    """
    today = date.today().isoformat()

    # NOTE(review): `prd:` path and `marker: "reflector"` look hardcoded to
    # one initiative rather than derived from inputs — verify intent.
    yaml = f"""
# ==========================================================================
# {initiative_name}
# Imported from: {epics_file}
# Date: {today}
# ==========================================================================
- name: "{initiative_name}"
  description: |
    {parsed['description']}
  status: ready
  blocked_by: null
  total_points: {parsed['total_points']}
  prd: docs/planning/reflector-prd.md
  epics_doc: {epics_file}
  epics:
    - id: epic-{epic_num}
      title: "{parsed['epic_title'] or initiative_name}"
      description: |
        {parsed['epic_description'] or parsed['description']}
      points: {parsed['total_points']}
      priority: P1
      marker: "reflector"
      repos: pennyfarthing
      status: planning
      stories:
"""

    # Every imported story gets default points/priority; humans refine later.
    for story in parsed['stories']:
        yaml += f"""        - id: "{epic_num}-{story['num']}"
          title: "{story['title']}"
          description: |
            {story['description'] or story['title']}
          points: 1
          priority: P0
          status: planning
          repos: pennyfarthing
"""

    return yaml
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def update_future_yaml(future_yaml_path: Path, new_content: str, next_epic_num: int) -> str:
    """Return future.yaml text with *new_content* inserted.

    Bumps the 'Next Available Epic Number' counter and inserts the new
    initiative block just before the SUMMARY banner when present, otherwise
    appends it to the end of the file.
    """
    text = future_yaml_path.read_text()

    # Advance the bookkeeping comment past the epic we just consumed.
    text = re.sub(
        r'Next Available Epic Number:\s*\d+',
        f'Next Available Epic Number: {next_epic_num + 1}',
        text,
        flags=re.IGNORECASE,
    )

    marker = '# =============================================================================\n# SUMMARY'
    at = text.find(marker)

    if at == -1:
        return text + new_content
    return text[:at] + new_content + '\n' + text[at:]
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def main() -> int:
    """CLI entry point: import an epics-and-stories markdown file into
    sprint/future.yaml (or print the generated YAML with --dry-run).

    Returns a process exit code (0 on success, 1 on missing input file).
    """
    parser = argparse.ArgumentParser(
        description="Import epics-and-stories workflow output to future.yaml"
    )
    parser.add_argument('--dry-run', action='store_true',
                        help='Print YAML to stdout instead of updating future.yaml')
    parser.add_argument('epics_file', type=str,
                        help='Path to the markdown file from epics-and-stories workflow')
    parser.add_argument('initiative_name', type=str, nargs='?', default='',
                        help='Name for the initiative (optional, extracted from file if not provided)')
    args = parser.parse_args()

    project_root = find_project_root()

    # Resolve the epics file relative to the project root when not absolute.
    epics_path = Path(args.epics_file)
    if not epics_path.is_absolute():
        epics_path = project_root / epics_path

    if not epics_path.exists():
        log_error(f"File not found: {epics_path}")
        return 1

    future_yaml_path = project_root / 'sprint' / 'future.yaml'

    # Read and parse epics file
    content = epics_path.read_text()
    parsed = parse_epics_markdown(content)

    # Use provided name or extract from file
    initiative_name = args.initiative_name or parsed['title'] or 'Imported Initiative'

    # Get next epic number
    next_epic_num = get_next_epic_number(future_yaml_path)

    log_info(f"Next epic number: {next_epic_num}")
    log_info(f"Initiative name: {initiative_name}")
    log_info(f"Total points: {parsed['total_points']}")
    log_info(f"Epic title: {parsed['epic_title']}")
    log_info(f"Stories found: {len(parsed['stories'])}")

    # Generate YAML
    new_yaml = generate_yaml(parsed, next_epic_num, initiative_name, args.epics_file)

    if args.dry_run:
        print(f"\n{Colors.YELLOW}=== DRY RUN: Would append to future.yaml ==={Colors.NC}")
        print(new_yaml)
        print(f"{Colors.YELLOW}=== End of YAML ==={Colors.NC}\n")
        print(f"{Colors.GREEN}To apply, run without --dry-run{Colors.NC}")
    else:
        # NOTE(review): non-dry runs read future.yaml unconditionally inside
        # update_future_yaml — a missing sprint/future.yaml would raise here;
        # confirm the file is guaranteed to exist.
        updated_content = update_future_yaml(future_yaml_path, new_yaml, next_epic_num)
        future_yaml_path.write_text(updated_content)

        log_success(f"Added epic-{next_epic_num} to {future_yaml_path}")
        log_success(f"Next available epic number is now: {next_epic_num + 1}")

    return 0


if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# ensure-swebench-data.sh - Downloads SWE-bench data if not present
#
# Usage: ensure-swebench-data.sh [--force]
#
# Downloads SWE-bench Verified dataset from HuggingFace to /tmp/swebench_all.json
# This is a dependency for:
#   - swebench-judge.py
#   - ground-truth-judge.py
#
# Options:
#   --force    Re-download even if file exists

set -euo pipefail

CACHE_PATH="/tmp/swebench_all.json"
DATASET_URL="https://huggingface.co/datasets/princeton-nlp/SWE-bench_Verified/resolve/main/data/test.jsonl"

force=false
if [[ "${1:-}" == "--force" ]]; then
    force=true
fi

# Check if already present
if [[ -f "$CACHE_PATH" ]] && [[ "$force" == "false" ]]; then
    echo "SWE-bench data already cached at $CACHE_PATH"
    exit 0
fi

echo "Downloading SWE-bench Verified dataset..."

# Shared JSONL -> JSON-array conversion (previously duplicated verbatim in
# the curl and wget branches).
convert_jsonl() {
    python3 -c "
import json
import sys
lines = [json.loads(line) for line in sys.stdin if line.strip()]
print(json.dumps(lines, indent=2))
"
}

# Download to a temp file first so a failed or interrupted download never
# leaves a truncated file at CACHE_PATH that later runs treat as cached.
tmp_path="$(mktemp "${CACHE_PATH}.XXXXXX")"
trap 'rm -f "$tmp_path"' EXIT

if command -v curl &>/dev/null; then
    curl -sL "$DATASET_URL" | convert_jsonl > "$tmp_path"
elif command -v wget &>/dev/null; then
    wget -qO- "$DATASET_URL" | convert_jsonl > "$tmp_path"
else
    echo "Error: curl or wget required to download SWE-bench data"
    exit 1
fi

# Verify the download parses before moving it into place atomically.
if [[ -s "$tmp_path" ]]; then
    count=$(python3 -c "import json; print(len(json.load(open('$tmp_path'))))")
    mv "$tmp_path" "$CACHE_PATH"
    trap - EXIT
    echo "Downloaded $count SWE-bench scenarios to $CACHE_PATH"
else
    echo "Error: Failed to download SWE-bench data"
    exit 1
fi
|
|
@@ -1,11 +1,13 @@
|
|
|
1
|
-
#!/usr/bin/env bash
# compute-theme-tiers.sh - Compute tier rankings from job-fair results
#
# Uses the MOST COMPLETE run for each theme (most matrix entries),
# not the most recent. This prevents incomplete runs from overriding good data.
#
# Usage: compute-theme-tiers.sh [--dry-run] [--verbose] [--min-entries N]

# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail

# Resolve the directory this wrapper lives in so the Python implementation
# can be found regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Delegate to the Python implementation, forwarding all arguments.
exec python3 "$SCRIPT_DIR/compute_theme_tiers.py" "$@"
|