@leejungkiin/awkit 1.4.0 → 1.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/awk.js +458 -7
- package/bin/claude-generators.js +122 -0
- package/core/AGENTS.md +16 -0
- package/core/CLAUDE.md +155 -0
- package/core/GEMINI.md +44 -9
- package/package.json +1 -1
- package/skills/ai-sprite-maker/SKILL.md +81 -0
- package/skills/ai-sprite-maker/scripts/animate_sprite.py +102 -0
- package/skills/ai-sprite-maker/scripts/process_sprites.py +140 -0
- package/skills/code-review/SKILL.md +21 -33
- package/skills/lucylab-tts/SKILL.md +64 -0
- package/skills/lucylab-tts/resources/voices_library.json +908 -0
- package/skills/lucylab-tts/scripts/.env +1 -0
- package/skills/lucylab-tts/scripts/lucylab_tts.py +506 -0
- package/skills/orchestrator/SKILL.md +5 -0
- package/skills/short-maker/SKILL.md +150 -0
- package/skills/short-maker/_backup/storyboard.html +106 -0
- package/skills/short-maker/_backup/video_mixer.py +296 -0
- package/skills/short-maker/outputs/fitbite-promo/background.jpg +0 -0
- package/skills/short-maker/outputs/fitbite-promo/final/promo-final.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/script.md +19 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-01.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-02.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-03.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-04.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-01.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-02.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-03.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-04.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard.html +133 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard.json +38 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/merged_chroma.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/merged_crossfaded.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_00.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_01.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_02.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_03.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/manifest.json +31 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-01.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-02.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-03.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-04.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts_script.txt +11 -0
- package/skills/short-maker/scripts/google-flow-cli/.project-identity +41 -0
- package/skills/short-maker/scripts/google-flow-cli/.trae/rules/project_rules.md +52 -0
- package/skills/short-maker/scripts/google-flow-cli/CODEBASE.md +67 -0
- package/skills/short-maker/scripts/google-flow-cli/GoogleFlowCli.code-workspace +29 -0
- package/skills/short-maker/scripts/google-flow-cli/README.md +168 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/PROJECT.md +12 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/REQUIREMENTS.md +22 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/ROADMAP.md +16 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/TECH-SPEC.md +13 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/__init__.py +3 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/__init__.py +19 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/client.py +1921 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/models.py +64 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/rpc_ids.py +98 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/__init__.py +15 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/browser_auth.py +692 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/humanizer.py +417 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/proxy_ext.py +120 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/recaptcha.py +482 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/batchexecute/__init__.py +5 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/batchexecute/client.py +414 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/cli/__init__.py +1 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/cli/main.py +1075 -0
- package/skills/short-maker/scripts/google-flow-cli/pyproject.toml +36 -0
- package/skills/short-maker/scripts/google-flow-cli/script.txt +22 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/__init__.py +0 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/test_batchexecute.py +113 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/test_client.py +190 -0
- package/skills/short-maker/templates/aida_script.md +40 -0
- package/skills/short-maker/templates/mimic_analyzer.md +29 -0
- package/skills/single-flow-task-execution/SKILL.md +9 -6
- package/skills/skill-creator/SKILL.md +44 -0
- package/skills/spm-build-analysis/SKILL.md +92 -0
- package/skills/spm-build-analysis/references/build-optimization-sources.md +155 -0
- package/skills/spm-build-analysis/references/recommendation-format.md +85 -0
- package/skills/spm-build-analysis/references/spm-analysis-checks.md +105 -0
- package/skills/spm-build-analysis/scripts/check_spm_pins.py +118 -0
- package/skills/symphony-enforcer/SKILL.md +51 -83
- package/skills/symphony-orchestrator/SKILL.md +1 -1
- package/skills/trello-sync/SKILL.md +27 -28
- package/skills/verification-gate/SKILL.md +13 -2
- package/skills/xcode-build-benchmark/SKILL.md +88 -0
- package/skills/xcode-build-benchmark/references/benchmark-artifacts.md +94 -0
- package/skills/xcode-build-benchmark/references/benchmarking-workflow.md +67 -0
- package/skills/xcode-build-benchmark/schemas/build-benchmark.schema.json +230 -0
- package/skills/xcode-build-benchmark/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-fixer/SKILL.md +218 -0
- package/skills/xcode-build-fixer/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-build-fixer/references/fix-patterns.md +290 -0
- package/skills/xcode-build-fixer/references/recommendation-format.md +85 -0
- package/skills/xcode-build-fixer/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-orchestrator/SKILL.md +156 -0
- package/skills/xcode-build-orchestrator/references/benchmark-artifacts.md +94 -0
- package/skills/xcode-build-orchestrator/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-build-orchestrator/references/orchestration-report-template.md +143 -0
- package/skills/xcode-build-orchestrator/references/recommendation-format.md +85 -0
- package/skills/xcode-build-orchestrator/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-orchestrator/scripts/diagnose_compilation.py +273 -0
- package/skills/xcode-build-orchestrator/scripts/generate_optimization_report.py +533 -0
- package/skills/xcode-compilation-analyzer/SKILL.md +89 -0
- package/skills/xcode-compilation-analyzer/references/build-optimization-sources.md +155 -0
- package/skills/xcode-compilation-analyzer/references/code-compilation-checks.md +106 -0
- package/skills/xcode-compilation-analyzer/references/recommendation-format.md +85 -0
- package/skills/xcode-compilation-analyzer/scripts/diagnose_compilation.py +273 -0
- package/skills/xcode-project-analyzer/SKILL.md +76 -0
- package/skills/xcode-project-analyzer/references/build-optimization-sources.md +155 -0
- package/skills/xcode-project-analyzer/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-project-analyzer/references/project-audit-checks.md +101 -0
- package/skills/xcode-project-analyzer/references/recommendation-format.md +85 -0
- package/templates/project-identity/android.json +0 -10
- package/templates/project-identity/backend-nestjs.json +0 -10
- package/templates/project-identity/expo.json +0 -10
- package/templates/project-identity/ios.json +0 -10
- package/templates/project-identity/web-nextjs.json +0 -10
- package/workflows/_uncategorized/ship-to-code.md +85 -0
- package/workflows/context/codebase-sync.md +10 -87
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
# Recommendation Format
|
|
2
|
+
|
|
3
|
+
All optimization skills should report recommendations in a shared structure so the orchestrator can merge and prioritize them cleanly.
|
|
4
|
+
|
|
5
|
+
## Required Fields
|
|
6
|
+
|
|
7
|
+
Each recommendation should include:
|
|
8
|
+
|
|
9
|
+
- `title`
|
|
10
|
+
- `wait_time_impact` -- plain-language statement of expected wall-clock impact, e.g. "Expected to reduce your clean build by ~3s", "Reduces parallel compile work but unlikely to reduce build wait time", or "Impact on wait time is uncertain -- re-benchmark to confirm"
|
|
11
|
+
- `actionability` -- classifies how fixable the issue is from the project (see values below)
|
|
12
|
+
- `category`
|
|
13
|
+
- `observed_evidence`
|
|
14
|
+
- `estimated_impact`
|
|
15
|
+
- `confidence`
|
|
16
|
+
- `approval_required`
|
|
17
|
+
- `benchmark_verification_status`
|
|
18
|
+
|
|
19
|
+
### Actionability Values
|
|
20
|
+
|
|
21
|
+
Every recommendation must include an `actionability` classification:
|
|
22
|
+
|
|
23
|
+
- `repo-local` -- Fix lives entirely in project files, source code, or local configuration. The developer can apply it without side effects outside the repo.
|
|
24
|
+
- `package-manager` -- Requires CocoaPods or SPM configuration changes that may have broad side effects (e.g., linkage mode, dependency restructuring). These should be benchmarked before and after.
|
|
25
|
+
- `xcode-behavior` -- Observed cost is driven by Xcode internals and is not suppressible from the project. Report the finding for awareness but do not promise a fix.
|
|
26
|
+
- `upstream` -- Requires changes in a third-party dependency or external tool. The developer cannot fix it locally.
|
|
27
|
+
|
|
28
|
+
## Suggested Optional Fields
|
|
29
|
+
|
|
30
|
+
- `scope`
|
|
31
|
+
- `affected_files`
|
|
32
|
+
- `affected_targets`
|
|
33
|
+
- `affected_packages`
|
|
34
|
+
- `implementation_notes`
|
|
35
|
+
- `risk_level`
|
|
36
|
+
|
|
37
|
+
## JSON Example
|
|
38
|
+
|
|
39
|
+
```json
|
|
40
|
+
{
|
|
41
|
+
"recommendations": [
|
|
42
|
+
{
|
|
43
|
+
"title": "Guard a release-only symbol upload script",
|
|
44
|
+
"wait_time_impact": "Expected to reduce your incremental build by approximately 6 seconds.",
|
|
45
|
+
"actionability": "repo-local",
|
|
46
|
+
"category": "project",
|
|
47
|
+
"observed_evidence": [
|
|
48
|
+
"Incremental builds spend 6.3 seconds in a run script phase.",
|
|
49
|
+
"The script runs for Debug builds even though the output is only needed in Release."
|
|
50
|
+
],
|
|
51
|
+
"estimated_impact": "High incremental-build improvement",
|
|
52
|
+
"confidence": "High",
|
|
53
|
+
"approval_required": true,
|
|
54
|
+
"benchmark_verification_status": "Not yet verified",
|
|
55
|
+
"scope": "Target build phase",
|
|
56
|
+
"risk_level": "Low"
|
|
57
|
+
}
|
|
58
|
+
]
|
|
59
|
+
}
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
## Markdown Rendering Guidance
|
|
63
|
+
|
|
64
|
+
When rendering for human review, preserve the same field order:
|
|
65
|
+
|
|
66
|
+
1. title
|
|
67
|
+
2. wait-time impact
|
|
68
|
+
3. actionability
|
|
69
|
+
4. observed evidence
|
|
70
|
+
5. estimated impact
|
|
71
|
+
6. confidence
|
|
72
|
+
7. approval required
|
|
73
|
+
8. benchmark verification status
|
|
74
|
+
|
|
75
|
+
That makes it easier for the developer to approve or reject specific items quickly.
|
|
76
|
+
|
|
77
|
+
## Verification Status Values
|
|
78
|
+
|
|
79
|
+
Recommended values:
|
|
80
|
+
|
|
81
|
+
- `Not yet verified`
|
|
82
|
+
- `Queued for verification`
|
|
83
|
+
- `Verified improvement`
|
|
84
|
+
- `No measurable improvement`
|
|
85
|
+
- `Inconclusive due to benchmark noise`
|
|
@@ -0,0 +1,308 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
|
|
3
|
+
import argparse
import json
import os
import platform
import re
import shlex
import shutil
import statistics
import subprocess
import sys
import tempfile
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the benchmark run."""
    p = argparse.ArgumentParser(description="Benchmark Xcode clean and incremental builds.")
    target = p.add_mutually_exclusive_group(required=True)
    target.add_argument("--workspace", help="Path to the .xcworkspace file")
    target.add_argument("--project", help="Path to the .xcodeproj file")
    p.add_argument("--scheme", required=True, help="Scheme to build")
    p.add_argument("--configuration", default="Debug", help="Build configuration")
    p.add_argument("--destination", help="xcodebuild destination string")
    p.add_argument("--derived-data-path", help="DerivedData path override")
    p.add_argument("--output-dir", default=".build-benchmark", help="Output directory for artifacts")
    p.add_argument("--repeats", type=int, default=3, help="Measured runs per build type")
    p.add_argument("--skip-warmup", action="store_true", help="Skip the validation build")
    p.add_argument(
        "--touch-file",
        help="Path to a source file to touch before each incremental build. "
        "When provided, measures a real edit-rebuild loop instead of a zero-change build.",
    )
    p.add_argument(
        "--no-cached-clean",
        action="store_true",
        help="Skip cached clean builds even when COMPILATION_CACHING is detected.",
    )
    p.add_argument(
        "--extra-arg",
        action="append",
        default=[],
        help="Additional xcodebuild argument to append. Can be passed multiple times.",
    )
    return p.parse_args()
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def command_base(args: argparse.Namespace) -> List[str]:
    """Build the shared xcodebuild invocation prefix from parsed options."""
    cmd: List[str] = ["xcodebuild"]
    # Exactly one of --workspace / --project is set (mutually exclusive group).
    for flag, value in (("-workspace", args.workspace), ("-project", args.project)):
        if value:
            cmd += [flag, value]
    cmd += ["-scheme", args.scheme, "-configuration", args.configuration]
    if args.destination:
        cmd += ["-destination", args.destination]
    if args.derived_data_path:
        cmd += ["-derivedDataPath", args.derived_data_path]
    cmd += args.extra_arg
    return cmd
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def shell_join(parts: List[str]) -> str:
    """Render an argv list as a copy-pasteable POSIX shell command string.

    Fix: the original used ``subprocess.list2cmdline``, which implements
    Windows cmd.exe quoting rules. This script drives ``xcodebuild`` (macOS
    only), so logged commands containing quotes or shell metacharacters could
    not be pasted back into a POSIX shell safely. ``shlex.join`` applies
    correct POSIX quoting. The string is only stored in logs/artifacts for
    human use, so the format change is safe.
    """
    return shlex.join(parts)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
_TASK_COUNT_RE = re.compile(r"^(.+?)\s*\((\d+)\s+tasks?\)$")
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _extract_task_count(name: str) -> tuple[str, Optional[int]]:
|
|
73
|
+
"""Split 'Category (N tasks)' into ('Category', N)."""
|
|
74
|
+
match = _TASK_COUNT_RE.match(name)
|
|
75
|
+
if match:
|
|
76
|
+
return match.group(1).strip(), int(match.group(2))
|
|
77
|
+
return name, None
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def parse_timing_summary(output: str) -> List[Dict]:
    """Parse xcodebuild -showBuildTimingSummary lines into per-category totals.

    Returns a list of ``{"name", "seconds"[, "task_count"]}`` dicts sorted by
    descending total seconds. The same category can appear once per target,
    so durations and task counts are accumulated per category name.
    """
    categories: Dict[str, float] = {}
    task_counts: Dict[str, Optional[int]] = {}
    for raw_line in output.splitlines():
        line = raw_line.strip()
        if not line:
            continue
        # Timing lines end with a duration suffix; try longest suffix first
        # so " seconds" is not mistaken for " sec" plus trailing text.
        for suffix in (" seconds", " second", " sec"):
            if not line.endswith(suffix):
                continue
            trimmed = line[: -len(suffix)]
            # Category and number are separated either by '|' (table-style
            # output) or by the last run of whitespace.
            if "|" in trimmed:
                name_part, _, seconds_text = trimmed.rpartition("|")
            else:
                name_part, _, seconds_text = trimmed.rpartition(" ")
            try:
                seconds = float(seconds_text.strip())
            except ValueError:
                # Suffix matched but no parseable number -> not a timing line.
                continue
            # NOTE(review): this replace() swaps a space for an identical
            # space and is a no-op as written -- presumably it was meant to
            # normalize a non-breaking space or doubled spacing; confirm
            # against real xcodebuild output before changing it.
            cleaned_name = name_part.replace(" ", " ").strip(" -:")
            if len(cleaned_name) < 3:
                # Too short to be a real category name; skip noise.
                continue
            base_name, count = _extract_task_count(cleaned_name)
            categories[base_name] = categories.get(base_name, 0.0) + seconds
            if count is not None:
                task_counts[base_name] = (task_counts.get(base_name) or 0) + count
            break
    result: List[Dict] = []
    # Largest contributors first, so the top of the list is the hot spot.
    for name, seconds in sorted(categories.items(), key=lambda item: item[1], reverse=True):
        entry: Dict = {"name": name, "seconds": round(seconds, 3)}
        if name in task_counts:
            entry["task_count"] = task_counts[name]
        result.append(entry)
    return result
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def run_command(command: List[str]) -> subprocess.CompletedProcess:
    """Run *command*, capturing decoded stdout/stderr; never raises on failure."""
    return subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def stats_for(runs: List[Dict[str, object]]) -> Dict[str, float]:
    """Summarize the durations of the successful runs.

    Failed runs are excluded; an all-failed (or empty) list yields zeroed stats.
    """
    durations = [run["duration_seconds"] for run in runs if run.get("success")]
    if not durations:
        return {
            "count": 0,
            "min_seconds": 0.0,
            "max_seconds": 0.0,
            "median_seconds": 0.0,
            "average_seconds": 0.0,
        }
    raw = {
        "count": len(durations),
        "min_seconds": min(durations),
        "max_seconds": max(durations),
        "median_seconds": statistics.median(durations),
        "average_seconds": statistics.fmean(durations),
    }
    # Round every duration to milliseconds; leave the run count as an int.
    return {k: (v if k == "count" else round(v, 3)) for k, v in raw.items()}
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def xcode_version() -> str:
    """Report the installed Xcode version string, or "unknown" on failure."""
    probe = run_command(["xcodebuild", "-version"])
    if probe.returncode != 0:
        return "unknown"
    return probe.stdout.strip()
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def detect_compilation_caching(base_command: List[str]) -> bool:
    """Check whether COMPILATION_CACHING is enabled in the resolved build settings.

    Any failure to query the settings is treated as "not enabled".
    """
    settings = run_command([*base_command, "-showBuildSettings"])
    if settings.returncode != 0:
        return False
    for raw in settings.stdout.splitlines():
        entry = raw.strip()
        if not entry.startswith("COMPILATION_CACHING") or "=" not in entry:
            continue
        _, _, value = entry.split("COMPILATION_CACHING", 1)[-1].partition("=")
        return value.strip() == "YES"
    return False
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def measure_build(
    base_command: List[str],
    artifact_stem: str,
    output_dir: Path,
    build_type: str,
    run_index: int,
) -> Dict[str, object]:
    """Run one timed build, persist its log, and return a run record dict."""
    full_command = [*base_command, "build", "-showBuildTimingSummary"]
    t0 = time.perf_counter()
    completed = run_command(full_command)
    duration = round(time.perf_counter() - t0, 3)
    combined = completed.stdout + completed.stderr
    log_path = output_dir / f"{artifact_stem}-{build_type}-{run_index}.log"
    log_path.write_text(combined)
    return {
        "id": f"{build_type}-{run_index}",
        "build_type": build_type,
        "duration_seconds": duration,
        "success": completed.returncode == 0,
        "exit_code": completed.returncode,
        "command": shell_join(full_command),
        "raw_log_path": str(log_path),
        "timing_summary_categories": parse_timing_summary(combined),
    }
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def main() -> int:
    """Benchmark clean, optional cached-clean, and incremental xcodebuild runs.

    Runs an optional warmup cycle, then ``--repeats`` measured clean builds,
    optional cached-clean builds (when COMPILATION_CACHING is detected), and
    incremental (or zero-change) builds. Per-run logs plus a JSON summary
    artifact are written to ``--output-dir``.

    Returns 0 on success, the failing xcodebuild exit code on build/clean
    failure, or 1 for an invalid ``--touch-file``.

    Fixes vs. the original:
    - ``--touch-file`` existence is validated up front, instead of only after
      the (potentially long) clean and cached-clean phases already ran.
    - Removed a redundant duplicate ``incremental_label = "incremental"``
      assignment in the touch-file branch.
    """
    args = parse_args()

    # Fail fast on a bad --touch-file before spending minutes benchmarking.
    touch_path: Optional[Path] = None
    if args.touch_file:
        touch_path = Path(args.touch_file)
        if not touch_path.exists():
            sys.stderr.write(f"--touch-file path does not exist: {touch_path}\n")
            return 1
    incremental_label = "incremental" if args.touch_file else "zero-change"

    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
    artifact_stem = f"{timestamp}-{args.scheme.replace(' ', '-').lower()}"
    base_command = command_base(args)

    # Warmup validates the scheme builds at all and primes caches so the
    # first measured run is not an outlier.
    if not args.skip_warmup:
        warmup = run_command([*base_command, "build"])
        if warmup.returncode != 0:
            sys.stderr.write(warmup.stdout + warmup.stderr)
            return warmup.returncode
        warmup_clean = run_command([*base_command, "clean"])
        if warmup_clean.returncode != 0:
            sys.stderr.write(warmup_clean.stdout + warmup_clean.stderr)
            return warmup_clean.returncode
        warmup_rebuild = run_command([*base_command, "build"])
        if warmup_rebuild.returncode != 0:
            sys.stderr.write(warmup_rebuild.stdout + warmup_rebuild.stderr)
            return warmup_rebuild.returncode

    runs: Dict[str, list] = {"clean": [], "incremental": []}

    # --- Clean builds ----------------------------------------------------------
    for index in range(1, args.repeats + 1):
        clean_result = run_command([*base_command, "clean"])
        clean_log_path = output_dir / f"{artifact_stem}-clean-prep-{index}.log"
        clean_log_path.write_text(clean_result.stdout + clean_result.stderr)
        if clean_result.returncode != 0:
            sys.stderr.write(clean_result.stdout + clean_result.stderr)
            return clean_result.returncode
        runs["clean"].append(measure_build(base_command, artifact_stem, output_dir, "clean", index))

    # --- Cached clean builds ---------------------------------------------------
    # When COMPILATION_CACHING is enabled, the compilation cache lives outside
    # DerivedData and survives product deletion. We measure "cached clean"
    # builds by pointing DerivedData at a temp directory, warming the cache with
    # one build, then deleting the DerivedData directory (but not the cache)
    # before each measured rebuild. This captures the realistic scenario:
    # branch switching, pulling changes, or Clean Build Folder.
    should_cached_clean = not args.no_cached_clean and detect_compilation_caching(base_command)
    if should_cached_clean:
        dd_path = Path(args.derived_data_path) if args.derived_data_path else Path(
            tempfile.mkdtemp(prefix="xcode-bench-dd-")
        )
        cached_cmd = list(base_command)
        if not args.derived_data_path:
            cached_cmd.extend(["-derivedDataPath", str(dd_path)])

        cache_warmup = run_command([*cached_cmd, "build"])
        if cache_warmup.returncode != 0:
            # Best-effort: skip this phase rather than failing the whole run.
            sys.stderr.write("Warning: cached clean warmup build failed, skipping cached clean benchmarks.\n")
            sys.stderr.write(cache_warmup.stdout + cache_warmup.stderr)
            should_cached_clean = False

    if should_cached_clean:
        runs["cached_clean"] = []
        for index in range(1, args.repeats + 1):
            # Remove DerivedData (the compilation cache survives elsewhere).
            shutil.rmtree(dd_path, ignore_errors=True)
            runs["cached_clean"].append(
                measure_build(cached_cmd, artifact_stem, output_dir, "cached-clean", index)
            )
        shutil.rmtree(dd_path, ignore_errors=True)

    # --- Incremental / zero-change builds --------------------------------------
    for index in range(1, args.repeats + 1):
        if touch_path is not None:
            # Touch the file so each run measures a real edit-rebuild loop.
            touch_path.touch()
        runs["incremental"].append(
            measure_build(base_command, artifact_stem, output_dir, incremental_label, index)
        )

    summary: Dict[str, object] = {
        "clean": stats_for(runs["clean"]),
        "incremental": stats_for(runs["incremental"]),
    }
    if "cached_clean" in runs:
        summary["cached_clean"] = stats_for(runs["cached_clean"])

    artifact = {
        # 1.2.0 adds the optional cached_clean section.
        "schema_version": "1.2.0" if "cached_clean" in runs else "1.1.0",
        "created_at": datetime.now(timezone.utc).isoformat(),
        "build": {
            "entrypoint": "workspace" if args.workspace else "project",
            "path": args.workspace or args.project,
            "scheme": args.scheme,
            "configuration": args.configuration,
            "destination": args.destination or "",
            "derived_data_path": args.derived_data_path or "",
            "command": shell_join(base_command),
        },
        "environment": {
            "host": platform.node(),
            "macos_version": platform.platform(),
            "xcode_version": xcode_version(),
            "cwd": os.getcwd(),
        },
        "runs": runs,
        "summary": summary,
        "notes": [f"touch-file: {args.touch_file}"] if args.touch_file else [],
    }

    artifact_path = output_dir / f"{artifact_stem}.json"
    artifact_path.write_text(json.dumps(artifact, indent=2) + "\n")

    print(f"Saved benchmark artifact: {artifact_path}")
    print(f"Clean median: {artifact['summary']['clean']['median_seconds']}s")
    if "cached_clean" in artifact["summary"]:
        print(f"Cached clean median: {artifact['summary']['cached_clean']['median_seconds']}s")
    inc_label = "Incremental" if args.touch_file else "Zero-change"
    print(f"{inc_label} median: {artifact['summary']['incremental']['median_seconds']}s")
    return 0
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
if __name__ == "__main__":
    # Propagate main()'s integer result as the process exit status.
    raise SystemExit(main())
|
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
|
|
3
|
+
"""Run a single Xcode build with -Xfrontend diagnostics to find slow type-checking."""
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import json
|
|
7
|
+
import re
|
|
8
|
+
import subprocess
|
|
9
|
+
import sys
|
|
10
|
+
import time
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Dict, List, Optional
|
|
14
|
+
|
|
15
|
+
# Matches named-declaration type-check warnings, e.g.
#   "Foo.swift:12:5: warning: instance method 'bar()' took 250ms to type-check"
_TYPECHECK_RE = re.compile(
    r"^(?P<file>.+?):(?P<line>\d+):(?P<col>\d+): warning: "
    r"(?P<kind>instance method|global function|getter|type-check|expression) "
    r"'?(?P<name>[^']*?)'?\s+took\s+(?P<ms>\d+)ms\s+to\s+type-check"
)

# Matches bare expression warnings that carry no declaration name, e.g.
#   "Foo.swift:3:1: warning: expression took 120ms to type-check"
_EXPRESSION_RE = re.compile(
    r"^(?P<file>.+?):(?P<line>\d+):(?P<col>\d+): warning: "
    r"expression took\s+(?P<ms>\d+)ms\s+to\s+type-check"
)

# Matches -debug-time-compilation per-file lines of the shape
# "<seconds> seconds ... compiling <file>".
# NOTE(review): the exact wording of this output varies between Swift
# toolchains -- confirm against the current swiftc before relying on it.
_FILE_TIME_RE = re.compile(
    r"^\s*(?P<seconds>\d+(?:\.\d+)?)\s+seconds\s+.*\s+compiling\s+(?P<file>\S+)"
)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the diagnostics build."""
    p = argparse.ArgumentParser(
        description="Run an Xcode build with -Xfrontend type-checking diagnostics."
    )
    target = p.add_mutually_exclusive_group(required=True)
    target.add_argument("--workspace", help="Path to the .xcworkspace file")
    target.add_argument("--project", help="Path to the .xcodeproj file")
    p.add_argument("--scheme", required=True, help="Scheme to build")
    p.add_argument("--configuration", default="Debug", help="Build configuration")
    p.add_argument("--destination", help="xcodebuild destination string")
    p.add_argument("--derived-data-path", help="DerivedData path override")
    p.add_argument("--output-dir", default=".build-benchmark", help="Output directory")
    p.add_argument(
        "--threshold",
        type=int,
        default=100,
        help="Millisecond threshold for -warn-long-function-bodies and "
        "-warn-long-expression-type-checking (default: 100)",
    )
    p.add_argument("--skip-clean", action="store_true", help="Skip clean before build")
    p.add_argument(
        "--per-file-timing",
        action="store_true",
        help="Add -Xfrontend -debug-time-compilation to report per-file compile times.",
    )
    p.add_argument(
        "--stats-output",
        action="store_true",
        help="Add -Xfrontend -stats-output-dir to collect detailed compiler statistics.",
    )
    p.add_argument(
        "--extra-arg",
        action="append",
        default=[],
        help="Additional xcodebuild argument. Can be passed multiple times.",
    )
    return p.parse_args()
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def command_base(args: argparse.Namespace) -> List[str]:
    """Assemble the shared xcodebuild argument list from parsed options."""
    parts: List[str] = ["xcodebuild"]
    if args.workspace:
        parts += ["-workspace", args.workspace]
    if args.project:
        parts += ["-project", args.project]
    parts += ["-scheme", args.scheme, "-configuration", args.configuration]
    if args.destination:
        parts += ["-destination", args.destination]
    if args.derived_data_path:
        parts += ["-derivedDataPath", args.derived_data_path]
    parts += args.extra_arg
    return parts
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def parse_diagnostics(output: str) -> List[Dict]:
    """Extract type-checking warnings from xcodebuild output.

    Recognizes named-declaration warnings (_TYPECHECK_RE) and anonymous
    expression warnings (_EXPRESSION_RE); duplicates at the same location
    are dropped. Results are sorted by descending duration.
    """
    warnings: List[Dict] = []
    seen = set()

    def _record(match, kind: str, name: str) -> None:
        # Deduplicate on (location, kind): the same warning can be emitted
        # more than once when a file is compiled for multiple targets.
        key = (match.group("file"), match.group("line"), match.group("col"), kind)
        if key in seen:
            return
        seen.add(key)
        warnings.append(
            {
                "file": match.group("file"),
                "line": int(match.group("line")),
                "column": int(match.group("col")),
                "duration_ms": int(match.group("ms")),
                "kind": kind,
                "name": name,
            }
        )

    for raw_line in output.splitlines():
        line = raw_line.strip()
        if m := _TYPECHECK_RE.match(line):
            _record(m, "function-body", m.group("name"))
        elif m := _EXPRESSION_RE.match(line):
            _record(m, "expression", "")

    warnings.sort(key=lambda w: w["duration_ms"], reverse=True)
    return warnings
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def parse_file_timings(output: str) -> List[Dict]:
    """Extract per-file compile times from -debug-time-compilation output.

    Args:
        output: Combined stdout/stderr text from an xcodebuild invocation.

    Returns:
        A list of ``{"file": str, "duration_seconds": float}`` dicts,
        one per unique file, sorted by duration descending.
    """
    seen_paths = set()
    results: List[Dict] = []
    for candidate in output.splitlines():
        hit = _FILE_TIME_RE.match(candidate.strip())
        if not hit:
            continue
        path = hit.group("file")
        # Keep only the first timing reported for each file.
        if path in seen_paths:
            continue
        seen_paths.add(path)
        results.append(
            {
                "file": path,
                "duration_seconds": float(hit.group("seconds")),
            }
        )
    results.sort(key=lambda entry: entry["duration_seconds"], reverse=True)
    return results
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def main() -> int:
    """Run an instrumented xcodebuild and persist type-check diagnostics.

    Optionally cleans first, then builds with Swift frontend flags that
    warn on slow type checking, writes the raw log and a JSON artifact
    to the output directory, and prints a human-readable summary.

    Returns:
        The clean step's exit code if cleaning fails; otherwise 0 —
        NOTE(review): a failed *build* still returns 0 (the failure is
        recorded in the artifact's ``build_success`` field); confirm
        this is intentional for CI callers.
    """
    args = parse_args()
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Timestamped, scheme-scoped stem keeps artifacts from successive runs
    # side by side without collisions.
    timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
    scheme_slug = args.scheme.replace(" ", "-").lower()
    artifact_stem = f"{timestamp}-{scheme_slug}"
    base = command_base(args)

    # Clean first (unless skipped) so timings reflect a full rebuild.
    if not args.skip_clean:
        print("Cleaning build products...")
        clean = subprocess.run([*base, "clean"], capture_output=True, text=True)
        if clean.returncode != 0:
            sys.stderr.write(clean.stdout + clean.stderr)
            return clean.returncode

    # Swift frontend flags that emit "took Nms" warnings above the threshold.
    threshold = str(args.threshold)
    swift_flags = (
        f"$(inherited) -Xfrontend -warn-long-function-bodies={threshold} "
        f"-Xfrontend -warn-long-expression-type-checking={threshold}"
    )
    if args.per_file_timing:
        swift_flags += " -Xfrontend -debug-time-compilation"

    # Optional per-invocation compiler statistics directory.
    stats_dir: Optional[Path] = None
    if args.stats_output:
        stats_dir = output_dir / f"{artifact_stem}-stats"
        stats_dir.mkdir(parents=True, exist_ok=True)
        swift_flags += f" -Xfrontend -stats-output-dir -Xfrontend {stats_dir}"

    build_command = [
        *base,
        "build",
        "-showBuildTimingSummary",
        f"OTHER_SWIFT_FLAGS={swift_flags}",
    ]

    # Describe any optional instrumentation in the progress message.
    extras = []
    if args.per_file_timing:
        extras.append("per-file timing")
    if args.stats_output:
        extras.append("stats output")
    extras_label = f" + {', '.join(extras)}" if extras else ""
    print(f"Building with type-check threshold {threshold}ms{extras_label}...")
    started = time.perf_counter()
    result = subprocess.run(build_command, capture_output=True, text=True)
    elapsed = round(time.perf_counter() - started, 3)

    # Diagnostics can land on either stream; keep both for parsing and audit.
    combined_output = result.stdout + result.stderr
    log_path = output_dir / f"{artifact_stem}-diagnostics.log"
    log_path.write_text(combined_output)

    warnings = parse_diagnostics(combined_output)

    file_timings: Optional[List[Dict]] = None
    if args.per_file_timing:
        file_timings = parse_file_timings(combined_output)

    # Machine-readable artifact mirroring everything printed below.
    artifact = {
        "schema_version": "1.0.0",
        "created_at": datetime.now(timezone.utc).isoformat(),
        "type": "compilation-diagnostics",
        "build": {
            "entrypoint": "workspace" if args.workspace else "project",
            "path": args.workspace or args.project,
            "scheme": args.scheme,
            "configuration": args.configuration,
            "destination": args.destination or "",
        },
        "threshold_ms": args.threshold,
        "build_duration_seconds": elapsed,
        "build_success": result.returncode == 0,
        "raw_log_path": str(log_path),
        "warnings": warnings,
        "summary": {
            "total_warnings": len(warnings),
            "function_body_warnings": sum(1 for w in warnings if w["kind"] == "function-body"),
            "expression_warnings": sum(1 for w in warnings if w["kind"] == "expression"),
            "slowest_ms": warnings[0]["duration_ms"] if warnings else 0,
        },
    }

    # Optional sections are added only when their instrumentation ran.
    if file_timings is not None:
        artifact["per_file_timings"] = file_timings
    if stats_dir is not None:
        artifact["stats_dir"] = str(stats_dir)

    artifact_path = output_dir / f"{artifact_stem}-diagnostics.json"
    artifact_path.write_text(json.dumps(artifact, indent=2) + "\n")

    print(f"\nSaved diagnostics artifact: {artifact_path}")
    print(f"Build {'succeeded' if result.returncode == 0 else 'failed'} in {elapsed}s")
    print(f"Found {len(warnings)} type-check warnings above {threshold}ms threshold\n")

    # Console report: top 20 type-check hotspots (full list is in the JSON).
    if warnings:
        print(f"{'Duration':>10} {'Kind':<15} {'Location'}")
        print(f"{'--------':>10} {'----':<15} {'--------'}")
        for w in warnings[:20]:
            loc = f"{w['file']}:{w['line']}:{w['column']}"
            label = w["name"] if w["name"] else "(expression)"
            print(f"{w['duration_ms']:>8}ms {w['kind']:<15} {loc} {label}")
        if len(warnings) > 20:
            print(f"\n ... and {len(warnings) - 20} more (see {artifact_path})")
    else:
        print("No type-checking hotspots found above threshold.")

    # Console report: top 20 per-file compile times, when collected.
    if file_timings:
        print(f"\nPer-file compile times (top 20):\n")
        print(f"{'Duration':>12} {'File'}")
        print(f"{'--------':>12} {'----'}")
        for t in file_timings[:20]:
            print(f"{t['duration_seconds']:>10.3f}s {t['file']}")
        if len(file_timings) > 20:
            print(f"\n ... and {len(file_timings) - 20} more (see {artifact_path})")

    if stats_dir is not None:
        stat_files = list(stats_dir.glob("*.json"))
        print(f"\nCompiler statistics: {len(stat_files)} files written to {stats_dir}")

    return 0
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
|