livepilot 1.10.4 → 1.10.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/AGENTS.md +3 -3
  3. package/CHANGELOG.md +148 -0
  4. package/CONTRIBUTING.md +1 -1
  5. package/README.md +6 -6
  6. package/livepilot/.Codex-plugin/plugin.json +2 -2
  7. package/livepilot/.claude-plugin/plugin.json +2 -2
  8. package/livepilot/skills/livepilot-core/SKILL.md +4 -4
  9. package/livepilot/skills/livepilot-core/references/overview.md +3 -3
  10. package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +1 -1
  11. package/livepilot/skills/livepilot-release/SKILL.md +5 -5
  12. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  13. package/m4l_device/livepilot_bridge.js +12 -1
  14. package/manifest.json +3 -3
  15. package/mcp_server/__init__.py +1 -1
  16. package/mcp_server/composer/sample_resolver.py +10 -6
  17. package/mcp_server/composer/tools.py +10 -6
  18. package/mcp_server/connection.py +6 -1
  19. package/mcp_server/creative_constraints/tools.py +9 -8
  20. package/mcp_server/experiment/engine.py +9 -5
  21. package/mcp_server/experiment/tools.py +9 -9
  22. package/mcp_server/hook_hunter/tools.py +14 -9
  23. package/mcp_server/m4l_bridge.py +11 -0
  24. package/mcp_server/memory/taste_graph.py +7 -2
  25. package/mcp_server/mix_engine/tools.py +8 -3
  26. package/mcp_server/musical_intelligence/tools.py +15 -10
  27. package/mcp_server/performance_engine/tools.py +6 -2
  28. package/mcp_server/preview_studio/tools.py +21 -15
  29. package/mcp_server/project_brain/tools.py +18 -10
  30. package/mcp_server/reference_engine/tools.py +7 -5
  31. package/mcp_server/runtime/capability_probe.py +10 -4
  32. package/mcp_server/runtime/tools.py +8 -2
  33. package/mcp_server/sample_engine/tools.py +394 -33
  34. package/mcp_server/semantic_moves/tools.py +5 -1
  35. package/mcp_server/server.py +10 -9
  36. package/mcp_server/services/motif_service.py +9 -3
  37. package/mcp_server/session_continuity/tools.py +7 -3
  38. package/mcp_server/session_continuity/tracker.py +9 -8
  39. package/mcp_server/song_brain/tools.py +17 -12
  40. package/mcp_server/splice_client/client.py +19 -6
  41. package/mcp_server/stuckness_detector/tools.py +8 -5
  42. package/mcp_server/tools/_agent_os_engine/__init__.py +52 -0
  43. package/mcp_server/tools/_agent_os_engine/critics.py +134 -0
  44. package/mcp_server/tools/_agent_os_engine/evaluation.py +206 -0
  45. package/mcp_server/tools/_agent_os_engine/models.py +132 -0
  46. package/mcp_server/tools/_agent_os_engine/taste.py +192 -0
  47. package/mcp_server/tools/_agent_os_engine/techniques.py +161 -0
  48. package/mcp_server/tools/_agent_os_engine/world_model.py +170 -0
  49. package/mcp_server/tools/_composition_engine/__init__.py +67 -0
  50. package/mcp_server/tools/_composition_engine/analysis.py +174 -0
  51. package/mcp_server/tools/_composition_engine/critics.py +522 -0
  52. package/mcp_server/tools/_composition_engine/gestures.py +230 -0
  53. package/mcp_server/tools/_composition_engine/harmony.py +70 -0
  54. package/mcp_server/tools/_composition_engine/models.py +193 -0
  55. package/mcp_server/tools/_composition_engine/sections.py +371 -0
  56. package/mcp_server/tools/_perception_engine.py +18 -11
  57. package/mcp_server/tools/agent_os.py +23 -15
  58. package/mcp_server/tools/analyzer.py +166 -7
  59. package/mcp_server/tools/automation.py +6 -1
  60. package/mcp_server/tools/composition.py +25 -16
  61. package/mcp_server/tools/devices.py +10 -6
  62. package/mcp_server/tools/motif.py +7 -2
  63. package/mcp_server/tools/planner.py +6 -2
  64. package/mcp_server/tools/research.py +13 -10
  65. package/mcp_server/transition_engine/tools.py +6 -1
  66. package/mcp_server/translation_engine/tools.py +8 -6
  67. package/mcp_server/wonder_mode/engine.py +8 -3
  68. package/mcp_server/wonder_mode/tools.py +29 -21
  69. package/package.json +2 -2
  70. package/remote_script/LivePilot/__init__.py +1 -1
  71. package/requirements.txt +6 -0
  72. package/livepilot.mcpb +0 -0
  73. package/mcp_server/tools/_agent_os_engine.py +0 -947
  74. package/mcp_server/tools/_composition_engine.py +0 -1530
@@ -0,0 +1,230 @@
1
+ """Part of the _composition_engine package — extracted from the single-file engine.
2
+
3
+ Pure-computation core, no external deps. Callers should import from the package
4
+ facade (e.g. `from mcp_server.tools._composition_engine import X`), which
5
+ re-exports everything from these sub-modules.
6
+ """
7
+ from __future__ import annotations
8
+
9
+ import math
10
+ import re
11
+ from dataclasses import asdict, dataclass, field
12
+ from enum import Enum
13
+ from typing import Any, Optional
14
+
15
+ from .models import GestureIntent, GesturePlan
16
+
17
# Maps each abstract GestureIntent to the concrete automation vocabulary used
# to realize it: a human-readable description, the parameters typically
# automated ("parameter_hints"), the curve family for the automation shape,
# the default direction of motion, and a typical length in bars (used by
# plan_gesture when the caller supplies no explicit duration).
_GESTURE_MAPPINGS: dict[GestureIntent, dict] = {
    GestureIntent.REVEAL: {
        "description": "Open filter, introduce width, grow send level, unmask harmonics",
        "parameter_hints": ["filter_cutoff", "send_level", "utility_width"],
        "curve_family": "exponential",
        "default_direction": "up",
        "typical_duration_bars": 4,
    },
    GestureIntent.CONCEAL: {
        "description": "Close filter, narrow image, reduce send, darken support",
        "parameter_hints": ["filter_cutoff", "volume", "utility_width"],
        "curve_family": "logarithmic",
        "default_direction": "down",
        "typical_duration_bars": 4,
    },
    GestureIntent.HANDOFF: {
        "description": "One voice dims while another emerges",
        "parameter_hints": ["volume", "send_level"],
        "curve_family": "s_curve",
        "default_direction": "crossfade",
        "typical_duration_bars": 2,
    },
    GestureIntent.INHALE: {
        "description": "Pull energy back before impact — pre-drop vacuum",
        "parameter_hints": ["volume", "filter_cutoff", "send_level"],
        "curve_family": "exponential",
        "default_direction": "down",
        "typical_duration_bars": 2,
    },
    GestureIntent.RELEASE: {
        "description": "Restore weight, width, or harmonic color after tension",
        "parameter_hints": ["filter_cutoff", "utility_width", "volume"],
        "curve_family": "spring",
        "default_direction": "up",
        "typical_duration_bars": 1,
    },
    GestureIntent.LIFT: {
        "description": "HP filter rise, reverb send increase — upward energy",
        "parameter_hints": ["hp_filter", "send_level", "reverb_mix"],
        "curve_family": "exponential",
        "default_direction": "up",
        "typical_duration_bars": 8,
    },
    GestureIntent.SINK: {
        "description": "LP filter close, remove highs, settle into sub",
        "parameter_hints": ["filter_cutoff", "eq_high"],
        "curve_family": "logarithmic",
        "default_direction": "down",
        "typical_duration_bars": 4,
    },
    GestureIntent.PUNCTUATE: {
        "description": "Dub throw spike, beat repeat burst — accent a moment",
        "parameter_hints": ["send_level", "beat_repeat"],
        "curve_family": "spike",
        "default_direction": "burst",
        "typical_duration_bars": 1,
    },
    GestureIntent.DRIFT: {
        "description": "Subtle organic movement — perlin noise on parameters",
        "parameter_hints": ["filter_cutoff", "pan", "send_level"],
        "curve_family": "perlin",
        "default_direction": "oscillate",
        "typical_duration_bars": 8,
    },
}
82
+
83
def plan_gesture(
    intent: GestureIntent,
    target_tracks: list[int],
    start_bar: int,
    duration_bars: Optional[int] = None,
    foreground: bool = False,
) -> GesturePlan:
    """Create a gesture plan from a musical intent.

    Maps the abstract intent to concrete automation parameters and curve type.
    The agent uses this plan with apply_automation_shape to execute.

    Args:
        intent: The abstract musical gesture to realize.
        target_tracks: Track indices the gesture applies to.
        start_bar: Bar at which the gesture begins.
        duration_bars: Explicit length in bars; when None, the intent's
            typical duration from the mapping table is used.
        foreground: Whether this is a musical focus (vs background motion).

    Returns:
        A GesturePlan with concrete parameter hints, curve family, direction,
        and bar range.

    Raises:
        ValueError: If the intent has no entry in _GESTURE_MAPPINGS.
    """
    mapping = _GESTURE_MAPPINGS.get(intent)
    if mapping is None:
        raise ValueError(f"Unknown gesture intent: {intent}")

    # Explicit None check instead of `duration_bars or ...`: a caller-supplied
    # duration of 0 must not silently fall back to the typical duration.
    if duration_bars is None:
        actual_duration = mapping["typical_duration_bars"]
    else:
        actual_duration = duration_bars

    return GesturePlan(
        gesture_id=f"gest_{intent.value}_{start_bar}",
        intent=intent,
        description=mapping["description"],
        target_tracks=target_tracks,
        parameter_hints=mapping["parameter_hints"],
        curve_family=mapping["curve_family"],
        direction=mapping["default_direction"],
        start_bar=start_bar,
        end_bar=start_bar + actual_duration,
        foreground=foreground,
    )
113
+
114
# Named multi-step gesture recipes. Each template lists "steps": gesture
# intents with "offset_bars" relative to an anchor bar (negative = before the
# anchor) and a "duration_bars" length, plus "best_for" tags naming the
# musical contexts the template suits. resolve_gesture_template expands these
# into concrete GesturePlans.
GESTURE_TEMPLATES: dict[str, dict] = {
    "pre_arrival_vacuum": {
        "description": "Pull energy back before impact — classic build technique",
        "steps": [
            {"intent": "inhale", "offset_bars": -4, "duration_bars": 3},
            {"intent": "release", "offset_bars": 0, "duration_bars": 1},
        ],
        "best_for": ["pre_drop", "pre_chorus", "turnaround"],
    },
    "sectional_width_bloom": {
        "description": "Narrow then widen — creates sense of opening up",
        "steps": [
            {"intent": "conceal", "offset_bars": -2, "duration_bars": 2},
            {"intent": "reveal", "offset_bars": 0, "duration_bars": 4},
            {"intent": "drift", "offset_bars": 4, "duration_bars": 8},
        ],
        "best_for": ["chorus_entry", "verse_to_chorus", "section_expansion"],
    },
    "phrase_end_throw": {
        "description": "Accent the end of a phrase with a dub throw",
        "steps": [
            {"intent": "punctuate", "offset_bars": -1, "duration_bars": 1},
        ],
        "best_for": ["phrase_cadence", "hook_accent", "transition"],
    },
    "turnaround_accent": {
        "description": "Mark turnaround with lift then settle",
        "steps": [
            {"intent": "lift", "offset_bars": -2, "duration_bars": 2},
            {"intent": "sink", "offset_bars": 0, "duration_bars": 2},
        ],
        "best_for": ["loop_turnaround", "phrase_repeat", "section_end"],
    },
    "outro_decay_dissolve": {
        "description": "Gradual dissolution for endings",
        "steps": [
            {"intent": "conceal", "offset_bars": 0, "duration_bars": 8},
            {"intent": "sink", "offset_bars": 4, "duration_bars": 8},
        ],
        "best_for": ["outro", "fade_out", "ending"],
    },
    "bass_tuck_before_kick": {
        "description": "Duck bass before kick re-entry",
        "steps": [
            {"intent": "inhale", "offset_bars": -1, "duration_bars": 1},
            {"intent": "release", "offset_bars": 0, "duration_bars": 1},
        ],
        "best_for": ["kick_reentry", "drop", "bass_return"],
    },
    "harmonic_tint_rise": {
        "description": "Gradually introduce harmonic color via filter opening",
        "steps": [
            {"intent": "reveal", "offset_bars": 0, "duration_bars": 8},
        ],
        "best_for": ["verse_development", "pad_introduction", "harmonic_shift"],
    },
    "response_echo": {
        "description": "Echo gesture — punctuate then drift the tail",
        "steps": [
            {"intent": "punctuate", "offset_bars": 0, "duration_bars": 1},
            {"intent": "drift", "offset_bars": 1, "duration_bars": 4},
        ],
        "best_for": ["call_and_response", "hook_echo", "delay_throw"],
    },
    "texture_drift_bed": {
        "description": "Subtle ongoing motion for background textures",
        "steps": [
            {"intent": "drift", "offset_bars": 0, "duration_bars": 16},
        ],
        "best_for": ["pad_movement", "background_texture", "atmosphere"],
    },
    "tension_ratchet": {
        "description": "Stepped tension increase — reveal in stages",
        "steps": [
            {"intent": "reveal", "offset_bars": 0, "duration_bars": 4},
            {"intent": "reveal", "offset_bars": 4, "duration_bars": 4},
            {"intent": "lift", "offset_bars": 8, "duration_bars": 4},
        ],
        "best_for": ["long_build", "riser", "gradual_intensification"],
    },
    "re_entry_spotlight": {
        "description": "Spotlight a returning element",
        "steps": [
            {"intent": "conceal", "offset_bars": -2, "duration_bars": 2},
            {"intent": "release", "offset_bars": 0, "duration_bars": 1},
        ],
        "best_for": ["hook_return", "melody_reentry", "element_spotlight"],
    },
}
203
+
204
def resolve_gesture_template(
    template_name: str,
    target_tracks: list[int],
    anchor_bar: int,
    foreground: bool = False,
) -> list[GesturePlan]:
    """Expand a named gesture template into a sequence of concrete GesturePlans.

    anchor_bar is the reference point (e.g. a section boundary bar number);
    steps whose offset is negative land before the anchor.

    Raises:
        ValueError: If template_name is not a known template.
    """
    spec = GESTURE_TEMPLATES.get(template_name)
    if spec is None:
        valid = list(GESTURE_TEMPLATES.keys())
        raise ValueError(f"Unknown template '{template_name}'. Valid: {valid}")

    resolved: list[GesturePlan] = []
    for index, step in enumerate(spec["steps"]):
        bar = anchor_bar + step.get("offset_bars", 0)
        plan = plan_gesture(
            GestureIntent(step["intent"]),
            target_tracks,
            bar,
            step.get("duration_bars", None),
            foreground,
        )
        # Replace the generic id with one tied to the template and step order.
        plan.gesture_id = f"{template_name}_{index:02d}_{bar}"
        resolved.append(plan)

    return resolved
230
+
@@ -0,0 +1,70 @@
1
+ """Part of the _composition_engine package — extracted from the single-file engine.
2
+
3
+ Pure-computation core, no external deps. Callers should import from the package
4
+ facade (e.g. `from mcp_server.tools._composition_engine import X`), which
5
+ re-exports everything from these sub-modules.
6
+ """
7
+ from __future__ import annotations
8
+
9
+ import math
10
+ import re
11
+ from dataclasses import asdict, dataclass, field
12
+ from enum import Enum
13
+ from typing import Any, Optional
14
+
15
+ from .models import HarmonyField
16
+
17
def build_harmony_field(
    section_id: str,
    harmony_analysis: Optional[dict] = None,
    scale_info: Optional[dict] = None,
    progression_info: Optional[dict] = None,
    voice_leading_info: Optional[dict] = None,
) -> HarmonyField:
    """Assemble a HarmonyField from the outputs of the theory/harmony tools.

    Every input is optional — missing data simply leaves the corresponding
    fields at their defaults (graceful degradation).
    """
    result = HarmonyField(section_id=section_id)

    # Scale / key info: take the top-ranked match.
    if scale_info:
        best = scale_info.get("top_match", {})
        result.key = best.get("tonic", "")
        result.mode = best.get("mode", "")
        result.confidence = best.get("confidence", 0.0)

    # Chord progression and derived tension metrics.
    if harmony_analysis:
        chord_list = harmony_analysis.get("chords", [])
        result.chord_progression = [c.get("chord_name", "?") for c in chord_list]

        numerals = [c.get("roman_numeral", "?") for c in chord_list]
        if numerals:
            # Instability: share of chords that are neither tonic nor unknown.
            off_tonic = sum(1 for n in numerals if n not in ("i", "I", "?"))
            result.instability = off_tonic / len(numerals)
            # Resolution potential: strong when the progression ends on tonic.
            result.resolution_potential = 1.0 if numerals[-1] in ("i", "I") else 0.3

    # Progression classification nudges instability:
    # "diatonic" = more stable, "free neo-Riemannian" = more unstable.
    if progression_info:
        label = progression_info.get("classification", "").lower()
        if "diatonic" in label:
            result.instability = max(0.0, result.instability - 0.1)
        elif "free" in label:
            result.instability = min(1.0, result.instability + 0.1)

    # Voice leading quality: fewer steps means smoother motion.
    if voice_leading_info:
        step_count = voice_leading_info.get("steps", 0)
        if voice_leading_info.get("found", False) and step_count > 0:
            result.voice_leading_quality = max(0.0, 1.0 - (step_count - 1) * 0.15)

    return result
70
+
@@ -0,0 +1,193 @@
1
+ """Part of the _composition_engine package — extracted from the single-file engine.
2
+
3
+ Pure-computation core, no external deps. Callers should import from the package
4
+ facade (e.g. `from mcp_server.tools._composition_engine import X`), which
5
+ re-exports everything from these sub-modules.
6
+ """
7
+ from __future__ import annotations
8
+
9
+ import math
10
+ import re
11
+ from dataclasses import asdict, dataclass, field
12
+ from enum import Enum
13
+ from typing import Any, Optional
14
+
15
+
16
+ # ── Enums ─────────────────────────────────────────────────────────────
17
class SectionType(str, Enum):
    """Arrangement section categories inferred by the analysis pipeline."""
    LOOP = "loop"
    INTRO = "intro"
    VERSE = "verse"
    PRE_CHORUS = "pre_chorus"
    CHORUS = "chorus"
    BUILD = "build"
    DROP = "drop"
    BRIDGE = "bridge"
    BREAKDOWN = "breakdown"
    OUTRO = "outro"
    UNKNOWN = "unknown"  # catch-all when no type could be determined
29
+
30
class RoleType(str, Enum):
    """Musical roles a track can play within a section."""
    KICK_ANCHOR = "kick_anchor"
    BASS_ANCHOR = "bass_anchor"
    HOOK = "hook"
    LEAD = "lead"
    HARMONY_BED = "harmony_bed"
    RHYTHMIC_TEXTURE = "rhythmic_texture"
    TEXTURE_WASH = "texture_wash"
    TRANSITION_FX = "transition_fx"
    UTILITY = "utility"
    UNKNOWN = "unknown"  # catch-all when no role could be determined
41
+
42
class GestureIntent(str, Enum):
    """Abstract musical gestures that can be planned as automation moves."""
    REVEAL = "reveal"          # open up / unmask
    CONCEAL = "conceal"        # close down / darken
    HANDOFF = "handoff"        # one voice dims while another emerges
    INHALE = "inhale"          # pull energy back before an impact
    RELEASE = "release"        # restore weight/width/color after tension
    LIFT = "lift"              # upward energy
    SINK = "sink"              # settle downward
    PUNCTUATE = "punctuate"    # accent a single moment
    DRIFT = "drift"            # subtle ongoing motion
52
+
53
@dataclass
class SectionNode:
    """One arrangement section, with its inferred type and relative energy."""
    section_id: str
    start_bar: int
    end_bar: int
    section_type: SectionType
    confidence: float  # classifier confidence, 0.0-1.0
    energy: float  # energy relative to the rest of the track, 0.0-1.0
    density: float  # fraction of tracks active, 0.0-1.0
    tracks_active: list[int] = field(default_factory=list)
    name: str = ""

    def length_bars(self) -> int:
        """Length of the section in bars."""
        return self.end_bar - self.start_bar

    def to_dict(self) -> dict:
        """Serializable view: enum flattened to its value, length included."""
        return {
            **asdict(self),
            "section_type": self.section_type.value,
            "length_bars": self.length_bars(),
        }
74
+
75
+
76
+ # ── Phrase Grid ───────────────────────────────────────────────────────
77
@dataclass
class PhraseUnit:
    """A musical phrase within a section."""
    phrase_id: str
    section_id: str
    start_bar: int
    end_bar: int
    cadence_strength: float  # how strongly the phrase resolves, 0.0-1.0
    note_density: float  # notes per bar
    has_variation: bool  # whether it differs from adjacent phrases

    def length_bars(self) -> int:
        """Length of the phrase in bars."""
        return self.end_bar - self.start_bar

    def to_dict(self) -> dict:
        """Serializable view with the computed length included."""
        return {**asdict(self), "length_bars": self.length_bars()}
95
+
96
+
97
+ # ── Role Inference ────────────────────────────────────────────────────
98
@dataclass
class RoleNode:
    """The musical role a track plays within one specific section."""
    track_index: int
    track_name: str
    section_id: str
    role: RoleType
    confidence: float  # 0.0-1.0
    foreground: bool  # whether this is a focal element

    def to_dict(self) -> dict:
        """Serializable view with the role enum flattened to its value."""
        return {**asdict(self), "role": self.role.value}
112
+
113
+
114
+ # ── Composition Critics ───────────────────────────────────────────────
115
@dataclass
class CompositionIssue:
    """A structural or musical problem flagged by one of the critics."""
    issue_type: str
    critic: str  # which critic raised it: "form", "section_identity", "phrase"
    severity: float  # 0.0-1.0
    confidence: float  # 0.0-1.0
    scope: dict = field(default_factory=dict)  # e.g. {"section_id": "sec_01"}
    recommended_moves: list[str] = field(default_factory=list)
    evidence: str = ""

    def to_dict(self) -> dict:
        """Plain-dict view of all fields."""
        return asdict(self)
128
+
129
@dataclass
class GesturePlan:
    """A concrete automation plan derived from an abstract gesture intent."""
    gesture_id: str
    intent: GestureIntent
    description: str
    target_tracks: list[int]
    parameter_hints: list[str]
    curve_family: str
    direction: str
    start_bar: int
    end_bar: int
    foreground: bool  # True when the gesture is a musical focus, not background motion

    def to_dict(self) -> dict:
        """Serializable view: intent flattened to its value, duration added."""
        return {
            **asdict(self),
            "intent": self.intent.value,
            "duration_bars": self.end_bar - self.start_bar,
        }
148
+
149
+
150
+ # ── Full Analysis Pipeline ────────────────────────────────────────────
151
@dataclass
class CompositionAnalysis:
    """Aggregate result of the full composition-analysis pipeline."""
    sections: list[SectionNode]
    phrases: list[PhraseUnit]
    roles: list[RoleNode]
    issues: list[CompositionIssue]

    def to_dict(self) -> dict:
        """Serializable view with per-collection counts and a per-critic tally."""
        # Single pass over issues instead of one filter per critic name.
        tally = {"form": 0, "section_identity": 0, "phrase": 0, "transition": 0}
        for issue in self.issues:
            if issue.critic in tally:
                tally[issue.critic] += 1
        return {
            "sections": [section.to_dict() for section in self.sections],
            "section_count": len(self.sections),
            "phrases": [phrase.to_dict() for phrase in self.phrases],
            "phrase_count": len(self.phrases),
            "roles": [role.to_dict() for role in self.roles],
            "role_count": len(self.roles),
            "issues": [issue.to_dict() for issue in self.issues],
            "issue_count": len(self.issues),
            "issue_summary": tally,
        }
176
+
177
+
178
+ # ── Harmony Field (Round 1) ──────────────────────────────────────────
179
@dataclass
class HarmonyField:
    """Harmonic profile of a section — key, chords, voice leading, tension."""
    section_id: str
    key: str = ""
    mode: str = ""
    confidence: float = 0.0  # key-detection confidence
    chord_progression: list[str] = field(default_factory=list)
    voice_leading_quality: float = 0.5  # 0 = rough, 1 = smooth
    instability: float = 0.0  # 0 = stable/tonic, 1 = highly unstable
    resolution_potential: float = 0.5  # tendency toward resolution

    def to_dict(self) -> dict:
        """Plain-dict view of all fields."""
        return asdict(self)
193
+