livepilot 1.9.13 → 1.9.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/AGENTS.md +3 -3
- package/CHANGELOG.md +51 -0
- package/CONTRIBUTING.md +1 -1
- package/README.md +7 -7
- package/bin/livepilot.js +32 -8
- package/installer/install.js +21 -2
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
- package/livepilot/skills/livepilot-core/SKILL.md +81 -6
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
- package/livepilot/skills/livepilot-core/references/overview.md +3 -3
- package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
- package/livepilot/skills/livepilot-release/SKILL.md +13 -13
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +6 -3
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/curves.py +11 -3
- package/mcp_server/evaluation/__init__.py +1 -0
- package/mcp_server/evaluation/fabric.py +575 -0
- package/mcp_server/evaluation/feature_extractors.py +84 -0
- package/mcp_server/evaluation/policy.py +67 -0
- package/mcp_server/evaluation/tools.py +53 -0
- package/mcp_server/memory/__init__.py +11 -2
- package/mcp_server/memory/anti_memory.py +78 -0
- package/mcp_server/memory/promotion.py +94 -0
- package/mcp_server/memory/session_memory.py +108 -0
- package/mcp_server/memory/taste_memory.py +158 -0
- package/mcp_server/memory/technique_store.py +2 -1
- package/mcp_server/memory/tools.py +112 -0
- package/mcp_server/mix_engine/__init__.py +1 -0
- package/mcp_server/mix_engine/critics.py +299 -0
- package/mcp_server/mix_engine/models.py +152 -0
- package/mcp_server/mix_engine/planner.py +103 -0
- package/mcp_server/mix_engine/state_builder.py +316 -0
- package/mcp_server/mix_engine/tools.py +214 -0
- package/mcp_server/performance_engine/__init__.py +1 -0
- package/mcp_server/performance_engine/models.py +148 -0
- package/mcp_server/performance_engine/planner.py +267 -0
- package/mcp_server/performance_engine/safety.py +162 -0
- package/mcp_server/performance_engine/tools.py +183 -0
- package/mcp_server/project_brain/__init__.py +6 -0
- package/mcp_server/project_brain/arrangement_graph.py +64 -0
- package/mcp_server/project_brain/automation_graph.py +72 -0
- package/mcp_server/project_brain/builder.py +123 -0
- package/mcp_server/project_brain/capability_graph.py +64 -0
- package/mcp_server/project_brain/models.py +282 -0
- package/mcp_server/project_brain/refresh.py +80 -0
- package/mcp_server/project_brain/role_graph.py +103 -0
- package/mcp_server/project_brain/session_graph.py +51 -0
- package/mcp_server/project_brain/tools.py +144 -0
- package/mcp_server/reference_engine/__init__.py +1 -0
- package/mcp_server/reference_engine/gap_analyzer.py +239 -0
- package/mcp_server/reference_engine/models.py +105 -0
- package/mcp_server/reference_engine/profile_builder.py +149 -0
- package/mcp_server/reference_engine/tactic_router.py +117 -0
- package/mcp_server/reference_engine/tools.py +235 -0
- package/mcp_server/runtime/__init__.py +1 -0
- package/mcp_server/runtime/action_ledger.py +117 -0
- package/mcp_server/runtime/action_ledger_models.py +84 -0
- package/mcp_server/runtime/action_tools.py +57 -0
- package/mcp_server/runtime/capability_state.py +218 -0
- package/mcp_server/runtime/safety_kernel.py +339 -0
- package/mcp_server/runtime/safety_tools.py +42 -0
- package/mcp_server/runtime/tools.py +64 -0
- package/mcp_server/server.py +23 -1
- package/mcp_server/sound_design/__init__.py +1 -0
- package/mcp_server/sound_design/critics.py +297 -0
- package/mcp_server/sound_design/models.py +147 -0
- package/mcp_server/sound_design/planner.py +104 -0
- package/mcp_server/sound_design/tools.py +297 -0
- package/mcp_server/tools/_agent_os_engine.py +947 -0
- package/mcp_server/tools/_composition_engine.py +1530 -0
- package/mcp_server/tools/_conductor.py +199 -0
- package/mcp_server/tools/_conductor_budgets.py +222 -0
- package/mcp_server/tools/_evaluation_contracts.py +91 -0
- package/mcp_server/tools/_form_engine.py +416 -0
- package/mcp_server/tools/_motif_engine.py +351 -0
- package/mcp_server/tools/_planner_engine.py +516 -0
- package/mcp_server/tools/_research_engine.py +542 -0
- package/mcp_server/tools/_research_provider.py +185 -0
- package/mcp_server/tools/_snapshot_normalizer.py +49 -0
- package/mcp_server/tools/agent_os.py +440 -0
- package/mcp_server/tools/analyzer.py +18 -0
- package/mcp_server/tools/automation.py +25 -10
- package/mcp_server/tools/composition.py +563 -0
- package/mcp_server/tools/motif.py +104 -0
- package/mcp_server/tools/planner.py +144 -0
- package/mcp_server/tools/research.py +223 -0
- package/mcp_server/tools/tracks.py +18 -3
- package/mcp_server/tools/transport.py +10 -2
- package/mcp_server/transition_engine/__init__.py +6 -0
- package/mcp_server/transition_engine/archetypes.py +167 -0
- package/mcp_server/transition_engine/critics.py +340 -0
- package/mcp_server/transition_engine/models.py +90 -0
- package/mcp_server/transition_engine/tools.py +291 -0
- package/mcp_server/translation_engine/__init__.py +5 -0
- package/mcp_server/translation_engine/critics.py +297 -0
- package/mcp_server/translation_engine/models.py +27 -0
- package/mcp_server/translation_engine/tools.py +74 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/arrangement.py +12 -2
- package/requirements.txt +1 -1
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""Automation graph builder — scans track devices for automation presence.
|
|
2
|
+
|
|
3
|
+
Pure computation, zero I/O.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from .models import AutomationGraph
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def build_automation_graph(
    track_infos: list[dict],
    sections: list[dict] | None = None,
) -> AutomationGraph:
    """Build an AutomationGraph by scanning track device info for automation.

    Args:
        track_infos: list of per-track info dicts. Each may contain:
            - index: track index
            - name: track name
            - devices: [{name, class_name, parameters: [{name, value, is_automated, ...}]}]
        sections: optional list of section dicts (for density_by_section).

    Returns:
        AutomationGraph with automated_params and density_by_section.
    """
    graph = AutomationGraph()

    if not track_infos:
        return graph

    automated_params = []

    for track in track_infos:
        t_idx = track.get("index", 0)
        t_name = track.get("name", "")

        for device in track.get("devices", []):
            # Fall back to class_name when the device has no display name.
            device_name = device.get("name", device.get("class_name", ""))

            for param in device.get("parameters", []):
                # BUG FIX: `.get("automation_state", 0)` only covers a MISSING
                # key; a key present with value None would raise TypeError on
                # the `> 0` comparison. Coerce None to 0 with `or 0`.
                automation_state = param.get("automation_state") or 0
                if param.get("is_automated") or automation_state > 0:
                    automated_params.append({
                        "track_index": t_idx,
                        "track_name": t_name,
                        "device_name": device_name,
                        "param_name": param.get("name", ""),
                        "param_value": param.get("value"),
                    })

    graph.automated_params = automated_params

    # Compute density_by_section if sections are provided.
    # Without per-section automation data, approximate: weight each section's
    # activity density by the overall automated-param / track ratio (capped
    # at 1.0). The ratio is loop-invariant, so hoist it out of the loop.
    if sections:
        total_automated = len(automated_params)
        track_ratio = min(total_automated / max(len(track_infos), 1), 1.0)
        for sec in sections:
            section_id = sec.get("section_id", "")
            sec_density = sec.get("density", 0.0)
            if total_automated > 0:
                graph.density_by_section[section_id] = round(
                    sec_density * track_ratio, 3,
                )
            else:
                graph.density_by_section[section_id] = 0.0

    return graph
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
"""Project Brain build pipeline — orchestrates full state construction.
|
|
2
|
+
|
|
3
|
+
Pure computation, zero I/O. MCP tool wrappers call this with
|
|
4
|
+
pre-fetched data from Ableton.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from typing import Any, Optional
|
|
10
|
+
|
|
11
|
+
from .arrangement_graph import build_arrangement_graph
|
|
12
|
+
from .automation_graph import build_automation_graph
|
|
13
|
+
from .capability_graph import build_capability_graph
|
|
14
|
+
from .models import ProjectState
|
|
15
|
+
from .role_graph import build_role_graph
|
|
16
|
+
from .session_graph import build_session_graph
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def build_project_state_from_data(
    session_info: dict,
    scenes: Optional[list[dict]] = None,
    clip_matrix: Optional[list[list[dict]]] = None,
    track_infos: Optional[list[dict]] = None,
    notes_map: Optional[dict[str, dict[int, list[dict]]]] = None,
    arrangement_clips: Optional[dict] = None,
    analyzer_ok: bool = False,
    flucoma_ok: bool = False,
    plugin_health: Optional[dict[str, Any]] = None,
    session_ok: bool = True,
    memory_ok: bool = False,
    web_ok: bool = False,
    analyzer_fresh: bool = False,
    previous_revision: int = 0,
) -> ProjectState:
    """Build a full ProjectState from pre-fetched data.

    Args:
        session_info: raw get_session_info output.
        scenes: list of scene dicts for arrangement graph.
        clip_matrix: [scene_index][track_index] clip slot dicts.
        track_infos: list of per-track info dicts (devices, params).
        notes_map: {section_id: {track_index: [notes]}} for role inference.
        arrangement_clips: legacy dict of track_index -> clip list.
        analyzer_ok: whether M4L analyzer bridge is responding.
        flucoma_ok: whether FluCoMa is available.
        plugin_health: dict of plugin_name -> health info.
        session_ok: whether Ableton session is reachable.
        memory_ok: whether technique memory is available.
        web_ok: whether web research is available.
        analyzer_fresh: whether analyzer data is fresh.
        previous_revision: last known revision number.

    Returns:
        ProjectState with incremented revision and all subgraphs populated.
    """
    state = ProjectState()
    state.revision = previous_revision + 1

    # 1. Session graph (always built)
    state.session_graph = build_session_graph(session_info)
    state.session_graph.freshness.mark_fresh(state.revision)

    # 2. Arrangement graph
    track_count = len(session_info.get("tracks", []))
    if scenes and clip_matrix:
        # New path: real scene-based section inference
        state.arrangement_graph = build_arrangement_graph(
            scenes, clip_matrix, track_count,
        )
    elif arrangement_clips:
        # Legacy path: arrangement clips from per-track fetch
        state.arrangement_graph = _arrangement_from_legacy_clips(arrangement_clips)
    # else: leave as default empty ArrangementGraph
    state.arrangement_graph.freshness.mark_fresh(state.revision)

    # Serialize sections once — both the role graph and the automation graph
    # consume the same dict form (previously this list was built twice).
    section_dicts = [s.to_dict() for s in state.arrangement_graph.sections]

    # 3. Role graph (needs both sections and track data)
    if section_dicts and track_infos:
        state.role_graph = build_role_graph(
            sections=section_dicts,
            track_data=track_infos,
            notes_map=notes_map or {},
        )
        state.role_graph.freshness.mark_fresh(state.revision)

    # 4. Automation graph (always built; empty inputs yield an empty graph)
    state.automation_graph = build_automation_graph(
        track_infos=track_infos or [],
        sections=section_dicts or None,
    )
    state.automation_graph.freshness.mark_fresh(state.revision)

    # 5. Capability graph
    state.capability_graph = build_capability_graph(
        analyzer_ok=analyzer_ok,
        flucoma_ok=flucoma_ok,
        plugin_health=plugin_health,
        session_ok=session_ok,
        memory_ok=memory_ok,
        web_ok=web_ok,
        analyzer_fresh=analyzer_fresh,
    )
    state.capability_graph.freshness.mark_fresh(state.revision)

    return state


def _arrangement_from_legacy_clips(arrangement_clips: dict) -> "ArrangementGraph":
    """Build a coarse ArrangementGraph from legacy per-track clip lists.

    Each clip becomes one SectionNode keyed by track and clip index; the clip
    name doubles as section_type (mirrors the previous inline logic exactly).
    """
    # Local import matches the original's deferred import of these models.
    from .models import ArrangementGraph, SectionNode

    arr = ArrangementGraph()
    arr.sections = [
        SectionNode(
            section_id=f"t{track_idx}_c{clip.get('index', 0)}",
            start_bar=int(clip.get("start_time", 0)),
            end_bar=int(clip.get("end_time", 0)),
            section_type=clip.get("name", "unknown"),
        )
        for track_idx, clips in arrangement_clips.items()
        for clip in clips
    ]
    return arr
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
"""Capability graph builder — maps runtime checks to CapabilityGraph fields.
|
|
2
|
+
|
|
3
|
+
Reuses runtime/capability_state.build_capability_state for the domain logic,
|
|
4
|
+
then maps results to the brain CapabilityGraph model.
|
|
5
|
+
|
|
6
|
+
Pure computation, zero I/O.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from ..runtime.capability_state import build_capability_state
|
|
14
|
+
from .models import CapabilityGraph
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def build_capability_graph(
    analyzer_ok: bool = False,
    flucoma_ok: bool = False,
    plugin_health: dict[str, Any] | None = None,
    session_ok: bool = True,
    memory_ok: bool = False,
    web_ok: bool = False,
    analyzer_fresh: bool = False,
) -> CapabilityGraph:
    """Build a CapabilityGraph from runtime probe results.

    Args:
        analyzer_ok: whether the M4L analyzer bridge is responding.
        flucoma_ok: whether FluCoMa is available.
        plugin_health: dict of plugin_name -> health info (parameter_count, etc.).
        session_ok: whether Ableton session is reachable.
        memory_ok: whether technique memory is available.
        web_ok: whether web research is available.
        analyzer_fresh: whether analyzer data is fresh (< 5s old).

    Returns:
        CapabilityGraph with all fields populated (freshness unfreshed).
    """
    # Domain reasoning lives in the runtime capability-state builder.
    cap_state = build_capability_state(
        session_ok=session_ok,
        analyzer_ok=analyzer_ok,
        analyzer_fresh=analyzer_fresh,
        memory_ok=memory_ok,
        web_ok=web_ok,
        flucoma_ok=flucoma_ok,
    )

    # Map the result onto the brain-side model. The analyzer only counts as
    # available when its data is also fresh.
    graph = CapabilityGraph(
        analyzer_available=analyzer_ok and analyzer_fresh,
        flucoma_available=flucoma_ok,
        plugin_health=plugin_health or {},
    )

    # Research providers: the subset of recognized provider domains whose
    # probe succeeded, in sorted order.
    provider_domains = ("session_access", "memory", "web")
    graph.research_providers = sorted(
        name
        for name, domain in cap_state.domains.items()
        if domain.available and name in provider_domains
    )

    return graph
|
|
@@ -0,0 +1,282 @@
|
|
|
1
|
+
"""Project Brain data models — all dataclasses with to_dict().
|
|
2
|
+
|
|
3
|
+
Zero I/O. Pure data structures representing the five subgraphs
|
|
4
|
+
(SessionGraph, ArrangementGraph, RoleGraph, AutomationGraph,
|
|
5
|
+
CapabilityGraph) plus freshness/confidence metadata.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import time
|
|
11
|
+
import uuid
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from typing import Any, Optional
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# ── Freshness ────────────────────────────────────────────────────────
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class FreshnessInfo:
    """Tracks when a subgraph was built and whether it is stale."""

    built_at_ms: float = 0.0          # wall-clock build time, milliseconds
    source_revision: int = 0          # ProjectState revision it was built from
    stale: bool = True                # starts stale until first mark_fresh()
    stale_reason: Optional[str] = "never built"

    def mark_fresh(self, revision: int) -> None:
        """Record a successful (re)build at *revision* and clear staleness."""
        self.built_at_ms = time.time() * 1000
        self.source_revision = revision
        self.stale = False
        self.stale_reason = None

    def mark_stale(self, reason: str) -> None:
        """Flag this subgraph as stale, remembering why."""
        self.stale = True
        self.stale_reason = reason

    def to_dict(self) -> dict:
        """Serialize to a plain dict."""
        return dict(
            built_at_ms=self.built_at_ms,
            source_revision=self.source_revision,
            stale=self.stale,
            stale_reason=self.stale_reason,
        )
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# ── Confidence ───────────────────────────────────────────────────────
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclass
class ConfidenceInfo:
    """Confidence summary for inference-bearing graphs."""

    overall: float = 0.0                                       # aggregate 0..1 score
    low_confidence_nodes: list[str] = field(default_factory=list)  # node ids needing review

    def to_dict(self) -> dict:
        """Serialize to a plain dict; the node list is shallow-copied."""
        return {
            "overall": self.overall,
            "low_confidence_nodes": self.low_confidence_nodes.copy(),
        }
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
# ── SessionGraph ─────────────────────────────────────────────────────
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
@dataclass
class TrackNode:
    """A single track in the session."""

    index: int = 0
    name: str = ""
    has_midi: bool = False
    has_audio: bool = False
    mute: bool = False
    solo: bool = False
    arm: bool = False
    group_index: Optional[int] = None  # parent group track, if grouped

    def to_dict(self) -> dict:
        """Serialize to a plain dict (field order preserved)."""
        return dict(
            index=self.index,
            name=self.name,
            has_midi=self.has_midi,
            has_audio=self.has_audio,
            mute=self.mute,
            solo=self.solo,
            arm=self.arm,
            group_index=self.group_index,
        )
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@dataclass
class SessionGraph:
    """Physical/session topology — tracks, returns, scenes, tempo."""

    tracks: list[TrackNode] = field(default_factory=list)
    return_tracks: list[dict] = field(default_factory=list)
    scenes: list[dict] = field(default_factory=list)
    tempo: float = 120.0
    time_signature: str = "4/4"
    freshness: FreshnessInfo = field(default_factory=FreshnessInfo)

    def add_track(self, track: TrackNode) -> None:
        """Append *track* to the session's track list."""
        self.tracks.append(track)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; list fields are shallow-copied."""
        return dict(
            tracks=[node.to_dict() for node in self.tracks],
            return_tracks=self.return_tracks[:],
            scenes=self.scenes[:],
            tempo=self.tempo,
            time_signature=self.time_signature,
            freshness=self.freshness.to_dict(),
        )
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
# ── ArrangementGraph ─────────────────────────────────────────────────
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
@dataclass
class SectionNode:
    """A section in the arrangement timeline."""

    section_id: str = ""
    start_bar: int = 0
    end_bar: int = 0
    section_type: str = "unknown"  # e.g. verse/chorus; "unknown" when uninferred
    energy: float = 0.0
    density: float = 0.0

    def to_dict(self) -> dict:
        """Serialize to a plain dict (field order preserved)."""
        return dict(
            section_id=self.section_id,
            start_bar=self.start_bar,
            end_bar=self.end_bar,
            section_type=self.section_type,
            energy=self.energy,
            density=self.density,
        )
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
@dataclass
class ArrangementGraph:
    """Time-structure layer — sections, boundaries, cue points."""

    sections: list[SectionNode] = field(default_factory=list)
    boundaries: list[dict] = field(default_factory=list)
    cue_points: list[dict] = field(default_factory=list)
    freshness: FreshnessInfo = field(default_factory=FreshnessInfo)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; list fields are shallow-copied."""
        return dict(
            sections=[node.to_dict() for node in self.sections],
            boundaries=self.boundaries[:],
            cue_points=self.cue_points[:],
            freshness=self.freshness.to_dict(),
        )
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
# ── RoleGraph ────────────────────────────────────────────────────────
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
@dataclass
class RoleNode:
    """Maps a musical function to a track within a section."""

    track_index: int = 0
    section_id: str = ""
    role: str = "unknown"       # e.g. bass/lead/pad; "unknown" when uninferred
    confidence: float = 0.0     # inference confidence, 0..1
    foreground: bool = False    # whether this role leads the mix in the section

    def to_dict(self) -> dict:
        """Serialize to a plain dict (field order preserved)."""
        return dict(
            track_index=self.track_index,
            section_id=self.section_id,
            role=self.role,
            confidence=self.confidence,
            foreground=self.foreground,
        )
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
@dataclass
class RoleGraph:
    """Musical function assignments across tracks and sections."""

    roles: list[RoleNode] = field(default_factory=list)
    confidence: ConfidenceInfo = field(default_factory=ConfidenceInfo)
    freshness: FreshnessInfo = field(default_factory=FreshnessInfo)

    def add_role(self, role: RoleNode) -> None:
        """Append *role* to the graph."""
        self.roles.append(role)

    def to_dict(self) -> dict:
        """Serialize to a plain dict, delegating to each member's to_dict()."""
        return dict(
            roles=[node.to_dict() for node in self.roles],
            confidence=self.confidence.to_dict(),
            freshness=self.freshness.to_dict(),
        )
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
# ── AutomationGraph ──────────────────────────────────────────────────
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
@dataclass
class AutomationGraph:
    """Automation presence and gesture density."""

    automated_params: list[dict] = field(default_factory=list)       # one entry per automated param
    density_by_section: dict[str, float] = field(default_factory=dict)  # section_id -> density
    freshness: FreshnessInfo = field(default_factory=FreshnessInfo)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; containers are shallow-copied."""
        return dict(
            automated_params=self.automated_params[:],
            density_by_section={**self.density_by_section},
            freshness=self.freshness.to_dict(),
        )
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
# ── CapabilityGraph ──────────────────────────────────────────────────
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
@dataclass
class CapabilityGraph:
    """Runtime capability awareness — what tools/features are available."""

    analyzer_available: bool = False
    flucoma_available: bool = False
    plugin_health: dict[str, Any] = field(default_factory=dict)      # plugin_name -> health info
    research_providers: list[str] = field(default_factory=list)      # sorted provider names
    freshness: FreshnessInfo = field(default_factory=FreshnessInfo)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; containers are shallow-copied."""
        return dict(
            analyzer_available=self.analyzer_available,
            flucoma_available=self.flucoma_available,
            plugin_health={**self.plugin_health},
            research_providers=self.research_providers[:],
            freshness=self.freshness.to_dict(),
        )
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# ── ProjectState ─────────────────────────────────────────────────────
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
@dataclass
class ProjectState:
    """Top-level container — one canonical project snapshot."""

    project_id: str = field(default_factory=lambda: str(uuid.uuid4())[:8])  # short random id
    revision: int = 0
    session_graph: SessionGraph = field(default_factory=SessionGraph)
    arrangement_graph: ArrangementGraph = field(default_factory=ArrangementGraph)
    role_graph: RoleGraph = field(default_factory=RoleGraph)
    automation_graph: AutomationGraph = field(default_factory=AutomationGraph)
    capability_graph: CapabilityGraph = field(default_factory=CapabilityGraph)
    active_issues: list[dict] = field(default_factory=list)

    def is_stale(self) -> bool:
        """True if any subgraph is stale."""
        subgraphs = (
            self.session_graph,
            self.arrangement_graph,
            self.role_graph,
            self.automation_graph,
            self.capability_graph,
        )
        return any(g.freshness.stale for g in subgraphs)

    def to_dict(self) -> dict:
        """Serialize the full snapshot, delegating to each subgraph."""
        return dict(
            project_id=self.project_id,
            revision=self.revision,
            session_graph=self.session_graph.to_dict(),
            arrangement_graph=self.arrangement_graph.to_dict(),
            role_graph=self.role_graph.to_dict(),
            automation_graph=self.automation_graph.to_dict(),
            capability_graph=self.capability_graph.to_dict(),
            active_issues=self.active_issues[:],
            is_stale=self.is_stale(),
        )
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""Scoped refresh operations — update specific subgraphs without full rebuild.
|
|
2
|
+
|
|
3
|
+
Pure computation, zero I/O.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import copy
|
|
9
|
+
from typing import Optional
|
|
10
|
+
|
|
11
|
+
from .arrangement_graph import build_arrangement_graph
|
|
12
|
+
from .models import ProjectState
|
|
13
|
+
from .session_graph import build_session_graph
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def refresh_tracks(
    state: ProjectState,
    track_indices: list[int],
    session_info: dict,
) -> ProjectState:
    """Refresh specific tracks in the session graph without full rebuild.

    Args:
        state: current ProjectState (not mutated — a new copy is returned).
        track_indices: which track indices to refresh.
        session_info: fresh get_session_info output.

    Returns:
        New ProjectState with updated session graph and bumped revision.
    """
    new_state = copy.copy(state)
    new_state.revision = state.revision + 1

    # Rebuild session graph fully (it's cheap) then mark fresh
    new_state.session_graph = build_session_graph(session_info)
    new_state.session_graph.freshness.mark_fresh(new_state.revision)

    # Mark role and automation graphs stale since tracks changed.
    # BUG FIX: copy.copy() on a subgraph shares its FreshnessInfo instance
    # with the input state, so mark_stale() would mutate the caller's state
    # and break the "not mutated" contract above. deepcopy gives each new
    # subgraph its own freshness record.
    reason = f"tracks refreshed: {track_indices}"
    new_state.role_graph = copy.deepcopy(state.role_graph)
    new_state.role_graph.freshness.mark_stale(reason)
    new_state.automation_graph = copy.deepcopy(state.automation_graph)
    new_state.automation_graph.freshness.mark_stale(reason)

    return new_state
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def refresh_arrangement(
    state: ProjectState,
    scenes: list[dict],
    clip_matrix: list[list[dict]],
    track_count: int,
) -> ProjectState:
    """Refresh the arrangement graph without full rebuild.

    Args:
        state: current ProjectState (not mutated — a new copy is returned).
        scenes: fresh scene list.
        clip_matrix: fresh clip matrix.
        track_count: total track count.

    Returns:
        New ProjectState with updated arrangement graph and bumped revision.
    """
    new_state = copy.copy(state)
    new_state.revision = state.revision + 1

    new_state.arrangement_graph = build_arrangement_graph(
        scenes, clip_matrix, track_count,
    )
    new_state.arrangement_graph.freshness.mark_fresh(new_state.revision)

    # Mark role graph stale since arrangement changed.
    # BUG FIX: copy.copy() would share the FreshnessInfo instance with the
    # input state, so mark_stale() would mutate the caller's state; deepcopy
    # keeps the input state untouched as documented.
    new_state.role_graph = copy.deepcopy(state.role_graph)
    new_state.role_graph.freshness.mark_stale("arrangement refreshed")

    return new_state
|