livepilot 1.9.23 → 1.9.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.claude-plugin/marketplace.json +1 -1
  2. package/AGENTS.md +2 -2
  3. package/CHANGELOG.md +46 -0
  4. package/README.md +94 -0
  5. package/livepilot/.Codex-plugin/plugin.json +1 -1
  6. package/livepilot/.claude-plugin/plugin.json +1 -1
  7. package/livepilot/skills/livepilot-core/references/overview.md +1 -1
  8. package/livepilot/skills/livepilot-release/SKILL.md +14 -0
  9. package/livepilot.mcpb +0 -0
  10. package/manifest.json +1 -1
  11. package/mcp_server/__init__.py +1 -1
  12. package/mcp_server/hook_hunter/analyzer.py +23 -0
  13. package/mcp_server/hook_hunter/models.py +1 -0
  14. package/mcp_server/hook_hunter/tools.py +4 -2
  15. package/mcp_server/memory/taste_graph.py +68 -1
  16. package/mcp_server/memory/tools.py +15 -4
  17. package/mcp_server/musical_intelligence/detectors.py +14 -1
  18. package/mcp_server/musical_intelligence/tools.py +11 -8
  19. package/mcp_server/persistence/__init__.py +1 -0
  20. package/mcp_server/persistence/base_store.py +82 -0
  21. package/mcp_server/persistence/project_store.py +106 -0
  22. package/mcp_server/persistence/taste_store.py +122 -0
  23. package/mcp_server/preview_studio/models.py +1 -0
  24. package/mcp_server/preview_studio/tools.py +56 -13
  25. package/mcp_server/runtime/capability.py +66 -0
  26. package/mcp_server/runtime/capability_probe.py +118 -0
  27. package/mcp_server/runtime/execution_router.py +139 -0
  28. package/mcp_server/runtime/remote_commands.py +82 -0
  29. package/mcp_server/semantic_moves/mix_moves.py +41 -41
  30. package/mcp_server/semantic_moves/performance_moves.py +13 -13
  31. package/mcp_server/semantic_moves/sound_design_moves.py +15 -15
  32. package/mcp_server/semantic_moves/tools.py +18 -17
  33. package/mcp_server/semantic_moves/transition_moves.py +16 -16
  34. package/mcp_server/services/__init__.py +1 -0
  35. package/mcp_server/services/motif_service.py +67 -0
  36. package/mcp_server/session_continuity/tracker.py +29 -1
  37. package/mcp_server/song_brain/builder.py +28 -1
  38. package/mcp_server/song_brain/models.py +4 -0
  39. package/mcp_server/song_brain/tools.py +20 -2
  40. package/mcp_server/wonder_mode/tools.py +6 -1
  41. package/package.json +1 -1
  42. package/remote_script/LivePilot/__init__.py +1 -1
  43. package/scripts/sync_metadata.py +132 -0
@@ -11,12 +11,12 @@ INCREASE_FORWARD_MOTION = SemanticMove(
11
11
  protect={"clarity": 0.6},
12
12
  risk_level="low",
13
13
  compile_plan=[
14
- {"tool": "apply_automation_shape", "params": {"curve_type": "exponential", "description": "Rising filter cutoff over 4 bars"}, "description": "Rising filter sweep"},
15
- {"tool": "set_track_volume", "params": {"description": "Push rhythm elements +5-8%"}, "description": "Push rhythm forward"},
16
- {"tool": "apply_automation_shape", "params": {"curve_type": "linear", "description": "Rising reverb send for anticipation"}, "description": "Build reverb wash"},
14
+ {"tool": "apply_automation_shape", "params": {"curve_type": "exponential", "description": "Rising filter cutoff over 4 bars"}, "description": "Rising filter sweep", "backend": "mcp_tool"},
15
+ {"tool": "set_track_volume", "params": {"description": "Push rhythm elements +5-8%"}, "description": "Push rhythm forward", "backend": "remote_command"},
16
+ {"tool": "apply_automation_shape", "params": {"curve_type": "linear", "description": "Rising reverb send for anticipation"}, "description": "Build reverb wash", "backend": "mcp_tool"},
17
17
  ],
18
18
  verification_plan=[
19
- {"tool": "get_track_meters", "check": "energy increasing, all tracks alive"},
19
+ {"tool": "get_track_meters", "check": "energy increasing, all tracks alive", "backend": "remote_command"},
20
20
  ],
21
21
  )
22
22
 
@@ -28,13 +28,13 @@ OPEN_CHORUS = SemanticMove(
28
28
  protect={"clarity": 0.6, "cohesion": 0.5},
29
29
  risk_level="medium",
30
30
  compile_plan=[
31
- {"tool": "set_track_volume", "params": {"description": "Push all melodic tracks +10-15%"}, "description": "Push chorus energy"},
32
- {"tool": "set_track_pan", "params": {"description": "Widen stereo field on chords/pads"}, "description": "Widen stereo"},
33
- {"tool": "set_track_send", "params": {"description": "Increase reverb/delay sends for spaciousness"}, "description": "Add space"},
31
+ {"tool": "set_track_volume", "params": {"description": "Push all melodic tracks +10-15%"}, "description": "Push chorus energy", "backend": "remote_command"},
32
+ {"tool": "set_track_pan", "params": {"description": "Widen stereo field on chords/pads"}, "description": "Widen stereo", "backend": "remote_command"},
33
+ {"tool": "set_track_send", "params": {"description": "Increase reverb/delay sends for spaciousness"}, "description": "Add space", "backend": "remote_command"},
34
34
  ],
35
35
  verification_plan=[
36
- {"tool": "get_track_meters", "check": "overall energy increased, stereo field wider"},
37
- {"tool": "analyze_mix", "check": "no clipping, stereo.mono_risk is false"},
36
+ {"tool": "get_track_meters", "check": "overall energy increased, stereo field wider", "backend": "remote_command"},
37
+ {"tool": "analyze_mix", "check": "no clipping, stereo.mono_risk is false", "backend": "mcp_tool"},
38
38
  ],
39
39
  )
40
40
 
@@ -46,12 +46,12 @@ CREATE_BREAKDOWN = SemanticMove(
46
46
  protect={"cohesion": 0.5},
47
47
  risk_level="medium",
48
48
  compile_plan=[
49
- {"tool": "set_track_volume", "params": {"description": "Pull drums to 20-30%"}, "description": "Strip drums"},
50
- {"tool": "set_track_volume", "params": {"description": "Pull bass to 30-40%"}, "description": "Reduce bass"},
51
- {"tool": "set_track_send", "params": {"description": "Increase reverb send on remaining elements"}, "description": "Add reverb depth"},
49
+ {"tool": "set_track_volume", "params": {"description": "Pull drums to 20-30%"}, "description": "Strip drums", "backend": "remote_command"},
50
+ {"tool": "set_track_volume", "params": {"description": "Pull bass to 30-40%"}, "description": "Reduce bass", "backend": "remote_command"},
51
+ {"tool": "set_track_send", "params": {"description": "Increase reverb send on remaining elements"}, "description": "Add reverb depth", "backend": "remote_command"},
52
52
  ],
53
53
  verification_plan=[
54
- {"tool": "get_track_meters", "check": "energy significantly reduced, at least one element still prominent"},
54
+ {"tool": "get_track_meters", "check": "energy significantly reduced, at least one element still prominent", "backend": "remote_command"},
55
55
  ],
56
56
  )
57
57
 
@@ -63,11 +63,11 @@ BRIDGE_SECTIONS = SemanticMove(
63
63
  protect={"clarity": 0.6},
64
64
  risk_level="low",
65
65
  compile_plan=[
66
- {"tool": "apply_automation_shape", "params": {"curve_type": "cosine", "description": "Gentle filter sweep across bridge"}, "description": "Bridge filter motion"},
67
- {"tool": "set_track_volume", "params": {"description": "Gentle volume crossfade between section elements"}, "description": "Crossfade elements"},
66
+ {"tool": "apply_automation_shape", "params": {"curve_type": "cosine", "description": "Gentle filter sweep across bridge"}, "description": "Bridge filter motion", "backend": "mcp_tool"},
67
+ {"tool": "set_track_volume", "params": {"description": "Gentle volume crossfade between section elements"}, "description": "Crossfade elements", "backend": "remote_command"},
68
68
  ],
69
69
  verification_plan=[
70
- {"tool": "get_track_meters", "check": "smooth level transition, no dropouts"},
70
+ {"tool": "get_track_meters", "check": "smooth level transition, no dropouts", "backend": "remote_command"},
71
71
  ],
72
72
  )
73
73
 
@@ -0,0 +1 @@
1
+ """Shared services consumed by multiple domains."""
@@ -0,0 +1,67 @@
1
+ """Shared motif service — one entry point for all motif consumers.
2
+
3
+ SongBrain, HookHunter, and musical_intelligence all import from here
4
+ instead of making ad-hoc calls to the motif engine or TCP.
5
+
6
+ Pure computation — no I/O. Callers provide pre-fetched data.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from typing import Optional
12
+
13
+
14
+ def get_motif_data(
15
+ notes_by_track: dict[int, list[dict]],
16
+ ) -> dict:
17
+ """Extract motif data from pre-fetched notes.
18
+
19
+ Args:
20
+ notes_by_track: {track_index: [note_dicts]} from get_notes calls
21
+
22
+ Returns:
23
+ Motif analysis dict with motifs, motif_count, tracks_analyzed.
24
+ Returns empty result if no notes or engine unavailable.
25
+ """
26
+ if not notes_by_track:
27
+ return {"motifs": [], "motif_count": 0, "tracks_analyzed": 0}
28
+
29
+ try:
30
+ from ..tools import _motif_engine as motif_engine
31
+ motifs = motif_engine.detect_motifs(notes_by_track)
32
+ return {
33
+ "motifs": [m.to_dict() for m in motifs],
34
+ "motif_count": len(motifs),
35
+ "tracks_analyzed": len(notes_by_track),
36
+ }
37
+ except Exception:
38
+ return {"motifs": [], "motif_count": 0, "tracks_analyzed": 0}
39
+
40
+
41
+ def fetch_notes_from_ableton(ableton, tracks: list[dict], max_clips: int = 8) -> dict[int, list[dict]]:
42
+ """Fetch notes from Ableton for motif analysis.
43
+
44
+ This is the I/O helper — calls get_notes through valid TCP commands.
45
+ Callers pass the ableton connection; this function does the fetching.
46
+ """
47
+ notes_by_track: dict[int, list[dict]] = {}
48
+ for track in tracks:
49
+ t_idx = track.get("index", 0)
50
+ if not track.get("has_midi_input", False) and not any(
51
+ kw in track.get("name", "").lower()
52
+ for kw in ("midi", "synth", "bass", "lead", "pad", "keys", "piano", "chord")
53
+ ):
54
+ continue
55
+ track_notes = []
56
+ for clip_idx in range(max_clips):
57
+ try:
58
+ result = ableton.send_command("get_notes", {
59
+ "track_index": t_idx,
60
+ "clip_index": clip_idx,
61
+ })
62
+ track_notes.extend(result.get("notes", []))
63
+ except Exception:
64
+ pass
65
+ if track_notes:
66
+ notes_by_track[t_idx] = track_notes
67
+ return notes_by_track
@@ -23,14 +23,22 @@ from .models import (
23
23
  _story = SessionStory()
24
24
  _threads: dict[str, CreativeThread] = {}
25
25
  _turns: list[TurnResolution] = []
26
+ _project_store = None # Optional PersistentProjectStore
27
+
28
+
29
+ def set_project_store(store) -> None:
30
+ """Attach a persistent project store for flush-on-write."""
31
+ global _project_store
32
+ _project_store = store
26
33
 
27
34
 
28
35
  def reset_story() -> None:
29
36
  """Reset session story (for testing)."""
30
- global _story, _threads, _turns
37
+ global _story, _threads, _turns, _project_store
31
38
  _story = SessionStory()
32
39
  _threads = {}
33
40
  _turns = []
41
+ _project_store = None
34
42
 
35
43
 
36
44
  # ── Session story ─────────────────────────────────────────────────
@@ -117,6 +125,13 @@ def record_turn_resolution(
117
125
  else:
118
126
  _story.mood_arc.append("neutral")
119
127
 
128
+ # Flush to persistent store
129
+ if _project_store is not None:
130
+ try:
131
+ _project_store.save_turn(turn.to_dict())
132
+ except Exception:
133
+ pass
134
+
120
135
  return turn
121
136
 
122
137
 
@@ -138,6 +153,14 @@ def open_thread(description: str, domain: str = "", priority: float = 0.5) -> Cr
138
153
  last_touched_ms=now,
139
154
  )
140
155
  _threads[thread_id] = thread
156
+
157
+ # Flush to persistent store
158
+ if _project_store is not None:
159
+ try:
160
+ _project_store.save_thread(thread.to_dict())
161
+ except Exception:
162
+ pass
163
+
141
164
  return thread
142
165
 
143
166
 
@@ -147,6 +170,11 @@ def resolve_thread(thread_id: str) -> Optional[CreativeThread]:
147
170
  if thread:
148
171
  thread.status = "resolved"
149
172
  thread.last_touched_ms = int(time.time() * 1000)
173
+ if _project_store is not None:
174
+ try:
175
+ _project_store.save_thread(thread.to_dict())
176
+ except Exception:
177
+ pass
150
178
  return thread
151
179
 
152
180
 
@@ -74,16 +74,43 @@ def build_song_brain(
74
74
 
75
75
  drift_risk = _estimate_drift_risk(recent_moves, sacred)
76
76
 
77
+ # Evidence-weighted confidence adjustment
78
+ # Weights: motif=0.4, composition=0.2, role_graph=0.15, scenes=0.15, recent_moves=0.1
79
+ evidence_weights = {
80
+ "motif_data": 0.4,
81
+ "composition_analysis": 0.2,
82
+ "role_graph": 0.15,
83
+ "scenes": 0.15,
84
+ "recent_moves": 0.1,
85
+ }
86
+ evidence_score = sum(
87
+ weight for source, weight in evidence_weights.items()
88
+ if built_from.get(source, False)
89
+ )
90
+ # Adjust identity confidence by evidence availability
91
+ adjusted_confidence = round(identity_confidence * (0.4 + 0.6 * evidence_score), 3)
92
+
93
+ evidence_breakdown = {
94
+ "raw_confidence": identity_confidence,
95
+ "evidence_score": round(evidence_score, 3),
96
+ "adjusted_confidence": adjusted_confidence,
97
+ "sources": {
98
+ source: {"available": built_from.get(source, False), "weight": weight}
99
+ for source, weight in evidence_weights.items()
100
+ },
101
+ }
102
+
77
103
  return SongBrain(
78
104
  brain_id=brain_id,
79
105
  identity_core=identity_core,
80
- identity_confidence=identity_confidence,
106
+ identity_confidence=adjusted_confidence,
81
107
  sacred_elements=sacred,
82
108
  section_purposes=sections,
83
109
  energy_arc=energy_arc,
84
110
  identity_drift_risk=drift_risk,
85
111
  payoff_targets=payoff_targets,
86
112
  open_questions=open_questions,
113
+ evidence_breakdown=evidence_breakdown,
87
114
  built_from=built_from,
88
115
  )
89
116
 
@@ -84,6 +84,9 @@ class SongBrain:
84
84
  # Open questions the song has not resolved
85
85
  open_questions: list[OpenQuestion] = field(default_factory=list)
86
86
 
87
+ # Evidence breakdown — what data informed each inference
88
+ evidence_breakdown: dict = field(default_factory=dict)
89
+
87
90
  # Metadata
88
91
  built_from: dict = field(default_factory=dict) # what data sources contributed
89
92
 
@@ -98,6 +101,7 @@ class SongBrain:
98
101
  "identity_drift_risk": self.identity_drift_risk,
99
102
  "payoff_targets": self.payoff_targets,
100
103
  "open_questions": [q.to_dict() for q in self.open_questions],
104
+ "evidence_breakdown": self.evidence_breakdown,
101
105
  "built_from": self.built_from,
102
106
  }
103
107
 
@@ -88,9 +88,11 @@ def _fetch_session_data(ctx: Context) -> dict:
88
88
  except Exception:
89
89
  pass
90
90
 
91
- # Motif data — from the motif engine if notes exist
91
+ # Motif data — via shared motif service (pure-Python, not TCP)
92
92
  try:
93
- data["motif_data"] = ableton.send_command("get_motif_graph")
93
+ from ..services.motif_service import get_motif_data, fetch_notes_from_ableton
94
+ notes_by_track = fetch_notes_from_ableton(ableton, data.get("tracks", []))
95
+ data["motif_data"] = get_motif_data(notes_by_track)
94
96
  except Exception:
95
97
  pass # Motif graph requires notes in clips; empty is valid
96
98
 
@@ -147,6 +149,21 @@ def build_song_brain(ctx: Context) -> dict:
147
149
  Returns the full SongBrain as a dict.
148
150
  """
149
151
  data = _fetch_session_data(ctx)
152
+
153
+ # Capability reporting — what data was actually available
154
+ from ..runtime.capability import build_capability
155
+ cap = build_capability(
156
+ required=["session_info", "scenes", "tracks", "motif_data", "composition_analysis", "role_graph"],
157
+ available={
158
+ "session_info": bool(data.get("session_info", {}).get("tempo")),
159
+ "scenes": bool(data.get("scenes")),
160
+ "tracks": bool(data.get("tracks")),
161
+ "motif_data": bool(data.get("motif_data")),
162
+ "composition_analysis": bool(data.get("composition_analysis")),
163
+ "role_graph": bool(data.get("role_graph")),
164
+ },
165
+ )
166
+
150
167
  brain = builder.build_song_brain(
151
168
  session_info=data["session_info"],
152
169
  scenes=data["scenes"],
@@ -161,6 +178,7 @@ def build_song_brain(ctx: Context) -> dict:
161
178
  return {
162
179
  **brain.to_dict(),
163
180
  "summary": brain.summary,
181
+ "capability": cap.to_dict(),
164
182
  }
165
183
 
166
184
 
@@ -29,9 +29,14 @@ def _get_taste_graph(ctx: Context):
29
29
  from ..memory.taste_graph import build_taste_graph
30
30
  from ..memory.taste_memory import TasteMemoryStore
31
31
  from ..memory.anti_memory import AntiMemoryStore
32
+ from ..persistence.taste_store import PersistentTasteStore
32
33
  taste_store = ctx.lifespan_context.setdefault("taste_memory", TasteMemoryStore())
33
34
  anti_store = ctx.lifespan_context.setdefault("anti_memory", AntiMemoryStore())
34
- return build_taste_graph(taste_store=taste_store, anti_store=anti_store)
35
+ persistent = ctx.lifespan_context.setdefault("persistent_taste", PersistentTasteStore())
36
+ return build_taste_graph(
37
+ taste_store=taste_store, anti_store=anti_store,
38
+ persistent_store=persistent,
39
+ )
35
40
  except Exception:
36
41
  pass
37
42
  return None
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.9.23",
3
+ "version": "1.9.24",
4
4
  "mcpName": "io.github.dreamrec/livepilot",
5
5
  "description": "Agentic production system for Ableton Live 12 — 293 tools, 39 domains, device atlas, spectral perception, technique memory, neo-Riemannian harmony, Euclidean rhythm, species counterpoint, MIDI I/O",
6
6
  "author": "Pilot Studio",
@@ -5,7 +5,7 @@ Entry point for the ControlSurface. Ableton calls create_instance(c_instance)
5
5
  when this script is selected in Preferences > Link, Tempo & MIDI.
6
6
  """
7
7
 
8
- __version__ = "1.9.23"
8
+ __version__ = "1.9.24"
9
9
 
10
10
  from _Framework.ControlSurface import ControlSurface
11
11
  from .server import LivePilotServer
@@ -0,0 +1,132 @@
1
+ #!/usr/bin/env python3
2
+ """Metadata sync — single source of truth for version and tool count.
3
+
4
+ Reads version from package.json, tool count from test_tools_contract.py,
5
+ and verifies all known locations are in sync.
6
+
7
+ Usage:
8
+ python scripts/sync_metadata.py --check # verify, exit 1 if stale
9
+ python scripts/sync_metadata.py --fix # auto-fix stale references
10
+ """
11
+
12
+ import json
13
+ import re
14
+ import sys
15
+ from pathlib import Path
16
+
17
+ ROOT = Path(__file__).resolve().parents[1]
18
+
19
+
20
+ def get_version() -> str:
21
+ """Read version from package.json (source of truth)."""
22
+ pkg = json.loads((ROOT / "package.json").read_text())
23
+ return pkg["version"]
24
+
25
+
26
+ def get_tool_count() -> int:
27
+ """Read tool count from test_tools_contract.py assertion."""
28
+ src = (ROOT / "tests" / "test_tools_contract.py").read_text()
29
+ match = re.search(r"assert len\(tools\) == (\d+)", src)
30
+ if match:
31
+ return int(match.group(1))
32
+ raise ValueError("Could not find tool count assertion in test_tools_contract.py")
33
+
34
+
35
+ # Files that must contain the version string
36
+ VERSION_FILES = [
37
+ "package.json",
38
+ "server.json",
39
+ "manifest.json",
40
+ "livepilot/.claude-plugin/plugin.json",
41
+ "livepilot/.Codex-plugin/plugin.json",
42
+ ".claude-plugin/marketplace.json",
43
+ "mcp_server/__init__.py",
44
+ "remote_script/LivePilot/__init__.py",
45
+ "CLAUDE.md",
46
+ "AGENTS.md",
47
+ "livepilot/skills/livepilot-core/references/overview.md",
48
+ "docs/M4L_BRIDGE.md",
49
+ ]
50
+
51
+ # Files that must contain the tool count
52
+ TOOL_COUNT_FILES = [
53
+ "README.md",
54
+ "package.json",
55
+ "server.json",
56
+ "CLAUDE.md",
57
+ "AGENTS.md",
58
+ "CONTRIBUTING.md",
59
+ "livepilot/.claude-plugin/plugin.json",
60
+ "livepilot/.Codex-plugin/plugin.json",
61
+ "livepilot/skills/livepilot-core/SKILL.md",
62
+ "livepilot/skills/livepilot-core/references/overview.md",
63
+ "docs/manual/index.md",
64
+ "docs/manual/tool-reference.md",
65
+ "docs/manual/tool-catalog.md",
66
+ ]
67
+
68
+
69
+ def check_version(version: str) -> list[str]:
70
+ """Check all version files for staleness."""
71
+ issues = []
72
+ for rel_path in VERSION_FILES:
73
+ path = ROOT / rel_path
74
+ if not path.exists():
75
+ continue
76
+ content = path.read_text()
77
+ if version not in content:
78
+ # Find what version IS there
79
+ old = re.search(r"1\.\d+\.\d+", content)
80
+ old_ver = old.group(0) if old else "???"
81
+ if old_ver != version:
82
+ issues.append(f" {rel_path}: has {old_ver}, expected {version}")
83
+ return issues
84
+
85
+
86
+ def check_tool_count(count: int) -> list[str]:
87
+ """Check all tool count files for staleness."""
88
+ issues = []
89
+ count_str = str(count)
90
+ for rel_path in TOOL_COUNT_FILES:
91
+ path = ROOT / rel_path
92
+ if not path.exists():
93
+ continue
94
+ content = path.read_text()
95
+ # Look for "N tools" pattern
96
+ matches = re.findall(r"(\d+)\s+tools", content)
97
+ for m in matches:
98
+ if m != count_str and int(m) > 250: # ignore subset counts like "210 tools"
99
+ issues.append(f" {rel_path}: has '{m} tools', expected '{count_str} tools'")
100
+ break
101
+ return issues
102
+
103
+
104
+ def main():
105
+ mode = sys.argv[1] if len(sys.argv) > 1 else "--check"
106
+
107
+ version = get_version()
108
+ tool_count = get_tool_count()
109
+
110
+ print(f"Source of truth: version={version}, tools={tool_count}")
111
+
112
+ version_issues = check_version(version)
113
+ count_issues = check_tool_count(tool_count)
114
+
115
+ all_issues = version_issues + count_issues
116
+
117
+ if all_issues:
118
+ print(f"\nFound {len(all_issues)} stale reference(s):")
119
+ for issue in all_issues:
120
+ print(issue)
121
+ if mode == "--check":
122
+ sys.exit(1)
123
+ elif mode == "--fix":
124
+ print("\n--fix mode not yet implemented. Fix manually.")
125
+ sys.exit(1)
126
+ else:
127
+ print("All metadata in sync.")
128
+ sys.exit(0)
129
+
130
+
131
+ if __name__ == "__main__":
132
+ main()