livepilot 1.20.2 → 1.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,14 @@
1
+ device_slug: auto-filter
2
+ device_class_name: AutoFilter2
3
+ presets:
4
+ slow-sweep:
5
+ description: >
6
+ Slow LFO sweep for return chains. Synced rate (bar-long), moderate
7
+ resonance, low-pass type. Layers well with dub-cathedral reverb.
8
+ param_overrides:
9
+ Filter Type: 0 # LP
10
+ LFO Amount: 0.60
11
+ LFO Sync: 1
12
+ LFO Rate: 4 # bar-long sweep
13
+ Resonance: 0.25
14
+ suggested_pairings: ["Echo", "Reverb"]
@@ -0,0 +1,18 @@
1
+ device_slug: delay
2
+ device_class_name: Delay
3
+ presets:
4
+ ping-pong-dub:
5
+ description: >
6
+ Dotted-eighth ping-pong for dub-techno sends. Moderate feedback,
7
+ high wet, HP+LP filtered to stay out of the kick's sub and the
8
+ vocal's top end.
9
+ param_overrides:
10
+ Sync: 1
11
+ 16th Note: 6 # dotted 8th
12
+ Feedback: 0.45
13
+ Filter On: 1
14
+ HP Freq: 0.20 # ~100 Hz cut below
15
+ LP Freq: 0.65 # mid-top rolloff
16
+ Dry/Wet: 0.55
17
+ risk_notes: "Feedback above 0.6 self-oscillates; watch the return track's output level."
18
+ suggested_pairings: ["Auto Filter", "Saturator"]
@@ -0,0 +1,16 @@
1
+ device_slug: reverb
2
+ device_class_name: Reverb
3
+ presets:
4
+ dub-cathedral:
5
+ description: >
6
+ Basic Channel-adjacent huge-space reverb. Long decay, large room,
7
+ noticeable predelay, high diffusion. Pairs well with Echo + Auto
8
+ Filter on a return chain for dub-techno sends.
9
+ param_overrides:
10
+ Decay Time: 0.85
11
+ Room Size: 0.95
12
+ Dry/Wet: 0.40
13
+ Predelay: 0.45
14
+ Diffusion: 0.80
15
+ risk_notes: "Long decay masks transients; avoid routing percussion here."
16
+ suggested_pairings: ["Echo", "Auto Filter"]
@@ -0,0 +1,74 @@
1
+ """Preset loader for affordance-device YAML files.
2
+
3
+ Runtime resolution only — schema validation lives in ``_schema.py`` and
4
+ fires at test-time. The loader is tolerant of malformed files (returns
5
+ None rather than raising) so production code never crashes on a bad
6
+ preset; the schema validator catches those pre-ship.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from pathlib import Path
12
+ from typing import Any, Optional
13
+
14
+ import yaml
15
+
16
+
17
+ _AFFORDANCES_ROOT = Path(__file__).parent / "devices"
18
+
19
+
20
+ def _load_device_yaml(device_slug: str) -> Optional[dict]:
21
+ """Load and parse a device's YAML file. Returns None on any error."""
22
+ path = _AFFORDANCES_ROOT / f"{device_slug}.yaml"
23
+ if not path.exists():
24
+ return None
25
+ try:
26
+ data = yaml.safe_load(path.read_text(encoding="utf-8"))
27
+ except yaml.YAMLError:
28
+ return None
29
+ return data if isinstance(data, dict) else None
30
+
31
+
32
+ def resolve_preset(device_slug: str, preset_name: str) -> Optional[dict[str, Any]]:
33
+ """Return the ``param_overrides`` dict for a named preset, or None.
34
+
35
+ Returns None on: missing device file, missing preset, unparseable YAML,
36
+ or a preset record without ``param_overrides``.
37
+ """
38
+ data = _load_device_yaml(device_slug)
39
+ if data is None:
40
+ return None
41
+ preset = data.get("presets", {}).get(preset_name)
42
+ if not isinstance(preset, dict):
43
+ return None
44
+ overrides = preset.get("param_overrides")
45
+ return dict(overrides) if isinstance(overrides, dict) else None
46
+
47
+
48
+ def get_preset_metadata(device_slug: str, preset_name: str) -> Optional[dict]:
49
+ """Return the full preset record (description + pairings + risk_notes
50
+ + param_overrides) or None."""
51
+ data = _load_device_yaml(device_slug)
52
+ if data is None:
53
+ return None
54
+ preset = data.get("presets", {}).get(preset_name)
55
+ return dict(preset) if isinstance(preset, dict) else None
56
+
57
+
58
+ def list_devices() -> list[str]:
59
+ """Return device slugs with available preset files, sorted."""
60
+ if not _AFFORDANCES_ROOT.exists():
61
+ return []
62
+ return sorted(p.stem for p in _AFFORDANCES_ROOT.glob("*.yaml"))
63
+
64
+
65
+ def list_presets(device_slug: str) -> list[str]:
66
+ """Return preset names for a given device slug, sorted. Empty list
67
+ on missing device or malformed YAML."""
68
+ data = _load_device_yaml(device_slug)
69
+ if data is None:
70
+ return []
71
+ presets = data.get("presets", {})
72
+ if not isinstance(presets, dict):
73
+ return []
74
+ return sorted(presets.keys())
@@ -630,6 +630,54 @@ def compare_experiments(
630
630
  }
631
631
 
632
632
 
633
+ # v1.21: helpers for commit_experiment's ledger-write block. Mirrors the
634
+ # v1.20 apply_semantic_move pattern (commit 0b3489b) — both writers feed
635
+ # the same SessionLedger, so anti-repetition filters downstream see a
636
+ # unified recency log regardless of which surface executed the move.
637
+
638
+ _TOOL_TO_FAMILY: dict[str, str] = {
639
+ # Minimal first-step-tool → family mapping. Used only when a branch
640
+ # lacks an explicit seed.family. Uncovered tools fall through to
641
+ # default "mix" (same safe default apply_semantic_move would use).
642
+ "set_track_volume": "mix",
643
+ "set_track_pan": "mix",
644
+ "set_track_send": "mix",
645
+ "set_device_parameter": "sound_design",
646
+ "batch_set_parameters": "sound_design",
647
+ "create_clip": "arrangement",
648
+ "add_notes": "arrangement",
649
+ "create_scene": "arrangement",
650
+ "set_scene_tempo": "arrangement",
651
+ "create_midi_track": "arrangement",
652
+ "find_and_load_device": "device_creation",
653
+ "generate_m4l_effect": "device_creation",
654
+ "apply_gesture_template": "transition",
655
+ "set_track_arm": "performance",
656
+ "load_sample_to_simpler": "sample",
657
+ }
658
+
659
+
660
+ def _infer_move_family(target) -> str:
661
+ """Determine move_class for a commit_experiment ledger entry.
662
+
663
+ Priority:
664
+ 1. ``target.seed.family`` — explicit seed classification.
665
+ 2. First compiled_plan step's tool via _TOOL_TO_FAMILY lookup.
666
+ 3. Default "mix" — safe fallback.
667
+ """
668
+ seed = getattr(target, "seed", None)
669
+ if seed is not None and getattr(seed, "family", None):
670
+ return seed.family
671
+
672
+ plan = getattr(target, "compiled_plan", None) or {}
673
+ steps = plan.get("steps", []) or []
674
+ if steps:
675
+ first_tool = steps[0].get("tool", "")
676
+ return _TOOL_TO_FAMILY.get(first_tool, "mix")
677
+
678
+ return "mix"
679
+
680
+
633
681
  @mcp.tool()
634
682
  async def commit_experiment(
635
683
  ctx: Context,
@@ -759,6 +807,65 @@ async def commit_experiment(
759
807
  ctx=ctx,
760
808
  )
761
809
 
810
+ # v1.21: write the committed experiment to the SessionLedger so
811
+ # get_last_move / anti-repetition can see it. Best-effort — a
812
+ # ledger write failure is logged but does not fail the commit.
813
+ ledger_entry_id: Optional[str] = None
814
+ if isinstance(commit_result, dict) and commit_result.get("committed") is True:
815
+ try:
816
+ # store_purpose: writer (v1.21 commit_experiment auto-ledger
817
+ # write; shape mirrors apply_semantic_move commit 0b3489b).
818
+ from ..runtime.action_ledger import SessionLedger
819
+ ledger = ctx.lifespan_context.setdefault(
820
+ "action_ledger", SessionLedger()
821
+ )
822
+ # Engine tag reflects branch SOURCE (not escalation success).
823
+ # A composer-sourced branch that fell back to scaffold is still
824
+ # a composer-engine commit; the escalation-success detail is
825
+ # captured in target.evaluation["composer_escalation"], and
826
+ # doubling up on the engine tag would be noise for the
827
+ # anti-repetition filters downstream.
828
+ engine_tag = (
829
+ "composer"
830
+ if (
831
+ target.seed is not None
832
+ and target.seed.source == "composer"
833
+ )
834
+ else "experiment"
835
+ )
836
+ move_class = _infer_move_family(target)
837
+ ledger_entry_id = ledger.start_move(
838
+ engine=engine_tag,
839
+ move_class=move_class,
840
+ intent=(
841
+ f"{experiment_id}/{branch_id}: "
842
+ f"{target.name or 'committed winner'}"
843
+ ),
844
+ undo_scope="micro",
845
+ )
846
+ # Actions from the POST-escalation plan (execution_log is the
847
+ # router's actual execution record — captures the swapped plan
848
+ # when composer escalation fired successfully).
849
+ for er in (commit_result.get("execution_log") or []):
850
+ if er.get("ok"):
851
+ ledger.append_action(
852
+ ledger_entry_id,
853
+ tool_name=er.get("tool", ""),
854
+ summary=er.get("tool", "") or "step",
855
+ )
856
+ steps_executed = int(commit_result.get("steps_executed", 0))
857
+ steps_failed = int(commit_result.get("steps_failed", 0))
858
+ total = steps_executed + steps_failed
859
+ ledger.finalize_move(
860
+ ledger_entry_id,
861
+ kept=(steps_failed == 0),
862
+ score=(float(steps_executed) / total) if total else 0.0,
863
+ memory_candidate=False,
864
+ )
865
+ except Exception as exc: # pragma: no cover — ledger is best-effort
866
+ logger.warning("commit_experiment ledger write failed: %s", exc)
867
+ ledger_entry_id = None
868
+
762
869
  # Surface escalation details on the commit response so the caller
763
870
  # sees whether a scaffold or resolved plan was applied.
764
871
  if escalation_info is not None and isinstance(commit_result, dict):
@@ -770,6 +877,12 @@ async def commit_experiment(
770
877
  "warnings": escalation_info.get("warnings", []),
771
878
  }
772
879
 
880
+ # Surface ledger_entry_id on the commit response so callers can
881
+ # correlate their MCP response with the ledger entry for post-hoc
882
+ # evaluation. Same pattern as apply_semantic_move.
883
+ if ledger_entry_id is not None and isinstance(commit_result, dict):
884
+ commit_result["ledger_entry_id"] = ledger_entry_id
885
+
773
886
  return commit_result
774
887
 
775
888
 
@@ -55,6 +55,10 @@ def record_anti_preference(
55
55
  @mcp.tool()
56
56
  def get_promotion_candidates(ctx: Context, limit: int = 10) -> dict:
57
57
  """Check the session ledger for entries eligible for memory promotion."""
58
+ # store_purpose: audit_readonly
59
+ # Reads the ledger to find entries already flagged as
60
+ # memory-promotion candidates — an audit/export surface, NOT an
61
+ # anti-repetition recency read.
58
62
  ledger = ctx.lifespan_context.get("action_ledger")
59
63
  if ledger is None:
60
64
  return {"candidates": [], "count": 0, "note": "no session ledger active"}
@@ -75,6 +79,12 @@ def get_promotion_candidates(ctx: Context, limit: int = 10) -> dict:
75
79
  # ── Session Memory ──────────────────────────────────────────────────
76
80
 
77
81
 
82
+ # store_purpose: mcp_tool_definition
83
+ # get_session_memory is the MCP tool that surfaces session-scoped
84
+ # ephemeral observations/decisions. It is NOT the action ledger and
85
+ # NOT the persistent technique library — use the right tool for
86
+ # recency (SessionLedger.get_recent_moves / get_action_ledger_summary)
87
+ # or for learned techniques (memory_list).
78
88
  @mcp.tool()
79
89
  def get_session_memory(
80
90
  ctx: Context, limit: int = 10, category: str = ""
@@ -215,6 +215,13 @@ def get_session_kernel(
215
215
  session_mem: list = []
216
216
  kernel_warnings: list[str] = []
217
217
 
218
+ # store_purpose: audit_readonly
219
+ # The world-model kernel builder surfaces ledger state (total moves,
220
+ # memory candidates, last_move, recent_moves) as diagnostic data for
221
+ # downstream consumers. Not an anti-repetition reader — it's a
222
+ # kernel-assembly surface; consumers that want recency should either
223
+ # call SessionLedger.get_recent_moves directly (annotated as
224
+ # anti_repetition) or use get_action_ledger_summary.
218
225
  try:
219
226
  from .action_ledger import SessionLedger
220
227
  ledger = ctx.lifespan_context.get("action_ledger")
@@ -44,7 +44,33 @@ _MOVE_TO_TEMPLATE: dict[str, str] = {
44
44
 
45
45
  def _compile_device_creation(move: SemanticMove, kernel: dict) -> CompiledPlan:
46
46
  """Map plan_template steps to CompiledStep, injecting Device Forge
47
- `gen_code` when the move is in _MOVE_TO_TEMPLATE."""
47
+ `gen_code` and threading `track_index` through `find_and_load_device`."""
48
+ # v1.21 parity-gate fix: thread track_index from seed_args into
49
+ # find_and_load_device steps. Pre-fix, plan_templates emitted the
50
+ # ergonomic key ``query`` with no track_index — broken at runtime
51
+ # since pre-v1.20 because the ``remote_command`` backend bypasses
52
+ # MCP normalization and Ableton's handler requires
53
+ # ``track_index`` + ``device_name``.
54
+ seed_args = kernel.get("seed_args") or {}
55
+ track_index = seed_args.get("track_index")
56
+ needs_load_step = any(
57
+ s.get("tool") == "find_and_load_device" for s in move.plan_template
58
+ )
59
+ if needs_load_step and track_index is None:
60
+ return CompiledPlan(
61
+ move_id=move.move_id,
62
+ intent=move.intent,
63
+ steps=[],
64
+ risk_level=move.risk_level,
65
+ summary=f"{move.move_id} requires seed_args.track_index",
66
+ warnings=[
67
+ f"{move.move_id} requires seed_args.track_index (int) to "
68
+ "load the generated device onto a track. Example: "
69
+ f"apply_semantic_move(\"{move.move_id}\", mode=\"explore\", "
70
+ "args={\"track_index\": 0})"
71
+ ],
72
+ )
73
+
48
74
  # Resolve the GenExpr template once per compile (idempotent).
49
75
  template_code: str | None = None
50
76
  template_id = _MOVE_TO_TEMPLATE.get(move.move_id)
@@ -60,14 +86,22 @@ def _compile_device_creation(move: SemanticMove, kernel: dict) -> CompiledPlan:
60
86
  steps: list[CompiledStep] = []
61
87
  for step in move.plan_template:
62
88
  params = dict(step.get("params") or {})
89
+ tool = step.get("tool", "")
63
90
 
64
91
  # Inject gen_code for Device Forge moves. Done BEFORE CompiledStep
65
92
  # construction so the step snapshot is correct, not mutated later.
66
- if template_code is not None and step.get("tool") == "generate_m4l_effect":
93
+ if template_code is not None and tool == "generate_m4l_effect":
67
94
  params["gen_code"] = template_code
68
95
 
96
+ # v1.21: inject track_index into find_and_load_device. plan_templates
97
+ # now emit {"device_name": ...} (wire-format key); compiler adds
98
+ # {"track_index": ...} from seed_args so the remote_command backend
99
+ # sends a handler-compatible payload.
100
+ if tool == "find_and_load_device":
101
+ params["track_index"] = track_index
102
+
69
103
  steps.append(CompiledStep(
70
- tool=step.get("tool", ""),
104
+ tool=tool,
71
105
  params=params,
72
106
  description=step.get("description", ""),
73
107
  verify_after=bool(step.get("verify_after", True)),
@@ -28,7 +28,7 @@ CREATE_CHAOS_MODULATOR = SemanticMove(
28
28
  },
29
29
  {
30
30
  "tool": "find_and_load_device",
31
- "params": {"query": "Wonder Chaos Mod"},
31
+ "params": {"device_name": "Wonder Chaos Mod"},
32
32
  "description": "Load generated device onto target track",
33
33
  "backend": "remote_command",
34
34
  },
@@ -59,7 +59,7 @@ CREATE_FEEDBACK_RESONATOR = SemanticMove(
59
59
  },
60
60
  {
61
61
  "tool": "find_and_load_device",
62
- "params": {"query": "Wonder Resonator"},
62
+ "params": {"device_name": "Wonder Resonator"},
63
63
  "description": "Load resonator onto target track",
64
64
  "backend": "remote_command",
65
65
  },
@@ -90,7 +90,7 @@ CREATE_WAVEFOLDER_EFFECT = SemanticMove(
90
90
  },
91
91
  {
92
92
  "tool": "find_and_load_device",
93
- "params": {"query": "Wonder Wavefolder"},
93
+ "params": {"device_name": "Wonder Wavefolder"},
94
94
  "description": "Load wavefolder onto target track",
95
95
  "backend": "remote_command",
96
96
  },
@@ -121,7 +121,7 @@ CREATE_BITCRUSHER_EFFECT = SemanticMove(
121
121
  },
122
122
  {
123
123
  "tool": "find_and_load_device",
124
- "params": {"query": "Wonder Bitcrusher"},
124
+ "params": {"device_name": "Wonder Bitcrusher"},
125
125
  "description": "Load bitcrusher onto target track",
126
126
  "backend": "remote_command",
127
127
  },
@@ -152,7 +152,7 @@ CREATE_KARPLUS_STRING = SemanticMove(
152
152
  },
153
153
  {
154
154
  "tool": "find_and_load_device",
155
- "params": {"query": "Wonder String"},
155
+ "params": {"device_name": "Wonder String"},
156
156
  "description": "Load string synth onto target track",
157
157
  "backend": "remote_command",
158
158
  },
@@ -183,7 +183,7 @@ CREATE_STOCHASTIC_TEXTURE = SemanticMove(
183
183
  },
184
184
  {
185
185
  "tool": "find_and_load_device",
186
- "params": {"query": "Wonder Stochastic"},
186
+ "params": {"device_name": "Wonder Stochastic"},
187
187
  "description": "Load stochastic texture device onto target track",
188
188
  "backend": "remote_command",
189
189
  },
@@ -214,7 +214,7 @@ CREATE_FDN_REVERB = SemanticMove(
214
214
  },
215
215
  {
216
216
  "tool": "find_and_load_device",
217
- "params": {"query": "Wonder FDN Verb"},
217
+ "params": {"device_name": "Wonder FDN Verb"},
218
218
  "description": "Load FDN reverb onto target track",
219
219
  "backend": "remote_command",
220
220
  },
@@ -27,14 +27,30 @@ def _empty_plan(move: SemanticMove, warnings: list[str]) -> CompiledPlan:
27
27
 
28
28
 
29
29
  def _compile_configure_device(move: SemanticMove, kernel: dict) -> CompiledPlan:
30
+ """Compile configure_device.
31
+
32
+ v1.20 contract: seed_args.param_overrides (explicit dict of
33
+ {param_name: value}).
34
+
35
+ v1.21 additions (additive — v1.20 callers unaffected):
36
+ - seed_args.preset: str — named preset in the affordance library
37
+ - seed_args.device_slug: str — required when preset is used; v1.21
38
+ does not auto-infer from class_name
39
+
40
+ Merge contract: preset resolves first, then explicit param_overrides
41
+ entries merge on top (last-write-wins at dict-key granularity).
42
+ """
30
43
  args = kernel.get("seed_args") or {}
31
44
  track_index = args.get("track_index")
32
45
  device_index = args.get("device_index")
33
- overrides = args.get("param_overrides")
46
+ explicit_overrides = args.get("param_overrides")
47
+ preset_name = args.get("preset")
48
+ device_slug = args.get("device_slug")
34
49
 
35
- if track_index is None or device_index is None or overrides is None:
50
+ if track_index is None or device_index is None:
36
51
  return _empty_plan(move, [
37
- "configure_device requires seed_args.track_index + device_index + param_overrides"
52
+ "configure_device requires seed_args.track_index + device_index "
53
+ "(plus either param_overrides or a preset+device_slug pair)"
38
54
  ])
39
55
  if not isinstance(track_index, int) or not isinstance(device_index, int):
40
56
  return _empty_plan(move, [
@@ -43,14 +59,49 @@ def _compile_configure_device(move: SemanticMove, kernel: dict) -> CompiledPlan:
43
59
  ])
44
60
  if device_index < 0:
45
61
  return _empty_plan(move, [f"device_index must be non-negative, got {device_index}"])
46
- if not isinstance(overrides, dict):
62
+
63
+ # explicit param_overrides is now optional (may be None if preset provides
64
+ # everything), but when present must be a dict.
65
+ if explicit_overrides is not None and not isinstance(explicit_overrides, dict):
47
66
  return _empty_plan(move, [
48
- f"param_overrides must be a dict[str, Any], got {type(overrides).__name__}"
67
+ f"param_overrides must be a dict[str, Any], got "
68
+ f"{type(explicit_overrides).__name__}"
49
69
  ])
50
- if not overrides:
70
+
71
+ # v1.21: resolve preset if requested
72
+ preset_overrides: dict = {}
73
+ if preset_name is not None:
74
+ if not device_slug:
75
+ return _empty_plan(move, [
76
+ "preset seed_arg requires device_slug (v1.21 contract — "
77
+ "auto-inference from class_name is v1.22 scope). Example: "
78
+ "args={\"track_index\": -1, \"device_index\": 0, "
79
+ "\"device_slug\": \"reverb\", \"preset\": \"dub-cathedral\"}"
80
+ ])
81
+ # Late import so mcp_server.semantic_moves doesn't hard-depend on
82
+ # mcp_server.affordances at import time — branch is only taken
83
+ # when a caller explicitly asks for a preset.
84
+ from ..affordances import resolve_preset
85
+ resolved = resolve_preset(device_slug, preset_name)
86
+ if resolved is None:
87
+ return _empty_plan(move, [
88
+ f"No preset {preset_name!r} for device slug {device_slug!r}. "
89
+ f"Check mcp_server/affordances/devices/{device_slug}.yaml "
90
+ f"exists and contains the named preset."
91
+ ])
92
+ preset_overrides = resolved
93
+
94
+ # Merge: preset resolves first, explicit param_overrides merge on top
95
+ # (last-write-wins at dict-key granularity).
96
+ merged: dict = dict(preset_overrides)
97
+ if explicit_overrides:
98
+ merged.update(explicit_overrides)
99
+
100
+ if not merged:
51
101
  return _empty_plan(move, [
52
- "param_overrides is empty nothing to configure (delete_device "
53
- "is a different move)"
102
+ "configure_device requires either a non-empty param_overrides "
103
+ "dict OR a preset+device_slug combination that resolves to "
104
+ "params. Neither was provided."
54
105
  ])
55
106
 
56
107
  # WIRE-FORMAT NOTE: compiled steps use the remote_command backend,
@@ -61,9 +112,12 @@ def _compile_configure_device(move: SemanticMove, kernel: dict) -> CompiledPlan:
61
112
  # exclusively. Emit that key directly.
62
113
  parameters = [
63
114
  {"name_or_index": str(name), "value": value}
64
- for name, value in overrides.items()
115
+ for name, value in merged.items()
65
116
  ]
66
117
 
118
+ preset_suffix = (
119
+ f" from preset {device_slug}/{preset_name}" if preset_name else ""
120
+ )
67
121
  step = CompiledStep(
68
122
  tool="batch_set_parameters",
69
123
  params={
@@ -72,9 +126,9 @@ def _compile_configure_device(move: SemanticMove, kernel: dict) -> CompiledPlan:
72
126
  "parameters": parameters,
73
127
  },
74
128
  description=(
75
- f"Configure device at track {track_index}, device_index {device_index} — "
76
- f"set {len(parameters)} parameter(s): "
77
- f"{', '.join(p['name_or_index'] for p in parameters)}"
129
+ f"Configure device at track {track_index}, device_index "
130
+ f"{device_index}{preset_suffix} — set {len(parameters)} "
131
+ f"parameter(s): {', '.join(p['name_or_index'] for p in parameters)}"
78
132
  ),
79
133
  verify_after=True,
80
134
  backend="remote_command",