orbit-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orbit/__init__.py +1 -0
- orbit/agent/__init__.py +0 -0
- orbit/agent/ask.py +35 -0
- orbit/agent/budget.py +58 -0
- orbit/agent/executor.py +99 -0
- orbit/agent/loop.py +129 -0
- orbit/agent/observer.py +40 -0
- orbit/agent/planner.py +112 -0
- orbit/agent/wtf.py +101 -0
- orbit/cli.py +243 -0
- orbit/config.py +127 -0
- orbit/context/__init__.py +0 -0
- orbit/context/docker_ctx.py +48 -0
- orbit/context/filesystem_ctx.py +81 -0
- orbit/context/git_ctx.py +50 -0
- orbit/context/k8s_ctx.py +53 -0
- orbit/context/scanner.py +84 -0
- orbit/context/system_ctx.py +37 -0
- orbit/llm/__init__.py +34 -0
- orbit/llm/anthropic_provider.py +41 -0
- orbit/llm/base.py +40 -0
- orbit/llm/ollama_provider.py +131 -0
- orbit/llm/openai_provider.py +41 -0
- orbit/memory/__init__.py +0 -0
- orbit/memory/history.py +88 -0
- orbit/memory/rag.py +59 -0
- orbit/memory/runbooks.py +49 -0
- orbit/modules/__init__.py +0 -0
- orbit/modules/base.py +28 -0
- orbit/modules/docker_mod.py +47 -0
- orbit/modules/filesystem_mod.py +35 -0
- orbit/modules/git_mod.py +50 -0
- orbit/modules/k8s_mod.py +48 -0
- orbit/modules/registry.py +39 -0
- orbit/modules/shell.py +35 -0
- orbit/router/__init__.py +0 -0
- orbit/router/context_budget.py +49 -0
- orbit/router/decomposer.py +59 -0
- orbit/router/model_registry.py +91 -0
- orbit/router/model_selector.py +47 -0
- orbit/safety/__init__.py +0 -0
- orbit/safety/classifier.py +44 -0
- orbit/safety/patterns.py +317 -0
- orbit/safety/rollback.py +94 -0
- orbit/schemas/__init__.py +23 -0
- orbit/schemas/analysis.py +11 -0
- orbit/schemas/context.py +30 -0
- orbit/schemas/execution.py +21 -0
- orbit/schemas/plan.py +41 -0
- orbit/schemas/runbook.py +23 -0
- orbit/schemas/safety.py +18 -0
- orbit/ui/__init__.py +0 -0
- orbit/ui/confirmations.py +83 -0
- orbit/ui/console.py +6 -0
- orbit/ui/panels.py +109 -0
- orbit/ui/themes.py +23 -0
- orbit_cli-0.1.0.dist-info/METADATA +182 -0
- orbit_cli-0.1.0.dist-info/RECORD +61 -0
- orbit_cli-0.1.0.dist-info/WHEEL +4 -0
- orbit_cli-0.1.0.dist-info/entry_points.txt +2 -0
- orbit_cli-0.1.0.dist-info/licenses/LICENSE +190 -0
orbit/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.1.0"
|
orbit/agent/__init__.py
ADDED
|
File without changes
|
orbit/agent/ask.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from orbit.config import get_config
|
|
4
|
+
from orbit.context.scanner import scan
|
|
5
|
+
from orbit.llm.ollama_provider import OllamaProvider
|
|
6
|
+
from orbit.ui.console import console
|
|
7
|
+
|
|
8
|
+
ASK_SYSTEM_PROMPT = """You are a helpful DevOps assistant. Answer questions about the user's
|
|
9
|
+
environment based on the provided context. Be concise and specific.
|
|
10
|
+
If you're not sure, say so. Do not execute any commands."""
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
async def ask(question: str) -> None:
    """Answer a question about the environment. No execution.

    Scans the environment, concatenates all available context slots into a
    single text block, and asks the configured Ollama model a one-shot
    question. Any failure is reported on the console rather than raised.

    Args:
        question: Natural-language question about the user's environment.
    """
    config = get_config()
    provider = OllamaProvider(host=config.ollama_host, port=config.ollama_port)

    console.print("[orbit.blue]Scanning environment...[/]")
    env = await scan()

    # Only slots that scanned successfully are included; each is labelled
    # with its source (e.g. git, docker) so the model can attribute facts.
    context_parts = [f"[{slot.source}]\n{slot.content}" for slot in env.slots if slot.available]
    context_text = "\n\n".join(context_parts) if context_parts else "No context available."

    messages = [
        {"role": "system", "content": ASK_SYSTEM_PROMPT},
        {"role": "user", "content": f"Environment:\n{context_text}\n\nQuestion: {question}"},
    ]

    try:
        # Single-shot completion: the full response is printed once it arrives.
        # NOTE(review): provider.chat looks like a synchronous call inside this
        # coroutine and may block the event loop — confirm whether
        # OllamaProvider offers an async/streaming variant (achat) to use here.
        response = provider.chat(model=config.default_model, messages=messages, temperature=0.3)
        console.print()
        console.print(str(response))
    except Exception as e:
        console.print(f"[orbit.error]Error: {e}[/]")
|
orbit/agent/budget.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class BudgetExhaustedError(Exception):
    """Signals that one of the agent's hard resource limits was crossed.

    Attributes:
        resource: Name of the exhausted resource (e.g. "steps").
        used: How many units have been consumed.
        limit: The configured maximum for that resource.
    """

    def __init__(self, resource: str, used: int, limit: int) -> None:
        super().__init__(f"Budget exhausted: {resource} ({used}/{limit})")
        self.resource = resource
        self.used = used
        self.limit = limit


class Budget:
    """Tracks agent loop resource usage with hard limits.

    Counters only ever grow. Each ``use_*`` method first records the usage
    and then raises :class:`BudgetExhaustedError` once the corresponding
    counter has passed its cap, so the unit that triggered the overflow is
    still reflected in :meth:`usage`.
    """

    def __init__(
        self,
        max_steps: int = 15,
        max_replans_per_step: int = 3,
        max_llm_calls: int = 25,
    ) -> None:
        self.max_steps = max_steps
        self.max_replans_per_step = max_replans_per_step
        self.max_llm_calls = max_llm_calls
        self._steps = 0
        self._replans_current_step = 0
        self._total_replans = 0
        self._llm_calls = 0

    def _enforce(self, resource: str, used: int, limit: int) -> None:
        # Shared guard: raise once a counter has gone past its cap.
        if used > limit:
            raise BudgetExhaustedError(resource, used, limit)

    def use_step(self) -> None:
        """Consume one plan step and reset the per-step replan counter."""
        self._steps += 1
        self._replans_current_step = 0
        self._enforce("steps", self._steps, self.max_steps)

    def use_replan(self) -> None:
        """Consume one replan attempt for the current step."""
        self._replans_current_step += 1
        self._total_replans += 1
        self._enforce("replans_per_step", self._replans_current_step, self.max_replans_per_step)

    def use_llm_call(self) -> None:
        """Consume one LLM invocation."""
        self._llm_calls += 1
        self._enforce("llm_calls", self._llm_calls, self.max_llm_calls)

    def can_replan(self) -> bool:
        """Return True while both the per-step replan and LLM-call budgets allow another replan."""
        replans_left = self._replans_current_step < self.max_replans_per_step
        calls_left = self._llm_calls < self.max_llm_calls
        return replans_left and calls_left

    def usage(self) -> dict[str, int]:
        """Return a snapshot of consumed counters alongside the step/call limits."""
        return {
            "steps": self._steps,
            "total_replans": self._total_replans,
            "llm_calls": self._llm_calls,
            "max_steps": self.max_steps,
            "max_llm_calls": self.max_llm_calls,
        }
|
orbit/agent/executor.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import subprocess
|
|
5
|
+
import time
|
|
6
|
+
|
|
7
|
+
from orbit.schemas.execution import CommandResult
|
|
8
|
+
from orbit.schemas.plan import PlanStep
|
|
9
|
+
from orbit.ui.console import console
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
async def run(step: PlanStep, stream: bool = True) -> CommandResult:
    """Execute a plan step's shell command with a timeout.

    Args:
        step: Plan step whose ``command`` is executed; ``timeout_seconds``
            bounds the run time.
        stream: When True, stdout is echoed to the terminal as it arrives.

    Returns:
        A CommandResult with exit code, captured output, and duration. On
        timeout, exit_code is -1 and ``timed_out`` is True.
    """
    start = time.monotonic()

    try:
        if stream:
            return await _run_streaming(step, start)
        else:
            return await _run_simple(step, start)
    # BUG FIX: the non-streaming path raises subprocess.TimeoutExpired, which
    # subclasses SubprocessError, NOT TimeoutError — a bare `except
    # TimeoutError` let it escape as an unhandled exception. Likewise, before
    # Python 3.11 asyncio.wait_for raises asyncio.TimeoutError, which is
    # distinct from the builtin TimeoutError. Catch all three (on 3.11+
    # asyncio.TimeoutError is an alias, which is harmless).
    except (TimeoutError, asyncio.TimeoutError, subprocess.TimeoutExpired):
        duration = time.monotonic() - start
        return CommandResult(
            command=step.command,
            exit_code=-1,
            stdout="",
            stderr=f"Command timed out after {step.timeout_seconds}s",
            duration_seconds=duration,
            timed_out=True,
        )
+
|
|
33
|
+
async def _run_streaming(step: PlanStep, start: float) -> CommandResult:
    """Run with streaming stdout to terminal; stderr is captured silently.

    On timeout the child process is killed and reaped, and the timeout
    exception is re-raised for the caller to translate into a CommandResult.
    """
    proc = await asyncio.create_subprocess_shell(
        step.command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )

    stdout_lines: list[str] = []
    stderr_lines: list[str] = []

    async def read_stream(stream: asyncio.StreamReader | None, lines: list[str], display: bool) -> None:
        # Drain one pipe line-by-line; optionally echo each line dimmed.
        if stream is None:
            return
        while True:
            line = await stream.readline()
            if not line:
                break
            decoded = line.decode("utf-8", errors="replace")
            lines.append(decoded)
            if display:
                console.print(f" [dim]{decoded.rstrip()}[/]")

    try:
        await asyncio.wait_for(
            asyncio.gather(
                read_stream(proc.stdout, stdout_lines, True),
                read_stream(proc.stderr, stderr_lines, False),
                proc.wait(),
            ),
            timeout=step.timeout_seconds,
        )
    # Catch asyncio.TimeoutError explicitly: before Python 3.11 it is not the
    # builtin TimeoutError (on 3.11+ it is an alias, which is harmless).
    except (TimeoutError, asyncio.TimeoutError):
        proc.kill()
        # FIX: reap the killed child so it does not linger as a zombie.
        await proc.wait()
        raise

    duration = time.monotonic() - start
    return CommandResult(
        command=step.command,
        exit_code=proc.returncode if proc.returncode is not None else -1,
        stdout="".join(stdout_lines),
        stderr="".join(stderr_lines),
        duration_seconds=duration,
    )
+
|
|
79
|
+
async def _run_simple(step: PlanStep, start: float) -> CommandResult:
    """Run without streaming, capture all output.

    The blocking subprocess.run is pushed to the default thread pool so the
    event loop stays responsive. A timeout propagates as
    subprocess.TimeoutExpired for the caller to handle.
    """
    # FIX: get_event_loop() is deprecated inside coroutines; use the running loop.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(
        None,
        lambda: subprocess.run(
            step.command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=step.timeout_seconds,
        ),
    )
    duration = time.monotonic() - start
    return CommandResult(
        command=step.command,
        exit_code=result.returncode,
        stdout=result.stdout,
        stderr=result.stderr,
        duration_seconds=duration,
    )
|
orbit/agent/loop.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
|
|
5
|
+
from orbit.agent import budget as budget_mod
|
|
6
|
+
from orbit.agent import executor, observer, planner
|
|
7
|
+
from orbit.config import OrbitConfig, get_config
|
|
8
|
+
from orbit.context.scanner import scan
|
|
9
|
+
from orbit.llm.ollama_provider import OllamaProvider
|
|
10
|
+
from orbit.router import context_budget, decomposer, model_selector
|
|
11
|
+
from orbit.router.model_registry import ModelRegistry
|
|
12
|
+
from orbit.safety.classifier import classify
|
|
13
|
+
from orbit.schemas.execution import ExecutionRecord
|
|
14
|
+
from orbit.ui.confirmations import confirm_step
|
|
15
|
+
from orbit.ui.console import console
|
|
16
|
+
from orbit.ui.panels import show_fatal, show_plan, show_replan, show_step_success, show_summary
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
async def run(goal: str, config: OrbitConfig | None = None) -> list[ExecutionRecord]:
    """Execute the full agent loop: scan → decompose → plan → confirm → execute → observe.

    Args:
        goal: Natural-language goal to accomplish.
        config: Optional configuration; loaded via get_config() when None.

    Returns:
        One ExecutionRecord per executed (non-skipped) step, in order.
        Empty when Ollama is unreachable or no plan could be generated.
    """
    if config is None:
        config = get_config()

    # Hard resource limits for this run; any use_* call past a limit raises
    # BudgetExhaustedError, caught at the bottom of this function.
    budget = budget_mod.Budget(
        max_steps=config.max_steps,
        max_replans_per_step=config.max_replans,
        max_llm_calls=config.max_llm_calls,
    )
    provider = OllamaProvider(host=config.ollama_host, port=config.ollama_port)

    # 1. Scan environment
    console.print("[orbit.blue]Scanning environment...[/]")
    env = await scan()

    # 2. Scan models and build registry (also serves as the Ollama
    # connectivity check — abort early if unreachable).
    registry = ModelRegistry()
    try:
        registry.scan(provider)
    except Exception as e:
        show_fatal(f"Cannot connect to Ollama: {e}")
        return []

    # 3. Decompose goal
    console.print("[orbit.blue]Decomposing goal...[/]")
    budget.use_llm_call()
    decomposition = await decomposer.decompose(goal, env, provider, config.default_model)

    # 4. Select models (per-capability model assignment for planning)
    model_map = model_selector.select(decomposition, registry, config.default_model)

    # 5. Allocate context budget — trims env.slots to fit the default
    # model's context window before they are serialized into prompts.
    ctx_window = registry.get_context_window(config.default_model)
    ctx_budget = context_budget.create_budget(ctx_window)
    env.slots = context_budget.allocate(env.slots, ctx_budget)

    # 6. Generate plan
    console.print("[orbit.blue]Generating plan...[/]")
    execution_plan = await planner.plan(goal, decomposition, env, model_map, budget, provider)

    if not execution_plan.steps:
        show_fatal("Could not generate an execution plan.")
        return []

    # 7. Show plan
    show_plan(execution_plan)

    # 8. Execute steps. A while loop (not for) because replanning may
    # splice new steps into execution_plan.steps at index i.
    records: list[ExecutionRecord] = []
    start_time = time.monotonic()
    i = 0

    try:
        while i < len(execution_plan.steps):
            step = execution_plan.steps[i]
            budget.use_step()

            # Safety gate — user may decline a step; it is then skipped,
            # not aborted.
            risk = classify(step.command, env)
            if not confirm_step(step, risk):
                console.print(f" [orbit.warning]Skipped: {step.description}[/]")
                i += 1
                continue

            # Execute
            result = await executor.run(step)
            record = ExecutionRecord(
                step=step,
                result=result,
                rollback_available=step.rollback_command is not None,
            )
            records.append(record)

            # Record to history (imported lazily so a broken history store
            # cannot break execution)
            try:
                from orbit.memory.history import record as record_history

                record_history(result, goal=goal)
            except Exception:
                pass  # history recording is best-effort

            # Observe — deterministic triage of the result
            decision = observer.analyze(step, result, budget)

            if decision.status == "success":
                show_step_success(step, result)
                i += 1
            elif decision.status == "replan":
                show_replan(decision.analysis)
                budget.use_replan()
                new_plan = await planner.replan(
                    goal, records, decision.analysis, env, budget, provider, config.default_model
                )
                if new_plan.steps:
                    # Replace remaining steps with replan output
                    execution_plan.steps[i:] = new_plan.steps
                else:
                    # Empty replan means unrecoverable for this step; move on.
                    i += 1
            elif decision.status == "fatal":
                show_fatal(decision.analysis)
                break

    except budget_mod.BudgetExhaustedError as e:
        show_fatal(f"Budget exhausted: {e}")

    duration = time.monotonic() - start_time
    completed = sum(1 for r in records if r.result.exit_code == 0)
    show_summary(completed, len(execution_plan.steps), duration)

    return records
|
orbit/agent/observer.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
|
|
6
|
+
from orbit.agent.budget import Budget
|
|
7
|
+
from orbit.schemas.execution import CommandResult
|
|
8
|
+
from orbit.schemas.plan import PlanStep
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class ObserverDecision:
    """Decision from the observer about how to proceed.

    Produced by :func:`analyze` after each executed step; consumed by the
    agent loop to advance, replan, or abort.
    """

    # One of "success" (advance to next step), "replan" (request
    # replacement steps), or "fatal" (abort the loop).
    status: str  # "success", "replan", "fatal"
    # Human-readable explanation, shown to the user and fed to the replanner.
    analysis: str
    # Optional suggested fix command; analyze() in this module never sets it.
    suggested_fix: str | None = None
|
|
20
|
+
def analyze(step: PlanStep, result: CommandResult, budget: Budget) -> ObserverDecision:
    """Analyze a step result and decide: success / replan / fatal. Deterministic, no LLM.

    Decision rules, in order:
      1. Timeout -> fatal.
      2. Exit code equals ``step.expected_exit_code`` -> success. When an
         ``expected_output_pattern`` is set and stdout does not match, the
         step still counts as success but the analysis records the mismatch.
      3. Otherwise failure: replan while the budget allows, else fatal.
    """
    if result.timed_out:
        return ObserverDecision("fatal", f"Command timed out after {step.timeout_seconds}s: {step.command}")

    if result.exit_code == step.expected_exit_code:
        if step.expected_output_pattern:
            if re.search(step.expected_output_pattern, result.stdout):
                return ObserverDecision("success", "Command succeeded with expected output.")
            # FIX: report the actual exit code — success is judged against
            # expected_exit_code, which may be nonzero, so the previous
            # hard-coded "(exit 0)" text could be wrong.
            return ObserverDecision(
                "success",
                f"Command succeeded (exit {result.exit_code}) but output did not match pattern '{step.expected_output_pattern}'.",
            )
        return ObserverDecision("success", "Command succeeded.")

    # Failure: summarize the most informative stream, capped at 500 chars.
    error_summary = (result.stderr[:500] if result.stderr else result.stdout[:500]) or "(no output)"
    if budget.can_replan():
        return ObserverDecision("replan", f"Command failed (exit {result.exit_code}): {error_summary}")

    msg = f"Command failed (exit {result.exit_code}), replan budget exhausted: {error_summary}"
    return ObserverDecision("fatal", msg)
|
orbit/agent/planner.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from orbit.agent.budget import Budget
|
|
4
|
+
from orbit.llm.base import BaseLLM, LLMValidationError
|
|
5
|
+
from orbit.schemas.context import EnvironmentState
|
|
6
|
+
from orbit.schemas.execution import ExecutionRecord
|
|
7
|
+
from orbit.schemas.plan import Plan, TaskDecomposition
|
|
8
|
+
|
|
9
|
+
PLANNER_SYSTEM_PROMPT = """You are a DevOps execution planner. Given a goal, environment context,
|
|
10
|
+
and task decomposition, generate a concrete execution plan.
|
|
11
|
+
|
|
12
|
+
Rules:
|
|
13
|
+
- Each step must have a real shell command
|
|
14
|
+
- Set appropriate risk_level: safe (read-only), caution (modifying), destructive (data loss), nuclear (catastrophic)
|
|
15
|
+
- Add rollback_command for any destructive or caution step where possible
|
|
16
|
+
- Set realistic timeout_seconds (default 30, longer for builds/deploys)
|
|
17
|
+
- Use expected_output_pattern (regex) when you know what success looks like
|
|
18
|
+
- Keep plans minimal: fewest steps to achieve the goal
|
|
19
|
+
|
|
20
|
+
Respond ONLY as JSON matching the provided schema."""
|
|
21
|
+
|
|
22
|
+
REPLAN_SYSTEM_PROMPT = """You are a DevOps replanner. A step in the execution plan failed.
|
|
23
|
+
Given the original goal, what was accomplished so far, and the error,
|
|
24
|
+
generate replacement steps to complete the goal.
|
|
25
|
+
|
|
26
|
+
Rules:
|
|
27
|
+
- Do NOT re-run steps that already succeeded
|
|
28
|
+
- Address the error directly
|
|
29
|
+
- Keep the remaining plan minimal
|
|
30
|
+
- If the error is unrecoverable, return an empty plan
|
|
31
|
+
|
|
32
|
+
Respond ONLY as JSON matching the provided schema."""
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
async def plan(
    goal: str,
    decomposition: TaskDecomposition,
    env: EnvironmentState,
    model_map: dict[str, str],
    budget: Budget,
    provider: BaseLLM,
) -> Plan:
    """Generate an execution plan via the LLM's structured-output mode.

    Charges one LLM call against the budget, then asks the selected model
    for a Plan. Falls back to an empty plan when the response fails schema
    validation or is not a Plan instance.
    """
    budget.use_llm_call()

    # Prefer a reasoning-capable model, then a general one, then the default.
    fallback_model = model_map.get("general", "qwen2.5:7b")
    planning_model = model_map.get("reasoning", fallback_model)

    subtask_descriptions = "\n".join(
        f"- {st.description} (capability: {st.capability})" for st in decomposition.subtasks
    )
    context_text = _build_context(env)

    user_content = f"Goal: {goal}\n\nSubtasks:\n{subtask_descriptions}\n\nEnvironment:\n{context_text}"
    messages = [
        {"role": "system", "content": PLANNER_SYSTEM_PROMPT},
        {"role": "user", "content": user_content},
    ]

    try:
        result = await provider.achat(model=planning_model, messages=messages, schema=Plan, temperature=0.0)
    except LLMValidationError:
        result = None

    if isinstance(result, Plan):
        # Stamp the original goal onto the model's plan before returning it.
        result.goal = goal
        return result

    return Plan(goal=goal, steps=[])
|
|
72
|
+
async def replan(
    goal: str,
    records: list[ExecutionRecord],
    error_analysis: str,
    env: EnvironmentState,
    budget: Budget,
    provider: BaseLLM,
    model: str,
) -> Plan:
    """Generate replacement steps after a failure.

    Args:
        goal: The original user goal.
        records: Execution records so far (used to tell the model which
            steps already succeeded).
        error_analysis: Observer's description of the failure.
        env: Environment state used for prompt context.
        budget: Budget charged one LLM call by this function.
        provider: LLM provider implementing achat with schema validation.
        model: Model name to use for replanning.

    Returns:
        A Plan with replacement steps, or an empty plan when the model deems
        the error unrecoverable or its output fails schema validation.
    """
    budget.use_llm_call()
    # FIX: do NOT call budget.use_replan() here. The agent loop already
    # charges the replan immediately before invoking this function, so
    # charging it again double-counted every replan and halved the
    # effective max_replans_per_step.

    completed_summary = "\n".join(
        f"- {r.step.description}: {'OK' if r.result.exit_code == 0 else 'FAILED'}" for r in records
    )

    messages = [
        {"role": "system", "content": REPLAN_SYSTEM_PROMPT},
        {
            "role": "user",
            "content": (
                f"Goal: {goal}\n\nCompleted steps:\n{completed_summary}\n\n"
                f"Error: {error_analysis}\n\nEnvironment:\n{_build_context(env)}"
            ),
        },
    ]

    try:
        result = await provider.achat(model=model, messages=messages, schema=Plan, temperature=0.0)
        if isinstance(result, Plan):
            return result
    except LLMValidationError:
        pass

    return Plan(goal=goal, steps=[])
|
+
|
|
110
|
+
def _build_context(env: EnvironmentState) -> str:
|
|
111
|
+
parts = [f"[{slot.source}]\n{slot.content}" for slot in env.slots if slot.available and slot.estimated_tokens > 0]
|
|
112
|
+
return "\n\n".join(parts) if parts else "No context available."
|
orbit/agent/wtf.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from orbit.config import get_config
|
|
6
|
+
from orbit.llm.base import LLMValidationError
|
|
7
|
+
from orbit.llm.ollama_provider import OllamaProvider
|
|
8
|
+
from orbit.modules.registry import get_all_modules, load_builtin_modules
|
|
9
|
+
from orbit.schemas.analysis import WtfAnalysis
|
|
10
|
+
from orbit.ui.console import console
|
|
11
|
+
|
|
12
|
+
WTF_SYSTEM_PROMPT = """You are a DevOps error diagnostician. Given a failed command and its output,
|
|
13
|
+
explain the error, identify the root cause, and suggest a fix command.
|
|
14
|
+
|
|
15
|
+
Be specific and actionable. If you're not confident, say so.
|
|
16
|
+
Respond ONLY as JSON matching the provided schema."""
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
async def diagnose() -> None:
    """Diagnose the last failed command.

    Resolution order for the failing command:
      1. ORBIT_LAST_* environment variables.
      2. The history store's last failed entry (best-effort).

    Diagnosis order:
      1. Fast path: substring match against each module's known failure
         patterns — no LLM call.
      2. Slow path: structured LLM analysis via Ollama.
    """
    load_builtin_modules()

    # Try to get last command from environment
    last_cmd = os.environ.get("ORBIT_LAST_COMMAND")
    last_stderr = os.environ.get("ORBIT_LAST_STDERR", "")
    last_stdout = os.environ.get("ORBIT_LAST_STDOUT", "")
    last_exit = os.environ.get("ORBIT_LAST_EXIT_CODE", "")

    if not last_cmd:
        # Fall back to recorded history; failures here are non-fatal.
        try:
            from orbit.memory.history import get_last_failed

            failed = get_last_failed()
            if failed:
                last_cmd = failed["command"]
                last_stderr = failed.get("stderr", "")
                last_stdout = failed.get("stdout", "")
                last_exit = str(failed.get("exit_code", ""))
        except Exception:
            pass

    if not last_cmd:
        console.print("[orbit.warning]No failed command found. Run a command first, then try orbit wtf.[/]")
        return

    console.print(f"[orbit.blue]Diagnosing:[/] {last_cmd}")

    # Fast path: check module failure patterns
    for module in get_all_modules():
        for pattern, explanation in module.get_common_failures().items():
            if pattern in last_stderr or pattern in last_stdout:
                console.print()
                from rich.panel import Panel

                console.print(
                    Panel(
                        f"[orbit.error]Error:[/] {pattern}\n\n[orbit.warning]Explanation:[/] {explanation}",
                        title=f"[orbit.blue]{module.name} module[/]",
                        border_style="orbit.blue",
                    )
                )
                return

    # Slow path: LLM diagnosis
    config = get_config()
    provider = OllamaProvider(host=config.ollama_host, port=config.ollama_port)

    # Cap captured output so the prompt stays small.
    error_context = (
        f"Command: {last_cmd}\nExit code: {last_exit}\nStderr: {last_stderr[:1000]}\nStdout: {last_stdout[:1000]}"
    )

    messages = [
        {"role": "system", "content": WTF_SYSTEM_PROMPT},
        {"role": "user", "content": error_context},
    ]

    try:
        result = await provider.achat(
            model=config.default_model, messages=messages, schema=WtfAnalysis, temperature=0.0
        )
        if isinstance(result, WtfAnalysis):
            _display_analysis(result)
    # FIX: `except (LLMValidationError, Exception)` was redundant — Exception
    # already subsumes LLMValidationError (ruff B014).
    except Exception as e:
        console.print(f"[orbit.error]Could not diagnose: {e}[/]")
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _display_analysis(analysis: WtfAnalysis) -> None:
    """Render a WtfAnalysis as a rich panel on the console."""
    from rich.panel import Panel

    lines = [
        f"[orbit.error]Error:[/] {analysis.error_explanation}",
        f"[orbit.warning]Root cause:[/] {analysis.root_cause}",
        f"[dim]Confidence: {analysis.confidence:.0%}[/]",
    ]
    # Only show the fix section when the model actually proposed a command.
    if analysis.fix_command:
        lines.append(f"\n[orbit.success]Fix:[/] [orbit.command]{analysis.fix_command}[/]")
        lines.append(f"[dim]{analysis.fix_explanation}[/]")

    body = "\n".join(lines)
    console.print()
    console.print(Panel(body, title="[orbit.blue]Diagnosis[/]", border_style="orbit.blue"))