deuscode 0.1.0__tar.gz → 0.3.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deuscode-0.3.1/.gitignore +8 -0
- {deuscode-0.1.0 → deuscode-0.3.1}/PKG-INFO +6 -1
- {deuscode-0.1.0 → deuscode-0.3.1}/pyproject.toml +12 -1
- deuscode-0.3.1/src/deuscode/__init__.py +1 -0
- deuscode-0.3.1/src/deuscode/agent.py +100 -0
- deuscode-0.3.1/src/deuscode/config.py +45 -0
- deuscode-0.3.1/src/deuscode/main.py +56 -0
- deuscode-0.3.1/src/deuscode/models.py +57 -0
- deuscode-0.3.1/src/deuscode/repomap.py +69 -0
- deuscode-0.3.1/src/deuscode/runpod.py +97 -0
- deuscode-0.3.1/src/deuscode/setup.py +110 -0
- deuscode-0.3.1/src/deuscode/tools.py +115 -0
- deuscode-0.3.1/src/deuscode/ui.py +30 -0
- deuscode-0.3.1/tests/__init__.py +0 -0
- deuscode-0.3.1/tests/test_models.py +17 -0
- deuscode-0.3.1/tests/test_repomap.py +59 -0
- deuscode-0.3.1/tests/test_runpod.py +56 -0
- deuscode-0.3.1/tests/test_setup.py +52 -0
- deuscode-0.3.1/tests/test_tools.py +45 -0
- deuscode-0.1.0/.claude/settings.local.json +0 -10
- deuscode-0.1.0/src/deuscode/__init__.py +0 -1
- deuscode-0.1.0/src/deuscode/main.py +0 -15
- {deuscode-0.1.0 → deuscode-0.3.1}/LICENSE +0 -0
- {deuscode-0.1.0 → deuscode-0.3.1}/README.md +0 -0
|
@@ -1,12 +1,17 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: deuscode
|
|
3
|
-
Version: 0.1.0
|
|
3
|
+
Version: 0.3.1
|
|
4
4
|
Summary: AI-powered multi-agent CLI coding assistant for local LLMs
|
|
5
5
|
License: AGPL-3.0-or-later
|
|
6
6
|
License-File: LICENSE
|
|
7
7
|
Requires-Python: >=3.12
|
|
8
|
+
Requires-Dist: httpx
|
|
9
|
+
Requires-Dist: pyyaml
|
|
8
10
|
Requires-Dist: rich
|
|
9
11
|
Requires-Dist: typer
|
|
12
|
+
Provides-Extra: dev
|
|
13
|
+
Requires-Dist: pytest; extra == 'dev'
|
|
14
|
+
Requires-Dist: pytest-asyncio; extra == 'dev'
|
|
10
15
|
Description-Content-Type: text/markdown
|
|
11
16
|
|
|
12
17
|
# deuscode
|
|
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "deuscode"
|
|
7
|
-
version = "0.1.0"
|
|
7
|
+
version = "0.3.1"
|
|
8
8
|
description = "AI-powered multi-agent CLI coding assistant for local LLMs"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
license = { text = "AGPL-3.0-or-later" }
|
|
@@ -12,6 +12,14 @@ requires-python = ">=3.12"
|
|
|
12
12
|
dependencies = [
|
|
13
13
|
"typer",
|
|
14
14
|
"rich",
|
|
15
|
+
"httpx",
|
|
16
|
+
"pyyaml",
|
|
17
|
+
]
|
|
18
|
+
|
|
19
|
+
[project.optional-dependencies]
|
|
20
|
+
dev = [
|
|
21
|
+
"pytest",
|
|
22
|
+
"pytest-asyncio",
|
|
15
23
|
]
|
|
16
24
|
|
|
17
25
|
[project.scripts]
|
|
@@ -19,3 +27,6 @@ deus = "deuscode.main:app"
|
|
|
19
27
|
|
|
20
28
|
[tool.hatch.build.targets.wheel]
|
|
21
29
|
packages = ["src/deuscode"]
|
|
30
|
+
|
|
31
|
+
[tool.pytest.ini_options]
|
|
32
|
+
asyncio_mode = "auto"
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
version = "0.3.1"
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
import json
|
|
2
|
+
|
|
3
|
+
import httpx
|
|
4
|
+
import yaml
|
|
5
|
+
|
|
6
|
+
from deuscode.config import Config, CONFIG_PATH
|
|
7
|
+
from deuscode.repomap import generate_repo_map
|
|
8
|
+
from deuscode import tools, ui, runpod
|
|
9
|
+
|
|
10
|
+
# Base system prompt shared by every conversation; a repo map may be
# appended per-request (see _build_system_prompt).
_SYSTEM_BASE = (
    "You are Deus, an AI coding assistant. "
    "You have access to tools to read/write files and run shell commands. "
    "Always explain what you are doing before calling a tool."
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
async def run(
    prompt: str,
    config: Config,
    path: str = ".",
    model_override: str | None = None,
    no_map: bool = False,
) -> str:
    """Run one agent conversation and return the model's final answer.

    Builds the system prompt (optionally with a repo map of *path*), drives
    the chat/tool loop over a single HTTP client, then optionally auto-stops
    the RunPod pod as configured.
    """
    active_model = model_override if model_override else config.model
    conversation = [
        {"role": "system", "content": _build_system_prompt(path, no_map)},
        {"role": "user", "content": prompt},
    ]
    ui.thinking(active_model)
    async with httpx.AsyncClient(timeout=120.0) as client:
        answer = await _loop(client, conversation, active_model, config)
    await _maybe_auto_stop(config)
    return answer
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _build_system_prompt(path: str, no_map: bool) -> str:
    """Compose the system prompt, appending a repo map of *path* unless disabled."""
    if no_map:
        return _SYSTEM_BASE
    return f"{_SYSTEM_BASE}\n\n## Repo Map\n{generate_repo_map(path)}"
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
async def _loop(client: httpx.AsyncClient, messages: list, model: str, config: Config) -> str:
    """Drive the chat/tool cycle until the model replies without tool calls."""
    while True:
        reply = (await _chat(client, messages, model, config))["choices"][0]["message"]
        messages.append(reply)

        pending = reply.get("tool_calls") or []
        if not pending:
            # No tools requested: the assistant's text is the final answer.
            return reply.get("content") or ""

        for call in pending:
            outcome = await _execute_tool(call)
            messages.append(
                {"role": "tool", "tool_call_id": call["id"], "content": outcome}
            )
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
async def _chat(client: httpx.AsyncClient, messages: list, model: str, config: Config) -> dict:
    """POST one chat-completion request and return the decoded JSON body.

    Raises httpx.HTTPStatusError on non-2xx responses.
    """
    endpoint = f"{config.base_url.rstrip('/')}/chat/completions"
    auth = {"Authorization": f"Bearer {config.api_key}", "Content-Type": "application/json"}
    payload = {
        "model": model,
        "messages": messages,
        "tools": tools.TOOL_SCHEMAS,
        "max_tokens": config.max_tokens,
    }
    response = await client.post(endpoint, headers=auth, json=payload)
    response.raise_for_status()
    return response.json()
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
async def _execute_tool(tc: dict) -> str:
    """Execute one model-issued tool call and return its textual result.

    The argument payload comes from the model, so it may be malformed JSON;
    parse it once up front and report a parse failure back as an error
    string instead of crashing the whole agent loop (the old code called
    json.loads on the same string twice and let the exception propagate).
    """
    fn = tc["function"]
    raw_args = fn.get("arguments", "{}")
    try:
        parsed = json.loads(raw_args)
    except json.JSONDecodeError as e:
        return f"Error: invalid tool arguments: {e}"
    ui.tool_call(fn["name"], parsed)
    result = await tools.dispatch(fn["name"], raw_args)
    ui.tool_result(result[:500])  # keep console output readable
    return result
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
async def _maybe_auto_stop(config: Config) -> None:
    """Stop the recorded RunPod pod after a prompt, if auto-stop is enabled.

    The pod ID and RunPod API key are read straight from the YAML file since
    they are not part of the Config dataclass.  Failures are reported but not
    raised — a stuck pod must not mask the agent's answer.
    """
    if not config.auto_stop_runpod:
        return
    # yaml.safe_load returns None for an empty file; coerce to {} so the
    # .get() calls below cannot raise AttributeError.
    raw = (yaml.safe_load(CONFIG_PATH.read_text()) or {}) if CONFIG_PATH.exists() else {}
    pod_id = raw.get("runpod_pod_id")
    if not pod_id:
        return
    api_key = raw.get("runpod_api_key", "")
    ui.console.print(f"[bold yellow]⚡ Auto-stopping RunPod pod {pod_id}...[/bold yellow]")
    try:
        await runpod.stop_pod(api_key, pod_id)
        ui.console.print("[green]✓ Pod stopped. No more charges.[/green]")
    except Exception as e:
        ui.console.print(f"[red]Warning: could not stop pod {pod_id}: {e}[/red]")
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
|
|
4
|
+
import yaml
|
|
5
|
+
|
|
6
|
+
# Location of the user's config file.
CONFIG_PATH = Path.home() / ".deus" / "config.yaml"

# Template values written on first run; users replace the placeholders.
_DEFAULTS = {
    "base_url": "https://your-runpod-endpoint/v1",
    "api_key": "your-key",
    "model": "your-model-name",
    "max_tokens": 8192,
    "auto_stop_runpod": False,
}
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class Config:
    """Resolved runtime settings for talking to the inference endpoint."""

    base_url: str  # OpenAI-compatible endpoint, e.g. "https://host/v1"
    api_key: str  # bearer token sent with every request
    model: str  # model identifier passed to the endpoint
    max_tokens: int  # completion token budget per request
    auto_stop_runpod: bool = False  # stop the RunPod pod after each prompt
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def load_config() -> Config:
    """Load ~/.deus/config.yaml into a Config, creating a template on first run.

    Raises FileNotFoundError after writing the template so the CLI can tell
    the user to fill in their endpoint details.
    """
    if not CONFIG_PATH.exists():
        _create_default_config()
        raise FileNotFoundError(
            f"Config created at {CONFIG_PATH} — please fill in your endpoint details."
        )
    raw = yaml.safe_load(CONFIG_PATH.read_text()) or {}
    settings = {**_DEFAULTS, **raw}  # user values win over defaults
    return Config(
        base_url=settings["base_url"],
        api_key=settings["api_key"],
        model=settings["model"],
        max_tokens=int(settings["max_tokens"]),
        auto_stop_runpod=bool(settings.get("auto_stop_runpod", False)),
    )
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def _create_default_config() -> None:
    """Write the placeholder config file, creating ~/.deus if needed."""
    CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
    serialized = yaml.dump(_DEFAULTS, default_flow_style=False)
    CONFIG_PATH.write_text(serialized)
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
import typer
|
|
5
|
+
|
|
6
|
+
from deuscode import ui
|
|
7
|
+
from deuscode.config import load_config
|
|
8
|
+
from deuscode import agent
|
|
9
|
+
from deuscode.setup import run_setup_runpod, run_stop_runpod
|
|
10
|
+
|
|
11
|
+
app = typer.Typer(invoke_without_command=True, help="Deus - AI-powered CLI coding assistant")


@app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    prompt: Optional[str] = typer.Argument(None, help="What to ask Deus"),
    path: str = typer.Option(".", "--path", help="Repo path to map"),
    model: Optional[str] = typer.Option(None, "--model", help="Override config model"),
    no_map: bool = typer.Option(False, "--no-map", help="Skip repo-map generation"),
) -> None:
    """Default entry point: run the agent on PROMPT unless a subcommand ran."""
    if ctx.invoked_subcommand is not None:
        return
    if not prompt:
        ui.error("Provide a prompt or use a subcommand (e.g. deus setup --runpod)")
        raise typer.Exit(1)
    try:
        config = load_config()
    except FileNotFoundError as e:
        # First run: a template config was just written; tell the user.
        ui.error(str(e))
        raise typer.Exit(1)
    try:
        answer = asyncio.run(
            agent.run(prompt, config, path=path, model_override=model, no_map=no_map)
        )
        ui.final_answer(answer)
    except Exception as e:
        ui.error(str(e))
        raise typer.Exit(1)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@app.command()
def setup(
    runpod: bool = typer.Option(False, "--runpod", help="Configure a RunPod GPU endpoint"),
    stop: bool = typer.Option(False, "--stop", help="Stop the current RunPod pod"),
) -> None:
    """Configure Deus endpoints and models."""
    # --stop wins when both flags are passed.
    if stop:
        asyncio.run(run_stop_runpod())
        return
    if runpod:
        asyncio.run(run_setup_runpod())
        return
    ui.error("Use --runpod to configure or --stop to stop pod")
    raise typer.Exit(1)


if __name__ == "__main__":
    app()
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
from typing import TypedDict
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class ModelEntry(TypedDict):
    """Catalog row describing a deployable model."""

    id: str  # HuggingFace model identifier
    label: str  # short display name shown in tables
    category: str  # "Coding" or "General"
    vram_gb: int  # minimum GPU memory required, in GB
    description: str  # one-line blurb for the picker


def _entry(id: str, label: str, category: str, vram_gb: int, description: str) -> ModelEntry:
    # Small builder that keeps the catalog below compact and uniform.
    return {
        "id": id,
        "label": label,
        "category": category,
        "vram_gb": vram_gb,
        "description": description,
    }


# Curated model catalog, coding models first.
MODELS: list[ModelEntry] = [
    _entry("Qwen/Qwen2.5-Coder-32B-Instruct", "Qwen2.5-Coder-32B", "Coding", 40,
           "Best coding model, recommended"),
    _entry("Qwen/Qwen2.5-Coder-7B-Instruct", "Qwen2.5-Coder-7B", "Coding", 16,
           "Fast and cheap coding model"),
    _entry("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", "DeepSeek-Coder-V2-Lite", "Coding", 24,
           "Strong coding alternative"),
    _entry("meta-llama/Llama-3.1-70B-Instruct", "Llama-3.1-70B", "General", 80,
           "Powerful general purpose"),
    _entry("meta-llama/Llama-3.1-8B-Instruct", "Llama-3.1-8B", "General", 16,
           "Fast general purpose"),
    _entry("mistralai/Mistral-7B-Instruct-v0.3", "Mistral-7B", "General", 16,
           "Lightweight, fast"),
]

# Sentinel row offered after the catalog so users can type any model ID.
CUSTOM_MODEL_OPTION = "Custom (type manually)"
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import ast
|
|
2
|
+
import re
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
MAX_CHARS = 4000  # hard cap on the rendered map
SKIP_DIRS = {".git", "node_modules", "vendor", "__pycache__", ".venv", "dist", "build"}
SKIP_FILES = {".env"}

_TRUNCATION_MARKER = "\n... [truncated]"


def generate_repo_map(path: str) -> str:
    """Return a compact textual map of the repo rooted at *path*.

    Directories are rendered as an indented tree; Python and PHP files also
    list their class/function signatures.  Output never exceeds MAX_CHARS.
    """
    root = Path(path).resolve()
    entries: list[str] = []
    _walk(root, root, entries)
    rendered = "\n".join(entries)
    if len(rendered) > MAX_CHARS:
        rendered = rendered[: MAX_CHARS - len(_TRUNCATION_MARKER)] + _TRUNCATION_MARKER
    return rendered


def _walk(root: Path, current: Path, lines: list[str], depth: int = 0) -> None:
    """Append tree entries for *current* to *lines*, recursing into subdirs."""
    pad = "  " * depth
    for child in sorted(current.iterdir()):
        name = child.name
        # Hidden entries and known artifact directories are omitted.
        if name.startswith(".") or name in SKIP_DIRS or name in SKIP_FILES:
            continue
        if child.is_dir():
            lines.append(f"{pad}{name}/")
            _walk(root, child, lines, depth + 1)
        elif child.is_file():
            _append_file_entry(child, pad, lines)


def _append_file_entry(path: Path, indent: str, lines: list[str]) -> None:
    """Add a file line, plus indented signatures for recognized languages."""
    lines.append(f"{indent}{path.name}")
    extractor = _SIG_EXTRACTORS.get(path.suffix)
    if extractor is not None:
        lines.extend(f"{indent}  {sig}" for sig in extractor(path))


def _extract_python_signatures(path: Path) -> list[str]:
    """Collect class and (async) function signatures from a Python file."""
    source = path.read_text(encoding="utf-8", errors="ignore")
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return []  # unparsable file: list it without signatures
    found: list[str] = []
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef):
            found.append(f"class {node.name}")
        elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            params = ", ".join(a.arg for a in node.args.args)
            found.append(f"def {node.name}({params})")
    return found


def _extract_php_signatures(path: Path) -> list[str]:
    """Regex-scrape class and function signatures from a PHP file."""
    text = path.read_text(encoding="utf-8", errors="ignore")
    found = [f"class {c}" for c in re.findall(r"class\s+(\w+)", text)]
    for fn_name, fn_args in re.findall(r"function\s+(\w+)\s*\(([^)]*)\)", text):
        found.append(f"function {fn_name}({fn_args.strip()})")
    return found


# Per-extension signature extractors used by _append_file_entry.
_SIG_EXTRACTORS = {
    ".py": _extract_python_signatures,
    ".php": _extract_php_signatures,
}
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
|
|
3
|
+
import httpx
|
|
4
|
+
|
|
5
|
+
# RunPod GraphQL API endpoint and pod-readiness polling parameters.
_API_URL = "https://api.runpod.io/graphql"
_POLL_INTERVAL = 10  # seconds between readiness polls
_TIMEOUT_SECONDS = 300  # give up waiting for a pod after 5 minutes


def _headers(api_key: str) -> dict:
    """Build the auth/JSON headers attached to every RunPod API request."""
    return {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
async def get_gpu_types(api_key: str) -> list[dict]:
    """Fetch every GPU type (id, name, VRAM, secure-cloud price) from RunPod."""
    gpu_query = """
    query {
      gpuTypes {
        id
        displayName
        memoryInGb
        securePrice
      }
    }
    """
    async with httpx.AsyncClient(timeout=30.0) as client:
        resp = await client.post(_API_URL, headers=_headers(api_key), json={"query": gpu_query})
        resp.raise_for_status()
        return resp.json()["data"]["gpuTypes"]
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
async def start_pod(api_key: str, gpu_type_id: str, model_id: str) -> dict:
    """Deploy an on-demand vLLM pod on *gpu_type_id* serving *model_id*.

    Returns the GraphQL payload: {"id": ..., "desiredStatus": ...}.
    """
    deploy_mutation = """
    mutation($input: PodFindAndDeployOnDemandInput!) {
      podFindAndDeployOnDemand(input: $input) {
        id
        desiredStatus
      }
    }
    """
    pod_spec = {
        "gpuTypeId": gpu_type_id,
        "imageName": "vllm/vllm-openai:latest",
        "containerDiskInGb": 50,
        "volumeInGb": 50,
        "ports": "8000/http",
        "env": [
            {"key": "MODEL_ID", "value": model_id},
            # Empty token: public models only — assumes no gated models; verify.
            {"key": "HUGGING_FACE_HUB_TOKEN", "value": ""},
        ],
    }
    body = {"query": deploy_mutation, "variables": {"input": pod_spec}}
    async with httpx.AsyncClient(timeout=30.0) as client:
        resp = await client.post(_API_URL, headers=_headers(api_key), json=body)
        resp.raise_for_status()
        return resp.json()["data"]["podFindAndDeployOnDemand"]
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
async def stop_pod(api_key: str, pod_id: str) -> bool:
    """Terminate *pod_id* via the GraphQL API; True if the request succeeded.

    Raises httpx.HTTPStatusError on a non-2xx response.
    """
    terminate = """
    mutation($input: PodTerminateInput!) {
      podTerminate(input: $input)
    }
    """
    body = {"query": terminate, "variables": {"input": {"podId": pod_id}}}
    async with httpx.AsyncClient(timeout=30.0) as client:
        resp = await client.post(_API_URL, headers=_headers(api_key), json=body)
        resp.raise_for_status()
    return True
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
async def wait_for_ready(api_key: str, pod_id: str) -> str:
    """Poll until the pod reports RUNNING, then return its public endpoint URL.

    Raises TimeoutError if the pod is not running within _TIMEOUT_SECONDS.
    """
    status_query = """
    query($podId: String!) {
      pod(input: { podId: $podId }) {
        desiredStatus
        runtime { ports { ip port isIpPublic } }
      }
    }
    """
    attempts = _TIMEOUT_SECONDS // _POLL_INTERVAL
    async with httpx.AsyncClient(timeout=30.0) as client:
        for _ in range(attempts):
            resp = await client.post(
                _API_URL,
                headers=_headers(api_key),
                json={"query": status_query, "variables": {"podId": pod_id}},
            )
            resp.raise_for_status()
            pod = resp.json()["data"]["pod"]
            if pod["desiredStatus"] == "RUNNING":
                return _extract_endpoint(pod)
            await asyncio.sleep(_POLL_INTERVAL)
    raise TimeoutError(f"Pod {pod_id} did not become ready within {_TIMEOUT_SECONDS}s")
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _extract_endpoint(pod: dict) -> str:
    """Return the public https URL for port 8000, or "" when not exposed yet."""
    runtime = pod.get("runtime") or {}
    for entry in runtime.get("ports") or []:
        if entry.get("port") == 8000 and entry.get("isIpPublic"):
            return f"https://{entry['ip']}:{entry['port']}"
    return ""
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import yaml
|
|
2
|
+
from rich.prompt import Prompt, Confirm
|
|
3
|
+
from rich.table import Table
|
|
4
|
+
|
|
5
|
+
from deuscode import ui
|
|
6
|
+
from deuscode.config import CONFIG_PATH
|
|
7
|
+
from deuscode.models import MODELS, CUSTOM_MODEL_OPTION
|
|
8
|
+
from deuscode import runpod
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
async def run_setup_runpod() -> None:
    """Interactive wizard: pick a model and GPU, launch a pod, save config."""
    api_key = Prompt.ask("[bold]RunPod API key[/bold]", password=True)
    chosen = _pick_model()
    if chosen:
        model_id = chosen["id"]
        vram_needed = chosen["vram_gb"]
    else:
        # Custom model: the user types an ID and we cannot estimate VRAM.
        model_id = Prompt.ask("Enter model ID")
        vram_needed = 0

    ui.console.print(f"\n[dim]Fetching GPUs with ≥{vram_needed} GB VRAM...[/dim]")
    all_gpus = await runpod.get_gpu_types(api_key)
    candidates = [g for g in all_gpus if (g.get("memoryInGb") or 0) >= vram_needed]
    gpu = _pick_gpu(candidates)

    price = gpu.get("securePrice") or "?"
    if not Confirm.ask(f"This will cost ~${price}/hr. Continue?", default=False):
        ui.console.print("[dim]Aborted.[/dim]")
        return

    auto_stop = Confirm.ask("Auto-stop RunPod pod after each prompt completes?", default=False)

    pod_id = await _start_with_spinner(api_key, gpu["id"], model_id)
    endpoint = await _wait_with_spinner(api_key, pod_id)
    _save_config(endpoint, api_key, model_id, pod_id, auto_stop)
    ui.final_answer(
        f"✓ Deus is ready. Run: deus 'your prompt'\n\n"
        f"⚠ Stop your pod manually anytime: deus setup --stop\n"
        f"Current pod: {pod_id} (~${price}/hr)"
    )
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
async def run_stop_runpod() -> None:
    """Terminate the pod recorded in the config file and clear its ID."""
    if not CONFIG_PATH.exists():
        ui.error("No active RunPod pod found in ~/.deus/config.yaml")
        return
    data = yaml.safe_load(CONFIG_PATH.read_text()) or {}
    pod_id = data.get("runpod_pod_id")
    if not pod_id:
        ui.error("No active RunPod pod found in ~/.deus/config.yaml")
        return
    key = data.get("runpod_api_key", "")
    ui.console.print(f"[dim]Stopping pod {pod_id}...[/dim]")
    try:
        stopped = await runpod.stop_pod(key, pod_id)
    except Exception as e:
        ui.error(f"Failed to stop pod {pod_id}: {e}\nStop manually at runpod.io/console")
        return
    if not stopped:
        ui.error(f"Failed to stop pod {pod_id}.\nStop manually at runpod.io/console")
        return
    # Drop the stale pod ID so a later --stop reports "no active pod".
    data.pop("runpod_pod_id", None)
    CONFIG_PATH.write_text(yaml.dump(data, default_flow_style=False))
    ui.final_answer("✓ Pod stopped. No more charges.")
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _pick_model() -> dict | None:
    """Show the model catalog and return the chosen entry.

    Returns None when the user picks the "custom" row or enters any
    out-of-range number, signalling the caller to ask for a model ID.
    """
    table = Table(title="Available Models")
    for col in ("#", "Model", "Category", "VRAM", "Description"):
        table.add_column(col)
    # Coding models first, then alphabetical within each group.
    ordered = sorted(MODELS, key=lambda m: (0 if m["category"] == "Coding" else 1, m["label"]))
    for i, m in enumerate(ordered, 1):
        table.add_row(str(i), m["label"], m["category"], f"{m['vram_gb']} GB", m["description"])
    table.add_row(str(len(ordered) + 1), CUSTOM_MODEL_OPTION, "", "", "")
    ui.console.print(table)
    idx = int(Prompt.ask("Pick a model", default="1")) - 1
    # Reject 0/negative input explicitly: Python's negative indexing would
    # otherwise silently select a model from the END of the list.
    return ordered[idx] if 0 <= idx < len(ordered) else None
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _pick_gpu(gpus: list[dict]) -> dict:
    """Show the GPU table and return the chosen row.

    Raises ValueError when no GPUs qualify or the typed number is out of
    range (previously negative input silently wrapped to the end of the
    list and too-large input raised a bare IndexError).
    """
    if not gpus:
        raise ValueError("No GPUs available with enough VRAM for this model.")
    table = Table(title="Available GPUs")
    for col in ("#", "GPU Name", "VRAM", "Price/hr"):
        table.add_column(col)
    for i, g in enumerate(gpus, 1):
        table.add_row(str(i), g.get("displayName", ""), f"{g.get('memoryInGb', '?')} GB", f"${g.get('securePrice', '?')}")
    ui.console.print(table)
    idx = int(Prompt.ask("Pick a GPU", default="1")) - 1
    if not 0 <= idx < len(gpus):
        raise ValueError(f"Invalid selection: {idx + 1}")
    return gpus[idx]
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
async def _start_with_spinner(api_key: str, gpu_id: str, model_id: str) -> str:
    """Kick off pod deployment and return the new pod's ID."""
    ui.console.print("[dim]Starting pod...[/dim]")
    deployed = await runpod.start_pod(api_key, gpu_id, model_id)
    return deployed["id"]
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
async def _wait_with_spinner(api_key: str, pod_id: str) -> str:
    """Block until the pod is serving and return its endpoint URL."""
    ui.console.print("[dim]Waiting for vLLM to be ready (this takes 2-3 min)...[/dim]")
    return await runpod.wait_for_ready(api_key, pod_id)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _save_config(endpoint: str, api_key: str, model_id: str, pod_id: str, auto_stop: bool) -> None:
    """Merge the new RunPod connection details into ~/.deus/config.yaml."""
    CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
    existing: dict = {}
    if CONFIG_PATH.exists():
        # yaml.safe_load returns None for an empty file; the old code then
        # crashed on .update() — coerce to an empty dict instead.
        existing = yaml.safe_load(CONFIG_PATH.read_text()) or {}
    existing.update({
        "base_url": f"{endpoint}/v1",
        "api_key": api_key,
        "model": model_id,
        "runpod_pod_id": pod_id,
        "runpod_api_key": api_key,
        "auto_stop_runpod": auto_stop,
    })
    CONFIG_PATH.write_text(yaml.dump(existing, default_flow_style=False))
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import subprocess
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from deuscode import ui
|
|
6
|
+
|
|
7
|
+
_CWD = Path.cwd().resolve()
|
|
8
|
+
|
|
9
|
+
TOOL_SCHEMAS = [
|
|
10
|
+
{
|
|
11
|
+
"type": "function",
|
|
12
|
+
"function": {
|
|
13
|
+
"name": "read_file",
|
|
14
|
+
"description": "Read the contents of a file.",
|
|
15
|
+
"parameters": {
|
|
16
|
+
"type": "object",
|
|
17
|
+
"properties": {"path": {"type": "string", "description": "File path to read"}},
|
|
18
|
+
"required": ["path"],
|
|
19
|
+
},
|
|
20
|
+
},
|
|
21
|
+
},
|
|
22
|
+
{
|
|
23
|
+
"type": "function",
|
|
24
|
+
"function": {
|
|
25
|
+
"name": "write_file",
|
|
26
|
+
"description": "Write content to a file after user confirmation.",
|
|
27
|
+
"parameters": {
|
|
28
|
+
"type": "object",
|
|
29
|
+
"properties": {
|
|
30
|
+
"path": {"type": "string", "description": "File path to write"},
|
|
31
|
+
"content": {"type": "string", "description": "Content to write"},
|
|
32
|
+
},
|
|
33
|
+
"required": ["path", "content"],
|
|
34
|
+
},
|
|
35
|
+
},
|
|
36
|
+
},
|
|
37
|
+
{
|
|
38
|
+
"type": "function",
|
|
39
|
+
"function": {
|
|
40
|
+
"name": "bash",
|
|
41
|
+
"description": "Run a shell command after user confirmation.",
|
|
42
|
+
"parameters": {
|
|
43
|
+
"type": "object",
|
|
44
|
+
"properties": {"command": {"type": "string", "description": "Shell command to run"}},
|
|
45
|
+
"required": ["command"],
|
|
46
|
+
},
|
|
47
|
+
},
|
|
48
|
+
},
|
|
49
|
+
]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def _safe_path(path: str) -> Path | None:
|
|
53
|
+
resolved = Path(path).resolve()
|
|
54
|
+
if not str(resolved).startswith(str(_CWD)):
|
|
55
|
+
return None
|
|
56
|
+
return resolved
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
async def read_file(path: str) -> str:
|
|
60
|
+
target = _safe_path(path)
|
|
61
|
+
if target is None:
|
|
62
|
+
return f"Error: path '{path}' is outside the working directory."
|
|
63
|
+
if not target.exists():
|
|
64
|
+
return f"Error: '{path}' does not exist."
|
|
65
|
+
return target.read_text(encoding="utf-8", errors="replace")
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
async def write_file(path: str, content: str) -> str:
    """Write *content* to *path* (confined to CWD) after a diff + confirmation."""
    destination = _safe_path(path)
    if destination is None:
        return f"Error: path '{path}' is outside the working directory."
    if destination.exists():
        # Show what would change before asking for approval.
        previous = destination.read_text(encoding="utf-8", errors="replace")
        _show_diff(previous, content, path)
    if not ui.confirm(f"Write to [bold]{path}[/bold]?"):
        return "Cancelled by user."
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(content, encoding="utf-8")
    return f"Written: {path}"
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
async def bash(command: str) -> str:
    """Run *command* in a shell after explicit user confirmation.

    NOTE: shell=True is deliberate — the model composes full pipelines —
    and every invocation is gated behind the interactive confirm prompt.
    """
    ui.console.print(f"[bold yellow]Command:[/bold yellow] {command}")
    if not ui.confirm("Run this command?"):
        return "Cancelled by user."
    proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    combined = proc.stdout + proc.stderr
    return combined.strip() or "(no output)"
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def _show_diff(old: str, new: str, path: str) -> None:
    """Print a colorized line diff of old → new for *path*.

    Uses difflib so repeated and moved lines are reported correctly; the
    previous membership-based comparison missed any change involving a
    duplicated line and ran in O(n*m).
    """
    import difflib  # local import: only needed when a diff is shown

    ui.console.print(f"[dim]Diff for {path}:[/dim]")
    for line in difflib.ndiff(old.splitlines(), new.splitlines()):
        if line.startswith("- "):
            ui.console.print(f"[red]- {line[2:]}[/red]")
        elif line.startswith("+ "):
            ui.console.print(f"[green]+ {line[2:]}[/green]")
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
# Name → coroutine mapping used by dispatch(); keep in sync with TOOL_SCHEMAS.
TOOL_FUNCTIONS = {
    "read_file": read_file,
    "write_file": write_file,
    "bash": bash,
}


async def dispatch(name: str, args_json: str) -> str:
    """Route a model-issued tool call to its implementation.

    Always returns a string: unknown tools, malformed argument JSON, and
    argument/signature mismatches are reported as error text so the agent
    loop can feed them back to the model instead of crashing.
    """
    fn = TOOL_FUNCTIONS.get(name)
    if fn is None:
        return f"Error: unknown tool '{name}'"
    try:
        args = json.loads(args_json)
    except json.JSONDecodeError as e:
        return f"Error: invalid JSON arguments: {e}"
    if not isinstance(args, dict):
        return f"Error: tool arguments must be a JSON object, got {type(args).__name__}"
    try:
        return await fn(**args)
    except TypeError as e:
        # Wrong/missing argument names from the model.
        return f"Error: bad arguments for tool '{name}': {e}"
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
from rich.console import Console
|
|
2
|
+
from rich.panel import Panel
|
|
3
|
+
from rich.prompt import Confirm
|
|
4
|
+
|
|
5
|
+
# Shared Rich console for all CLI output.
console = Console()


def thinking(model: str) -> None:
    """Show a muted 'working' line naming the active model."""
    console.print(f"[dim]Deus is thinking... ({model})[/dim]")


def tool_call(name: str, args: dict) -> None:
    """Announce an imminent tool invocation with its arguments."""
    rendered = [f"{k}={v!r}" for k, v in args.items()]
    console.print(f"[yellow]⚡ Calling: {name}({', '.join(rendered)})[/yellow]")


def tool_result(text: str) -> None:
    """Echo (possibly truncated) tool output in a muted style."""
    console.print(f"[dim grey]{text}[/dim grey]")


def final_answer(text: str) -> None:
    """Render the assistant's final reply in a cyan panel."""
    console.print(Panel(text, title="[bold cyan]Deus[/bold cyan]", border_style="cyan"))


def error(text: str) -> None:
    """Render an error message in a red panel."""
    console.print(Panel(text, title="[bold red]Error[/bold red]", border_style="red"))


def confirm(prompt: str) -> bool:
    """Ask a yes/no question and return the user's answer."""
    return Confirm.ask(prompt)
|
|
File without changes
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from deuscode.models import MODELS
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def test_all_models_have_required_fields():
    """Every catalog entry carries the full ModelEntry field set."""
    required = {"id", "label", "category", "vram_gb", "description"}
    for entry in MODELS:
        assert required <= entry.keys(), f"Model missing fields: {entry}"


def test_vram_values_are_positive():
    """VRAM requirements must be meaningful (strictly positive)."""
    for entry in MODELS:
        assert entry["vram_gb"] > 0, f"Non-positive vram_gb for {entry['id']}"


def test_no_duplicate_model_ids():
    """Model IDs act as keys and must be unique."""
    seen = set()
    for entry in MODELS:
        assert entry["id"] not in seen, "Duplicate model IDs found"
        seen.add(entry["id"])
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import textwrap
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
|
|
6
|
+
from deuscode.repomap import generate_repo_map
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _make_py(tmp_path: Path, name: str, src: str) -> Path:
|
|
10
|
+
f = tmp_path / name
|
|
11
|
+
f.write_text(textwrap.dedent(src))
|
|
12
|
+
return f
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def test_generates_tree(tmp_path):
|
|
16
|
+
_make_py(tmp_path, "alpha.py", "x = 1")
|
|
17
|
+
_make_py(tmp_path, "beta.py", "y = 2")
|
|
18
|
+
result = generate_repo_map(str(tmp_path))
|
|
19
|
+
assert "alpha.py" in result
|
|
20
|
+
assert "beta.py" in result
|
|
21
|
+
assert len(result) > 0
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def test_extracts_python_signatures(tmp_path):
|
|
25
|
+
_make_py(tmp_path, "module.py", """\
|
|
26
|
+
class Foo:
|
|
27
|
+
pass
|
|
28
|
+
|
|
29
|
+
def bar(x, y):
|
|
30
|
+
pass
|
|
31
|
+
|
|
32
|
+
async def baz(z):
|
|
33
|
+
pass
|
|
34
|
+
""")
|
|
35
|
+
result = generate_repo_map(str(tmp_path))
|
|
36
|
+
assert "class Foo" in result
|
|
37
|
+
assert "def bar" in result
|
|
38
|
+
assert "def baz" in result
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def test_skips_ignored_dirs(tmp_path):
|
|
42
|
+
(tmp_path / ".git").mkdir()
|
|
43
|
+
(tmp_path / ".git" / "config").write_text("gitconfig")
|
|
44
|
+
(tmp_path / "node_modules").mkdir()
|
|
45
|
+
(tmp_path / "node_modules" / "pkg.js").write_text("module.exports={}")
|
|
46
|
+
_make_py(tmp_path, "app.py", "pass")
|
|
47
|
+
result = generate_repo_map(str(tmp_path))
|
|
48
|
+
assert ".git" not in result
|
|
49
|
+
assert "node_modules" not in result
|
|
50
|
+
assert "app.py" in result
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def test_truncates_large_repos(tmp_path):
    """Output for a very large repo is capped at 4000 chars and marked truncated."""
    # The module body is identical for every file, so build it once.
    body = "\n".join(f"def func_{j}(x): pass" for j in range(20))
    for i in range(60):
        (tmp_path / f"module_{i}.py").write_text(body)
    repo_map = generate_repo_map(str(tmp_path))
    assert len(repo_map) <= 4000
    assert "truncated" in repo_map
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from unittest.mock import AsyncMock, patch, MagicMock
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
|
|
6
|
+
from deuscode import runpod
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _mock_response(data: dict, status: int = 200) -> MagicMock:
|
|
10
|
+
resp = MagicMock()
|
|
11
|
+
resp.status_code = status
|
|
12
|
+
resp.json.return_value = data
|
|
13
|
+
resp.raise_for_status = MagicMock()
|
|
14
|
+
return resp
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@pytest.mark.asyncio
async def test_stop_pod_sends_correct_id():
    """stop_pod must send the pod id as ``variables.input.podId`` in its GraphQL body."""
    captured = {}

    async def record_post(url, **kwargs):
        captured["body"] = kwargs.get("json", {})
        return _mock_response({"data": {"podTerminate": None}})

    with patch("deuscode.runpod.httpx.AsyncClient") as client_cls:
        client = AsyncMock()
        client.post.side_effect = record_post
        client.__aenter__ = AsyncMock(return_value=client)
        client.__aexit__ = AsyncMock(return_value=False)
        client_cls.return_value = client

        await runpod.stop_pod("key123", "pod-abc")

    payload = captured["body"].get("variables", {})
    assert payload.get("input", {}).get("podId") == "pod-abc"
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@pytest.mark.asyncio
async def test_wait_for_ready_timeout():
    """wait_for_ready raises TimeoutError when the pod never leaves STARTING."""

    async def always_starting(url, **kwargs):
        payload = {"data": {"pod": {"desiredStatus": "STARTING", "runtime": None}}}
        return _mock_response(payload)

    # Shrink the polling window and stub out real sleeping so the test is fast.
    with patch("deuscode.runpod.httpx.AsyncClient") as client_cls, \
            patch("deuscode.runpod._TIMEOUT_SECONDS", 1), \
            patch("deuscode.runpod._POLL_INTERVAL", 1), \
            patch("asyncio.sleep", new_callable=AsyncMock):
        client = AsyncMock()
        client.post.side_effect = always_starting
        client.__aenter__ = AsyncMock(return_value=client)
        client.__aexit__ = AsyncMock(return_value=False)
        client_cls.return_value = client

        with pytest.raises(TimeoutError):
            await runpod.wait_for_ready("key123", "pod-xyz")
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from unittest.mock import AsyncMock, patch, MagicMock
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
import yaml
|
|
6
|
+
|
|
7
|
+
import deuscode.setup as deus_setup
|
|
8
|
+
from deuscode.setup import run_stop_runpod
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _write_config(tmp_path: Path, data: dict) -> Path:
    """Dump *data* as YAML into ``tmp_path/config.yaml`` and return the path."""
    config_file = tmp_path / "config.yaml"
    config_file.write_text(yaml.dump(data))
    return config_file
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@pytest.mark.asyncio
async def test_run_stop_no_pod_id(tmp_path):
    """Without a saved pod id, run_stop_runpod reports an error and stops nothing."""
    cfg = _write_config(tmp_path, {"runpod_api_key": "key"})
    errors = []
    # Patch stop_pod through the setup module's runpod reference, matching the
    # patch target used by the sibling tests in this file.
    with patch.object(deus_setup, "CONFIG_PATH", cfg), \
            patch("deuscode.setup.ui.error", side_effect=lambda m: errors.append(m)), \
            patch("deuscode.setup.runpod.stop_pod", new_callable=AsyncMock) as mock_stop:
        await run_stop_runpod()
        assert any("No active RunPod pod" in e for e in errors)
        mock_stop.assert_not_called()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@pytest.mark.asyncio
async def test_run_stop_success(tmp_path):
    """A successful stop removes runpod_pod_id from the persisted config."""
    cfg = _write_config(tmp_path, {"runpod_pod_id": "pod-123", "runpod_api_key": "key"})
    with patch.object(deus_setup, "CONFIG_PATH", cfg), \
            patch("deuscode.setup.runpod.stop_pod", new_callable=AsyncMock, return_value=True), \
            patch("deuscode.setup.ui.final_answer"), \
            patch("deuscode.setup.ui.console"):
        await run_stop_runpod()
    persisted = yaml.safe_load(cfg.read_text()) or {}
    assert "runpod_pod_id" not in persisted
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@pytest.mark.asyncio
async def test_run_stop_failure(tmp_path):
    """A failed stop keeps runpod_pod_id in the config and surfaces the pod id."""
    cfg = _write_config(tmp_path, {"runpod_pod_id": "pod-456", "runpod_api_key": "key"})
    errors = []
    with patch.object(deus_setup, "CONFIG_PATH", cfg), \
            patch("deuscode.setup.runpod.stop_pod", new_callable=AsyncMock, return_value=False), \
            patch("deuscode.setup.ui.error", side_effect=lambda m: errors.append(m)), \
            patch("deuscode.setup.ui.console"):
        await run_stop_runpod()
    persisted = yaml.safe_load(cfg.read_text()) or {}
    assert persisted.get("runpod_pod_id") == "pod-456"
    assert any("pod-456" in e for e in errors)
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from unittest.mock import patch
|
|
4
|
+
|
|
5
|
+
import pytest
|
|
6
|
+
|
|
7
|
+
import deuscode.tools as tool_module
|
|
8
|
+
from deuscode.tools import read_file, write_file
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@pytest.fixture(autouse=True)
def set_cwd(tmp_path, monkeypatch):
    """Run each test from *tmp_path* and point the tools module's working dir there."""
    monkeypatch.setattr(tool_module, "_CWD", tmp_path.resolve())
    monkeypatch.chdir(tmp_path)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@pytest.mark.asyncio
async def test_read_file_success(tmp_path):
    """read_file returns the exact contents of an existing file."""
    target = tmp_path / "hello.txt"
    target.write_text("hello world")
    assert await read_file(str(target)) == "hello world"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@pytest.mark.asyncio
async def test_read_file_blocks_path_traversal(tmp_path):
    """Relative paths that escape the sandbox produce an error string."""
    outcome = await read_file("../../etc/passwd")
    assert "Error" in outcome
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@pytest.mark.asyncio
async def test_write_file_creates_file(tmp_path):
    """With confirmation granted, write_file creates the file and reports success."""
    destination = tmp_path / "output.txt"
    with patch("deuscode.ui.confirm", return_value=True):
        outcome = await write_file(str(destination), "new content")
        assert "Written" in outcome
        assert destination.read_text() == "new content"
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@pytest.mark.asyncio
async def test_write_file_rejects_outside_cwd(tmp_path):
    """Writes targeting paths outside the working directory are refused."""
    escape_target = tmp_path.parent / "evil.txt"
    outcome = await write_file(str(escape_target), "bad")
    assert "Error" in outcome
    assert not escape_target.exists()
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
version = "0.1.0"
|
|
@@ -1,15 +0,0 @@
|
|
|
1
|
-
import typer
|
|
2
|
-
from rich.console import Console
|
|
3
|
-
|
|
4
|
-
app = typer.Typer()
|
|
5
|
-
console = Console()
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
@app.command()
|
|
9
|
-
def main():
|
|
10
|
-
"""Deus - AI-powered multi-agent CLI coding assistant."""
|
|
11
|
-
console.print("[bold cyan]Deus v0.1.0[/bold cyan] [dim]- Coming soon[/dim]")
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
if __name__ == "__main__":
|
|
15
|
-
app()
|
|
File without changes
|
|
File without changes
|