deuscode 0.2.0__tar.gz → 0.3.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {deuscode-0.2.0 → deuscode-0.3.2}/PKG-INFO +1 -1
- {deuscode-0.2.0 → deuscode-0.3.2}/pyproject.toml +1 -1
- deuscode-0.3.2/src/deuscode/__init__.py +1 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/src/deuscode/agent.py +38 -3
- {deuscode-0.2.0 → deuscode-0.3.2}/src/deuscode/config.py +3 -0
- deuscode-0.3.2/src/deuscode/main.py +56 -0
- deuscode-0.3.2/src/deuscode/models.py +57 -0
- deuscode-0.3.2/src/deuscode/runpod.py +97 -0
- deuscode-0.3.2/src/deuscode/setup.py +110 -0
- deuscode-0.3.2/tests/test_models.py +17 -0
- deuscode-0.3.2/tests/test_runpod.py +56 -0
- deuscode-0.3.2/tests/test_setup.py +52 -0
- deuscode-0.2.0/src/deuscode/__init__.py +0 -1
- deuscode-0.2.0/src/deuscode/main.py +0 -35
- {deuscode-0.2.0 → deuscode-0.3.2}/.gitignore +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/LICENSE +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/README.md +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/src/deuscode/repomap.py +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/src/deuscode/tools.py +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/src/deuscode/ui.py +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/tests/__init__.py +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/tests/test_repomap.py +0 -0
- {deuscode-0.2.0 → deuscode-0.3.2}/tests/test_tools.py +0 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
version = "0.3.2"
|
|
@@ -1,10 +1,11 @@
|
|
|
1
1
|
import json
|
|
2
2
|
|
|
3
3
|
import httpx
|
|
4
|
+
import yaml
|
|
4
5
|
|
|
5
|
-
from deuscode.config import Config
|
|
6
|
+
from deuscode.config import Config, CONFIG_PATH
|
|
6
7
|
from deuscode.repomap import generate_repo_map
|
|
7
|
-
from deuscode import tools, ui
|
|
8
|
+
from deuscode import tools, ui, runpod
|
|
8
9
|
|
|
9
10
|
_SYSTEM_BASE = (
|
|
10
11
|
"You are Deus, an AI coding assistant. "
|
|
@@ -13,6 +14,22 @@ _SYSTEM_BASE = (
|
|
|
13
14
|
)
|
|
14
15
|
|
|
15
16
|
|
|
17
|
+
async def run_agent(
    prompt: str,
    path: str = ".",
    model_override: str | None = None,
    no_map: bool = False,
) -> None:
    """Load the user config, run the agent loop for *prompt*, and print the answer.

    Args:
        prompt: The user's request.
        path: Repository path used for repo-map generation.
        model_override: Optional model name overriding the configured one.
        no_map: When True, skip repo-map generation.
    """
    # Imported lazily so importing this module never touches the config file.
    from deuscode.config import load_config

    try:
        config = load_config()
    except FileNotFoundError as e:
        ui.error(str(e))
        return
    try:
        result = await run(prompt, config, path=path, model_override=model_override, no_map=no_map)
    except Exception as e:
        # The pre-0.3 CLI reported agent failures via ui.error instead of
        # letting a raw traceback reach the terminal; restore that behavior.
        ui.error(str(e))
        return
    ui.final_answer(result)
|
|
31
|
+
|
|
32
|
+
|
|
16
33
|
async def run(
|
|
17
34
|
prompt: str,
|
|
18
35
|
config: Config,
|
|
@@ -28,7 +45,9 @@ async def run(
|
|
|
28
45
|
]
|
|
29
46
|
ui.thinking(model)
|
|
30
47
|
async with httpx.AsyncClient(timeout=120.0) as client:
|
|
31
|
-
|
|
48
|
+
result = await _loop(client, messages, model, config)
|
|
49
|
+
await _maybe_auto_stop(config)
|
|
50
|
+
return result
|
|
32
51
|
|
|
33
52
|
|
|
34
53
|
def _build_system_prompt(path: str, no_map: bool) -> str:
|
|
@@ -79,3 +98,19 @@ async def _execute_tool(tc: dict) -> str:
|
|
|
79
98
|
result = await tools.dispatch(fn["name"], fn.get("arguments", "{}"))
|
|
80
99
|
ui.tool_result(result[:500])
|
|
81
100
|
return result
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
async def _maybe_auto_stop(config: Config) -> None:
    """If auto-stop is enabled, terminate the RunPod pod recorded in the config file.

    Best-effort: failures are printed as a warning and never raised, so a
    completed prompt is not masked by a pod-stop error.
    """
    if not config.auto_stop_runpod:
        return
    # `yaml.safe_load` returns None for an empty file; coerce to {} so the
    # .get() calls below cannot raise AttributeError (matches run_stop_runpod).
    raw = (yaml.safe_load(CONFIG_PATH.read_text()) or {}) if CONFIG_PATH.exists() else {}
    pod_id = raw.get("runpod_pod_id")
    api_key = raw.get("runpod_api_key", "")
    if not pod_id:
        return
    ui.console.print(f"[bold yellow]⚡ Auto-stopping RunPod pod {pod_id}...[/bold yellow]")
    try:
        await runpod.stop_pod(api_key, pod_id)
        ui.console.print("[green]✓ Pod stopped. No more charges.[/green]")
    except Exception as e:
        ui.console.print(f"[red]Warning: could not stop pod {pod_id}: {e}[/red]")
|
|
@@ -10,6 +10,7 @@ _DEFAULTS = {
|
|
|
10
10
|
"api_key": "your-key",
|
|
11
11
|
"model": "your-model-name",
|
|
12
12
|
"max_tokens": 8192,
|
|
13
|
+
"auto_stop_runpod": False,
|
|
13
14
|
}
|
|
14
15
|
|
|
15
16
|
|
|
@@ -19,6 +20,7 @@ class Config:
|
|
|
19
20
|
api_key: str
|
|
20
21
|
model: str
|
|
21
22
|
max_tokens: int
|
|
23
|
+
auto_stop_runpod: bool = False
|
|
22
24
|
|
|
23
25
|
|
|
24
26
|
def load_config() -> Config:
|
|
@@ -34,6 +36,7 @@ def load_config() -> Config:
|
|
|
34
36
|
api_key=merged["api_key"],
|
|
35
37
|
model=merged["model"],
|
|
36
38
|
max_tokens=int(merged["max_tokens"]),
|
|
39
|
+
auto_stop_runpod=bool(merged.get("auto_stop_runpod", False)),
|
|
37
40
|
)
|
|
38
41
|
|
|
39
42
|
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""Command-line entry point for Deus.

Wires up the typer application: a hidden ``ask`` command for prompts and a
``setup`` sub-app for RunPod configuration.
"""
import sys
import asyncio
from typing import Optional

import typer

from deuscode import ui
from deuscode.agent import run_agent
from deuscode.setup import run_setup_runpod, run_stop_runpod

app = typer.Typer(
    name="deus",
    help="Deus - AI-powered CLI coding assistant",
    add_completion=False,
    no_args_is_help=True,
)

# "deus setup ..." lives on its own sub-app so option parsing stays isolated.
setup_app = typer.Typer(help="Configure Deus endpoints and models.")
app.add_typer(setup_app, name="setup")


@setup_app.callback(invoke_without_command=True)
def setup_callback(
    ctx: typer.Context,
    runpod: bool = typer.Option(False, "--runpod", help="Configure RunPod GPU endpoint"),
    stop: bool = typer.Option(False, "--stop", help="Stop the current RunPod pod"),
) -> None:
    """Dispatch `deus setup --runpod` / `deus setup --stop`.

    Runs only when no subcommand was invoked; --stop takes precedence
    over --runpod when both flags are given.
    """
    if ctx.invoked_subcommand is not None:
        return
    if stop:
        asyncio.run(run_stop_runpod())
    elif runpod:
        asyncio.run(run_setup_runpod())
    else:
        ui.error("Use --runpod to configure or --stop to stop pod")


@app.command(name="ask", hidden=True)
def ask(
    prompt: str = typer.Argument(..., help="What to ask Deus"),
    path: str = typer.Option(".", "--path", help="Repo path to map"),
    model: Optional[str] = typer.Option(None, "--model", help="Override config model"),
    no_map: bool = typer.Option(False, "--no-map", help="Skip repo-map"),
) -> None:
    """Run the agent for one prompt (hidden: reached via the argv rewrite in main)."""
    asyncio.run(run_agent(prompt, path, model, no_map))


def main() -> None:
    """Console-script entry point.

    Rewrites ``deus "some prompt"`` into ``deus ask "some prompt"`` so the
    bare-prompt form works, while leaving known subcommands and help flags
    untouched.
    """
    known_subcommands = ["setup", "ask", "--help", "-h"]
    if len(sys.argv) > 1 and sys.argv[1] not in known_subcommands:
        sys.argv.insert(1, "ask")
    app()


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""Static catalog of models offered during RunPod setup."""
from typing import TypedDict


class ModelEntry(TypedDict):
    """One selectable model in the setup picker."""

    id: str  # HuggingFace repo id passed to vLLM as MODEL_ID
    label: str  # short display name for the picker table
    category: str  # "Coding" entries are listed before "General"
    vram_gb: int  # minimum GPU memory used to filter RunPod GPU types
    description: str  # one-line blurb shown in the picker table


# Ordering in the UI is derived by the picker (Coding first, then by label),
# so the literal order here is not significant.
MODELS: list[ModelEntry] = [
    {
        "id": "Qwen/Qwen2.5-Coder-32B-Instruct",
        "label": "Qwen2.5-Coder-32B",
        "category": "Coding",
        "vram_gb": 40,
        "description": "Best coding model, recommended",
    },
    {
        "id": "Qwen/Qwen2.5-Coder-7B-Instruct",
        "label": "Qwen2.5-Coder-7B",
        "category": "Coding",
        "vram_gb": 16,
        "description": "Fast and cheap coding model",
    },
    {
        "id": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
        "label": "DeepSeek-Coder-V2-Lite",
        "category": "Coding",
        "vram_gb": 24,
        "description": "Strong coding alternative",
    },
    {
        "id": "meta-llama/Llama-3.1-70B-Instruct",
        "label": "Llama-3.1-70B",
        "category": "General",
        "vram_gb": 80,
        "description": "Powerful general purpose",
    },
    {
        "id": "meta-llama/Llama-3.1-8B-Instruct",
        "label": "Llama-3.1-8B",
        "category": "General",
        "vram_gb": 16,
        "description": "Fast general purpose",
    },
    {
        "id": "mistralai/Mistral-7B-Instruct-v0.3",
        "label": "Mistral-7B",
        "category": "General",
        "vram_gb": 16,
        "description": "Lightweight, fast",
    },
]

# Extra picker row: selecting it makes the setup flow prompt for a manual model id.
CUSTOM_MODEL_OPTION = "Custom (type manually)"
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
|
|
3
|
+
import httpx
|
|
4
|
+
|
|
5
|
+
_API_URL = "https://api.runpod.io/graphql"
|
|
6
|
+
_POLL_INTERVAL = 10
|
|
7
|
+
_TIMEOUT_SECONDS = 300
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _headers(api_key: str) -> dict:
|
|
11
|
+
return {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
async def get_gpu_types(api_key: str) -> list[dict]:
    """Fetch all GPU types (id, display name, VRAM, secure price) from RunPod."""
    query = """
    query {
        gpuTypes {
            id
            displayName
            memoryInGb
            securePrice
        }
    }
    """
    payload = {"query": query}
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.post(_API_URL, headers=_headers(api_key), json=payload)
        response.raise_for_status()
        body = response.json()
        return body["data"]["gpuTypes"]
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
async def start_pod(api_key: str, gpu_type_id: str, model_id: str) -> dict:
    """Deploy an on-demand vLLM pod on the given GPU type.

    Returns the API's pod record (``id`` and ``desiredStatus``).
    Raises httpx.HTTPStatusError on a non-2xx response.
    """
    mutation = """
    mutation($input: PodFindAndDeployOnDemandInput!) {
        podFindAndDeployOnDemand(input: $input) {
            id
            desiredStatus
        }
    }
    """
    variables = {
        "input": {
            "gpuTypeId": gpu_type_id,
            # Official vLLM OpenAI-compatible server image; it reads MODEL_ID
            # from the environment below.
            "imageName": "vllm/vllm-openai:latest",
            "containerDiskInGb": 50,
            "volumeInGb": 50,
            # Port 8000 is the vLLM HTTP port; _extract_endpoint looks for it.
            "ports": "8000/http",
            "env": [
                {"key": "MODEL_ID", "value": model_id},
                # NOTE(review): HF token intentionally left empty — gated
                # models would need the user to supply one. TODO confirm.
                {"key": "HUGGING_FACE_HUB_TOKEN", "value": ""},
            ],
        }
    }
    async with httpx.AsyncClient(timeout=30.0) as client:
        r = await client.post(_API_URL, headers=_headers(api_key), json={"query": mutation, "variables": variables})
        r.raise_for_status()
        return r.json()["data"]["podFindAndDeployOnDemand"]
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
async def stop_pod(api_key: str, pod_id: str) -> bool:
    """Terminate the given pod via the RunPod GraphQL API.

    Returns True once the API call completes; raises on HTTP errors.
    """
    mutation = """
    mutation($input: PodTerminateInput!) {
        podTerminate(input: $input)
    }
    """
    payload = {"query": mutation, "variables": {"input": {"podId": pod_id}}}
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.post(_API_URL, headers=_headers(api_key), json=payload)
        response.raise_for_status()
        return True
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
async def wait_for_ready(api_key: str, pod_id: str) -> str:
    """Poll the pod until it reports RUNNING, then return its public endpoint URL.

    Polls every _POLL_INTERVAL seconds up to _TIMEOUT_SECONDS, then raises
    TimeoutError. May return "" when the pod is RUNNING but no public
    port-8000 mapping is exposed (see _extract_endpoint).
    """
    query = """
    query($podId: String!) {
        pod(input: { podId: $podId }) {
            desiredStatus
            runtime { ports { ip port isIpPublic } }
        }
    }
    """
    # NOTE(review): `elapsed` only counts sleep time, not HTTP round-trips,
    # so the real wall-clock timeout can exceed _TIMEOUT_SECONDS slightly.
    elapsed = 0
    async with httpx.AsyncClient(timeout=30.0) as client:
        while elapsed < _TIMEOUT_SECONDS:
            r = await client.post(_API_URL, headers=_headers(api_key), json={"query": query, "variables": {"podId": pod_id}})
            r.raise_for_status()
            pod = r.json()["data"]["pod"]
            if pod["desiredStatus"] == "RUNNING":
                return _extract_endpoint(pod)
            await asyncio.sleep(_POLL_INTERVAL)
            elapsed += _POLL_INTERVAL
    raise TimeoutError(f"Pod {pod_id} did not become ready within {_TIMEOUT_SECONDS}s")
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _extract_endpoint(pod: dict) -> str:
|
|
94
|
+
for port_info in (pod.get("runtime") or {}).get("ports") or []:
|
|
95
|
+
if port_info.get("isIpPublic") and port_info.get("port") == 8000:
|
|
96
|
+
return f"https://{port_info['ip']}:{port_info['port']}"
|
|
97
|
+
return ""
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import yaml
|
|
2
|
+
from rich.prompt import Prompt, Confirm
|
|
3
|
+
from rich.table import Table
|
|
4
|
+
|
|
5
|
+
from deuscode import ui
|
|
6
|
+
from deuscode.config import CONFIG_PATH
|
|
7
|
+
from deuscode.models import MODELS, CUSTOM_MODEL_OPTION
|
|
8
|
+
from deuscode import runpod
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
async def run_setup_runpod() -> None:
    """Interactive RunPod setup: pick a model and GPU, launch a pod, save config.

    Flow: prompt for the API key, pick a model (or a custom id), list GPUs
    with enough VRAM, confirm the hourly price, optionally enable auto-stop,
    then start the pod, wait for vLLM, and persist the connection details.
    """
    api_key = Prompt.ask("[bold]RunPod API key[/bold]", password=True)
    model_entry = _pick_model()
    # A None entry means the user chose the "custom" row; ask for a raw id.
    model_id = model_entry["id"] if model_entry else Prompt.ask("Enter model ID")
    # Custom models have unknown VRAM needs, so no GPU filtering (0 GB floor).
    vram_needed = model_entry["vram_gb"] if model_entry else 0

    ui.console.print(f"\n[dim]Fetching GPUs with ≥{vram_needed} GB VRAM...[/dim]")
    gpus = await runpod.get_gpu_types(api_key)
    # `or 0` guards against a null memoryInGb in the API response.
    filtered = [g for g in gpus if (g.get("memoryInGb") or 0) >= vram_needed]
    gpu = _pick_gpu(filtered)

    price = gpu.get("securePrice") or "?"
    # Cost gate: default is No so a stray Enter never launches a paid pod.
    if not Confirm.ask(f"This will cost ~${price}/hr. Continue?", default=False):
        ui.console.print("[dim]Aborted.[/dim]")
        return

    auto_stop = Confirm.ask("Auto-stop RunPod pod after each prompt completes?", default=False)

    pod_id = await _start_with_spinner(api_key, gpu["id"], model_id)
    endpoint = await _wait_with_spinner(api_key, pod_id)
    _save_config(endpoint, api_key, model_id, pod_id, auto_stop)
    ui.final_answer(
        f"✓ Deus is ready. Run: deus 'your prompt'\n\n"
        f"⚠ Stop your pod manually anytime: deus setup --stop\n"
        f"Current pod: {pod_id} (~${price}/hr)"
    )
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
async def run_stop_runpod() -> None:
    """Stop the pod recorded in the config file and clear its id on success."""
    if not CONFIG_PATH.exists():
        ui.error("No active RunPod pod found in ~/.deus/config.yaml")
        return
    config_data = yaml.safe_load(CONFIG_PATH.read_text()) or {}
    pod_id = config_data.get("runpod_pod_id")
    if not pod_id:
        ui.error("No active RunPod pod found in ~/.deus/config.yaml")
        return
    api_key = config_data.get("runpod_api_key", "")
    ui.console.print(f"[dim]Stopping pod {pod_id}...[/dim]")
    try:
        success = await runpod.stop_pod(api_key, pod_id)
    except Exception as e:
        ui.error(f"Failed to stop pod {pod_id}: {e}\nStop manually at runpod.io/console")
        return
    if not success:
        ui.error(f"Failed to stop pod {pod_id}.\nStop manually at runpod.io/console")
        return
    # Drop the pod id so subsequent --stop calls report "no active pod".
    config_data.pop("runpod_pod_id", None)
    CONFIG_PATH.write_text(yaml.dump(config_data, default_flow_style=False))
    ui.final_answer("✓ Pod stopped. No more charges.")
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _pick_model() -> dict | None:
    """Show the model picker table and return the chosen entry.

    Returns None when the user picks the custom-model row (or any number
    outside the listed range), signalling the caller to prompt for a raw id.
    """
    table = Table(title="Available Models")
    for col in ("#", "Model", "Category", "VRAM", "Description"):
        table.add_column(col)
    # Coding models first, then alphabetical by label within each group.
    ordered = sorted(MODELS, key=lambda m: (0 if m["category"] == "Coding" else 1, m["label"]))
    for i, m in enumerate(ordered, 1):
        table.add_row(str(i), m["label"], m["category"], f"{m['vram_gb']} GB", m["description"])
    table.add_row(str(len(ordered) + 1), CUSTOM_MODEL_OPTION, "", "", "")
    ui.console.print(table)
    idx = int(Prompt.ask("Pick a model", default="1")) - 1
    # Bound on both sides: "0" or a negative entry previously produced a
    # negative index that silently selected a model from the END of the list.
    return ordered[idx] if 0 <= idx < len(ordered) else None
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _pick_gpu(gpus: list[dict]) -> dict:
    """Show the GPU picker table and return the chosen GPU record.

    Raises ValueError when no GPUs are available or the user's pick is out
    of range.
    """
    # An empty list previously fell through to an opaque IndexError on the
    # default pick; fail with an actionable message instead.
    if not gpus:
        raise ValueError("No GPUs available with enough VRAM for this model")
    table = Table(title="Available GPUs")
    for col in ("#", "GPU Name", "VRAM", "Price/hr"):
        table.add_column(col)
    for i, g in enumerate(gpus, 1):
        table.add_row(str(i), g.get("displayName", ""), f"{g.get('memoryInGb', '?')} GB", f"${g.get('securePrice', '?')}")
    ui.console.print(table)
    idx = int(Prompt.ask("Pick a GPU", default="1")) - 1
    # Bound on both sides: "0" or a negative entry previously produced a
    # negative index that silently selected a GPU from the END of the list.
    if not 0 <= idx < len(gpus):
        raise ValueError(f"Pick a number between 1 and {len(gpus)}")
    return gpus[idx]
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
async def _start_with_spinner(api_key: str, gpu_id: str, model_id: str) -> str:
    """Launch the pod with a status message and return the new pod id."""
    ui.console.print("[dim]Starting pod...[/dim]")
    created = await runpod.start_pod(api_key, gpu_id, model_id)
    return created["id"]
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
async def _wait_with_spinner(api_key: str, pod_id: str) -> str:
    """Print a status message, then block until the pod's endpoint is ready."""
    ui.console.print("[dim]Waiting for vLLM to be ready (this takes 2-3 min)...[/dim]")
    endpoint = await runpod.wait_for_ready(api_key, pod_id)
    return endpoint
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _save_config(endpoint: str, api_key: str, model_id: str, pod_id: str, auto_stop: bool) -> None:
    """Merge the RunPod connection details into the Deus config file.

    Preserves any unrelated keys already present in the file.
    """
    CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
    # `yaml.safe_load` returns None for an empty file, which would crash
    # `.update` with AttributeError; coerce to {} like run_stop_runpod does.
    existing = (yaml.safe_load(CONFIG_PATH.read_text()) or {}) if CONFIG_PATH.exists() else {}
    existing.update({
        "base_url": f"{endpoint}/v1",
        "api_key": api_key,
        "model": model_id,
        "runpod_pod_id": pod_id,
        # Stored separately from api_key so the pod can be stopped later even
        # if the serving api_key is changed.
        "runpod_api_key": api_key,
        "auto_stop_runpod": auto_stop,
    })
    CONFIG_PATH.write_text(yaml.dump(existing, default_flow_style=False))
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Schema sanity checks for the static model catalog."""
from deuscode.models import MODELS


def test_all_models_have_required_fields():
    """Every catalog entry must carry the full ModelEntry schema."""
    required = {"id", "label", "category", "vram_gb", "description"}
    for m in MODELS:
        # set <= dict keys view: required must be a subset of the entry's keys
        assert required <= m.keys(), f"Model missing fields: {m}"


def test_vram_values_are_positive():
    """vram_gb drives GPU filtering, so zero/negative values would be nonsense."""
    for m in MODELS:
        assert m["vram_gb"] > 0, f"Non-positive vram_gb for {m['id']}"


def test_no_duplicate_model_ids():
    """Model ids identify picker selections; duplicates would be ambiguous."""
    ids = [m["id"] for m in MODELS]
    assert len(ids) == len(set(ids)), "Duplicate model IDs found"
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""Unit tests for the RunPod GraphQL client, with httpx fully mocked."""
import json
from unittest.mock import AsyncMock, patch, MagicMock

import pytest

from deuscode import runpod


def _mock_response(data: dict, status: int = 200) -> MagicMock:
    """Build a stand-in for an httpx.Response returning *data* from .json()."""
    resp = MagicMock()
    resp.status_code = status
    resp.json.return_value = data
    # no-op so client code calling raise_for_status() never raises
    resp.raise_for_status = MagicMock()
    return resp


@pytest.mark.asyncio
async def test_stop_pod_sends_correct_id():
    """stop_pod must put the pod id into the GraphQL mutation variables."""
    captured = {}

    async def fake_post(url, **kwargs):
        # capture the request body so the assertion can inspect it
        captured["body"] = kwargs.get("json", {})
        return _mock_response({"data": {"podTerminate": None}})

    with patch("deuscode.runpod.httpx.AsyncClient") as MockClient:
        instance = AsyncMock()
        instance.post.side_effect = fake_post
        # make the mock usable as `async with ...` returning itself
        instance.__aenter__ = AsyncMock(return_value=instance)
        instance.__aexit__ = AsyncMock(return_value=False)
        MockClient.return_value = instance

        await runpod.stop_pod("key123", "pod-abc")

    variables = captured["body"].get("variables", {})
    assert variables.get("input", {}).get("podId") == "pod-abc"


@pytest.mark.asyncio
async def test_wait_for_ready_timeout():
    """A pod that never reaches RUNNING must raise TimeoutError."""
    async def fake_post(url, **kwargs):
        return _mock_response({
            "data": {"pod": {"desiredStatus": "STARTING", "runtime": None}}
        })

    # Shrink the polling constants and stub asyncio.sleep so the timeout
    # path runs instantly instead of taking real wall-clock time.
    with patch("deuscode.runpod.httpx.AsyncClient") as MockClient, \
            patch("deuscode.runpod._TIMEOUT_SECONDS", 1), \
            patch("deuscode.runpod._POLL_INTERVAL", 1), \
            patch("asyncio.sleep", new_callable=AsyncMock):
        instance = AsyncMock()
        instance.post.side_effect = fake_post
        instance.__aenter__ = AsyncMock(return_value=instance)
        instance.__aexit__ = AsyncMock(return_value=False)
        MockClient.return_value = instance

        with pytest.raises(TimeoutError):
            await runpod.wait_for_ready("key123", "pod-xyz")
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
"""Unit tests for the setup flow's pod-stop command, using a temp config file."""
from pathlib import Path
from unittest.mock import AsyncMock, patch, MagicMock

import pytest
import yaml

import deuscode.setup as deus_setup
from deuscode.setup import run_stop_runpod


def _write_config(tmp_path: Path, data: dict) -> Path:
    """Write *data* as YAML to a throwaway config file and return its path."""
    cfg = tmp_path / "config.yaml"
    cfg.write_text(yaml.dump(data))
    return cfg


@pytest.mark.asyncio
async def test_run_stop_no_pod_id(tmp_path):
    """Without a recorded pod id the command must error and never call the API."""
    cfg = _write_config(tmp_path, {"runpod_api_key": "key"})
    errors = []
    with patch.object(deus_setup, "CONFIG_PATH", cfg), \
            patch("deuscode.setup.ui.error", side_effect=lambda m: errors.append(m)), \
            patch("deuscode.runpod.stop_pod", new_callable=AsyncMock) as mock_stop:
        await run_stop_runpod()
    assert any("No active RunPod pod" in e for e in errors)
    mock_stop.assert_not_called()


@pytest.mark.asyncio
async def test_run_stop_success(tmp_path):
    """On a successful stop the pod id must be removed from the config file."""
    cfg = _write_config(tmp_path, {"runpod_pod_id": "pod-123", "runpod_api_key": "key"})
    with patch.object(deus_setup, "CONFIG_PATH", cfg), \
            patch("deuscode.setup.runpod.stop_pod", new_callable=AsyncMock, return_value=True), \
            patch("deuscode.setup.ui.final_answer"), \
            patch("deuscode.setup.ui.console"):
        await run_stop_runpod()
    saved = yaml.safe_load(cfg.read_text()) or {}
    assert "runpod_pod_id" not in saved


@pytest.mark.asyncio
async def test_run_stop_failure(tmp_path):
    """On a failed stop the pod id must stay in the config and an error mention it."""
    cfg = _write_config(tmp_path, {"runpod_pod_id": "pod-456", "runpod_api_key": "key"})
    errors = []
    with patch.object(deus_setup, "CONFIG_PATH", cfg), \
            patch("deuscode.setup.runpod.stop_pod", new_callable=AsyncMock, return_value=False), \
            patch("deuscode.setup.ui.error", side_effect=lambda m: errors.append(m)), \
            patch("deuscode.setup.ui.console"):
        await run_stop_runpod()
    saved = yaml.safe_load(cfg.read_text()) or {}
    assert saved.get("runpod_pod_id") == "pod-456"
    assert any("pod-456" in e for e in errors)
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
version = "0.2.0"
|
|
@@ -1,35 +0,0 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
from typing import Optional
|
|
3
|
-
|
|
4
|
-
import typer
|
|
5
|
-
|
|
6
|
-
from deuscode import ui
|
|
7
|
-
from deuscode.config import load_config
|
|
8
|
-
from deuscode import agent
|
|
9
|
-
|
|
10
|
-
app = typer.Typer(help="Deus - AI-powered CLI coding assistant")
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
@app.command()
|
|
14
|
-
def main(
|
|
15
|
-
prompt: str = typer.Argument(..., help="What to ask Deus"),
|
|
16
|
-
path: str = typer.Option(".", "--path", help="Repo path to map"),
|
|
17
|
-
model: Optional[str] = typer.Option(None, "--model", help="Override config model"),
|
|
18
|
-
no_map: bool = typer.Option(False, "--no-map", help="Skip repo-map generation"),
|
|
19
|
-
) -> None:
|
|
20
|
-
try:
|
|
21
|
-
config = load_config()
|
|
22
|
-
except FileNotFoundError as e:
|
|
23
|
-
ui.error(str(e))
|
|
24
|
-
raise typer.Exit(1)
|
|
25
|
-
|
|
26
|
-
try:
|
|
27
|
-
result = asyncio.run(agent.run(prompt, config, path=path, model_override=model, no_map=no_map))
|
|
28
|
-
ui.final_answer(result)
|
|
29
|
-
except Exception as e:
|
|
30
|
-
ui.error(str(e))
|
|
31
|
-
raise typer.Exit(1)
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
if __name__ == "__main__":
|
|
35
|
-
app()
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|