mimiry-cli 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mimiry/__init__.py +27 -0
- mimiry/agent.py +150 -0
- mimiry/cli.py +823 -0
- mimiry/cli_config.py +114 -0
- mimiry/cli_formatters.py +136 -0
- mimiry/client.py +472 -0
- mimiry/exceptions.py +73 -0
- mimiry/models.py +98 -0
- mimiry_cli-0.1.1.dist-info/METADATA +447 -0
- mimiry_cli-0.1.1.dist-info/RECORD +14 -0
- mimiry_cli-0.1.1.dist-info/WHEEL +5 -0
- mimiry_cli-0.1.1.dist-info/entry_points.txt +2 -0
- mimiry_cli-0.1.1.dist-info/licenses/LICENSE +21 -0
- mimiry_cli-0.1.1.dist-info/top_level.txt +1 -0
mimiry/__init__.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Mimiry Python SDK - Programmatic access to GPU cloud resources."""
|
|
2
|
+
|
|
3
|
+
__version__ = "0.1.1"
|
|
4
|
+
|
|
5
|
+
from .client import MimiryClient
|
|
6
|
+
from .agent import AgentClient
|
|
7
|
+
from .exceptions import (
|
|
8
|
+
MimiryError,
|
|
9
|
+
AuthenticationError,
|
|
10
|
+
InsufficientCreditsError,
|
|
11
|
+
InsufficientScopeError,
|
|
12
|
+
NotFoundError,
|
|
13
|
+
RateLimitError,
|
|
14
|
+
ServerError,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
__all__ = [
|
|
18
|
+
"MimiryClient",
|
|
19
|
+
"AgentClient",
|
|
20
|
+
"MimiryError",
|
|
21
|
+
"AuthenticationError",
|
|
22
|
+
"InsufficientCreditsError",
|
|
23
|
+
"InsufficientScopeError",
|
|
24
|
+
"NotFoundError",
|
|
25
|
+
"RateLimitError",
|
|
26
|
+
"ServerError",
|
|
27
|
+
]
|
mimiry/agent.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
"""Agent client for the Mimiry CLI AI mode.
|
|
2
|
+
|
|
3
|
+
Sends natural language to the cli-agent edge function, which returns
|
|
4
|
+
a structured tool call that the CLI executes locally.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import Any, Dict, List, Optional
|
|
9
|
+
|
|
10
|
+
import httpx
|
|
11
|
+
|
|
12
|
+
from .exceptions import MimiryError, raise_for_status
|
|
13
|
+
|
|
14
|
+
DEFAULT_AGENT_URL = "https://ypoycmbljujlkmjuhfif.supabase.co/functions/v1/cli-agent"
|
|
15
|
+
|
|
16
|
+
# Tools that mutate state — require user confirmation
|
|
17
|
+
MUTATING_TOOLS = frozenset({
|
|
18
|
+
"submit_job",
|
|
19
|
+
"cancel_job",
|
|
20
|
+
"add_ssh_key",
|
|
21
|
+
"delete_ssh_key",
|
|
22
|
+
"add_registry_credential",
|
|
23
|
+
"delete_registry_credential",
|
|
24
|
+
})
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class AgentAction:
    """A single structured step chosen by the AI agent.

    Carries the tool to invoke, the parameters for that tool, and a
    human-readable explanation of why the agent picked it.
    """

    tool: str
    params: Dict[str, Any]
    explanation: str

    @property
    def is_direct_answer(self) -> bool:
        """True when the agent answered in plain text and no API call is needed."""
        return self.tool == "answer_question"

    @property
    def is_mutating(self) -> bool:
        """True when executing this action would change remote state.

        The CLI should ask the user for confirmation before running it.
        """
        return self.tool in MUTATING_TOOLS
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class AgentClient:
    """Client for the Mimiry CLI agent endpoint.

    Maintains in-memory conversation history for interactive sessions.

    Args:
        api_key: Mimiry API key (``mky_...``).
        agent_url: Override the agent endpoint URL.
        llm_base_url: Custom OpenAI-compatible LLM endpoint (optional).
        llm_api_key: API key for custom LLM endpoint (optional).
        timeout: Request timeout in seconds (default 60).

    Raises:
        MimiryError: If ``api_key`` is empty.
    """

    def __init__(
        self,
        api_key: str,
        agent_url: str = DEFAULT_AGENT_URL,
        llm_base_url: Optional[str] = None,
        llm_api_key: Optional[str] = None,
        timeout: float = 60.0,
    ):
        if not api_key:
            raise MimiryError("api_key is required")

        self._api_key = api_key
        # Normalize: the endpoint is used as-is, so strip a trailing slash.
        self._agent_url = agent_url.rstrip("/")
        self._llm_base_url = llm_base_url
        self._llm_api_key = llm_api_key
        self._timeout = timeout
        self._history: List[Dict[str, str]] = []
        self._client = httpx.Client(timeout=self._timeout)

    def ask(self, message: str, use_history: bool = False) -> AgentAction:
        """Send a message to the agent and get a structured action back.

        Args:
            message: Natural language request.
            use_history: Whether to include conversation history (for chat mode).

        Returns:
            An ``AgentAction`` with tool name, parameters, and explanation.

        Raises:
            MimiryError: On API or agent errors.
        """
        payload: Dict[str, Any] = {"message": message}

        if use_history and self._history:
            payload["conversation_history"] = self._history

        # Pass custom LLM config if set
        if self._llm_base_url:
            payload["llm_base_url"] = self._llm_base_url
        if self._llm_api_key:
            payload["llm_api_key"] = self._llm_api_key

        response = self._client.post(
            self._agent_url,
            json=payload,
            headers={
                "Authorization": f"Bearer {self._api_key}",
                "Content-Type": "application/json",
            },
        )

        # Tolerant decode: the endpoint (or an intermediate proxy/gateway)
        # may return a non-JSON body such as an HTML error page. Previously
        # an unguarded response.json() raised JSONDecodeError and masked the
        # real HTTP error; fall back to an empty dict so the status-code
        # branch below raises a proper MimiryError. A JSON body that is not
        # an object (e.g. a list) is also treated as empty, since the code
        # below requires dict access.
        body: Dict[str, Any] = {}
        if response.content:
            try:
                parsed = response.json()
            except ValueError:
                parsed = None
            if isinstance(parsed, dict):
                body = parsed

        if response.status_code != 200:
            error_msg = body.get("error", f"Agent error (HTTP {response.status_code})")
            raise MimiryError(error_msg, status_code=response.status_code)

        action = AgentAction(
            tool=body.get("tool", "answer_question"),
            params=body.get("params", {}),
            explanation=body.get("explanation", ""),
        )

        # Update history for chat mode (only after a successful round trip,
        # so failed requests never pollute the conversation context).
        if use_history:
            self._history.append({"role": "user", "content": message})
            # Store the assistant's response as context
            if action.is_direct_answer:
                self._history.append({"role": "assistant", "content": action.explanation})
            else:
                summary = f"Called {action.tool}({action.params})"
                self._history.append({"role": "assistant", "content": summary})

        return action

    def clear_history(self) -> None:
        """Clear the in-memory conversation history."""
        self._history.clear()

    def close(self) -> None:
        """Close the underlying HTTP client."""
        self._client.close()

    def __enter__(self) -> "AgentClient":
        return self

    def __exit__(self, *args) -> None:
        self.close()

    def __repr__(self) -> str:
        return f"AgentClient(agent_url={self._agent_url!r})"
|