termagent-cli 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- termagent_cli-0.1.0/PKG-INFO +15 -0
- termagent_cli-0.1.0/README.md +89 -0
- termagent_cli-0.1.0/pyproject.toml +24 -0
- termagent_cli-0.1.0/setup.cfg +4 -0
- termagent_cli-0.1.0/termagent/__init__.py +0 -0
- termagent_cli-0.1.0/termagent/agent/graph.py +64 -0
- termagent_cli-0.1.0/termagent/agent/nodes.py +182 -0
- termagent_cli-0.1.0/termagent/agent/state.py +13 -0
- termagent_cli-0.1.0/termagent/ui.py +345 -0
- termagent_cli-0.1.0/termagent_cli.egg-info/PKG-INFO +15 -0
- termagent_cli-0.1.0/termagent_cli.egg-info/SOURCES.txt +13 -0
- termagent_cli-0.1.0/termagent_cli.egg-info/dependency_links.txt +1 -0
- termagent_cli-0.1.0/termagent_cli.egg-info/entry_points.txt +2 -0
- termagent_cli-0.1.0/termagent_cli.egg-info/requires.txt +10 -0
- termagent_cli-0.1.0/termagent_cli.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: termagent-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Natural language terminal agent for Windows PowerShell
|
|
5
|
+
Requires-Python: >=3.10
|
|
6
|
+
Requires-Dist: langchain-ollama
|
|
7
|
+
Requires-Dist: langchain-groq
|
|
8
|
+
Requires-Dist: langchain-core
|
|
9
|
+
Requires-Dist: langchain
|
|
10
|
+
Requires-Dist: langgraph
|
|
11
|
+
Requires-Dist: pydantic
|
|
12
|
+
Requires-Dist: python-dotenv
|
|
13
|
+
Requires-Dist: ollama
|
|
14
|
+
Requires-Dist: textual
|
|
15
|
+
Requires-Dist: rich
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
# Terminal Agent
|
|
2
|
+
|
|
3
|
+
> A powerful, native Windows application that translates natural language commands into actionable PowerShell commands or provides casual chat responses.
|
|
4
|
+
|
|
5
|
+
## Quick Start
|
|
6
|
+
|
|
7
|
+
Get running in less than 2 minutes!
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
# 1. Install Dependencies
|
|
11
|
+
pip install -r requirements.txt
|
|
12
|
+
|
|
13
|
+
# 2. Run the TUI Dashboard
|
|
14
|
+
python ui.py
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
*Note: The application will prompt you automatically for a Groq API key upon first run.*
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
## Features
|
|
22
|
+
|
|
23
|
+
- **Natural Language to PowerShell:** Ask it to "create a new folder named test" and it seamlessly translates it to `New-Item -ItemType Directory -Name "test"`.
|
|
24
|
+
- **Chat vs Command Routing:** Differentiates between system operations and casual conversation.
|
|
25
|
+
- **Safety First:** A strict safety check layer powered by both an LLM review and a comprehensive blacklist prevents execution of high-risk or destructive commands.
|
|
26
|
+
- **Human-in-the-Loop (HITL):** Before executing any flagged risky command, the agent halts and requests explicit user confirmation.
|
|
27
|
+
- **Rich Terminal UI (TUI):** Includes a visually pleasing and interactive TUI developed using Textual, featuring response animations, real-time command processing, and native aesthetic matching.
|
|
28
|
+
- **Persistent Context:** The `cwd` (Current Working Directory) is updated in real-time.
|
|
29
|
+
- **Extensible Architecture:** LangGraph-based framework makes the prompt flow modular and easy to extend.
|
|
30
|
+
|
|
31
|
+
---
|
|
32
|
+
|
|
33
|
+
## Configuration
|
|
34
|
+
|
|
35
|
+
You can configure the application using environment variables.
|
|
36
|
+
|
|
37
|
+
| Variable | Description | Example |
|
|
38
|
+
|----------|-------------|---------|
|
|
39
|
+
| `GROQ_API_KEY` | Your Groq API key for LLaMA-3.3-70b-versatile access | `gsk_ABCXYZ` |
|
|
40
|
+
|
|
41
|
+
Create a `.env` file in the root directory to store your API key.
|
|
42
|
+
|
|
43
|
+
---
|
|
44
|
+
|
|
45
|
+
## Architecture Flow
|
|
46
|
+
|
|
47
|
+
The underlying logic is designed around a directed graph using LangGraph (`agent/graph.py`).
|
|
48
|
+
|
|
49
|
+

|
|
50
|
+
|
|
51
|
+
### Core Pipeline
|
|
52
|
+
|
|
53
|
+
1. **`generate_command`**: Evaluates the user's prompt (with LLM structural output) to determine intent ('chat' vs 'command') and builds the respective command or chat string.
|
|
54
|
+
2. **Intent Routing**:
|
|
55
|
+
- If `intent == "chat"`, it routes to `chat_node`.
|
|
56
|
+
- If `intent == "command"`, it routes to `check_command`.
|
|
57
|
+
3. **`check_command`**: A secondary safety layer where the system checks the generated PowerShell command against:
|
|
58
|
+
- A robust static `BLACKLIST` of restricted patterns.
|
|
59
|
+
- An LLM-powered security review to evaluate operational risk dynamically.
|
|
60
|
+
4. **`confirm_command`**: If flagged as risky (`is_risky == True`), the workflow stalls for human approval.
|
|
61
|
+
5. **`execute_command`**: The approved (or safe) command is invoked via `subprocess.run()`, returning standard output and tracking state changes such as directory traversal.
|
|
62
|
+
|
|
63
|
+
---
|
|
64
|
+
|
|
65
|
+
## Project Structure
|
|
66
|
+
|
|
67
|
+
```text
|
|
68
|
+
terminal-agent/
|
|
69
|
+
├── main.py # Lightweight, loop-based Command Line Interface.
|
|
70
|
+
├── ui.py # Rich Textual Application serving as the primary frontend dashboard.
|
|
71
|
+
├── viz.py # Lightweight script used for generating the workflow visualization.
|
|
72
|
+
└── agent/ # Core AI agent logic
|
|
73
|
+
├── graph.py # LangGraph state machine mapping edges, paths, and flow conditions.
|
|
74
|
+
├── nodes.py # Encapsulates logic for LLM operations, structured outputs, security evaluation.
|
|
75
|
+
└── state.py # Strongly-typed state definition handling attributes like cwd, text, intent.
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
---
|
|
79
|
+
|
|
80
|
+
## Development
|
|
81
|
+
|
|
82
|
+
- **Visualizing Workflows:** Execute `python viz.py` to regenerate the `graph.png` visualization.
|
|
83
|
+
- **Extending Security:** Refine the list of blocked items by updating the `BLACKLIST` array inside `agent/nodes.py`.
|
|
84
|
+
|
|
85
|
+
---
|
|
86
|
+
|
|
87
|
+
## Safety Disclaimer
|
|
88
|
+
|
|
89
|
+
> **⚠️ Warning:** Even with dual safety layers, running autonomous LLM-generated commands on local file systems carries inherent risks. Always verify prompt contexts and system responses, particularly around OS-integrated tasks.
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=64"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "termagent-cli"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Natural language terminal agent for Windows PowerShell"
|
|
9
|
+
requires-python = ">=3.10"
|
|
10
|
+
dependencies = [
|
|
11
|
+
"langchain-ollama",
|
|
12
|
+
"langchain-groq",
|
|
13
|
+
"langchain-core",
|
|
14
|
+
"langchain",
|
|
15
|
+
"langgraph",
|
|
16
|
+
"pydantic",
|
|
17
|
+
"python-dotenv",
|
|
18
|
+
"ollama",
|
|
19
|
+
"textual",
|
|
20
|
+
"rich",
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
[project.scripts]
|
|
24
|
+
termagent = "termagent.ui:main"
|
|
File without changes
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
from langgraph.graph import START, END, StateGraph
|
|
2
|
+
from .nodes import generate_command, check_command, confirm_command, execute_command, chat_node
|
|
3
|
+
from .state import AgentState
|
|
4
|
+
|
|
5
|
+
def if_risky(state: AgentState) -> str:
    """Route after the safety check: risky commands need confirmation first."""
    return "confirm_command" if state["is_risky"] else "execute_command"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def ask_user(state: AgentState) -> str:
    """Map the human's confirmation answer onto a graph edge label."""
    return "execute" if state["confirmation"] == "yes" else "do_not_execute"
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def route_intent(state: AgentState) -> str:
    """Send command intents to the safety check; everything else is chat."""
    return {"command": "check_command"}.get(state["intent"], "chat_node")
|
|
24
|
+
|
|
25
|
+
# ── Graph wiring ──────────────────────────────────────────────────────────────
# Pipeline shape:
#   START → generate_command → (chat_node | check_command)
#           check_command    → (confirm_command | execute_command)
#           confirm_command  → (execute_command | END)
#           chat_node / execute_command → END
graph = StateGraph(AgentState)

graph.add_node("generate_command", generate_command)
graph.add_node("chat_node", chat_node)
graph.add_node("check_command", check_command)
graph.add_node("confirm_command", confirm_command)
graph.add_node("execute_command", execute_command)


graph.add_edge(START, "generate_command")
# Branch on the classified intent: system operation vs. casual chat.
graph.add_conditional_edges(
    "generate_command",
    route_intent,
    {
        "check_command": "check_command",
        "chat_node": "chat_node"
    }
)
graph.add_edge("chat_node", END)
# Risky commands are detoured through human confirmation.
graph.add_conditional_edges(
    "check_command",
    if_risky,
    {
        "confirm_command": "confirm_command",
        "execute_command": "execute_command"
    }
)
# A declined confirmation ends the run without executing anything.
graph.add_conditional_edges(
    "confirm_command",
    ask_user,
    {
        "execute": "execute_command",
        "do_not_execute": END
    }
)
graph.add_edge("execute_command", END)


# Compiled runnable imported by the CLI/TUI frontends.
app = graph.compile()
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
# from langchain_ollama import ChatOllama
|
|
2
|
+
from langchain_groq import ChatGroq
|
|
3
|
+
from .state import AgentState
|
|
4
|
+
from pydantic import BaseModel, Field
|
|
5
|
+
from typing import Literal
|
|
6
|
+
from langchain_core.messages.human import HumanMessage
|
|
7
|
+
from langchain_core.messages.system import SystemMessage
|
|
8
|
+
import subprocess
|
|
9
|
+
from dotenv import load_dotenv
|
|
10
|
+
load_dotenv()
|
|
11
|
+
|
|
12
|
+
_confirm_fn = None  # Pluggable callback for UI to override confirm_command

# OLLAMA_MODEL = ""

# Static deny-list used by check_command. Matching is a case-insensitive
# SUBSTRING test against the whole generated command, so short entries like
# "iex" or "curl" also flag any command merely containing those letters —
# deliberate over-blocking in favour of safety.
BLACKLIST = [
    # System Critical Paths
    "system32", "system64", "c:\\windows", "c:/windows",

    # Disk/Partition Operations
    "diskpart", "format-volume", "clear-disk", "initialize-disk",

    # Registry
    "regedit", "reg delete", "reg add", "remove-itemproperty",
    "set-itemproperty", "new-itemproperty",

    # Security/Permissions
    "icacls", "takeown", "secedit", "set-acl",
    "disable-localuser", "remove-localuser",

    # Remote Code Execution (MOST DANGEROUS)
    "invoke-expression", "iex", "downloadstring",
    "downloadfile", "start-bitstransfer",
    "invoke-webrequest", "curl", "wget",
    "net.webclient", "start-process http",

    # System State
    "shutdown", "restart-computer", "stop-computer",

    # Scheduled Tasks (can hide malware)
    "register-scheduledtask", "new-scheduledtask",

    # Firewall/Network config
    "netsh", "set-netfirewallrule", "disable-netfirewallrule",

    # Disable Security
    "set-mppreference", "disable-windowsoptionalfeature",
    "uninstall-windowsfeature"
]
|
|
50
|
+
|
|
51
|
+
# NOTE(review): class name breaks PascalCase convention (should be SafetyCheck),
# but it is referenced by check_command, so renaming is a wider change.
class safety_check(BaseModel):
    """Structured-output schema for the LLM security review: one risk verdict."""
    is_risky: bool = Field(..., description="Whether the command is potentially risky")
|
|
53
|
+
|
|
54
|
+
class CommandOutput(BaseModel):
    """Structured output of generate_command: intent plus exactly one of cmd/response filled."""
    intent: Literal["command", "chat"] = Field(..., description="Whether the user request is to execute a command or just a casual chat")
    cmd: str = Field("", description="The PowerShell command to execute, if intent is 'command'")
    response: str = Field("", description="The response to return to the user, if intent is 'chat'")
|
|
58
|
+
|
|
59
|
+
def generate_command(state: AgentState) -> dict:
    """Classify the user's request and produce either a PowerShell command or a chat reply.

    Uses Groq (llama-3.3-70b-versatile) with structured output (CommandOutput).
    Returns a partial state update: {"cmd", "intent", "response"}.
    """

    messages = [
        SystemMessage(content="""
You are a Windows PowerShell assistant that can either generate commands or answer questions.

First, classify the user's intent:
- "command": user wants to perform a system operation
- "chat": user is asking a question or having a conversation

RULES FOR COMMANDS:
- Use simple built-in PowerShell cmdlets only
- Never use Add-Type, .NET assemblies, cmd.exe style commands
- No explanations, no markdown, no backticks
- If the intent is "chat", return an empty string for cmd and provide the answer in response
- If the intent is "command", provide the PowerShell command in cmd and leave response empty

PREFERRED CMDLETS:
- Files/Folders: New-Item, Remove-Item, Copy-Item, Move-Item, Rename-Item, Get-ChildItem
- Read/Write: Set-Content, Get-Content, Add-Content
- Info: Get-Location, Get-Process, Get-Service, ipconfig, whoami

EXAMPLES:
User: create a folder named project
intent: command
cmd: New-Item -ItemType Directory -Name "project"
response: ""

User: delete file hello.txt
intent: command
cmd: Remove-Item -Path "hello.txt"
response: ""

User: write "hello world" to notes.txt
intent: command
cmd: Set-Content -Path "notes.txt" -Value "hello world"
response: ""

User: what are AI agents?
intent: chat
cmd: ""
response: AI agents are autonomous systems that perceive their environment and take actions to achieve goals.

User: how are you?
intent: chat
cmd: ""
response: I'm doing great! How can I help you today?

User: create a file called "readme.txt" and write "hello world" in it
intent: command
cmd: New-Item -ItemType File -Name "readme.txt" -Force; Set-Content -Path "readme.txt" -Value "hello world"
response: ""

"""),
        HumanMessage(content=f"Current working directory: {state['cwd']}\nUser request: {state['text']}")
    ]
    llm = ChatGroq(model="llama-3.3-70b-versatile")
    # llm = ChatOllama(model=OLLAMA_MODEL)  # optional local backend
    model = llm.with_structured_output(CommandOutput)

    response = model.invoke(messages)

    return {"cmd": response.cmd, "intent": response.intent, "response": response.response}
|
|
123
|
+
|
|
124
|
+
def chat_node(state: AgentState) -> AgentState:
    """Pass the LLM's conversational reply through as the final result."""
    reply = state["response"]
    return {"result": reply}
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def check_command(state: AgentState) -> AgentState:
    """Decide whether the generated PowerShell command is risky.

    Two safety layers:
      1. Static case-insensitive substring screen against BLACKLIST.
      2. LLM security review with structured output (safety_check).

    Returns a partial state update: {"is_risky": bool}.
    """
    cmd = state['cmd']

    # Run the cheap static screen first: a blacklist hit is already decisive,
    # so we can skip the slow (and billable) LLM review entirely in that case.
    # The original called the LLM unconditionally.
    cmd_lower = cmd.lower()
    if any(r in cmd_lower for r in BLACKLIST):
        return {"is_risky": True}

    messages = [
        SystemMessage(content=f"""
You are a security analyst reviewing a PowerShell command for potential risks. Analyze this command and determine if it contains any potentially dangerous operations that could harm the system, compromise security, or cause data loss.
"""),
        HumanMessage(content=f"Command to analyze: {cmd}")
    ]

    llm = ChatGroq(model="llama-3.3-70b-versatile")
    # llm = ChatOllama(model=OLLAMA_MODEL)
    model = llm.with_structured_output(safety_check)

    response = model.invoke(messages)

    return {"is_risky": response.is_risky}
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def confirm_command(state: AgentState) -> AgentState:
    """Ask the human to approve a risky command.

    A frontend (e.g. the Textual TUI) may take over the interaction by
    installing a callback in the module-level _confirm_fn hook; otherwise
    we fall back to a plain console prompt.
    """
    if _confirm_fn is not None:
        return _confirm_fn(state)

    # Non-risky commands are auto-approved.
    if not state['is_risky']:
        return {"confirmation": "yes"}

    answer = input(f"Are you sure you want to execute the command: {state['cmd']}? (yes/no): ")
    return {"confirmation": "yes" if answer.lower() == "yes" else "no"}
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def execute_command(state: AgentState) -> AgentState:
    """Run the approved PowerShell command and track the working directory.

    A trailing Get-Location is appended so the last stdout line always
    reports where the shell ended up (e.g. after a cd), letting the agent
    persist directory changes across invocations.
    """
    if state.get('confirmation', 'yes') != "yes":
        return {"result": "Command cancelled by user.", "cwd": state['cwd']}

    completed = subprocess.run(
        ["powershell", "-Command", f"{state['cmd']}; Get-Location | Select-Object -ExpandProperty Path"],
        capture_output=True,
        text=True,
        cwd=state['cwd']
    )

    if completed.returncode != 0:
        return {"result": f"Error: {completed.stderr}", "cwd": state['cwd']}

    out_lines = completed.stdout.strip().splitlines()
    # Last line is the Get-Location path; everything before it is command output.
    new_cwd = out_lines[-1].strip() if out_lines else state['cwd']
    output = "\n".join(out_lines[:-1]) if len(out_lines) > 1 else "Command executed successfully."
    return {"result": output, "cwd": new_cwd}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from typing import TypedDict, Literal, Optional
|
|
2
|
+
|
|
3
|
+
class AgentState(TypedDict):
    """Shared state threaded through the LangGraph pipeline.

    Nodes return partial dicts that LangGraph merges into this state.
    """

    text: str        # raw user request
    cwd: str         # current working directory, updated after each execution
    cmd: str         # generated PowerShell command ("" for chat turns)
    is_risky: bool   # verdict from check_command
    confirmation: Optional[Literal["yes", "no"]]
    # No "= 'command'" default here: TypedDict fields cannot carry default
    # values (rejected by type checkers and invalid per PEP 589). Readers
    # already default at access time, e.g. result.get("intent", "command").
    intent: Optional[Literal["command", "chat"]]
    response: Optional[str]  # chat reply text when intent == "chat"
    result: str              # final output shown to the user
|
|
13
|
+
|
|
@@ -0,0 +1,345 @@
|
|
|
1
|
+
from textual.app import App, ComposeResult
|
|
2
|
+
from textual.widgets import Input, RichLog, Static, Footer
|
|
3
|
+
from textual.containers import Vertical, Horizontal
|
|
4
|
+
from textual.reactive import reactive
|
|
5
|
+
from textual import work
|
|
6
|
+
from textual.timer import Timer
|
|
7
|
+
from rich.text import Text
|
|
8
|
+
from rich.markup import escape
|
|
9
|
+
import os
|
|
10
|
+
|
|
11
|
+
# Banner rendered in the TUI header. NOTE(review): glyph spacing reproduced
# as extracted — verify alignment against the rendered app.
ASCII_LOGO = """
████████╗███████╗██████╗ ███╗   ███╗ █████╗  ██████╗ ███████╗███╗   ██╗████████╗
██╔══╝██╔════╝██╔══██╗████╗ ████║██╔══██╗██╔════╝ ██╔════╝████╗ ██║╚══██╔══╝
██║ █████╗ ██████╔╝██╔████╔██║███████║██║ ███╗█████╗ ██╔██╗ ██║ ██║
██║ ██╔══╝ ██╔══██╗██║╚██╔╝██║██╔══██║██║ ██║██╔══╝ ██║╚██╗██║ ██║
██║ ███████╗██║ ██║██║ ╚═╝ ██║██║ ██║╚██████╔╝███████╗██║ ╚████║ ██║
╚═╝ ╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═╝
"""

TAGLINE = "[ Natural Language → PowerShell • Groq • Windows Native ]"

# Braille spinner frames (cycled every 80ms by TermAgent._tick_spinner)
SPINNER_FRAMES = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

# Textual stylesheet for the whole app (dark navy theme, cyan accents).
CSS = """
Screen {
    background: #0a0e1a;
    layout: vertical;
}

#header {
    height: auto;
    padding: 1 2;
    background: #0a0e1a;
    border-bottom: tall #1a2040;
}

#logo {
    color: #00d4ff;
    text-style: bold;
    content-align: center middle;
}

#tagline {
    color: #3a5080;
    content-align: center middle;
    margin-top: 0;
}

#status-bar {
    height: 1;
    background: #0f1628;
    padding: 0 2;
    layout: horizontal;
}

#cwd-label {
    color: #00d4ff;
    width: auto;
}

#model-label {
    color: #2a4060;
    width: 1fr;
    content-align: right middle;
}

#output-log {
    background: #0a0e1a;
    border: none;
    padding: 1 2;
    scrollbar-color: #1a2040;
    scrollbar-background: #0a0e1a;
    height: 1fr;
}

#status-line {
    height: 1;
    padding: 0 4;
    background: #0a0e1a;
    color: #00d4ff;
}

#input-container {
    height: auto;
    padding: 0 2 1 2;
    background: #0a0e1a;
    border-top: tall #1a2040;
}

#prompt-label {
    color: #00d4ff;
    width: auto;
    padding: 1 0 0 0;
    text-style: bold;
}

#user-input {
    background: #0f1628;
    border: tall #1a3050;
    color: #e0f0ff;
    padding: 0 1;
    height: 3;
    width: 1fr;
}

#user-input:focus {
    border: tall #00d4ff;
}

Footer {
    background: #060810;
    color: #2a4060;
}
"""
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
class TermAgent(App):
    """Textual TUI frontend for the terminal agent.

    Runs the LangGraph pipeline in a worker thread and marshals all widget
    updates back onto the UI thread with call_from_thread. Human-in-the-loop
    confirmation is implemented by patching nodes._confirm_fn with a callback
    that blocks the worker on a threading.Event until the user answers.
    """

    CSS = CSS
    BINDINGS = [
        ("ctrl+c", "quit", "Quit"),
        ("ctrl+l", "clear", "Clear"),
    ]

    # Reactive so watch_cwd refreshes the status bar whenever it changes.
    cwd = reactive(os.getcwd())

    def compose(self) -> ComposeResult:
        """Build the widget tree: header, status bar, log, status line, input, footer."""
        with Vertical(id="header"):
            yield Static(ASCII_LOGO, id="logo")
            yield Static(TAGLINE, id="tagline")

        with Horizontal(id="status-bar"):
            yield Static(id="cwd-label")
            yield Static("⬡ llama-3.3-70b-versatile • Groq Inference", id="model-label")

        yield RichLog(id="output-log", highlight=True, markup=True, wrap=True)

        # Status line — sits between log and input, shows spinner or result in place
        yield Static("", id="status-line")

        with Horizontal(id="input-container"):
            yield Static("❯", id="prompt-label")
            yield Input(placeholder="Ask me anything or describe what to do...", id="user-input")

        yield Footer()

    def on_mount(self) -> None:
        """Initialise per-instance state, print the greeting, and focus the input."""
        # When set, on_input_submitted routes submissions to this handler
        # instead of starting a new agent run (HITL confirmation mode).
        self._confirmation_handler = None
        self._spinner_timer: Timer | None = None
        self._spinner_frame = 0
        self.update_cwd_label()
        log = self.query_one("#output-log", RichLog)
        log.write(Text.from_markup(
            "[dim]Type a command in plain English or ask a question. Type [bold cyan]bye[/bold cyan] to exit.[/dim]\n"
        ))
        self.query_one("#user-input", Input).focus()

    def watch_cwd(self, new_cwd: str) -> None:
        """Reactive hook: keep the status-bar label in sync with cwd."""
        self.update_cwd_label()

    def update_cwd_label(self) -> None:
        """Render the current working directory into the status bar."""
        label = self.query_one("#cwd-label", Static)
        label.update(f" {self.cwd}")

    # ── Spinner helpers ──────────────────────────────────────────────────────

    def _start_spinner(self) -> None:
        """Start animating the status line with a spinner."""
        self._spinner_frame = 0
        self._spinner_timer = self.set_interval(0.08, self._tick_spinner)

    def _tick_spinner(self) -> None:
        """Called every 80ms to advance the spinner frame."""
        frame = SPINNER_FRAMES[self._spinner_frame % len(SPINNER_FRAMES)]
        self._spinner_frame += 1
        status = self.query_one("#status-line", Static)
        status.update(Text.from_markup(f"[cyan]{frame}[/cyan] [dim cyan]thinking...[/dim cyan]"))

    def _stop_spinner(self) -> None:
        """Stop the spinner timer."""
        if self._spinner_timer is not None:
            self._spinner_timer.stop()
            self._spinner_timer = None

    def _set_status(self, markup: str) -> None:
        """Replace status line content (called after spinner stops)."""
        status = self.query_one("#status-line", Static)
        status.update(Text.from_markup(markup))

    def _clear_status(self) -> None:
        """Blank the status line."""
        status = self.query_one("#status-line", Static)
        status.update("")

    # ── Input handling ───────────────────────────────────────────────────────

    def on_input_submitted(self, event: Input.Submitted) -> None:
        """Dispatch a submitted line: confirmation answer, 'bye', or new request."""
        # Confirmation flow takes priority
        if self._confirmation_handler:
            self._confirmation_handler(event)
            return

        user_input = event.value.strip()
        if not user_input:
            return

        input_widget = self.query_one("#user-input", Input)
        input_widget.clear()

        if user_input.lower() == "bye":
            self.exit()
            return

        log = self.query_one("#output-log", RichLog)
        # NOTE(review): user_input is interpolated into Rich markup unescaped —
        # typing e.g. "[red]" injects markup; consider escape(user_input).
        log.write(Text.from_markup(f"\n[bold cyan]❯[/bold cyan] [white]{user_input}[/white]"))
        self._start_spinner()
        self.process_input(user_input)

    # ── Agent worker ─────────────────────────────────────────────────────────

    @work(thread=True)
    def process_input(self, user_input: str) -> None:
        """Run the agent graph off the UI thread and report the outcome.

        Temporarily installs patched_confirm as nodes._confirm_fn so risky
        commands pause the worker and ask the user via the TUI instead of
        the console input() fallback.
        """
        from termagent.agent.graph import app as agent_app
        import termagent.agent.nodes as nodes
        import threading

        outer_self = self

        def patched_confirm(state):
            # Runs on the worker thread: schedule the question on the UI
            # thread, then block until on_confirm sets the event.
            cmd = state['cmd']
            result_holder = {}
            confirmed_event = threading.Event()

            def ask():
                outer_self._ask_confirmation(cmd, result_holder, confirmed_event)

            outer_self.call_from_thread(ask)
            confirmed_event.wait()
            return {"confirmation": result_holder.get("answer", "no")}

        nodes._confirm_fn = patched_confirm

        try:
            state = {"text": user_input, "cwd": self.cwd}
            result = agent_app.invoke(state)

            new_cwd = result.get("cwd", self.cwd)
            output = result.get("result", "Command cancelled.")
            intent = result.get("intent", "command")

            self.call_from_thread(self._update_output, output, intent, new_cwd)
        except Exception as e:
            self.call_from_thread(self._stop_spinner)
            self.call_from_thread(
                self._set_status,
                f"[bold red]✗ Error: {escape(str(e))}[/bold red]"
            )
        finally:
            # Always unhook so a console run later doesn't use a dead UI callback.
            nodes._confirm_fn = None

    # ── HITL confirmation ─────────────────────────────────────────────────────

    def _ask_confirmation(self, cmd: str, result_holder: dict, event) -> None:
        """Prompt the user (on the UI thread) to approve a risky command.

        Stores the answer in result_holder["answer"] and sets *event* to
        release the blocked worker thread.
        """
        # Stop spinner while waiting for user
        self._stop_spinner()
        self._set_status("[bold yellow]⚠ Risky command — type yes or no[/bold yellow]")

        log = self.query_one("#output-log", RichLog)
        log.write(Text.from_markup(
            f"\n[bold yellow] Risky command detected:[/bold yellow]\n"
            f" [bold white]{escape(cmd)}[/bold white]\n"
            f"[dim yellow] Type [bold]yes[/bold] to confirm or [bold]no[/bold] to cancel[/dim yellow]"
        ))

        input_widget = self.query_one("#user-input", Input)
        input_widget.placeholder = "yes / no"

        def on_confirm(submit_event: Input.Submitted):
            # Only a literal yes/no releases the worker; anything else re-prompts.
            answer = submit_event.value.strip().lower()
            if answer in ["yes", "no"]:
                input_widget.clear()
                input_widget.placeholder = "Ask me anything or describe what to do..."
                result_holder["answer"] = answer
                log.write(Text.from_markup(
                    f"[dim] → {'[green]Confirmed[/green]' if answer == 'yes' else '[red]Cancelled[/red]'}[/dim]"
                ))
                # Restart spinner while agent continues
                self._start_spinner()
                self._confirmation_handler = None
                event.set()
            else:
                log.write(Text.from_markup("[dim yellow] Please type yes or no[/dim yellow]"))

        self._confirmation_handler = on_confirm

    # ── Output rendering ──────────────────────────────────────────────────────

    def _update_output(self, output: str, intent: str, new_cwd: str) -> None:
        """Render the agent's result into the log and status line, then update cwd."""
        self._stop_spinner()
        log = self.query_one("#output-log", RichLog)

        if intent == "chat":
            self._set_status("[dim cyan]◌ responded[/dim cyan]")
            log.write(Text.from_markup(f" [white]{escape(output)}[/white]"))
        else:
            if output.startswith("Error:"):
                self._set_status("[bold red]✗ Error[/bold red]")
                log.write(Text.from_markup(f"[bold red] ✗[/bold red] [red]{escape(output)}[/red]"))
            elif output == "Command cancelled by user.":
                self._set_status("[dim]✗ Cancelled[/dim]")
            else:
                self._set_status("[bold green]✓ Done[/bold green]")
                lines = output.strip().splitlines()
                # Suppress the placeholder success message; show real output only.
                if lines and output != "Command executed successfully.":
                    for line in lines:
                        log.write(Text.from_markup(f" [dim]{escape(line)}[/dim]"))

        self.cwd = new_cwd

    def action_clear(self) -> None:
        """Ctrl+L binding: wipe the log and status line."""
        self.query_one("#output-log", RichLog).clear()
        self._clear_status()
|
|
322
|
+
|
|
323
|
+
def main():
    """Entry point: make sure a Groq API key is available, then launch the TUI."""
    from dotenv import load_dotenv
    load_dotenv()

    key = os.getenv("GROQ_API_KEY")

    if not key:
        print("Groq API key not found.")
        key = input("Enter your Groq API key: ").strip()

        # Optionally persist the key for subsequent runs.
        if input("Save to .env for future use? (yes/no): ").lower() == "yes":
            with open(".env", "a") as env_file:
                env_file.write(f"\nGROQ_API_KEY={key}")
            print("Saved!")

        os.environ["GROQ_API_KEY"] = key

    TermAgent().run()


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: termagent-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Natural language terminal agent for Windows PowerShell
|
|
5
|
+
Requires-Python: >=3.10
|
|
6
|
+
Requires-Dist: langchain-ollama
|
|
7
|
+
Requires-Dist: langchain-groq
|
|
8
|
+
Requires-Dist: langchain-core
|
|
9
|
+
Requires-Dist: langchain
|
|
10
|
+
Requires-Dist: langgraph
|
|
11
|
+
Requires-Dist: pydantic
|
|
12
|
+
Requires-Dist: python-dotenv
|
|
13
|
+
Requires-Dist: ollama
|
|
14
|
+
Requires-Dist: textual
|
|
15
|
+
Requires-Dist: rich
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
termagent/__init__.py
|
|
4
|
+
termagent/ui.py
|
|
5
|
+
termagent/agent/graph.py
|
|
6
|
+
termagent/agent/nodes.py
|
|
7
|
+
termagent/agent/state.py
|
|
8
|
+
termagent_cli.egg-info/PKG-INFO
|
|
9
|
+
termagent_cli.egg-info/SOURCES.txt
|
|
10
|
+
termagent_cli.egg-info/dependency_links.txt
|
|
11
|
+
termagent_cli.egg-info/entry_points.txt
|
|
12
|
+
termagent_cli.egg-info/requires.txt
|
|
13
|
+
termagent_cli.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
termagent
|