tass 0.1.11__tar.gz → 0.1.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tass
3
- Version: 0.1.11
3
+ Version: 0.1.13
4
4
  Summary: A terminal assistant that allows you to ask an LLM to run commands.
5
5
  Project-URL: Homepage, https://github.com/cetincan0/tass
6
6
  Author: Can Cetin
@@ -44,11 +44,21 @@ You can run it with
44
44
  tass
45
45
  ```
46
46
 
47
- tass has only been tested with gpt-oss-120b using llama.cpp so far, but in theory any LLM with tool calling capabilities should work. By default, it will try connecting to http://localhost:8080. If you want to use another host, set the `TASS_HOST` environment variable. At the moment there's no support for connecting tass to a non-local API, nor are there plans for it. For the time being, I plan on keeping tass completely local. There's no telemetry, no logs, just a simple REPL loop.
47
+ or if you only want to ask/request a single thing
48
+
49
+ ```
50
+ tass "convert video.mp4 to audio.mp3"
51
+ ```
52
+
53
+ tass has only been tested with llama.cpp, using LLMs such as gpt-oss-120b and MiniMax M2.1, but any LLM with tool calling capabilities should work.
54
+
55
+ By default, tass will try connecting to http://localhost:8080. To use another host, set the `TASS_HOST` environment variable. If your server requires an API key, you can set the `TASS_API_KEY` environment variable. At the moment there's no support for connecting tass to a non-local API, nor are there plans for it. I plan on keeping tass completely local. There's no telemetry, no logs, just a simple REPL loop.
48
56
 
49
57
  Once it's running, you can ask questions or give commands like "Create an empty file called test.txt" and it will propose a command to run after user confirmation.
50
58
 
51
- You can enter multiline input by ending lines with a backslash (\\). The continuation prompt will appear until you enter a line without a trailing backslash.
59
+ You can enter multiline input by ending lines with a backslash (\\). The continuation prompt will keep appearing until you enter a line without a trailing backslash.
60
+
61
+ You can use the --yolo flag to turn off user confirmations for executing commands and editing files, but this is only recommended when benchmarking tass with an LLM; I highly recommend not using it outside of testing/benchmarking scenarios.
52
62
 
53
63
  ## Upgrade
54
64
 
tass-0.1.13/README.md ADDED
@@ -0,0 +1,61 @@
1
+ # tass
2
+
3
+ <p align="center">
4
+ <img src="assets/tass.gif" alt="Demo" />
5
+ </p>
6
+
7
+ A terminal assistant that allows you to ask an LLM to run commands.
8
+
9
+ ## Warning
10
+
11
+ This tool can run commands including ones that can modify, move, or delete files. Use at your own risk.
12
+
13
+ ## Installation
14
+
15
+ ### Using uv
16
+
17
+ ```
18
+ uv tool install tass
19
+ ```
20
+
21
+ ### Using pip
22
+
23
+ ```
24
+ pip install tass
25
+ ```
26
+
27
+ You can run it with
28
+
29
+ ```
30
+ tass
31
+ ```
32
+
33
+ or if you only want to ask/request a single thing
34
+
35
+ ```
36
+ tass "convert video.mp4 to audio.mp3"
37
+ ```
38
+
39
+ tass has only been tested with llama.cpp, using LLMs such as gpt-oss-120b and MiniMax M2.1, but any LLM with tool calling capabilities should work.
40
+
41
+ By default, tass will try connecting to http://localhost:8080. To use another host, set the `TASS_HOST` environment variable. If your server requires an API key, you can set the `TASS_API_KEY` environment variable. At the moment there's no support for connecting tass to a non-local API, nor are there plans for it. I plan on keeping tass completely local. There's no telemetry, no logs, just a simple REPL loop.
42
+
43
+ Once it's running, you can ask questions or give commands like "Create an empty file called test.txt" and it will propose a command to run after user confirmation.
44
+
45
+ You can enter multiline input by ending lines with a backslash (\\). The continuation prompt will keep appearing until you enter a line without a trailing backslash.
46
+
47
+ You can use the --yolo flag to turn off user confirmations for executing commands and editing files, but this is only recommended when benchmarking tass with an LLM; I highly recommend not using it outside of testing/benchmarking scenarios.
48
+
49
+ ## Upgrade
50
+
51
+ ### Using uv
52
+
53
+ ```
54
+ uv tool upgrade tass
55
+ ```
56
+
57
+ ### Using pip
58
+
59
+ ```
60
+ pip install --upgrade tass
61
+ ```
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "tass"
3
- version = "0.1.11"
3
+ version = "0.1.13"
4
4
  description = "A terminal assistant that allows you to ask an LLM to run commands."
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.10"
@@ -35,8 +35,38 @@ dev = [
35
35
  "pytest>=9.0.2",
36
36
  ]
37
37
 
38
+ [tool.ruff]
39
+ target-version = "py310"
40
+ line-length = 120
41
+
42
+ [tool.ruff.lint]
43
+ select = [
44
+ "A",
45
+ "B",
46
+ "C4",
47
+ "E",
48
+ "F",
49
+ "I",
50
+ "ISC",
51
+ "SIM",
52
+ "RUF",
53
+ "UP",
54
+ "W",
55
+ ]
56
+
57
+ ignore = [
58
+ "E501",
59
+ ]
60
+
61
+ [tool.ruff.lint.isort]
62
+ known-first-party = ["tass"]
63
+
64
+ [tool.ruff.format]
65
+ quote-style = "double"
66
+ indent-style = "space"
67
+
38
68
  [tool.pyright]
39
69
  pythonVersion = "3.10"
40
- include = ["src/**/*.py"]
70
+ include = ["src/**/*.py", "tests/**/*.py"]
41
71
  venvPath = "."
42
72
  venv = ".venv"
tass-0.1.13/src/app.py ADDED
@@ -0,0 +1,284 @@
1
+ import json
2
+
3
+ from prompt_toolkit import prompt
4
+ from rich.console import Group
5
+ from rich.live import Live
6
+ from rich.markdown import Markdown
7
+ from rich.panel import Panel
8
+ from rich.text import Text
9
+
10
+ from src.constants import (
11
+ SYSTEM_PROMPT,
12
+ console,
13
+ )
14
+ from src.llm_client import LLMClient
15
+ from src.tools import (
16
+ EDIT_FILE_TOOL,
17
+ EXECUTE_TOOL,
18
+ READ_FILE_TOOL,
19
+ edit_file,
20
+ execute,
21
+ read_file,
22
+ )
23
+ from src.utils import (
24
+ FileCompleter,
25
+ create_key_bindings,
26
+ )
27
+
28
+
29
class TassApp:
    """Interactive terminal assistant: a REPL that routes user requests to an
    LLM with tool calling (execute / read_file / edit_file).

    Holds the full conversation history, streams assistant output to the
    terminal via Rich, and dispatches tool calls requested by the model.
    """

    def __init__(self, yolo_mode: bool = False):
        # When True, tools skip their user-confirmation prompts.
        self.yolo_mode = yolo_mode
        # Chat history sent to the LLM; index 0 is always the system prompt.
        self.messages: list[dict] = [{"role": "system", "content": SYSTEM_PROMPT}]
        self.llm_client = LLMClient()
        self.key_bindings = create_key_bindings()
        self.file_completer = FileCompleter()
        # Dispatch table mapping tool-call names from the LLM to callables.
        self.TOOLS_MAP = {
            "execute": execute,
            "read_file": read_file,
            "edit_file": edit_file,
        }

    def check_llm_host(self):
        """Verify the LLM server is reachable; on failure, offer to switch host.

        Fix: the green success banner is now printed only after confirming a
        200 response. Previously it was printed before the status check, so a
        non-200 reply showed "✓" and then "Could not connect".
        """
        try:
            response = self.llm_client.get_models()
            if response.status_code == 200:
                console.print("Terminal Assistant [green](LLM connection ✓)[/green]")
                return
            console.print("Terminal Assistant [red](LLM connection ✗)[/red]")
        except Exception:
            console.print("Terminal Assistant [red](LLM connection ✗)[/red]")

        console.print("\n[red]Could not connect to LLM[/red]")
        console.print(f"If your LLM isn't running on {self.llm_client.host}, you can set the [bold]TASS_HOST[/] environment variable to a different URL.")
        new_host = console.input(
            "Enter a different URL for this session (or press Enter to keep current): "
        ).strip()

        if new_host:
            self.llm_client.host = new_host

        try:
            response = self.llm_client.get_models()
            if response.status_code == 200:
                console.print(f"[green]Connection established to {self.llm_client.host}[/green]")
        except Exception:
            console.print(f"[red]Unable to verify new host {self.llm_client.host}. Continuing with it anyway.[/red]")

    def summarize(self):
        """Compact the conversation once it grows past a fixed message count.

        Replaces the history with the system prompt plus a single assistant
        message containing an LLM-generated summary, to stay within context.
        """
        max_messages = 20
        if len(self.messages) <= max_messages:
            return

        # Renamed from `prompt` to avoid shadowing prompt_toolkit's `prompt`.
        # Also fixes a stray double space the original string concatenation
        # produced ("read  or edited").
        summary_request = (
            "The conversation is becoming long and might soon go beyond the "
            "context limit. Please provide a detailed summary of the conversation, "
            "preserving all important details. Make sure context is not lost so that "
            "the conversation can continue without needing to reclarify anything. "
            "You don't have to preserve entire contents of files that have been read "
            "or edited, they can be read again if necessary."
        )

        console.print("\n - Summarizing conversation...")
        response = self.llm_client.get_chat_completions(
            messages=[*self.messages, {"role": "user", "content": summary_request}],
            tools=[
                EDIT_FILE_TOOL,
                EXECUTE_TOOL,
                READ_FILE_TOOL,
            ],  # Same tool list as normal calls, for (prompt-)caching purposes
        )
        data = response.json()
        summary = data["choices"][0]["message"]["content"]
        # Keep only the system prompt and the summary.
        self.messages = [
            self.messages[0],
            {"role": "assistant", "content": f"Summary of the conversation so far:\n{summary}"},
        ]
        console.print(" [green]Summarization completed[/green]")

    def call_llm(self) -> bool:
        """Stream one assistant turn and run any tool calls it requests.

        Returns True when the assistant produced a final answer (no tool
        calls); False when the caller should call call_llm() again (tools ran,
        or a tool failed and the error was reported back to the model).
        """
        response = self.llm_client.get_chat_completions(
            messages=self.messages,
            tools=[
                EDIT_FILE_TOOL,
                EXECUTE_TOOL,
                READ_FILE_TOOL,
            ],
            stream=True,
        )

        content = ""
        reasoning_content = ""
        # Tool calls arrive as incremental deltas keyed by "index"; accumulate
        # id/type/name/arguments fragments here until the stream ends.
        tool_calls_map: dict[int, dict] = {}
        timings_str = ""

        def generate_layout():
            # Live view: a dim panel showing the last 3 lines of the model's
            # reasoning, then the answer-so-far rendered as Markdown.
            groups = []

            if reasoning_content:
                last_three_lines = "\n".join(reasoning_content.rstrip().split("\n")[-3:])
                groups.append(Text(""))
                groups.append(
                    Panel(
                        Text(
                            last_three_lines,
                            style="grey50",
                        ),
                        title="Thought process",
                        title_align="left",
                        subtitle=timings_str,
                        style="grey50",
                    )
                )

            if content:
                groups.append(Text(""))
                groups.append(Markdown(content.rstrip()))

            return Group(*groups)

        with Live(generate_layout(), refresh_per_second=10) as live:
            for line in response.iter_lines():
                line = line.decode("utf-8")
                if not line.strip():
                    continue

                # SSE terminator sent at the end of the stream.
                if line == "data: [DONE]":
                    continue

                chunk = json.loads(line.removeprefix("data:"))
                # Token throughput stats (llama.cpp "timings" extension),
                # shown in the panel subtitle.
                if all(k in chunk.get("timings", {}) for k in ["prompt_n", "prompt_per_second", "predicted_n", "predicted_per_second"]):
                    timings = chunk["timings"]
                    timings_str = (
                        f"Input: {timings['prompt_n']:,} tokens, {timings['prompt_per_second']:,.2f} tok/s | "
                        f"Output: {timings['predicted_n']:,} tokens, {timings['predicted_per_second']:,.2f} tok/s"
                    )

                if chunk["choices"][0]["finish_reason"]:
                    # Final refresh so the subtitle shows the last timings.
                    live.update(generate_layout())

                delta = chunk["choices"][0]["delta"]
                if not any(delta.get(key) for key in ["content", "reasoning_content", "tool_calls"]):
                    continue

                if delta.get("reasoning_content"):
                    reasoning_content += delta["reasoning_content"]
                    live.update(generate_layout())

                if delta.get("content"):
                    content += delta["content"]
                    live.update(generate_layout())

                for tool_call_delta in delta.get("tool_calls") or []:
                    index = tool_call_delta["index"]
                    if index not in tool_calls_map:
                        tool_calls_map[index] = {
                            "index": index,
                            "id": "",
                            "type": "",
                            "function": {
                                "name": "",
                                "arguments": "",
                            },
                        }

                    tool_call = tool_calls_map[index]
                    if tool_call_delta.get("id"):
                        tool_call["id"] += tool_call_delta["id"]
                    if tool_call_delta.get("type"):
                        tool_call["type"] += tool_call_delta["type"]
                    if tool_call_delta.get("function"):
                        function = tool_call_delta["function"]
                        if function.get("name"):
                            tool_call["function"]["name"] += function["name"]
                        if function.get("arguments"):
                            tool_call["function"]["arguments"] += function["arguments"]

        self.messages.append(
            {
                "role": "assistant",
                "content": content.strip(),
                "reasoning_content": reasoning_content.strip(),
                "tool_calls": list(tool_calls_map.values()),
            }
        )

        if not tool_calls_map:
            return True

        try:
            for tool_call in tool_calls_map.values():
                tool = self.TOOLS_MAP[tool_call["function"]["name"]]
                tool_args = json.loads(tool_call["function"]["arguments"])
                tool_args["yolo_mode"] = self.yolo_mode
                result = tool(**tool_args)
                self.messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call["id"],
                        "name": tool_call["function"]["name"],
                        "content": result,
                    }
                )
            return False
        except Exception as e:
            self.messages.append({"role": "user", "content": f"Tool call failed: {e}"})
            console.print(f" [red]Tool call failed: {str(e).strip()}[/red]")
            # Fix: return False instead of recursing into call_llm(). Every
            # caller already loops until call_llm() returns True, so the
            # recursion could only grow the stack on repeated failures.
            return False

    def run(self, initial_input: str | None = None):
        """Main loop. With initial_input, run one request and exit (single-shot)."""
        if initial_input:
            self.messages.append({"role": "user", "content": initial_input})
            while True:
                try:
                    finished = self.call_llm()
                except Exception as e:
                    console.print(f"Failed to call LLM: {e}")
                    break

                if finished:
                    return

        try:
            self.check_llm_host()
        except KeyboardInterrupt:
            console.print("\nBye!")
            return

        while True:
            console.print()
            try:
                input_lines = []
                while True:
                    input_line = prompt(
                        "> ",
                        completer=self.file_completer,
                        complete_while_typing=True,
                        key_bindings=self.key_bindings,
                    )
                    # A trailing backslash continues the input on the next line.
                    if not input_line or input_line[-1] != "\\":
                        input_lines.append(input_line)
                        break
                    input_lines.append(input_line[:-1])
                user_input = "\n".join(input_lines)
            except KeyboardInterrupt:
                console.print("\nBye!")
                break

            if not user_input:
                continue

            if user_input.lower().strip() == "exit":
                console.print("\nBye!")
                break

            self.messages.append({"role": "user", "content": user_input})
            while True:
                try:
                    finished = self.call_llm()
                except Exception as e:
                    console.print(f"Failed to call LLM: {e}")
                    break

                if finished:
                    self.summarize()
                    break
tass-0.1.13/src/cli.py ADDED
@@ -0,0 +1,27 @@
1
+ import argparse
2
+
3
+ from src.app import TassApp
4
+
5
+
6
def main():
    """Command-line entry point: parse flags and launch the tass assistant."""
    arg_parser = argparse.ArgumentParser(
        description="Terminal Assistant - Ask an LLM to run commands"
    )
    arg_parser.add_argument(
        "--yolo",
        action="store_true",
        help="YOLO mode: execute all commands and edit files without asking for confirmation",
    )
    arg_parser.add_argument(
        "prompt",
        nargs="?",
        help="Prompt to run (enclose in quotes; runs in single-shot mode and exits)",
    )
    parsed = arg_parser.parse_args()

    assistant = TassApp(yolo_mode=parsed.yolo)

    # A positional prompt switches to single-shot mode; otherwise start the REPL.
    if parsed.prompt:
        assistant.run(initial_input=parsed.prompt)
    else:
        assistant.run()
@@ -0,0 +1,73 @@
1
+ import os
2
+ import platform
3
+ import subprocess
4
+ from pathlib import Path
5
+
6
+ from rich.console import Console
7
+
8
# Shared Rich console used for all terminal output in tass.
console = Console()
# Working directory resolved once at import time; anchors the git subprocess
# calls and the directory listing embedded in the system prompt.
CWD_PATH = Path.cwd().resolve()
10
+
11
+
12
def get_shell_info() -> str:
    """Return the basename of the user's login shell, or 'unknown' if unset."""
    shell_env = os.environ.get('SHELL', '')
    if not shell_env:
        return 'unknown'
    # Last path component, e.g. "/usr/bin/zsh" -> "zsh".
    return shell_env.rsplit('/', 1)[-1]
16
+
17
+
18
def get_git_info() -> str:
    """Return the current branch and dirty state, or a not-a-repo message.

    Best-effort: any failure (git missing, not a repository, permission
    problems) collapses to the same "Not a git repository" string.
    """
    try:
        # check=True makes this raise when CWD_PATH is not inside a git repo.
        subprocess.run(['git', 'rev-parse', '--git-dir'], capture_output=True, check=True, cwd=CWD_PATH)

        lines = []

        branch_proc = subprocess.run(['git', 'branch', '--show-current'], capture_output=True, text=True, cwd=CWD_PATH)
        current_branch = branch_proc.stdout.strip()
        # Empty on a detached HEAD, in which case the line is omitted.
        if current_branch:
            lines.append(f"Branch: {current_branch}")

        status_proc = subprocess.run(['git', 'status', '--porcelain'], capture_output=True, text=True, cwd=CWD_PATH)
        dirty = bool(status_proc.stdout.strip())
        lines.append(f"Uncommitted changes: {'Yes' if dirty else 'No'}")

        return '\n'.join(lines)
    except Exception:
        return "Not a git repository"
35
+
36
+
37
def get_directory_listing(path=None) -> str:
    """List up to 100 entries of *path* (default: CWD_PATH), one per line.

    Directories get a trailing slash. Returns "Empty directory" for an empty
    directory and "Permission denied" if the directory cannot be read.

    Bug fix: the original called `len(list(iterdir))` on an iterator that
    `sorted()` had already exhausted, so the overflow line always read
    "... and -100 more items". The listing is now materialized once and the
    remainder counted from it. The new optional `path` parameter is
    backward-compatible (no argument keeps the old behavior).
    """
    target = CWD_PATH if path is None else Path(path)
    try:
        all_entries = sorted(target.iterdir())
        entries = [f"{e.name}/" if e.is_dir() else e.name for e in all_entries[:100]]
        if len(all_entries) > 100:
            entries.append(f"... and {len(all_entries) - 100} more items")
        return '\n'.join(entries) if entries else "Empty directory"
    except PermissionError:
        return "Permission denied"
52
+
53
+
54
# Built once at import time: behavioral instructions plus a snapshot of the
# environment (cwd, OS, shell, git state, directory listing) so the model has
# immediate context. Fix: "an file autocomplete" -> "a file autocomplete".
SYSTEM_PROMPT = f"""You are tass, or Terminal Assistant, a helpful AI that executes shell commands based on natural-language requests.

If the user's request involves making changes to the filesystem such as creating or deleting files or directories, you MUST first check whether the file or directory exists before proceeding.

If a user asks for an answer or explanation to something instead of requesting to run a command, answer briefly and concisely. Do not supply extra information, suggestions, tips, or anything of the sort.

This app has a feature where the user can refer to files or directories by typing @ which will open a file autocomplete dropdown. When this feature is used, the @ will remain in the filename. When working with said file, ignore the preceding @.

Current working directory: {CWD_PATH}

# Directory Context
OS: {platform.system()}
Shell: {get_shell_info()}

# Git Info
{get_git_info()}

# Directory Listing
{get_directory_listing()}
"""
@@ -0,0 +1,49 @@
1
+ import os
2
+ from typing import Literal
3
+
4
+ import requests
5
+
6
+
7
class LLMClient:
    """Thin HTTP wrapper around an OpenAI-compatible chat completion server."""

    def __init__(self):
        # Host and optional API key come from the environment; defaults target
        # a local llama.cpp-style server.
        self.host = os.environ.get("TASS_HOST", "http://localhost:8080")
        self.api_key = os.environ.get("TASS_API_KEY", "")

    def request(
        self,
        method: Literal["get", "post"],
        url: str,
        **kwargs,
    ):
        """Send an HTTP request to host + url with the bearer token attached."""
        full_url = f"{self.host}{url}"
        auth_headers = {
            "Authorization": f"Bearer {self.api_key}",
        }
        return requests.request(method, full_url, headers=auth_headers, **kwargs)

    def get(self, url: str, **kwargs):
        return self.request("get", url, **kwargs)

    def post(self, url: str, **kwargs):
        return self.request("post", url, **kwargs)

    def get_models(self):
        # Short timeout: this endpoint doubles as a cheap connectivity probe.
        return self.get("/v1/models", timeout=2)

    def get_chat_completions(self, messages: list[dict], tools: list[dict], stream: bool = False):
        """POST a chat completion request; `stream` enables SSE streaming."""
        payload = {
            "messages": messages,
            "tools": tools,
            "stream": stream,
            "chat_template_kwargs": {
                "reasoning_effort": "medium",
            },
        }
        return self.post("/v1/chat/completions", json=payload, stream=stream)
@@ -0,0 +1,12 @@
1
+ from .edit_file import EDIT_FILE_TOOL, edit_file
2
+ from .execute import EXECUTE_TOOL, execute
3
+ from .read_file import READ_FILE_TOOL, read_file
4
+
5
# Public re-export surface of the tools package: one JSON-schema constant and
# one callable per tool (edit_file, execute, read_file).
__all__ = [
    "EDIT_FILE_TOOL",
    "EXECUTE_TOOL",
    "READ_FILE_TOOL",
    "edit_file",
    "execute",
    "read_file",
]