tass 0.1.8.tar.gz → 0.1.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tass
-Version: 0.1.8
+Version: 0.1.9
 Summary: A terminal assistant that allows you to ask an LLM to run commands.
 Project-URL: Homepage, https://github.com/cetincan0/tass
 Author: Can Cetin
@@ -13,6 +13,10 @@ Description-Content-Type: text/markdown
 
 # tass
 
+<p align="center">
+<img src="assets/tass.gif" alt="Demo" />
+</p>
+
 A terminal assistant that allows you to ask an LLM to run commands.
 
 ## Warning
@@ -39,7 +43,7 @@ You can run it with
 tass
 ```
 
-tass has only been tested with gpt-oss-120b using llama.cpp so far, but in theory any LLM with tool calling capabilities should work. By default, it will try connecting to http://localhost:8080. If you want to use another host, set the `TASS_HOST` environment variable.
+tass has only been tested with gpt-oss-120b using llama.cpp so far, but in theory any LLM with tool calling capabilities should work. By default, it will try connecting to http://localhost:8080. If you want to use another host, set the `TASS_HOST` environment variable. At the moment there's no support for connecting tass to a non-local API, nor are there plans for it. For the time being, I plan on keeping tass completely local. There's no telemetry, no logs, just a simple REPL loop.
 
 Once it's running, you can ask questions or give commands like "Create an empty file called test.txt" and it will propose a command to run after user confirmation.
 
@@ -1,5 +1,9 @@
 # tass
 
+<p align="center">
+<img src="assets/tass.gif" alt="Demo" />
+</p>
+
 A terminal assistant that allows you to ask an LLM to run commands.
 
 ## Warning
@@ -26,7 +30,7 @@ You can run it with
 tass
 ```
 
-tass has only been tested with gpt-oss-120b using llama.cpp so far, but in theory any LLM with tool calling capabilities should work. By default, it will try connecting to http://localhost:8080. If you want to use another host, set the `TASS_HOST` environment variable.
+tass has only been tested with gpt-oss-120b using llama.cpp so far, but in theory any LLM with tool calling capabilities should work. By default, it will try connecting to http://localhost:8080. If you want to use another host, set the `TASS_HOST` environment variable. At the moment there's no support for connecting tass to a non-local API, nor are there plans for it. For the time being, I plan on keeping tass completely local. There's no telemetry, no logs, just a simple REPL loop.
 
 Once it's running, you can ask questions or give commands like "Create an empty file called test.txt" and it will propose a command to run after user confirmation.
 
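Both README copies describe the same host-resolution behavior: default to http://localhost:8080 unless `TASS_HOST` is set. A minimal sketch of that default-with-override pattern, assuming a plain environment-variable lookup (the `resolve_host` helper is illustrative, not tass's actual function):

```python
import os

# TASS_HOST overrides the default documented in the README.
# resolve_host is a hypothetical name, not tass's real API.
def resolve_host() -> str:
    return os.environ.get("TASS_HOST", "http://localhost:8080")
```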
Binary file
@@ -1,6 +1,6 @@
 [project]
 name = "tass"
-version = "0.1.8"
+version = "0.1.9"
 description = "A terminal assistant that allows you to ask an LLM to run commands."
 readme = "README.md"
 requires-python = ">=3.10"
@@ -64,11 +64,14 @@ class TassApp:
 
         prompt = (
             "The conversation is becoming long and might soon go beyond the "
-            "context limit. Please provide a concise summary of the conversation, "
-            "preserving all important details. Keep the summary short enough "
-            "to fit within a few paragraphs at the most."
+            "context limit. Please provide a detailed summary of the conversation, "
+            "preserving all important details. Make sure context is not lost so that "
+            "the conversation can continue without needing to reclarify anything. "
+            "You don't have to preserve entire contents of files that have been read "
+            "or edited, they can be read again if necessary."
         )
 
+        console.print("\n - Summarizing conversation...")
         response = requests.post(
             f"{self.host}/v1/chat/completions",
             json={
@@ -82,6 +85,7 @@ class TassApp:
         data = response.json()
         summary = data["choices"][0]["message"]["content"]
         self.messages = [self.messages[0], {"role": "assistant", "content": f"Summary of the conversation so far:\n{summary}"}]
+        console.print(" [green]Summarization completed[/green]")
 
     def call_llm(self) -> bool:
         response = requests.post(
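The hunk above shows the history-compaction pattern behind the new prompt: request a summary from the model, then collapse `self.messages` down to the system message plus a single assistant message carrying the summary. A standalone sketch of that pattern, assuming a llama.cpp-style `/v1/chat/completions` endpoint (the `compact_history` function is illustrative; the real method lives on `TassApp` and likely sends additional request fields not visible in the hunk):

```python
import requests

def compact_history(host: str, messages: list[dict], prompt: str) -> list[dict]:
    # Ask the model to summarize the conversation so far.
    response = requests.post(
        f"{host}/v1/chat/completions",
        json={"messages": messages + [{"role": "user", "content": prompt}]},
    )
    summary = response.json()["choices"][0]["message"]["content"]
    # Keep only the first (system) message plus one message holding the summary,
    # mirroring the self.messages assignment in the hunk above.
    return [
        messages[0],
        {"role": "assistant", "content": f"Summary of the conversation so far:\n{summary}"},
    ]
```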
@@ -137,15 +141,29 @@ class TassApp:
                 continue
 
             chunk = json.loads(line.removeprefix("data:"))
+            if all(k in chunk.get("timings", {}) for k in ["prompt_n", "prompt_per_second", "predicted_n", "predicted_per_second"]):
+                timings = chunk["timings"]
+                timings_str = (
+                    f"Input: {timings['prompt_n']:,} tokens, {timings['prompt_per_second']:,.2f} tok/s | "
+                    f"Output: {timings['predicted_n']:,} tokens, {timings['predicted_per_second']:,.2f} tok/s"
+                )
+
+            if chunk["choices"][0]["finish_reason"]:
+                live.update(generate_layout())
+
             delta = chunk["choices"][0]["delta"]
+            if not any([delta.get(key) for key in ["content", "reasoning_content", "tool_calls"]]):
+                continue
+
+            if delta.get("reasoning_content"):
+                reasoning_content += delta["reasoning_content"]
+                live.update(generate_layout())
+
             if delta.get("content"):
                 content += delta["content"]
                 live.update(generate_layout())
-            if delta.get("reasoning_content" ):
-                reasoning_content += delta["reasoning_content"]
-                live.update(generate_layout())
 
-            for tool_call_delta in delta.get("tool_calls", []):
+            for tool_call_delta in delta.get("tool_calls") or []:
                 index = tool_call_delta["index"]
                 if index not in tool_calls_map:
                     tool_calls_map[index] = (
@@ -172,22 +190,12 @@ class TassApp:
                 if function.get("arguments"):
                     tool_call["function"]["arguments"] += function["arguments"]
 
-            if all(k in chunk.get("timings", {}) for k in ["prompt_n", "prompt_per_second", "predicted_n", "predicted_per_second"]):
-                timings = chunk["timings"]
-                timings_str = (
-                    f"Input: {timings['prompt_n']:,} tokens, {timings['prompt_per_second']:,.2f} tok/s | "
-                    f"Output: {timings['predicted_n']:,} tokens, {timings['predicted_per_second']:,.2f} tok/s"
-                )
-
-            if chunk["choices"][0]["finish_reason"]:
-                live.update(generate_layout())
-
         self.messages.append(
             {
                 "role": "assistant",
-                "content": content,
-                "reasoning_content": reasoning_content,
-                "tool_calls": list(tool_calls_map.values()),
+                "content": content.strip() or None,
+                "reasoning_content": reasoning_content.strip() or None,
+                "tool_calls": list(tool_calls_map.values()) or None,
             }
         )
 
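Two details in this hunk are easy to miss. First, the timings and finish_reason handling moves above the delta processing, so it still runs for the final chunk, which the new early `continue` on empty deltas would otherwise skip. Second, `delta.get("tool_calls") or []` differs from `delta.get("tool_calls", [])`: the default only applies when the key is absent, while an explicit JSON `null` comes back as `None` and would crash the `for` loop. A small demonstration of that distinction:

```python
import json

# A delta as it might arrive in the stream, with tool_calls explicitly null.
delta = json.loads('{"content": null, "tool_calls": null}')

print(delta.get("tool_calls", []))    # None -- default ignored, key is present
print(delta.get("tool_calls") or [])  # []   -- safe to iterate
```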
@@ -113,4 +113,5 @@ READ_ONLY_COMMANDS = [
     "which",
     "sed",
     "find",
+    "test",
 ]
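`READ_ONLY_COMMANDS` appears to be an allowlist of commands tass treats as read-only and safe to run; this release adds `test`. A hedged sketch of how such an allowlist check typically works (the `is_read_only` helper and the first-token rule are assumptions for illustration, not tass's actual logic):

```python
import shlex

READ_ONLY_COMMANDS = ["which", "sed", "find", "test"]  # excerpt from the diff

def is_read_only(command: str) -> bool:
    # Hypothetical check: compare the command's first token against the allowlist.
    tokens = shlex.split(command)
    return bool(tokens) and tokens[0] in READ_ONLY_COMMANDS

print(is_read_only("test -f notes.txt"))  # True
print(is_read_only("rm -rf build"))       # False
```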
@@ -261,7 +261,7 @@ wheels = [
 
 [[package]]
 name = "tass"
-version = "0.1.8"
+version = "0.1.9"
 source = { editable = "." }
 dependencies = [
     { name = "requests" },
7 files without changes