grucli 3.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grucli/__init__.py +1 -0
- grucli/api.py +725 -0
- grucli/auth.py +115 -0
- grucli/chat_manager.py +190 -0
- grucli/commands.py +318 -0
- grucli/config.py +262 -0
- grucli/handlers.py +75 -0
- grucli/interrupt.py +179 -0
- grucli/main.py +617 -0
- grucli/permissions.py +181 -0
- grucli/stats.py +100 -0
- grucli/sysprompts/main_sysprompt.txt +65 -0
- grucli/theme.py +144 -0
- grucli/tools.py +368 -0
- grucli/ui.py +496 -0
- grucli-3.3.0.dist-info/METADATA +145 -0
- grucli-3.3.0.dist-info/RECORD +21 -0
- grucli-3.3.0.dist-info/WHEEL +5 -0
- grucli-3.3.0.dist-info/entry_points.txt +2 -0
- grucli-3.3.0.dist-info/licenses/LICENSE +21 -0
- grucli-3.3.0.dist-info/top_level.txt +1 -0
grucli/main.py
ADDED
|
@@ -0,0 +1,617 @@
|
|
|
1
|
+
import atexit
import html
import os
import re
import sys

from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.history import FileHistory
from prompt_toolkit.lexers import Lexer
from prompt_toolkit.styles import Style

from . import api
from . import auth
from . import commands
from . import config
from . import handlers
from . import interrupt
from . import permissions
from . import tools
from . import ui
from .stats import STATS
from .theme import Borders, Colors, Icons, Styles
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def get_yn_input(prompt):
    """Prompt the user until they answer 'y' or 'n'; return that answer.

    Ctrl+C is translated into an ``interrupt.BackSignal`` so callers can
    unwind to the previous menu.
    """
    styled_prompt = f"{Colors.INPUT_CONFIRM}{prompt}{Colors.RESET}"
    while True:
        try:
            answer = interrupt.prompt_input(styled_prompt).strip().lower()
        except KeyboardInterrupt:
            interrupt.handle_interrupt()
            raise interrupt.BackSignal()
        if answer in ('y', 'n'):
            return answer
        print(f"{Colors.ERROR}Invalid input. Please enter 'y' or 'n'.{Colors.RESET}")
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def get_api_key_for_provider(api_type):
    """Resolve credentials for *api_type*.

    Two paths:
      * Gemini + Google Auth enabled: run the OAuth/onboarding flow and
        return ``None`` (no API key is used in that mode).
      * Everything else: return a saved (decrypted) key, or prompt the user
        for one, optionally persisting it encrypted.

    Raises interrupt.BackSignal on Ctrl+C; may call sys.exit(1) on
    unrecoverable auth failures.
    """
    api_name = api_type.capitalize()

    # --- Google Auth path (Gemini only) -------------------------------
    if api_type == 'gemini' and config.is_using_google_auth():
        token = auth.get_auth_token()
        if not token:
            # Not logged in: offer an interactive OAuth login or exit.
            print(f"\n{Colors.WARNING}Google Auth is enabled but you are not logged in.{Colors.RESET}")
            print(f"{Colors.MUTED}To use Gemini with Google Auth, you must authenticate first.{Colors.RESET}")
            try:
                print(f"\n {Colors.SUCCESS}1{Colors.RESET}) Login with Google now")
                print(f" {Colors.ERROR}2{Colors.RESET}) Cancel and exit")
                choice = interrupt.prompt_input(f"\n{Colors.INPUT_ACTIVE}Choose: {Colors.RESET}").strip()
            except KeyboardInterrupt:
                interrupt.handle_interrupt()
                raise interrupt.BackSignal()

            if choice == '1':
                try:
                    auth.perform_oauth_login()
                    token = auth.get_auth_token()
                except Exception as e:
                    print(f"{Colors.ERROR}Login failed: {e}{Colors.RESET}")
                    sys.exit(1)
            else:
                # Any non-'1' answer is treated as "cancel".
                print(f"{Colors.MUTED}Authentication required for Google Auth mode. Exiting.{Colors.RESET}")
                sys.exit(1)

        print(f"\n{Colors.INFO}Checking Google Cloud onboarding status...{Colors.RESET}")
        # Project ID precedence: env vars first, then saved config.
        project_id = os.environ.get('GOOGLE_CLOUD_PROJECT') or os.environ.get('GOOGLE_CLOUD_PROJECT_ID') or config.get_google_cloud_project()

        load_res = api.load_code_assist(token, project_id)
        if not load_res:
            # Status unavailable — assume FREE tier and carry on.
            print(f"{Colors.WARNING}Could not load Code Assist status. You might need to onboard.{Colors.RESET}")
            tier_id = 'FREE'
        else:
            current_tier = load_res.get('currentTier', {})
            tier_id = current_tier.get('id', 'FREE')

            # The service may report a managed project; persist it.
            if load_res.get('cloudaicompanionProject'):
                project_id = load_res['cloudaicompanionProject']
                config.set_google_cloud_project(project_id)

        if not project_id:
            if tier_id == 'FREE':
                # FREE tier users can be onboarded automatically, which
                # may allocate a managed project for them.
                print(f"{Colors.MUTED}No project found, attempting to onboard to FREE tier...{Colors.RESET}")
                onboard_res = api.onboard_user(token, 'FREE')
                if onboard_res and onboard_res.get('done'):
                    project_id = onboard_res.get('response', {}).get('cloudaicompanionProject', {}).get('id')
                    if project_id:
                        config.set_google_cloud_project(project_id)
                elif onboard_res:
                    # Long-running operation not finished yet.
                    print(f"{Colors.WARNING}Onboarding is in progress. Please try again in a few seconds.{Colors.RESET}")

            if not project_id:
                # Last resort: ask the user directly (skippable).
                print(f"\n{Colors.WARNING}Google Cloud Project ID is required for Google Auth mode.{Colors.RESET}")
                print(f"{Colors.MUTED}If you are on Standard tier, please provide your Project ID.{Colors.RESET}")
                print(f"{Colors.MUTED}If you are on Free tier, this might be a managed project ID.{Colors.RESET}")
                try:
                    project_id = interrupt.prompt_input(f"{Colors.INPUT_ACTIVE}Enter Project ID (or Enter to skip): {Colors.RESET}").strip()
                except KeyboardInterrupt:
                    interrupt.handle_interrupt()
                    raise interrupt.BackSignal()
                if project_id:
                    config.set_google_cloud_project(project_id)

        print(f"\n{Colors.SUCCESS}{Icons.CHECK} Authenticated with Google Auth (Tier: {tier_id}, Project: {project_id or 'Managed'}){Colors.RESET}")
        # Google Auth mode uses the OAuth token, not an API key.
        return None

    # --- API-key path --------------------------------------------------
    saved_api_key = None
    if config.has_saved_api_key(api_type):
        print(f"\n{Colors.INFO}Found previously saved {api_name} API key.{Colors.RESET}")
        use_saved = get_yn_input("Do you want to use the saved API key? (y/n): ")
        if use_saved == 'y':
            saved_api_key = config.load_decrypted_api_key(api_type)
            if saved_api_key:
                print(f"{Colors.SUCCESS}{Icons.CHECK} Using saved API key.{Colors.RESET}")
                return saved_api_key
            else:
                # Decryption failed — fall through to manual entry.
                print(f"{Colors.ERROR}Failed to load saved API key. Please enter a new API key.{Colors.RESET}")

    print(f"\n{Colors.MUTED}Enter your {api_name} API key:{Colors.RESET}")
    try:
        api_key = interrupt.prompt_input(f"{Colors.INPUT_ACTIVE}> {Colors.RESET}").strip()
    except KeyboardInterrupt:
        interrupt.handle_interrupt()
        raise interrupt.BackSignal()

    if not api_key:
        print(f"{Colors.ERROR}No API key provided. Exiting.{Colors.RESET}")
        sys.exit(1)

    # Offer to persist the key (encrypted) for future sessions.
    save_choice = get_yn_input("Save this API key for future use? (y/n): ")
    if save_choice == 'y':
        try:
            config.save_encrypted_api_key(api_key, api_type)
            print(f"{Colors.SUCCESS}{Icons.CHECK} API key saved.{Colors.RESET}")
        except Exception as e:
            # Saving is best-effort; the key is still usable this session.
            print(f"{Colors.ERROR}Failed to save API key: {e}{Colors.RESET}")

    return api_key
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def select_api_ui():
    """Show the provider menu; return ``(provider_id, api_key_or_None)``.

    Local providers (Ollama, LM Studio) need no key. The two Gemini
    entries differ only in whether Google Auth mode is toggled first.
    """
    menu = [
        ('class:option', 'OpenAI'),
        ('class:option', 'Anthropic'),
        ('class:cloud', 'Gemini (Google Auth)'),
        ('class:option', 'Gemini (API Key)'),
        ('class:option', 'Ollama'),
        ('class:option', 'LM Studio'),
        ('class:option', 'Cerebras'),
    ]
    while True:
        try:
            _, idx = ui.select_option(menu, 'Select API Provider', is_root=True)

            if idx == 0:
                return "openai", get_api_key_for_provider("openai")
            if idx == 1:
                return "anthropic", get_api_key_for_provider("anthropic")
            if idx == 2:
                config.set_use_google_auth(True)
                return "gemini", get_api_key_for_provider("gemini")
            if idx == 3:
                config.set_use_google_auth(False)
                return "gemini", get_api_key_for_provider("gemini")
            if idx == 4:
                return "ollama", None
            if idx == 5:
                return "lm_studio", None
            if idx == 6:
                return "cerebras", get_api_key_for_provider("cerebras")
        except interrupt.BackSignal:
            # User backed out of a sub-prompt; redisplay the menu.
            interrupt.clear_bottom_warning()
        except KeyboardInterrupt:
            interrupt.handle_interrupt()
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
def select_model_ui(skip_loading=False):
    """Display the model selection menu and return the chosen model id.

    Args:
        skip_loading: when True, skip the local load/verify step (used for
            cloud providers where there is nothing to load locally).

    Raises interrupt.BackSignal on Ctrl+C at the menu; may sys.exit(0) if
    the user confirms quit while a local model is loading.
    """
    while True:
        try:
            models = api.get_models()

            options = []
            for m in models:
                mid = m.get('id', 'unknown')
                options.append({
                    "label": mid,
                    "id": mid
                })

            title = 'Select Model'

            # Highlight Ollama cloud-hosted models with a distinct style.
            ui_options = []
            for o in options:
                label = o['label']
                if api.CURRENT_API == "ollama" and "cloud" in label.lower():
                    ui_options.append(('class:cloud', label))
                else:
                    ui_options.append(label)

            _, index = ui.select_option(ui_options, title)

            selected_model = options[index]['id']

            # The sentinel id "custom" lets the user type any model name.
            if selected_model == "custom":
                print(f"\n{Colors.MUTED}Enter custom model name:{Colors.RESET}")
                try:
                    selected_model = interrupt.prompt_input(f"{Colors.INPUT_ACTIVE}> {Colors.RESET}").strip()
                except KeyboardInterrupt:
                    interrupt.handle_interrupt()
                    raise interrupt.BackSignal()

                if not selected_model:
                    print(f"{Colors.WARNING}No model name provided. Try again.{Colors.RESET}")
                    continue

            # Cloud models never need a local load; Ollama models with
            # "cloud" in the name are remote-hosted as well.
            is_cloud = False
            if api.CURRENT_API in ["gemini", "cerebras", "anthropic", "openai"]:
                is_cloud = True
            elif api.CURRENT_API == "ollama" and "cloud" in selected_model.lower():
                is_cloud = True

            # Ollama cloud models require an API key even though local ones don't.
            if api.CURRENT_API == "ollama" and is_cloud and not api.OLLAMA_API_KEY:
                key = get_api_key_for_provider("ollama")
                api.set_api_config("ollama", key)

            actual_skip = skip_loading or is_cloud

            if actual_skip:
                return selected_model

            # Show loading spinner for local models
            spinner = ui.Spinner(f"Loading {selected_model}")
            spinner.start()

            try:
                api.load_model_and_verify(selected_model)
                spinner.stop()
                return selected_model

            except KeyboardInterrupt:
                # Ctrl+C during load: either quit outright (double press,
                # per interrupt.should_quit) or return to the menu.
                spinner.stop()
                if interrupt.should_quit():
                    sys.exit(0)
                print(f"\n{Colors.WARNING}Model loading cancelled.{Colors.RESET}")
                continue

        except KeyboardInterrupt:
            interrupt.handle_interrupt()
            raise interrupt.BackSignal()
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
def get_tool_category(tool_name: str) -> str:
    """Return the display category for *tool_name*.

    Maps the tool's permission group onto one of 'read', 'write' or
    'destructive'; anything unrecognised defaults to 'read'.
    """
    labels = {
        permissions.PermissionGroup.READ: "read",
        permissions.PermissionGroup.WRITE: "write",
        permissions.PermissionGroup.DESTRUCTIVE: "destructive",
    }
    group = permissions.get_tool_permission_group(tool_name)
    return labels.get(group, "read")
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def execute_tool(cmd):
    """Run one parsed tool command and record the outcome in STATS.

    *cmd* carries 'name' and 'args' keys. Returns the tool's result string;
    an unrecognised tool name yields "error: unknown tool".
    """
    tool_name = cmd['name']
    tool_args = cmd['args']

    if tool_name == 'read_file':
        outcome = tools.read_file(tool_args.get('path'), tool_args.get('start_line'), tool_args.get('end_line'))
    elif tool_name == 'create_file':
        outcome = tools.create_file(tool_args.get('path'), tool_args.get('content'))
    elif tool_name == 'edit_file':
        outcome = tools.edit_file(tool_args.get('path'), tool_args.get('old_string'), tool_args.get('new_string'))
    elif tool_name == 'delete_file':
        outcome = tools.delete_file(tool_args.get('path'))
    elif tool_name == 'get_current_directory_structure':
        outcome = tools.get_current_directory_structure()
    else:
        outcome = "error: unknown tool"

    # A result starting with "Error"/"error" counts as a failed call.
    succeeded = not outcome.startswith(("Error", "error"))
    STATS.record_tool_call(succeeded)
    return outcome
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def process_tool_calls(cmds, state):
    """
    Process tool calls with per-tool permissions.
    Returns results string if any tools executed, None if all denied.

    For each parsed command: print the tool-call block (plus a diff preview
    for edit_file), ask permissions.check_permission, then execute. A single
    denial stops the whole batch and appends a denial message to
    state['messages']; parse errors are reported as results rather than
    executed.
    """
    all_results = []
    denied = False
    total = len(cmds)

    for i, cmd in enumerate(cmds):
        # Stop processing the batch once any tool has been denied.
        if denied:
            break

        name = cmd['name']

        # Parse errors become "results" so the model can see what went wrong.
        if 'error' in cmd:
            all_results.append(f"Result of `{cmd['original']}`:\n{cmd['error']}")
            continue

        args = cmd['args']
        category = get_tool_category(name)

        # Show tool call counter if multiple
        print()
        if total > 1:
            print(f"{Colors.MUTED}Tool {i + 1} of {total}{Colors.RESET}")

        # Display the tool call block
        print(ui.format_tool_call_block(name, args, category))

        # Show diff preview for edit_file (before permission prompt)
        if name == 'edit_file' and 'old_string' in args:
            print(ui.format_diff(
                args.get('old_string', ''),
                args.get('new_string', ''),
                args.get('path', 'unknown')
            ))

        # Check permission (prompt_permission no longer prints its own box)
        allowed, decision = permissions.check_permission(name, args)

        if not allowed:
            # Record the denial for the model; any earlier results are
            # still returned below alongside this message.
            denied = True
            state['messages'].append({"role": "user", "content": "Tool execution denied by user."})
            break

        # Generate human-readable action message
        path = args.get('path', '')
        if name == 'create_file':
            action_msg = f"Creating {path}"
        elif name == 'edit_file':
            action_msg = f"Editing {path}"
        elif name == 'delete_file':
            action_msg = f"Deleting {path}"
        elif name == 'read_file':
            action_msg = f"Reading {path}"
        elif name == 'get_current_directory_structure':
            action_msg = "Scanning directory"
        else:
            action_msg = f"Running {name}"

        # Show action status
        print(f"{Colors.MUTED}{action_msg}...{Colors.RESET}", end=" ", flush=True)
        result = execute_tool(cmd)

        if result.startswith("Error") or result.startswith("error"):
            print(f"{Colors.ERROR}{Icons.CROSS} {result}{Colors.RESET}")
        else:
            # Truncate long results for display
            display_result = result if len(result) < 200 else result[:197] + "..."
            print(f"{Colors.SUCCESS}{Icons.CHECK} {display_result}{Colors.RESET}")

        all_results.append(f"Result of `{cmd['original']}`:\n{result}")

    if all_results:
        return "\n\n".join(all_results)
    return None
|
|
368
|
+
|
|
369
|
+
|
|
370
|
+
class MentionLexer(Lexer):
    """Prompt lexer that renders @file mentions with the 'mention' style class.

    Each maximal run of non-whitespace starting with '@' gets the
    ('class:mention', ...) fragment; everything else is unstyled.
    """

    def lex_document(self, document):
        def get_line(lineno):
            text = document.lines[lineno]
            fragments = []
            cursor = 0
            for m in re.finditer(r'@[^\s]+', text):
                # Unstyled text between the previous mention and this one.
                if m.start() > cursor:
                    fragments.append(('', text[cursor:m.start()]))
                fragments.append(('class:mention', m.group()))
                cursor = m.end()
            trailing = text[cursor:]
            if trailing:
                fragments.append(('', trailing))
            return fragments
        return get_line
|
|
388
|
+
|
|
389
|
+
|
|
390
|
+
def get_status_bar(state):
    """Generate the status bar content with left/middle/right alignment.

    Layout: cwd basename (left), an optional "ACCEPTING EDITS" indicator
    (center, shown when WRITE permission is pre-granted), and the
    provider+model name (right). A yellow warning line is prepended when
    the toolbar state mentions Ctrl+C.

    Args:
        state: chat state dict; reads 'current_model' and 'toolbar'.

    Returns:
        prompt_toolkit ``HTML`` for use as the bottom toolbar.
    """
    width = ui.get_terminal_width()
    cwd_text = f"~/{os.path.basename(os.getcwd())}"

    # Include provider in model text
    provider_text = api.CURRENT_API.upper()
    model_name = state.get('current_model', 'unknown')
    model_text = f"{provider_text}: {model_name}"

    # Truncate model text if too long
    if len(model_text) > 35:
        model_text = model_text[:32] + "..."

    # Escape dynamic text before embedding it in HTML(): a '&', '<' or '>'
    # in a model name or directory name would otherwise break the
    # prompt_toolkit HTML parse. Padding math uses the unescaped lengths,
    # which match what is actually rendered.
    cwd_html = html.escape(cwd_text)
    model_html = html.escape(model_text)

    # Check for "Accepting Edits" mode
    is_accepting_edits = permissions.PERMISSION_STORE.is_allowed(permissions.PermissionGroup.WRITE)
    center_html = ""
    center_len = 0
    if is_accepting_edits:
        center_text = f"{Icons.WRITE} ACCEPTING EDITS"
        center_html = f'<style fg="#ffff00">{html.escape(center_text)}</style>'
        center_len = len(center_text)

    # Check for warning message (usually Ctrl+C)
    warning_line = ""
    if state.get('toolbar') and "ctrl+c" in str(state['toolbar']).lower():
        warning_text = interrupt.get_quit_hint()
        warning_line = f'<style fg="#ffff00">{html.escape(warning_text)}</style>\n'

    if center_len > 0:
        # Position center text; padding is plain (unstyled) spaces.
        pad_left_len = max(1, (width // 2 - center_len // 2) - len(cwd_text))
        pad_right_len = max(1, width - len(cwd_text) - pad_left_len - center_len - len(model_text))

        pad_left = " " * pad_left_len
        pad_right = " " * pad_right_len

        bottom_line = (
            f'<style fg="#8a8a8a">{cwd_html}</style>'
            f'{pad_left}{center_html}{pad_right}'
            f'<style fg="#af5fff">{model_html}</style>'
        )
    else:
        # Just left and right
        padding_len = max(1, width - len(cwd_text) - len(model_text))
        padding = " " * padding_len

        bottom_line = (
            f'<style fg="#8a8a8a">{cwd_html}</style>'
            f'{padding}'
            f'<style fg="#af5fff">{model_html}</style>'
        )

    return HTML(f"{warning_line}{bottom_line}")
|
|
445
|
+
|
|
446
|
+
|
|
447
|
+
def start_chat(initial_model_id):
    """Main chat loop.

    Sets up the prompt session, then repeatedly: reads user input, handles
    slash commands, inlines @file mentions, and streams model turns —
    looping model -> tool execution -> model until the model produces a
    response with no tool calls (or a tool is denied). Exits on EOF or the
    /exit command.
    """
    # Clear screen before starting chat
    os.system('cls' if os.name == 'nt' else 'clear')

    ui.print_ascii_art()

    # Keep at most 25 entries of on-disk prompt history.
    config.prune_history(config.get_history_file_path(), 25)
    sys_prompt = api.get_system_prompt()
    # Mutable session state shared with key bindings and the status bar.
    state = {
        "messages": [{"role": "system", "content": sys_prompt}],
        "toolbar": None,
        "current_model": initial_model_id
    }

    # Reset permissions for new session
    permissions.PERMISSION_STORE.reset()

    bindings = handlers.get_chat_bindings(state)

    chat_style = Style.from_dict({
        'mention': '#af5fff',  # Purple for @mentions
        'prompt_prefix': '#0087ff',  # Blue for prompt
        'bottom-toolbar': 'bg:default #8a8a8a',
        'completion-menu.completion': 'bg:#2c2c2c #bcbcbc',
        'completion-menu.completion.current': 'bg:#5f5faf #ffffff',
        'completion-menu.meta.completion': 'bg:#2c2c2c #8a8a8a',
        'completion-menu.meta.completion.current': 'bg:#5f5faf #ffffff',
        'completion-menu.multi-column-meta': 'bg:#2c2c2c #bcbcbc',
    })

    session = PromptSession(
        completer=commands.completer,
        lexer=MentionLexer(),
        style=chat_style,
        key_bindings=bindings,
        bottom_toolbar=lambda: get_status_bar(state),
        complete_while_typing=True,
        history=FileHistory(config.get_history_file_path())
    )

    # Print tips and connection info
    ui.print_tips()
    print(f"{Colors.SUCCESS}{Icons.CHECK} Connected to {Colors.SECONDARY}{state['current_model']}{Colors.RESET}")
    print()

    while True:
        try:
            # Prompt with placeholder hint
            user_input = session.prompt([('class:prompt_prefix', '> ')])

            if not user_input.strip():
                continue

            # Slash commands are handled locally, never sent to the model.
            if user_input.startswith('/'):
                result = commands.handle_command(user_input, state)
                if result == "exit":
                    break
                if result == "select_model":
                    new_model = select_model_ui(skip_loading=(api.CURRENT_API in ["gemini", "cerebras", "anthropic", "openai"]))
                    state['current_model'] = new_model
                    print(f"\n{Colors.SUCCESS}{Icons.CHECK} Switched to {Colors.SECONDARY}{state['current_model']}{Colors.RESET}\n")

                if result == "reload_model":
                    is_cloud = api.CURRENT_API in ["gemini", "cerebras", "anthropic", "openai"]
                    if not is_cloud:
                        spinner = ui.Spinner(f"Loading {state['current_model']}")
                        spinner.start()
                        try:
                            api.load_model_and_verify(state['current_model'])
                            spinner.stop()
                        except Exception:
                            spinner.stop()
                            print(f"{Colors.WARNING}Failed to verify local model.{Colors.RESET}")
                continue

            # Handle @file mentions
            processed_content = user_input
            mentions = []

            found_mentions = re.findall(r'@([^\s]+)', user_input)

            # Each file is attached at most once, even if mentioned twice.
            added_paths = set()

            for raw_path in found_mentions:
                # Strip trailing punctuation like "@file.py," or "@file.py)".
                path = raw_path.rstrip('.,!?:;)]}\'"')

                if path in added_paths:
                    continue

                # Unreadable paths are silently skipped (mention stays as text).
                content = tools.read_file(path)
                if not content.startswith("Error:"):
                    mentions.append(f"Content of {path}:\n```\n{content}\n```")
                    added_paths.add(path)

            if mentions:
                processed_content += "\n\n--- ATTACHED FILES ---" + "\n\n".join(mentions)

            state['messages'].append({"role": "user", "content": processed_content})

            while True:  # Continue LLM execution after tool is run
                # Show AI prefix
                print()
                ai_res, ai_reasoning, thinking_duration = api.stream_chat(state['current_model'], state['messages'])

                if not ai_res:  # Response cancelled or errored
                    break

                # Parse commands BEFORE adding to history
                cmds = tools.parse_commands(ai_res)

                if cmds:
                    # Tool call detected - clean the response to remove anything after the tool call
                    # Find the last tool call's end position
                    last_tool_end = 0
                    for cmd in cmds:
                        tool_str = cmd['original']
                        pos = ai_res.rfind(tool_str)
                        if pos != -1:
                            last_tool_end = max(last_tool_end, pos + len(tool_str))

                    # Truncate the response to end at the last tool call
                    if last_tool_end > 0:
                        ai_res = ai_res[:last_tool_end]

                # Now add the cleaned response to history
                history_msg = {"role": "assistant", "content": ai_res}
                if ai_reasoning:
                    history_msg["reasoning"] = ai_reasoning
                if thinking_duration:
                    history_msg["thinking_duration"] = thinking_duration
                state['messages'].append(history_msg)

                if cmds:
                    result_str = process_tool_calls(cmds, state)

                    if result_str:
                        # Continue the loop with tool results
                        state['messages'].append({"role": "user", "content": result_str})
                    else:
                        # All tools denied
                        break
                else:
                    break

        except EOFError:
            break
|
|
594
|
+
|
|
595
|
+
|
|
596
|
+
def main():
    """Application entry point.

    Prints the banner, configures terminal/interrupt handling, registers
    the stats summary for exit, then loops provider -> model -> chat.
    A BackSignal from any selection screen restarts provider selection.
    """
    ui.print_ascii_art()

    interrupt.hide_control_chars()
    atexit.register(STATS.print_summary)

    cloud_providers = ["gemini", "cerebras", "anthropic", "openai"]
    while True:
        try:
            provider, key = select_api_ui()
            api.set_api_config(provider, key)

            # Cloud providers have no local model to load/verify.
            model = select_model_ui(skip_loading=provider in cloud_providers)
            start_chat(model)
        except interrupt.BackSignal:
            interrupt.clear_bottom_warning()
        else:
            break
|
|
614
|
+
|
|
615
|
+
|
|
616
|
+
# Allow running the module directly (e.g. `python -m grucli.main`).
if __name__ == "__main__":
    main()
|