ayechat-dev 0.36.8.20260126124453__py3-none-any.whl → 0.36.9.20260204003405__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aye/controller/command_handlers.py CHANGED
@@ -7,7 +7,7 @@ from prompt_toolkit import PromptSession
 from rich import print as rprint
 from rich.console import Console
 
-from aye.model.auth import get_user_config, set_user_config
+from aye.model.auth import get_user_config, set_user_config, delete_user_config
 from aye.model.config import MODELS
 from aye.presenter.repl_ui import print_error
 from aye.controller.llm_invoker import invoke_llm
@@ -37,7 +37,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
         num = int(tokens[1])
         if 1 <= num <= len(models):
             selected_id = models[num - 1]["id"]
-
+
             # Check if this is an offline model and trigger download if needed
             selected_model = models[num - 1]
             if selected_model.get("type") == "offline":
@@ -49,7 +49,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
                 if download_response and not download_response.get("success", True):
                     rprint(f"[red]Failed to download model: {download_response.get('error', 'Unknown error')}[/]")
                     return
-
+
             conf.selected_model = selected_id
             set_user_config("selected_model", selected_id)
             rprint(f"[green]Selected model: {models[num - 1]['name']}[/]")
@@ -81,7 +81,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
         num = int(choice)
         if 1 <= num <= len(models):
             selected_id = models[num - 1]["id"]
-
+
             # Check if this is an offline model and trigger download if needed
             selected_model = models[num - 1]
             if selected_model.get("type") == "offline":
@@ -93,7 +93,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
                 if download_response and not download_response.get("success", True):
                     rprint(f"[red]Failed to download model: {download_response.get('error', 'Unknown error')}[/]")
                     return
-
+
             conf.selected_model = selected_id
             set_user_config("selected_model", selected_id)
             rprint(f"[green]Selected: {models[num - 1]['name']}[/]")
@@ -147,7 +147,7 @@ def handle_debug_command(tokens: list):
 
 def handle_completion_command(tokens: list) -> Optional[str]:
     """Handle the 'completion' command for switching completion styles.
-
+
     Returns:
         The new completion style if changed ('readline' or 'multi'), None otherwise.
     """
@@ -169,25 +169,125 @@ def handle_completion_command(tokens: list) -> Optional[str]:
     return None
 
 
+def handle_llm_command(session: Optional[PromptSession], tokens: list[str]) -> None:
+    """Handle the 'llm' command for configuring OpenAI-compatible local model endpoint.
+
+    Usage:
+        llm       - Interactively configure URL, key, and model
+        llm clear - Remove all LLM config values
+
+    Config keys stored in ~/.ayecfg:
+        llm_api_url
+        llm_api_key
+        llm_model
+    """
+    # Handle 'llm clear' subcommand
+    if len(tokens) > 1 and tokens[1].lower() == "clear":
+        delete_user_config("llm_api_url")
+        delete_user_config("llm_api_key")
+        delete_user_config("llm_model")
+        rprint("[green]LLM config cleared.[/]")
+        return
+
+    # Interactive configuration
+    current_url = get_user_config("llm_api_url", "")
+    current_key = get_user_config("llm_api_key", "")
+    current_model = get_user_config("llm_model", "")
+
+    # Show current status
+    rprint("\n[bold cyan]LLM Endpoint Configuration[/]")
+    rprint("[dim]Press Enter to keep current value, or type a new value.[/]\n")
+
+    if not session:
+        rprint("[red]Error: Interactive session not available.[/]")
+        return
+
+    try:
+        # Prompt for URL (explicitly non-password; some prompt_toolkit versions may reuse app state)
+        url_display = current_url if current_url else "not set"
+        new_url = session.prompt(
+            f"LLM API URL (current: {url_display}): ",
+            is_password=False,
+        ).strip()
+        final_url = new_url if new_url else current_url
+
+        # Prompt for API key (hidden input)
+        key_display = "set" if current_key else "not set"
+        new_key = session.prompt(
+            f"LLM API KEY (current: {key_display}): ",
+            is_password=True,
+        ).strip()
+        final_key = new_key if new_key else current_key
+
+        # Prompt for model (explicitly non-password)
+        model_display = current_model if current_model else "not set"
+        new_model = session.prompt(
+            f"LLM MODEL (current: {model_display}): ",
+            is_password=False,
+        ).strip()
+        final_model = new_model if new_model else current_model
+
+    except (EOFError, KeyboardInterrupt):
+        rprint("\n[yellow]Configuration cancelled.[/]")
+        return
+
+    # Save values (only if they have content)
+    if final_url:
+        set_user_config("llm_api_url", final_url)
+    elif current_url and not new_url:
+        # Keep existing
+        pass
+    else:
+        delete_user_config("llm_api_url")
+
+    if final_key:
+        set_user_config("llm_api_key", final_key)
+    elif current_key and not new_key:
+        # Keep existing
+        pass
+    else:
+        delete_user_config("llm_api_key")
+
+    if final_model:
+        set_user_config("llm_model", final_model)
+    elif current_model and not new_model:
+        # Keep existing
+        pass
+    else:
+        delete_user_config("llm_model")
+
+    # Print confirmation
+    rprint("\n[bold cyan]LLM Configuration Updated[/]")
+    rprint(f" URL: {final_url if final_url else '[dim]not set[/]'}")
+    rprint(f" KEY: {'[dim]set (hidden)[/]' if final_key else '[dim]not set[/]'}")
+    rprint(f" MODEL: {final_model if final_model else '[dim]not set[/]'}")
+
+    # Show status message
+    if final_url and final_key:
+        rprint("\n[green] OpenAI-compatible endpoint is configured and active.[/]")
+    else:
+        rprint("\n[yellow] Both URL and KEY are required for the local LLM endpoint to be active.[/]")
+
+
 def _expand_file_patterns(patterns: list[str], conf: Any) -> list[str]:
     """Expand wildcard patterns and return a list of existing file paths."""
     expanded_files = []
-
+
     for pattern in patterns:
         pattern = pattern.strip()
         if not pattern:
             continue
-
+
         # Check if it's a direct file path first
         direct_path = conf.root / pattern
         if direct_path.is_file():
             expanded_files.append(pattern)
             continue
-
+
         # Use glob to expand wildcards
         # Search relative to the project root
         matched_paths = list(conf.root.glob(pattern))
-
+
         # Add relative paths of matched files
        for matched_path in matched_paths:
            if matched_path.is_file():
@@ -197,26 +297,26 @@ def _expand_file_patterns(patterns: list[str], conf: Any) -> list[str]:
             except ValueError:
                 # If we can't make it relative, use the original pattern
                 expanded_files.append(pattern)
-
+
     return expanded_files
 
 
 def handle_with_command(
-    prompt: str,
-    conf: Any,
-    console: Console,
-    chat_id: int,
+    prompt: str,
+    conf: Any,
+    console: Console,
+    chat_id: int,
     chat_id_file: Path
 ) -> Optional[int]:
     """Handle the 'with' command for file-specific prompts with wildcard support.
-
+
     Args:
         prompt: The full prompt string starting with 'with'
         conf: Configuration object
         console: Rich console for output
         chat_id: Current chat ID
         chat_id_file: Path to chat ID file
-
+
     Returns:
         New chat_id if available, None otherwise
     """
@@ -234,16 +334,16 @@ def handle_with_command(
 
     # Parse file patterns (can include wildcards)
     file_patterns = [f.strip() for f in file_list_str.replace(",", " ").split() if f.strip()]
-
+
     # Expand wildcards to get actual file paths
     expanded_files = _expand_file_patterns(file_patterns, conf)
-
+
     if not expanded_files:
         rprint("[red]Error: No files found matching the specified patterns.[/red]")
         return None
-
+
     explicit_source_files = {}
-
+
     for file_name in expanded_files:
         file_path = conf.root / file_name
         if not file_path.is_file():
@@ -254,11 +354,11 @@ def handle_with_command(
         except Exception as e:
             rprint(f"[red]Could not read file '{file_name}': {e}[/red]")
             continue  # Continue with other files instead of breaking
-
+
     if not explicit_source_files:
         rprint("[red]Error: No readable files found.[/red]")
         return None
-
+
     # Show which files were included
     if conf.verbose or len(explicit_source_files) != len(expanded_files):
         rprint(f"[cyan]Including {len(explicit_source_files)} file(s): {', '.join(explicit_source_files.keys())}[/cyan]")
@@ -272,20 +372,20 @@ def handle_with_command(
             verbose=conf.verbose,
             explicit_source_files=explicit_source_files
         )
-
+
         if llm_response:
             new_chat_id = process_llm_response(
-                response=llm_response,
-                conf=conf,
-                console=console,
-                prompt=new_prompt_str.strip(),
+                response=llm_response,
+                conf=conf,
+                console=console,
+                prompt=new_prompt_str.strip(),
                 chat_id_file=chat_id_file if llm_response.chat_id else None
             )
             return new_chat_id
         else:
             rprint("[yellow]No response from LLM.[/]")
             return None
-
+
     except Exception as exc:
         handle_llm_error(exc)
         return None
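The new `handle_llm_command` above applies one resolution rule to all three keys (`llm_api_url`, `llm_api_key`, `llm_model`): pressing Enter keeps the current value, typing a new value overwrites it, and a key that ends up empty is deleted from `~/.ayecfg`. A small sketch of that rule, using a hypothetical `resolve` helper (not part of the package):

```python
# Hypothetical helper mirroring the "Enter keeps current value" rule
# handle_llm_command uses for llm_api_url, llm_api_key, and llm_model.
def resolve(new_value: str, current_value: str) -> str:
    """Return the value that would be saved after one prompt round."""
    return new_value if new_value else current_value

assert resolve("", "http://localhost:8080/v1") == "http://localhost:8080/v1"  # Enter: keep current
assert resolve("http://localhost:1234/v1", "") == "http://localhost:1234/v1"  # typed: overwrite
assert resolve("", "") == ""  # still empty: the key is deleted via delete_user_config
```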
aye/controller/repl.py CHANGED
@@ -1,7 +1,7 @@
 import os
 import json
 from pathlib import Path
-from typing import Optional, Any
+from typing import Optional, Any, List
 import shlex
 import threading
 import glob
@@ -41,6 +41,7 @@ from aye.controller.command_handlers import (
     handle_completion_command,
     handle_with_command,
     handle_blog_command,
+    handle_llm_command,
 )
 
 DEBUG = False
@@ -252,11 +253,39 @@ def create_prompt_session(completer: Any, completion_style: str = "readline") ->
     )
 
 
+def _execute_forced_shell_command(command: str, args: List[str], conf: Any) -> None:
+    """Execute a shell command with force flag (bypasses command validation).
+
+    Used when user prefixes input with '!' to force shell execution.
+
+    Args:
+        command: The command to execute
+        args: List of arguments to pass to the command
+        conf: Configuration object with plugin_manager
+    """
+    telemetry.record_command(command, has_args=len(args) > 0, prefix=_CMD_PREFIX)
+    shell_response = conf.plugin_manager.handle_command(
+        "execute_shell_command",
+        {"command": command, "args": args, "force": True}
+    )
+    if shell_response is not None:
+        if "stdout" in shell_response or "stderr" in shell_response:
+            if shell_response.get("stdout", "").strip():
+                rprint(shell_response["stdout"])
+            if shell_response.get("stderr", "").strip():
+                rprint(f"[yellow]{shell_response['stderr']}[/]")
+        if "error" in shell_response:
+            rprint(f"[red]Error:[/] {shell_response['error']}")
+        elif "message" in shell_response:
+            rprint(shell_response["message"])
+    else:
+        rprint(f"[red]Error:[/] Failed to execute shell command")
+
 
 def chat_repl(conf: Any) -> None:
     is_first_run = run_first_time_tutorial_if_needed()
 
-    BUILTIN_COMMANDS = ["with", "blog", "new", "history", "diff", "restore", "undo", "keep", "model", "verbose", "debug", "completion", "exit", "quit", ":q", "help", "cd", "db"]
+    BUILTIN_COMMANDS = ["with", "blog", "new", "history", "diff", "restore", "undo", "keep", "model", "verbose", "debug", "completion", "exit", "quit", ":q", "help", "cd", "db", "llm"]
 
     # Get the completion style setting
     completion_style = get_user_config("completion_style", "readline").lower()
@@ -327,6 +356,14 @@ def chat_repl(conf: Any) -> None:
             chat_id = new_chat_id
             continue
 
+        # Check for '!' prefix - force shell execution
+        force_shell = False
+        if prompt.strip().startswith('!'):
+            force_shell = True
+            prompt = prompt.strip()[1:]  # Remove the '!'
+            if not prompt.strip():
+                continue  # Nothing after the '!', skip
+
         if not prompt.strip():
             continue
         tokens = shlex.split(prompt.strip(), posix=False)
@@ -340,6 +377,11 @@ def chat_repl(conf: Any) -> None:
 
         original_first, lowered_first = tokens[0], tokens[0].lower()
 
+        # If force_shell is True, execute as shell command directly and skip all other checks
+        if force_shell:
+            _execute_forced_shell_command(original_first, tokens[1:], conf)
+            continue
+
         # Normalize slash-prefixed commands: /restore -> restore, /model -> model, etc.
         if lowered_first.startswith('/'):
             lowered_first = lowered_first[1:]  # Remove leading slash
@@ -388,6 +430,9 @@ def chat_repl(conf: Any) -> None:
                 # Recreate the session with the new completer
                 session = create_prompt_session(completer, new_style)
                 rprint(f"[green]Completion style is now active.[/]")
+        elif lowered_first == "llm":
+            telemetry.record_command("llm", has_args=len(tokens) > 1, prefix=_AYE_PREFIX)
+            handle_llm_command(session, tokens)
         elif lowered_first == "blog":
             telemetry.record_command("blog", has_args=len(tokens) > 1, prefix=_AYE_PREFIX)
             telemetry.record_llm_prompt("LLM <blog>")
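Taken together, the repl.py hunks wire the `!` prefix as follows: the prefix is stripped before tokenization, a `force_shell` flag is remembered, and once tokens exist the input is routed straight to `_execute_forced_shell_command`, bypassing built-in command dispatch. A standalone sketch of the parsing step (the real loop also records telemetry and goes through `conf.plugin_manager`; `parse_forced_shell` is a hypothetical helper):

```python
import shlex

# Standalone approximation of the '!' prefix parsing added to the REPL loop.
def parse_forced_shell(prompt: str) -> tuple[bool, str, list[str]]:
    """Return (force_shell, command, args) for one raw input line."""
    text = prompt.strip()
    force_shell = text.startswith('!')
    if force_shell:
        text = text[1:]  # drop the '!', as the loop above does
    if not text.strip():
        return force_shell, "", []  # empty line (or bare '!') is skipped
    tokens = shlex.split(text.strip(), posix=False)
    return force_shell, tokens[0], tokens[1:]

assert parse_forced_shell("!echo hello") == (True, "echo", ["hello"])
assert parse_forced_shell("model") == (False, "model", [])
assert parse_forced_shell("!") == (True, "", [])
```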
aye/model/auth.py CHANGED
@@ -58,6 +58,27 @@ def set_user_config(key: str, value: Any) -> None:
     TOKEN_FILE.chmod(0o600)
 
 
+def delete_user_config(key: str) -> None:
+    """Delete a user config key from the [default] section.
+
+    If the key doesn't exist, this is a no-op.
+    Preserves other settings and maintains file permissions.
+    """
+    config = _parse_user_config()
+    if key not in config:
+        return
+    config.pop(key, None)
+    if not config:
+        # If no config left, remove the file entirely
+        TOKEN_FILE.unlink(missing_ok=True)
+    else:
+        new_content = "[default]\n"
+        for k, v in config.items():
+            new_content += f"{k}={v}\n"
+        TOKEN_FILE.write_text(new_content, encoding="utf-8")
+        TOKEN_FILE.chmod(0o600)
+
+
 def store_token(token: str) -> None:
     """Persist the token in ~/.ayecfg or value from AYE_TOKEN_FILE environment variable (unless AYE_TOKEN is set)."""
     token = token.strip()
@@ -122,4 +143,3 @@ def login_flow() -> None:
     token = typer.prompt("Paste your token", hide_input=True)
     store_token(token.strip())
     typer.secho("✅ Token saved.", fg=typer.colors.GREEN)
-
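`delete_user_config` rewrites `~/.ayecfg` rather than editing it in place: it drops the key, removes the file when nothing remains, and otherwise re-emits a `[default]` section with `0o600` permissions. A self-contained sketch of the same pattern, with the module's `_parse_user_config()`/`TOKEN_FILE` globals replaced by explicit parameters (`delete_key` is hypothetical):

```python
from pathlib import Path

# Hypothetical stand-in for delete_user_config: same rewrite pattern,
# but the parsed config and the file path are passed in explicitly.
def delete_key(cfg_file: Path, config: dict, key: str) -> None:
    if key not in config:
        return  # missing key: no-op, file untouched
    config.pop(key)
    if not config:
        cfg_file.unlink(missing_ok=True)  # nothing left: remove the file
    else:
        lines = ["[default]"] + [f"{k}={v}" for k, v in config.items()]
        cfg_file.write_text("\n".join(lines) + "\n", encoding="utf-8")
        cfg_file.chmod(0o600)  # keep the credentials file private
```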
aye/plugins/local_model.py CHANGED
@@ -8,6 +8,7 @@ from rich import print as rprint
 
 from .plugin_base import Plugin
 from aye.model.config import SYSTEM_PROMPT, MODELS, DEFAULT_MAX_OUTPUT_TOKENS
+from aye.model.auth import get_user_config
 from aye.controller.util import is_truncated_json
 
 LLM_TIMEOUT = 600.0
@@ -179,9 +180,17 @@ class LocalModelPlugin(Plugin):
             return self._create_error_response(f"Error calling Databricks API: {str(e)}")
 
     def _handle_openai_compatible(self, prompt: str, source_files: Dict[str, str], chat_id: Optional[int] = None, system_prompt: Optional[str] = None, max_output_tokens: int = DEFAULT_MAX_OUTPUT_TOKENS) -> Optional[Dict[str, Any]]:
-        api_url = os.environ.get("AYE_LLM_API_URL")
-        api_key = os.environ.get("AYE_LLM_API_KEY")
-        model_name = os.environ.get("AYE_LLM_MODEL", "gpt-3.5-turbo")
+        """Handle OpenAI-compatible API endpoints.
+
+        Reads configuration from:
+        - get_user_config("llm_api_url") / AYE_LLM_API_URL
+        - get_user_config("llm_api_key") / AYE_LLM_API_KEY
+        - get_user_config("llm_model") / AYE_LLM_MODEL (default: gpt-3.5-turbo)
+        """
+        # Read from config (supports both ~/.ayecfg and AYE_LLM_* env vars)
+        api_url = get_user_config("llm_api_url")
+        api_key = get_user_config("llm_api_key")
+        model_name = get_user_config("llm_model", "gpt-3.5-turbo")
 
         if not api_url or not api_key:
             return None
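Per the docstring added above, endpoint settings now resolve through `get_user_config`, which the diff's own comment says also honors the old `AYE_LLM_*` environment variables. A hedged sketch of that lookup shape (`lookup` is hypothetical, and the real precedence between file and environment is not shown in this diff):

```python
import os

# Approximation of the documented fallback: a key such as "llm_api_url"
# may come from ~/.ayecfg or from AYE_LLM_API_URL. This sketch assumes
# file values win; the package may order these differently.
def lookup(key: str, file_config: dict, default: str | None = None) -> str | None:
    env_name = f"AYE_{key.upper()}"  # llm_api_url -> AYE_LLM_API_URL
    return file_config.get(key) or os.environ.get(env_name) or default

cfg = {"llm_api_url": "http://localhost:8080/v1"}
assert lookup("llm_api_url", cfg) == "http://localhost:8080/v1"
assert lookup("llm_model", {}, default="gpt-3.5-turbo") == "gpt-3.5-turbo"
```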
aye/plugins/shell_executor.py CHANGED
@@ -10,7 +10,7 @@ from rich import print as rprint
 
 class ShellExecutorPlugin(Plugin):
     name = "shell_executor"
-    version = "1.0.2"  # Fixed Windows localization bug in command detection
+    version = "1.0.3"  # Added force parameter for ! prefix execution
     premium = "free"
 
     # Known interactive commands that require a TTY (add more as needed)
@@ -143,15 +143,17 @@ class ShellExecutorPlugin(Plugin):
                 "returncode": e.returncode
             }
         except FileNotFoundError:
-            return None  # Command not found
+            return {"error": f"Command not found: {command}", "stdout": "", "stderr": "", "returncode": 127}
 
     def on_command(self, command_name: str, params: Dict[str, Any]) -> Optional[Dict[str, Any]]:
         """Handle shell command execution through plugin system."""
         if command_name == "execute_shell_command":
             command = params.get("command", "")
             args = params.get("args", [])
+            force = params.get("force", False)
 
-            if not self._is_valid_command(command):
+            # If force is False, validate the command exists first
+            if not force and not self._is_valid_command(command):
                 return None  # Command not found or not executable
 
             full_cmd_str = self._build_full_cmd(command, args)
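The two shell_executor changes interact: with `force=False`, an unknown command still returns `None` so other handlers can claim the input, while a forced run of a missing binary now reports a structured error with returncode 127 (the conventional shell "command not found" code) instead of silently returning `None`. A toy sketch of that decision table (`handle` is a hypothetical stand-in for `on_command` plus `_is_valid_command`):

```python
# Toy model of the force/validation interplay; handle() is hypothetical.
def handle(command: str, known: set, force: bool = False):
    if not force and command not in known:
        return None  # unforced unknown command: defer to other handlers
    if command not in known:
        # forced but missing: structured error instead of None (shell code 127)
        return {"error": f"Command not found: {command}",
                "stdout": "", "stderr": "", "returncode": 127}
    return {"stdout": f"ran {command}", "stderr": "", "returncode": 0}

assert handle("frobnicate", {"echo"}) is None
assert handle("frobnicate", {"echo"}, force=True)["returncode"] == 127
assert handle("echo", {"echo"}, force=True)["returncode"] == 0
```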
aye/presenter/repl_ui.py CHANGED
@@ -87,8 +87,9 @@ def print_help_message():
     commands = [
         # Some commands are intentionally undocumented: keep them as such.
         ("@filename", "Include a file in your prompt inline (e.g., \"explain @main.py\"). Supports wildcards (e.g., @*.py, @src/*.js)."),
-        ("blog <intent>", "Generate a technical deep-dive blog post derived from the current chat session and write it to blog.md."),
+        ("!command", "Force shell execution (e.g., \"!echo hello\")."),
         ("model", "Select a different model. Selection will persist between sessions."),
+        ("llm", "Configure OpenAI-compatible LLM endpoint (URL, key, model). Use 'llm clear' to reset."),
         (r"verbose \[on|off]", "Toggle verbose mode to increase or decrease chattiness (on/off, persists between sessions)"),
         (r"completion \[readline|multi]", "Switch auto-completion style (readline or multi, persists between sessions)"),
         ("new", "Start a new chat session (if you want to change the subject)"),
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ayechat-dev
-Version: 0.36.8.20260126124453
+Version: 0.36.9.20260204003405
 Summary: Aye Chat: Terminal-first AI Code Generator
 Author-email: "Acrotron, Inc." <info@acrotron.com>
 License: MIT
@@ -3,18 +3,18 @@ aye/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aye/__main__.py,sha256=fj7pl0i_mLvpKAbpKtotU3zboLBTivsILSLKuH5M5Sg,5377
 aye/__main_chat__.py,sha256=R6RaidxG3Px5TaYxcoWAuIleE5KUZlceneUB6u_9UVU,1066
 aye/controller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aye/controller/command_handlers.py,sha256=C-6beHXVhW8vR_AbH4dCPl794ycFNVGuMBQiHnH4naI,13438
+aye/controller/command_handlers.py,sha256=72dA9x4vHOxsSuWb1wEX2xTxL4uCFuiSPL4-VpEPwyA,16728
 aye/controller/commands.py,sha256=sXmK_sgNBrw9Fs7mKcr93-wsu740ZlvWSisQfS-1EUE,12278
 aye/controller/llm_handler.py,sha256=BSBab6onF9BYiFndYW1647eEy377Vt52trzu0Qjm4bQ,5075
 aye/controller/llm_invoker.py,sha256=p_Vk2a3YrWKwDupLfSVRinR5llDfq1Fb_f7WrYozK6M,14127
 aye/controller/plugin_manager.py,sha256=9ZuITyA5sQJJJU-IntLQ1SsxXsDnbgZKPOF4e9VmsEU,3018
-aye/controller/repl.py,sha256=FOw8jH2gJp8DcNrSiUZkwrqB-qtDZ4b_9HwwpK4jiHY,24697
+aye/controller/repl.py,sha256=fTYEdX1PAds49pRBVgvyi-4lIfArR8gjBTp_shwx-K0,26782
 aye/controller/tutorial.py,sha256=lc92jOcJOYCVrrjTEF0Suk4-8jn-ku98kTJEIL8taUA,7254
 aye/controller/util.py,sha256=gBmktDEaY63OKhgzZHA2IFrgcWUN_Iphn3e1daEeUBI,2828
 aye/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aye/model/api.py,sha256=HhSMQQ_szdC2ZPOSfNsJRbs1FRwb6WyYIeLejB2ScbA,13272
 aye/model/ast_chunker.py,sha256=rVcDdynVUXXyxWVgtUcsee_STqB7SAwP776ktWTlYig,4462
-aye/model/auth.py,sha256=FDTUGubrIzVQ5tdk97JFOwUGQNfDDcvRaqYOA9MTI_w,4144
+aye/model/auth.py,sha256=ozV_uQxdqXtUoWO3nZwpzVnDOIfnRAmSMC6W0N724vE,4800
 aye/model/config.py,sha256=o6bQhj5gqhSqtWD6DLow7NNy6Hdaede02h_xb7uPLXo,9280
 aye/model/download_plugins.py,sha256=6omyFGdxlEIb7tKPLq4rRVrRYeCPUUCE8aZHvJAKGSc,4442
 aye/model/file_processor.py,sha256=b7YGvHAmhGto9JbtzcfrsdkFtksHbosYt-42EnR22Uo,2131
@@ -42,20 +42,20 @@ aye/plugins/__init__.py,sha256=dSTxs461ICx0O1tbCBCca0W_7QIAa0Yt9PQhHiT5uZQ,173
 aye/plugins/at_file_completer.py,sha256=uNS4gWpfKvn9_nGxZbhQVjVg_S82g977gfBR-pL3XrQ,19582
 aye/plugins/auto_detect_mask.py,sha256=gZKH4qkR-A73uKpMkPXhlgI452Ae_2YG1nHtaIkOvwM,6864
 aye/plugins/completer.py,sha256=qhxke5Q76P2u0LojSIL3V48RTNG5tWL-5-TK5tNutrE,13893
-aye/plugins/local_model.py,sha256=Jj4bHiPYaLMx6zTrKamBPrkiGQQ787jWz0F4ojRCjlQ,14394
+aye/plugins/local_model.py,sha256=q0RjSjLhEQcDMOCLAK6k1YCW5ECrvdT_g0lKRHMX-AE,14810
 aye/plugins/offline_llm.py,sha256=qFmd1e8Lbl7yiMgXpXjOQkQTNxOk0_WXU7km2DTKXGY,13357
 aye/plugins/plugin_base.py,sha256=t5hTOnA0dZC237BnseAgdXbOqErlSCNLUo_Uul09TSw,1673
-aye/plugins/shell_executor.py,sha256=B4R04pJsjS6uy7z4MP_WcbxMPB6ct8V3BNFp9JMldrE,7317
+aye/plugins/shell_executor.py,sha256=a0mlZnQeURONdtPM7iageTcQ8PiNLQbjxoY54EsS32o,7502
 aye/plugins/slash_completer.py,sha256=MyrDTC_KwVWhtD_kpHbu0WjSjmSAWp36PwOBQczSuXA,2252
 aye/presenter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aye/presenter/cli_ui.py,sha256=8oHqQiMHO8hHXCTdqWoquMkJBshl2_3-YWN6SQnlbKg,8449
 aye/presenter/diff_presenter.py,sha256=cbxfOEqGomPTDvQpKdybfYeNUD2DYVAl85j1uy5--Ww,12512
-aye/presenter/repl_ui.py,sha256=OTJlcqNdNoaAwfEUqallig45rIzMoKa0dUSxk6Io4jU,7979
+aye/presenter/repl_ui.py,sha256=5nAv8qLo3azDuoGYAxGdK2SEwowXPoHSEruupvS6jy8,8023
 aye/presenter/streaming_ui.py,sha256=_3tBEuNH9UQ9Gyq2yuvRfX4SWVkcGMYirEUGj-MXVJ0,12768
 aye/presenter/ui_utils.py,sha256=6KXR4_ZZZUdF5pCHrPqO8yywlQk7AOzWe-2B4Wj_-ZQ,5441
-ayechat_dev-0.36.8.20260126124453.dist-info/licenses/LICENSE,sha256=U1ou6lkMKmPo16-E9YowIu3goU7sOWKUprGo0AOA72s,1065
-ayechat_dev-0.36.8.20260126124453.dist-info/METADATA,sha256=8yGbe7K_dhHm0RDKT9q7aPPrmBPKbU0oel9e4Igp4ws,7718
-ayechat_dev-0.36.8.20260126124453.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-ayechat_dev-0.36.8.20260126124453.dist-info/entry_points.txt,sha256=KGsOma6szoefNN6vHozg3Pbf1fjZ7ZbmwrOiVwBd0Ik,41
-ayechat_dev-0.36.8.20260126124453.dist-info/top_level.txt,sha256=7WZL0LOx4-GKKvgU1mtI5s4Dhk2OdieVZZvVnxFJHr8,4
-ayechat_dev-0.36.8.20260126124453.dist-info/RECORD,,
+ayechat_dev-0.36.9.20260204003405.dist-info/licenses/LICENSE,sha256=U1ou6lkMKmPo16-E9YowIu3goU7sOWKUprGo0AOA72s,1065
+ayechat_dev-0.36.9.20260204003405.dist-info/METADATA,sha256=EHTmVsIzzf2ki5cb1N9-x4eWLHLFbp3eWxtU3pUKe4g,7718
+ayechat_dev-0.36.9.20260204003405.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ayechat_dev-0.36.9.20260204003405.dist-info/entry_points.txt,sha256=KGsOma6szoefNN6vHozg3Pbf1fjZ7ZbmwrOiVwBd0Ik,41
+ayechat_dev-0.36.9.20260204003405.dist-info/top_level.txt,sha256=7WZL0LOx4-GKKvgU1mtI5s4Dhk2OdieVZZvVnxFJHr8,4
+ayechat_dev-0.36.9.20260204003405.dist-info/RECORD,,