ayechat 0.37.0__tar.gz → 0.39.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156)
  1. {ayechat-0.37.0/src/ayechat.egg-info → ayechat-0.39.0}/PKG-INFO +1 -1
  2. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/command_handlers.py +148 -29
  3. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/llm_handler.py +55 -3
  4. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/llm_invoker.py +65 -55
  5. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/repl.py +9 -1
  6. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/auth.py +21 -1
  7. ayechat-0.39.0/src/aye/model/autodiff_config.py +32 -0
  8. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/config.py +5 -0
  9. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/snapshot/__init__.py +98 -0
  10. ayechat-0.39.0/src/aye/plugins/databricks_model.py +312 -0
  11. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/local_model.py +12 -48
  12. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/presenter/repl_ui.py +6 -4
  13. {ayechat-0.37.0 → ayechat-0.39.0/src/ayechat.egg-info}/PKG-INFO +1 -1
  14. {ayechat-0.37.0 → ayechat-0.39.0}/src/ayechat.egg-info/SOURCES.txt +2 -0
  15. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_llm_handler.py +4 -0
  16. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_local_model_plugin.py +1 -60
  17. {ayechat-0.37.0 → ayechat-0.39.0}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
  18. {ayechat-0.37.0 → ayechat-0.39.0}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  19. {ayechat-0.37.0 → ayechat-0.39.0}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
  20. {ayechat-0.37.0 → ayechat-0.39.0}/.github/dependabot.yml +0 -0
  21. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/build-windows-installer.yml +0 -0
  22. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/message-releases-to-discord.yml +0 -0
  23. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/pylint.yml +0 -0
  24. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/python-publish-dev.yml +0 -0
  25. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/python-publish.yml +0 -0
  26. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/python-testing.yml +0 -0
  27. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/test-homebrew.yml +0 -0
  28. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/test-nix-github.yml +0 -0
  29. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/test-nix.yml +0 -0
  30. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/test-windows-installer.yml +0 -0
  31. {ayechat-0.37.0 → ayechat-0.39.0}/.github/workflows/update-homebrew.yml +0 -0
  32. {ayechat-0.37.0 → ayechat-0.39.0}/.gitignore +0 -0
  33. {ayechat-0.37.0 → ayechat-0.39.0}/.pylintrc +0 -0
  34. {ayechat-0.37.0 → ayechat-0.39.0}/BUILD.md +0 -0
  35. {ayechat-0.37.0 → ayechat-0.39.0}/DISCLAIMER +0 -0
  36. {ayechat-0.37.0 → ayechat-0.39.0}/Formula/aye-chat.rb +0 -0
  37. {ayechat-0.37.0 → ayechat-0.39.0}/LICENSE +0 -0
  38. {ayechat-0.37.0 → ayechat-0.39.0}/README.md +0 -0
  39. {ayechat-0.37.0 → ayechat-0.39.0}/assets/aye-chat.ico +0 -0
  40. {ayechat-0.37.0 → ayechat-0.39.0}/aye-chat.spec +0 -0
  41. {ayechat-0.37.0 → ayechat-0.39.0}/ayechat.nix +0 -0
  42. {ayechat-0.37.0 → ayechat-0.39.0}/flake.lock +0 -0
  43. {ayechat-0.37.0 → ayechat-0.39.0}/flake.nix +0 -0
  44. {ayechat-0.37.0 → ayechat-0.39.0}/installer.iss +0 -0
  45. {ayechat-0.37.0 → ayechat-0.39.0}/publish_pypi.sh +0 -0
  46. {ayechat-0.37.0 → ayechat-0.39.0}/pyproject.toml +0 -0
  47. {ayechat-0.37.0 → ayechat-0.39.0}/requirements.txt +0 -0
  48. {ayechat-0.37.0 → ayechat-0.39.0}/run_tests.cmd +0 -0
  49. {ayechat-0.37.0 → ayechat-0.39.0}/run_tests.sh +0 -0
  50. {ayechat-0.37.0 → ayechat-0.39.0}/setup.cfg +0 -0
  51. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/.gitignore +0 -0
  52. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/__init__.py +0 -0
  53. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/__main__.py +0 -0
  54. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/__main_chat__.py +0 -0
  55. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/__init__.py +0 -0
  56. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/commands.py +0 -0
  57. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/plugin_manager.py +0 -0
  58. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/tutorial.py +0 -0
  59. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/util.py +0 -0
  60. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/__init__.py +0 -0
  61. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/api.py +0 -0
  62. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/ast_chunker.py +0 -0
  63. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/download_plugins.py +0 -0
  64. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/file_processor.py +0 -0
  65. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/ignore_patterns.py +0 -0
  66. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/index_manager/__init__.py +0 -0
  67. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/index_manager/index_manager.py +0 -0
  68. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/index_manager/index_manager_executor.py +0 -0
  69. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/index_manager/index_manager_file_ops.py +0 -0
  70. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/index_manager/index_manager_state.py +0 -0
  71. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/index_manager/index_manager_utils.py +0 -0
  72. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/json_extractor.py +0 -0
  73. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/models.py +0 -0
  74. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/offline_llm_manager.py +0 -0
  75. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/onnx_manager.py +0 -0
  76. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/snapshot/base.py +0 -0
  77. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/snapshot/file_backend.py +0 -0
  78. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/snapshot/git_ref_backend.py +0 -0
  79. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/source_collector.py +0 -0
  80. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/telemetry.py +0 -0
  81. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/vector_db.py +0 -0
  82. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/version_checker.py +0 -0
  83. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/write_validator.py +0 -0
  84. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/__init__.py +0 -0
  85. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/at_file_completer.py +0 -0
  86. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/auto_detect_mask.py +0 -0
  87. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/completer.py +0 -0
  88. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/offline_llm.py +0 -0
  89. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/plugin_base.py +0 -0
  90. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/shell_executor.py +0 -0
  91. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/plugins/slash_completer.py +0 -0
  92. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/presenter/__init__.py +0 -0
  93. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/presenter/cli_ui.py +0 -0
  94. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/presenter/diff_presenter.py +0 -0
  95. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/presenter/streaming_ui.py +0 -0
  96. {ayechat-0.37.0 → ayechat-0.39.0}/src/aye/presenter/ui_utils.py +0 -0
  97. {ayechat-0.37.0 → ayechat-0.39.0}/src/ayechat.egg-info/dependency_links.txt +0 -0
  98. {ayechat-0.37.0 → ayechat-0.39.0}/src/ayechat.egg-info/entry_points.txt +0 -0
  99. {ayechat-0.37.0 → ayechat-0.39.0}/src/ayechat.egg-info/requires.txt +0 -0
  100. {ayechat-0.37.0 → ayechat-0.39.0}/src/ayechat.egg-info/top_level.txt +0 -0
  101. {ayechat-0.37.0 → ayechat-0.39.0}/tests/.gitignore +0 -0
  102. {ayechat-0.37.0 → ayechat-0.39.0}/tests/config/unittest-env.sh +0 -0
  103. {ayechat-0.37.0 → ayechat-0.39.0}/tests/e2e/test_chat_workflow.py +0 -0
  104. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_api.py +0 -0
  105. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_ast_chunker.py +0 -0
  106. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_at_file_completer.py +0 -0
  107. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_auth.py +0 -0
  108. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_auth_uat_1.py +0 -0
  109. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_auto_detect_mask.py +0 -0
  110. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_chromadb_corruption_recovery.py +0 -0
  111. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_cli.py +0 -0
  112. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_command_handlers.py +0 -0
  113. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_commands.py +0 -0
  114. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_completer_plugin.py +0 -0
  115. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_config.py +0 -0
  116. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_diff_presenter.py +0 -0
  117. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_download_plugins.py +0 -0
  118. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_file_processor.py +0 -0
  119. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_git_ref_backend.py +0 -0
  120. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_index_manager.py +0 -0
  121. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_index_manager_executor.py +0 -0
  122. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_index_manager_more.py +0 -0
  123. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_index_manager_state.py +0 -0
  124. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_index_manager_utils.py +0 -0
  125. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_llm_invoker.py +0 -0
  126. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_offline_llm.py +0 -0
  127. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_offline_llm_manager.py +0 -0
  128. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_onnx_manager.py +0 -0
  129. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_plugin_base.py +0 -0
  130. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_plugin_manager.py +0 -0
  131. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_presenter.py +0 -0
  132. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_rag_context_retrieval.py +0 -0
  133. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_repl.py +0 -0
  134. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_service.py +0 -0
  135. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_shell_executor_plugin.py +0 -0
  136. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_slash_completer.py +0 -0
  137. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_snapshot.py +0 -0
  138. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_source_collector.py +0 -0
  139. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_streaming_ui.py +0 -0
  140. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_telemetry.py +0 -0
  141. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_tutorial.py +0 -0
  142. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_ui_utils.py +0 -0
  143. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_util.py +0 -0
  144. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_vector_db.py +0 -0
  145. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_version_checker.py +0 -0
  146. {ayechat-0.37.0 → ayechat-0.39.0}/tests/test_write_validator.py +0 -0
  147. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/api_tests.md +0 -0
  148. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/auth_tests.md +0 -0
  149. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/demo_tests.md +0 -0
  150. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/download_plugins_tests.md +0 -0
  151. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/plugin_tests.md +0 -0
  152. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/service_tests.md +0 -0
  153. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/snapshot_tests.md +0 -0
  154. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/source_collector_tests.md +0 -0
  155. {ayechat-0.37.0 → ayechat-0.39.0}/tests/ua/ui_tests.md +0 -0
  156. {ayechat-0.37.0 → ayechat-0.39.0}/version_info.txt +0 -0
{ayechat-0.37.0/src/ayechat.egg-info → ayechat-0.39.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ayechat
-Version: 0.37.0
+Version: 0.39.0
 Summary: Aye Chat: Terminal-first AI Code Generator
 Author-email: "Acrotron, Inc." <info@acrotron.com>
 License: MIT
{ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/command_handlers.py

@@ -7,7 +7,7 @@ from prompt_toolkit import PromptSession
 from rich import print as rprint
 from rich.console import Console
 
-from aye.model.auth import get_user_config, set_user_config
+from aye.model.auth import get_user_config, set_user_config, delete_user_config
 from aye.model.config import MODELS
 from aye.presenter.repl_ui import print_error
 from aye.controller.llm_invoker import invoke_llm
@@ -37,7 +37,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
             num = int(tokens[1])
             if 1 <= num <= len(models):
                 selected_id = models[num - 1]["id"]
-
+
                 # Check if this is an offline model and trigger download if needed
                 selected_model = models[num - 1]
                 if selected_model.get("type") == "offline":
@@ -49,7 +49,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
                     if download_response and not download_response.get("success", True):
                         rprint(f"[red]Failed to download model: {download_response.get('error', 'Unknown error')}[/]")
                         return
-
+
                 conf.selected_model = selected_id
                 set_user_config("selected_model", selected_id)
                 rprint(f"[green]Selected model: {models[num - 1]['name']}[/]")
@@ -81,7 +81,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
             num = int(choice)
             if 1 <= num <= len(models):
                 selected_id = models[num - 1]["id"]
-
+
                 # Check if this is an offline model and trigger download if needed
                 selected_model = models[num - 1]
                 if selected_model.get("type") == "offline":
@@ -93,7 +93,7 @@ def handle_model_command(session: Optional[PromptSession], models: list, conf: A
                     if download_response and not download_response.get("success", True):
                         rprint(f"[red]Failed to download model: {download_response.get('error', 'Unknown error')}[/]")
                         return
-
+
                 conf.selected_model = selected_id
                 set_user_config("selected_model", selected_id)
                 rprint(f"[green]Selected: {models[num - 1]['name']}[/]")
@@ -145,9 +145,28 @@ def handle_debug_command(tokens: list):
         rprint(f"[yellow]Debug mode is {current.title()}[/]")
 
 
+def handle_autodiff_command(tokens: list):
+    """Handle the 'autodiff' command for toggling automatic diff display.
+
+    When autodiff is enabled, diffs are automatically displayed for every
+    file modified by an LLM response.
+    """
+    if len(tokens) > 1:
+        val = tokens[1].lower()
+        if val in ("on", "off"):
+            set_user_config("autodiff", val)
+            rprint(f"[green]Autodiff set to {val.title()}[/]")
+        else:
+            rprint("[red]Usage: autodiff on|off[/]")
+    else:
+        current = get_user_config("autodiff", "off")
+        rprint(f"[yellow]Autodiff is {current.title()}[/]")
+        rprint("[dim]When on, diffs are shown automatically after each LLM file update.[/]")
+
+
 def handle_completion_command(tokens: list) -> Optional[str]:
     """Handle the 'completion' command for switching completion styles.
-
+
     Returns:
         The new completion style if changed ('readline' or 'multi'), None otherwise.
     """
@@ -169,25 +188,125 @@ def handle_completion_command(tokens: list) -> Optional[str]:
     return None
 
 
+def handle_llm_command(session: Optional[PromptSession], tokens: list[str]) -> None:
+    """Handle the 'llm' command for configuring OpenAI-compatible local model endpoint.
+
+    Usage:
+        llm - Interactively configure URL, key, and model
+        llm clear - Remove all LLM config values
+
+    Config keys stored in ~/.ayecfg:
+        llm_api_url
+        llm_api_key
+        llm_model
+    """
+    # Handle 'llm clear' subcommand
+    if len(tokens) > 1 and tokens[1].lower() == "clear":
+        delete_user_config("llm_api_url")
+        delete_user_config("llm_api_key")
+        delete_user_config("llm_model")
+        rprint("[green]LLM config cleared.[/]")
+        return
+
+    # Interactive configuration
+    current_url = get_user_config("llm_api_url", "")
+    current_key = get_user_config("llm_api_key", "")
+    current_model = get_user_config("llm_model", "")
+
+    # Show current status
+    rprint("\n[bold cyan]LLM Endpoint Configuration[/]")
+    rprint("[dim]Press Enter to keep current value, or type a new value.[/]\n")
+
+    if not session:
+        rprint("[red]Error: Interactive session not available.[/]")
+        return
+
+    try:
+        # Prompt for URL (explicitly non-password; some prompt_toolkit versions may reuse app state)
+        url_display = current_url if current_url else "not set"
+        new_url = session.prompt(
+            f"LLM API URL (current: {url_display}): ",
+            is_password=False,
+        ).strip()
+        final_url = new_url if new_url else current_url
+
+        # Prompt for API key (hidden input)
+        key_display = "set" if current_key else "not set"
+        new_key = session.prompt(
+            f"LLM API KEY (current: {key_display}): ",
+            is_password=True,
+        ).strip()
+        final_key = new_key if new_key else current_key
+
+        # Prompt for model (explicitly non-password)
+        model_display = current_model if current_model else "not set"
+        new_model = session.prompt(
+            f"LLM MODEL (current: {model_display}): ",
+            is_password=False,
+        ).strip()
+        final_model = new_model if new_model else current_model
+
+    except (EOFError, KeyboardInterrupt):
+        rprint("\n[yellow]Configuration cancelled.[/]")
+        return
+
+    # Save values (only if they have content)
+    if final_url:
+        set_user_config("llm_api_url", final_url)
+    elif current_url and not new_url:
+        # Keep existing
+        pass
+    else:
+        delete_user_config("llm_api_url")
+
+    if final_key:
+        set_user_config("llm_api_key", final_key)
+    elif current_key and not new_key:
+        # Keep existing
+        pass
+    else:
+        delete_user_config("llm_api_key")
+
+    if final_model:
+        set_user_config("llm_model", final_model)
+    elif current_model and not new_model:
+        # Keep existing
+        pass
+    else:
+        delete_user_config("llm_model")
+
+    # Print confirmation
+    rprint("\n[bold cyan]LLM Configuration Updated[/]")
+    rprint(f" URL: {final_url if final_url else '[dim]not set[/]'}")
+    rprint(f" KEY: {'[dim]set (hidden)[/]' if final_key else '[dim]not set[/]'}")
+    rprint(f" MODEL: {final_model if final_model else '[dim]not set[/]'}")
+
+    # Show status message
+    if final_url and final_key:
+        rprint("\n[green] OpenAI-compatible endpoint is configured and active.[/]")
+    else:
+        rprint("\n[yellow] Both URL and KEY are required for the local LLM endpoint to be active.[/]")
+
+
 def _expand_file_patterns(patterns: list[str], conf: Any) -> list[str]:
     """Expand wildcard patterns and return a list of existing file paths."""
     expanded_files = []
-
+
     for pattern in patterns:
         pattern = pattern.strip()
         if not pattern:
             continue
-
+
         # Check if it's a direct file path first
         direct_path = conf.root / pattern
         if direct_path.is_file():
             expanded_files.append(pattern)
             continue
-
+
         # Use glob to expand wildcards
         # Search relative to the project root
         matched_paths = list(conf.root.glob(pattern))
-
+
         # Add relative paths of matched files
         for matched_path in matched_paths:
             if matched_path.is_file():
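
Once the interactive flow above completes, ~/.ayecfg holds the three keys named in the docstring, in the same [default] key=value layout that delete_user_config in auth.py rewrites. A hypothetical example (all values illustrative, not taken from this diff):

    [default]
    llm_api_url=http://localhost:11434/v1
    llm_api_key=sk-local-example
    llm_model=qwen2.5-coder

Running 'llm clear' removes exactly these three keys and leaves any other settings in the file untouched.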
@@ -197,26 +316,26 @@ def _expand_file_patterns(patterns: list[str], conf: Any) -> list[str]:
             except ValueError:
                 # If we can't make it relative, use the original pattern
                 expanded_files.append(pattern)
-
+
     return expanded_files
 
 
 def handle_with_command(
-    prompt: str,
-    conf: Any,
-    console: Console,
-    chat_id: int,
+    prompt: str,
+    conf: Any,
+    console: Console,
+    chat_id: int,
     chat_id_file: Path
 ) -> Optional[int]:
     """Handle the 'with' command for file-specific prompts with wildcard support.
-
+
     Args:
         prompt: The full prompt string starting with 'with'
         conf: Configuration object
         console: Rich console for output
         chat_id: Current chat ID
         chat_id_file: Path to chat ID file
-
+
     Returns:
         New chat_id if available, None otherwise
     """
@@ -234,16 +353,16 @@ def handle_with_command(
 
     # Parse file patterns (can include wildcards)
     file_patterns = [f.strip() for f in file_list_str.replace(",", " ").split() if f.strip()]
-
+
     # Expand wildcards to get actual file paths
     expanded_files = _expand_file_patterns(file_patterns, conf)
-
+
     if not expanded_files:
         rprint("[red]Error: No files found matching the specified patterns.[/red]")
         return None
-
+
     explicit_source_files = {}
-
+
     for file_name in expanded_files:
         file_path = conf.root / file_name
         if not file_path.is_file():
@@ -254,11 +373,11 @@ def handle_with_command(
         except Exception as e:
             rprint(f"[red]Could not read file '{file_name}': {e}[/red]")
             continue # Continue with other files instead of breaking
-
+
     if not explicit_source_files:
         rprint("[red]Error: No readable files found.[/red]")
         return None
-
+
     # Show which files were included
     if conf.verbose or len(explicit_source_files) != len(expanded_files):
         rprint(f"[cyan]Including {len(explicit_source_files)} file(s): {', '.join(explicit_source_files.keys())}[/cyan]")
@@ -272,20 +391,20 @@ def handle_with_command(
             verbose=conf.verbose,
             explicit_source_files=explicit_source_files
         )
-
+
         if llm_response:
             new_chat_id = process_llm_response(
-                response=llm_response,
-                conf=conf,
-                console=console,
-                prompt=new_prompt_str.strip(),
+                response=llm_response,
+                conf=conf,
+                console=console,
+                prompt=new_prompt_str.strip(),
                 chat_id_file=chat_id_file if llm_response.chat_id else None
             )
             return new_chat_id
         else:
            rprint("[yellow]No response from LLM.[/]")
            return None
-
+
    except Exception as exc:
        handle_llm_error(exc)
        return None
{ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/llm_handler.py

@@ -1,5 +1,5 @@
 from pathlib import Path
-from typing import Any, Optional
+from typing import Any, Optional, List
 
 from rich import print as rprint
 from rich.console import Console
@@ -11,10 +11,12 @@ from aye.presenter.repl_ui import (
     print_files_updated,
     print_error
 )
-from aye.model.snapshot import apply_updates
+from aye.presenter import diff_presenter
+from aye.model.snapshot import apply_updates, get_diff_base_for_file
 from aye.model.file_processor import filter_unchanged_files, make_paths_relative
 from aye.model.models import LLMResponse
 from aye.model.auth import get_user_config
+from aye.model.autodiff_config import is_autodiff_enabled
 from aye.model.write_validator import (
     check_files_against_ignore_patterns,
     is_strict_mode_enabled,
@@ -52,6 +54,51 @@ def _maybe_print_restore_tip(conf: Any, console: Console) -> None:
         console.print(Padding(msg, (0, 4, 0, 4)))
 
 
+def _run_autodiff(updated_files: List[dict], batch_id: str, conf: Any, console: Console) -> None:
+    """Display diffs for all updated files against their snapshot versions.
+
+    Args:
+        updated_files: List of file dicts with 'file_name' keys
+        batch_id: The batch identifier from apply_updates()
+        conf: Configuration object with root path
+        console: Rich console for output
+    """
+    verbose = getattr(conf, 'verbose', False)
+    debug = get_user_config("debug", "off").lower() == "on"
+
+    console.print(Padding("[dim]───── Auto-diff (autodiff=on) ─────[/]", (1, 0, 0, 0)))
+
+    for item in updated_files:
+        file_name = item.get("file_name")
+        if not file_name:
+            continue
+
+        file_path = Path(file_name)
+
+        # Get the snapshot reference for this file
+        diff_base = get_diff_base_for_file(batch_id, file_path)
+
+        if diff_base is None:
+            if verbose or debug:
+                rprint(f"[yellow]Warning: Could not find snapshot for {file_name}, skipping autodiff[/]")
+            continue
+
+        snapshot_ref, is_git_ref = diff_base
+
+        # Print file header
+        console.print(f"\n[bold cyan]{file_name}[/]")
+
+        try:
+            # show_diff expects: (current_file, snapshot_ref, is_stash_ref)
+            # For autodiff, we diff the current (new) file against the snapshot (old)
+            diff_presenter.show_diff(file_path, snapshot_ref, is_stash_ref=is_git_ref)
+        except Exception as e:
+            if verbose or debug:
+                rprint(f"[yellow]Warning: Could not show diff for {file_name}: {e}[/]")
+
+    console.print(Padding("[dim]───── End auto-diff ─────[/]", (1, 0, 0, 0)))
+
+
 def process_llm_response(
     response: LLMResponse,
     conf: Any,
@@ -118,12 +165,17 @@ def process_llm_response(
     else:
         # Apply updates to the model (Model update)
         try:
-            apply_updates(updated_files, prompt)
+            batch_id = apply_updates(updated_files, prompt)
             file_names = [item.get("file_name") for item in updated_files if "file_name" in item]
             if file_names:
                 # Update the view
                 print_files_updated(console, file_names)
                 _maybe_print_restore_tip(conf, console)
+
+                # Run autodiff if enabled
+                if is_autodiff_enabled():
+                    _run_autodiff(updated_files, batch_id, conf, console)
+
         except Exception as e:
             rprint(f"[red]Error applying updates:[/] {e}")
 
{ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/llm_invoker.py

@@ -13,12 +13,16 @@ from aye.model.source_collector import collect_sources
 from aye.model.auth import get_user_config
 from aye.model.offline_llm_manager import is_offline_model
 from aye.controller.util import is_truncated_json
-from aye.model.config import SYSTEM_PROMPT, MODELS, DEFAULT_MAX_OUTPUT_TOKENS, DEFAULT_CONTEXT_TARGET_KB
+from aye.model.config import SYSTEM_PROMPT, MODELS, DEFAULT_MAX_OUTPUT_TOKENS, DEFAULT_CONTEXT_TARGET_KB, CONTEXT_HARD_LIMIT_KB
 from aye.model import telemetry
 
 import os
 
 
+def _is_verbose():
+    return get_user_config("verbose", "off").lower() == "on"
+
+
 def _is_debug():
     return get_user_config("debug", "off").lower() == "on"
 
@@ -77,7 +81,7 @@ def _get_context_hard_limit(model_id: str) -> int:
     model_config = _get_model_config(model_id)
     if model_config and "max_prompt_kb" in model_config:
         return model_config["max_prompt_kb"] * 1024
-    return 170 * 1024
+    return CONTEXT_HARD_LIMIT_KB * 1024
 
 
 def _filter_ground_truth(files: Dict[str, str], conf: Any, verbose: bool) -> Dict[str, str]:
@@ -134,6 +138,8 @@ def _get_rag_context_files(prompt: str, conf: Any, verbose: bool) -> Dict[str, s
 
     context_target_size = _get_context_target_size(conf.selected_model)
     context_hard_limit = _get_context_hard_limit(conf.selected_model)
+    #context_target_size = DEFAULT_CONTEXT_TARGET_KB #_get_context_target_size(conf.selected_model)
+    #context_hard_limit = CONTEXT_HARD_LIMIT_KB # _get_context_hard_limit(conf.selected_model)
 
     if _is_debug():
         rprint(f"[yellow]Context target: {context_target_size / 1024:.1f}KB, hard limit: {context_hard_limit / 1024:.1f}KB[/]")
@@ -298,50 +304,54 @@ def invoke_llm(
     model_config = _get_model_config(conf.selected_model)
     max_output_tokens = model_config.get("max_output_tokens", DEFAULT_MAX_OUTPUT_TOKENS) if model_config else DEFAULT_MAX_OUTPUT_TOKENS
 
-    # 1. Try local/offline model plugins first (no streaming UI for local models)
-    local_response = plugin_manager.handle_command("local_model_invoke", {
-        "prompt": prompt,
-        "model_id": conf.selected_model,
-        "source_files": source_files,
-        "chat_id": chat_id,
-        "root": conf.root,
-        "system_prompt": system_prompt,
-        "max_output_tokens": max_output_tokens
-    })
-
-    if local_response is not None:
-        return LLMResponse(
-            summary=local_response.get("summary", ""),
-            updated_files=local_response.get("updated_files", []),
-            chat_id=None,
-            source=LLMSource.LOCAL
-        )
-
-    # 2. API call with spinner + streaming display
-    if _is_debug():
-        print(f"[DEBUG] Processing chat message with chat_id={chat_id or -1}, model={conf.selected_model}")
-
-    telemetry_payload = telemetry.build_payload(top_n=20) if telemetry.is_enabled() else None
-
-    # Create spinner - will be stopped when streaming starts
+    # Create spinner - will be shown for ALL model types (local, databricks, API)
     spinner = StoppableSpinner(
         console,
         messages=DEFAULT_THINKING_MESSAGES,
         interval=15.0
     )
 
+    # For API calls, we also have streaming display
+    streaming_display: Optional[StreamingResponseDisplay] = None
+
     def stop_spinner():
-        """Callback to stop spinner when first content arrives."""
+        """Callback to stop spinner when first content arrives (for streaming API)."""
         spinner.stop()
-
-    # Create streaming display with callback to stop spinner on first content
-    streaming_display = StreamingResponseDisplay(on_first_content=stop_spinner)
-    stream_callback = create_streaming_callback(streaming_display)
 
     try:
-        # Start the spinner before the API call
+        # Start the spinner before ANY LLM call (local or API)
         spinner.start()
 
+        # 1. Try local/offline model plugins first
+        local_response = plugin_manager.handle_command("local_model_invoke", {
+            "prompt": prompt,
+            "model_id": conf.selected_model,
+            "source_files": source_files,
+            "chat_id": chat_id,
+            "root": conf.root,
+            "system_prompt": system_prompt,
+            "max_output_tokens": max_output_tokens
+        })
+
+        if local_response is not None:
+            # Local model handled the request - spinner will be stopped in finally block
+            return LLMResponse(
+                summary=local_response.get("summary", ""),
+                updated_files=local_response.get("updated_files", []),
+                chat_id=None,
+                source=LLMSource.LOCAL
+            )
+
+        # 2. API call with streaming display
+        if _is_debug():
+            print(f"[DEBUG] Processing chat message with chat_id={chat_id or -1}, model={conf.selected_model}")
+
+        telemetry_payload = telemetry.build_payload(top_n=20) if telemetry.is_enabled() else None
+
+        # Create streaming display with callback to stop spinner on first content
+        streaming_display = StreamingResponseDisplay(on_first_content=stop_spinner)
+        stream_callback = create_streaming_callback(streaming_display)
+
         api_resp = cli_invoke(
             message=prompt,
             chat_id=chat_id or -1,
@@ -352,29 +362,29 @@ def invoke_llm(
             telemetry=telemetry_payload,
             on_stream_update=stream_callback
         )
-    finally:
-        # Ensure spinner is stopped (in case no streaming content was received)
-        spinner.stop()
-
-        # Always stop the streaming display when done
-        if streaming_display.is_active():
-            streaming_display.stop()
 
-    if telemetry_payload is not None:
-        telemetry.reset()
+        if telemetry_payload is not None:
+            telemetry.reset()
 
-    if _is_debug():
-        print(f"[DEBUG] Chat message processed, response keys: {api_resp.keys() if api_resp else 'None'}")
+        if _is_debug():
+            print(f"[DEBUG] Chat message processed, response keys: {api_resp.keys() if api_resp else 'None'}")
 
-    # Check if we already displayed the response via streaming
-    streamed_summary = bool(api_resp.get("_streamed_summary")) if isinstance(api_resp, dict) else False
+        # Check if we already displayed the response via streaming
+        streamed_summary = bool(api_resp.get("_streamed_summary")) if isinstance(api_resp, dict) else False
 
-    # 3. Parse API response
-    assistant_resp, new_chat_id = _parse_api_response(api_resp)
+        # 3. Parse API response
+        assistant_resp, new_chat_id = _parse_api_response(api_resp)
 
-    return LLMResponse(
-        summary="" if streamed_summary else assistant_resp.get("answer_summary", ""),
-        updated_files=assistant_resp.get("source_files", []),
-        chat_id=new_chat_id,
-        source=LLMSource.API
-    )
+        return LLMResponse(
+            summary="" if streamed_summary else assistant_resp.get("answer_summary", ""),
+            updated_files=assistant_resp.get("source_files", []),
+            chat_id=new_chat_id,
+            source=LLMSource.API
+        )
+    finally:
+        # Ensure spinner is stopped for ALL code paths (local model, API, or error)
+        spinner.stop()
+
+        # Stop the streaming display if it was created and is active
+        if streaming_display is not None and streaming_display.is_active():
+            streaming_display.stop()
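
The point of this restructuring is visible in the comments: the spinner now starts before any model call (local models previously got no spinner at all), and a single finally block stops it on every exit path, including the local-model early return. A minimal self-contained sketch of that guarantee, with a dummy class standing in for the package's StoppableSpinner:

    class DummySpinner:
        def start(self) -> None:
            print("spinner started")

        def stop(self) -> None:
            print("spinner stopped")

    def invoke(use_local: bool) -> str:
        spinner = DummySpinner()
        display_started = False
        try:
            spinner.start()
            if use_local:
                return "local result"  # early return: finally still fires
            display_started = True     # only the API path creates a display
            return "api result"
        finally:
            spinner.stop()             # runs on local, API, and error paths alike
            if display_started:
                print("display stopped")

    invoke(use_local=True)  # prints "spinner started" then "spinner stopped"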
{ayechat-0.37.0 → ayechat-0.39.0}/src/aye/controller/repl.py

@@ -41,6 +41,8 @@ from aye.controller.command_handlers import (
     handle_completion_command,
     handle_with_command,
     handle_blog_command,
+    handle_llm_command,
+    handle_autodiff_command,
 )
 
 DEBUG = False
@@ -284,7 +286,7 @@ def _execute_forced_shell_command(command: str, args: List[str], conf: Any) -> N
 def chat_repl(conf: Any) -> None:
     is_first_run = run_first_time_tutorial_if_needed()
 
-    BUILTIN_COMMANDS = ["with", "blog", "new", "history", "diff", "restore", "undo", "keep", "model", "verbose", "debug", "completion", "exit", "quit", ":q", "help", "cd", "db"]
+    BUILTIN_COMMANDS = ["with", "blog", "new", "history", "diff", "restore", "undo", "keep", "model", "verbose", "debug", "autodiff", "completion", "exit", "quit", ":q", "help", "cd", "db", "llm"]
 
     # Get the completion style setting
     completion_style = get_user_config("completion_style", "readline").lower()
@@ -415,6 +417,9 @@ def chat_repl(conf: Any) -> None:
             elif lowered_first == "debug":
                 telemetry.record_command("debug", has_args=len(tokens) > 1, prefix=_AYE_PREFIX)
                 handle_debug_command(tokens)
+            elif lowered_first == "autodiff":
+                telemetry.record_command("autodiff", has_args=len(tokens) > 1, prefix=_AYE_PREFIX)
+                handle_autodiff_command(tokens)
             elif lowered_first == "completion":
                 telemetry.record_command("completion", has_args=len(tokens) > 1, prefix=_AYE_PREFIX)
                 new_style = handle_completion_command(tokens)
@@ -429,6 +434,9 @@ def chat_repl(conf: Any) -> None:
                 # Recreate the session with the new completer
                 session = create_prompt_session(completer, new_style)
                 rprint(f"[green]Completion style is now active.[/]")
+            elif lowered_first == "llm":
+                telemetry.record_command("llm", has_args=len(tokens) > 1, prefix=_AYE_PREFIX)
+                handle_llm_command(session, tokens)
             elif lowered_first == "blog":
                 telemetry.record_command("blog", has_args=len(tokens) > 1, prefix=_AYE_PREFIX)
                 telemetry.record_llm_prompt("LLM <blog>")
{ayechat-0.37.0 → ayechat-0.39.0}/src/aye/model/auth.py

@@ -58,6 +58,27 @@ def set_user_config(key: str, value: Any) -> None:
     TOKEN_FILE.chmod(0o600)
 
 
+def delete_user_config(key: str) -> None:
+    """Delete a user config key from the [default] section.
+
+    If the key doesn't exist, this is a no-op.
+    Preserves other settings and maintains file permissions.
+    """
+    config = _parse_user_config()
+    if key not in config:
+        return
+    config.pop(key, None)
+    if not config:
+        # If no config left, remove the file entirely
+        TOKEN_FILE.unlink(missing_ok=True)
+    else:
+        new_content = "[default]\n"
+        for k, v in config.items():
+            new_content += f"{k}={v}\n"
+        TOKEN_FILE.write_text(new_content, encoding="utf-8")
+        TOKEN_FILE.chmod(0o600)
+
+
 def store_token(token: str) -> None:
     """Persist the token in ~/.ayecfg or value from AYE_TOKEN_FILE environment variable (unless AYE_TOKEN is set)."""
     token = token.strip()
@@ -122,4 +143,3 @@ def login_flow() -> None:
     token = typer.prompt("Paste your token", hide_input=True)
     store_token(token.strip())
     typer.secho("✅ Token saved.", fg=typer.colors.GREEN)
-
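
A short usage sketch of the new helper, based only on the behavior shown above (the key and value are hypothetical):

    from aye.model.auth import set_user_config, delete_user_config

    set_user_config("llm_api_url", "http://localhost:8080/v1")  # hypothetical value
    delete_user_config("llm_api_url")  # removes the key and rewrites the [default] section
    delete_user_config("llm_api_url")  # second call is a no-op: the key is already gone

Note that deleting the last remaining key removes ~/.ayecfg entirely rather than leaving an empty [default] section behind.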
ayechat-0.39.0/src/aye/model/autodiff_config.py (new file)

@@ -0,0 +1,32 @@
+"""Autodiff configuration for automatic diff display after LLM changes.
+
+This module provides functionality to check if autodiff mode is enabled.
+When enabled, diffs are automatically displayed for every file modified
+by an LLM response.
+
+See: autodiff.md for the full design plan.
+"""
+
+from aye.model.auth import get_user_config
+
+
+# Config key for autodiff mode
+AUTODIFF_KEY = "autodiff"
+
+
+def is_autodiff_enabled() -> bool:
+    """Check if autodiff mode is enabled.
+
+    When enabled, diffs are automatically displayed for every file
+    modified by an LLM response, immediately after the optimistic
+    write is applied.
+
+    Can be set via:
+    - Environment variable: AYE_AUTODIFF=on
+    - Config file (~/.ayecfg): autodiff=on
+
+    Returns:
+        True if autodiff mode is enabled, False otherwise (default)
+    """
+    value = get_user_config(AUTODIFF_KEY, "off")
+    return str(value).lower() in ("on", "true", "1", "yes")
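
The final return line accepts several truthy spellings, so the check is more permissive than the strict on/off pair the autodiff REPL command writes. A sketch of the accepted values (assumes the package is importable; note it writes to the real ~/.ayecfg as a side effect):

    from aye.model.auth import set_user_config
    from aye.model.autodiff_config import is_autodiff_enabled

    for raw in ("on", "true", "1", "yes"):
        set_user_config("autodiff", raw)
        assert is_autodiff_enabled()    # any of these spellings enables autodiff

    set_user_config("autodiff", "off")
    assert not is_autodiff_enabled()    # anything else, including the default, is off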