ayechat-dev 0.36.9.20260204171331__tar.gz → 0.36.9.20260205235944__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156)
  1. {ayechat_dev-0.36.9.20260204171331/src/ayechat_dev.egg-info → ayechat_dev-0.36.9.20260205235944}/PKG-INFO +1 -1
  2. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/pyproject.toml +1 -1
  3. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/llm_invoker.py +65 -55
  4. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/config.py +5 -0
  5. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/databricks_model.py +2 -1
  6. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/local_model.py +0 -45
  7. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944/src/ayechat_dev.egg-info}/PKG-INFO +1 -1
  8. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_llm_handler.py +4 -0
  9. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_local_model_plugin.py +1 -60
  10. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
  11. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  12. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
  13. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/dependabot.yml +0 -0
  14. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/build-windows-installer.yml +0 -0
  15. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/message-releases-to-discord.yml +0 -0
  16. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/pylint.yml +0 -0
  17. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/python-publish-dev.yml +0 -0
  18. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/python-publish.yml +0 -0
  19. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/python-testing.yml +0 -0
  20. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/test-homebrew.yml +0 -0
  21. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/test-nix-github.yml +0 -0
  22. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/test-nix.yml +0 -0
  23. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/test-windows-installer.yml +0 -0
  24. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.github/workflows/update-homebrew.yml +0 -0
  25. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.gitignore +0 -0
  26. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/.pylintrc +0 -0
  27. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/BUILD.md +0 -0
  28. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/DISCLAIMER +0 -0
  29. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/Formula/aye-chat.rb +0 -0
  30. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/LICENSE +0 -0
  31. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/README.md +0 -0
  32. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/assets/aye-chat.ico +0 -0
  33. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/aye-chat.spec +0 -0
  34. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/ayechat.nix +0 -0
  35. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/flake.lock +0 -0
  36. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/flake.nix +0 -0
  37. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/installer.iss +0 -0
  38. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/publish_pypi.sh +0 -0
  39. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/requirements.txt +0 -0
  40. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/run_tests.cmd +0 -0
  41. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/run_tests.sh +0 -0
  42. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/setup.cfg +0 -0
  43. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/.gitignore +0 -0
  44. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/__init__.py +0 -0
  45. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/__main__.py +0 -0
  46. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/__main_chat__.py +0 -0
  47. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/__init__.py +0 -0
  48. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/command_handlers.py +0 -0
  49. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/commands.py +0 -0
  50. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/llm_handler.py +0 -0
  51. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/plugin_manager.py +0 -0
  52. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/repl.py +0 -0
  53. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/tutorial.py +0 -0
  54. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/controller/util.py +0 -0
  55. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/__init__.py +0 -0
  56. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/api.py +0 -0
  57. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/ast_chunker.py +0 -0
  58. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/auth.py +0 -0
  59. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/autodiff_config.py +0 -0
  60. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/download_plugins.py +0 -0
  61. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/file_processor.py +0 -0
  62. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/ignore_patterns.py +0 -0
  63. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/index_manager/__init__.py +0 -0
  64. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/index_manager/index_manager.py +0 -0
  65. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/index_manager/index_manager_executor.py +0 -0
  66. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/index_manager/index_manager_file_ops.py +0 -0
  67. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/index_manager/index_manager_state.py +0 -0
  68. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/index_manager/index_manager_utils.py +0 -0
  69. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/json_extractor.py +0 -0
  70. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/models.py +0 -0
  71. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/offline_llm_manager.py +0 -0
  72. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/onnx_manager.py +0 -0
  73. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/snapshot/__init__.py +0 -0
  74. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/snapshot/base.py +0 -0
  75. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/snapshot/file_backend.py +0 -0
  76. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/snapshot/git_ref_backend.py +0 -0
  77. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/source_collector.py +0 -0
  78. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/telemetry.py +0 -0
  79. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/vector_db.py +0 -0
  80. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/version_checker.py +0 -0
  81. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/model/write_validator.py +0 -0
  82. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/__init__.py +0 -0
  83. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/at_file_completer.py +0 -0
  84. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/auto_detect_mask.py +0 -0
  85. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/completer.py +0 -0
  86. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/offline_llm.py +0 -0
  87. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/plugin_base.py +0 -0
  88. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/shell_executor.py +0 -0
  89. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/plugins/slash_completer.py +0 -0
  90. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/presenter/__init__.py +0 -0
  91. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/presenter/cli_ui.py +0 -0
  92. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/presenter/diff_presenter.py +0 -0
  93. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/presenter/repl_ui.py +0 -0
  94. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/presenter/streaming_ui.py +0 -0
  95. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/aye/presenter/ui_utils.py +0 -0
  96. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/ayechat_dev.egg-info/SOURCES.txt +0 -0
  97. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/ayechat_dev.egg-info/dependency_links.txt +0 -0
  98. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/ayechat_dev.egg-info/entry_points.txt +0 -0
  99. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/ayechat_dev.egg-info/requires.txt +0 -0
  100. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/src/ayechat_dev.egg-info/top_level.txt +0 -0
  101. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/.gitignore +0 -0
  102. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/config/unittest-env.sh +0 -0
  103. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/e2e/test_chat_workflow.py +0 -0
  104. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_api.py +0 -0
  105. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_ast_chunker.py +0 -0
  106. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_at_file_completer.py +0 -0
  107. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_auth.py +0 -0
  108. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_auth_uat_1.py +0 -0
  109. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_auto_detect_mask.py +0 -0
  110. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_chromadb_corruption_recovery.py +0 -0
  111. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_cli.py +0 -0
  112. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_command_handlers.py +0 -0
  113. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_commands.py +0 -0
  114. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_completer_plugin.py +0 -0
  115. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_config.py +0 -0
  116. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_diff_presenter.py +0 -0
  117. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_download_plugins.py +0 -0
  118. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_file_processor.py +0 -0
  119. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_git_ref_backend.py +0 -0
  120. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_index_manager.py +0 -0
  121. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_index_manager_executor.py +0 -0
  122. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_index_manager_more.py +0 -0
  123. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_index_manager_state.py +0 -0
  124. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_index_manager_utils.py +0 -0
  125. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_llm_invoker.py +0 -0
  126. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_offline_llm.py +0 -0
  127. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_offline_llm_manager.py +0 -0
  128. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_onnx_manager.py +0 -0
  129. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_plugin_base.py +0 -0
  130. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_plugin_manager.py +0 -0
  131. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_presenter.py +0 -0
  132. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_rag_context_retrieval.py +0 -0
  133. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_repl.py +0 -0
  134. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_service.py +0 -0
  135. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_shell_executor_plugin.py +0 -0
  136. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_slash_completer.py +0 -0
  137. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_snapshot.py +0 -0
  138. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_source_collector.py +0 -0
  139. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_streaming_ui.py +0 -0
  140. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_telemetry.py +0 -0
  141. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_tutorial.py +0 -0
  142. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_ui_utils.py +0 -0
  143. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_util.py +0 -0
  144. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_vector_db.py +0 -0
  145. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_version_checker.py +0 -0
  146. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/test_write_validator.py +0 -0
  147. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/api_tests.md +0 -0
  148. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/auth_tests.md +0 -0
  149. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/demo_tests.md +0 -0
  150. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/download_plugins_tests.md +0 -0
  151. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/plugin_tests.md +0 -0
  152. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/service_tests.md +0 -0
  153. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/snapshot_tests.md +0 -0
  154. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/source_collector_tests.md +0 -0
  155. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/tests/ua/ui_tests.md +0 -0
  156. {ayechat_dev-0.36.9.20260204171331 → ayechat_dev-0.36.9.20260205235944}/version_info.txt +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ayechat-dev
- Version: 0.36.9.20260204171331
+ Version: 0.36.9.20260205235944
  Summary: Aye Chat: Terminal-first AI Code Generator
  Author-email: "Acrotron, Inc." <info@acrotron.com>
  License: MIT

pyproject.toml
@@ -8,7 +8,7 @@ description = "Aye Chat: Terminal-first AI Code Generator"
  readme = "README.md"
  requires-python = ">=3.10, <3.14"
  dependencies = [ "typer>=0.20.0", "httpx>=0.28.1", "keyring>=25.7.0", "prompt-toolkit>=3.0.52", "pathspec>=0.12.1", "chromadb>=1.3.5", "rapidfuzz",]
- version = "0.36.9.20260204171331"
+ version = "0.36.9.20260205235944"
  [[project.authors]]
  name = "Acrotron, Inc."
  email = "info@acrotron.com"

src/aye/controller/llm_invoker.py
@@ -13,12 +13,16 @@ from aye.model.source_collector import collect_sources
  from aye.model.auth import get_user_config
  from aye.model.offline_llm_manager import is_offline_model
  from aye.controller.util import is_truncated_json
- from aye.model.config import SYSTEM_PROMPT, MODELS, DEFAULT_MAX_OUTPUT_TOKENS, DEFAULT_CONTEXT_TARGET_KB
+ from aye.model.config import SYSTEM_PROMPT, MODELS, DEFAULT_MAX_OUTPUT_TOKENS, DEFAULT_CONTEXT_TARGET_KB, CONTEXT_HARD_LIMIT_KB
  from aye.model import telemetry

  import os


+ def _is_verbose():
+     return get_user_config("verbose", "off").lower() == "on"
+
+
  def _is_debug():
      return get_user_config("debug", "off").lower() == "on"

@@ -77,7 +81,7 @@ def _get_context_hard_limit(model_id: str) -> int:
      model_config = _get_model_config(model_id)
      if model_config and "max_prompt_kb" in model_config:
          return model_config["max_prompt_kb"] * 1024
-     return 170 * 1024
+     return CONTEXT_HARD_LIMIT_KB * 1024


  def _filter_ground_truth(files: Dict[str, str], conf: Any, verbose: bool) -> Dict[str, str]:
@@ -134,6 +138,8 @@ def _get_rag_context_files(prompt: str, conf: Any, verbose: bool) -> Dict[str, s

      context_target_size = _get_context_target_size(conf.selected_model)
      context_hard_limit = _get_context_hard_limit(conf.selected_model)
+     #context_target_size = DEFAULT_CONTEXT_TARGET_KB #_get_context_target_size(conf.selected_model)
+     #context_hard_limit = CONTEXT_HARD_LIMIT_KB # _get_context_hard_limit(conf.selected_model)

      if _is_debug():
          rprint(f"[yellow]Context target: {context_target_size / 1024:.1f}KB, hard limit: {context_hard_limit / 1024:.1f}KB[/]")
@@ -298,50 +304,54 @@ def invoke_llm(
      model_config = _get_model_config(conf.selected_model)
      max_output_tokens = model_config.get("max_output_tokens", DEFAULT_MAX_OUTPUT_TOKENS) if model_config else DEFAULT_MAX_OUTPUT_TOKENS

-     # 1. Try local/offline model plugins first (no streaming UI for local models)
-     local_response = plugin_manager.handle_command("local_model_invoke", {
-         "prompt": prompt,
-         "model_id": conf.selected_model,
-         "source_files": source_files,
-         "chat_id": chat_id,
-         "root": conf.root,
-         "system_prompt": system_prompt,
-         "max_output_tokens": max_output_tokens
-     })
-
-     if local_response is not None:
-         return LLMResponse(
-             summary=local_response.get("summary", ""),
-             updated_files=local_response.get("updated_files", []),
-             chat_id=None,
-             source=LLMSource.LOCAL
-         )
-
-     # 2. API call with spinner + streaming display
-     if _is_debug():
-         print(f"[DEBUG] Processing chat message with chat_id={chat_id or -1}, model={conf.selected_model}")
-
-     telemetry_payload = telemetry.build_payload(top_n=20) if telemetry.is_enabled() else None
-
-     # Create spinner - will be stopped when streaming starts
+     # Create spinner - will be shown for ALL model types (local, databricks, API)
      spinner = StoppableSpinner(
          console,
          messages=DEFAULT_THINKING_MESSAGES,
          interval=15.0
      )

+     # For API calls, we also have streaming display
+     streaming_display: Optional[StreamingResponseDisplay] = None
+
      def stop_spinner():
-         """Callback to stop spinner when first content arrives."""
+         """Callback to stop spinner when first content arrives (for streaming API)."""
          spinner.stop()
-
-     # Create streaming display with callback to stop spinner on first content
-     streaming_display = StreamingResponseDisplay(on_first_content=stop_spinner)
-     stream_callback = create_streaming_callback(streaming_display)

      try:
-         # Start the spinner before the API call
+         # Start the spinner before ANY LLM call (local or API)
          spinner.start()

+         # 1. Try local/offline model plugins first
+         local_response = plugin_manager.handle_command("local_model_invoke", {
+             "prompt": prompt,
+             "model_id": conf.selected_model,
+             "source_files": source_files,
+             "chat_id": chat_id,
+             "root": conf.root,
+             "system_prompt": system_prompt,
+             "max_output_tokens": max_output_tokens
+         })
+
+         if local_response is not None:
+             # Local model handled the request - spinner will be stopped in finally block
+             return LLMResponse(
+                 summary=local_response.get("summary", ""),
+                 updated_files=local_response.get("updated_files", []),
+                 chat_id=None,
+                 source=LLMSource.LOCAL
+             )
+
+         # 2. API call with streaming display
+         if _is_debug():
+             print(f"[DEBUG] Processing chat message with chat_id={chat_id or -1}, model={conf.selected_model}")
+
+         telemetry_payload = telemetry.build_payload(top_n=20) if telemetry.is_enabled() else None
+
+         # Create streaming display with callback to stop spinner on first content
+         streaming_display = StreamingResponseDisplay(on_first_content=stop_spinner)
+         stream_callback = create_streaming_callback(streaming_display)
+
          api_resp = cli_invoke(
              message=prompt,
              chat_id=chat_id or -1,
@@ -352,29 +362,29 @@ def invoke_llm(
              telemetry=telemetry_payload,
              on_stream_update=stream_callback
          )
-     finally:
-         # Ensure spinner is stopped (in case no streaming content was received)
-         spinner.stop()
-
-         # Always stop the streaming display when done
-         if streaming_display.is_active():
-             streaming_display.stop()

-     if telemetry_payload is not None:
-         telemetry.reset()
+         if telemetry_payload is not None:
+             telemetry.reset()

-     if _is_debug():
-         print(f"[DEBUG] Chat message processed, response keys: {api_resp.keys() if api_resp else 'None'}")
+         if _is_debug():
+             print(f"[DEBUG] Chat message processed, response keys: {api_resp.keys() if api_resp else 'None'}")

-     # Check if we already displayed the response via streaming
-     streamed_summary = bool(api_resp.get("_streamed_summary")) if isinstance(api_resp, dict) else False
+         # Check if we already displayed the response via streaming
+         streamed_summary = bool(api_resp.get("_streamed_summary")) if isinstance(api_resp, dict) else False

-     # 3. Parse API response
-     assistant_resp, new_chat_id = _parse_api_response(api_resp)
+         # 3. Parse API response
+         assistant_resp, new_chat_id = _parse_api_response(api_resp)

-     return LLMResponse(
-         summary="" if streamed_summary else assistant_resp.get("answer_summary", ""),
-         updated_files=assistant_resp.get("source_files", []),
-         chat_id=new_chat_id,
-         source=LLMSource.API
-     )
+         return LLMResponse(
+             summary="" if streamed_summary else assistant_resp.get("answer_summary", ""),
+             updated_files=assistant_resp.get("source_files", []),
+             chat_id=new_chat_id,
+             source=LLMSource.API
+         )
+     finally:
+         # Ensure spinner is stopped for ALL code paths (local model, API, or error)
+         spinner.stop()
+
+         # Stop the streaming display if it was created and is active
+         if streaming_display is not None and streaming_display.is_active():
+             streaming_display.stop()
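
In short, this change moves the local-model invocation inside the try block so the "thinking" spinner covers local, Databricks, and API calls alike, and centralizes cleanup in a finally clause. A minimal sketch of the resulting control flow (simplified and hypothetical; it reuses names from llm_invoker.py but omits most arguments and error handling):

    def invoke_llm_sketch(prompt, plugin_manager, console):
        # Sketch only -- not the shipped implementation.
        spinner = StoppableSpinner(console, messages=DEFAULT_THINKING_MESSAGES, interval=15.0)
        streaming_display = None                      # created only on the API path
        try:
            spinner.start()                           # spinner now precedes ANY model call
            # 1. Local/offline plugins get first chance to answer.
            local_response = plugin_manager.handle_command("local_model_invoke", {"prompt": prompt})
            if local_response is not None:
                return local_response                 # spinner stopped in finally
            # 2. API path: the first streamed content stops the spinner via the callback.
            streaming_display = StreamingResponseDisplay(on_first_content=spinner.stop)
            return cli_invoke(message=prompt, on_stream_update=create_streaming_callback(streaming_display))
        finally:
            spinner.stop()                            # covers local, API, and error paths
            if streaming_display is not None and streaming_display.is_active():
                streaming_display.stop()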

src/aye/model/config.py
@@ -17,12 +17,17 @@ SMALL_PROJECT_FILE_LIMIT = 200
  # Projects smaller than this will skip RAG and include all files directly.
  # Set to match default max_prompt_kb (170KB) so all files can fit in context.
  SMALL_PROJECT_TOTAL_SIZE_LIMIT = 170 * 1024 # 170KB
+ #SMALL_PROJECT_TOTAL_SIZE_LIMIT = 100 * 1024 # 170KB

  # Default maximum output tokens for LLM responses
  DEFAULT_MAX_OUTPUT_TOKENS = 32000
+ #DEFAULT_MAX_OUTPUT_TOKENS = 16000

  # Default context target size in KB (used when model doesn't specify one)
  DEFAULT_CONTEXT_TARGET_KB = 150
+ #DEFAULT_CONTEXT_TARGET_KB = 20
+
+ CONTEXT_HARD_LIMIT_KB = 170

  # Shared system prompt for all LLM interactions
  SYSTEM_PROMPT = (
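
The new CONTEXT_HARD_LIMIT_KB constant replaces the hard-coded 170 * 1024 fallback in llm_invoker.py; a per-model max_prompt_kb still takes precedence when present. A small illustrative sketch of that lookup, mirroring _get_context_hard_limit above (hypothetical helper name):

    def resolve_context_hard_limit(model_config):
        # Per-model max_prompt_kb wins; otherwise the shared default applies.
        if model_config and "max_prompt_kb" in model_config:
            return model_config["max_prompt_kb"] * 1024      # bytes
        return CONTEXT_HARD_LIMIT_KB * 1024                  # 170 KB default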

src/aye/plugins/databricks_model.py
@@ -245,7 +245,8 @@ class DatabricksModelPlugin(Plugin):
          try:
              with httpx.Client(timeout=LLM_TIMEOUT) as client:
                  response = client.post(api_url, json=payload, headers=headers)
-                 if self.debug:
+                 if self.verbose and response.status_code != 200:
+                     print(f"Status code: {response.status_code}")
                      print("-----------------")
                      print(response.text)
                      print("-----------------")

src/aye/plugins/local_model.py
@@ -136,48 +136,6 @@ class LocalModelPlugin(Plugin):
              "updated_files": []
          }

-     def _handle_databricks(self, prompt: str, source_files: Dict[str, str], chat_id: Optional[int] = None, system_prompt: Optional[str] = None, max_output_tokens: int = DEFAULT_MAX_OUTPUT_TOKENS) -> Optional[Dict[str, Any]]:
-         api_url = os.environ.get("AYE_DBX_API_URL")
-         api_key = os.environ.get("AYE_DBX_API_KEY")
-         model_name = os.environ.get("AYE_DBX_MODEL", "gpt-3.5-turbo")
-
-         if not api_url or not api_key:
-             return None
-
-         conv_id = self._get_conversation_id(chat_id)
-         if conv_id not in self.chat_history:
-             self.chat_history[conv_id] = []
-
-         user_message = self._build_user_message(prompt, source_files)
-         effective_system_prompt = system_prompt if system_prompt else SYSTEM_PROMPT
-
-         messages = [{"role": "system", "content": effective_system_prompt}] + self.chat_history[conv_id] + [{"role": "user", "content": user_message}]
-
-         headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
-         payload = {"model": model_name, "messages": messages, "temperature": 0.7, "max_tokens": max_output_tokens, "response_format": {"type": "json_object"}}
-
-         try:
-             with httpx.Client(timeout=LLM_TIMEOUT) as client:
-                 response = client.post(api_url, json=payload, headers=headers)
-                 response.raise_for_status()
-                 result = response.json()
-                 if result.get("choices") and result["choices"][0].get("message"):
-                     generated_text = result["choices"][0]["message"]["content"][0]["text"]
-                     self.chat_history[conv_id].append({"role": "user", "content": user_message})
-                     self.chat_history[conv_id].append({"role": "assistant", "content": generated_text})
-                     self._save_history()
-                     return self._parse_llm_response(generated_text)
-                 return self._create_error_response("Failed to get a valid response from the Databricks API")
-         except httpx.HTTPStatusError as e:
-             error_msg = f"DBX API error: {e.response.status_code}"
-             try:
-                 error_detail = e.response.json()
-                 if "error" in error_detail:
-                     error_msg += f" - {error_detail['error'].get('message', str(error_detail['error']))}"
-             except: error_msg += f" - {e.response.text[:200]}"
-             return self._create_error_response(error_msg)
-         except Exception as e:
-             return self._create_error_response(f"Error calling Databricks API: {str(e)}")

      def _handle_openai_compatible(self, prompt: str, source_files: Dict[str, str], chat_id: Optional[int] = None, system_prompt: Optional[str] = None, max_output_tokens: int = DEFAULT_MAX_OUTPUT_TOKENS) -> Optional[Dict[str, Any]]:
          """Handle OpenAI-compatible API endpoints.
@@ -288,9 +246,6 @@ class LocalModelPlugin(Plugin):
          result = self._handle_openai_compatible(prompt, source_files, chat_id, system_prompt, max_output_tokens)
          if result is not None: return result

-         result = self._handle_databricks(prompt, source_files, chat_id, system_prompt, max_output_tokens)
-         if result is not None: return result
-
          if model_id == "google/gemini-2.5-pro":
              return self._handle_gemini_pro_25(prompt, source_files, chat_id, system_prompt, max_output_tokens)


src/ayechat_dev.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ayechat-dev
- Version: 0.36.9.20260204171331
+ Version: 0.36.9.20260205235944
  Summary: Aye Chat: Terminal-first AI Code Generator
  Author-email: "Acrotron, Inc." <info@acrotron.com>
  License: MIT

tests/test_llm_handler.py
@@ -123,6 +123,7 @@ class TestLlmHandler(TestCase):

          mock_print_error.assert_called_once_with(exc)

+     @patch('aye.controller.llm_handler.is_autodiff_enabled', return_value=False)
      @patch('aye.controller.llm_handler.get_user_config', return_value='off')
      @patch('aye.controller.llm_handler.filter_unchanged_files')
      @patch('aye.controller.llm_handler.make_paths_relative')
@@ -135,6 +136,7 @@ class TestLlmHandler(TestCase):
          mock_relative,
          mock_filter,
          _mock_get_user_config,
+         _mock_autodiff,
      ):
          updated_files = [{"file_name": "file1.py", "file_content": "content"}]
          llm_resp = LLMResponse(
@@ -175,6 +177,7 @@ class TestLlmHandler(TestCase):
          # Per-session gate is stored on conf
          self.assertTrue(getattr(self.conf, "_restore_tip_shown", False))

+     @patch('aye.controller.llm_handler.is_autodiff_enabled', return_value=False)
      @patch('aye.controller.llm_handler.get_user_config', return_value='on')
      @patch('aye.controller.llm_handler.filter_unchanged_files')
      @patch('aye.controller.llm_handler.make_paths_relative')
@@ -187,6 +190,7 @@ class TestLlmHandler(TestCase):
          mock_relative,
          mock_filter,
          _mock_get_user_config,
+         _mock_autodiff,
      ):
          updated_files = [{"file_name": "file1.py", "file_content": "content"}]
          llm_resp = LLMResponse(

tests/test_local_model_plugin.py
@@ -137,47 +137,6 @@ class TestLocalModelPlugin(TestCase):
      def test_handle_openai_no_key(self):
          self.assertIsNone(self.plugin._handle_openai_compatible("p", {}))

-     @patch('httpx.Client')
-     def test_handle_databricks_success(self, mock_client):
-         os.environ["AYE_DBX_API_URL"] = "http://fake.dbx.api"
-         os.environ["AYE_DBX_API_KEY"] = "fake_key"
-
-         mock_response = MagicMock()
-         mock_response.status_code = 200
-         mock_response.json.return_value = {
-             "choices": [{"message": {"content": [{"type": "text", "text": json.dumps({"answer_summary": "dbx response"})}]}}]
-         }
-         mock_response.raise_for_status.return_value = None
-         mock_client.return_value.__enter__.return_value.post.return_value = mock_response
-
-         result = self.plugin._handle_databricks("prompt", {})
-
-         self.assertIsNotNone(result)
-         self.assertEqual(result["summary"], "dbx response")
-
-     @patch('httpx.Client')
-     def test_handle_databricks_http_error(self, mock_client):
-         os.environ["AYE_DBX_API_URL"] = "http://fake.dbx.api"
-         os.environ["AYE_DBX_API_KEY"] = "fake_key"
-         mock_response = MagicMock(status_code=500, text="Server Error")
-         mock_response.json.side_effect = json.JSONDecodeError("", "", 0)
-         mock_client.return_value.__enter__.return_value.post.side_effect = httpx.HTTPStatusError(
-             "Error", request=MagicMock(), response=mock_response
-         )
-         result = self.plugin._handle_databricks("prompt", {})
-         self.assertIn("DBX API error: 500 - Server Error", result["summary"])
-
-     @patch('httpx.Client')
-     def test_handle_databricks_generic_error(self, mock_client):
-         os.environ["AYE_DBX_API_URL"] = "http://fake.dbx.api"
-         os.environ["AYE_DBX_API_KEY"] = "fake_key"
-         mock_client.return_value.__enter__.return_value.post.side_effect = Exception("Network down")
-         result = self.plugin._handle_databricks("prompt", {})
-         self.assertIn("Error calling Databricks API: Network down", result["summary"])
-
-     def test_handle_databricks_no_key(self):
-         self.assertIsNone(self.plugin._handle_databricks("p", {}))
-
      @patch('httpx.Client')
      def test_handle_gemini_success(self, mock_client):
          os.environ["GEMINI_API_KEY"] = "fake_key"
@@ -248,25 +207,8 @@ class TestLocalModelPlugin(TestCase):
          self.assertEqual(result, {"summary": "openai handled"})

      @patch.object(LocalModelPlugin, '_handle_openai_compatible', return_value=None)
-     @patch.object(LocalModelPlugin, '_handle_databricks')
-     def test_on_command_invoke_falls_back_to_dbx(self, mock_handle_dbx, mock_handle_openai):
-         mock_handle_dbx.return_value = {"summary": "dbx handled"}
-
-         params = {
-             "prompt": "test",
-             "source_files": {},
-             "root": self.root
-         }
-         result = self.plugin.on_command("local_model_invoke", params)
-
-         mock_handle_openai.assert_called_once()
-         mock_handle_dbx.assert_called_once()
-         self.assertEqual(result, {"summary": "dbx handled"})
-
-     @patch.object(LocalModelPlugin, '_handle_openai_compatible', return_value=None)
-     @patch.object(LocalModelPlugin, '_handle_databricks', return_value=None)
      @patch.object(LocalModelPlugin, '_handle_gemini_pro_25')
-     def test_on_command_invoke_routes_to_gemini_by_id(self, mock_handle_gemini, mock_handle_dbx, mock_handle_openai):
+     def test_on_command_invoke_routes_to_gemini_by_id(self, mock_handle_gemini, mock_handle_openai):
          mock_handle_gemini.return_value = {"summary": "gemini handled"}

          params = {
@@ -278,7 +220,6 @@ class TestLocalModelPlugin(TestCase):
          result = self.plugin.on_command("local_model_invoke", params)

          mock_handle_openai.assert_called_once()
-         mock_handle_dbx.assert_called_once()
          mock_handle_gemini.assert_called_once()
          self.assertEqual(result, {"summary": "gemini handled"})
