agent-cli 0.70.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196) hide show
  1. agent_cli/__init__.py +5 -0
  2. agent_cli/__main__.py +6 -0
  3. agent_cli/_extras.json +14 -0
  4. agent_cli/_requirements/.gitkeep +0 -0
  5. agent_cli/_requirements/audio.txt +79 -0
  6. agent_cli/_requirements/faster-whisper.txt +215 -0
  7. agent_cli/_requirements/kokoro.txt +425 -0
  8. agent_cli/_requirements/llm.txt +183 -0
  9. agent_cli/_requirements/memory.txt +355 -0
  10. agent_cli/_requirements/mlx-whisper.txt +222 -0
  11. agent_cli/_requirements/piper.txt +176 -0
  12. agent_cli/_requirements/rag.txt +402 -0
  13. agent_cli/_requirements/server.txt +154 -0
  14. agent_cli/_requirements/speed.txt +77 -0
  15. agent_cli/_requirements/vad.txt +155 -0
  16. agent_cli/_requirements/wyoming.txt +71 -0
  17. agent_cli/_tools.py +368 -0
  18. agent_cli/agents/__init__.py +23 -0
  19. agent_cli/agents/_voice_agent_common.py +136 -0
  20. agent_cli/agents/assistant.py +383 -0
  21. agent_cli/agents/autocorrect.py +284 -0
  22. agent_cli/agents/chat.py +496 -0
  23. agent_cli/agents/memory/__init__.py +31 -0
  24. agent_cli/agents/memory/add.py +190 -0
  25. agent_cli/agents/memory/proxy.py +160 -0
  26. agent_cli/agents/rag_proxy.py +128 -0
  27. agent_cli/agents/speak.py +209 -0
  28. agent_cli/agents/transcribe.py +671 -0
  29. agent_cli/agents/transcribe_daemon.py +499 -0
  30. agent_cli/agents/voice_edit.py +291 -0
  31. agent_cli/api.py +22 -0
  32. agent_cli/cli.py +106 -0
  33. agent_cli/config.py +503 -0
  34. agent_cli/config_cmd.py +307 -0
  35. agent_cli/constants.py +27 -0
  36. agent_cli/core/__init__.py +1 -0
  37. agent_cli/core/audio.py +461 -0
  38. agent_cli/core/audio_format.py +299 -0
  39. agent_cli/core/chroma.py +88 -0
  40. agent_cli/core/deps.py +191 -0
  41. agent_cli/core/openai_proxy.py +139 -0
  42. agent_cli/core/process.py +195 -0
  43. agent_cli/core/reranker.py +120 -0
  44. agent_cli/core/sse.py +87 -0
  45. agent_cli/core/transcription_logger.py +70 -0
  46. agent_cli/core/utils.py +526 -0
  47. agent_cli/core/vad.py +175 -0
  48. agent_cli/core/watch.py +65 -0
  49. agent_cli/dev/__init__.py +14 -0
  50. agent_cli/dev/cli.py +1588 -0
  51. agent_cli/dev/coding_agents/__init__.py +19 -0
  52. agent_cli/dev/coding_agents/aider.py +24 -0
  53. agent_cli/dev/coding_agents/base.py +167 -0
  54. agent_cli/dev/coding_agents/claude.py +39 -0
  55. agent_cli/dev/coding_agents/codex.py +24 -0
  56. agent_cli/dev/coding_agents/continue_dev.py +15 -0
  57. agent_cli/dev/coding_agents/copilot.py +24 -0
  58. agent_cli/dev/coding_agents/cursor_agent.py +48 -0
  59. agent_cli/dev/coding_agents/gemini.py +28 -0
  60. agent_cli/dev/coding_agents/opencode.py +15 -0
  61. agent_cli/dev/coding_agents/registry.py +49 -0
  62. agent_cli/dev/editors/__init__.py +19 -0
  63. agent_cli/dev/editors/base.py +89 -0
  64. agent_cli/dev/editors/cursor.py +15 -0
  65. agent_cli/dev/editors/emacs.py +46 -0
  66. agent_cli/dev/editors/jetbrains.py +56 -0
  67. agent_cli/dev/editors/nano.py +31 -0
  68. agent_cli/dev/editors/neovim.py +33 -0
  69. agent_cli/dev/editors/registry.py +59 -0
  70. agent_cli/dev/editors/sublime.py +20 -0
  71. agent_cli/dev/editors/vim.py +42 -0
  72. agent_cli/dev/editors/vscode.py +15 -0
  73. agent_cli/dev/editors/zed.py +20 -0
  74. agent_cli/dev/project.py +568 -0
  75. agent_cli/dev/registry.py +52 -0
  76. agent_cli/dev/skill/SKILL.md +141 -0
  77. agent_cli/dev/skill/examples.md +571 -0
  78. agent_cli/dev/terminals/__init__.py +19 -0
  79. agent_cli/dev/terminals/apple_terminal.py +82 -0
  80. agent_cli/dev/terminals/base.py +56 -0
  81. agent_cli/dev/terminals/gnome.py +51 -0
  82. agent_cli/dev/terminals/iterm2.py +84 -0
  83. agent_cli/dev/terminals/kitty.py +77 -0
  84. agent_cli/dev/terminals/registry.py +48 -0
  85. agent_cli/dev/terminals/tmux.py +58 -0
  86. agent_cli/dev/terminals/warp.py +132 -0
  87. agent_cli/dev/terminals/zellij.py +78 -0
  88. agent_cli/dev/worktree.py +856 -0
  89. agent_cli/docs_gen.py +417 -0
  90. agent_cli/example-config.toml +185 -0
  91. agent_cli/install/__init__.py +5 -0
  92. agent_cli/install/common.py +89 -0
  93. agent_cli/install/extras.py +174 -0
  94. agent_cli/install/hotkeys.py +48 -0
  95. agent_cli/install/services.py +87 -0
  96. agent_cli/memory/__init__.py +7 -0
  97. agent_cli/memory/_files.py +250 -0
  98. agent_cli/memory/_filters.py +63 -0
  99. agent_cli/memory/_git.py +157 -0
  100. agent_cli/memory/_indexer.py +142 -0
  101. agent_cli/memory/_ingest.py +408 -0
  102. agent_cli/memory/_persistence.py +182 -0
  103. agent_cli/memory/_prompt.py +91 -0
  104. agent_cli/memory/_retrieval.py +294 -0
  105. agent_cli/memory/_store.py +169 -0
  106. agent_cli/memory/_streaming.py +44 -0
  107. agent_cli/memory/_tasks.py +48 -0
  108. agent_cli/memory/api.py +113 -0
  109. agent_cli/memory/client.py +272 -0
  110. agent_cli/memory/engine.py +361 -0
  111. agent_cli/memory/entities.py +43 -0
  112. agent_cli/memory/models.py +112 -0
  113. agent_cli/opts.py +433 -0
  114. agent_cli/py.typed +0 -0
  115. agent_cli/rag/__init__.py +3 -0
  116. agent_cli/rag/_indexer.py +67 -0
  117. agent_cli/rag/_indexing.py +226 -0
  118. agent_cli/rag/_prompt.py +30 -0
  119. agent_cli/rag/_retriever.py +156 -0
  120. agent_cli/rag/_store.py +48 -0
  121. agent_cli/rag/_utils.py +218 -0
  122. agent_cli/rag/api.py +175 -0
  123. agent_cli/rag/client.py +299 -0
  124. agent_cli/rag/engine.py +302 -0
  125. agent_cli/rag/models.py +55 -0
  126. agent_cli/scripts/.runtime/.gitkeep +0 -0
  127. agent_cli/scripts/__init__.py +1 -0
  128. agent_cli/scripts/check_plugin_skill_sync.py +50 -0
  129. agent_cli/scripts/linux-hotkeys/README.md +63 -0
  130. agent_cli/scripts/linux-hotkeys/toggle-autocorrect.sh +45 -0
  131. agent_cli/scripts/linux-hotkeys/toggle-transcription.sh +58 -0
  132. agent_cli/scripts/linux-hotkeys/toggle-voice-edit.sh +58 -0
  133. agent_cli/scripts/macos-hotkeys/README.md +45 -0
  134. agent_cli/scripts/macos-hotkeys/skhd-config-example +5 -0
  135. agent_cli/scripts/macos-hotkeys/toggle-autocorrect.sh +12 -0
  136. agent_cli/scripts/macos-hotkeys/toggle-transcription.sh +37 -0
  137. agent_cli/scripts/macos-hotkeys/toggle-voice-edit.sh +37 -0
  138. agent_cli/scripts/nvidia-asr-server/README.md +99 -0
  139. agent_cli/scripts/nvidia-asr-server/pyproject.toml +27 -0
  140. agent_cli/scripts/nvidia-asr-server/server.py +255 -0
  141. agent_cli/scripts/nvidia-asr-server/shell.nix +32 -0
  142. agent_cli/scripts/nvidia-asr-server/uv.lock +4654 -0
  143. agent_cli/scripts/run-openwakeword.sh +11 -0
  144. agent_cli/scripts/run-piper-windows.ps1 +30 -0
  145. agent_cli/scripts/run-piper.sh +24 -0
  146. agent_cli/scripts/run-whisper-linux.sh +40 -0
  147. agent_cli/scripts/run-whisper-macos.sh +6 -0
  148. agent_cli/scripts/run-whisper-windows.ps1 +51 -0
  149. agent_cli/scripts/run-whisper.sh +9 -0
  150. agent_cli/scripts/run_faster_whisper_server.py +136 -0
  151. agent_cli/scripts/setup-linux-hotkeys.sh +72 -0
  152. agent_cli/scripts/setup-linux.sh +108 -0
  153. agent_cli/scripts/setup-macos-hotkeys.sh +61 -0
  154. agent_cli/scripts/setup-macos.sh +76 -0
  155. agent_cli/scripts/setup-windows.ps1 +63 -0
  156. agent_cli/scripts/start-all-services-windows.ps1 +53 -0
  157. agent_cli/scripts/start-all-services.sh +178 -0
  158. agent_cli/scripts/sync_extras.py +138 -0
  159. agent_cli/server/__init__.py +3 -0
  160. agent_cli/server/cli.py +721 -0
  161. agent_cli/server/common.py +222 -0
  162. agent_cli/server/model_manager.py +288 -0
  163. agent_cli/server/model_registry.py +225 -0
  164. agent_cli/server/proxy/__init__.py +3 -0
  165. agent_cli/server/proxy/api.py +444 -0
  166. agent_cli/server/streaming.py +67 -0
  167. agent_cli/server/tts/__init__.py +3 -0
  168. agent_cli/server/tts/api.py +335 -0
  169. agent_cli/server/tts/backends/__init__.py +82 -0
  170. agent_cli/server/tts/backends/base.py +139 -0
  171. agent_cli/server/tts/backends/kokoro.py +403 -0
  172. agent_cli/server/tts/backends/piper.py +253 -0
  173. agent_cli/server/tts/model_manager.py +201 -0
  174. agent_cli/server/tts/model_registry.py +28 -0
  175. agent_cli/server/tts/wyoming_handler.py +249 -0
  176. agent_cli/server/whisper/__init__.py +3 -0
  177. agent_cli/server/whisper/api.py +413 -0
  178. agent_cli/server/whisper/backends/__init__.py +89 -0
  179. agent_cli/server/whisper/backends/base.py +97 -0
  180. agent_cli/server/whisper/backends/faster_whisper.py +225 -0
  181. agent_cli/server/whisper/backends/mlx.py +270 -0
  182. agent_cli/server/whisper/languages.py +116 -0
  183. agent_cli/server/whisper/model_manager.py +157 -0
  184. agent_cli/server/whisper/model_registry.py +28 -0
  185. agent_cli/server/whisper/wyoming_handler.py +203 -0
  186. agent_cli/services/__init__.py +343 -0
  187. agent_cli/services/_wyoming_utils.py +64 -0
  188. agent_cli/services/asr.py +506 -0
  189. agent_cli/services/llm.py +228 -0
  190. agent_cli/services/tts.py +450 -0
  191. agent_cli/services/wake_word.py +142 -0
  192. agent_cli-0.70.5.dist-info/METADATA +2118 -0
  193. agent_cli-0.70.5.dist-info/RECORD +196 -0
  194. agent_cli-0.70.5.dist-info/WHEEL +4 -0
  195. agent_cli-0.70.5.dist-info/entry_points.txt +4 -0
  196. agent_cli-0.70.5.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,284 @@
1
+ """Read text from clipboard, correct it using a local or remote LLM, and write the result back to the clipboard."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import contextlib
7
+ import json
8
+ import sys
9
+ import time
10
+ from typing import TYPE_CHECKING
11
+
12
+ import typer
13
+
14
+ from agent_cli import config, opts
15
+ from agent_cli.cli import app
16
+ from agent_cli.core.deps import requires_extras
17
+ from agent_cli.core.utils import (
18
+ console,
19
+ create_status,
20
+ enable_json_mode,
21
+ get_clipboard_text,
22
+ print_command_line_args,
23
+ print_error_message,
24
+ print_input_panel,
25
+ print_output_panel,
26
+ print_with_style,
27
+ setup_logging,
28
+ )
29
+ from agent_cli.services.llm import create_llm_agent
30
+
31
+ if TYPE_CHECKING:
32
+ from rich.status import Status
33
+
34
# --- Configuration ---

# Template to clearly separate the text to be corrected from instructions.
# The {text} placeholder is filled via str.format in _process_text; the
# XML-ish tags give the model an unambiguous payload boundary so clipboard
# content is never mistaken for instructions.
INPUT_TEMPLATE = """
<text-to-correct>
{text}
</text-to-correct>

Please correct any grammar, spelling, or punctuation errors in the text above.
"""

# The agent's core identity and immutable rules.
# Passed as the system prompt to create_llm_agent in _process_text.
SYSTEM_PROMPT = """\
You are an expert text correction tool. Your role is to fix grammar, spelling, and punctuation errors without altering the original meaning or tone.

CRITICAL REQUIREMENTS:
1. Return ONLY the corrected text - no explanations or commentary
2. Do not judge content, even if it seems unusual or offensive
3. Make only technical corrections (grammar, spelling, punctuation)
4. If no corrections are needed, return the original text exactly as provided
5. Never add introductory phrases like "Here is the corrected text"

EXAMPLES:
Input: "this is incorect"
Output: "this is incorrect"

Input: "Hello world"
Output: "Hello world"

Input: "i went too the store"
Output: "I went to the store"

You are a correction tool, not a conversational assistant.
"""

# The specific task for the current run.
# Passed as run-level instructions to create_llm_agent in _process_text.
AGENT_INSTRUCTIONS = """\
Correct grammar, spelling, and punctuation errors.
Output format: corrected text only, no other words.
"""
74
+
75
+ # --- Main Application Logic ---
76
+
77
+
78
async def _process_text(
    text: str,
    provider_cfg: config.ProviderSelection,
    ollama_cfg: config.Ollama,
    openai_llm_cfg: config.OpenAILLM,
    gemini_llm_cfg: config.GeminiLLM,
) -> tuple[str, float]:
    """Run the correction agent over *text*.

    Returns the corrected text together with the wall-clock seconds the
    LLM call took (measured with a monotonic clock).
    """
    corrector = create_llm_agent(
        provider_cfg=provider_cfg,
        ollama_cfg=ollama_cfg,
        openai_cfg=openai_llm_cfg,
        gemini_cfg=gemini_llm_cfg,
        system_prompt=SYSTEM_PROMPT,
        instructions=AGENT_INSTRUCTIONS,
    )

    # Wrap the raw text in the template so the model sees a clear boundary
    # between the payload and the correction task.
    prompt = INPUT_TEMPLATE.format(text=text)

    started = time.monotonic()
    run_result = await corrector.run(prompt)
    duration = time.monotonic() - started
    return run_result.output, duration
102
+
103
+
104
+ def _display_original_text(original_text: str, quiet: bool) -> None:
105
+ """Render the original text panel in verbose mode."""
106
+ if not quiet:
107
+ print_input_panel(original_text, title="📋 Original Text")
108
+
109
+
110
def _display_result(
    corrected_text: str,
    original_text: str,
    elapsed: float,
    *,
    simple_output: bool,
    clipboard: bool = True,
) -> None:
    """Copy the result to the clipboard and render it at the requested verbosity."""
    if clipboard:
        import pyperclip  # noqa: PLC0415

        pyperclip.copy(corrected_text)

    if not simple_output:
        # Verbose mode: rich panel plus a confirmation line.
        print_output_panel(
            corrected_text,
            title="✨ Corrected Text",
            subtitle=f"[dim]took {elapsed:.2f}s[/dim]",
        )
        print_with_style("✅ Success! Corrected text has been copied to your clipboard.")
        return

    # Simple mode: a single plain line on the console.
    unchanged = bool(original_text) and corrected_text.strip() == original_text.strip()
    message = "✅ No correction needed." if unchanged else corrected_text
    console.print(message)
136
+
137
+
138
+ def _maybe_status(
139
+ provider_cfg: config.ProviderSelection,
140
+ ollama_cfg: config.Ollama,
141
+ openai_llm_cfg: config.OpenAILLM,
142
+ gemini_llm_cfg: config.GeminiLLM,
143
+ quiet: bool,
144
+ ) -> Status | contextlib.nullcontext:
145
+ if not quiet:
146
+ if provider_cfg.llm_provider == "ollama":
147
+ model_name = ollama_cfg.llm_ollama_model
148
+ elif provider_cfg.llm_provider == "openai":
149
+ model_name = openai_llm_cfg.llm_openai_model
150
+ elif provider_cfg.llm_provider == "gemini":
151
+ model_name = gemini_llm_cfg.llm_gemini_model
152
+ return create_status(f"🤖 Correcting with {model_name}...", "bold yellow")
153
+ return contextlib.nullcontext()
154
+
155
+
156
def _provider_error_hint(
    provider_cfg: config.ProviderSelection,
    ollama_cfg: config.Ollama,
) -> str:
    """Return a one-line troubleshooting hint for the active LLM provider."""
    hints = {
        "ollama": (
            "Please check that your Ollama server is running at "
            f"[bold cyan]{ollama_cfg.llm_ollama_host}[/bold cyan]"
        ),
        "openai": "Please check your OpenAI API key and network connection.",
        "gemini": "Please check your Gemini API key and network connection.",
    }
    # Fall back to a generic hint: previously an unknown provider left
    # `error_details` unbound and raised UnboundLocalError while we were
    # already handling another exception.
    return hints.get(provider_cfg.llm_provider, "Please check your LLM provider configuration.")


async def _async_autocorrect(
    *,
    text: str | None,
    provider_cfg: config.ProviderSelection,
    ollama_cfg: config.Ollama,
    openai_llm_cfg: config.OpenAILLM,
    gemini_llm_cfg: config.GeminiLLM,
    general_cfg: config.General,
) -> str | None:
    """Asynchronous core of the autocorrect command.

    Resolves the input text (argument, or clipboard when *text* is None),
    runs the LLM correction, then displays and optionally copies the
    result.  Returns the corrected text, or ``None`` when no input text
    could be obtained.  Exits the process with status 1 on LLM failure.
    """
    setup_logging(general_cfg.log_level, general_cfg.log_file, quiet=general_cfg.quiet)
    original_text = text if text is not None else get_clipboard_text(quiet=general_cfg.quiet)

    if original_text is None:
        return None

    _display_original_text(original_text, general_cfg.quiet)

    try:
        with _maybe_status(
            provider_cfg,
            ollama_cfg,
            openai_llm_cfg,
            gemini_llm_cfg,
            general_cfg.quiet,
        ):
            corrected_text, elapsed = await _process_text(
                original_text,
                provider_cfg,
                ollama_cfg,
                openai_llm_cfg,
                gemini_llm_cfg,
            )

        _display_result(
            corrected_text,
            original_text,
            elapsed,
            simple_output=general_cfg.quiet,
            clipboard=general_cfg.clipboard,
        )
        return corrected_text

    # Broad catch is deliberate: this is the top-level CLI boundary and any
    # provider/network failure should surface as a friendly message.
    except Exception as e:
        if general_cfg.quiet:
            print(f"❌ {e}")
        else:
            print_error_message(str(e), _provider_error_hint(provider_cfg, ollama_cfg))
        sys.exit(1)
211
+
212
+
213
@app.command("autocorrect", rich_help_panel="Text Commands")
@requires_extras("llm")
def autocorrect(
    *,
    text: str | None = typer.Argument(
        None,
        help="The text to correct. If not provided, reads from clipboard.",
        rich_help_panel="General Options",
    ),
    # --- Provider Selection ---
    llm_provider: str = opts.LLM_PROVIDER,
    # --- LLM Configuration ---
    # Ollama (local service)
    llm_ollama_model: str = opts.LLM_OLLAMA_MODEL,
    llm_ollama_host: str = opts.LLM_OLLAMA_HOST,
    # OpenAI
    llm_openai_model: str = opts.LLM_OPENAI_MODEL,
    openai_api_key: str | None = opts.OPENAI_API_KEY,
    openai_base_url: str | None = opts.OPENAI_BASE_URL,
    # Gemini
    llm_gemini_model: str = opts.LLM_GEMINI_MODEL,
    gemini_api_key: str | None = opts.GEMINI_API_KEY,
    # --- General Options ---
    log_level: opts.LogLevel = opts.LOG_LEVEL,
    log_file: str | None = opts.LOG_FILE,
    quiet: bool = opts.QUIET,
    json_output: bool = opts.JSON_OUTPUT,
    # NOTE(review): config_file is not referenced in this body; presumably
    # consumed by the shared opts/config loading machinery — confirm.
    config_file: str | None = opts.CONFIG_FILE,
    print_args: bool = opts.PRINT_ARGS,
) -> None:
    """Correct text from clipboard using a local or remote LLM.

    Reads TEXT (or the clipboard when TEXT is omitted), asks the selected
    LLM provider for a grammar/spelling/punctuation pass, and copies the
    result back to the clipboard unless JSON output is requested.
    """
    if print_args:
        print_command_line_args(locals())

    # JSON output implies quiet: rich panels would corrupt machine-readable stdout.
    effective_quiet = quiet or json_output
    if json_output:
        enable_json_mode()

    # Assemble the typed config objects consumed by _async_autocorrect.
    provider_cfg = config.ProviderSelection(
        llm_provider=llm_provider,
        asr_provider="wyoming",  # Not used, but required by model
        tts_provider="wyoming",  # Not used, but required by model
    )
    ollama_cfg = config.Ollama(llm_ollama_model=llm_ollama_model, llm_ollama_host=llm_ollama_host)
    openai_llm_cfg = config.OpenAILLM(
        llm_openai_model=llm_openai_model,
        openai_api_key=openai_api_key,
        openai_base_url=openai_base_url,
    )
    gemini_llm_cfg = config.GeminiLLM(
        llm_gemini_model=llm_gemini_model,
        gemini_api_key=gemini_api_key,
    )
    general_cfg = config.General(
        log_level=log_level,
        log_file=log_file,
        quiet=effective_quiet,
        # In JSON mode the result goes to stdout instead of the clipboard.
        clipboard=not json_output,
    )

    corrected_text = asyncio.run(
        _async_autocorrect(
            text=text,
            provider_cfg=provider_cfg,
            ollama_cfg=ollama_cfg,
            openai_llm_cfg=openai_llm_cfg,
            gemini_llm_cfg=gemini_llm_cfg,
            general_cfg=general_cfg,
        ),
    )
    # corrected_text is None when no input text was available.
    if json_output:
        print(json.dumps({"corrected_text": corrected_text}))