rubber-ducky 1.4.0__py3-none-any.whl → 1.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ducky/config.py +3 -3
- ducky/crumb.py +84 -0
- ducky/ducky.py +210 -489
- rubber_ducky-1.5.1.dist-info/METADATA +198 -0
- rubber_ducky-1.5.1.dist-info/RECORD +13 -0
- {rubber_ducky-1.4.0.dist-info → rubber_ducky-1.5.1.dist-info}/top_level.txt +0 -1
- crumbs/disk-usage/disk-usage.sh +0 -12
- crumbs/disk-usage/info.txt +0 -3
- crumbs/git-log/git-log.sh +0 -24
- crumbs/git-log/info.txt +0 -3
- crumbs/git-status/git-status.sh +0 -21
- crumbs/git-status/info.txt +0 -3
- crumbs/process-list/info.txt +0 -3
- crumbs/process-list/process-list.sh +0 -20
- crumbs/recent-files/info.txt +0 -3
- crumbs/recent-files/recent-files.sh +0 -13
- crumbs/system-health/info.txt +0 -3
- crumbs/system-health/system-health.sh +0 -58
- rubber_ducky-1.4.0.dist-info/METADATA +0 -210
- rubber_ducky-1.4.0.dist-info/RECORD +0 -24
- {rubber_ducky-1.4.0.dist-info → rubber_ducky-1.5.1.dist-info}/WHEEL +0 -0
- {rubber_ducky-1.4.0.dist-info → rubber_ducky-1.5.1.dist-info}/entry_points.txt +0 -0
- {rubber_ducky-1.4.0.dist-info → rubber_ducky-1.5.1.dist-info}/licenses/LICENSE +0 -0
ducky/ducky.py
CHANGED
@@ -6,8 +6,8 @@ import json
 import os
 import re
 import shlex
+import subprocess
 import sys
-import signal
 from dataclasses import dataclass
 from datetime import UTC, datetime
 from rich.console import Console
@@ -16,25 +16,13 @@ from textwrap import dedent
 from typing import Any, Dict, List


-
-
-    name: str
-    path: Path
-    type: str
-    enabled: bool
-    description: str | None = None
-    poll: bool = False
-    poll_type: str | None = None # "interval" or "continuous"
-    poll_interval: int = 2
-    poll_prompt: str | None = None
-
+from .config import ConfigManager
+from .crumb import CrumbManager

 from contextlib import nullcontext

 from ollama import AsyncClient

-from .config import ConfigManager
-
 try: # prompt_toolkit is optional at runtime
     from prompt_toolkit import PromptSession
     from prompt_toolkit.application import Application
@@ -74,109 +62,15 @@ class ShellResult:
 HISTORY_DIR = Path.home() / ".ducky"
 PROMPT_HISTORY_FILE = HISTORY_DIR / "prompt_history"
 CONVERSATION_LOG_FILE = HISTORY_DIR / "conversation.log"
-
-CRUMBS: Dict[str, Crumb] = {}
+CRUMBS: Dict[str, Any] = {}
 console = Console()


 def ensure_history_dir() -> Path:
     HISTORY_DIR.mkdir(parents=True, exist_ok=True)
-    CRUMBS_DIR.mkdir(parents=True, exist_ok=True)
     return HISTORY_DIR


-def load_crumbs() -> Dict[str, Crumb]:
-    """Populate the global ``CRUMBS`` dictionary from both default and user crumbs.
-
-    Each crumb is expected to be a directory containing an ``info.txt`` and a
-    script file matching the ``type`` field (``shell`` → ``*.sh``).
-
-    Default crumbs are loaded from the package directory first, then user crumbs
-    are loaded from ``~/.ducky/crumbs/`` and can override default crumbs if they
-    have the same name.
-    """
-
-    global CRUMBS
-    CRUMBS.clear()
-
-    # Helper function to load crumbs from a directory
-    def _load_from_dir(dir_path: Path) -> None:
-        if not dir_path.exists():
-            return
-
-        for crumb_dir in dir_path.iterdir():
-            if not crumb_dir.is_dir():
-                continue
-            info_path = crumb_dir / "info.txt"
-            if not info_path.is_file():
-                continue
-            # Parse key: value pairs
-            meta = {}
-            for line in info_path.read_text(encoding="utf-8").splitlines():
-                if ":" not in line:
-                    continue
-                key, val = line.split(":", 1)
-                meta[key.strip()] = val.strip()
-            name = meta.get("name", crumb_dir.name)
-            ctype = meta.get("type", "shell")
-            description = meta.get("description")
-            poll = meta.get("poll", "").lower() == "true"
-            poll_type = meta.get("poll_type")
-            poll_interval = int(meta.get("poll_interval", 2))
-            poll_prompt = meta.get("poll_prompt")
-            # Find script file: look for executable in the directory
-            script_path: Path | None = None
-            if ctype == "shell":
-                # Prefer a file named <name>.sh if present
-                candidate = crumb_dir / f"{name}.sh"
-                if candidate.is_file() and os.access(candidate, os.X_OK):
-                    script_path = candidate
-                else:
-                    # Fallback: first .sh in dir
-                    for p in crumb_dir.glob("*.sh"):
-                        if os.access(p, os.X_OK):
-                            script_path = p
-                            break
-            # Default to first file if script not found
-            if script_path is None:
-                files = list(crumb_dir.iterdir())
-                if files:
-                    script_path = files[0]
-            if script_path is None:
-                continue
-            crumb = Crumb(
-                name=name,
-                path=script_path,
-                type=ctype,
-                enabled=False,
-                description=description,
-                poll=poll,
-                poll_type=poll_type,
-                poll_interval=poll_interval,
-                poll_prompt=poll_prompt,
-            )
-            CRUMBS[name] = crumb
-
-    # Try to load from package directory (where ducky is installed)
-    try:
-        # Try to locate the crumbs directory relative to the ducky package
-        import ducky
-        # Get the directory containing the ducky package
-        ducky_dir = Path(ducky.__file__).parent
-        # Check if crumbs exists in the same directory as ducky package
-        default_crumbs_dir = ducky_dir.parent / "crumbs"
-        if default_crumbs_dir.exists():
-            _load_from_dir(default_crumbs_dir)
-    except Exception:
-        # If package directory loading fails, continue without default crumbs
-        pass
-
-    # Load user crumbs (these can override default crumbs with the same name)
-    _load_from_dir(CRUMBS_DIR)
-
-    return CRUMBS
-
-
 class ConversationLogger:
     def __init__(self, log_path: Path) -> None:
         self.log_path = log_path
@@ -292,52 +186,16 @@ class RubberDuck:
         self.model = model
         self.quick = quick
         self.command_mode = command_mode
-        self.
+        self.last_result: AssistantResult | None = None
         self.messages: List[Dict[str, str]] = [
             {"role": "system", "content": self.system_prompt}
         ]
-        # Update system prompt to include enabled crumb descriptions
-
-    def update_system_prompt(self) -> None:
-        """Append enabled crumb descriptions to the system prompt.
-
-        The system prompt is stored in ``self.system_prompt`` and injected as the
-        first system message. When crumbs are enabled, we add a section that
-        lists the crumb names and their descriptions. The format is simple:
-
-        ``Crumbs:``\n
-        ``- <name>: <description>``\n
-        If no crumbs are enabled the prompt is unchanged.
-        """
-        # Start with the base system prompt
-        prompt_lines = [self.system_prompt]
-
-        if self.crumbs:
-            prompt_lines.append(
-                "\nCrumbs are simple scripts you can run with bash, uv, or bun."
-            )
-            prompt_lines.append("Crumbs:")
-            for c in self.crumbs.values():
-                description = c.description or "no description"
-                prompt_lines.append(f"- {c.name}: {description}")
-
-        # Update the system prompt
-        self.system_prompt = "\n".join(prompt_lines)
-
-        # Update the first system message in the messages list
-        if self.messages and self.messages[0]["role"] == "system":
-            self.messages[0]["content"] = self.system_prompt
-        else:
-            # If there's no system message, add one
-            self.messages.insert(0, {"role": "system", "content": self.system_prompt})

     async def send_prompt(
         self, prompt: str | None = None, code: str | None = None, command_mode: bool | None = None
     ) -> AssistantResult:
         user_content = (prompt or "").strip()

-        self.update_system_prompt()
-
         if code:
             user_content = f"{user_content}\n\n{code}" if user_content else code

@@ -363,7 +221,7 @@ class RubberDuck:
             model=self.model,
             messages=self.messages,
             stream=False,
-            think=
+            think=False,
         )

         assistant_message: Any | None = response.message
@@ -380,7 +238,9 @@ class RubberDuck:

         command = self._extract_command(content) if effective_command_mode else None

-
+        result = AssistantResult(content=content, command=command, thinking=thinking)
+        self.last_result = result
+        return result

     async def run_shell_command(self, command: str) -> ShellResult:
         process = await asyncio.create_subprocess_shell(
@@ -402,8 +262,8 @@ class RubberDuck:
             return None

         command_lines: List[str] = []
-
         in_block = False
+
         for line in lines:
             stripped = line.strip()
             if stripped.startswith("```"):
@@ -413,20 +273,18 @@ class RubberDuck:
                 continue
             if in_block:
                 if stripped:
-                    command_lines
-                    break
+                    command_lines.append(stripped)
                 continue
             if stripped:
-                command_lines
+                command_lines.append(stripped)
+                # If not in a block, only take the first line
                 break

         if not command_lines:
             return None

-        command
-
-        if first_semicolon != -1:
-            command = command[:first_semicolon].strip()
+        # Join all command lines with newlines for multi-line commands
+        command = "\n".join(command_lines)

         return command or None

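The behavioural change in this hunk is easy to miss in the word-level diff: 1.4.0 kept only the first line inside a fenced block and, per the removed first_semicolon logic, cut the command at the first semicolon, while 1.5.1 collects every non-empty line in the block and joins them with newlines. Below is a small standalone sketch of the new behaviour, written against the loop shown above; the fence toggle before the first continue is an assumption, since that part of _extract_command sits outside the hunk, and the free function name is ours, not the package's.

# Illustrative sketch only; the packaged logic lives in RubberDuck._extract_command.
def extract_command(content: str) -> str | None:
    command_lines: list[str] = []
    in_block = False
    for line in content.splitlines():
        stripped = line.strip()
        if stripped.startswith("```"):
            # Assumed fence handling: the real toggle is outside the hunk shown above.
            in_block = not in_block
            continue
        if in_block:
            if stripped:
                command_lines.append(stripped)
            continue
        if stripped:
            command_lines.append(stripped)
            # Outside a fence, only the first non-empty line is taken.
            break
    if not command_lines:
        return None
    # 1.5.1 behaviour: join the collected lines so multi-line commands survive intact.
    return "\n".join(command_lines) or None


reply = "```sh\ncd /var/log\ngrep -i error syslog | tail -n 20\n```"
assert extract_command(reply) == "cd /var/log\ngrep -i error syslog | tail -n 20"

Under 1.4.0 the same reply would have yielded only the first line of the block, so multi-step suggestions were silently truncated before reaching /run.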
@@ -496,7 +354,7 @@ class InlineInterface:
         self.pending_command: str | None = None
         self.session: PromptSession | None = None
         self.selected_model: str | None = None
-        self.
+        self.crumb_manager = CrumbManager()

         if (
             PromptSession is not None
@@ -642,6 +500,12 @@ class InlineInterface:
             console.print("Nothing to run yet.", style="yellow")
             return

+        # Check if first word is a crumb name
+        first_word = stripped.split()[0].lower()
+        if self.crumb_manager.has_crumb(first_word):
+            await self._use_crumb(first_word)
+            return
+
         if stripped.lower() in {":run", "/run"}:
             await self._run_last_command()
             return
@@ -670,12 +534,8 @@ class InlineInterface:
             await self._show_crumbs()
             return

-        if stripped.
-            await self.
-            return
-
-        if stripped.startswith("/poll"):
-            await self._handle_poll_command(stripped)
+        if stripped.startswith("/crumb"):
+            await self._handle_crumb_command(stripped)
             return

         if stripped.startswith("!"):
@@ -703,8 +563,6 @@ class InlineInterface:
         self._code_sent = True
         self.last_command = result.command
         self.pending_command = result.command
-        # Set last_shell_output to True so empty Enter will explain the result
-        self.last_shell_output = True

     async def _explain_last_command(self) -> None:
         if not self.assistant.messages or len(self.assistant.messages) < 2:
@@ -732,7 +590,11 @@ class InlineInterface:

         commands = [
             ("[bold]/help[/bold]", "Show this help message"),
-            ("[bold]/crumbs[/bold]", "List all
+            ("[bold]/crumbs[/bold]", "List all saved crumb shortcuts"),
+            ("[bold]/crumb <name>[/bold]", "Save last result as a crumb"),
+            ("[bold]/crumb add <name> <cmd>[/bold]", "Manually add a crumb"),
+            ("[bold]/crumb del <name>[/bold]", "Delete a crumb"),
+            ("[bold]<name>[/bold]", "Invoke a saved crumb"),
             ("[bold]/model[/bold]", "Select a model interactively (local or cloud)"),
             (
                 "[bold]/local[/bold]",
@@ -743,22 +605,6 @@ class InlineInterface:
                 "[bold]/clear[/bold] or [bold]/reset[/bold]",
                 "Clear conversation history",
             ),
-            (
-                "[bold]/poll <crumb>[/bold]",
-                "Start polling session for a crumb",
-            ),
-            (
-                "[bold]/poll <crumb> -i 5[/bold]",
-                "Start polling with 5s interval",
-            ),
-            (
-                "[bold]/poll <crumb> -p <text>[/bold]",
-                "Start polling with custom prompt",
-            ),
-            (
-                "[bold]/stop-poll[/bold]",
-                "Stop current polling session",
-            ),
             (
                 "[bold]/run[/bold]",
                 "Re-run the last suggested command",
@@ -779,49 +625,31 @@ class InlineInterface:
         console.print()

     async def _show_crumbs(self) -> None:
-        """Display all
-        crumbs = self.
+        """Display all saved crumbs."""
+        crumbs = self.crumb_manager.list_crumbs()

         if not crumbs:
-            console.print("No crumbs
+            console.print("No crumbs saved yet. Use '/crumb <name>' to save a command.", style="yellow")
             return

-        console.print("\
-        console.print("
+        console.print("\nSaved Crumbs", style="bold blue")
+        console.print("=============", style="bold blue")
         console.print()

-        #
-
-        user_crumbs = []
+        # Calculate max name length for alignment
+        max_name_len = max(len(name) for name in crumbs.keys())

-        for name,
-
-
-
-            else:
-                user_crumbs.append((name, crumb))
-
-        # Show default crumbs
-        if default_crumbs:
-            console.print("[bold cyan]Default Crumbs (shipped with ducky):[/bold cyan]", style="cyan")
-            for name, crumb in default_crumbs:
-                description = crumb.description or "No description"
-                # Check if it has polling enabled
-                poll_info = " [dim](polling enabled)[/dim]" if crumb.poll else ""
-                console.print(f" [bold]{name}[/bold]{poll_info}: {description}")
-            console.print()
+        for name, data in sorted(crumbs.items()):
+            explanation = data.get("explanation", "") or "No explanation yet"
+            command = data.get("command", "") or "No command"
+            created_at = data.get("created_at", "")

-
-
-
-
-                description = crumb.description or "No description"
-                # Check if it has polling enabled
-                poll_info = " [dim](polling enabled)[/dim]" if crumb.poll else ""
-                console.print(f" [bold]{name}[/bold]{poll_info}: {description}")
-            console.print()
+            # Format: name | explanation | command
+            console.print(
+                f"[bold]{name:<{max_name_len}}[/bold] | [cyan]{explanation}[/cyan] | [dim]{command}[/dim]"
+            )

-        console.print(f"[dim]Total: {len(crumbs)} crumbs
+        console.print(f"\n[dim]Total: {len(crumbs)} crumbs[/dim]")

     async def _clear_history(self) -> None:
         self.assistant.clear_history()
@@ -829,85 +657,142 @@ class InlineInterface:
         self.pending_command = None
         self.last_shell_output = None

-    async def
-        """Handle /
-
-
-
-
-        )
+    async def _handle_crumb_command(self, command: str) -> None:
+        """Handle /crumb commands."""
+        parts = command.split()
+        if len(parts) == 1:
+            # Just "/crumbs" - show list
+            await self._show_crumbs()
             return

-
-
-
-
-            console.print("Example: /poll log-crumb -i 5", style="dim")
+        if len(parts) == 2:
+            # "/crumb <name>" - save last result
+            name = parts[1]
+            await self._save_crumb(name)
             return

-
-
-
+        if len(parts) >= 3 and parts[1] == "add":
+            # "/crumb add <name> <...command>"
+            if len(parts) < 4:
+                console.print("Usage: /crumb add <name> <command>", style="yellow")
+                return
+            name = parts[2]
+            cmd = " ".join(parts[3:])
+            await self._add_crumb_manual(name, cmd)
+            return

-
-
-
-
-
-                    interval = int(parts[i + 1])
-                    i += 2
-                except ValueError:
-                    console.print("Invalid interval value.", style="red")
-                    return
-            elif parts[i] in {"-p", "--prompt"} and i + 1 < len(parts):
-                prompt = " ".join(parts[i + 1:])
-                break
-            else:
-                i += 1
+        if len(parts) == 3 and parts[1] == "del":
+            # "/crumb del <name>"
+            name = parts[2]
+            await self._delete_crumb(name)
+            return

-
-
-
-
-
-
+        console.print(
+            "Usage: /crumb <name> | /crumb add <name> <cmd> | /crumb del <name>",
+            style="yellow",
+        )
+
+    async def _save_crumb(self, name: str) -> None:
+        """Save the last result as a crumb."""
+        if not self.assistant.last_result:
+            console.print("No previous command to save. Run a command first.", style="yellow")
             return

-
+        if not self.assistant.last_result.command:
+            console.print("Last response had no command to save.", style="yellow")
+            return

-
-
-
-
-
-
+        # Find the last user prompt from messages
+        last_prompt = ""
+        for msg in reversed(self.assistant.messages):
+            if msg["role"] == "user":
+                last_prompt = msg["content"]
+                break

-
+        self.crumb_manager.save_crumb(
+            name=name,
+            prompt=last_prompt,
+            response=self.assistant.last_result.content,
+            command=self.assistant.last_result.command,
+        )

-
-
-            await polling_session(
-                self.assistant,
-                crumb,
-                interval=interval,
-                prompt_override=prompt,
-            )
-        finally:
-            self.running_polling = False
-            console.print("Polling stopped. Returning to interactive mode.", style="green")
+        console.print(f"Saved crumb '{name}'!", style="green")
+        console.print("Generating explanation...", style="dim")

-
-
-
-
+        # Spawn subprocess to generate explanation asynchronously
+        asyncio.create_task(self._generate_crumb_explanation(name))
+
+    async def _generate_crumb_explanation(self, name: str) -> None:
+        """Generate AI explanation for a crumb."""
+        crumb = self.crumb_manager.get_crumb(name)
+        if not crumb:
             return

-
-
-
-
+        command = crumb.get("command", "")
+        if not command:
+            return
+
+        try:
+            explanation_prompt = f"Summarize this command in one line (10-15 words max): {command}"
+            result = await self.assistant.send_prompt(prompt=explanation_prompt, command_mode=False)
+            explanation = result.content.strip()
+
+            if explanation:
+                self.crumb_manager.update_explanation(name, explanation)
+                from rich.text import Text
+                text = Text()
+                text.append("Explanation added: ", style="cyan")
+                text.append(explanation)
+                console.print(text)
+        except Exception as e:
+            console.print(f"Could not generate explanation: {e}", style="yellow")
+
+    async def _add_crumb_manual(self, name: str, command: str) -> None:
+        """Manually add a crumb with a command."""
+        self.crumb_manager.save_crumb(
+            name=name,
+            prompt="Manual addition",
+            response="",
+            command=command,
         )

+        console.print(f"Added crumb '{name}'!", style="green")
+        console.print("Generating explanation...", style="dim")
+
+        # Spawn subprocess to generate explanation asynchronously
+        asyncio.create_task(self._generate_crumb_explanation(name))
+
+    async def _delete_crumb(self, name: str) -> None:
+        """Delete a crumb."""
+        if self.crumb_manager.delete_crumb(name):
+            console.print(f"Deleted crumb '{name}'.", style="green")
+        else:
+            console.print(f"Crumb '{name}' not found.", style="yellow")
+
+    async def _use_crumb(self, name: str) -> None:
+        """Recall and execute a saved crumb."""
+        crumb = self.crumb_manager.get_crumb(name)
+        if not crumb:
+            console.print(f"Crumb '{name}' not found.", style="yellow")
+            return
+
+        explanation = crumb.get("explanation", "") or "No explanation"
+        command = crumb.get("command", "") or "No command"
+
+        console.print(f"\n[bold cyan]Crumb: {name}[/bold cyan]")
+        console.print(f"Explanation: {explanation}", style="green")
+        console.print(f"Command: ", style="cyan", end="")
+        console.print(command, highlight=False)
+
+        if command and command != "No command":
+            # Execute the command
+            await run_shell_and_print(
+                self.assistant,
+                command,
+                logger=self.logger,
+                history=self.assistant.messages,
+            )
+
     async def _select_model(self, host: str = "") -> None:
         """Show available models and allow user to select one with arrow keys."""
         if PromptSession is None or KeyBindings is None:
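All of the new handlers above delegate to a CrumbManager imported from ducky/crumb.py, which this release adds (+84 lines) but which is not rendered in this diff. For orientation, the sketch below captures only the surface those calls rely on: the method names, keyword arguments, and record keys (command, explanation, created_at, prompt, response) are taken from the calls in this hunk, while the class name suffix, the JSON serialization, and the ~/.ducky/crumbs.json path are assumptions, not the package's actual implementation.

# Hypothetical stand-in for ducky.crumb.CrumbManager; only the call surface is grounded in the diff.
import json
from datetime import UTC, datetime
from pathlib import Path
from typing import Any, Dict


class CrumbManagerSketch:
    def __init__(self, path: Path = Path.home() / ".ducky" / "crumbs.json") -> None:
        # Storage location and format are assumptions for illustration.
        self.path = path
        self._crumbs: Dict[str, Dict[str, Any]] = (
            json.loads(self.path.read_text()) if self.path.exists() else {}
        )

    def _flush(self) -> None:
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.path.write_text(json.dumps(self._crumbs, indent=2))

    def has_crumb(self, name: str) -> bool:
        return name in self._crumbs

    def get_crumb(self, name: str) -> Dict[str, Any] | None:
        return self._crumbs.get(name)

    def list_crumbs(self) -> Dict[str, Dict[str, Any]]:
        return dict(self._crumbs)

    def save_crumb(self, name: str, prompt: str, response: str, command: str) -> None:
        self._crumbs[name] = {
            "prompt": prompt,
            "response": response,
            "command": command,
            "explanation": "",
            "created_at": datetime.now(UTC).isoformat(),
        }
        self._flush()

    def update_explanation(self, name: str, explanation: str) -> None:
        if name in self._crumbs:
            self._crumbs[name]["explanation"] = explanation
            self._flush()

    def delete_crumb(self, name: str) -> bool:
        if name not in self._crumbs:
            return False
        del self._crumbs[name]
        self._flush()
        return True

This is consistent with how _show_crumbs, _save_crumb, and _use_crumb treat a crumb as a plain dict, but the real persistence details should be checked against ducky/crumb.py itself.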
@@ -1056,6 +941,16 @@ async def run_single_prompt(
     return result


+def copy_to_clipboard(text: str) -> bool:
+    """Copy text to system clipboard using pbcopy on macOS."""
+    try:
+        process = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)
+        process.communicate(text.encode('utf-8'))
+        return process.returncode == 0
+    except Exception:
+        return False
+
+
 def confirm(prompt: str, default: bool = False) -> bool:
     suffix = " [Y/n]: " if default else " [y/N]: "
     try:
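Note that the new copy_to_clipboard helper shells out to pbcopy, so the clipboard step only succeeds on macOS; elsewhere it returns False and the "Command copied to clipboard" notice in the single-prompt branch of ducky() (shown further down) is simply skipped. If portability were a goal, a variant could probe for an available clipboard CLI first. The sketch below is an illustration of that idea, not code from the package, and the wl-copy/xclip fallbacks are our additions.

# Hypothetical cross-platform variant; the shipped helper only tries pbcopy.
import shutil
import subprocess


def copy_to_clipboard_portable(text: str) -> bool:
    candidates = [
        ["pbcopy"],                            # macOS
        ["wl-copy"],                           # Wayland
        ["xclip", "-selection", "clipboard"],  # X11
    ]
    for cmd in candidates:
        if shutil.which(cmd[0]):
            try:
                # Each of these tools reads the clipboard payload from stdin.
                proc = subprocess.run(cmd, input=text.encode("utf-8"))
                return proc.returncode == 0
            except OSError:
                return False
    return False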
@@ -1077,174 +972,6 @@ async def interactive_session(
     await ui.run()


-async def polling_session(
-    rubber_ducky: RubberDuck,
-    crumb: Crumb,
-    interval: int | None = None,
-    prompt_override: str | None = None,
-) -> None:
-    """Run a polling session for a crumb.
-
-    For interval polling: Runs the crumb repeatedly at the specified interval.
-    For continuous polling: Runs the crumb once in background and analyzes output periodically.
-
-    Args:
-        rubber_ducky: The RubberDuck assistant
-        crumb: The crumb to poll
-        interval: Override the crumb's default interval
-        prompt_override: Override the crumb's default poll prompt
-    """
-    # Use overrides or crumb defaults
-    poll_interval = interval or crumb.poll_interval
-    poll_prompt = prompt_override or crumb.poll_prompt or "Analyze this output."
-    poll_type = crumb.poll_type or "interval"
-
-    if not crumb.poll_prompt and not prompt_override:
-        console.print("Warning: No poll prompt configured for this crumb.", style="yellow")
-        console.print(f"Using default prompt: '{poll_prompt}'", style="dim")
-
-    if poll_type == "continuous":
-        await _continuous_polling(rubber_ducky, crumb, poll_interval, poll_prompt)
-    else:
-        await _interval_polling(rubber_ducky, crumb, poll_interval, poll_prompt)
-
-
-async def _interval_polling(
-    rubber_ducky: RubberDuck,
-    crumb: Crumb,
-    interval: int,
-    poll_prompt: str,
-) -> None:
-    """Poll by running crumb script at intervals and analyzing with AI."""
-    console.print(
-        f"\nStarting interval polling for '{crumb.name}' (interval: {interval}s)...\n"
-        f"Poll prompt: {poll_prompt}\n"
-        f"Press Ctrl+C to stop polling.\n",
-        style="bold cyan",
-    )
-
-    shutdown_event = asyncio.Event()
-
-    def signal_handler():
-        console.print("\nStopping polling...", style="yellow")
-        shutdown_event.set()
-
-    loop = asyncio.get_running_loop()
-    loop.add_signal_handler(signal.SIGINT, signal_handler)
-
-    try:
-        while not shutdown_event.is_set():
-            timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S")
-            console.print(f"\n[{timestamp}] Polling {crumb.name}...", style="bold blue")
-
-            # Run crumb script
-            result = await rubber_ducky.run_shell_command(str(crumb.path))
-
-            script_output = result.stdout if result.stdout.strip() else "(no output)"
-            if result.stderr.strip():
-                script_output += f"\n[stderr]\n{result.stderr}"
-
-            console.print(f"Script output: {len(result.stdout)} bytes\n", style="dim")
-
-            # Send to AI with prompt
-            full_prompt = f"{poll_prompt}\n\nScript output:\n{script_output}"
-            ai_result = await rubber_ducky.send_prompt(prompt=full_prompt, command_mode=False)
-
-            console.print(f"AI: {ai_result.content}", style="green", highlight=False)
-
-            # Wait for next interval
-            await asyncio.sleep(interval)
-    except asyncio.CancelledError:
-        console.print("\nPolling stopped.", style="yellow")
-    finally:
-        loop.remove_signal_handler(signal.SIGINT)
-
-
-async def _continuous_polling(
-    rubber_ducky: RubberDuck,
-    crumb: Crumb,
-    interval: int,
-    poll_prompt: str,
-) -> None:
-    """Poll by running crumb continuously and analyzing output periodically."""
-    console.print(
-        f"\nStarting continuous polling for '{crumb.name}' (analysis interval: {interval}s)...\n"
-        f"Poll prompt: {poll_prompt}\n"
-        f"Press Ctrl+C to stop polling.\n",
-        style="bold cyan",
-    )
-
-    shutdown_event = asyncio.Event()
-    accumulated_output: list[str] = []
-
-    def signal_handler():
-        console.print("\nStopping polling...", style="yellow")
-        shutdown_event.set()
-
-    loop = asyncio.get_running_loop()
-    loop.add_signal_handler(signal.SIGINT, signal_handler)
-
-    # Start crumb process
-    process = None
-    try:
-        process = await asyncio.create_subprocess_shell(
-            str(crumb.path),
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-        )
-
-        async def read_stream(stream, name: str):
-            """Read output from stream non-blocking."""
-            while not shutdown_event.is_set():
-                try:
-                    line = await asyncio.wait_for(stream.readline(), timeout=0.1)
-                    if not line:
-                        break
-                    line_text = line.decode(errors="replace")
-                    accumulated_output.append(line_text)
-                except asyncio.TimeoutError:
-                    continue
-                except Exception:
-                    break
-
-        # Read both stdout and stderr
-        asyncio.create_task(read_stream(process.stdout, "stdout"))
-        asyncio.create_task(read_stream(process.stderr, "stderr"))
-
-        # Main polling loop - analyze accumulated output
-        last_analyzed_length = 0
-
-        while not shutdown_event.is_set():
-            await asyncio.sleep(interval)
-
-            # Only analyze if there's new output
-            current_length = len(accumulated_output)
-            if current_length > last_analyzed_length:
-                timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S")
-                console.print(f"\n[{timestamp}] Polling {crumb.name}...", style="bold blue")
-
-                # Get new output since last analysis
-                new_output = "".join(accumulated_output[last_analyzed_length:])
-
-                console.print(f"New script output: {len(new_output)} bytes\n", style="dim")
-
-                # Send to AI with prompt
-                full_prompt = f"{poll_prompt}\n\nScript output:\n{new_output}"
-                ai_result = await rubber_ducky.send_prompt(prompt=full_prompt, command_mode=False)
-
-                console.print(f"AI: {ai_result.content}", style="green", highlight=False)
-
-                last_analyzed_length = current_length
-
-    except asyncio.CancelledError:
-        console.print("\nPolling stopped.", style="yellow")
-    finally:
-        if process:
-            process.kill()
-            await process.wait()
-        loop.remove_signal_handler(signal.SIGINT)
-
-
 async def ducky() -> None:
     parser = argparse.ArgumentParser()
     parser.add_argument(
@@ -1258,24 +985,12 @@ async def ducky() -> None:
         help="Run DuckY offline using a local Ollama instance on localhost:11434",
     )
     parser.add_argument(
-        "
-
-
-    )
-    parser.add_argument(
-        "--interval",
-        "-i",
-        type=int,
-        help="Override crumb's polling interval in seconds",
+        "single_prompt",
+        nargs="?",
+        help="Run a single prompt and copy the suggested command to clipboard",
         default=None,
     )
-    parser.
-        "--prompt",
-        "-p",
-        help="Override crumb's polling prompt",
-        default=None,
-    )
-    args, _ = parser.parse_known_args()
+    args = parser.parse_args()

     ensure_history_dir()
     logger = ConversationLogger(CONVERSATION_LOG_FILE)
@@ -1286,9 +1001,9 @@ async def ducky() -> None:

     # If --local flag is used, override with local settings
     if getattr(args, "local", False):
-        # Point Ollama client to local host and use
+        # Point Ollama client to local host and use qwen3 as default model
         os.environ["OLLAMA_HOST"] = "http://localhost:11434"
-        args.model = args.model or "
+        args.model = args.model or "qwen3"
         last_host = "http://localhost:11434"
     # If no model is specified, use the last used model
     elif args.model is None:
@@ -1328,31 +1043,37 @@ async def ducky() -> None:
             console.print("No input received from stdin.", style="yellow")
             return

-    # Handle
-
-
-
-
-
-
-
-        )
-
-
-
-
-
-
-
-
-
+    # Handle crumb invocation mode
+    crumb_manager = CrumbManager()
+    if args.single_prompt and crumb_manager.has_crumb(args.single_prompt):
+        crumb = crumb_manager.get_crumb(args.single_prompt)
+        if crumb:
+            explanation = crumb.get("explanation", "") or "No explanation"
+            command = crumb.get("command", "") or "No command"
+
+            console.print(f"\n[bold cyan]Crumb: {args.single_prompt}[/bold cyan]")
+            console.print(f"Explanation: {explanation}", style="green")
+            console.print(f"Command: ", style="cyan", end="")
+            console.print(command, highlight=False)
+
+            if command and command != "No command":
+                # Execute the command
+                await run_shell_and_print(
+                    rubber_ducky,
+                    command,
+                    logger=logger,
+                    history=rubber_ducky.messages,
+                )
+        return

-
-
-
-
-            prompt_override=args.prompt,
+    # Handle single prompt mode
+    if args.single_prompt:
+        result = await run_single_prompt(
+            rubber_ducky, args.single_prompt, code=code, logger=logger
         )
+        if result.command:
+            if copy_to_clipboard(result.command):
+                console.print("\n[green]✓[/green] Command copied to clipboard")
         return

     await interactive_session(rubber_ducky, logger=logger, code=code)