rubber-ducky 1.2.2-py3-none-any.whl → 1.4.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
ducky/ducky.py CHANGED
@@ -1,14 +1,16 @@
  from __future__ import annotations

  import argparse
- import os
  import asyncio
+ import json
+ import os
+ import re
+ import shlex
  import sys
+ import signal
  from dataclasses import dataclass
- from datetime import datetime
-
- # import json included earlier
- from typing import Dict
+ from datetime import UTC, datetime
+ from rich.console import Console
  from pathlib import Path
  from textwrap import dedent
  from typing import Any, Dict, List
@@ -21,21 +23,28 @@ class Crumb:
      type: str
      enabled: bool
      description: str | None = None
+     poll: bool = False
+     poll_type: str | None = None # "interval" or "continuous"
+     poll_interval: int = 2
+     poll_prompt: str | None = None


- from ollama import AsyncClient
  from contextlib import nullcontext

+ from ollama import AsyncClient
+
+ from .config import ConfigManager
+
  try: # prompt_toolkit is optional at runtime
      from prompt_toolkit import PromptSession
+     from prompt_toolkit.application import Application
      from prompt_toolkit.history import FileHistory
      from prompt_toolkit.key_binding import KeyBindings
-     from prompt_toolkit.patch_stdout import patch_stdout
-     from prompt_toolkit.application import Application
      from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
      from prompt_toolkit.layout.containers import HSplit, Window
      from prompt_toolkit.layout.controls import FormattedTextControl
      from prompt_toolkit.layout.layout import Layout
+     from prompt_toolkit.patch_stdout import patch_stdout
      from prompt_toolkit.styles import Style
      from prompt_toolkit.widgets import Box, Button, Dialog, Label, TextArea
  except ImportError: # pragma: no cover - fallback mode
@@ -47,9 +56,6 @@ except ImportError: # pragma: no cover - fallback mode
          return nullcontext()


- from rich.console import Console
-
-
  @dataclass
  class AssistantResult:
      content: str
@@ -80,61 +86,93 @@ def ensure_history_dir() -> Path:


  def load_crumbs() -> Dict[str, Crumb]:
-     """Populate the global ``CRUMBS`` dictionary from the ``CRUMBS_DIR``.
+     """Populate the global ``CRUMBS`` dictionary from both default and user crumbs.

      Each crumb is expected to be a directory containing an ``info.txt`` and a
      script file matching the ``type`` field (``shell`` → ``*.sh``).
+
+     Default crumbs are loaded from the package directory first, then user crumbs
+     are loaded from ``~/.ducky/crumbs/`` and can override default crumbs if they
+     have the same name.
      """

      global CRUMBS
      CRUMBS.clear()
-     if not CRUMBS_DIR.exists():
-         return CRUMBS

-     for crumb_dir in CRUMBS_DIR.iterdir():
-         if not crumb_dir.is_dir():
-             continue
-         info_path = crumb_dir / "info.txt"
-         if not info_path.is_file():
-             continue
-         # Parse key: value pairs
-         meta = {}
-         for line in info_path.read_text(encoding="utf-8").splitlines():
-             if ":" not in line:
+     # Helper function to load crumbs from a directory
+     def _load_from_dir(dir_path: Path) -> None:
+         if not dir_path.exists():
+             return
+
+         for crumb_dir in dir_path.iterdir():
+             if not crumb_dir.is_dir():
                  continue
-             key, val = line.split(":", 1)
-             meta[key.strip()] = val.strip()
-         name = meta.get("name", crumb_dir.name)
-         ctype = meta.get("type", "shell")
-         description = meta.get("description")
-         # Find script file: look for executable in the directory
-         script_path: Path | None = None
-         if ctype == "shell":
-             # Prefer a file named <name>.sh if present
-             candidate = crumb_dir / f"{name}.sh"
-             if candidate.is_file() and os.access(candidate, os.X_OK):
-                 script_path = candidate
-             else:
-                 # Fallback: first .sh in dir
-                 for p in crumb_dir.glob("*.sh"):
-                     if os.access(p, os.X_OK):
-                         script_path = p
-                         break
-         # Default to first file if script not found
-         if script_path is None:
-             files = list(crumb_dir.iterdir())
-             if files:
-                 script_path = files[0]
-         if script_path is None:
-             continue
-         crumb = Crumb(
-             name=name,
-             path=script_path,
-             type=ctype,
-             enabled=False,
-             description=description,
-         )
-         CRUMBS[name] = crumb
+             info_path = crumb_dir / "info.txt"
+             if not info_path.is_file():
+                 continue
+             # Parse key: value pairs
+             meta = {}
+             for line in info_path.read_text(encoding="utf-8").splitlines():
+                 if ":" not in line:
+                     continue
+                 key, val = line.split(":", 1)
+                 meta[key.strip()] = val.strip()
+             name = meta.get("name", crumb_dir.name)
+             ctype = meta.get("type", "shell")
+             description = meta.get("description")
+             poll = meta.get("poll", "").lower() == "true"
+             poll_type = meta.get("poll_type")
+             poll_interval = int(meta.get("poll_interval", 2))
+             poll_prompt = meta.get("poll_prompt")
+             # Find script file: look for executable in the directory
+             script_path: Path | None = None
+             if ctype == "shell":
+                 # Prefer a file named <name>.sh if present
+                 candidate = crumb_dir / f"{name}.sh"
+                 if candidate.is_file() and os.access(candidate, os.X_OK):
+                     script_path = candidate
+                 else:
+                     # Fallback: first .sh in dir
+                     for p in crumb_dir.glob("*.sh"):
+                         if os.access(p, os.X_OK):
+                             script_path = p
+                             break
+             # Default to first file if script not found
+             if script_path is None:
+                 files = list(crumb_dir.iterdir())
+                 if files:
+                     script_path = files[0]
+             if script_path is None:
+                 continue
+             crumb = Crumb(
+                 name=name,
+                 path=script_path,
+                 type=ctype,
+                 enabled=False,
+                 description=description,
+                 poll=poll,
+                 poll_type=poll_type,
+                 poll_interval=poll_interval,
+                 poll_prompt=poll_prompt,
+             )
+             CRUMBS[name] = crumb
+
+     # Try to load from package directory (where ducky is installed)
+     try:
+         # Try to locate the crumbs directory relative to the ducky package
+         import ducky
+         # Get the directory containing the ducky package
+         ducky_dir = Path(ducky.__file__).parent
+         # Check if crumbs exists in the same directory as ducky package
+         default_crumbs_dir = ducky_dir.parent / "crumbs"
+         if default_crumbs_dir.exists():
+             _load_from_dir(default_crumbs_dir)
+     except Exception:
+         # If package directory loading fails, continue without default crumbs
+         pass
+
+     # Load user crumbs (these can override default crumbs with the same name)
+     _load_from_dir(CRUMBS_DIR)

      return CRUMBS

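For orientation, here is a hypothetical user crumb that the load_crumbs() rewrite above would discover under ~/.ducky/crumbs/. The "disk-watch" name, script, and values are invented for illustration; only the info.txt keys (name, type, description, poll, poll_type, poll_interval, poll_prompt) come from the parser shown above:

    # every name and value below is made up for illustration
    mkdir -p ~/.ducky/crumbs/disk-watch
    cat > ~/.ducky/crumbs/disk-watch/info.txt <<'EOF'
    name: disk-watch
    type: shell
    description: Report disk usage for the home directory
    poll: true
    poll_type: interval
    poll_interval: 5
    poll_prompt: Flag anything that looks close to full.
    EOF
    printf '#!/usr/bin/env bash\ndf -h ~\n' > ~/.ducky/crumbs/disk-watch/disk-watch.sh
    chmod +x ~/.ducky/crumbs/disk-watch/disk-watch.sh   # loader checks os.X_OK

Note that poll is treated as true only for the literal string "true" (case-insensitive), and the loader prefers an executable named <name>.sh before falling back to any other file in the directory.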
@@ -165,9 +203,7 @@ class ConversationLogger:
          )

      def _append(self, entry: Dict[str, Any]) -> None:
-         import json
-
-         entry["timestamp"] = datetime.utcnow().isoformat()
+         entry["timestamp"] = datetime.now(UTC).isoformat()
          with self.log_path.open("a", encoding="utf-8") as handle:
              handle.write(json.dumps(entry, ensure_ascii=False))
              handle.write("\n")
@@ -223,7 +259,11 @@ async def run_shell_and_print(

  class RubberDuck:
      def __init__(
-         self, model: str, quick: bool = False, command_mode: bool = False
+         self,
+         model: str,
+         quick: bool = False,
+         command_mode: bool = False,
+         host: str = "",
      ) -> None:
          self.system_prompt = dedent(
              """
@@ -238,6 +278,16 @@ class RubberDuck:
              changes rather than responding to each line individually.
              """
          ).strip()
+
+         # Set OLLAMA_HOST based on whether it's a cloud model
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+         elif "-cloud" in model:
+             os.environ["OLLAMA_HOST"] = "https://ollama.com"
+         elif "OLLAMA_HOST" not in os.environ:
+             # Default to localhost if not set and not a cloud model
+             os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+
          self.client = AsyncClient()
          self.model = model
          self.quick = quick
@@ -261,17 +311,19 @@ class RubberDuck:
          """
          # Start with the base system prompt
          prompt_lines = [self.system_prompt]
-
+
          if self.crumbs:
-             prompt_lines.append("\nCrumbs are simple scripts you can run with bash, uv, or bun.")
+             prompt_lines.append(
+                 "\nCrumbs are simple scripts you can run with bash, uv, or bun."
+             )
              prompt_lines.append("Crumbs:")
              for c in self.crumbs.values():
                  description = c.description or "no description"
                  prompt_lines.append(f"- {c.name}: {description}")
-
+
          # Update the system prompt
          self.system_prompt = "\n".join(prompt_lines)
-
+
          # Update the first system message in the messages list
          if self.messages and self.messages[0]["role"] == "system":
              self.messages[0]["content"] = self.system_prompt
@@ -280,7 +332,7 @@ class RubberDuck:
              self.messages.insert(0, {"role": "system", "content": self.system_prompt})

      async def send_prompt(
-         self, prompt: str | None = None, code: str | None = None
+         self, prompt: str | None = None, code: str | None = None, command_mode: bool | None = None
      ) -> AssistantResult:
          user_content = (prompt or "").strip()

@@ -292,7 +344,10 @@ class RubberDuck:
          if self.quick and user_content:
              user_content += ". Return a command and be extremely concise"

-         if self.command_mode:
+         # Use provided command_mode, or fall back to self.command_mode
+         effective_command_mode = command_mode if command_mode is not None else self.command_mode
+
+         if effective_command_mode:
              instruction = (
                  "Return a single bash command that accomplishes the task. Unless user wants something els"
                  "Do not include explanations or formatting other than the command itself."
@@ -323,7 +378,7 @@ class RubberDuck:
          if thinking:
              self.last_thinking = thinking

-         command = self._extract_command(content) if self.command_mode else None
+         command = self._extract_command(content) if effective_command_mode else None

          return AssistantResult(content=content, command=command, thinking=thinking)

@@ -375,20 +430,54 @@ class RubberDuck:

          return command or None

-     async def list_models(self) -> list[str]:
+     async def list_models(self, host: str = "") -> list[str]:
          """List available Ollama models."""
+         # Set the host temporarily for this operation
+         original_host = os.environ.get("OLLAMA_HOST", "")
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+             self.client = AsyncClient(host)
          try:
              response = await self.client.list()
-             return [model.model for model in response.models]
+             models = []
+             for m in response.models:
+                 models.append(m.model)
+             return models
          except Exception as e:
              console.print(f"Error listing models: {e}", style="red")
              return []
-
-     def switch_model(self, model_name: str) -> None:
+         finally:
+             # Restore original host
+             if original_host:
+                 os.environ["OLLAMA_HOST"] = original_host
+                 self.client = AsyncClient(original_host)
+             elif "OLLAMA_HOST" in os.environ:
+                 del os.environ["OLLAMA_HOST"]
+                 self.client = AsyncClient()
+
+     def switch_model(self, model_name: str, host: str = "") -> None:
          """Switch to a different Ollama model."""
          self.model = model_name
+
+         # Set the host based on the model or explicit host
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+             self.client = AsyncClient(host)
+         elif "-cloud" in model_name:
+             os.environ["OLLAMA_HOST"] = "https://ollama.com"
+             self.client = AsyncClient("https://ollama.com")
+         else:
+             os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+             self.client = AsyncClient()
+
          console.print(f"Switched to model: {model_name}", style="green")

+     def clear_history(self) -> None:
+         """Reset conversation history to the initial system prompt."""
+         if self.messages:
+             self.messages = [self.messages[0]]
+         console.print("Conversation history cleared.", style="green")
+

  class InlineInterface:
      def __init__(
@@ -407,6 +496,7 @@ class InlineInterface:
          self.pending_command: str | None = None
          self.session: PromptSession | None = None
          self.selected_model: str | None = None
+         self.running_polling: bool = False

          if (
              PromptSession is not None
@@ -439,6 +529,11 @@ class InlineInterface:
          def _(event) -> None:
              event.app.exit(result="__RUN_LAST__")

+         @kb.add("c-s")
+         def _(event) -> None:
+             # This will be handled in the processing loop
+             event.app.exit(result="__COPY_LAST__")
+
          return kb

      async def run(self) -> None:
@@ -451,7 +546,7 @@ class InlineInterface:
              return

          console.print(
-             "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!cmd' runs shell • Ctrl+D exits",
+             "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!<cmd>' runs shell • Ctrl+D exits • Ctrl+S copies last command",
              style="dim",
          )
          while True:
@@ -462,7 +557,7 @@ class InlineInterface:
                  console.print()
                  console.print("Exiting.", style="dim")
                  return
-             except KeyboardInterrupt:
+             except (KeyboardInterrupt, asyncio.CancelledError):
                  console.print()
                  console.print("Interrupted. Press Ctrl+D to exit.", style="yellow")
                  continue
@@ -471,8 +566,53 @@ class InlineInterface:
                  await self._run_last_command()
                  continue

+             if text == "__COPY_LAST__":
+                 await self._copy_last_command()
+                 continue
+
              await self._process_text(text)

+     async def _copy_last_command(self) -> None:
+         """Copy the last suggested command to clipboard."""
+         if not self.last_command:
+             console.print("No suggested command available to copy.", style="yellow")
+             return
+
+         try:
+             import subprocess
+             import platform
+
+             command_to_copy = self.last_command
+
+             system = platform.system()
+             if system == "Darwin": # macOS
+                 process = subprocess.Popen(["pbcopy"], stdin=subprocess.PIPE, text=True)
+                 process.communicate(input=command_to_copy)
+             elif system == "Windows":
+                 process = subprocess.Popen(["clip"], stdin=subprocess.PIPE, text=True)
+                 process.communicate(input=command_to_copy)
+             else: # Linux and others
+                 # Try xclip first, then xsel as fallback
+                 try:
+                     process = subprocess.Popen(
+                         ["xclip", "-selection", "clipboard"],
+                         stdin=subprocess.PIPE,
+                         text=True,
+                     )
+                     process.communicate(input=command_to_copy)
+                 except FileNotFoundError:
+                     # Try xsel as fallback
+                     process = subprocess.Popen(
+                         ["xsel", "-b", "-i"], stdin=subprocess.PIPE, text=True
+                     )
+                     process.communicate(input=command_to_copy)
+
+             console.print(f"Copied to clipboard: {command_to_copy}", style="green")
+         except Exception as e:
+             console.print(f"Failed to copy to clipboard: {e}", style="red")
+             console.print("You can manually copy the last command:", style="dim")
+             console.print(f" {self.last_command}", style="bold")
+
      async def _run_last_command(self) -> None:
          if not self.last_command:
              console.print("No suggested command available yet.", style="yellow")
@@ -483,6 +623,9 @@ class InlineInterface:
              logger=self.logger,
              history=self.assistant.messages,
          )
+         # Add the command to prompt history so user can recall it with up arrow
+         if self.session and self.session.history and self.last_command:
+             self.session.history.append_string(self.last_command)
          self.last_shell_output = True
          self.pending_command = None
          self.last_command = None
@@ -503,17 +646,49 @@ class InlineInterface:
              await self._run_last_command()
              return

+         if stripped.lower() in {"/clear", "/reset"}:
+             await self._clear_history()
+             return
+
          if stripped.lower() == "/model":
              await self._select_model()
              return

+         if stripped.lower() == "/local":
+             await self._select_model(host="http://localhost:11434")
+             return
+
+         if stripped.lower() == "/cloud":
+             await self._select_model(host="https://ollama.com")
+             return
+
+         if stripped.lower() == "/help":
+             await self._show_help()
+             return
+
+         if stripped.lower() == "/crumbs":
+             await self._show_crumbs()
+             return
+
+         if stripped.lower() == "/stop-poll":
+             await self._stop_polling()
+             return
+
+         if stripped.startswith("/poll"):
+             await self._handle_poll_command(stripped)
+             return
+
          if stripped.startswith("!"):
+             command = stripped[1:].strip()
              await run_shell_and_print(
                  self.assistant,
-                 stripped[1:].strip(),
+                 command,
                  logger=self.logger,
                  history=self.assistant.messages,
              )
+             # Add the command to prompt history so user can recall it with up arrow
+             if self.session and self.session.history and command:
+                 self.session.history.append_string(command)
              self.last_shell_output = True
              self.pending_command = None
              return
@@ -549,29 +724,261 @@ class InlineInterface:
          )
          self.last_shell_output = None

-     async def _select_model(self) -> None:
+     async def _show_help(self) -> None:
+         """Display help information for all available commands."""
+         console.print("\nDucky CLI Help", style="bold blue")
+         console.print("===============", style="bold blue")
+         console.print()
+
+         commands = [
+             ("[bold]/help[/bold]", "Show this help message"),
+             ("[bold]/crumbs[/bold]", "List all available crumbs"),
+             ("[bold]/model[/bold]", "Select a model interactively (local or cloud)"),
+             (
+                 "[bold]/local[/bold]",
+                 "List and select from local models (localhost:11434)",
+             ),
+             ("[bold]/cloud[/bold]", "List and select from cloud models (ollama.com)"),
+             (
+                 "[bold]/clear[/bold] or [bold]/reset[/bold]",
+                 "Clear conversation history",
+             ),
+             (
+                 "[bold]/poll <crumb>[/bold]",
+                 "Start polling session for a crumb",
+             ),
+             (
+                 "[bold]/poll <crumb> -i 5[/bold]",
+                 "Start polling with 5s interval",
+             ),
+             (
+                 "[bold]/poll <crumb> -p <text>[/bold]",
+                 "Start polling with custom prompt",
+             ),
+             (
+                 "[bold]/stop-poll[/bold]",
+                 "Stop current polling session",
+             ),
+             (
+                 "[bold]/run[/bold]",
+                 "Re-run the last suggested command",
+             ),
+             (
+                 "[bold]Empty Enter[/bold]",
+                 "Re-run suggested command or explain last output",
+             ),
+             ("[bold]![<command>][/bold]", "Execute a shell command directly"),
+             ("[bold]Ctrl+D[/bold]", "Exit the application"),
+             ("[bold]Ctrl+R[/bold]", "Re-run the last suggested command"),
+             ("[bold]Ctrl+S[/bold]", "Copy the last suggested command to clipboard"),
+         ]
+
+         for command, description in commands:
+             console.print(f"{command:<45} {description}")
+
+         console.print()
+
+     async def _show_crumbs(self) -> None:
+         """Display all available crumbs."""
+         crumbs = self.assistant.crumbs
+
+         if not crumbs:
+             console.print("No crumbs available.", style="yellow")
+             return
+
+         console.print("\nAvailable Crumbs", style="bold blue")
+         console.print("===============", style="bold blue")
+         console.print()
+
+         # Group crumbs by source (default vs user)
+         default_crumbs = []
+         user_crumbs = []
+
+         for name, crumb in sorted(crumbs.items()):
+             path_str = str(crumb.path)
+             if "crumbs/" in path_str and "/.ducky/crumbs/" not in path_str:
+                 default_crumbs.append((name, crumb))
+             else:
+                 user_crumbs.append((name, crumb))
+
+         # Show default crumbs
+         if default_crumbs:
+             console.print("[bold cyan]Default Crumbs (shipped with ducky):[/bold cyan]", style="cyan")
+             for name, crumb in default_crumbs:
+                 description = crumb.description or "No description"
+                 # Check if it has polling enabled
+                 poll_info = " [dim](polling enabled)[/dim]" if crumb.poll else ""
+                 console.print(f" [bold]{name}[/bold]{poll_info}: {description}")
+             console.print()
+
+         # Show user crumbs
+         if user_crumbs:
+             console.print("[bold green]Your Crumbs:[/bold green]", style="green")
+             for name, crumb in user_crumbs:
+                 description = crumb.description or "No description"
+                 # Check if it has polling enabled
+                 poll_info = " [dim](polling enabled)[/dim]" if crumb.poll else ""
+                 console.print(f" [bold]{name}[/bold]{poll_info}: {description}")
+             console.print()
+
+         console.print(f"[dim]Total: {len(crumbs)} crumbs available[/dim]")
+
+     async def _clear_history(self) -> None:
+         self.assistant.clear_history()
+         self.last_command = None
+         self.pending_command = None
+         self.last_shell_output = None
+
+     async def _handle_poll_command(self, command: str) -> None:
+         """Handle /poll command with optional arguments."""
+         if self.running_polling:
+             console.print(
+                 "A polling session is already running. Use /stop-poll first.",
+                 style="yellow",
+             )
+             return
+
+         # Parse command: /poll <crumb> [-i interval] [-p prompt]
+         parts = command.split()
+         if len(parts) < 2:
+             console.print("Usage: /poll <crumb-name> [-i interval] [-p prompt]", style="yellow")
+             console.print("Example: /poll log-crumb -i 5", style="dim")
+             return
+
+         crumb_name = parts[1]
+         interval = None
+         prompt = None
+
+         # Parse optional arguments
+         i = 2
+         while i < len(parts):
+             if parts[i] in {"-i", "--interval"} and i + 1 < len(parts):
+                 try:
+                     interval = int(parts[i + 1])
+                     i += 2
+                 except ValueError:
+                     console.print("Invalid interval value.", style="red")
+                     return
+             elif parts[i] in {"-p", "--prompt"} and i + 1 < len(parts):
+                 prompt = " ".join(parts[i + 1:])
+                 break
+             else:
+                 i += 1
+
+         if crumb_name not in self.assistant.crumbs:
+             console.print(f"Crumb '{crumb_name}' not found.", style="red")
+             console.print(
+                 f"Available crumbs: {', '.join(self.assistant.crumbs.keys())}",
+                 style="yellow",
+             )
+             return
+
+         crumb = self.assistant.crumbs[crumb_name]
+
+         if not crumb.poll:
+             console.print(
+                 f"Warning: Crumb '{crumb_name}' doesn't have polling enabled.",
+                 style="yellow",
+             )
+             console.print("Proceeding anyway with default polling mode.", style="dim")
+
+         console.print("Starting polling session... Press Ctrl+C to stop.", style="bold cyan")
+
+         self.running_polling = True
+         try:
+             await polling_session(
+                 self.assistant,
+                 crumb,
+                 interval=interval,
+                 prompt_override=prompt,
+             )
+         finally:
+             self.running_polling = False
+             console.print("Polling stopped. Returning to interactive mode.", style="green")
+
+     async def _stop_polling(self) -> None:
+         """Handle /stop-poll command."""
+         if not self.running_polling:
+             console.print("No polling session is currently running.", style="yellow")
+             return
+
+         # This is handled by the signal handler in polling_session
+         console.print(
+             "Stopping polling... (press Ctrl+C if it doesn't stop automatically)",
+             style="yellow",
+         )
+
+     async def _select_model(self, host: str = "") -> None:
          """Show available models and allow user to select one with arrow keys."""
          if PromptSession is None or KeyBindings is None:
-             console.print("Model selection requires prompt_toolkit to be installed.", style="yellow")
+             console.print(
+                 "Model selection requires prompt_toolkit to be installed.",
+                 style="yellow",
+             )
              return

-         models = await self.assistant.list_models()
+         # Show current model
+         console.print(f"Current model: {self.assistant.model}", style="bold green")
+
+         # If no host specified, give user a choice between local and cloud
+         if not host:
+             console.print("\nSelect model type:", style="bold")
+             console.print("1. Local models (localhost:11434)")
+             console.print("2. Cloud models (ollama.com)")
+             console.print("Press Esc to cancel", style="dim")
+
+             try:
+                 choice = await asyncio.to_thread(input, "Enter choice (1 or 2): ")
+                 choice = choice.strip()
+
+                 if choice.lower() == "esc":
+                     console.print("Model selection cancelled.", style="yellow")
+                     return
+
+                 if choice == "1":
+                     host = "http://localhost:11434"
+                 elif choice == "2":
+                     host = "https://ollama.com"
+                 else:
+                     console.print("Invalid choice. Please select 1 or 2.", style="red")
+                     return
+             except (ValueError, EOFError):
+                 console.print("Invalid input.", style="red")
+                 return
+
+         models = await self.assistant.list_models(host)
          if not models:
-             console.print("No models available.", style="yellow")
+             if host == "http://localhost:11434":
+                 console.print(
+                     "No local models available. Is Ollama running?", style="red"
+                 )
+                 console.print("Start Ollama with: ollama serve", style="yellow")
+             else:
+                 console.print("No models available.", style="yellow")
              return

-         # Simple approach: show models as a list and let user type the number
-         console.print("Available models:", style="bold")
+         if host == "https://ollama.com":
+             console.print("\nAvailable cloud models:", style="bold")
+         elif host == "http://localhost:11434":
+             console.print("\nAvailable local models:", style="bold")
+         else:
+             console.print("\nAvailable models:", style="bold")
+
          for i, model in enumerate(models, 1):
              if model == self.assistant.model:
                  console.print(f"{i}. {model} (current)", style="green")
              else:
                  console.print(f"{i}. {model}")

+         console.print("Press Esc to cancel", style="dim")
          try:
              choice = await asyncio.to_thread(input, "Enter model number or name: ")
              choice = choice.strip()
-
+
+             if choice.lower() == "esc":
+                 console.print("Model selection cancelled.", style="yellow")
+                 return
+
              # Check if it's a number
              if choice.isdigit():
                  index = int(choice) - 1
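One detail of the /poll argument parser in _handle_poll_command above: the -p/--prompt branch joins every remaining token and then breaks out of the loop, so the prompt must be the last option on the line. A hypothetical session, reusing the invented disk-watch crumb from the earlier sketch:

    /poll disk-watch -i 10 -p Watch for filesystems that are nearly full
    /stop-poll

An -i placed after -p would be folded into the prompt text rather than parsed as an interval.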
@@ -587,8 +994,15 @@ class InlineInterface:
              else:
                  console.print("Invalid model name.", style="red")
                  return
-
-             self.assistant.switch_model(selected_model)
+
+             self.assistant.switch_model(selected_model, host)
+
+             # Save the selected model and host to config
+             config_manager = ConfigManager()
+             config_manager.save_last_model(
+                 selected_model,
+                 host or os.environ.get("OLLAMA_HOST", "http://localhost:11434"),
+             )
          except (ValueError, EOFError):
              console.print("Invalid input.", style="red")

@@ -600,7 +1014,7 @@ class InlineInterface:
                  console.print()
                  console.print("Exiting.", style="dim")
                  return
-             except KeyboardInterrupt:
+             except (KeyboardInterrupt, asyncio.CancelledError):
                  console.print()
                  console.print("Interrupted. Press Ctrl+D to exit.", style="yellow")
                  continue
@@ -663,29 +1077,229 @@ async def interactive_session(
      await ui.run()


+ async def polling_session(
+     rubber_ducky: RubberDuck,
+     crumb: Crumb,
+     interval: int | None = None,
+     prompt_override: str | None = None,
+ ) -> None:
+     """Run a polling session for a crumb.
+
+     For interval polling: Runs the crumb repeatedly at the specified interval.
+     For continuous polling: Runs the crumb once in background and analyzes output periodically.
+
+     Args:
+         rubber_ducky: The RubberDuck assistant
+         crumb: The crumb to poll
+         interval: Override the crumb's default interval
+         prompt_override: Override the crumb's default poll prompt
+     """
+     # Use overrides or crumb defaults
+     poll_interval = interval or crumb.poll_interval
+     poll_prompt = prompt_override or crumb.poll_prompt or "Analyze this output."
+     poll_type = crumb.poll_type or "interval"
+
+     if not crumb.poll_prompt and not prompt_override:
+         console.print("Warning: No poll prompt configured for this crumb.", style="yellow")
+         console.print(f"Using default prompt: '{poll_prompt}'", style="dim")
+
+     if poll_type == "continuous":
+         await _continuous_polling(rubber_ducky, crumb, poll_interval, poll_prompt)
+     else:
+         await _interval_polling(rubber_ducky, crumb, poll_interval, poll_prompt)
+
+
+ async def _interval_polling(
+     rubber_ducky: RubberDuck,
+     crumb: Crumb,
+     interval: int,
+     poll_prompt: str,
+ ) -> None:
+     """Poll by running crumb script at intervals and analyzing with AI."""
+     console.print(
+         f"\nStarting interval polling for '{crumb.name}' (interval: {interval}s)...\n"
+         f"Poll prompt: {poll_prompt}\n"
+         f"Press Ctrl+C to stop polling.\n",
+         style="bold cyan",
+     )
+
+     shutdown_event = asyncio.Event()
+
+     def signal_handler():
+         console.print("\nStopping polling...", style="yellow")
+         shutdown_event.set()
+
+     loop = asyncio.get_running_loop()
+     loop.add_signal_handler(signal.SIGINT, signal_handler)
+
+     try:
+         while not shutdown_event.is_set():
+             timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S")
+             console.print(f"\n[{timestamp}] Polling {crumb.name}...", style="bold blue")
+
+             # Run crumb script
+             result = await rubber_ducky.run_shell_command(str(crumb.path))
+
+             script_output = result.stdout if result.stdout.strip() else "(no output)"
+             if result.stderr.strip():
+                 script_output += f"\n[stderr]\n{result.stderr}"
+
+             console.print(f"Script output: {len(result.stdout)} bytes\n", style="dim")
+
+             # Send to AI with prompt
+             full_prompt = f"{poll_prompt}\n\nScript output:\n{script_output}"
+             ai_result = await rubber_ducky.send_prompt(prompt=full_prompt, command_mode=False)
+
+             console.print(f"AI: {ai_result.content}", style="green", highlight=False)
+
+             # Wait for next interval
+             await asyncio.sleep(interval)
+     except asyncio.CancelledError:
+         console.print("\nPolling stopped.", style="yellow")
+     finally:
+         loop.remove_signal_handler(signal.SIGINT)
+
+
+ async def _continuous_polling(
+     rubber_ducky: RubberDuck,
+     crumb: Crumb,
+     interval: int,
+     poll_prompt: str,
+ ) -> None:
+     """Poll by running crumb continuously and analyzing output periodically."""
+     console.print(
+         f"\nStarting continuous polling for '{crumb.name}' (analysis interval: {interval}s)...\n"
+         f"Poll prompt: {poll_prompt}\n"
+         f"Press Ctrl+C to stop polling.\n",
+         style="bold cyan",
+     )
+
+     shutdown_event = asyncio.Event()
+     accumulated_output: list[str] = []
+
+     def signal_handler():
+         console.print("\nStopping polling...", style="yellow")
+         shutdown_event.set()
+
+     loop = asyncio.get_running_loop()
+     loop.add_signal_handler(signal.SIGINT, signal_handler)
+
+     # Start crumb process
+     process = None
+     try:
+         process = await asyncio.create_subprocess_shell(
+             str(crumb.path),
+             stdout=asyncio.subprocess.PIPE,
+             stderr=asyncio.subprocess.PIPE,
+         )
+
+         async def read_stream(stream, name: str):
+             """Read output from stream non-blocking."""
+             while not shutdown_event.is_set():
+                 try:
+                     line = await asyncio.wait_for(stream.readline(), timeout=0.1)
+                     if not line:
+                         break
+                     line_text = line.decode(errors="replace")
+                     accumulated_output.append(line_text)
+                 except asyncio.TimeoutError:
+                     continue
+                 except Exception:
+                     break
+
+         # Read both stdout and stderr
+         asyncio.create_task(read_stream(process.stdout, "stdout"))
+         asyncio.create_task(read_stream(process.stderr, "stderr"))
+
+         # Main polling loop - analyze accumulated output
+         last_analyzed_length = 0
+
+         while not shutdown_event.is_set():
+             await asyncio.sleep(interval)
+
+             # Only analyze if there's new output
+             current_length = len(accumulated_output)
+             if current_length > last_analyzed_length:
+                 timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S")
+                 console.print(f"\n[{timestamp}] Polling {crumb.name}...", style="bold blue")
+
+                 # Get new output since last analysis
+                 new_output = "".join(accumulated_output[last_analyzed_length:])
+
+                 console.print(f"New script output: {len(new_output)} bytes\n", style="dim")
+
+                 # Send to AI with prompt
+                 full_prompt = f"{poll_prompt}\n\nScript output:\n{new_output}"
+                 ai_result = await rubber_ducky.send_prompt(prompt=full_prompt, command_mode=False)
+
+                 console.print(f"AI: {ai_result.content}", style="green", highlight=False)
+
+                 last_analyzed_length = current_length
+
+     except asyncio.CancelledError:
+         console.print("\nPolling stopped.", style="yellow")
+     finally:
+         if process:
+             process.kill()
+             await process.wait()
+         loop.remove_signal_handler(signal.SIGINT)
+
+
  async def ducky() -> None:
      parser = argparse.ArgumentParser()
      parser.add_argument(
          "--directory", "-d", help="The directory to be processed", default=None
      )
-     parser.add_argument(
-         "--model", "-m", help="The model to be used", default="qwen3-coder:480b-cloud"
-     )
+     parser.add_argument("--model", "-m", help="The model to be used", default=None)
      parser.add_argument(
          "--local",
          "-l",
          action="store_true",
          help="Run DuckY offline using a local Ollama instance on localhost:11434",
      )
+     parser.add_argument(
+         "--poll",
+         help="Start polling mode for the specified crumb",
+         default=None,
+     )
+     parser.add_argument(
+         "--interval",
+         "-i",
+         type=int,
+         help="Override crumb's polling interval in seconds",
+         default=None,
+     )
+     parser.add_argument(
+         "--prompt",
+         "-p",
+         help="Override crumb's polling prompt",
+         default=None,
+     )
      args, _ = parser.parse_known_args()

      ensure_history_dir()
      logger = ConversationLogger(CONVERSATION_LOG_FILE)
+
+     # Load the last used model from config if no model is specified
+     config_manager = ConfigManager()
+     last_model, last_host = config_manager.get_last_model()
+
+     # If --local flag is used, override with local settings
      if getattr(args, "local", False):
          # Point Ollama client to local host and use gemma3 as default model
          os.environ["OLLAMA_HOST"] = "http://localhost:11434"
-         args.model = "gpt-oss:20b"
-     rubber_ducky = RubberDuck(model=args.model, quick=False, command_mode=True)
+         args.model = args.model or "gemma2:9b"
+         last_host = "http://localhost:11434"
+     # If no model is specified, use the last used model
+     elif args.model is None:
+         args.model = last_model
+         # Set the host based on the last used host
+         if last_host:
+             os.environ["OLLAMA_HOST"] = last_host
+
+     rubber_ducky = RubberDuck(
+         model=args.model, quick=False, command_mode=True, host=last_host
+     )

      code = read_files_from_dir(args.directory) if args.directory else None

@@ -714,6 +1328,33 @@ async def ducky() -> None:
          console.print("No input received from stdin.", style="yellow")
          return

+     # Handle polling mode
+     if args.poll:
+         crumb_name = args.poll
+         if crumb_name not in rubber_ducky.crumbs:
+             console.print(f"Crumb '{crumb_name}' not found.", style="red")
+             console.print(
+                 f"Available crumbs: {', '.join(rubber_ducky.crumbs.keys())}",
+                 style="yellow",
+             )
+             return
+
+         crumb = rubber_ducky.crumbs[crumb_name]
+         if not crumb.poll:
+             console.print(
+                 f"Warning: Crumb '{crumb_name}' doesn't have polling enabled.",
+                 style="yellow",
+             )
+             console.print("Proceeding anyway with default polling mode.", style="dim")
+
+         await polling_session(
+             rubber_ducky,
+             crumb,
+             interval=args.interval,
+             prompt_override=args.prompt,
+         )
+         return
+
      await interactive_session(rubber_ducky, logger=logger, code=code)

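Assuming the package installs a ducky console entry point (the packaging metadata is not part of this diff), the new polling flags above would be exercised like this, again with the invented crumb name:

    ducky --poll disk-watch --interval 10 --prompt "Summarize anything unusual."
    ducky --local                       # pin OLLAMA_HOST to http://localhost:11434
    ducky -m qwen3-coder:480b-cloud     # a "-cloud" model name routes OLLAMA_HOST to https://ollama.com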