rubber-ducky 1.2.1__py3-none-any.whl → 1.3.0__py3-none-any.whl

ducky/__init__.py CHANGED
@@ -1 +1,3 @@
- from .ducky import ducky
+ from .ducky import ducky, main
+
+ __all__ = ["ducky", "main"]
ducky/config.py ADDED
@@ -0,0 +1,60 @@
+ import json
+ import os
+ from pathlib import Path
+ from typing import Dict, Any, Optional
+
+
+ class ConfigManager:
+     """Manages Ducky configuration including model preferences."""
+
+     def __init__(self, config_dir: Optional[Path] = None):
+         if config_dir is None:
+             config_dir = Path.home() / ".ducky"
+         self.config_dir = config_dir
+         self.config_file = self.config_dir / "config"
+         self.config_dir.mkdir(parents=True, exist_ok=True)
+
+     def load_config(self) -> Dict[str, Any]:
+         """Load configuration from file, returning defaults if not found."""
+         default_config = {
+             "last_model": "qwen3-coder:480b-cloud",
+             "last_host": "https://ollama.com"
+         }
+
+         if not self.config_file.exists():
+             return default_config
+
+         try:
+             with open(self.config_file, 'r') as f:
+                 config = json.load(f)
+             # Ensure all required keys are present
+             for key in default_config:
+                 if key not in config:
+                     config[key] = default_config[key]
+             return config
+         except (json.JSONDecodeError, IOError):
+             return default_config
+
+     def save_config(self, config: Dict[str, Any]) -> None:
+         """Save configuration to file."""
+         try:
+             with open(self.config_file, 'w') as f:
+                 json.dump(config, f, indent=2)
+         except IOError as e:
+             print(f"Warning: Could not save config: {e}")
+
+     def get_last_model(self) -> tuple[str, str]:
+         """Get the last used model and host.
+
+         Returns:
+             Tuple of (model_name, host)
+         """
+         config = self.load_config()
+         return config.get("last_model", "qwen3-coder:480b-cloud"), config.get("last_host", "https://ollama.com")
+
+     def save_last_model(self, model_name: str, host: str) -> None:
+         """Save the last used model and host."""
+         config = self.load_config()
+         config["last_model"] = model_name
+         config["last_host"] = host
+         self.save_config(config)
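
For orientation, a minimal sketch of how the new `ConfigManager` is meant to be used; paths and values mirror the defaults above, and the snippet is illustrative rather than part of the package:

```python
from ducky.config import ConfigManager

# Uses ~/.ducky/config by default; the directory is created on first use.
config = ConfigManager()

# Persist the model/host the user just picked...
config.save_last_model("qwen3-coder:480b-cloud", "https://ollama.com")

# ...and read them back on the next launch. get_last_model() falls back to
# the defaults baked into load_config() if the file is missing or corrupt.
model, host = config.get_last_model()
print(model, host)
```
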
ducky/ducky.py CHANGED
@@ -3,21 +3,43 @@ from __future__ import annotations
  import argparse
  import asyncio
  import json
+ import os
  import sys
  from dataclasses import dataclass
- from datetime import datetime
+ from datetime import UTC, datetime
+ from rich.console import Console
  from pathlib import Path
  from textwrap import dedent
  from typing import Any, Dict, List

- from ollama import AsyncClient
+
+ @dataclass
+ class Crumb:
+     name: str
+     path: Path
+     type: str
+     enabled: bool
+     description: str | None = None
+
+
  from contextlib import nullcontext

+ from ollama import AsyncClient
+
+ from .config import ConfigManager
+
  try:  # prompt_toolkit is optional at runtime
      from prompt_toolkit import PromptSession
+     from prompt_toolkit.application import Application
      from prompt_toolkit.history import FileHistory
      from prompt_toolkit.key_binding import KeyBindings
+     from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
+     from prompt_toolkit.layout.containers import HSplit, Window
+     from prompt_toolkit.layout.controls import FormattedTextControl
+     from prompt_toolkit.layout.layout import Layout
      from prompt_toolkit.patch_stdout import patch_stdout
+     from prompt_toolkit.styles import Style
+     from prompt_toolkit.widgets import Box, Button, Dialog, Label, TextArea
  except ImportError:  # pragma: no cover - fallback mode
      PromptSession = None  # type: ignore[assignment]
      FileHistory = None  # type: ignore[assignment]
@@ -27,9 +49,6 @@ except ImportError:  # pragma: no cover - fallback mode
          return nullcontext()


- from rich.console import Console
-
-
  @dataclass
  class AssistantResult:
      content: str
@@ -48,14 +67,77 @@ class ShellResult:
  HISTORY_DIR = Path.home() / ".ducky"
  PROMPT_HISTORY_FILE = HISTORY_DIR / "prompt_history"
  CONVERSATION_LOG_FILE = HISTORY_DIR / "conversation.log"
+ CRUMBS_DIR = HISTORY_DIR / "crumbs"
+ CRUMBS: Dict[str, Crumb] = {}
  console = Console()


  def ensure_history_dir() -> Path:
      HISTORY_DIR.mkdir(parents=True, exist_ok=True)
+     CRUMBS_DIR.mkdir(parents=True, exist_ok=True)
      return HISTORY_DIR


+ def load_crumbs() -> Dict[str, Crumb]:
+     """Populate the global ``CRUMBS`` dictionary from the ``CRUMBS_DIR``.
+
+     Each crumb is expected to be a directory containing an ``info.txt`` and a
+     script file matching the ``type`` field (``shell`` → ``*.sh``).
+     """
+
+     global CRUMBS
+     CRUMBS.clear()
+     if not CRUMBS_DIR.exists():
+         return CRUMBS
+
+     for crumb_dir in CRUMBS_DIR.iterdir():
+         if not crumb_dir.is_dir():
+             continue
+         info_path = crumb_dir / "info.txt"
+         if not info_path.is_file():
+             continue
+         # Parse key: value pairs
+         meta = {}
+         for line in info_path.read_text(encoding="utf-8").splitlines():
+             if ":" not in line:
+                 continue
+             key, val = line.split(":", 1)
+             meta[key.strip()] = val.strip()
+         name = meta.get("name", crumb_dir.name)
+         ctype = meta.get("type", "shell")
+         description = meta.get("description")
+         # Find script file: look for executable in the directory
+         script_path: Path | None = None
+         if ctype == "shell":
+             # Prefer a file named <name>.sh if present
+             candidate = crumb_dir / f"{name}.sh"
+             if candidate.is_file() and os.access(candidate, os.X_OK):
+                 script_path = candidate
+             else:
+                 # Fallback: first .sh in dir
+                 for p in crumb_dir.glob("*.sh"):
+                     if os.access(p, os.X_OK):
+                         script_path = p
+                         break
+         # Default to first file if script not found
+         if script_path is None:
+             files = list(crumb_dir.iterdir())
+             if files:
+                 script_path = files[0]
+         if script_path is None:
+             continue
+         crumb = Crumb(
+             name=name,
+             path=script_path,
+             type=ctype,
+             enabled=False,
+             description=description,
+         )
+         CRUMBS[name] = crumb
+
+     return CRUMBS
+
+
  class ConversationLogger:
      def __init__(self, log_path: Path) -> None:
          self.log_path = log_path
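
To make the expected on-disk layout concrete, here is a hypothetical crumb that `load_crumbs()` would pick up; the `uv-server` name comes from the README example further down, and everything else is illustrative:

```python
from pathlib import Path

crumb_dir = Path.home() / ".ducky" / "crumbs" / "uv-server"
crumb_dir.mkdir(parents=True, exist_ok=True)

# info.txt holds simple "key: value" pairs; lines without a colon are skipped.
(crumb_dir / "info.txt").write_text(
    "name: uv-server\n"
    "type: shell\n"
    "description: Start a local uv-managed dev server\n"
)

# For type "shell", an executable <name>.sh is preferred; otherwise the first
# executable *.sh (or, failing that, the first file) in the directory is used.
script = crumb_dir / "uv-server.sh"
script.write_text("#!/usr/bin/env bash\necho 'hello from uv-server'\n")
script.chmod(0o755)
```
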
@@ -82,7 +164,7 @@ class ConversationLogger:
          )

      def _append(self, entry: Dict[str, Any]) -> None:
-         entry["timestamp"] = datetime.utcnow().isoformat()
+         entry["timestamp"] = datetime.now(UTC).isoformat()
          with self.log_path.open("a", encoding="utf-8") as handle:
              handle.write(json.dumps(entry, ensure_ascii=False))
              handle.write("\n")
@@ -138,7 +220,11 @@ async def run_shell_and_print(

  class RubberDuck:
      def __init__(
-         self, model: str, quick: bool = False, command_mode: bool = False
+         self,
+         model: str,
+         quick: bool = False,
+         command_mode: bool = False,
+         host: str = "",
      ) -> None:
          self.system_prompt = dedent(
              """
@@ -153,20 +239,66 @@ class RubberDuck:
              changes rather than responding to each line individually.
              """
          ).strip()
+
+         # Set OLLAMA_HOST based on whether it's a cloud model
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+         elif "-cloud" in model:
+             os.environ["OLLAMA_HOST"] = "https://ollama.com"
+         elif "OLLAMA_HOST" not in os.environ:
+             # Default to localhost if not set and not a cloud model
+             os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+
          self.client = AsyncClient()
          self.model = model
          self.quick = quick
          self.command_mode = command_mode
+         self.crumbs = load_crumbs()
          self.messages: List[Dict[str, str]] = [
              {"role": "system", "content": self.system_prompt}
          ]
-         self.last_thinking: str | None = None
+         # Update system prompt to include enabled crumb descriptions
+
+     def update_system_prompt(self) -> None:
+         """Append enabled crumb descriptions to the system prompt.
+
+         The system prompt is stored in ``self.system_prompt`` and injected as the
+         first system message. When crumbs are enabled, we add a section that
+         lists the crumb names and their descriptions. The format is simple:
+
+             Crumbs:
+             - <name>: <description>
+
+         If no crumbs are enabled the prompt is unchanged.
+         """
+         # Start with the base system prompt
+         prompt_lines = [self.system_prompt]
+
+         if self.crumbs:
+             prompt_lines.append(
+                 "\nCrumbs are simple scripts you can run with bash, uv, or bun."
+             )
+             prompt_lines.append("Crumbs:")
+             for c in self.crumbs.values():
+                 description = c.description or "no description"
+                 prompt_lines.append(f"- {c.name}: {description}")
+
+         # Update the system prompt
+         self.system_prompt = "\n".join(prompt_lines)
+
+         # Update the first system message in the messages list
+         if self.messages and self.messages[0]["role"] == "system":
+             self.messages[0]["content"] = self.system_prompt
+         else:
+             # If there's no system message, add one
+             self.messages.insert(0, {"role": "system", "content": self.system_prompt})

      async def send_prompt(
          self, prompt: str | None = None, code: str | None = None
      ) -> AssistantResult:
          user_content = (prompt or "").strip()

+         self.update_system_prompt()
+
          if code:
              user_content = f"{user_content}\n\n{code}" if user_content else code

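
Assuming one loaded crumb, `update_system_prompt()` rewrites the first system message roughly as follows. This is a hypothetical snippet: it constructs a `Crumb` by hand instead of reading `~/.ducky/crumbs/`, and assumes the package is importable:

```python
from pathlib import Path

from ducky.ducky import Crumb, RubberDuck

duck = RubberDuck(model="gemma2:9b")  # non-cloud name, so OLLAMA_HOST -> localhost
duck.crumbs["uv-server"] = Crumb(
    name="uv-server",
    path=Path("uv-server.sh"),
    type="shell",
    enabled=False,
    description="Start a local uv-managed dev server",
)
duck.update_system_prompt()

# The base prompt now ends with the appended section:
#   Crumbs are simple scripts you can run with bash, uv, or bun.
#   Crumbs:
#   - uv-server: Start a local uv-managed dev server
print(duck.system_prompt.splitlines()[-1])
```
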
@@ -175,7 +307,7 @@

          if self.command_mode:
              instruction = (
-                 "Return a single bash command that accomplishes the task. "
+                 "Return a single bash command that accomplishes the task, unless the user wants something else. "
                  "Do not include explanations or formatting other than the command itself."
              )
              user_content = (
@@ -256,6 +388,54 @@ class RubberDuck:

          return command or None

+     async def list_models(self, host: str = "") -> list[str]:
+         """List available Ollama models."""
+         # Set the host temporarily for this operation
+         original_host = os.environ.get("OLLAMA_HOST", "")
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+             self.client = AsyncClient(host)
+         try:
+             response = await self.client.list()
+             models = []
+             for m in response.models:
+                 models.append(m.model)
+             return models
+         except Exception as e:
+             console.print(f"Error listing models: {e}", style="red")
+             return []
+         finally:
+             # Restore original host
+             if original_host:
+                 os.environ["OLLAMA_HOST"] = original_host
+                 self.client = AsyncClient(original_host)
+             elif "OLLAMA_HOST" in os.environ:
+                 del os.environ["OLLAMA_HOST"]
+                 self.client = AsyncClient()
+
+     def switch_model(self, model_name: str, host: str = "") -> None:
+         """Switch to a different Ollama model."""
+         self.model = model_name
+
+         # Set the host based on the model or explicit host
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+             self.client = AsyncClient(host)
+         elif "-cloud" in model_name:
+             os.environ["OLLAMA_HOST"] = "https://ollama.com"
+             self.client = AsyncClient("https://ollama.com")
+         else:
+             os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+             self.client = AsyncClient()
+
+         console.print(f"Switched to model: {model_name}", style="green")
+
+     def clear_history(self) -> None:
+         """Reset conversation history to the initial system prompt."""
+         if self.messages:
+             self.messages = [self.messages[0]]
+         console.print("Conversation history cleared.", style="green")
+

  class InlineInterface:
      def __init__(
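
A similar host-selection rule now appears in three places: `__init__`, `switch_model`, and the `ducky()` entry point below. Distilled into a standalone sketch that mirrors `switch_model`'s precedence (`__init__` additionally respects a pre-existing `OLLAMA_HOST`); the `resolve_host` helper is ours, not part of the package:

```python
def resolve_host(model_name: str, host: str = "") -> str:
    """Explicit host wins, then the "-cloud" naming convention,
    then the local Ollama default."""
    if host:
        return host
    if "-cloud" in model_name:
        return "https://ollama.com"
    return "http://localhost:11434"

assert resolve_host("qwen3-coder:480b-cloud") == "https://ollama.com"
assert resolve_host("gemma2:9b") == "http://localhost:11434"
assert resolve_host("gemma2:9b", "http://gpu-box:11434") == "http://gpu-box:11434"
```
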
@@ -273,6 +453,7 @@ class InlineInterface:
          self.last_shell_output: str | None = None
          self.pending_command: str | None = None
          self.session: PromptSession | None = None
+         self.selected_model: str | None = None

          if (
              PromptSession is not None
@@ -305,6 +486,11 @@
          def _(event) -> None:
              event.app.exit(result="__RUN_LAST__")

+         @kb.add("c-s")
+         def _(event) -> None:
+             # This will be handled in the processing loop
+             event.app.exit(result="__COPY_LAST__")
+
          return kb

      async def run(self) -> None:
@@ -317,7 +503,7 @@
              return

          console.print(
-             "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!cmd' runs shell • Ctrl+D exits",
+             "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!<cmd>' runs shell • Ctrl+D exits • Ctrl+S copies last command",
              style="dim",
          )
          while True:
@@ -328,7 +514,7 @@
                  console.print()
                  console.print("Exiting.", style="dim")
                  return
-             except KeyboardInterrupt:
+             except (KeyboardInterrupt, asyncio.CancelledError):
                  console.print()
                  console.print("Interrupted. Press Ctrl+D to exit.", style="yellow")
                  continue
@@ -337,8 +523,53 @@
                  await self._run_last_command()
                  continue

+             if text == "__COPY_LAST__":
+                 await self._copy_last_command()
+                 continue
+
              await self._process_text(text)

+     async def _copy_last_command(self) -> None:
+         """Copy the last suggested command to clipboard."""
+         if not self.last_command:
+             console.print("No suggested command available to copy.", style="yellow")
+             return
+
+         try:
+             import subprocess
+             import platform
+
+             command_to_copy = self.last_command
+
+             system = platform.system()
+             if system == "Darwin":  # macOS
+                 process = subprocess.Popen(["pbcopy"], stdin=subprocess.PIPE, text=True)
+                 process.communicate(input=command_to_copy)
+             elif system == "Windows":
+                 process = subprocess.Popen(["clip"], stdin=subprocess.PIPE, text=True)
+                 process.communicate(input=command_to_copy)
+             else:  # Linux and others
+                 # Try xclip first, then xsel as fallback
+                 try:
+                     process = subprocess.Popen(
+                         ["xclip", "-selection", "clipboard"],
+                         stdin=subprocess.PIPE,
+                         text=True,
+                     )
+                     process.communicate(input=command_to_copy)
+                 except FileNotFoundError:
+                     # Try xsel as fallback
+                     process = subprocess.Popen(
+                         ["xsel", "-b", "-i"], stdin=subprocess.PIPE, text=True
+                     )
+                     process.communicate(input=command_to_copy)
+
+             console.print(f"Copied to clipboard: {command_to_copy}", style="green")
+         except Exception as e:
+             console.print(f"Failed to copy to clipboard: {e}", style="red")
+             console.print("You can manually copy the last command:", style="dim")
+             console.print(f"  {self.last_command}", style="bold")
+
      async def _run_last_command(self) -> None:
          if not self.last_command:
              console.print("No suggested command available yet.", style="yellow")
@@ -349,6 +580,9 @@
              logger=self.logger,
              history=self.assistant.messages,
          )
+         # Add the command to prompt history so user can recall it with up arrow
+         if self.session and self.session.history and self.last_command:
+             self.session.history.append_string(self.last_command)
          self.last_shell_output = True
          self.pending_command = None
          self.last_command = None
369
603
  await self._run_last_command()
370
604
  return
371
605
 
606
+ if stripped.lower() in {"/clear", "/reset"}:
607
+ await self._clear_history()
608
+ return
609
+
610
+ if stripped.lower() == "/model":
611
+ await self._select_model()
612
+ return
613
+
614
+ if stripped.lower() == "/local":
615
+ await self._select_model(host="http://localhost:11434")
616
+ return
617
+
618
+ if stripped.lower() == "/cloud":
619
+ await self._select_model(host="https://ollama.com")
620
+ return
621
+
622
+ if stripped.lower() == "/help":
623
+ await self._show_help()
624
+ return
625
+
372
626
  if stripped.startswith("!"):
627
+ command = stripped[1:].strip()
373
628
  await run_shell_and_print(
374
629
  self.assistant,
375
- stripped[1:].strip(),
630
+ command,
376
631
  logger=self.logger,
377
632
  history=self.assistant.messages,
378
633
  )
634
+ # Add the command to prompt history so user can recall it with up arrow
635
+ if self.session and self.session.history and command:
636
+ self.session.history.append_string(command)
379
637
  self.last_shell_output = True
380
638
  self.pending_command = None
381
639
  return
@@ -390,7 +648,8 @@
          self._code_sent = True
          self.last_command = result.command
          self.pending_command = result.command
-         self.last_shell_output = None
+         # Set last_shell_output to True so empty Enter will explain the result
+         self.last_shell_output = True

      async def _explain_last_command(self) -> None:
          if not self.assistant.messages or len(self.assistant.messages) < 2:
@@ -410,6 +669,147 @@
          )
          self.last_shell_output = None

+     async def _show_help(self) -> None:
+         """Display help information for all available commands."""
+         console.print("\nDucky CLI Help", style="bold blue")
+         console.print("===============", style="bold blue")
+         console.print()
+
+         commands = [
+             ("[bold]/help[/bold]", "Show this help message"),
+             ("[bold]/model[/bold]", "Select a model interactively (local or cloud)"),
+             (
+                 "[bold]/local[/bold]",
+                 "List and select from local models (localhost:11434)",
+             ),
+             ("[bold]/cloud[/bold]", "List and select from cloud models (ollama.com)"),
+             (
+                 "[bold]/clear[/bold] or [bold]/reset[/bold]",
+                 "Clear conversation history",
+             ),
+             (
+                 "[bold]/run[/bold]",
+                 "Re-run the last suggested command",
+             ),
+             (
+                 "[bold]Empty Enter[/bold]",
+                 "Re-run suggested command or explain last output",
+             ),
+             ("[bold]!<command>[/bold]", "Execute a shell command directly"),
+             ("[bold]Ctrl+D[/bold]", "Exit the application"),
+             ("[bold]Ctrl+R[/bold]", "Re-run the last suggested command"),
+             ("[bold]Ctrl+S[/bold]", "Copy the last suggested command to clipboard"),
+         ]
+
+         for command, description in commands:
+             console.print(f"{command:<30} {description}")
+
+         console.print()
+
+     async def _clear_history(self) -> None:
+         self.assistant.clear_history()
+         self.last_command = None
+         self.pending_command = None
+         self.last_shell_output = None
+
+     async def _select_model(self, host: str = "") -> None:
+         """Show available models and let the user pick one by number or name."""
+         if PromptSession is None or KeyBindings is None:
+             console.print(
+                 "Model selection requires prompt_toolkit to be installed.",
+                 style="yellow",
+             )
+             return
+
+         # Show current model
+         console.print(f"Current model: {self.assistant.model}", style="bold green")
+
+         # If no host specified, give user a choice between local and cloud
+         if not host:
+             console.print("\nSelect model type:", style="bold")
+             console.print("1. Local models (localhost:11434)")
+             console.print("2. Cloud models (ollama.com)")
+             console.print("Press Esc to cancel", style="dim")
+
+             try:
+                 choice = await asyncio.to_thread(input, "Enter choice (1 or 2): ")
+                 choice = choice.strip()
+
+                 if choice.lower() == "esc":
+                     console.print("Model selection cancelled.", style="yellow")
+                     return
+
+                 if choice == "1":
+                     host = "http://localhost:11434"
+                 elif choice == "2":
+                     host = "https://ollama.com"
+                 else:
+                     console.print("Invalid choice. Please select 1 or 2.", style="red")
+                     return
+             except (ValueError, EOFError):
+                 console.print("Invalid input.", style="red")
+                 return
+
+         models = await self.assistant.list_models(host)
+         if not models:
+             if host == "http://localhost:11434":
+                 console.print(
+                     "No local models available. Is Ollama running?", style="red"
+                 )
+                 console.print("Start Ollama with: ollama serve", style="yellow")
+             else:
+                 console.print("No models available.", style="yellow")
+             return
+
+         if host == "https://ollama.com":
+             console.print("\nAvailable cloud models:", style="bold")
+         elif host == "http://localhost:11434":
+             console.print("\nAvailable local models:", style="bold")
+         else:
+             console.print("\nAvailable models:", style="bold")
+
+         for i, model in enumerate(models, 1):
+             if model == self.assistant.model:
+                 console.print(f"{i}. {model} (current)", style="green")
+             else:
+                 console.print(f"{i}. {model}")
+
+         console.print("Press Esc to cancel", style="dim")
+         try:
+             choice = await asyncio.to_thread(input, "Enter model number or name: ")
+             choice = choice.strip()
+
+             if choice.lower() == "esc":
+                 console.print("Model selection cancelled.", style="yellow")
+                 return
+
+             # Check if it's a number
+             if choice.isdigit():
+                 index = int(choice) - 1
+                 if 0 <= index < len(models):
+                     selected_model = models[index]
+                 else:
+                     console.print("Invalid model number.", style="red")
+                     return
+             else:
+                 # Check if it's a model name
+                 if choice in models:
+                     selected_model = choice
+                 else:
+                     console.print("Invalid model name.", style="red")
+                     return
+
+             self.assistant.switch_model(selected_model, host)
+
+             # Save the selected model and host to config
+             config_manager = ConfigManager()
+             config_manager.save_last_model(
+                 selected_model,
+                 host or os.environ.get("OLLAMA_HOST", "http://localhost:11434"),
+             )
+         except (ValueError, EOFError):
+             console.print("Invalid input.", style="red")
+
      async def _run_basic_loop(self) -> None:  # pragma: no cover - fallback path
          while True:
              try:
@@ -418,7 +818,7 @@
                  console.print()
                  console.print("Exiting.", style="dim")
                  return
-             except KeyboardInterrupt:
+             except (KeyboardInterrupt, asyncio.CancelledError):
                  console.print()
                  console.print("Interrupted. Press Ctrl+D to exit.", style="yellow")
                  continue
@@ -486,14 +886,38 @@ async def ducky() -> None:
      parser.add_argument(
          "--directory", "-d", help="The directory to be processed", default=None
      )
+     parser.add_argument("--model", "-m", help="The model to be used", default=None)
      parser.add_argument(
-         "--model", "-m", help="The model to be used", default="qwen3-coder:480b-cloud"
+         "--local",
+         "-l",
+         action="store_true",
+         help="Run DuckY offline using a local Ollama instance on localhost:11434",
      )
      args, _ = parser.parse_known_args()

      ensure_history_dir()
      logger = ConversationLogger(CONVERSATION_LOG_FILE)
-     rubber_ducky = RubberDuck(model=args.model, quick=False, command_mode=True)
+
+     # Load the last used model from config if no model is specified
+     config_manager = ConfigManager()
+     last_model, last_host = config_manager.get_last_model()
+
+     # If the --local flag is used, override with local settings
+     if getattr(args, "local", False):
+         # Point the Ollama client at localhost and fall back to gemma2:9b
+         os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+         args.model = args.model or "gemma2:9b"
+         last_host = "http://localhost:11434"
+     # If no model is specified, use the last used model
+     elif args.model is None:
+         args.model = last_model
+         # Set the host based on the last used host
+         if last_host:
+             os.environ["OLLAMA_HOST"] = last_host
+
+     rubber_ducky = RubberDuck(
+         model=args.model, quick=False, command_mode=True, host=last_host
+     )

      code = read_files_from_dir(args.directory) if args.directory else None

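
The startup precedence implemented above, condensed: `--local` forces the local host and a `gemma2:9b` fallback model, an explicit `--model` always wins for the model choice, and otherwise the values saved by `ConfigManager` are used. The `resolve_startup` helper below is ours, purely illustrative:

```python
def resolve_startup(model_arg, local_flag, saved_model, saved_host):
    """Condensed view of the branching in ducky() above."""
    if local_flag:
        return model_arg or "gemma2:9b", "http://localhost:11434"
    if model_arg is None:
        return saved_model, saved_host
    return model_arg, saved_host

assert resolve_startup(None, False, "qwen3-coder:480b-cloud", "https://ollama.com") == (
    "qwen3-coder:480b-cloud",
    "https://ollama.com",
)
assert resolve_startup(None, True, "qwen3-coder:480b-cloud", "https://ollama.com") == (
    "gemma2:9b",
    "http://localhost:11434",
)
```
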
rubber_ducky-1.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rubber-ducky
- Version: 1.2.1
+ Version: 1.3.0
  Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
@@ -35,7 +35,7 @@ Requirements:
  ```
  ducky                  # interactive inline session
  ducky --directory src  # preload code from a directory
- ducky --model llama3   # use a different Ollama model
+ ducky --model qwen3    # use a different Ollama model
  ```

  Both `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-ducky -- <args>` works as well.
@@ -54,6 +54,32 @@ Launching `ducky` with no arguments opens the inline interface:

  `ducky --directory <path>` streams the contents of the provided directory to the assistant the next time you submit a prompt (the directory is read once at startup).

+ ## Crumbs
+
+ Crumbs are simple scripts that can be executed within Rubber Ducky. They are stored in `~/.ducky/crumbs/` and can be referenced by name in your prompts.
+
+ To use a crumb, simply mention it in your prompt:
+ ```
+ Can you use the uv-server crumb to run the HuggingFace prompt renderer?
+ ```
+
+ ### Creating Crumbs
+
+ To create a new crumb:
+
+ 1. Create a new directory in `~/.ducky/crumbs/` with your crumb name
+ 2. Add an `info.txt` file with metadata:
+    ```
+    name: your-crumb-name
+    type: shell
+    description: Brief description of what this crumb does
+    ```
+ 3. Add your executable script file (e.g., `your-crumb-name.sh`)
+ 4. Create a symbolic link in `~/.local/bin` to make it available as a command:
+    ```bash
+    ln -s ~/.ducky/crumbs/your-crumb-name/your-crumb-name.sh ~/.local/bin/your-crumb-name
+    ```
+
  ## Development (uv)

  ```
rubber_ducky-1.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ ducky/__init__.py,sha256=2vLhJxOuJ3lnIeg5rmF6xUvybUT5Qhjej6AS0BeBASY,60
+ ducky/config.py,sha256=AH7KMTxPYrtSSmIJ_3qv0WB1tYpAjOWPrvPb7bKQ_cA,2142
+ ducky/ducky.py,sha256=1B500ievEsIg9mowpzqWuDNGck_DnUM0LdDRDHsm4yo,33716
+ rubber_ducky-1.3.0.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
+ rubber_ducky-1.3.0.dist-info/METADATA,sha256=BVIv6uBWho_khv8AbkQdPMP9PBB6xbvg-U6muGsfdCs,3874
+ rubber_ducky-1.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ rubber_ducky-1.3.0.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
+ rubber_ducky-1.3.0.dist-info/top_level.txt,sha256=4Q75MONDNPpQ3o17bTu7RFuKwFhTIRzlXP3_LDWQQ30,6
+ rubber_ducky-1.3.0.dist-info/RECORD,,
rubber_ducky-1.2.1.dist-info/RECORD REMOVED
@@ -1,8 +0,0 @@
- ducky/__init__.py,sha256=9l8SmwX0t1BmITkcrzW9fVMPvD2LfgKLZlSXWzPJFSE,25
- ducky/ducky.py,sha256=FWGkAnyWB8k6GxsAu5WkIxJ5mlnT9ymIAJsJf8ryTts,17347
- rubber_ducky-1.2.1.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
- rubber_ducky-1.2.1.dist-info/METADATA,sha256=MDt4yR-GtzqF4bB-j8s4kXt3tNUDYJ6H_7Mr6mLUEu0,3063
- rubber_ducky-1.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- rubber_ducky-1.2.1.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
- rubber_ducky-1.2.1.dist-info/top_level.txt,sha256=4Q75MONDNPpQ3o17bTu7RFuKwFhTIRzlXP3_LDWQQ30,6
- rubber_ducky-1.2.1.dist-info/RECORD,,