rubber-ducky 1.2.0__tar.gz → 1.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: rubber-ducky
- Version: 1.2.0
- Summary: For developers who can never remember the right bash command
+ Version: 1.2.2
+ Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
@@ -43,7 +43,7 @@ Both `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-
  ### Inline Session (default)
 
  Launching `ducky` with no arguments opens the inline interface:
- - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts).
+ - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts). Hitting **Enter on an empty prompt** reruns the latest suggested command; if none exists yet, it explains the most recent shell output.
  - **Ctrl+R** re-runs the last suggested command.
  - Prefix any line with **`!`** (e.g., `!ls -la`) to run a shell command immediately.
  - Arrow keys browse prompt history, backed by `~/.ducky/prompt_history`.
@@ -27,7 +27,7 @@ Both `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-
  ### Inline Session (default)
 
  Launching `ducky` with no arguments opens the inline interface:
- - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts).
+ - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts). Hitting **Enter on an empty prompt** reruns the latest suggested command; if none exists yet, it explains the most recent shell output.
  - **Ctrl+R** re-runs the last suggested command.
  - Prefix any line with **`!`** (e.g., `!ls -la`) to run a shell command immediately.
  - Arrow keys browse prompt history, backed by `~/.ducky/prompt_history`.
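To make the new empty-Enter behavior concrete, a hypothetical inline session (prompts and output invented for illustration):

```text
> list files changed in the last day
Suggested command:
find . -type f -mtime -1

>                  # empty Enter: reruns the pending suggested command
./README.md
./src/cli.py

>                  # empty Enter again: explains the shell output above
Two files changed in the last 24 hours; nothing looks unusual.
```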
@@ -1,15 +1,28 @@
  from __future__ import annotations
 
  import argparse
+ import os
  import asyncio
- import json
  import sys
  from dataclasses import dataclass
  from datetime import datetime
+
+ # json is now imported lazily inside ConversationLogger._append
+ from typing import Dict
  from pathlib import Path
  from textwrap import dedent
  from typing import Any, Dict, List
 
+
+ @dataclass
+ class Crumb:
+     name: str
+     path: Path
+     type: str
+     enabled: bool
+     description: str | None = None
+
+
  from ollama import AsyncClient
  from contextlib import nullcontext
 
@@ -18,6 +31,13 @@ try: # prompt_toolkit is optional at runtime
      from prompt_toolkit.history import FileHistory
      from prompt_toolkit.key_binding import KeyBindings
      from prompt_toolkit.patch_stdout import patch_stdout
+     from prompt_toolkit.application import Application
+     from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
+     from prompt_toolkit.layout.containers import HSplit, Window
+     from prompt_toolkit.layout.controls import FormattedTextControl
+     from prompt_toolkit.layout.layout import Layout
+     from prompt_toolkit.styles import Style
+     from prompt_toolkit.widgets import Box, Button, Dialog, Label, TextArea
  except ImportError:  # pragma: no cover - fallback mode
      PromptSession = None  # type: ignore[assignment]
      FileHistory = None  # type: ignore[assignment]
@@ -25,6 +45,8 @@ except ImportError: # pragma: no cover - fallback mode
 
      def patch_stdout() -> nullcontext:
          return nullcontext()
+
+
  from rich.console import Console
 
 
@@ -46,14 +68,77 @@ class ShellResult:
  HISTORY_DIR = Path.home() / ".ducky"
  PROMPT_HISTORY_FILE = HISTORY_DIR / "prompt_history"
  CONVERSATION_LOG_FILE = HISTORY_DIR / "conversation.log"
+ CRUMBS_DIR = HISTORY_DIR / "crumbs"
+ CRUMBS: Dict[str, Crumb] = {}
  console = Console()
 
 
  def ensure_history_dir() -> Path:
      HISTORY_DIR.mkdir(parents=True, exist_ok=True)
+     CRUMBS_DIR.mkdir(parents=True, exist_ok=True)
      return HISTORY_DIR
 
 
+ def load_crumbs() -> Dict[str, Crumb]:
+     """Populate the global ``CRUMBS`` dictionary from ``CRUMBS_DIR``.
+
+     Each crumb is expected to be a directory containing an ``info.txt`` and a
+     script file matching the ``type`` field (``shell`` → ``*.sh``).
+     """
+
+     global CRUMBS
+     CRUMBS.clear()
+     if not CRUMBS_DIR.exists():
+         return CRUMBS
+
+     for crumb_dir in CRUMBS_DIR.iterdir():
+         if not crumb_dir.is_dir():
+             continue
+         info_path = crumb_dir / "info.txt"
+         if not info_path.is_file():
+             continue
+         # Parse key: value pairs
+         meta = {}
+         for line in info_path.read_text(encoding="utf-8").splitlines():
+             if ":" not in line:
+                 continue
+             key, val = line.split(":", 1)
+             meta[key.strip()] = val.strip()
+         name = meta.get("name", crumb_dir.name)
+         ctype = meta.get("type", "shell")
+         description = meta.get("description")
+         # Find script file: look for an executable in the directory
+         script_path: Path | None = None
+         if ctype == "shell":
+             # Prefer a file named <name>.sh if present
+             candidate = crumb_dir / f"{name}.sh"
+             if candidate.is_file() and os.access(candidate, os.X_OK):
+                 script_path = candidate
+             else:
+                 # Fallback: first executable .sh in the directory
+                 for p in crumb_dir.glob("*.sh"):
+                     if os.access(p, os.X_OK):
+                         script_path = p
+                         break
+         # Default to the first file if no script was found
+         if script_path is None:
+             files = list(crumb_dir.iterdir())
+             if files:
+                 script_path = files[0]
+         if script_path is None:
+             continue
+         crumb = Crumb(
+             name=name,
+             path=script_path,
+             type=ctype,
+             enabled=False,
+             description=description,
+         )
+         CRUMBS[name] = crumb
+
+     return CRUMBS
+
+
  class ConversationLogger:
      def __init__(self, log_path: Path) -> None:
          self.log_path = log_path
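Per the parser above, a crumb is a directory under `~/.ducky/crumbs/` holding an `info.txt` of `key: value` pairs plus a script. A hypothetical crumb (all names invented) that `load_crumbs()` would accept:

```text
~/.ducky/crumbs/disk-usage/
├── info.txt
└── disk-usage.sh        # must be executable, or the *.sh lookup skips it

# info.txt
name: disk-usage
type: shell
description: Summarize disk usage of the current directory
```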
@@ -80,6 +165,8 @@ class ConversationLogger:
          )
 
      def _append(self, entry: Dict[str, Any]) -> None:
+         import json
+
          entry["timestamp"] = datetime.utcnow().isoformat()
          with self.log_path.open("a", encoding="utf-8") as handle:
              handle.write(json.dumps(entry, ensure_ascii=False))
@@ -110,6 +197,7 @@ async def run_shell_and_print(
      assistant: RubberDuck,
      command: str,
      logger: ConversationLogger | None = None,
+     history: list[dict[str, str]] | None = None,
  ) -> None:
      if not command:
          console.print("No command provided.", style="yellow")
@@ -119,6 +207,18 @@ async def run_shell_and_print(
      print_shell_result(result)
      if logger:
          logger.log_shell(result)
+     if history is not None:
+         history.append({"role": "user", "content": f"!{command}"})
+         combined_output: list[str] = []
+         if result.stdout.strip():
+             combined_output.append(result.stdout.rstrip())
+         if result.stderr.strip():
+             combined_output.append(f"[stderr]\n{result.stderr.rstrip()}")
+         if result.returncode != 0:
+             combined_output.append(f"(exit status {result.returncode})")
+         if not combined_output:
+             combined_output.append("(command produced no output)")
+         history.append({"role": "assistant", "content": "\n\n".join(combined_output)})
 
 
  class RubberDuck:
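The new `history` parameter folds shell runs into the chat transcript. A sketch of what gets appended for a failing command (output invented):

```python
# After: await run_shell_and_print(assistant, "cat missing.txt", history=messages)
messages += [
    {"role": "user", "content": "!cat missing.txt"},
    {
        "role": "assistant",
        # stdout was empty, so only the stderr block and exit status survive
        "content": "[stderr]\ncat: missing.txt: No such file or directory\n\n(exit status 1)",
    },
]
```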
@@ -142,16 +242,50 @@ class RubberDuck:
          self.model = model
          self.quick = quick
          self.command_mode = command_mode
+         self.crumbs = load_crumbs()
          self.messages: List[Dict[str, str]] = [
              {"role": "system", "content": self.system_prompt}
          ]
-         self.last_thinking: str | None = None
+         # Update system prompt to include crumb descriptions
+
+     def update_system_prompt(self) -> None:
+         """Append the descriptions of loaded crumbs to the system prompt.
+
+         The system prompt is stored in ``self.system_prompt`` and injected as the
+         first system message. When crumbs are loaded, we add a section that
+         lists the crumb names and their descriptions. The format is simple::
+
+             Crumbs:
+             - <name>: <description>
+
+         If no crumbs are loaded the prompt is unchanged.
+         """
+         # Start with the base system prompt
+         prompt_lines = [self.system_prompt]
+
+         if self.crumbs:
+             prompt_lines.append("\nCrumbs are simple scripts you can run with bash, uv, or bun.")
+             prompt_lines.append("Crumbs:")
+             for c in self.crumbs.values():
+                 description = c.description or "no description"
+                 prompt_lines.append(f"- {c.name}: {description}")
+
+         # Update the system prompt
+         self.system_prompt = "\n".join(prompt_lines)
+
+         # Update the first system message in the messages list
+         if self.messages and self.messages[0]["role"] == "system":
+             self.messages[0]["content"] = self.system_prompt
+         else:
+             # If there's no system message, add one
+             self.messages.insert(0, {"role": "system", "content": self.system_prompt})
 
      async def send_prompt(
          self, prompt: str | None = None, code: str | None = None
      ) -> AssistantResult:
          user_content = (prompt or "").strip()
 
+         self.update_system_prompt()
+
          if code:
              user_content = f"{user_content}\n\n{code}" if user_content else code
 
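With the hypothetical `disk-usage` crumb from earlier, the section `update_system_prompt()` appends after the base prompt would read:

```text
Crumbs are simple scripts you can run with bash, uv, or bun.
Crumbs:
- disk-usage: Summarize disk usage of the current directory
```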
@@ -160,7 +294,7 @@ class RubberDuck:
 
          if self.command_mode:
              instruction = (
-                 "Return a single bash command that accomplishes the task. "
+                 "Return a single bash command that accomplishes the task, unless the user wants something else. "
                  "Do not include explanations or formatting other than the command itself."
              )
              user_content = (
@@ -241,6 +375,20 @@ class RubberDuck:
 
          return command or None
 
+     async def list_models(self) -> list[str]:
+         """List available Ollama models."""
+         try:
+             response = await self.client.list()
+             return [model.model for model in response.models]
+         except Exception as e:
+             console.print(f"Error listing models: {e}", style="red")
+             return []
+
+     def switch_model(self, model_name: str) -> None:
+         """Switch to a different Ollama model."""
+         self.model = model_name
+         console.print(f"Switched to model: {model_name}", style="green")
+
 
  class InlineInterface:
      def __init__(
@@ -255,7 +403,10 @@ class InlineInterface:
          self.last_command: str | None = None
          self.code = code
          self._code_sent = False
+         self.last_shell_output: str | None = None
+         self.pending_command: str | None = None
          self.session: PromptSession | None = None
+         self.selected_model: str | None = None
 
          if (
              PromptSession is not None
@@ -300,7 +451,7 @@ class InlineInterface:
              return
 
          console.print(
-             "Enter submits • Ctrl+J inserts newline Ctrl+R reruns last command • '!cmd' runs shell • Ctrl+D exits",
+             "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!cmd' runs shell • Ctrl+D exits",
              style="dim",
          )
          while True:
@@ -326,21 +477,45 @@ class InlineInterface:
          if not self.last_command:
              console.print("No suggested command available yet.", style="yellow")
              return
-         await run_shell_and_print(self.assistant, self.last_command, logger=self.logger)
+         await run_shell_and_print(
+             self.assistant,
+             self.last_command,
+             logger=self.logger,
+             history=self.assistant.messages,
+         )
+         self.last_shell_output = True
+         self.pending_command = None
+         self.last_command = None
 
      async def _process_text(self, text: str) -> None:
          stripped = text.strip()
          if not stripped:
+             if self.pending_command:
+                 await self._run_last_command()
+                 return
+             if self.last_shell_output:
+                 await self._explain_last_command()
+                 return
+             console.print("Nothing to run yet.", style="yellow")
              return
 
          if stripped.lower() in {":run", "/run"}:
              await self._run_last_command()
              return
 
+         if stripped.lower() == "/model":
+             await self._select_model()
+             return
+
          if stripped.startswith("!"):
              await run_shell_and_print(
-                 self.assistant, stripped[1:].strip(), logger=self.logger
+                 self.assistant,
+                 stripped[1:].strip(),
+                 logger=self.logger,
+                 history=self.assistant.messages,
              )
+             self.last_shell_output = True
+             self.pending_command = None
              return
 
          result = await run_single_prompt(
@@ -352,6 +527,70 @@ class InlineInterface:
          if self.code and not self._code_sent:
              self._code_sent = True
          self.last_command = result.command
+         self.pending_command = result.command
+         # Set last_shell_output to True so empty Enter will explain the result
+         self.last_shell_output = True
+
+     async def _explain_last_command(self) -> None:
+         if not self.assistant.messages or len(self.assistant.messages) < 2:
+             console.print("No shell output to explain yet.", style="yellow")
+             return
+         last_entry = self.assistant.messages[-1]
+         if last_entry["role"] != "assistant":
+             console.print("No shell output to explain yet.", style="yellow")
+             return
+         prompt = (
+             "The user ran a shell command above. Summarize the key findings from the output, "
+             "highlight problems if any, and suggest next steps. Do NOT suggest a shell command or code snippet.\n\n"
+             f"{last_entry['content']}"
+         )
+         await run_single_prompt(
+             self.assistant, prompt, logger=self.logger, suppress_suggestion=True
+         )
+         self.last_shell_output = None
+
+     async def _select_model(self) -> None:
+         """Show available models and let the user select one by number or name."""
+         if PromptSession is None or KeyBindings is None:
+             console.print("Model selection requires prompt_toolkit to be installed.", style="yellow")
+             return
+
+         models = await self.assistant.list_models()
+         if not models:
+             console.print("No models available.", style="yellow")
+             return
+
+         # Simple approach: show models as a list and let the user type the number
+         console.print("Available models:", style="bold")
+         for i, model in enumerate(models, 1):
+             if model == self.assistant.model:
+                 console.print(f"{i}. {model} (current)", style="green")
+             else:
+                 console.print(f"{i}. {model}")
+
+         try:
+             choice = await asyncio.to_thread(input, "Enter model number or name: ")
+             choice = choice.strip()
+
+             # Check if it's a number
+             if choice.isdigit():
+                 index = int(choice) - 1
+                 if 0 <= index < len(models):
+                     selected_model = models[index]
+                 else:
+                     console.print("Invalid model number.", style="red")
+                     return
+             else:
+                 # Check if it's a model name
+                 if choice in models:
+                     selected_model = choice
+                 else:
+                     console.print("Invalid model name.", style="red")
+                     return
+
+             self.assistant.switch_model(selected_model)
+         except (ValueError, EOFError):
+             console.print("Invalid input.", style="red")
 
      async def _run_basic_loop(self) -> None:  # pragma: no cover - fallback path
          while True:
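A hypothetical `/model` exchange, assuming one extra local model is installed:

```text
> /model
Available models:
1. qwen3-coder:480b-cloud (current)
2. gpt-oss:20b
Enter model number or name: 2
Switched to model: gpt-oss:20b
```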
@@ -388,6 +627,7 @@ async def run_single_prompt(
      prompt: str,
      code: str | None = None,
      logger: ConversationLogger | None = None,
+     suppress_suggestion: bool = False,
  ) -> AssistantResult:
      if logger:
          logger.log_user(prompt)
@@ -396,7 +636,7 @@ async def run_single_prompt(
      console.print(content, style="green", highlight=False)
      if logger:
          logger.log_assistant(content, result.command)
-     if result.command:
+     if result.command and not suppress_suggestion:
          console.print("\nSuggested command:", style="cyan", highlight=False)
          console.print(result.command, style="bold cyan", highlight=False)
      return result
@@ -431,10 +671,20 @@ async def ducky() -> None:
      parser.add_argument(
          "--model", "-m", help="The model to be used", default="qwen3-coder:480b-cloud"
      )
+     parser.add_argument(
+         "--local",
+         "-l",
+         action="store_true",
+         help="Run DuckY offline using a local Ollama instance on localhost:11434",
+     )
      args, _ = parser.parse_known_args()
 
      ensure_history_dir()
      logger = ConversationLogger(CONVERSATION_LOG_FILE)
+     if getattr(args, "local", False):
+         # Point the Ollama client at localhost and use gpt-oss:20b as the default model
+         os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+         args.model = "gpt-oss:20b"
      rubber_ducky = RubberDuck(model=args.model, quick=False, command_mode=True)
 
      code = read_files_from_dir(args.directory) if args.directory else None
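A usage sketch for the new flag (host and model values taken from the code above):

```text
$ ducky --local
# sets OLLAMA_HOST=http://localhost:11434 and forces the model to gpt-oss:20b
```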
@@ -455,7 +705,10 @@ async def ducky() -> None:
          and confirm("Run suggested command?")
      ):
          await run_shell_and_print(
-             rubber_ducky, result.command, logger=logger
+             rubber_ducky,
+             result.command,
+             logger=logger,
+             history=rubber_ducky.messages,
          )
      else:
          console.print("No input received from stdin.", style="yellow")
@@ -1,7 +1,7 @@
  [project]
  name = "rubber-ducky"
- version = "1.2.0"
- description = "For developers who can never remember the right bash command"
+ version = "1.2.2"
+ description = "Quick CLI do-it-all tool. Use natural language to spit out bash commands"
  readme = "README.md"
  requires-python = ">=3.10"
  dependencies = [
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: rubber-ducky
- Version: 1.2.0
- Summary: For developers who can never remember the right bash command
+ Version: 1.2.2
+ Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
@@ -43,7 +43,7 @@ Both `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-
  ### Inline Session (default)
 
  Launching `ducky` with no arguments opens the inline interface:
- - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts).
+ - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts). Hitting **Enter on an empty prompt** reruns the latest suggested command; if none exists yet, it explains the most recent shell output.
  - **Ctrl+R** re-runs the last suggested command.
  - Prefix any line with **`!`** (e.g., `!ls -la`) to run a shell command immediately.
  - Arrow keys browse prompt history, backed by `~/.ducky/prompt_history`.
File without changes
File without changes