rubber-ducky 1.2.1.tar.gz → 1.2.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- {rubber_ducky-1.2.1/rubber_ducky.egg-info → rubber_ducky-1.2.2}/PKG-INFO +1 -1
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/ducky/ducky.py +196 -4
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/pyproject.toml +1 -1
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2/rubber_ducky.egg-info}/PKG-INFO +1 -1
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/LICENSE +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/README.md +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/ducky/__init__.py +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/rubber_ducky.egg-info/SOURCES.txt +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/rubber_ducky.egg-info/dependency_links.txt +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/rubber_ducky.egg-info/entry_points.txt +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/rubber_ducky.egg-info/requires.txt +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/rubber_ducky.egg-info/top_level.txt +0 -0
- {rubber_ducky-1.2.1 → rubber_ducky-1.2.2}/setup.cfg +0 -0
@@ -1,15 +1,28 @@
 from __future__ import annotations
 
 import argparse
+import os
 import asyncio
-import json
 import sys
 from dataclasses import dataclass
 from datetime import datetime
+
+# import json included earlier
+from typing import Dict
 from pathlib import Path
 from textwrap import dedent
 from typing import Any, Dict, List
 
+
+@dataclass
+class Crumb:
+    name: str
+    path: Path
+    type: str
+    enabled: bool
+    description: str | None = None
+
+
 from ollama import AsyncClient
 from contextlib import nullcontext
 
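For orientation, a Crumb record as declared above could be built like this (a minimal sketch; the crumb name, script path, and description are hypothetical):

    from pathlib import Path

    # Hypothetical shell crumb living under ~/.ducky/crumbs/disk-usage/
    crumb = Crumb(
        name="disk-usage",
        path=Path.home() / ".ducky" / "crumbs" / "disk-usage" / "disk-usage.sh",
        type="shell",
        enabled=False,  # crumbs are created disabled by default
        description="Summarize disk usage of the current directory",
    )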
@@ -18,6 +31,13 @@ try: # prompt_toolkit is optional at runtime
     from prompt_toolkit.history import FileHistory
     from prompt_toolkit.key_binding import KeyBindings
     from prompt_toolkit.patch_stdout import patch_stdout
+    from prompt_toolkit.application import Application
+    from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
+    from prompt_toolkit.layout.containers import HSplit, Window
+    from prompt_toolkit.layout.controls import FormattedTextControl
+    from prompt_toolkit.layout.layout import Layout
+    from prompt_toolkit.styles import Style
+    from prompt_toolkit.widgets import Box, Button, Dialog, Label, TextArea
 except ImportError: # pragma: no cover - fallback mode
     PromptSession = None # type: ignore[assignment]
     FileHistory = None # type: ignore[assignment]
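Because these imports sit inside the try/except ImportError guard, every one of the names may be None when prompt_toolkit is absent; call sites check before use, as in this minimal sketch taken from the pattern used later in the file:

    if PromptSession is None or KeyBindings is None:
        # prompt_toolkit missing: degrade to the plain-input fallback loop
        console.print("Model selection requires prompt_toolkit to be installed.", style="yellow")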
@@ -48,14 +68,77 @@ class ShellResult:
 HISTORY_DIR = Path.home() / ".ducky"
 PROMPT_HISTORY_FILE = HISTORY_DIR / "prompt_history"
 CONVERSATION_LOG_FILE = HISTORY_DIR / "conversation.log"
+CRUMBS_DIR = HISTORY_DIR / "crumbs"
+CRUMBS: Dict[str, Crumb] = {}
 console = Console()
 
 
 def ensure_history_dir() -> Path:
     HISTORY_DIR.mkdir(parents=True, exist_ok=True)
+    CRUMBS_DIR.mkdir(parents=True, exist_ok=True)
     return HISTORY_DIR
 
 
+def load_crumbs() -> Dict[str, Crumb]:
+    """Populate the global ``CRUMBS`` dictionary from the ``CRUMBS_DIR``.
+
+    Each crumb is expected to be a directory containing an ``info.txt`` and a
+    script file matching the ``type`` field (``shell`` → ``*.sh``).
+    """
+
+    global CRUMBS
+    CRUMBS.clear()
+    if not CRUMBS_DIR.exists():
+        return CRUMBS
+
+    for crumb_dir in CRUMBS_DIR.iterdir():
+        if not crumb_dir.is_dir():
+            continue
+        info_path = crumb_dir / "info.txt"
+        if not info_path.is_file():
+            continue
+        # Parse key: value pairs
+        meta = {}
+        for line in info_path.read_text(encoding="utf-8").splitlines():
+            if ":" not in line:
+                continue
+            key, val = line.split(":", 1)
+            meta[key.strip()] = val.strip()
+        name = meta.get("name", crumb_dir.name)
+        ctype = meta.get("type", "shell")
+        description = meta.get("description")
+        # Find script file: look for executable in the directory
+        script_path: Path | None = None
+        if ctype == "shell":
+            # Prefer a file named <name>.sh if present
+            candidate = crumb_dir / f"{name}.sh"
+            if candidate.is_file() and os.access(candidate, os.X_OK):
+                script_path = candidate
+            else:
+                # Fallback: first .sh in dir
+                for p in crumb_dir.glob("*.sh"):
+                    if os.access(p, os.X_OK):
+                        script_path = p
+                        break
+        # Default to first file if script not found
+        if script_path is None:
+            files = list(crumb_dir.iterdir())
+            if files:
+                script_path = files[0]
+        if script_path is None:
+            continue
+        crumb = Crumb(
+            name=name,
+            path=script_path,
+            type=ctype,
+            enabled=False,
+            description=description,
+        )
+        CRUMBS[name] = crumb
+
+    return CRUMBS
+
+
 class ConversationLogger:
     def __init__(self, log_path: Path) -> None:
         self.log_path = log_path
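Concretely, load_crumbs expects a layout like the following under ~/.ducky/crumbs (directory, script, and field values are hypothetical):

    ~/.ducky/crumbs/
    └── disk-usage/
        ├── info.txt          # "key: value" metadata, parsed line by line into meta
        └── disk-usage.sh     # executable script, matched by name first, then by *.sh glob

    # info.txt
    name: disk-usage
    type: shell
    description: Summarize disk usage of the current directory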
@@ -82,6 +165,8 @@ class ConversationLogger:
         )
 
     def _append(self, entry: Dict[str, Any]) -> None:
+        import json
+
         entry["timestamp"] = datetime.utcnow().isoformat()
         with self.log_path.open("a", encoding="utf-8") as handle:
             handle.write(json.dumps(entry, ensure_ascii=False))
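_append writes one JSON object per line to ~/.ducky/conversation.log; a hypothetical entry might look like the following (the role and content keys are assumptions; only the timestamp is added by _append itself):

    {"role": "user", "content": "why does my loop never exit?", "timestamp": "2025-01-01T12:00:00.000000"}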
@@ -157,16 +242,50 @@ class RubberDuck:
         self.model = model
         self.quick = quick
         self.command_mode = command_mode
+        self.crumbs = load_crumbs()
         self.messages: List[Dict[str, str]] = [
             {"role": "system", "content": self.system_prompt}
         ]
-
+    # Update system prompt to include enabled crumb descriptions
+
+    def update_system_prompt(self) -> None:
+        """Append enabled crumb descriptions to the system prompt.
+
+        The system prompt is stored in ``self.system_prompt`` and injected as the
+        first system message. When crumbs are enabled, we add a section that
+        lists the crumb names and their descriptions. The format is simple:
+
+        ``Crumbs:``\n
+        ``- <name>: <description>``\n
+        If no crumbs are enabled the prompt is unchanged.
+        """
+        # Start with the base system prompt
+        prompt_lines = [self.system_prompt]
+
+        if self.crumbs:
+            prompt_lines.append("\nCrumbs are simple scripts you can run with bash, uv, or bun.")
+            prompt_lines.append("Crumbs:")
+            for c in self.crumbs.values():
+                description = c.description or "no description"
+                prompt_lines.append(f"- {c.name}: {description}")
+
+        # Update the system prompt
+        self.system_prompt = "\n".join(prompt_lines)
+
+        # Update the first system message in the messages list
+        if self.messages and self.messages[0]["role"] == "system":
+            self.messages[0]["content"] = self.system_prompt
+        else:
+            # If there's no system message, add one
+            self.messages.insert(0, {"role": "system", "content": self.system_prompt})
 
     async def send_prompt(
         self, prompt: str | None = None, code: str | None = None
     ) -> AssistantResult:
         user_content = (prompt or "").strip()
 
+        self.update_system_prompt()
+
         if code:
             user_content = f"{user_content}\n\n{code}" if user_content else code
 
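With one loaded crumb, the rebuilt system prompt gains a trailing section like this (reusing the hypothetical disk-usage crumb from above; the loop iterates all loaded crumbs, and the leading "\n" in the first appended line produces the blank separator):

    <base system prompt>

    Crumbs are simple scripts you can run with bash, uv, or bun.
    Crumbs:
    - disk-usage: Summarize disk usage of the current directory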
@@ -175,7 +294,7 @@ class RubberDuck:
 
         if self.command_mode:
             instruction = (
-                "Return a single bash command that accomplishes the task. "
+                "Return a single bash command that accomplishes the task. Unless user wants something els"
                 "Do not include explanations or formatting other than the command itself."
             )
             user_content = (
@@ -256,6 +375,20 @@ class RubberDuck:
 
         return command or None
 
+    async def list_models(self) -> list[str]:
+        """List available Ollama models."""
+        try:
+            response = await self.client.list()
+            return [model.model for model in response.models]
+        except Exception as e:
+            console.print(f"Error listing models: {e}", style="red")
+            return []
+
+    def switch_model(self, model_name: str) -> None:
+        """Switch to a different Ollama model."""
+        self.model = model_name
+        console.print(f"Switched to model: {model_name}", style="green")
+
 
 class InlineInterface:
     def __init__(
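A minimal sketch of driving these two helpers directly, outside the REPL (assumes a reachable Ollama server; list_models returns [] on any error, and switch_model only reassigns self.model without validating the name):

    import asyncio

    async def main() -> None:
        duck = RubberDuck(model="qwen3-coder:480b-cloud", quick=False, command_mode=True)
        names = await duck.list_models()
        if names:
            duck.switch_model(names[0])

    asyncio.run(main())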
@@ -273,6 +406,7 @@ class InlineInterface:
         self.last_shell_output: str | None = None
         self.pending_command: str | None = None
         self.session: PromptSession | None = None
+        self.selected_model: str | None = None
 
         if (
             PromptSession is not None
@@ -369,6 +503,10 @@ class InlineInterface:
             await self._run_last_command()
             return
 
+        if stripped.lower() == "/model":
+            await self._select_model()
+            return
+
         if stripped.startswith("!"):
             await run_shell_and_print(
                 self.assistant,
@@ -390,7 +528,8 @@ class InlineInterface:
         self._code_sent = True
         self.last_command = result.command
         self.pending_command = result.command
-
+        # Set last_shell_output to True so empty Enter will explain the result
+        self.last_shell_output = True
 
     async def _explain_last_command(self) -> None:
         if not self.assistant.messages or len(self.assistant.messages) < 2:
@@ -410,6 +549,49 @@ class InlineInterface:
         )
         self.last_shell_output = None
 
+    async def _select_model(self) -> None:
+        """Show available models and allow user to select one with arrow keys."""
+        if PromptSession is None or KeyBindings is None:
+            console.print("Model selection requires prompt_toolkit to be installed.", style="yellow")
+            return
+
+        models = await self.assistant.list_models()
+        if not models:
+            console.print("No models available.", style="yellow")
+            return
+
+        # Simple approach: show models as a list and let user type the number
+        console.print("Available models:", style="bold")
+        for i, model in enumerate(models, 1):
+            if model == self.assistant.model:
+                console.print(f"{i}. {model} (current)", style="green")
+            else:
+                console.print(f"{i}. {model}")
+
+        try:
+            choice = await asyncio.to_thread(input, "Enter model number or name: ")
+            choice = choice.strip()
+
+            # Check if it's a number
+            if choice.isdigit():
+                index = int(choice) - 1
+                if 0 <= index < len(models):
+                    selected_model = models[index]
+                else:
+                    console.print("Invalid model number.", style="red")
+                    return
+            else:
+                # Check if it's a model name
+                if choice in models:
+                    selected_model = choice
+                else:
+                    console.print("Invalid model name.", style="red")
+                    return
+
+            self.assistant.switch_model(selected_model)
+        except (ValueError, EOFError):
+            console.print("Invalid input.", style="red")
+
     async def _run_basic_loop(self) -> None: # pragma: no cover - fallback path
         while True:
             try:
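A hypothetical /model exchange, tracing the code above (model names taken from elsewhere in this diff; selection is by typed number or name rather than arrow keys):

    > /model
    Available models:
    1. gpt-oss:20b
    2. qwen3-coder:480b-cloud (current)
    Enter model number or name: 1
    Switched to model: gpt-oss:20b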
@@ -489,10 +671,20 @@ async def ducky() -> None:
     parser.add_argument(
         "--model", "-m", help="The model to be used", default="qwen3-coder:480b-cloud"
     )
+    parser.add_argument(
+        "--local",
+        "-l",
+        action="store_true",
+        help="Run DuckY offline using a local Ollama instance on localhost:11434",
+    )
     args, _ = parser.parse_known_args()
 
     ensure_history_dir()
     logger = ConversationLogger(CONVERSATION_LOG_FILE)
+    if getattr(args, "local", False):
+        # Point Ollama client to local host and use gemma3 as default model
+        os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+        args.model = "gpt-oss:20b"
     rubber_ducky = RubberDuck(model=args.model, quick=False, command_mode=True)
 
     code = read_files_from_dir(args.directory) if args.directory else None
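Invocation sketch for the new flag (assuming the console script installed via entry_points.txt is named ducky; per the code above, --local overrides any --model value with gpt-oss:20b):

    # default: cloud-backed model (qwen3-coder:480b-cloud)
    ducky

    # offline: sets OLLAMA_HOST=http://localhost:11434 and forces the model to gpt-oss:20b
    ducky --local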