rubber-ducky 1.5.3__py3-none-any.whl → 1.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ducky/ducky.py CHANGED
@@ -348,9 +348,12 @@ class RubberDuck:
  if host:
  os.environ["OLLAMA_HOST"] = host
  self.client = AsyncClient(host)
+ if "ollama.com" in host:
+ console.print("[dim]Note: Cloud models require authentication[/dim]", style="yellow")
  elif "-cloud" in model_name:
  os.environ["OLLAMA_HOST"] = "https://ollama.com"
  self.client = AsyncClient("https://ollama.com")
+ console.print("[dim]Note: Cloud models require authentication[/dim]", style="yellow")
  else:
  os.environ["OLLAMA_HOST"] = "http://localhost:11434"
  self.client = AsyncClient()
@@ -549,7 +552,10 @@ class InlineInterface:
  # Check if first word is a crumb name
  first_word = stripped.split()[0].lower()
  if self.crumb_manager.has_crumb(first_word):
- await self._use_crumb(first_word)
+ # Extract additional arguments after the crumb name
+ parts = stripped.split()
+ args = parts[1:]
+ await self._use_crumb(first_word, args)
  return

  if stripped.lower() in {":run", "/run"}:
@@ -634,13 +640,16 @@ class InlineInterface:
  console.print()

  commands = [
- ("[bold]/help[/bold]", "Show this help message"),
+ ("[bold]Crumbs:[/bold]", ""),
+ ("[bold]/crumb help[/bold]", "Show detailed crumb commands help"),
  ("[bold]/crumbs[/bold]", "List all saved crumb shortcuts"),
  ("[bold]/crumb <name>[/bold]", "Save last result as a crumb"),
  ("[bold]/crumb add <name> <cmd>[/bold]", "Manually add a crumb"),
  ("[bold]/crumb del <name>[/bold]", "Delete a crumb"),
  ("[bold]<name>[/bold]", "Invoke a saved crumb"),
- ("[bold]/expand[/bold]", "Show full output of last shell command"),
+ ("", ""),
+ ("[bold]General:[/bold]", ""),
+ ("[bold]/help[/bold]", "Show this help message"),
  ("[bold]/model[/bold]", "Select a model interactively (local or cloud)"),
  (
  "[bold]/local[/bold]",
@@ -651,10 +660,8 @@ class InlineInterface:
  "[bold]/clear[/bold] or [bold]/reset[/bold]",
  "Clear conversation history",
  ),
- (
- "[bold]/run[/bold]",
- "Re-run the last suggested command",
- ),
+ ("[bold]/expand[/bold]", "Show full output of last shell command"),
+ ("[bold]/run[/bold]", "Re-run the last suggested command"),
  (
  "[bold]Empty Enter[/bold]",
  "Re-run suggested command or explain last output",
@@ -666,7 +673,13 @@ class InlineInterface:
  ]

  for command, description in commands:
- console.print(f"{command:<45} {description}")
+ if command:
+ console.print(f"{command:<45} {description}")
+ else:
+ console.print()
+
+ console.print()
+ console.print("[dim]Use /crumb help for detailed crumb command documentation[/dim]")

  console.print()

@@ -706,9 +719,15 @@ class InlineInterface:
  async def _handle_crumb_command(self, command: str) -> None:
  """Handle /crumb commands."""
  parts = command.split()
+
  if len(parts) == 1:
- # Just "/crumbs" - show list
- await self._show_crumbs()
+ # Just "/crumb" - show help
+ await self._show_crumb_help()
+ return
+
+ # Check for help flag or argument
+ if parts[1] in {"help", "--help", "-h"}:
+ await self._show_crumb_help()
  return

  if len(parts) == 2:
@@ -721,6 +740,7 @@ class InlineInterface:
  # "/crumb add <name> <...command>"
  if len(parts) < 4:
  console.print("Usage: /crumb add <name> <command>", style="yellow")
+ console.print("Example: /crumb add deploy docker build -t app:latest", style="dim")
  return
  name = parts[2]
  cmd = " ".join(parts[3:])
@@ -734,10 +754,50 @@ class InlineInterface:
  return

  console.print(
- "Usage: /crumb <name> | /crumb add <name> <cmd> | /crumb del <name>",
+ "Unknown crumb command. Use /crumb help for usage information.",
  style="yellow",
  )

+ async def _show_crumb_help(self) -> None:
+ """Display detailed help for crumb commands."""
+ console.print("\n[bold blue]Crumbs Help[/bold blue]")
+ console.print("=" * 40)
+ console.print()
+
+ console.print("[bold cyan]Commands:[/bold cyan]")
+ console.print()
+
+ commands = [
+ ("[bold]/crumbs[/bold]", "List all saved crumb shortcuts"),
+ ("[bold]/crumb help[/bold]", "Show this help message"),
+ ("[bold]/crumb <name>[/bold]", "Save the last AI-suggested command as a crumb"),
+ ("[bold]/crumb add <name> <cmd>[/bold]", "Manually add a crumb with a specific command"),
+ ("[bold]/crumb del <name>[/bold]", "Delete a saved crumb"),
+ ("[bold]<name>[/bold]", "Invoke a saved crumb by name"),
+ ]
+
+ for command, description in commands:
+ console.print(f"{command:<45} {description}")
+
+ console.print()
+ console.print("[bold cyan]Examples:[/bold cyan]")
+ console.print()
+ console.print(" [dim]# List all crumbs[/dim]")
+ console.print(" >> /crumbs")
+ console.print()
+ console.print(" [dim]# Save last command as 'deploy'[/dim]")
+ console.print(" >> /crumb deploy")
+ console.print()
+ console.print(" [dim]# Manually add a crumb[/dim]")
+ console.print(" >> /crumb add test-run pytest tests/ -v")
+ console.print()
+ console.print(" [dim]# Delete a crumb[/dim]")
+ console.print(" >> /crumb del deploy")
+ console.print()
+ console.print(" [dim]# Run a saved crumb[/dim]")
+ console.print(" >> test-run")
+ console.print()
+
  async def _save_crumb(self, name: str) -> None:
  """Save the last result as a crumb."""
  if not self.assistant.last_result:
@@ -819,8 +879,13 @@ class InlineInterface:
  else:
  console.print(f"Crumb '{name}' not found.", style="yellow")

- async def _use_crumb(self, name: str) -> None:
- """Recall and execute a saved crumb."""
+ async def _use_crumb(self, name: str, args: list[str] | None = None) -> None:
+ """Recall and execute a saved crumb.
+
+ Args:
+ name: Name of the crumb to execute
+ args: Optional list of arguments to replace ${VAR} placeholders in the command
+ """
  crumb = self.crumb_manager.get_crumb(name)
  if not crumb:
  console.print(f"Crumb '{name}' not found.", style="yellow")
@@ -829,9 +894,13 @@ class InlineInterface:
  explanation = crumb.get("explanation", "") or "No explanation"
  command = crumb.get("command", "") or "No command"

+ # Substitute placeholders with provided arguments
+ if args and command != "No command":
+ command = substitute_placeholders(command, args)
+
  console.print(f"\n[bold cyan]Crumb: {name}[/bold cyan]")
  console.print(f"Explanation: {explanation}", style="green")
- console.print(f"Command: ", style="cyan", end="")
+ console.print("Command: ", style="cyan", end="")
  console.print(command, highlight=False)

  if command and command != "No command":
@@ -975,7 +1044,33 @@ async def run_single_prompt(
  ) -> AssistantResult:
  if logger:
  logger.log_user(prompt)
- result = await rubber_ducky.send_prompt(prompt=prompt, code=code)
+ try:
+ result = await rubber_ducky.send_prompt(prompt=prompt, code=code)
+ except Exception as e:
+ error_msg = str(e)
+ if "unauthorized" in error_msg.lower() or "401" in error_msg:
+ console.print("\n[red]Authentication Error (401)[/red]")
+ console.print("You're trying to use a cloud model but don't have valid credentials.", style="yellow")
+
+ # Check if API key is set
+ api_key = os.environ.get("OLLAMA_API_KEY")
+ if api_key:
+ console.print("\nAn OLLAMA_API_KEY is set, but it appears invalid.", style="yellow")
+ else:
+ console.print("\n[bold]OLLAMA_API_KEY environment variable is not set.[/bold]", style="yellow")
+
+ console.print("\nOptions:", style="bold")
+ console.print(" 1. Use --local flag to access local models:", style="dim")
+ console.print(" ducky --local", style="cyan")
+ console.print(" 2. Select a local model with /local command", style="dim")
+ console.print(" 3. Set up Ollama cloud API credentials:", style="dim")
+ console.print(" export OLLAMA_API_KEY='your-api-key-here'", style="cyan")
+ console.print("\nGet your API key from: https://ollama.com/account/api-keys", style="dim")
+ console.print()
+ raise
+ else:
+ raise
+
  content = result.content or "(No content returned.)"
  console.print(content, style="green", highlight=False)
  if logger:
@@ -1037,7 +1132,7 @@ async def ducky() -> None:
  )
  parser.add_argument(
  "single_prompt",
- nargs="?",
+ nargs="*",
  help="Run a single prompt and copy the suggested command to clipboard",
  default=None,
  )
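The `nargs` switch above is what lets the CLI accept a crumb name followed by arguments (and unquoted prompts) in a single invocation. A minimal standalone sketch of the parsing difference, not code from the package (the prog name and argument values are made up):

```python
import argparse

# With nargs="*", the positional argument arrives as a list of words instead of a single
# string, so "ducky at feature backend develop" can later be treated either as a crumb
# name plus arguments or joined back into one prompt string.
parser = argparse.ArgumentParser(prog="ducky")
parser.add_argument("single_prompt", nargs="*", default=None)

words = parser.parse_args(["at", "feature", "backend", "develop"]).single_prompt
print(words)            # ['at', 'feature', 'backend', 'develop']
print(" ".join(words))  # 'at feature backend develop'
```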
@@ -1096,31 +1191,41 @@ async def ducky() -> None:

  # Handle crumb invocation mode
  crumb_manager = CrumbManager()
- if args.single_prompt and crumb_manager.has_crumb(args.single_prompt):
- crumb = crumb_manager.get_crumb(args.single_prompt)
- if crumb:
- explanation = crumb.get("explanation", "") or "No explanation"
- command = crumb.get("command", "") or "No command"
-
- console.print(f"\n[bold cyan]Crumb: {args.single_prompt}[/bold cyan]")
- console.print(f"Explanation: {explanation}", style="green")
- console.print(f"Command: ", style="cyan", end="")
- console.print(command, highlight=False)
-
- if command and command != "No command":
- # Execute the command
- await run_shell_and_print(
- rubber_ducky,
- command,
- logger=logger,
- history=rubber_ducky.messages,
- )
- return
+ if args.single_prompt:
+ first_arg = args.single_prompt[0]
+ if crumb_manager.has_crumb(first_arg):
+ # Extract crumb arguments (everything after the crumb name)
+ crumb_args = args.single_prompt[1:]
+
+ crumb = crumb_manager.get_crumb(first_arg)
+ if crumb:
+ explanation = crumb.get("explanation", "") or "No explanation"
+ command = crumb.get("command", "") or "No command"
+
+ # Substitute placeholders with provided arguments
+ if crumb_args and command != "No command":
+ command = substitute_placeholders(command, crumb_args)
+
+ console.print(f"\n[bold cyan]Crumb: {first_arg}[/bold cyan]")
+ console.print(f"Explanation: {explanation}", style="green")
+ console.print("Command: ", style="cyan", end="")
+ console.print(command, highlight=False)
+
+ if command and command != "No command":
+ # Execute the command
+ await run_shell_and_print(
+ rubber_ducky,
+ command,
+ logger=logger,
+ history=rubber_ducky.messages,
+ )
+ return

  # Handle single prompt mode
  if args.single_prompt:
+ prompt = " ".join(args.single_prompt)
  result = await run_single_prompt(
- rubber_ducky, args.single_prompt, code=code, logger=logger
+ rubber_ducky, prompt, code=code, logger=logger
  )
  if result.command:
  if args.yolo:
@@ -1137,6 +1242,35 @@ async def ducky() -> None:
  await interactive_session(rubber_ducky, logger=logger, code=code)


+ def substitute_placeholders(command: str, args: list[str]) -> str:
+ """Replace ${VAR} and $var placeholders in command with provided arguments.
+
+ Args:
+ command: The command string with placeholders
+ args: List of arguments to substitute (first arg replaces first placeholder, etc.)
+
+ Returns:
+ Command with placeholders replaced, falling back to env vars for unreplaced placeholders
+ """
+ result = command
+ arg_index = 0
+ placeholder_pattern = re.compile(r'\$\{([^}]+)\}|\$(\w+)')
+
+ def replace_placeholder(match: re.Match) -> str:
+ nonlocal arg_index
+ # Get the variable name from either ${VAR} or $var format
+ var_name = match.group(1) or match.group(2)
+ if arg_index < len(args):
+ value = args[arg_index]
+ arg_index += 1
+ return value
+ # Fallback to environment variable
+ return os.environ.get(var_name, match.group(0))
+
+ result = placeholder_pattern.sub(replace_placeholder, result)
+ return result
+
+
  def main() -> None:
  asyncio.run(ducky())
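For readers skimming the diff, here is a small standalone sketch of what the new `substitute_placeholders` helper does: positional arguments fill `${VAR}`/`$var` placeholders left to right, and any leftover placeholder falls back to an environment variable. This is an illustrative re-implementation mirroring the code above, not an import from the package; the example crumb and arguments come from the 1.6.1 README.

```python
import os
import re

# Same pattern as the diff above: ${VAR} is captured in group 1, bare $var in group 2.
PLACEHOLDER = re.compile(r"\$\{([^}]+)\}|\$(\w+)")

def substitute(command: str, args: list[str]) -> str:
    """Fill placeholders left to right from args; fall back to env vars, else leave as-is."""
    remaining = iter(args)

    def repl(match: re.Match) -> str:
        name = match.group(1) or match.group(2)
        try:
            return next(remaining)  # next positional argument wins
        except StopIteration:
            return os.environ.get(name, match.group(0))  # env var fallback, else keep the text

    return PLACEHOLDER.sub(repl, command)

# Crumb command "git worktree add \"../$var-$other\" -b $var3" invoked as: ducky at feature backend develop
print(substitute('git worktree add "../$var-$other" -b $var3', ["feature", "backend", "develop"]))
# -> git worktree add "../feature-backend" -b develop
```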
rubber_ducky-1.6.1.dist-info/METADATA ADDED
@@ -0,0 +1,317 @@
+ Metadata-Version: 2.4
+ Name: rubber-ducky
+ Version: 1.6.1
+ Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: colorama>=0.4.6
+ Requires-Dist: fastapi>=0.115.11
+ Requires-Dist: ollama>=0.6.0
+ Requires-Dist: openai>=1.60.2
+ Requires-Dist: prompt-toolkit>=3.0.48
+ Requires-Dist: rich>=13.9.4
+ Requires-Dist: termcolor>=2.5.0
+ Dynamic: license-file
+
+ # Rubber Ducky
+
+ Turn natural language into bash commands without leaving your terminal.
+
+ Rubber Ducky is an inline terminal companion that transforms your prompts into runnable shell commands. Paste multi-line context, get smart suggestions, and execute commands instantly.
+
+ ---
+
+ ## Quick Start
+
+ ```bash
+ # Install globally (recommended)
+ uv tool install rubber-ducky
+
+ # Run interactively
+ ducky
+
+ # Quick one-shot
+ ducky "list all files larger than 10MB in current directory"
+
+ # From CLI with options
+ ducky --model qwen3
+ ducky --directory src
+ ducky --local
+
+ # Or use uvx (requires -- separator)
+ uvx rubber-ducky -- --model qwen3
+ ```
+
+ Both `ducky` and `rubber-ducky` executables work identically.
+
+ ### Requirements
+
+ - [Ollama](https://ollama.com) (running locally or using cloud models)
+ - Python 3.10+
+
+ ---
+
+ ## Features
+
+ - **Natural to Shell** - Describe what you want, get the bash command
+ - **Model Flexibility** - Switch between local Ollama models and cloud models
+ - **Crumbs** - Save and reuse commands with argument substitution
+ - **Piped Input** - Pipe output from other commands directly to ducky
+ - **Interactive REPL** - Rich terminal experience with history and shortcuts
+ - **Code Context** - Preload project code for AI awareness
+ - **Clipboard Support** - Copy commands across macOS, Windows, and Linux
+
+ ---
+
+ ## Key Concepts
+
+ ### REPL (Interactive Mode)
+
+ Launch `ducky` to start an inline session:
+
+ ```
+ ducky
+ ```
+
+ **Key controls:**
+ - `Enter` - Submit prompt
+ - `Ctrl+J` - Insert newline (for multi-line prompts)
+ - `Empty Enter` - Rerun last command or explain shell output
+ - `Ctrl+R` - Re-run last suggested command
+ - `Ctrl+S` - Copy last command to clipboard
+ - `!<cmd>` - Run shell command immediately
+ - `Arrow keys` - Browse history
+ - `Ctrl+D` - Exit
+
+ ### Models
+
+ Rubber Ducky supports both local and cloud models:
+
+ - `/model` - Interactive model selection
+ - `/local` - List local models (localhost:11434)
+ - `/cloud` - List cloud models (ollama.com)
+ - Last used model is saved automatically
+
+ **Startup flags:**
+ - `--local` / `-l` - Use local Ollama with qwen3 default
+ - `--model <name>` / `-m` - Specify model directly
+
+ ### Crumbs
+
+ Crumbs are saved command shortcuts. Store frequently-used commands or complex workflows:
+
+ ```
+ >> How do I list all running Python processes?
+ ...
+ Suggested: ps aux | grep python | grep -v grep
+ >> /crumb pyprocs
+ Saved crumb 'pyprocs'!
+ ```
+
+ **Invoke crumb:**
+ ```
+ >> pyprocs
+ Crumb: pyprocs
+ Command: ps aux | grep python | grep -v grep
+ ...
+ ```
+
+ **With argument substitution:**
+ ```bash
+ # Crumb command: git worktree add "../$var-$other" -b $var3
+ ducky at feature backend develop
+ # Executes: git worktree add "../feature-backend" -b develop
+ ```
+
+ ---
+
+ ## Usage Guide
+
+ ### Interactive Mode
+
+ Default mode. Perfect for development sessions.
+
+ ```bash
+ ducky
+ ```
+
+ Load code context for better suggestions:
+
+ ```bash
+ ducky --directory src
+ ```
+
+ ### Single-Shot Mode
+
+ Get one command suggestion and exit.
+
+ ```bash
+ ducky "find all TODO comments in src/"
+ ```
+
+ Copy to clipboard automatically:
+
+ ```bash
+ ducky "build and run tests"
+ ```
+
+ ### Piped Input
+
+ Process text from other commands:
+
+ ```bash
+ cat error.log | ducky "what's wrong here?"
+ git diff | ducky "summarize these changes"
+ ```
+
+ ### Run Without Confirmation
+
+ Auto-execute suggested commands:
+
+ ```bash
+ ducky --yolo "restart the nginx service"
+ ```
+
+ ---
+
+ ## Crumbs Quick Reference
+
+ | Command | Description |
+ |---------|-------------|
+ | `/crumbs` | List all saved crumbs |
+ | `/crumb <name>` | Save last command as crumb |
+ | `/crumb add <name> <cmd>` | Manually add crumb |
+ | `/crumb del <name>` | Delete crumb |
+ | `<name>` | Execute crumb |
+ | `/crumb help` | Detailed crumb help |
+
+ **Argument Substitution:**
+
+ Crumbs support `${VAR}` and `$var` placeholder styles:
+
+ ```bash
+ # Create crumb with placeholders
+ git worktree add "../$var-$other" -b $var3
+
+ # Invoke with arguments
+ ducky at feature backend develop
+ ```
+
+ Both styles are interchangeable.
+
+ ---
+
+ ## Command Reference
+
+ ### Inline Commands
+
+ | Command | Action |
+ |---------|--------|
+ | `/help` | Show all commands |
+ | `/clear` / `/reset` | Clear conversation history |
+ | `/model` | Select model (interactive) |
+ | `/local` | List local models |
+ | `/cloud` | List cloud models |
+ | `/run` / `:run` | Re-run last command |
+ | `/expand` | Show full output of last shell command |
+
+ ### CLI Flags
+
+ | Flag | Description |
+ |------|-------------|
+ | `--directory <path>` / `-d` | Preload code from directory |
+ | `--model <name>` / `-m` | Specify Ollama model |
+ | `--local` / `-l` | Use local Ollama (qwen3 default) |
+ | `--yolo` / `-y` | Auto-run without confirmation |
+ | `<prompt>` | Single prompt mode (copied to clipboard) |
+
+ ---
+
+ ## Tips & Tricks
+
+ ### Efficient Workflows
+
+ ```bash
+ # Preload project context
+ ducky --directory src
+
+ # Reuse complex commands with crumbs
+ docker ps | ducky "kill all containers"
+ >> /crumb killall
+
+ # Chain commands
+ !ls -la
+ ducksy "find large files"
+
+ # Use history
+ [↑] Recall previous prompts
+ [↓] Navigate command history
+ ```
+
+ ### Keyboard Shortcuts Reference
+
+ | Key | Action |
+ |-----|--------|
+ | `Enter` | Submit prompt |
+ | `Ctrl+J` | Insert newline |
+ | `Empty Enter` | Rerun last command or explain |
+ | `Ctrl+R` | Re-run last suggested command |
+ | `Ctrl+S` | Copy to clipboard |
+ | `Ctrl+D` | Exit |
+ | `!cmd` | Run shell command directly |
+
+ ### Crumb Patterns
+
+ ```bash
+ # Save after complex command
+ >> docker-compose up -d && wait && docker-compose logs
+ >> /crumb start-logs
+
+ # Manually add with arguments
+ >> /crumb add deploy-prod docker build -t app:latest && docker push app:latest
+
+ # Use for common workflows
+ >> ls -la
+ find . -type f -name "*.py" | xargs wc -l
+ >> /crumb count-py
+ ```
+
+ ---
+
+ ## Storage
+
+ Rubber Ducky stores data in `~/.ducky/`:
+
+ | File | Purpose |
+ |------|---------|
+ | `prompt_history` | readline-compatible history |
+ | `conversation.log` | JSON log of all interactions |
+ | `config` | User preferences (last model) |
+ | `crumbs.json` | Saved crumb shortcuts |
+
+ Delete the entire directory for a fresh start.
+
+ ---
+
+ ## Development
+
+ ```bash
+ # Clone and setup
+ git clone <repo>
+ cd ducky
+ uv sync
+
+ # Run
+ uv run ducky --help
+ uv run ducky
+
+ # Lint
+ uv run ruff check .
+ ```
+
+ ---
+
+ ## License
+
+ MIT © 2023 Parth Sareen
rubber_ducky-1.6.1.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
  ducky/__init__.py,sha256=2vLhJxOuJ3lnIeg5rmF6xUvybUT5Qhjej6AS0BeBASY,60
  ducky/config.py,sha256=Lh7xTUYh4i8Gxgrl0oTYadZB_72Wy2BKIqLCcDQduOA,2116
  ducky/crumb.py,sha256=7BlyjD81-cZptYxQM97y6gOGdVDBF2qzxW0xbPqbspE,2693
- ducky/ducky.py,sha256=bAU-hck3T9ZomrWtmh8E9bu3WZ2p3ektNdm_sDhL8kE,41582
+ ducky/ducky.py,sha256=LMAp1ZV7dR-NR6uoSiUpfcanHXU1eRA55Q0jB_ViMwo,47374
  examples/POLLING_USER_GUIDE.md,sha256=rMEAczZhpgyJ9BgwHkN-SKwSdyas8nlw_CjpV7SFOLA,10685
  examples/mock-logs/info.txt,sha256=apJqEO__UM1R2_2x9MlQOA7XmxvLvbhRvOy-FAwrINo,258
  examples/mock-logs/mock-logs.sh,sha256=zM2JSaCR1eCQLlMvXDWjFnpxZTqrMpnFRa_SgNLPmBk,1132
- rubber_ducky-1.5.3.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
- rubber_ducky-1.5.3.dist-info/METADATA,sha256=3MwsGVph0L8spjytrZQx_E6St4gW1LxJlBcOIFLMZhk,6733
- rubber_ducky-1.5.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- rubber_ducky-1.5.3.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
- rubber_ducky-1.5.3.dist-info/top_level.txt,sha256=hid_mDkugR6XIeravFKuzcRPpuN_ylN3ejC_06Fmnb4,15
- rubber_ducky-1.5.3.dist-info/RECORD,,
+ rubber_ducky-1.6.1.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
+ rubber_ducky-1.6.1.dist-info/METADATA,sha256=lZZ9iHVNunf7cJuyN7lLQK9vA1WUBVX3SGGWG4c7Mok,6638
+ rubber_ducky-1.6.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ rubber_ducky-1.6.1.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
+ rubber_ducky-1.6.1.dist-info/top_level.txt,sha256=hid_mDkugR6XIeravFKuzcRPpuN_ylN3ejC_06Fmnb4,15
+ rubber_ducky-1.6.1.dist-info/RECORD,,
rubber_ducky-1.5.3.dist-info/METADATA DELETED
@@ -1,198 +0,0 @@
- Metadata-Version: 2.4
- Name: rubber-ducky
- Version: 1.5.3
- Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
- Requires-Python: >=3.10
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: colorama>=0.4.6
- Requires-Dist: fastapi>=0.115.11
- Requires-Dist: ollama>=0.6.0
- Requires-Dist: openai>=1.60.2
- Requires-Dist: prompt-toolkit>=3.0.48
- Requires-Dist: rich>=13.9.4
- Requires-Dist: termcolor>=2.5.0
- Dynamic: license-file
-
- # Rubber Ducky
-
- Rubber Ducky is an inline terminal companion that turns natural language prompts into runnable shell commands. Paste multi-line context, get a suggested command, and run it without leaving your terminal.
-
- ## Quick Start
-
- | Action | Command |
- | --- | --- |
- | Install globally | `uv tool install rubber-ducky` |
- | Run once | `uvx rubber-ducky -- --help` |
- | Local install | `uv pip install rubber-ducky` |
-
- Requirements:
- - [Ollama](https://ollama.com) running locally or use cloud models
- - Model available via Ollama (default: `glm-4.7:cloud`)
-
- ## Usage
-
- ```
- ducky # interactive inline session
- ducky --directory src # preload code from a directory
- ducky --model qwen3 # use a different Ollama model
- ducky --local # use local models with qwen3 default
- ```
-
- Both `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-ducky -- <args>` works as well.
-
- ### Inline Session (default)
-
- Launching `ducky` with no arguments opens the inline interface:
- - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts). Hitting **Enter on an empty prompt** reruns the latest suggested command; if none exists yet, it explains the most recent shell output.
- - **Ctrl+R** re-runs the last suggested command.
- - **Ctrl+S** copies the last suggested command to clipboard.
- - Prefix any line with **`!`** (e.g., `!ls -la`) to run a shell command immediately.
- - Arrow keys browse prompt history, backed by `~/.ducky/prompt_history`.
- - Every prompt, assistant response, and executed command is logged to `~/.ducky/conversation.log`.
- - Press **Ctrl+D** on an empty line to exit.
- - Non-interactive runs such as `cat prompt.txt | ducky` print one response (and suggested command) before exiting; if a TTY is available you'll be asked whether to run the suggested command immediately.
- - If `prompt_toolkit` is unavailable in your environment, Rubber Ducky falls back to a basic input loop (no history or shortcuts); install `prompt-toolkit>=3.0.48` to unlock the richer UI.
-
- `ducky --directory <path>` streams the contents of the provided directory to the assistant the next time you submit a prompt (the directory is read once at startup).
-
- ### Model Management
-
- Rubber Ducky now supports easy switching between local and cloud models:
- - **`/model`** - Interactive model selection between local and cloud models
- - **`/local`** - List and select from local models (localhost:11434)
- - **`/cloud`** - List and select from cloud models (ollama.com)
- - Last used model is automatically saved and loaded on startup
- - Type **`esc`** during model selection to cancel
-
- ### Additional Commands
-
- - **`/help`** - Show all available commands and shortcuts
- - **`/crumbs`** - List all saved crumb shortcuts
- - **`/crumb <name>`** - Save the last AI-suggested command as a named crumb
- - **`/crumb add <name> <command>`** - Manually add a crumb with a specific command
- - **`/crumb del <name>`** - Delete a saved crumb
- - **`<crumb-name>`** - Invoke a saved crumb (displays info and executes the command)
- - **`/clear`** or **`/reset`** - Clear conversation history
- - **`/run`** or **`:run`** - Re-run the last suggested command
-
- ## Crumbs
-
- Crumbs are saved command shortcuts that let you quickly reuse AI-generated bash commands without regenerating them each time. Perfect for frequently-used workflows or complex commands.
-
- ### Saving Crumbs
-
- When the AI suggests a command that you want to reuse:
-
- 1. Get a command suggestion from ducky
- 2. Save it immediately: `/crumb <name>`
- 3. Example:
- ```
- >> How do I list all Ollama processes?
- ...
- Suggested command: ps aux | grep -i ollama | grep -v grep
- >> /crumb ols
- Saved crumb 'ols'!
- Generating explanation...
- Explanation added: Finds and lists all running Ollama processes.
- ```
-
- The crumb is saved with:
- - The original command
- - An AI-generated one-line explanation
- - A timestamp
-
- ### Invoking Crumbs
-
- Simply type the crumb name in the REPL or use it as a CLI argument:
-
- **In REPL:**
- ```
- >> ols
-
- Crumb: ols
- Explanation: Finds and lists all running Ollama processes.
- Command: ps aux | grep -i ollama | grep -v grep
-
- $ ps aux | grep -i ollama | grep -v grep
- user123 12345 0.3 1.2 456789 98765 ? Sl 10:00 0:05 ollama serve
- ```
-
- **From CLI:**
- ```bash
- ducky ols # Runs the saved crumb and displays output
- ```
-
- When you invoke a crumb:
- 1. It displays the crumb name, explanation, and command
- 2. Automatically executes the command
- 3. Shows the output
-
- ### Managing Crumbs
-
- **List all crumbs:**
- ```bash
- >> /crumbs
- ```
-
- Output:
- ```
- Saved Crumbs
- =============
- ols | Finds and lists all running Ollama processes. | ps aux | grep -i ollama | grep -v grep
- test | Run tests and build project | pytest && python build.py
- deploy | Deploy to production | docker push app:latest
- ```
-
- **Manually add a crumb:**
- ```bash
- >> /crumb add deploy-prod docker build -t app:latest && docker push app:latest
- ```
-
- **Delete a crumb:**
- ```bash
- >> /crumb ols
- Deleted crumb 'ols'.
- ```
-
- ### Storage
-
- Crumbs are stored in `~/.ducky/crumbs.json` as JSON. Each crumb includes:
- - `prompt`: Original user prompt
- - `response`: AI's full response
- - `command`: The suggested bash command
- - `explanation`: AI-generated one-line summary
- - `created_at`: ISO timestamp
-
- **Example:**
- ```json
- {
- "ols": {
- "prompt": "How do I list all Ollama processes?",
- "response": "To list all running Ollama processes...",
- "command": "ps aux | grep -i ollama | grep -v grep",
- "explanation": "Finds and lists all running Ollama processes.",
- "created_at": "2024-01-05T10:30:00.000000+00:00"
- }
- }
- ```
-
- Delete `~/.ducky/crumbs.json` to clear all saved crumbs.
-
- ## Development (uv)
-
- ```
- uv sync
- uv run ducky --help
- ```
-
- `uv sync` creates a virtual environment and installs dependencies defined in `pyproject.toml` / `uv.lock`.
-
- ## Telemetry & Storage
-
- Rubber Ducky stores:
- - `~/.ducky/prompt_history`: readline-compatible history file.
- - `~/.ducky/conversation.log`: JSON lines with timestamps for prompts, assistant messages, and shell executions.
- - `~/.ducky/config`: User preferences including last selected model.
-
- No other telemetry is collected; delete the directory if you want a fresh slate.