rubber-ducky 1.6.0-py3-none-any.whl → 1.6.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ducky/ducky.py CHANGED
@@ -15,6 +15,7 @@ from pathlib import Path
15
15
  from textwrap import dedent
16
16
  from typing import Any, Dict, List
17
17
 
18
+ __version__ = "1.6.2"
18
19
 
19
20
  from .config import ConfigManager
20
21
  from .crumb import CrumbManager
@@ -554,7 +555,7 @@ class InlineInterface:
554
555
  if self.crumb_manager.has_crumb(first_word):
555
556
  # Extract additional arguments after the crumb name
556
557
  parts = stripped.split()
557
- args = parts[1:] if len(parts) > 1 else None
558
+ args = parts[1:]
558
559
  await self._use_crumb(first_word, args)
559
560
  return
560
561
 
@@ -1114,6 +1115,9 @@ async def interactive_session(
1114
1115
 
1115
1116
  async def ducky() -> None:
1116
1117
  parser = argparse.ArgumentParser()
1118
+ parser.add_argument(
1119
+ "--version", "-v", action="version", version=f"%(prog)s {__version__}"
1120
+ )
1117
1121
  parser.add_argument(
1118
1122
  "--directory", "-d", help="The directory to be processed", default=None
1119
1123
  )
@@ -1195,7 +1199,7 @@ async def ducky() -> None:
1195
1199
  first_arg = args.single_prompt[0]
1196
1200
  if crumb_manager.has_crumb(first_arg):
1197
1201
  # Extract crumb arguments (everything after the crumb name)
1198
- crumb_args = args.single_prompt[1:] if len(args.single_prompt) > 1 else None
1202
+ crumb_args = args.single_prompt[1:]
1199
1203
 
1200
1204
  crumb = crumb_manager.get_crumb(first_arg)
1201
1205
  if crumb:
@@ -1243,27 +1247,41 @@ async def ducky() -> None:
1243
1247
 
1244
1248
 
1245
1249
  def substitute_placeholders(command: str, args: list[str]) -> str:
1246
- """Replace ${VAR} placeholders in command with provided arguments.
1250
+ """Replace ${VAR} and $var placeholders in command with provided arguments.
1247
1251
 
1248
1252
  Args:
1249
1253
  command: The command string with placeholders
1250
- args: List of arguments to substitute (first arg replaces first placeholder, etc.)
1254
+ args: List of arguments to substitute. The first unique variable name
1255
+ maps to args[0], the second unique name maps to args[1], etc.
1251
1256
 
1252
1257
  Returns:
1253
- Command with placeholders replaced, falling back to env vars for unreplaced placeholders
1258
+ Command with placeholders replaced. Reused variable names get the
1259
+ same argument value. Falls back to env vars for unreplaced placeholders.
1254
1260
  """
1255
1261
  result = command
1256
- arg_index = 0
1257
- placeholder_pattern = re.compile(r'\$\{([^}]+)\}')
1258
-
1262
+ placeholder_pattern = re.compile(r'\$\{([^}]+)\}|\$(\w+)')
1263
+
1264
+ # First pass: collect unique variable names in order of appearance
1265
+ unique_vars = []
1266
+ seen_vars = set()
1267
+ for match in placeholder_pattern.finditer(command):
1268
+ var_name = match.group(1) or match.group(2)
1269
+ if var_name not in seen_vars:
1270
+ seen_vars.add(var_name)
1271
+ unique_vars.append(var_name)
1272
+
1273
+ # Map unique variable names to arguments
1274
+ var_map = {}
1275
+ for i, var_name in enumerate(unique_vars):
1276
+ if i < len(args):
1277
+ var_map[var_name] = args[i]
1278
+
1279
+ # Second pass: replace all placeholders using the map
1259
1280
  def replace_placeholder(match: re.Match) -> str:
1260
- nonlocal arg_index
1261
- if arg_index < len(args):
1262
- value = args[arg_index]
1263
- arg_index += 1
1264
- return value
1281
+ var_name = match.group(1) or match.group(2)
1282
+ if var_name in var_map:
1283
+ return var_map[var_name]
1265
1284
  # Fallback to environment variable
1266
- var_name = match.group(1)
1267
1285
  return os.environ.get(var_name, match.group(0))
1268
1286
 
1269
1287
  result = placeholder_pattern.sub(replace_placeholder, result)
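In user-facing terms, the reworked `substitute_placeholders` fills a crumb's placeholders by name rather than strictly by position: the first unique variable name takes the first argument, the second unique name the second, a repeated name reuses its value, and anything still unmatched falls back to an environment variable. A minimal sketch of that behavior, with a hypothetical crumb name, command, and arguments:

```bash
# Hypothetical crumb "mkbranch" saved with the command:
#   git checkout -b "$user/$ticket" && git push -u origin "$user/$ticket"
ducky mkbranch alice PROJ-42
# Unique names map to arguments in order of first appearance:
#   $user -> alice, $ticket -> PROJ-42; the second $user reuses "alice".
# Expands to: git checkout -b "alice/PROJ-42" && git push -u origin "alice/PROJ-42"
# A placeholder with no matching argument (e.g. ${HOME}) falls back to the environment.
```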
rubber_ducky-1.6.2.dist-info/METADATA ADDED
@@ -0,0 +1,317 @@
1
+ Metadata-Version: 2.4
2
+ Name: rubber-ducky
3
+ Version: 1.6.2
4
+ Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
5
+ Requires-Python: >=3.10
6
+ Description-Content-Type: text/markdown
7
+ License-File: LICENSE
8
+ Requires-Dist: colorama>=0.4.6
9
+ Requires-Dist: fastapi>=0.115.11
10
+ Requires-Dist: ollama>=0.6.0
11
+ Requires-Dist: openai>=1.60.2
12
+ Requires-Dist: prompt-toolkit>=3.0.48
13
+ Requires-Dist: rich>=13.9.4
14
+ Requires-Dist: termcolor>=2.5.0
15
+ Dynamic: license-file
16
+
17
+ # Rubber Ducky
18
+
19
+ Turn natural language into bash commands without leaving your terminal.
20
+
21
+ Rubber Ducky is an inline terminal companion that transforms your prompts into runnable shell commands. Paste multi-line context, get smart suggestions, and execute commands instantly.
22
+
23
+ ---
24
+
25
+ ## Quick Start
26
+
27
+ ```bash
28
+ # Install globally (recommended)
29
+ uv tool install rubber-ducky
30
+
31
+ # Run interactively
32
+ ducky
33
+
34
+ # Quick one-shot
35
+ ducky "list all files larger than 10MB in current directory"
36
+
37
+ # From CLI with options
38
+ ducky --model qwen3
39
+ ducky --directory src
40
+ ducky --local
41
+
42
+ # Or use uvx (requires -- separator)
43
+ uvx rubber-ducky -- --model qwen3
44
+ ```
45
+
46
+ Both `ducky` and `rubber-ducky` executables work identically.
47
+
48
+ ### Requirements
49
+
50
+ - [Ollama](https://ollama.com) (running locally or using cloud models)
51
+ - Python 3.10+
52
+
53
+ ---
54
+
55
+ ## Features
56
+
57
+ - **Natural to Shell** - Describe what you want, get the bash command
58
+ - **Model Flexibility** - Switch between local Ollama models and cloud models
59
+ - **Crumbs** - Save and reuse commands with argument substitution
60
+ - **Piped Input** - Pipe output from other commands directly to ducky
61
+ - **Interactive REPL** - Rich terminal experience with history and shortcuts
62
+ - **Code Context** - Preload project code for AI awareness
63
+ - **Clipboard Support** - Copy commands across macOS, Windows, and Linux
64
+
65
+ ---
66
+
67
+ ## Key Concepts
68
+
69
+ ### REPL (Interactive Mode)
70
+
71
+ Launch `ducky` to start an inline session:
72
+
73
+ ```
74
+ ducky
75
+ ```
76
+
77
+ **Key controls:**
78
+ - `Enter` - Submit prompt
79
+ - `Ctrl+J` - Insert newline (for multi-line prompts)
80
+ - `Empty Enter` - Rerun last command or explain shell output
81
+ - `Ctrl+R` - Re-run last suggested command
82
+ - `Ctrl+S` - Copy last command to clipboard
83
+ - `!<cmd>` - Run shell command immediately
84
+ - `Arrow keys` - Browse history
85
+ - `Ctrl+D` - Exit
86
+
87
+ ### Models
88
+
89
+ Rubber Ducky supports both local and cloud models:
90
+
91
+ - `/model` - Interactive model selection
92
+ - `/local` - List local models (localhost:11434)
93
+ - `/cloud` - List cloud models (ollama.com)
94
+ - Last used model is saved automatically
95
+
96
+ **Startup flags:**
97
+ - `--local` / `-l` - Use local Ollama with qwen3 default
98
+ - `--model <name>` / `-m` - Specify model directly
99
+
100
+ ### Crumbs
101
+
102
+ Crumbs are saved command shortcuts. Store frequently-used commands or complex workflows:
103
+
104
+ ```
105
+ >> How do I list all running Python processes?
106
+ ...
107
+ Suggested: ps aux | grep python | grep -v grep
108
+ >> /crumb pyprocs
109
+ Saved crumb 'pyprocs'!
110
+ ```
111
+
112
+ **Invoke crumb:**
113
+ ```
114
+ >> pyprocs
115
+ Crumb: pyprocs
116
+ Command: ps aux | grep python | grep -v grep
117
+ ...
118
+ ```
119
+
120
+ **With argument substitution:**
121
+ ```bash
122
+ # Crumb "at" saved with the command: git worktree add "../$var-$other" -b $var3
123
+ ducky at feature backend develop
124
+ # Executes: git worktree add "../feature-backend" -b develop
125
+ ```
126
+
127
+ ---
128
+
129
+ ## Usage Guide
130
+
131
+ ### Interactive Mode
132
+
133
+ Default mode. Perfect for development sessions.
134
+
135
+ ```bash
136
+ ducky
137
+ ```
138
+
139
+ Load code context for better suggestions:
140
+
141
+ ```bash
142
+ ducky --directory src
143
+ ```
144
+
145
+ ### Single-Shot Mode
146
+
147
+ Get one command suggestion and exit.
148
+
149
+ ```bash
150
+ ducky "find all TODO comments in src/"
151
+ ```
152
+
153
+ Copy to clipboard automatically:
154
+
155
+ ```bash
156
+ ducky "build and run tests"
157
+ ```
158
+
159
+ ### Piped Input
160
+
161
+ Process text from other commands:
162
+
163
+ ```bash
164
+ cat error.log | ducky "what's wrong here?"
165
+ git diff | ducky "summarize these changes"
166
+ ```
167
+
168
+ ### Run Without Confirmation
169
+
170
+ Auto-execute suggested commands:
171
+
172
+ ```bash
173
+ ducky --yolo "restart the nginx service"
174
+ ```
175
+
176
+ ---
177
+
178
+ ## Crumbs Quick Reference
179
+
180
+ | Command | Description |
181
+ |---------|-------------|
182
+ | `/crumbs` | List all saved crumbs |
183
+ | `/crumb <name>` | Save last command as crumb |
184
+ | `/crumb add <name> <cmd>` | Manually add crumb |
185
+ | `/crumb del <name>` | Delete crumb |
186
+ | `<name>` | Execute crumb |
187
+ | `/crumb help` | Detailed crumb help |
188
+
189
+ **Argument Substitution:**
190
+
191
+ Crumbs support `${VAR}` and `$var` placeholder styles:
192
+
193
+ ```bash
194
+ # Command saved under the crumb name "at":
195
+ git worktree add "../$var-$other" -b $var3
196
+
197
+ # Invoke with arguments
198
+ ducky at feature backend develop
199
+ ```
200
+
201
+ Both styles are interchangeable and can be mixed in the same command, as the sketch below shows.
202
+
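For example, a single crumb command can mix both styles, with each unique name bound to the next positional argument; the crumb name and repository URL below are hypothetical:

```bash
# Hypothetical crumb "clonecd" saved as: git clone "${REPO_URL}" && cd $dir
ducky clonecd https://github.com/example/app.git app
# ${REPO_URL} takes the first argument, $dir the second:
#   git clone "https://github.com/example/app.git" && cd app
```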
203
+ ---
204
+
205
+ ## Command Reference
206
+
207
+ ### Inline Commands
208
+
209
+ | Command | Action |
210
+ |---------|--------|
211
+ | `/help` | Show all commands |
212
+ | `/clear` / `/reset` | Clear conversation history |
213
+ | `/model` | Select model (interactive) |
214
+ | `/local` | List local models |
215
+ | `/cloud` | List cloud models |
216
+ | `/run` / `:run` | Re-run last command |
217
+ | `/expand` | Show full output of last shell command |
218
+
219
+ ### CLI Flags
220
+
221
+ | Flag | Description |
222
+ |------|-------------|
223
+ | `--directory <path>` / `-d` | Preload code from directory |
224
+ | `--model <name>` / `-m` | Specify Ollama model |
225
+ | `--local` / `-l` | Use local Ollama (qwen3 default) |
226
+ | `--yolo` / `-y` | Auto-run without confirmation |
227
+ | `<prompt>` | Single prompt mode (copied to clipboard) |
228
+
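The flags combine freely on one invocation; a brief sketch (the prompt text is illustrative):

```bash
# Preload context, pick a model, and auto-run the suggestion
ducky --directory src --model qwen3 --yolo "remove every __pycache__ directory"

# Print the installed version (added in 1.6.2)
ducky --version
```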
229
+ ---
230
+
231
+ ## Tips & Tricks
232
+
233
+ ### Efficient Workflows
234
+
235
+ ```bash
236
+ # Preload project context
237
+ ducky --directory src
238
+
239
+ # Reuse complex commands with crumbs
240
+ docker ps | ducky "kill all containers"
241
+ >> /crumb killall
242
+
243
+ # Chain commands
244
+ !ls -la
245
+ ducky "find large files"
246
+
247
+ # Use history
248
+ [↑] Recall previous prompts
249
+ [↓] Navigate command history
250
+ ```
251
+
252
+ ### Keyboard Shortcuts Reference
253
+
254
+ | Key | Action |
255
+ |-----|--------|
256
+ | `Enter` | Submit prompt |
257
+ | `Ctrl+J` | Insert newline |
258
+ | `Empty Enter` | Rerun last command or explain |
259
+ | `Ctrl+R` | Re-run last suggested command |
260
+ | `Ctrl+S` | Copy to clipboard |
261
+ | `Ctrl+D` | Exit |
262
+ | `!cmd` | Run shell command directly |
263
+
264
+ ### Crumb Patterns
265
+
266
+ ```bash
267
+ # Save after complex command
268
+ >> docker-compose up -d && wait && docker-compose logs
269
+ >> /crumb start-logs
270
+
271
+ # Manually add with arguments
272
+ >> /crumb add deploy-prod docker build -t app:latest && docker push app:latest
273
+
274
+ # Use for common workflows
275
+ >> count lines in every Python file in this project
276
+ Suggested: find . -type f -name "*.py" | xargs wc -l
277
+ >> /crumb count-py
278
+ ```
279
+
280
+ ---
281
+
282
+ ## Storage
283
+
284
+ Rubber Ducky stores data in `~/.ducky/`:
285
+
286
+ | File | Purpose |
287
+ |------|---------|
288
+ | `prompt_history` | readline-compatible history |
289
+ | `conversation.log` | JSON log of all interactions |
290
+ | `config` | User preferences (last model) |
291
+ | `crumbs.json` | Saved crumb shortcuts |
292
+
293
+ Delete the entire directory for a fresh start.
294
+
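A quick way to inspect or reset that state from the shell, using the paths listed above (the last command discards all history, config, and crumbs):

```bash
# See what Rubber Ducky has stored
ls ~/.ducky/
cat ~/.ducky/crumbs.json

# Wipe everything for a fresh start
rm -rf ~/.ducky
```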
295
+ ---
296
+
297
+ ## Development
298
+
299
+ ```bash
300
+ # Clone and setup
301
+ git clone <repo>
302
+ cd ducky
303
+ uv sync
304
+
305
+ # Run
306
+ uv run ducky --help
307
+ uv run ducky
308
+
309
+ # Lint
310
+ uv run ruff check .
311
+ ```
312
+
313
+ ---
314
+
315
+ ## License
316
+
317
+ MIT © 2023 Parth Sareen
rubber_ducky-1.6.2.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
1
1
  ducky/__init__.py,sha256=2vLhJxOuJ3lnIeg5rmF6xUvybUT5Qhjej6AS0BeBASY,60
2
2
  ducky/config.py,sha256=Lh7xTUYh4i8Gxgrl0oTYadZB_72Wy2BKIqLCcDQduOA,2116
3
3
  ducky/crumb.py,sha256=7BlyjD81-cZptYxQM97y6gOGdVDBF2qzxW0xbPqbspE,2693
4
- ducky/ducky.py,sha256=mPxbfk9LVPpYUz9nh6zsgCH7K2S-HV46FniIVMJbHm8,47342
4
+ ducky/ducky.py,sha256=J6UeoL07TUp6HODQmBHr_x-kSj6DZXnhm8RwRTwxNHs,48035
5
5
  examples/POLLING_USER_GUIDE.md,sha256=rMEAczZhpgyJ9BgwHkN-SKwSdyas8nlw_CjpV7SFOLA,10685
6
6
  examples/mock-logs/info.txt,sha256=apJqEO__UM1R2_2x9MlQOA7XmxvLvbhRvOy-FAwrINo,258
7
7
  examples/mock-logs/mock-logs.sh,sha256=zM2JSaCR1eCQLlMvXDWjFnpxZTqrMpnFRa_SgNLPmBk,1132
8
- rubber_ducky-1.6.0.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
9
- rubber_ducky-1.6.0.dist-info/METADATA,sha256=_Q7V5YvGVeDVBPPkeeKbw5hiIFM2u1R0TnQm5_9JTtU,6733
10
- rubber_ducky-1.6.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
11
- rubber_ducky-1.6.0.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
12
- rubber_ducky-1.6.0.dist-info/top_level.txt,sha256=hid_mDkugR6XIeravFKuzcRPpuN_ylN3ejC_06Fmnb4,15
13
- rubber_ducky-1.6.0.dist-info/RECORD,,
8
+ rubber_ducky-1.6.2.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
9
+ rubber_ducky-1.6.2.dist-info/METADATA,sha256=Z7Bck_ZmjiNnyGFxdgD_phsFMd_SK-SxUWPctCUuUp8,6638
10
+ rubber_ducky-1.6.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
11
+ rubber_ducky-1.6.2.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
12
+ rubber_ducky-1.6.2.dist-info/top_level.txt,sha256=hid_mDkugR6XIeravFKuzcRPpuN_ylN3ejC_06Fmnb4,15
13
+ rubber_ducky-1.6.2.dist-info/RECORD,,
rubber_ducky-1.6.0.dist-info/METADATA REMOVED
@@ -1,198 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: rubber-ducky
3
- Version: 1.6.0
4
- Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
5
- Requires-Python: >=3.10
6
- Description-Content-Type: text/markdown
7
- License-File: LICENSE
8
- Requires-Dist: colorama>=0.4.6
9
- Requires-Dist: fastapi>=0.115.11
10
- Requires-Dist: ollama>=0.6.0
11
- Requires-Dist: openai>=1.60.2
12
- Requires-Dist: prompt-toolkit>=3.0.48
13
- Requires-Dist: rich>=13.9.4
14
- Requires-Dist: termcolor>=2.5.0
15
- Dynamic: license-file
16
-
17
- # Rubber Ducky
18
-
19
- Rubber Ducky is an inline terminal companion that turns natural language prompts into runnable shell commands. Paste multi-line context, get a suggested command, and run it without leaving your terminal.
20
-
21
- ## Quick Start
22
-
23
- | Action | Command |
24
- | --- | --- |
25
- | Install globally | `uv tool install rubber-ducky` |
26
- | Run once | `uvx rubber-ducky -- --help` |
27
- | Local install | `uv pip install rubber-ducky` |
28
-
29
- Requirements:
30
- - [Ollama](https://ollama.com) running locally or use cloud models
31
- - Model available via Ollama (default: `glm-4.7:cloud`)
32
-
33
- ## Usage
34
-
35
- ```
36
- ducky # interactive inline session
37
- ducky --directory src # preload code from a directory
38
- ducky --model qwen3 # use a different Ollama model
39
- ducky --local # use local models with qwen3 default
40
- ```
41
-
42
- Both `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-ducky -- <args>` works as well.
43
-
44
- ### Inline Session (default)
45
-
46
- Launching `ducky` with no arguments opens the inline interface:
47
- - **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts). Hitting **Enter on an empty prompt** reruns the latest suggested command; if none exists yet, it explains the most recent shell output.
48
- - **Ctrl+R** re-runs the last suggested command.
49
- - **Ctrl+S** copies the last suggested command to clipboard.
50
- - Prefix any line with **`!`** (e.g., `!ls -la`) to run a shell command immediately.
51
- - Arrow keys browse prompt history, backed by `~/.ducky/prompt_history`.
52
- - Every prompt, assistant response, and executed command is logged to `~/.ducky/conversation.log`.
53
- - Press **Ctrl+D** on an empty line to exit.
54
- - Non-interactive runs such as `cat prompt.txt | ducky` print one response (and suggested command) before exiting; if a TTY is available you'll be asked whether to run the suggested command immediately.
55
- - If `prompt_toolkit` is unavailable in your environment, Rubber Ducky falls back to a basic input loop (no history or shortcuts); install `prompt-toolkit>=3.0.48` to unlock the richer UI.
56
-
57
- `ducky --directory <path>` streams the contents of the provided directory to the assistant the next time you submit a prompt (the directory is read once at startup).
58
-
59
- ### Model Management
60
-
61
- Rubber Ducky now supports easy switching between local and cloud models:
62
- - **`/model`** - Interactive model selection between local and cloud models
63
- - **`/local`** - List and select from local models (localhost:11434)
64
- - **`/cloud`** - List and select from cloud models (ollama.com)
65
- - Last used model is automatically saved and loaded on startup
66
- - Type **`esc`** during model selection to cancel
67
-
68
- ### Additional Commands
69
-
70
- - **`/help`** - Show all available commands and shortcuts
71
- - **`/crumbs`** - List all saved crumb shortcuts
72
- - **`/crumb <name>`** - Save the last AI-suggested command as a named crumb
73
- - **`/crumb add <name> <command>`** - Manually add a crumb with a specific command
74
- - **`/crumb del <name>`** - Delete a saved crumb
75
- - **`<crumb-name>`** - Invoke a saved crumb (displays info and executes the command)
76
- - **`/clear`** or **`/reset`** - Clear conversation history
77
- - **`/run`** or **`:run`** - Re-run the last suggested command
78
-
79
- ## Crumbs
80
-
81
- Crumbs are saved command shortcuts that let you quickly reuse AI-generated bash commands without regenerating them each time. Perfect for frequently-used workflows or complex commands.
82
-
83
- ### Saving Crumbs
84
-
85
- When the AI suggests a command that you want to reuse:
86
-
87
- 1. Get a command suggestion from ducky
88
- 2. Save it immediately: `/crumb <name>`
89
- 3. Example:
90
- ```
91
- >> How do I list all Ollama processes?
92
- ...
93
- Suggested command: ps aux | grep -i ollama | grep -v grep
94
- >> /crumb ols
95
- Saved crumb 'ols'!
96
- Generating explanation...
97
- Explanation added: Finds and lists all running Ollama processes.
98
- ```
99
-
100
- The crumb is saved with:
101
- - The original command
102
- - An AI-generated one-line explanation
103
- - A timestamp
104
-
105
- ### Invoking Crumbs
106
-
107
- Simply type the crumb name in the REPL or use it as a CLI argument:
108
-
109
- **In REPL:**
110
- ```
111
- >> ols
112
-
113
- Crumb: ols
114
- Explanation: Finds and lists all running Ollama processes.
115
- Command: ps aux | grep -i ollama | grep -v grep
116
-
117
- $ ps aux | grep -i ollama | grep -v grep
118
- user123 12345 0.3 1.2 456789 98765 ? Sl 10:00 0:05 ollama serve
119
- ```
120
-
121
- **From CLI:**
122
- ```bash
123
- ducky ols # Runs the saved crumb and displays output
124
- ```
125
-
126
- When you invoke a crumb:
127
- 1. It displays the crumb name, explanation, and command
128
- 2. Automatically executes the command
129
- 3. Shows the output
130
-
131
- ### Managing Crumbs
132
-
133
- **List all crumbs:**
134
- ```bash
135
- >> /crumbs
136
- ```
137
-
138
- Output:
139
- ```
140
- Saved Crumbs
141
- =============
142
- ols | Finds and lists all running Ollama processes. | ps aux | grep -i ollama | grep -v grep
143
- test | Run tests and build project | pytest && python build.py
144
- deploy | Deploy to production | docker push app:latest
145
- ```
146
-
147
- **Manually add a crumb:**
148
- ```bash
149
- >> /crumb add deploy-prod docker build -t app:latest && docker push app:latest
150
- ```
151
-
152
- **Delete a crumb:**
153
- ```bash
154
- >> /crumb ols
155
- Deleted crumb 'ols'.
156
- ```
157
-
158
- ### Storage
159
-
160
- Crumbs are stored in `~/.ducky/crumbs.json` as JSON. Each crumb includes:
161
- - `prompt`: Original user prompt
162
- - `response`: AI's full response
163
- - `command`: The suggested bash command
164
- - `explanation`: AI-generated one-line summary
165
- - `created_at`: ISO timestamp
166
-
167
- **Example:**
168
- ```json
169
- {
170
- "ols": {
171
- "prompt": "How do I list all Ollama processes?",
172
- "response": "To list all running Ollama processes...",
173
- "command": "ps aux | grep -i ollama | grep -v grep",
174
- "explanation": "Finds and lists all running Ollama processes.",
175
- "created_at": "2024-01-05T10:30:00.000000+00:00"
176
- }
177
- }
178
- ```
179
-
180
- Delete `~/.ducky/crumbs.json` to clear all saved crumbs.
181
-
182
- ## Development (uv)
183
-
184
- ```
185
- uv sync
186
- uv run ducky --help
187
- ```
188
-
189
- `uv sync` creates a virtual environment and installs dependencies defined in `pyproject.toml` / `uv.lock`.
190
-
191
- ## Telemetry & Storage
192
-
193
- Rubber Ducky stores:
194
- - `~/.ducky/prompt_history`: readline-compatible history file.
195
- - `~/.ducky/conversation.log`: JSON lines with timestamps for prompts, assistant messages, and shell executions.
196
- - `~/.ducky/config`: User preferences including last selected model.
197
-
198
- No other telemetry is collected; delete the directory if you want a fresh slate.