rubber-ducky 1.4.0__tar.gz → 1.5.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rubber_ducky-1.5.1/PKG-INFO +198 -0
- rubber_ducky-1.5.1/README.md +182 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/ducky/config.py +3 -3
- rubber_ducky-1.5.1/ducky/crumb.py +84 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/ducky/ducky.py +210 -489
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/pyproject.toml +1 -1
- rubber_ducky-1.5.1/rubber_ducky.egg-info/PKG-INFO +198 -0
- rubber_ducky-1.5.1/rubber_ducky.egg-info/SOURCES.txt +17 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/rubber_ducky.egg-info/top_level.txt +0 -1
- rubber_ducky-1.4.0/PKG-INFO +0 -210
- rubber_ducky-1.4.0/README.md +0 -194
- rubber_ducky-1.4.0/crumbs/disk-usage/disk-usage.sh +0 -12
- rubber_ducky-1.4.0/crumbs/disk-usage/info.txt +0 -3
- rubber_ducky-1.4.0/crumbs/git-log/git-log.sh +0 -24
- rubber_ducky-1.4.0/crumbs/git-log/info.txt +0 -3
- rubber_ducky-1.4.0/crumbs/git-status/git-status.sh +0 -21
- rubber_ducky-1.4.0/crumbs/git-status/info.txt +0 -3
- rubber_ducky-1.4.0/crumbs/process-list/info.txt +0 -3
- rubber_ducky-1.4.0/crumbs/process-list/process-list.sh +0 -20
- rubber_ducky-1.4.0/crumbs/recent-files/info.txt +0 -3
- rubber_ducky-1.4.0/crumbs/recent-files/recent-files.sh +0 -13
- rubber_ducky-1.4.0/crumbs/system-health/info.txt +0 -3
- rubber_ducky-1.4.0/crumbs/system-health/system-health.sh +0 -58
- rubber_ducky-1.4.0/rubber_ducky.egg-info/PKG-INFO +0 -210
- rubber_ducky-1.4.0/rubber_ducky.egg-info/SOURCES.txt +0 -28
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/LICENSE +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/MANIFEST.in +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/ducky/__init__.py +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/examples/POLLING_USER_GUIDE.md +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/examples/mock-logs/info.txt +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/examples/mock-logs/mock-logs.sh +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/rubber_ducky.egg-info/dependency_links.txt +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/rubber_ducky.egg-info/entry_points.txt +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/rubber_ducky.egg-info/requires.txt +0 -0
- {rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/setup.cfg +0 -0
rubber_ducky-1.5.1/PKG-INFO
@@ -0,0 +1,198 @@
+Metadata-Version: 2.4
+Name: rubber-ducky
+Version: 1.5.1
+Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: colorama>=0.4.6
+Requires-Dist: fastapi>=0.115.11
+Requires-Dist: ollama>=0.6.0
+Requires-Dist: openai>=1.60.2
+Requires-Dist: prompt-toolkit>=3.0.48
+Requires-Dist: rich>=13.9.4
+Requires-Dist: termcolor>=2.5.0
+Dynamic: license-file
+
+# Rubber Ducky
+
+Rubber Ducky is an inline terminal companion that turns natural language prompts into runnable shell commands. Paste multi-line context, get a suggested command, and run it without leaving your terminal.
+
+## Quick Start
+
+| Action | Command |
+| --- | --- |
+| Install globally | `uv tool install rubber-ducky` |
+| Run once | `uvx rubber-ducky -- --help` |
+| Local install | `uv pip install rubber-ducky` |
+
+Requirements:
+- [Ollama](https://ollama.com) running locally, or access to cloud models
+- A model available via Ollama (default: `glm-4.7:cloud`)
+
+## Usage
+
+```
+ducky                  # interactive inline session
+ducky --directory src  # preload code from a directory
+ducky --model qwen3    # use a different Ollama model
+ducky --local          # use local models with qwen3 default
+```
+
+Both the `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-ducky -- <args>` works as well.
+
+### Inline Session (default)
+
+Launching `ducky` with no arguments opens the inline interface:
+- **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts). Pressing **Enter** on an empty prompt reruns the latest suggested command; if none exists yet, it explains the most recent shell output.
+- **Ctrl+R** re-runs the last suggested command.
+- **Ctrl+S** copies the last suggested command to the clipboard.
+- Prefix any line with **`!`** (e.g., `!ls -la`) to run a shell command immediately.
+- Arrow keys browse prompt history, backed by `~/.ducky/prompt_history`.
+- Every prompt, assistant response, and executed command is logged to `~/.ducky/conversation.log`.
+- Press **Ctrl+D** on an empty line to exit.
+- Non-interactive runs such as `cat prompt.txt | ducky` print one response (and suggested command) before exiting; if a TTY is available, you'll be asked whether to run the suggested command immediately.
+- If `prompt_toolkit` is unavailable in your environment, Rubber Ducky falls back to a basic input loop (no history or shortcuts); install `prompt-toolkit>=3.0.48` to unlock the richer UI.
+
+`ducky --directory <path>` streams the contents of the provided directory to the assistant the next time you submit a prompt (the directory is read once at startup).
+
+### Model Management
+
+Rubber Ducky now supports easy switching between local and cloud models:
+- **`/model`** - Interactive model selection between local and cloud models
+- **`/local`** - List and select from local models (localhost:11434)
+- **`/cloud`** - List and select from cloud models (ollama.com)
+- The last used model is automatically saved and loaded on startup
+- Type **`esc`** during model selection to cancel
+
+### Additional Commands
+
+- **`/help`** - Show all available commands and shortcuts
+- **`/crumbs`** - List all saved crumb shortcuts
+- **`/crumb <name>`** - Save the last AI-suggested command as a named crumb
+- **`/crumb add <name> <command>`** - Manually add a crumb with a specific command
+- **`/crumb del <name>`** - Delete a saved crumb
+- **`<crumb-name>`** - Invoke a saved crumb (displays info and executes the command)
+- **`/clear`** or **`/reset`** - Clear conversation history
+- **`/run`** or **`:run`** - Re-run the last suggested command
+
+## Crumbs
+
+Crumbs are saved command shortcuts that let you quickly reuse AI-generated bash commands without regenerating them each time. Perfect for frequently used workflows or complex commands.
+
+### Saving Crumbs
+
+When the AI suggests a command that you want to reuse:
+
+1. Get a command suggestion from ducky
+2. Save it immediately: `/crumb <name>`
+3. Example:
+```
+>> How do I list all Ollama processes?
+...
+Suggested command: ps aux | grep -i ollama | grep -v grep
+>> /crumb ols
+Saved crumb 'ols'!
+Generating explanation...
+Explanation added: Finds and lists all running Ollama processes.
+```
+
+The crumb is saved with:
+- The original command
+- An AI-generated one-line explanation
+- A timestamp
+
+### Invoking Crumbs
+
+Simply type the crumb name in the REPL or use it as a CLI argument:
+
+**In REPL:**
+```
+>> ols
+
+Crumb: ols
+Explanation: Finds and lists all running Ollama processes.
+Command: ps aux | grep -i ollama | grep -v grep
+
+$ ps aux | grep -i ollama | grep -v grep
+user123  12345  0.3  1.2 456789 98765 ?  Sl  10:00  0:05 ollama serve
+```
+
+**From CLI:**
+```bash
+ducky ols  # Runs the saved crumb and displays output
+```
+
+When you invoke a crumb:
+1. It displays the crumb name, explanation, and command
+2. It automatically executes the command
+3. It shows the output
+
+### Managing Crumbs
+
+**List all crumbs:**
+```bash
+>> /crumbs
+```
+
+Output:
+```
+Saved Crumbs
+=============
+ols | Finds and lists all running Ollama processes. | ps aux | grep -i ollama | grep -v grep
+test | Run tests and build project | pytest && python build.py
+deploy | Deploy to production | docker push app:latest
+```
+
+**Manually add a crumb:**
+```bash
+>> /crumb add deploy-prod docker build -t app:latest && docker push app:latest
+```
+
+**Delete a crumb:**
+```bash
+>> /crumb del ols
+Deleted crumb 'ols'.
+```
+
+### Storage
+
+Crumbs are stored in `~/.ducky/crumbs.json` as JSON. Each crumb includes:
+- `prompt`: Original user prompt
+- `response`: AI's full response
+- `command`: The suggested bash command
+- `explanation`: AI-generated one-line summary
+- `created_at`: ISO timestamp
+
+**Example:**
+```json
+{
+  "ols": {
+    "prompt": "How do I list all Ollama processes?",
+    "response": "To list all running Ollama processes...",
+    "command": "ps aux | grep -i ollama | grep -v grep",
+    "explanation": "Finds and lists all running Ollama processes.",
+    "created_at": "2024-01-05T10:30:00.000000+00:00"
+  }
+}
+```
+
+Delete `~/.ducky/crumbs.json` to clear all saved crumbs.
+
+## Development (uv)
+
+```
+uv sync
+uv run ducky --help
+```
+
+`uv sync` creates a virtual environment and installs the dependencies defined in `pyproject.toml` / `uv.lock`.
+
+## Telemetry & Storage
+
+Rubber Ducky stores:
+- `~/.ducky/prompt_history`: readline-compatible history file.
+- `~/.ducky/conversation.log`: JSON lines with timestamps for prompts, assistant messages, and shell executions.
+- `~/.ducky/config`: user preferences, including the last selected model.
+
+No other telemetry is collected; delete the directory if you want a fresh slate.
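The README above describes `~/.ducky/conversation.log` as a JSON-lines file. A minimal sketch of inspecting such a log, assuming each line is a self-contained JSON object (the exact field names are not shown in this diff; `timestamp` and `role` below are hypothetical):

```python
import json
from pathlib import Path

LOG_PATH = Path.home() / ".ducky" / "conversation.log"


def read_log_entries(path: Path = LOG_PATH) -> list[dict]:
    """Parse a JSON-lines log, skipping blank or malformed lines."""
    entries: list[dict] = []
    if not path.exists():
        return entries
    for line in path.read_text().splitlines():
        line = line.strip()
        if not line:
            continue
        try:
            entries.append(json.loads(line))
        except json.JSONDecodeError:
            continue  # tolerate partially written lines
    return entries


for entry in read_log_entries():
    # Hypothetical field names, for illustration only
    print(entry.get("timestamp"), entry.get("role"))
```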
rubber_ducky-1.5.1/README.md
@@ -0,0 +1,182 @@
+# Rubber Ducky
+
+Rubber Ducky is an inline terminal companion that turns natural language prompts into runnable shell commands. Paste multi-line context, get a suggested command, and run it without leaving your terminal.
+
+## Quick Start
+
+| Action | Command |
+| --- | --- |
+| Install globally | `uv tool install rubber-ducky` |
+| Run once | `uvx rubber-ducky -- --help` |
+| Local install | `uv pip install rubber-ducky` |
+
+Requirements:
+- [Ollama](https://ollama.com) running locally, or access to cloud models
+- A model available via Ollama (default: `glm-4.7:cloud`)
+
+## Usage
+
+```
+ducky                  # interactive inline session
+ducky --directory src  # preload code from a directory
+ducky --model qwen3    # use a different Ollama model
+ducky --local          # use local models with qwen3 default
+```
+
+Both the `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-ducky -- <args>` works as well.
+
+### Inline Session (default)
+
+Launching `ducky` with no arguments opens the inline interface:
+- **Enter** submits; **Ctrl+J** inserts a newline (helpful when crafting multi-line prompts). Pressing **Enter** on an empty prompt reruns the latest suggested command; if none exists yet, it explains the most recent shell output.
+- **Ctrl+R** re-runs the last suggested command.
+- **Ctrl+S** copies the last suggested command to the clipboard.
+- Prefix any line with **`!`** (e.g., `!ls -la`) to run a shell command immediately.
+- Arrow keys browse prompt history, backed by `~/.ducky/prompt_history`.
+- Every prompt, assistant response, and executed command is logged to `~/.ducky/conversation.log`.
+- Press **Ctrl+D** on an empty line to exit.
+- Non-interactive runs such as `cat prompt.txt | ducky` print one response (and suggested command) before exiting; if a TTY is available, you'll be asked whether to run the suggested command immediately.
+- If `prompt_toolkit` is unavailable in your environment, Rubber Ducky falls back to a basic input loop (no history or shortcuts); install `prompt-toolkit>=3.0.48` to unlock the richer UI.
+
+`ducky --directory <path>` streams the contents of the provided directory to the assistant the next time you submit a prompt (the directory is read once at startup).
+
+### Model Management
+
+Rubber Ducky now supports easy switching between local and cloud models:
+- **`/model`** - Interactive model selection between local and cloud models
+- **`/local`** - List and select from local models (localhost:11434)
+- **`/cloud`** - List and select from cloud models (ollama.com)
+- The last used model is automatically saved and loaded on startup
+- Type **`esc`** during model selection to cancel
+
+### Additional Commands
+
+- **`/help`** - Show all available commands and shortcuts
+- **`/crumbs`** - List all saved crumb shortcuts
+- **`/crumb <name>`** - Save the last AI-suggested command as a named crumb
+- **`/crumb add <name> <command>`** - Manually add a crumb with a specific command
+- **`/crumb del <name>`** - Delete a saved crumb
+- **`<crumb-name>`** - Invoke a saved crumb (displays info and executes the command)
+- **`/clear`** or **`/reset`** - Clear conversation history
+- **`/run`** or **`:run`** - Re-run the last suggested command
+
+## Crumbs
+
+Crumbs are saved command shortcuts that let you quickly reuse AI-generated bash commands without regenerating them each time. Perfect for frequently used workflows or complex commands.
+
+### Saving Crumbs
+
+When the AI suggests a command that you want to reuse:
+
+1. Get a command suggestion from ducky
+2. Save it immediately: `/crumb <name>`
+3. Example:
+```
+>> How do I list all Ollama processes?
+...
+Suggested command: ps aux | grep -i ollama | grep -v grep
+>> /crumb ols
+Saved crumb 'ols'!
+Generating explanation...
+Explanation added: Finds and lists all running Ollama processes.
+```
+
+The crumb is saved with:
+- The original command
+- An AI-generated one-line explanation
+- A timestamp
+
+### Invoking Crumbs
+
+Simply type the crumb name in the REPL or use it as a CLI argument:
+
+**In REPL:**
+```
+>> ols
+
+Crumb: ols
+Explanation: Finds and lists all running Ollama processes.
+Command: ps aux | grep -i ollama | grep -v grep
+
+$ ps aux | grep -i ollama | grep -v grep
+user123  12345  0.3  1.2 456789 98765 ?  Sl  10:00  0:05 ollama serve
+```
+
+**From CLI:**
+```bash
+ducky ols  # Runs the saved crumb and displays output
+```
+
+When you invoke a crumb:
+1. It displays the crumb name, explanation, and command
+2. It automatically executes the command
+3. It shows the output
+
+### Managing Crumbs
+
+**List all crumbs:**
+```bash
+>> /crumbs
+```
+
+Output:
+```
+Saved Crumbs
+=============
+ols | Finds and lists all running Ollama processes. | ps aux | grep -i ollama | grep -v grep
+test | Run tests and build project | pytest && python build.py
+deploy | Deploy to production | docker push app:latest
+```
+
+**Manually add a crumb:**
+```bash
+>> /crumb add deploy-prod docker build -t app:latest && docker push app:latest
+```
+
+**Delete a crumb:**
+```bash
+>> /crumb del ols
+Deleted crumb 'ols'.
+```
+
+### Storage
+
+Crumbs are stored in `~/.ducky/crumbs.json` as JSON. Each crumb includes:
+- `prompt`: Original user prompt
+- `response`: AI's full response
+- `command`: The suggested bash command
+- `explanation`: AI-generated one-line summary
+- `created_at`: ISO timestamp
+
+**Example:**
+```json
+{
+  "ols": {
+    "prompt": "How do I list all Ollama processes?",
+    "response": "To list all running Ollama processes...",
+    "command": "ps aux | grep -i ollama | grep -v grep",
+    "explanation": "Finds and lists all running Ollama processes.",
+    "created_at": "2024-01-05T10:30:00.000000+00:00"
+  }
+}
+```
+
+Delete `~/.ducky/crumbs.json` to clear all saved crumbs.
+
+## Development (uv)
+
+```
+uv sync
+uv run ducky --help
+```
+
+`uv sync` creates a virtual environment and installs the dependencies defined in `pyproject.toml` / `uv.lock`.
+
+## Telemetry & Storage
+
+Rubber Ducky stores:
+- `~/.ducky/prompt_history`: readline-compatible history file.
+- `~/.ducky/conversation.log`: JSON lines with timestamps for prompts, assistant messages, and shell executions.
+- `~/.ducky/config`: user preferences, including the last selected model.
+
+No other telemetry is collected; delete the directory if you want a fresh slate.
{rubber_ducky-1.4.0 → rubber_ducky-1.5.1}/ducky/config.py
@@ -17,7 +17,7 @@ class ConfigManager:
     def load_config(self) -> Dict[str, Any]:
         """Load configuration from file, returning defaults if not found."""
         default_config = {
-            "last_model": "
+            "last_model": "glm-4.7:cloud",
             "last_host": "https://ollama.com"
         }

@@ -45,12 +45,12 @@ class ConfigManager:

     def get_last_model(self) -> tuple[str, str]:
         """Get the last used model and host.
-
+
         Returns:
             Tuple of (model_name, host)
         """
         config = self.load_config()
-        return config.get("last_model", "
+        return config.get("last_model", "glm-4.7:cloud"), config.get("last_host", "https://ollama.com")

     def save_last_model(self, model_name: str, host: str) -> None:
         """Save the last used model and host."""
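The hunks above show only the changed region of `ducky/config.py`; the removed lines are truncated in this rendering. A self-contained sketch of the defaulting behavior the new lines implement, assuming `~/.ducky/config` holds a flat JSON object (an assumption; the file's serialization format is not visible in this diff):

```python
import json
from pathlib import Path

CONFIG_PATH = Path.home() / ".ducky" / "config"
DEFAULTS = {"last_model": "glm-4.7:cloud", "last_host": "https://ollama.com"}


def get_last_model(path: Path = CONFIG_PATH) -> tuple[str, str]:
    """Return (model_name, host), falling back to the 1.5.1 defaults."""
    try:
        config = json.loads(path.read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        config = {}  # missing or unreadable config: use defaults
    return (
        config.get("last_model", DEFAULTS["last_model"]),
        config.get("last_host", DEFAULTS["last_host"]),
    )


print(get_last_model())  # ('glm-4.7:cloud', 'https://ollama.com') on a fresh install
```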
rubber_ducky-1.5.1/ducky/crumb.py
@@ -0,0 +1,84 @@
+import json
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Dict, Any, Optional
+
+
+class CrumbManager:
+    """Manages crumb storage for command shortcuts."""
+
+    def __init__(self, config_dir: Optional[Path] = None):
+        if config_dir is None:
+            config_dir = Path.home() / ".ducky"
+        self.config_dir = config_dir
+        self.crumbs_file = self.config_dir / "crumbs.json"
+        self.config_dir.mkdir(parents=True, exist_ok=True)
+
+    def load_crumbs(self) -> Dict[str, Any]:
+        """Load crumbs from JSON file, returning empty dict if not found."""
+        if not self.crumbs_file.exists():
+            return {}
+
+        try:
+            with open(self.crumbs_file, "r") as f:
+                return json.load(f)
+        except (json.JSONDecodeError, IOError):
+            return {}
+
+    def save_crumbs(self, crumbs: Dict[str, Any]) -> None:
+        """Save crumbs to JSON file."""
+        try:
+            with open(self.crumbs_file, "w") as f:
+                json.dump(crumbs, f, indent=2)
+        except IOError as e:
+            print(f"Warning: Could not save crumbs: {e}")
+
+    def save_crumb(
+        self,
+        name: str,
+        prompt: str,
+        response: str,
+        command: str,
+    ) -> None:
+        """Add or update a crumb."""
+        crumbs = self.load_crumbs()
+        crumbs[name] = {
+            "prompt": prompt,
+            "response": response,
+            "command": command,
+            "explanation": "",
+            "created_at": datetime.now(UTC).isoformat(),
+        }
+        self.save_crumbs(crumbs)
+
+    def get_crumb(self, name: str) -> Optional[Dict[str, Any]]:
+        """Retrieve a crumb by name."""
+        crumbs = self.load_crumbs()
+        return crumbs.get(name)
+
+    def list_crumbs(self) -> Dict[str, Any]:
+        """Return all crumbs."""
+        return self.load_crumbs()
+
+    def delete_crumb(self, name: str) -> bool:
+        """Remove a crumb. Returns True if deleted, False if not found."""
+        crumbs = self.load_crumbs()
+        if name in crumbs:
+            del crumbs[name]
+            self.save_crumbs(crumbs)
+            return True
+        return False
+
+    def update_explanation(self, name: str, explanation: str) -> bool:
+        """Update explanation for a crumb. Returns True if updated, False if not found."""
+        crumbs = self.load_crumbs()
+        if name in crumbs:
+            crumbs[name]["explanation"] = explanation
+            self.save_crumbs(crumbs)
+            return True
+        return False
+
+    def has_crumb(self, name: str) -> bool:
+        """Check if a crumb exists."""
+        crumbs = self.load_crumbs()
+        return name in crumbs
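Since `ducky/crumb.py` is added whole above, a short usage sketch of `CrumbManager` against a temporary directory (so it doesn't touch a real `~/.ducky`); this exercises only the methods defined in the new file, with the import path inferred from the package layout:

```python
import tempfile
from pathlib import Path

from ducky.crumb import CrumbManager  # import path assumed from the sdist layout

with tempfile.TemporaryDirectory() as tmp:
    manager = CrumbManager(config_dir=Path(tmp))

    # Save a crumb, as /crumb <name> would after a suggestion
    manager.save_crumb(
        name="ols",
        prompt="How do I list all Ollama processes?",
        response="To list all running Ollama processes...",
        command="ps aux | grep -i ollama | grep -v grep",
    )
    # The explanation field starts empty and is filled in separately
    manager.update_explanation("ols", "Finds and lists all running Ollama processes.")

    assert manager.has_crumb("ols")
    print(manager.get_crumb("ols")["command"])  # ps aux | grep -i ollama | grep -v grep
    print(list(manager.list_crumbs()))          # ['ols']

    assert manager.delete_crumb("ols")
    assert not manager.has_crumb("ols")
```

Note that `save_crumb` writes `created_at` via `datetime.now(UTC)`, matching the ISO timestamps shown in the README's `crumbs.json` example.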