rubber-ducky 1.2.2-py3-none-any.whl → 1.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ducky/__init__.py CHANGED
@@ -1 +1,3 @@
- from .ducky import ducky
+ from .ducky import ducky, main
+
+ __all__ = ["ducky", "main"]
ducky/config.py ADDED
@@ -0,0 +1,60 @@
+ import json
+ import os
+ from pathlib import Path
+ from typing import Dict, Any, Optional
+
+
+ class ConfigManager:
+     """Manages Ducky configuration including model preferences."""
+
+     def __init__(self, config_dir: Optional[Path] = None):
+         if config_dir is None:
+             config_dir = Path.home() / ".ducky"
+         self.config_dir = config_dir
+         self.config_file = self.config_dir / "config"
+         self.config_dir.mkdir(parents=True, exist_ok=True)
+
+     def load_config(self) -> Dict[str, Any]:
+         """Load configuration from file, returning defaults if not found."""
+         default_config = {
+             "last_model": "qwen3-coder:480b-cloud",
+             "last_host": "https://ollama.com"
+         }
+
+         if not self.config_file.exists():
+             return default_config
+
+         try:
+             with open(self.config_file, 'r') as f:
+                 config = json.load(f)
+             # Ensure all required keys are present
+             for key in default_config:
+                 if key not in config:
+                     config[key] = default_config[key]
+             return config
+         except (json.JSONDecodeError, IOError):
+             return default_config
+
+     def save_config(self, config: Dict[str, Any]) -> None:
+         """Save configuration to file."""
+         try:
+             with open(self.config_file, 'w') as f:
+                 json.dump(config, f, indent=2)
+         except IOError as e:
+             print(f"Warning: Could not save config: {e}")
+
+     def get_last_model(self) -> tuple[str, str]:
+         """Get the last used model and host.
+
+         Returns:
+             Tuple of (model_name, host)
+         """
+         config = self.load_config()
+         return config.get("last_model", "qwen3-coder:480b-cloud"), config.get("last_host", "https://ollama.com")
+
+     def save_last_model(self, model_name: str, host: str) -> None:
+         """Save the last used model and host."""
+         config = self.load_config()
+         config["last_model"] = model_name
+         config["last_host"] = host
+         self.save_config(config)
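
The new `ducky/config.py` is a small JSON-backed preference store. A minimal usage sketch (assuming the wheel is installed; the paths and defaults are the ones defined above):

```python
from ducky.config import ConfigManager

manager = ConfigManager()  # creates ~/.ducky/ and uses ~/.ducky/config

# A first run (or an unreadable file) falls back to the built-in defaults
model, host = manager.get_last_model()
print(model, host)  # -> qwen3-coder:480b-cloud https://ollama.com

# Persist a different choice; later sessions pick it up automatically
manager.save_last_model("gemma2:9b", "http://localhost:11434")
```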
ducky/ducky.py CHANGED
@@ -1,14 +1,13 @@
  from __future__ import annotations

  import argparse
- import os
  import asyncio
+ import json
+ import os
  import sys
  from dataclasses import dataclass
- from datetime import datetime
-
- # import json included earlier
- from typing import Dict
+ from datetime import UTC, datetime
+ from rich.console import Console
  from pathlib import Path
  from textwrap import dedent
  from typing import Any, Dict, List
@@ -23,19 +22,22 @@ class Crumb:
      description: str | None = None


- from ollama import AsyncClient
  from contextlib import nullcontext

+ from ollama import AsyncClient
+
+ from .config import ConfigManager
+
  try:  # prompt_toolkit is optional at runtime
      from prompt_toolkit import PromptSession
+     from prompt_toolkit.application import Application
      from prompt_toolkit.history import FileHistory
      from prompt_toolkit.key_binding import KeyBindings
-     from prompt_toolkit.patch_stdout import patch_stdout
-     from prompt_toolkit.application import Application
      from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
      from prompt_toolkit.layout.containers import HSplit, Window
      from prompt_toolkit.layout.controls import FormattedTextControl
      from prompt_toolkit.layout.layout import Layout
+     from prompt_toolkit.patch_stdout import patch_stdout
      from prompt_toolkit.styles import Style
      from prompt_toolkit.widgets import Box, Button, Dialog, Label, TextArea
  except ImportError:  # pragma: no cover - fallback mode
@@ -47,9 +49,6 @@ except ImportError: # pragma: no cover - fallback mode
          return nullcontext()


- from rich.console import Console
-
-
  @dataclass
  class AssistantResult:
      content: str
@@ -165,9 +164,7 @@ class ConversationLogger:
          )

      def _append(self, entry: Dict[str, Any]) -> None:
-         import json
-
-         entry["timestamp"] = datetime.utcnow().isoformat()
+         entry["timestamp"] = datetime.now(UTC).isoformat()
          with self.log_path.open("a", encoding="utf-8") as handle:
              handle.write(json.dumps(entry, ensure_ascii=False))
              handle.write("\n")
@@ -223,7 +220,11 @@ async def run_shell_and_print(

  class RubberDuck:
      def __init__(
-         self, model: str, quick: bool = False, command_mode: bool = False
+         self,
+         model: str,
+         quick: bool = False,
+         command_mode: bool = False,
+         host: str = "",
      ) -> None:
          self.system_prompt = dedent(
              """
@@ -238,6 +239,16 @@ class RubberDuck:
              changes rather than responding to each line individually.
              """
          ).strip()
+
+         # Set OLLAMA_HOST based on whether it's a cloud model
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+         elif "-cloud" in model:
+             os.environ["OLLAMA_HOST"] = "https://ollama.com"
+         elif "OLLAMA_HOST" not in os.environ:
+             # Default to localhost if not set and not a cloud model
+             os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+
          self.client = AsyncClient()
          self.model = model
          self.quick = quick
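
The constructor now resolves the Ollama endpoint with a fixed precedence: an explicit `host` argument, then a `-cloud` suffix in the model name, then any pre-existing `OLLAMA_HOST`, then the local daemon. A standalone restatement of that precedence (`resolve_ollama_host` is a hypothetical helper, not part of the package):

```python
import os

def resolve_ollama_host(model: str, host: str = "") -> str:
    """Mirrors the precedence in RubberDuck.__init__ above (hypothetical helper)."""
    if host:                   # an explicit host always wins
        return host
    if "-cloud" in model:      # e.g. "qwen3-coder:480b-cloud"
        return "https://ollama.com"
    # fall back to an existing OLLAMA_HOST, else the local daemon
    return os.environ.get("OLLAMA_HOST", "http://localhost:11434")

assert resolve_ollama_host("qwen3-coder:480b-cloud") == "https://ollama.com"
assert resolve_ollama_host("gemma2:9b", host="http://10.0.0.5:11434") == "http://10.0.0.5:11434"
```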
@@ -261,17 +272,19 @@ class RubberDuck:
          """
          # Start with the base system prompt
          prompt_lines = [self.system_prompt]
-
+
          if self.crumbs:
-             prompt_lines.append("\nCrumbs are simple scripts you can run with bash, uv, or bun.")
+             prompt_lines.append(
+                 "\nCrumbs are simple scripts you can run with bash, uv, or bun."
+             )
              prompt_lines.append("Crumbs:")
              for c in self.crumbs.values():
                  description = c.description or "no description"
                  prompt_lines.append(f"- {c.name}: {description}")
-
+
          # Update the system prompt
          self.system_prompt = "\n".join(prompt_lines)
-
+
          # Update the first system message in the messages list
          if self.messages and self.messages[0]["role"] == "system":
              self.messages[0]["content"] = self.system_prompt
@@ -375,20 +388,54 @@ class RubberDuck:

          return command or None

-     async def list_models(self) -> list[str]:
+     async def list_models(self, host: str = "") -> list[str]:
          """List available Ollama models."""
+         # Set the host temporarily for this operation
+         original_host = os.environ.get("OLLAMA_HOST", "")
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+             self.client = AsyncClient(host)
          try:
              response = await self.client.list()
-             return [model.model for model in response.models]
+             models = []
+             for m in response.models:
+                 models.append(m.model)
+             return models
          except Exception as e:
              console.print(f"Error listing models: {e}", style="red")
              return []
-
-     def switch_model(self, model_name: str) -> None:
+         finally:
+             # Restore original host
+             if original_host:
+                 os.environ["OLLAMA_HOST"] = original_host
+                 self.client = AsyncClient(original_host)
+             elif "OLLAMA_HOST" in os.environ:
+                 del os.environ["OLLAMA_HOST"]
+                 self.client = AsyncClient()
+
+     def switch_model(self, model_name: str, host: str = "") -> None:
          """Switch to a different Ollama model."""
          self.model = model_name
+
+         # Set the host based on the model or explicit host
+         if host:
+             os.environ["OLLAMA_HOST"] = host
+             self.client = AsyncClient(host)
+         elif "-cloud" in model_name:
+             os.environ["OLLAMA_HOST"] = "https://ollama.com"
+             self.client = AsyncClient("https://ollama.com")
+         else:
+             os.environ["OLLAMA_HOST"] = "http://localhost:11434"
+             self.client = AsyncClient()
+
          console.print(f"Switched to model: {model_name}", style="green")

+     def clear_history(self) -> None:
+         """Reset conversation history to the initial system prompt."""
+         if self.messages:
+             self.messages = [self.messages[0]]
+         console.print("Conversation history cleared.", style="green")
+

  class InlineInterface:
      def __init__(
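
A minimal sketch of driving the extended `list_models`/`switch_model` API directly (assumes a local Ollama daemon is running; the model name is illustrative):

```python
import asyncio

from ducky.ducky import RubberDuck

async def demo() -> None:
    duck = RubberDuck(model="gemma2:9b", host="http://localhost:11434")
    # list_models temporarily repoints OLLAMA_HOST, then restores it
    models = await duck.list_models("http://localhost:11434")
    print(models)
    if models:
        # A name containing "-cloud" would instead repoint the client
        # at https://ollama.com, per switch_model above.
        duck.switch_model(models[0])

asyncio.run(demo())
```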
@@ -439,6 +486,11 @@ class InlineInterface:
          def _(event) -> None:
              event.app.exit(result="__RUN_LAST__")

+         @kb.add("c-s")
+         def _(event) -> None:
+             # This will be handled in the processing loop
+             event.app.exit(result="__COPY_LAST__")
+
          return kb

      async def run(self) -> None:
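
The new Ctrl+S binding reuses the interface's sentinel pattern: the key handler exits the prompt application with a marker string instead of submitted text, and the read loop dispatches on it. A stripped-down sketch of that pattern (the loop is illustrative; the prompt_toolkit calls are the same ones used above):

```python
from prompt_toolkit import PromptSession
from prompt_toolkit.key_binding import KeyBindings

kb = KeyBindings()

@kb.add("c-s")
def _(event) -> None:
    # Exit the prompt with a sentinel instead of the typed text
    event.app.exit(result="__COPY_LAST__")

session = PromptSession(key_bindings=kb)
while True:
    text = session.prompt("> ")
    if text == "__COPY_LAST__":
        print("(would copy the last suggested command here)")
        continue
    print(f"submitted: {text}")
```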
@@ -451,7 +503,7 @@ class InlineInterface:
              return

          console.print(
-             "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!cmd' runs shell • Ctrl+D exits",
+             "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!<cmd>' runs shell • Ctrl+D exits • Ctrl+S copies last command",
              style="dim",
          )
          while True:
@@ -462,7 +514,7 @@ class InlineInterface:
                  console.print()
                  console.print("Exiting.", style="dim")
                  return
-             except KeyboardInterrupt:
+             except (KeyboardInterrupt, asyncio.CancelledError):
                  console.print()
                  console.print("Interrupted. Press Ctrl+D to exit.", style="yellow")
                  continue
@@ -471,8 +523,53 @@ class InlineInterface:
              await self._run_last_command()
              continue

+         if text == "__COPY_LAST__":
+             await self._copy_last_command()
+             continue
+
          await self._process_text(text)

+     async def _copy_last_command(self) -> None:
+         """Copy the last suggested command to clipboard."""
+         if not self.last_command:
+             console.print("No suggested command available to copy.", style="yellow")
+             return
+
+         try:
+             import subprocess
+             import platform
+
+             command_to_copy = self.last_command
+
+             system = platform.system()
+             if system == "Darwin":  # macOS
+                 process = subprocess.Popen(["pbcopy"], stdin=subprocess.PIPE, text=True)
+                 process.communicate(input=command_to_copy)
+             elif system == "Windows":
+                 process = subprocess.Popen(["clip"], stdin=subprocess.PIPE, text=True)
+                 process.communicate(input=command_to_copy)
+             else:  # Linux and others
+                 # Try xclip first, then xsel as fallback
+                 try:
+                     process = subprocess.Popen(
+                         ["xclip", "-selection", "clipboard"],
+                         stdin=subprocess.PIPE,
+                         text=True,
+                     )
+                     process.communicate(input=command_to_copy)
+                 except FileNotFoundError:
+                     # Try xsel as fallback
+                     process = subprocess.Popen(
+                         ["xsel", "-b", "-i"], stdin=subprocess.PIPE, text=True
+                     )
+                     process.communicate(input=command_to_copy)
+
+             console.print(f"Copied to clipboard: {command_to_copy}", style="green")
+         except Exception as e:
+             console.print(f"Failed to copy to clipboard: {e}", style="red")
+             console.print("You can manually copy the last command:", style="dim")
+             console.print(f" {self.last_command}", style="bold")
+
      async def _run_last_command(self) -> None:
          if not self.last_command:
              console.print("No suggested command available yet.", style="yellow")
@@ -483,6 +580,9 @@ class InlineInterface:
              logger=self.logger,
              history=self.assistant.messages,
          )
+         # Add the command to prompt history so user can recall it with up arrow
+         if self.session and self.session.history and self.last_command:
+             self.session.history.append_string(self.last_command)
          self.last_shell_output = True
          self.pending_command = None
          self.last_command = None
@@ -503,17 +603,37 @@ class InlineInterface:
              await self._run_last_command()
              return

+         if stripped.lower() in {"/clear", "/reset"}:
+             await self._clear_history()
+             return
+
          if stripped.lower() == "/model":
              await self._select_model()
              return

+         if stripped.lower() == "/local":
+             await self._select_model(host="http://localhost:11434")
+             return
+
+         if stripped.lower() == "/cloud":
+             await self._select_model(host="https://ollama.com")
+             return
+
+         if stripped.lower() == "/help":
+             await self._show_help()
+             return
+
          if stripped.startswith("!"):
+             command = stripped[1:].strip()
              await run_shell_and_print(
                  self.assistant,
-                 stripped[1:].strip(),
+                 command,
                  logger=self.logger,
                  history=self.assistant.messages,
              )
+             # Add the command to prompt history so user can recall it with up arrow
+             if self.session and self.session.history and command:
+                 self.session.history.append_string(command)
              self.last_shell_output = True
              self.pending_command = None
              return
@@ -549,29 +669,120 @@ class InlineInterface:
          )
          self.last_shell_output = None

-     async def _select_model(self) -> None:
+     async def _show_help(self) -> None:
+         """Display help information for all available commands."""
+         console.print("\nDucky CLI Help", style="bold blue")
+         console.print("===============", style="bold blue")
+         console.print()
+
+         commands = [
+             ("[bold]/help[/bold]", "Show this help message"),
+             ("[bold]/model[/bold]", "Select a model interactively (local or cloud)"),
+             (
+                 "[bold]/local[/bold]",
+                 "List and select from local models (localhost:11434)",
+             ),
+             ("[bold]/cloud[/bold]", "List and select from cloud models (ollama.com)"),
+             (
+                 "[bold]/clear[/bold] or [bold]/reset[/bold]",
+                 "Clear conversation history",
+             ),
+             (
+                 "[bold]/run[/bold]",
+                 "Re-run the last suggested command",
+             ),
+             (
+                 "[bold]Empty Enter[/bold]",
+                 "Re-run suggested command or explain last output",
+             ),
+             ("[bold]![<command>][/bold]", "Execute a shell command directly"),
+             ("[bold]Ctrl+D[/bold]", "Exit the application"),
+             ("[bold]Ctrl+R[/bold]", "Re-run the last suggested command"),
+             ("[bold]Ctrl+S[/bold]", "Copy the last suggested command to clipboard"),
+         ]
+
+         for command, description in commands:
+             console.print(f"{command:<30} {description}")
+
+         console.print()
+
+     async def _clear_history(self) -> None:
+         self.assistant.clear_history()
+         self.last_command = None
+         self.pending_command = None
+         self.last_shell_output = None
+
+     async def _select_model(self, host: str = "") -> None:
          """Show available models and allow user to select one with arrow keys."""
          if PromptSession is None or KeyBindings is None:
-             console.print("Model selection requires prompt_toolkit to be installed.", style="yellow")
+             console.print(
+                 "Model selection requires prompt_toolkit to be installed.",
+                 style="yellow",
+             )
              return

-         models = await self.assistant.list_models()
+         # Show current model
+         console.print(f"Current model: {self.assistant.model}", style="bold green")
+
+         # If no host specified, give user a choice between local and cloud
+         if not host:
+             console.print("\nSelect model type:", style="bold")
+             console.print("1. Local models (localhost:11434)")
+             console.print("2. Cloud models (ollama.com)")
+             console.print("Press Esc to cancel", style="dim")
+
+             try:
+                 choice = await asyncio.to_thread(input, "Enter choice (1 or 2): ")
+                 choice = choice.strip()
+
+                 if choice.lower() == "esc":
+                     console.print("Model selection cancelled.", style="yellow")
+                     return
+
+                 if choice == "1":
+                     host = "http://localhost:11434"
+                 elif choice == "2":
+                     host = "https://ollama.com"
+                 else:
+                     console.print("Invalid choice. Please select 1 or 2.", style="red")
+                     return
+             except (ValueError, EOFError):
+                 console.print("Invalid input.", style="red")
+                 return
+
+         models = await self.assistant.list_models(host)
          if not models:
-             console.print("No models available.", style="yellow")
+             if host == "http://localhost:11434":
+                 console.print(
+                     "No local models available. Is Ollama running?", style="red"
+                 )
+                 console.print("Start Ollama with: ollama serve", style="yellow")
+             else:
+                 console.print("No models available.", style="yellow")
              return

-         # Simple approach: show models as a list and let user type the number
-         console.print("Available models:", style="bold")
+         if host == "https://ollama.com":
+             console.print("\nAvailable cloud models:", style="bold")
+         elif host == "http://localhost:11434":
+             console.print("\nAvailable local models:", style="bold")
+         else:
+             console.print("\nAvailable models:", style="bold")
+
          for i, model in enumerate(models, 1):
              if model == self.assistant.model:
                  console.print(f"{i}. {model} (current)", style="green")
              else:
                  console.print(f"{i}. {model}")

+         console.print("Press Esc to cancel", style="dim")
          try:
              choice = await asyncio.to_thread(input, "Enter model number or name: ")
              choice = choice.strip()
-
+
+             if choice.lower() == "esc":
+                 console.print("Model selection cancelled.", style="yellow")
+                 return
+
              # Check if it's a number
              if choice.isdigit():
                  index = int(choice) - 1
@@ -587,8 +798,15 @@ class InlineInterface:
              else:
                  console.print("Invalid model name.", style="red")
                  return
-
-             self.assistant.switch_model(selected_model)
+
+             self.assistant.switch_model(selected_model, host)
+
+             # Save the selected model and host to config
+             config_manager = ConfigManager()
+             config_manager.save_last_model(
+                 selected_model,
+                 host or os.environ.get("OLLAMA_HOST", "http://localhost:11434"),
+             )
          except (ValueError, EOFError):
              console.print("Invalid input.", style="red")

@@ -600,7 +818,7 @@ class InlineInterface:
                  console.print()
                  console.print("Exiting.", style="dim")
                  return
-             except KeyboardInterrupt:
+             except (KeyboardInterrupt, asyncio.CancelledError):
                  console.print()
                  console.print("Interrupted. Press Ctrl+D to exit.", style="yellow")
                  continue
@@ -668,9 +886,7 @@ async def ducky() -> None:
      parser.add_argument(
          "--directory", "-d", help="The directory to be processed", default=None
      )
-     parser.add_argument(
-         "--model", "-m", help="The model to be used", default="qwen3-coder:480b-cloud"
-     )
+     parser.add_argument("--model", "-m", help="The model to be used", default=None)
      parser.add_argument(
          "--local",
          "-l",
@@ -681,11 +897,27 @@ async def ducky() -> None:

      ensure_history_dir()
      logger = ConversationLogger(CONVERSATION_LOG_FILE)
+
+     # Load the last used model from config if no model is specified
+     config_manager = ConfigManager()
+     last_model, last_host = config_manager.get_last_model()
+
+     # If --local flag is used, override with local settings
      if getattr(args, "local", False):
          # Point Ollama client to local host and use gemma3 as default model
          os.environ["OLLAMA_HOST"] = "http://localhost:11434"
-         args.model = "gpt-oss:20b"
-     rubber_ducky = RubberDuck(model=args.model, quick=False, command_mode=True)
+         args.model = args.model or "gemma2:9b"
+         last_host = "http://localhost:11434"
+     # If no model is specified, use the last used model
+     elif args.model is None:
+         args.model = last_model
+         # Set the host based on the last used host
+         if last_host:
+             os.environ["OLLAMA_HOST"] = last_host
+
+     rubber_ducky = RubberDuck(
+         model=args.model, quick=False, command_mode=True, host=last_host
+     )

      code = read_files_from_dir(args.directory) if args.directory else None
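
Startup model selection now follows a clear precedence: `--local` forces the local host (defaulting the model to `gemma2:9b`), an explicit `--model` is used as given, and with no flags the model and host saved by `ConfigManager` are restored. A condensed restatement (`resolve_startup` is a hypothetical function, not in the package):

```python
def resolve_startup(
    cli_model: str | None, local: bool, last_model: str, last_host: str
) -> tuple[str, str]:
    """Condenses the branching in ducky() above."""
    if local:
        return (cli_model or "gemma2:9b"), "http://localhost:11434"
    if cli_model is None:
        return last_model, last_host  # restore the previous session's choice
    return cli_model, last_host

assert resolve_startup(None, False, "gemma2:9b", "http://localhost:11434") == (
    "gemma2:9b",
    "http://localhost:11434",
)
```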
rubber_ducky-1.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rubber-ducky
- Version: 1.2.2
+ Version: 1.3.0
  Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
@@ -35,7 +35,7 @@ Requirements:
  ```
  ducky                  # interactive inline session
  ducky --directory src  # preload code from a directory
- ducky --model llama3   # use a different Ollama model
+ ducky --model qwen3    # use a different Ollama model
  ```

  Both `ducky` and `rubber-ducky` executables map to the same CLI, so `uvx rubber-ducky -- <args>` works as well.
@@ -54,6 +54,32 @@ Launching `ducky` with no arguments opens the inline interface:

  `ducky --directory <path>` streams the contents of the provided directory to the assistant the next time you submit a prompt (the directory is read once at startup).

+ ## Crumbs
+
+ Crumbs are simple scripts that can be executed within Rubber Ducky. They are stored in `~/.ducky/crumbs/` and can be referenced by name in your prompts.
+
+ To use a crumb, simply mention it in your prompt:
+ ```
+ Can you use the uv-server crumb to run the HuggingFace prompt renderer?
+ ```
+
+ ### Creating Crumbs
+
+ To create a new crumb:
+
+ 1. Create a new directory in `~/.ducky/crumbs/` with your crumb name
+ 2. Add an `info.txt` file with metadata:
+ ```
+ name: your-crumb-name
+ type: shell
+ description: Brief description of what this crumb does
+ ```
+ 3. Add your executable script file (e.g., `your-crumb-name.sh`)
+ 4. Create a symbolic link in `~/.local/bin` to make it available as a command:
+ ```bash
+ ln -s ~/.ducky/crumbs/your-crumb-name/your-crumb-name.sh ~/.local/bin/your-crumb-name
+ ```
+
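
As a worked example, a hypothetical `hello-crumb` written in Python (crumbs can also be run with uv per the system prompt above; the `info.txt` values mirror the template in step 2 and are assumptions):

```python
#!/usr/bin/env python3
# ~/.ducky/crumbs/hello-crumb/hello-crumb.py  (hypothetical crumb)
# Companion ~/.ducky/crumbs/hello-crumb/info.txt would carry:
#   name: hello-crumb
#   type: shell          # value assumed from the template above
#   description: Print a greeting with any passed arguments

import sys

# A crumb is just an executable script; Ducky references it by name.
print(f"hello-crumb invoked with args: {sys.argv[1:]}")
```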
  ## Development (uv)

  ```
rubber_ducky-1.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ ducky/__init__.py,sha256=2vLhJxOuJ3lnIeg5rmF6xUvybUT5Qhjej6AS0BeBASY,60
+ ducky/config.py,sha256=AH7KMTxPYrtSSmIJ_3qv0WB1tYpAjOWPrvPb7bKQ_cA,2142
+ ducky/ducky.py,sha256=1B500ievEsIg9mowpzqWuDNGck_DnUM0LdDRDHsm4yo,33716
+ rubber_ducky-1.3.0.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
+ rubber_ducky-1.3.0.dist-info/METADATA,sha256=BVIv6uBWho_khv8AbkQdPMP9PBB6xbvg-U6muGsfdCs,3874
+ rubber_ducky-1.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ rubber_ducky-1.3.0.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
+ rubber_ducky-1.3.0.dist-info/top_level.txt,sha256=4Q75MONDNPpQ3o17bTu7RFuKwFhTIRzlXP3_LDWQQ30,6
+ rubber_ducky-1.3.0.dist-info/RECORD,,
rubber_ducky-1.2.2.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
- ducky/__init__.py,sha256=9l8SmwX0t1BmITkcrzW9fVMPvD2LfgKLZlSXWzPJFSE,25
- ducky/ducky.py,sha256=bBf_BU0GIu_6bqbDO7jTkJzrDrL-4YIA-jFfhTUMvVg,24493
- rubber_ducky-1.2.2.dist-info/licenses/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
- rubber_ducky-1.2.2.dist-info/METADATA,sha256=AM3v2H1FFazbpSqZSHoj8L2qKZ9ZgdT5Xzu__lQPlHc,3063
- rubber_ducky-1.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- rubber_ducky-1.2.2.dist-info/entry_points.txt,sha256=WPnVUUNvWdMDcBlCo8JCzkLghGllMX5QVZyQghyq85Q,75
- rubber_ducky-1.2.2.dist-info/top_level.txt,sha256=4Q75MONDNPpQ3o17bTu7RFuKwFhTIRzlXP3_LDWQQ30,6
- rubber_ducky-1.2.2.dist-info/RECORD,,