rubber-ducky 1.6.1__tar.gz → 1.6.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rubber-ducky
- Version: 1.6.1
+ Version: 1.6.4
  Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
@@ -5,7 +5,6 @@ import asyncio
  import json
  import os
  import re
- import shlex
  import subprocess
  import sys
  from dataclasses import dataclass
@@ -15,6 +14,7 @@ from pathlib import Path
  from textwrap import dedent
  from typing import Any, Dict, List

+ __version__ = "1.6.4"

  from .config import ConfigManager
  from .crumb import CrumbManager
@@ -35,6 +35,7 @@ try: # prompt_toolkit is optional at runtime
  from prompt_toolkit.patch_stdout import patch_stdout
  from prompt_toolkit.styles import Style
  from prompt_toolkit.widgets import Box, Button, Dialog, Label, TextArea
+ from prompt_toolkit.formatted_text import PygmentsTokens
  except ImportError: # pragma: no cover - fallback mode
  PromptSession = None # type: ignore[assignment]
  FileHistory = None # type: ignore[assignment]
@@ -42,6 +43,9 @@ except ImportError: # pragma: no cover - fallback mode

  def patch_stdout() -> nullcontext:
  return nullcontext()
+ else:
+ def patch_stdout() -> nullcontext:
+ return nullcontext()


  @dataclass
@@ -122,22 +126,22 @@ def print_shell_result(result: ShellResult, truncate: bool = True) -> None:
  # Show first 8 lines of stdout
  show_lines = stdout_lines[:8]
  console.print('\n'.join(show_lines), highlight=False)
- console.print(f"... ({len(stdout_lines) - 8} more lines, use /expand to see full output)", style="dim cyan")
+ console.print(f"... ({len(stdout_lines) - 8} more lines, use /expand to see full output)", style="dim")
  else:
  console.print(result.stdout.rstrip(), highlight=False)

  if result.stderr.strip():
  if result.stdout.strip():
  console.print()
- console.print("[stderr]", style="bold red")
+ console.print("[stderr]", style="bold yellow")
  if should_truncate:
  # Show first 5 lines of stderr
  show_lines = stderr_lines[:5]
- console.print('\n'.join(show_lines), style="red", highlight=False)
+ console.print('\n'.join(show_lines), style="yellow", highlight=False)
  if len(stderr_lines) > 5:
- console.print(f"... ({len(stderr_lines) - 5} more lines)", style="dim red")
+ console.print(f"... ({len(stderr_lines) - 5} more lines)", style="dim")
  else:
- console.print(result.stderr.rstrip(), style="red", highlight=False)
+ console.print(result.stderr.rstrip(), style="yellow", highlight=False)

  if result.returncode != 0 or (not result.stdout.strip() and not result.stderr.strip()):
  suffix = (
@@ -158,7 +162,7 @@ async def run_shell_and_print(
  if not command:
  console.print("No command provided.", style="yellow")
  return ShellResult(command="", stdout="", stderr="", returncode=-1)
- console.print(f"$ {command}", style="bold magenta")
+ console.print(f"$ {command}", style="bold white")
  result = await assistant.run_shell_command(command)
  print_shell_result(result)
  if logger:
@@ -329,7 +333,7 @@ class RubberDuck:
  models.append(m.model)
  return models
  except Exception as e:
- console.print(f"Error listing models: {e}", style="red")
+ console.print(f"Error listing models: {e}", style="yellow")
  return []
  finally:
  # Restore original host
@@ -358,13 +362,27 @@ class RubberDuck:
  os.environ["OLLAMA_HOST"] = "http://localhost:11434"
  self.client = AsyncClient()

- console.print(f"Switched to model: {model_name}", style="green")
+ console.print(f"Switched to model: {model_name}", style="yellow")

  def clear_history(self) -> None:
  """Reset conversation history to the initial system prompt."""
  if self.messages:
  self.messages = [self.messages[0]]
- console.print("Conversation history cleared.", style="green")
+ console.print("Conversation history cleared.", style="yellow")
+
+ async def check_connection(self) -> tuple[bool, str]:
+ """Check if Ollama host is reachable. Returns (is_connected, message)."""
+ try:
+ models = await self.list_models()
+ return True, f"Connected ({len(models)} models available)"
+ except Exception as e:
+ error_msg = str(e).lower()
+ if "refused" in error_msg:
+ return False, "Connection refused - is Ollama running?"
+ elif "timeout" in error_msg:
+ return False, "Connection timeout - check network/host"
+ else:
+ return False, f"Error: {e}"


  class InlineInterface:
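A minimal usage sketch of the new check_connection helper (the snippet is illustrative and not part of the diff; "assistant" stands for a RubberDuck instance): it wraps list_models() and reports reachability as an (is_connected, message) tuple instead of raising.

    # Hypothetical caller, e.g. during interactive startup:
    ok, message = await assistant.check_connection()
    if not ok:
        console.print(message, style="yellow")  # e.g. "Connection refused - is Ollama running?"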
@@ -373,6 +391,7 @@ class InlineInterface:
  assistant: RubberDuck,
  logger: ConversationLogger | None = None,
  code: str | None = None,
+ quiet_mode: bool = False,
  ) -> None:
  ensure_history_dir()
  self.assistant = assistant
@@ -386,6 +405,7 @@ class InlineInterface:
  self.session: PromptSession | None = None
  self.selected_model: str | None = None
  self.crumb_manager = CrumbManager()
+ self.quiet_mode = quiet_mode

  if (
  PromptSession is not None
@@ -425,6 +445,36 @@ class InlineInterface:

  return kb

+ def _print_banner(self) -> None:
+ """Print the startup banner with version, model info, and crumb count."""
+ from .ducky import __version__
+
+ # Determine model display with host indicator
+ model = self.assistant.model
+ host = os.environ.get("OLLAMA_HOST", "")
+ if host == "https://ollama.com":
+ model_display = f"{model}:cloud"
+ elif "localhost" in host:
+ model_display = f"{model}:local"
+ else:
+ model_display = model
+
+ # Get crumb count
+ crumbs = self.crumb_manager.list_crumbs()
+ crumb_count = len(crumbs)
+
+ # Print banner with yellow/white color scheme
+ console.print(f"Ducky v{__version__}", style="yellow")
+ console.print(
+ f"Model: [bold white]{model_display}[/bold white] | Crumbs: {crumb_count}"
+ )
+ console.print()
+ console.print(
+ "Enter submits • !cmd=shell • Ctrl+D=exit • /help=commands",
+ style="dim",
+ )
+ console.print()
+
  async def run(self) -> None:
  if self.session is None:
  console.print(
@@ -434,10 +484,10 @@ class InlineInterface:
  await self._run_basic_loop()
  return

- console.print(
- "Enter submits • empty Enter reruns the last suggested command (or explains the last shell output) • '!<cmd>' runs shell • Ctrl+D exits • Ctrl+S copies last command",
- style="dim",
- )
+ # Print banner if not in quiet mode
+ if not self.quiet_mode:
+ self._print_banner()
+
  while True:
  try:
  with patch_stdout():
@@ -496,9 +546,9 @@ class InlineInterface:
  )
  process.communicate(input=command_to_copy)

- console.print(f"Copied to clipboard: {command_to_copy}", style="green")
+ console.print(f"Copied to clipboard: {command_to_copy}", style="yellow")
  except Exception as e:
- console.print(f"Failed to copy to clipboard: {e}", style="red")
+ console.print(f"Failed to copy to clipboard: {e}", style="yellow")
  console.print("You can manually copy the last command:", style="dim")
  console.print(f" {self.last_command}", style="bold")

@@ -532,7 +582,7 @@ class InlineInterface:
  return

  console.print()
- console.print(f"[Full output for: {self.last_shell_result.command}]", style="bold cyan")
+ console.print(f"[Full output for: {self.last_shell_result.command}]", style="bold white")
  console.print()
  print_shell_result(self.last_shell_result, truncate=False)
  console.print()
@@ -553,8 +603,8 @@ class InlineInterface:
  first_word = stripped.split()[0].lower()
  if self.crumb_manager.has_crumb(first_word):
  # Extract additional arguments after the crumb name
- parts = stripped.split()
- args = parts[1:]
+ parts = stripped.split(maxsplit=1)
+ args = parts[1:] if len(parts) > 1 else []
  await self._use_crumb(first_word, args)
  return

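Illustrative effect of the split(maxsplit=1) change above (the input string is hypothetical): everything typed after the crumb name now reaches _use_crumb as a single argument string rather than a list of word tokens.

    stripped = "deploy staging --force"          # hypothetical user input
    parts = stripped.split(maxsplit=1)            # ["deploy", "staging --force"]
    args = parts[1:] if len(parts) > 1 else []    # ["staging --force"], one string instead of two tokens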
@@ -635,8 +685,8 @@ class InlineInterface:

  async def _show_help(self) -> None:
  """Display help information for all available commands."""
- console.print("\nDucky CLI Help", style="bold blue")
- console.print("===============", style="bold blue")
+ console.print("\nDucky CLI Help", style="bold white")
+ console.print("===============", style="bold white")
  console.print()

  commands = [
@@ -691,8 +741,8 @@ class InlineInterface:
  console.print("No crumbs saved yet. Use '/crumb <name>' to save a command.", style="yellow")
  return

- console.print("\nSaved Crumbs", style="bold blue")
- console.print("=============", style="bold blue")
+ console.print("\nSaved Crumbs", style="bold white")
+ console.print("=============", style="bold white")
  console.print()

  # Calculate max name length for alignment
@@ -705,7 +755,7 @@ class InlineInterface:

  # Format: name | explanation | command
  console.print(
- f"[bold]{name:<{max_name_len}}[/bold] | [cyan]{explanation}[/cyan] | [dim]{command}[/dim]"
+ f"[bold yellow]{name:<{max_name_len}}[/bold yellow] | [white]{explanation}[/white] | [dim]{command}[/dim]"
  )

  console.print(f"\n[dim]Total: {len(crumbs)} crumbs[/dim]")
@@ -822,7 +872,7 @@ class InlineInterface:
  command=self.assistant.last_result.command,
  )

- console.print(f"Saved crumb '{name}'!", style="green")
+ console.print(f"Saved crumb '{name}'!", style="yellow")
  console.print("Generating explanation...", style="dim")

  # Spawn subprocess to generate explanation asynchronously
@@ -851,7 +901,7 @@ class InlineInterface:
  clean_explanation = re.sub(r'\x1b\[([0-9;]*[mGK])', '', explanation)

  text = Text()
- text.append("Explanation added: ", style="cyan")
+ text.append("Explanation added: ", style="white")
  text.append(clean_explanation)
  console.print(text)
  except Exception as e:
@@ -866,7 +916,7 @@ class InlineInterface:
  command=command,
  )

- console.print(f"Added crumb '{name}'!", style="green")
+ console.print(f"Added crumb '{name}'!", style="yellow")
  console.print("Generating explanation...", style="dim")

  # Spawn subprocess to generate explanation asynchronously
@@ -875,7 +925,7 @@ class InlineInterface:
  async def _delete_crumb(self, name: str) -> None:
  """Delete a crumb."""
  if self.crumb_manager.delete_crumb(name):
- console.print(f"Deleted crumb '{name}'.", style="green")
+ console.print(f"Deleted crumb '{name}'.", style="yellow")
  else:
  console.print(f"Crumb '{name}' not found.", style="yellow")

@@ -898,9 +948,13 @@ class InlineInterface:
  if args and command != "No command":
  command = substitute_placeholders(command, args)

- console.print(f"\n[bold cyan]Crumb: {name}[/bold cyan]")
- console.print(f"Explanation: {explanation}", style="green")
- console.print("Command: ", style="cyan", end="")
+ from rich.text import Text
+ crumb_text = Text()
+ crumb_text.append("Crumb: ", style="bold yellow")
+ crumb_text.append(name, style="bold yellow")
+ console.print(f"\n{crumb_text}")
+ console.print(f"Explanation: {explanation}", style="yellow")
+ console.print("Command: ", style="white", end="")
  console.print(command, highlight=False)

  if command and command != "No command":
@@ -917,7 +971,7 @@ class InlineInterface:
  return

  # Show current model
- console.print(f"Current model: {self.assistant.model}", style="bold green")
+ console.print(f"Current model: {self.assistant.model}", style="bold yellow")

  # If no host specified, give user a choice between local and cloud
  if not host:
@@ -939,17 +993,17 @@ class InlineInterface:
  elif choice == "2":
  host = "https://ollama.com"
  else:
- console.print("Invalid choice. Please select 1 or 2.", style="red")
+ console.print("Invalid choice. Please select 1 or 2.", style="yellow")
  return
  except (ValueError, EOFError):
- console.print("Invalid input.", style="red")
+ console.print("Invalid input.", style="yellow")
  return

  models = await self.assistant.list_models(host)
  if not models:
  if host == "http://localhost:11434":
  console.print(
- "No local models available. Is Ollama running?", style="red"
+ "No local models available. Is Ollama running?", style="yellow"
  )
  console.print("Start Ollama with: ollama serve", style="yellow")
  else:
@@ -965,7 +1019,7 @@ class InlineInterface:

  for i, model in enumerate(models, 1):
  if model == self.assistant.model:
- console.print(f"{i}. {model} (current)", style="green")
+ console.print(f"{i}. [bold yellow]{model}[/bold yellow] (current)", style="yellow")
  else:
  console.print(f"{i}. {model}")

@@ -984,14 +1038,14 @@ class InlineInterface:
  if 0 <= index < len(models):
  selected_model = models[index]
  else:
- console.print("Invalid model number.", style="red")
+ console.print("Invalid model number.", style="yellow")
  return
  else:
  # Check if it's a model name
  if choice in models:
  selected_model = choice
  else:
- console.print("Invalid model name.", style="red")
+ console.print("Invalid model name.", style="yellow")
  return

  self.assistant.switch_model(selected_model, host)
@@ -1003,7 +1057,7 @@ class InlineInterface:
  host or os.environ.get("OLLAMA_HOST", "http://localhost:11434"),
  )
  except (ValueError, EOFError):
- console.print("Invalid input.", style="red")
+ console.print("Invalid input.", style="yellow")

  async def _run_basic_loop(self) -> None: # pragma: no cover - fallback path
  while True:
@@ -1061,10 +1115,10 @@ async def run_single_prompt(

  console.print("\nOptions:", style="bold")
  console.print(" 1. Use --local flag to access local models:", style="dim")
- console.print(" ducky --local", style="cyan")
+ console.print(" ducky --local", style="white")
  console.print(" 2. Select a local model with /local command", style="dim")
  console.print(" 3. Set up Ollama cloud API credentials:", style="dim")
- console.print(" export OLLAMA_API_KEY='your-api-key-here'", style="cyan")
+ console.print(" export OLLAMA_API_KEY='your-api-key-here'", style="white")
  console.print("\nGet your API key from: https://ollama.com/account/api-keys", style="dim")
  console.print()
  raise
@@ -1072,12 +1126,12 @@ async def run_single_prompt(
  raise

  content = result.content or "(No content returned.)"
- console.print(content, style="green", highlight=False)
+ console.print(content, style="dim", highlight=False)
  if logger:
  logger.log_assistant(content, result.command)
  if result.command and not suppress_suggestion:
- console.print("\nSuggested command:", style="cyan", highlight=False)
- console.print(result.command, style="bold cyan", highlight=False)
+ console.print("\nSuggested command:", style="yellow", highlight=False)
+ console.print(result.command, style="bold yellow", highlight=False)
  return result


@@ -1107,13 +1161,17 @@ async def interactive_session(
  rubber_ducky: RubberDuck,
  logger: ConversationLogger | None = None,
  code: str | None = None,
+ quiet_mode: bool = False,
  ) -> None:
- ui = InlineInterface(rubber_ducky, logger=logger, code=code)
+ ui = InlineInterface(rubber_ducky, logger=logger, code=code, quiet_mode=quiet_mode)
  await ui.run()


  async def ducky() -> None:
  parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--version", "-v", action="version", version=f"%(prog)s {__version__}"
+ )
  parser.add_argument(
  "--directory", "-d", help="The directory to be processed", default=None
  )
@@ -1130,6 +1188,12 @@ async def ducky() -> None:
  action="store_true",
  help=" Automatically run the suggested command without confirmation",
  )
+ parser.add_argument(
+ "--quiet",
+ "-q",
+ action="store_true",
+ help="Suppress startup messages and help text",
+ )
  parser.add_argument(
  "single_prompt",
  nargs="*",
@@ -1189,8 +1253,31 @@ async def ducky() -> None:
  console.print("No input received from stdin.", style="yellow")
  return

- # Handle crumb invocation mode
+ # Handle crumb list command
  crumb_manager = CrumbManager()
+ if args.single_prompt and args.single_prompt[0] == "crumbs":
+ crumbs = crumb_manager.list_crumbs()
+
+ if not crumbs:
+ console.print("No crumbs saved yet.", style="yellow")
+ else:
+ console.print("Saved Crumbs", style="bold white")
+ console.print("=============", style="bold white")
+ console.print()
+
+ max_name_len = max(len(name) for name in crumbs.keys())
+
+ for name, data in sorted(crumbs.items()):
+ explanation = data.get("explanation", "") or "No explanation yet"
+ command = data.get("command", "") or "No command"
+
+ console.print(
+ f"[bold yellow]{name:<{max_name_len}}[/bold yellow] | [white]{explanation}[/white] | [dim]{command}[/dim]"
+ )
+ console.print(f"\n[dim]Total: {len(crumbs)} crumbs[/dim]")
+ return
+
+ # Handle crumb invocation mode
  if args.single_prompt:
  first_arg = args.single_prompt[0]
  if crumb_manager.has_crumb(first_arg):
@@ -1206,9 +1293,13 @@ async def ducky() -> None:
  if crumb_args and command != "No command":
  command = substitute_placeholders(command, crumb_args)

- console.print(f"\n[bold cyan]Crumb: {first_arg}[/bold cyan]")
- console.print(f"Explanation: {explanation}", style="green")
- console.print("Command: ", style="cyan", end="")
+ from rich.text import Text
+ crumb_text = Text()
+ crumb_text.append("Crumb: ", style="bold yellow")
+ crumb_text.append(first_arg, style="bold yellow")
+ console.print(f"\n{crumb_text}")
+ console.print(f"Explanation: {explanation}", style="yellow")
+ console.print("Command: ", style="white", end="")
  console.print(command, highlight=False)

  if command and command != "No command":
@@ -1239,7 +1330,19 @@ async def ducky() -> None:
  console.print("\n[green]✓[/green] Command copied to clipboard")
  return

- await interactive_session(rubber_ducky, logger=logger, code=code)
+ # Validate model is available if using local
+ if not args.single_prompt and not piped_prompt and last_host == "http://localhost:11434":
+ connected = True
+ try:
+ models = await rubber_ducky.list_models()
+ if args.model not in models:
+ console.print(f"Model '{args.model}' not found locally.", style="yellow")
+ console.print(f"Available: {', '.join(models[:5])}...", style="dim")
+ console.print("Use /model to select, or run 'ollama pull <model>'", style="yellow")
+ except Exception:
+ pass
+
+ await interactive_session(rubber_ducky, logger=logger, code=code, quiet_mode=args.quiet)


  def substitute_placeholders(command: str, args: list[str]) -> str:
@@ -1247,23 +1350,36 @@ def substitute_placeholders(command: str, args: list[str]) -> str:

  Args:
  command: The command string with placeholders
- args: List of arguments to substitute (first arg replaces first placeholder, etc.)
+ args: List of arguments to substitute. The first unique variable name
+ maps to args[0], the second unique name maps to args[1], etc.

  Returns:
- Command with placeholders replaced, falling back to env vars for unreplaced placeholders
+ Command with placeholders replaced. Reused variable names get the
+ same argument value. Falls back to env vars for unreplaced placeholders.
  """
  result = command
- arg_index = 0
  placeholder_pattern = re.compile(r'\$\{([^}]+)\}|\$(\w+)')

+ # First pass: collect unique variable names in order of appearance
+ unique_vars = []
+ seen_vars = set()
+ for match in placeholder_pattern.finditer(command):
+ var_name = match.group(1) or match.group(2)
+ if var_name not in seen_vars:
+ seen_vars.add(var_name)
+ unique_vars.append(var_name)
+
+ # Map unique variable names to arguments
+ var_map = {}
+ for i, var_name in enumerate(unique_vars):
+ if i < len(args):
+ var_map[var_name] = args[i]
+
+ # Second pass: replace all placeholders using the map
  def replace_placeholder(match: re.Match) -> str:
- nonlocal arg_index
- # Get the variable name from either ${VAR} or $var format
  var_name = match.group(1) or match.group(2)
- if arg_index < len(args):
- value = args[arg_index]
- arg_index += 1
- return value
+ if var_name in var_map:
+ return var_map[var_name]
  # Fallback to environment variable
  return os.environ.get(var_name, match.group(0))

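A worked example of the rewritten placeholder logic (the command string and arguments below are made up for illustration): each unique variable name is bound to one positional argument in order of first appearance, and a repeated name reuses that value instead of consuming the next argument.

    # Hypothetical crumb command with a repeated placeholder:
    substitute_placeholders("grep $PATTERN $FILE || echo $PATTERN missing", ["error", "app.log"])
    # -> "grep error app.log || echo error missing"
    # Names with no matching argument still fall back to os.environ, as before.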
@@ -1,6 +1,6 @@
  [project]
  name = "rubber-ducky"
- version = "1.6.1"
+ version = "1.6.4"
  description = "Quick CLI do-it-all tool. Use natural language to spit out bash commands"
  readme = "README.md"
  requires-python = ">=3.10"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rubber-ducky
- Version: 1.6.1
+ Version: 1.6.4
  Summary: Quick CLI do-it-all tool. Use natural language to spit out bash commands
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
4 files without changes