owo-cli 1.2.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +109 -0
  2. package/dist/owo.js +453 -34
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -73,6 +73,22 @@ chmod +x dist/owo-cli
  mv dist/owo-cli /usr/local/bin/owo-cli
  ```

+ ### Quick Start
+
+ The fastest way to get started is with the interactive setup wizard:
+
+ ```bash
+ owo setup
+ ```
+
+ This walks you through picking a provider, entering your API key, choosing a model, and enabling optional features. It writes the config file for you.
+
+ Alternatively, you can skip setup entirely with inline flags:
+
+ ```bash
+ owo -p openai -k sk-your-key -m gpt-4.1 list all files larger than 100MB
+ ```
+
  ### Configuration

  `owo` is configured through a single `config.json` file. The first time you run `owo`, it will automatically create a default configuration file to get you started.
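
For orientation, the shape of that `config.json` can be read off the setup wizard added in `dist/owo.js` below; here is a rough sketch of it as an object (field names come from the wizard code, the concrete values are placeholders, not confirmed defaults):

```js
// Sketch of the config shape written by `owo setup` (values are placeholders).
const exampleConfig = {
  type: "OpenAI",        // OpenAI | Custom | Claude | Gemini | GitHub | ClaudeCode
  apiKey: "",            // empty string -> fall back to the provider's env var
  model: "gpt-4.1",
  clipboard: false,      // auto-copy generated commands to the clipboard
  context: {
    enabled: false,      // include recent shell history in the prompt
    maxHistoryCommands: 10
  }
  // baseURL is only set for Custom / OpenAI-compatible providers, e.g.
  // baseURL: "http://localhost:11434/v1"
};
```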
@@ -369,6 +385,99 @@ owo generate a new ssh key called owo-key and add it to the ssh agent

  You'll see the generated command in your shell's input line. Press **Enter** to run it, or edit it first. Executed commands will show up in your shell's history just like any other command.

+ ### CLI Flags
+
+ All flags can override config values for a single invocation without editing `config.json`.
+
+ #### Provider Overrides
+
+ | Flag | Short | Description |
+ |------|-------|-------------|
+ | `--provider <type>` | `-p` | Override provider (`openai`, `claude`, `gemini`, `github`, `claudecode`, `custom`) |
+ | `--model <name>` | `-m` | Override model |
+ | `--api-key <key>` | `-k` | Override API key (highest precedence: flag > config > env var) |
+ | `--base-url <url>` | | Override base URL for custom/OpenAI-compatible providers |
+
+ ```bash
+ # Use Claude for a single query without changing config
+ owo -p claude -m claude-sonnet-4-20250514 find large log files
+
+ # Use a local Ollama model
+ owo -p custom --base-url http://localhost:11434/v1 -m llama3 show disk usage
+ ```
+
+ #### Behavior
+
+ | Flag | Short | Description |
+ |------|-------|-------------|
+ | `--copy` | `-c` | Copy generated command to clipboard |
+ | `--no-copy` | | Don't copy to clipboard |
+ | `--history` | | Include shell history context |
+ | `--no-history` | | Exclude shell history context |
+ | `--history-count <n>` | | Number of history commands to include (implies `--history`) |
+
+ ```bash
+ # Copy command even if config has clipboard disabled
+ owo --copy find all zombie processes
+
+ # Use history context for a single query
+ owo --history redo that but with sudo
+ ```
+
+ #### Output
+
+ | Flag | Short | Description |
+ |------|-------|-------------|
+ | `--exec` | `-x` | Execute the generated command after `Execute? [y/N]` confirmation |
+ | `--explain` | `-e` | Show a brief explanation of the command on stderr |
+ | `--raw` | `-r` | Suppress all non-command output (clean for piping) |
+
+ ```bash
+ # Execute with confirmation
+ owo -x delete all .tmp files older than 7 days
+
+ # Get an explanation alongside the command
+ owo -e find all files larger than 100mb
+
+ # Pipe-safe output
+ result=$(owo -r show my public IP)
+ ```
+
+ #### Debugging
+
+ | Flag | Short | Description |
+ |------|-------|-------------|
+ | `--dry-run` | `-n` | Print the prompt that would be sent without making an API call |
+ | `--verbose` | `-V` | Print diagnostics (provider, model, latency) to stderr |
+ | `--retry <n>` | | Override retry count (default: 2) |
+
+ ```bash
+ # See exactly what would be sent to the API
+ owo --dry-run find large files
+
+ # Debug with full diagnostics
+ owo -V convert all heic files to jpg
+
+ # Fail fast with no retries
+ owo --retry 0 list disk usage
+ ```
+
+ #### Stdin / Pipe Support
+
+ `owo` auto-detects piped input and reads the description from stdin:
+
+ ```bash
+ echo "find files larger than 100mb" | owo
+ ```
+
+ #### The `--` Separator
+
+ Use `--` to separate flags from the description when your description looks like a flag:
+
+ ```bash
+ owo -- -rf delete these files
+ ```
+
  ## License

  [MIT](LICENSE)
package/dist/owo.js CHANGED
@@ -24278,6 +24278,7 @@ var esm_default = createClient;

  // index.ts
  import { execSync } from "child_process";
+ import readline from "readline";
  import os5 from "os";
  import fs3 from "fs";
  import path4 from "path";
@@ -24452,6 +24453,193 @@ function buildContextHistory(contextConfig) {
  }

  // index.ts
+ var PROVIDER_ALIASES = {
+ openai: "OpenAI",
+ custom: "Custom",
+ claude: "Claude",
+ gemini: "Gemini",
+ github: "GitHub",
+ claudecode: "ClaudeCode"
+ };
+ function parseArgs(argv) {
+ const result = {
+ help: false,
+ version: false,
+ dryRun: false,
+ exec: false,
+ explain: false,
+ raw: false,
+ verbose: false,
+ copy: undefined,
+ history: undefined,
+ provider: undefined,
+ model: undefined,
+ apiKey: undefined,
+ baseUrl: undefined,
+ historyCount: undefined,
+ retry: undefined,
+ subcommand: undefined,
+ subcommandArgs: [],
+ description: ""
+ };
+ if (argv[0] === "config") {
+ result.subcommand = "config";
+ result.subcommandArgs = argv.slice(1);
+ return result;
+ }
+ if (argv[0] === "setup") {
+ result.subcommand = "setup";
+ result.subcommandArgs = argv.slice(1);
+ return result;
+ }
+ const descriptionParts = [];
+ let i2 = 0;
+ while (i2 < argv.length) {
+ const arg = argv[i2];
+ if (arg === "--") {
+ descriptionParts.push(...argv.slice(i2 + 1));
+ break;
+ }
+ switch (arg) {
+ case "--help":
+ result.help = true;
+ i2++;
+ continue;
+ case "--version":
+ result.version = true;
+ i2++;
+ continue;
+ case "--dry-run":
+ result.dryRun = true;
+ i2++;
+ continue;
+ case "--exec":
+ result.exec = true;
+ i2++;
+ continue;
+ case "--explain":
+ result.explain = true;
+ i2++;
+ continue;
+ case "--raw":
+ result.raw = true;
+ i2++;
+ continue;
+ case "--verbose":
+ result.verbose = true;
+ i2++;
+ continue;
+ case "--copy":
+ result.copy = true;
+ i2++;
+ continue;
+ case "--no-copy":
+ result.copy = false;
+ i2++;
+ continue;
+ case "--history":
+ result.history = true;
+ i2++;
+ continue;
+ case "--no-history":
+ result.history = false;
+ i2++;
+ continue;
+ case "--provider":
+ result.provider = argv[++i2];
+ i2++;
+ continue;
+ case "--model":
+ result.model = argv[++i2];
+ i2++;
+ continue;
+ case "--api-key":
+ result.apiKey = argv[++i2];
+ i2++;
+ continue;
+ case "--base-url":
+ result.baseUrl = argv[++i2];
+ i2++;
+ continue;
+ case "--history-count": {
+ const val = argv[++i2];
+ result.historyCount = val ? parseInt(val, 10) : undefined;
+ result.history = true;
+ i2++;
+ continue;
+ }
+ case "--retry": {
+ const val = argv[++i2];
+ result.retry = val ? parseInt(val, 10) : undefined;
+ i2++;
+ continue;
+ }
+ }
+ if (arg.startsWith("-") && !arg.startsWith("--") && arg.length >= 2) {
+ let consumed = false;
+ switch (arg) {
+ case "-p":
+ result.provider = argv[++i2];
+ i2++;
+ consumed = true;
+ break;
+ case "-m":
+ result.model = argv[++i2];
+ i2++;
+ consumed = true;
+ break;
+ case "-k":
+ result.apiKey = argv[++i2];
+ i2++;
+ consumed = true;
+ break;
+ }
+ if (consumed)
+ continue;
+ const chars = arg.slice(1);
+ let allValid = true;
+ for (const ch of chars) {
+ switch (ch) {
+ case "h":
+ result.help = true;
+ break;
+ case "v":
+ result.version = true;
+ break;
+ case "n":
+ result.dryRun = true;
+ break;
+ case "x":
+ result.exec = true;
+ break;
+ case "e":
+ result.explain = true;
+ break;
+ case "r":
+ result.raw = true;
+ break;
+ case "V":
+ result.verbose = true;
+ break;
+ case "c":
+ result.copy = true;
+ break;
+ default:
+ allValid = false;
+ break;
+ }
+ }
+ if (allValid) {
+ i2++;
+ continue;
+ }
+ }
+ descriptionParts.push(arg);
+ i2++;
+ }
+ result.description = descriptionParts.join(" ").trim();
+ return result;
+ }
  var CLAUDE_MAX_TOKENS = 1024;
  var DEFAULT_CONFIG = {
  type: "OpenAI",
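
The hunk above adds a hand-rolled parser: long flags and `-p`/`-m`/`-k` consume a value, other single-dash tokens are expanded character by character, and anything after `--` (or any unrecognized token) becomes part of the description. A rough sketch of the resulting behavior (hypothetical calls; `parseArgs` is internal to the bundle and not exported):

```js
// Hypothetical calls to the bundled parseArgs, based on the logic above.

// Value-taking flags, with the remaining words forming the description:
parseArgs(["-p", "claude", "-m", "claude-sonnet-4-20250514", "find", "large", "log", "files"]);
// -> { provider: "claude", model: "claude-sonnet-4-20250514",
//      description: "find large log files", ... }

// Combined short flags are expanded character by character:
parseArgs(["-xe", "delete", "old", "logs"]);
// -> { exec: true, explain: true, description: "delete old logs", ... }

// Everything after "--" is treated as description, even if it looks like a flag:
parseArgs(["--", "-rf", "delete", "these", "files"]);
// -> { description: "-rf delete these files", ... }
```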
@@ -24579,24 +24767,54 @@ async function copyToClipboard(text) {
  throw new Error(`Clipboard operation failed: ${error2}`);
  }
  }
- var VERSION3 = "1.1.3";
+ var VERSION3 = "1.3.0";
  function printHelp() {
  console.log(`owo v${VERSION3} - Natural language to shell commands using AI

  Usage:
  owo <command description> Generate a shell command from a description
+ owo setup Interactive first-run configuration wizard
  owo config path Print config file location
  owo config show Display current config (API keys masked)
  owo config set <key> <value> Set a config value

- Options:
- --help, -h Show this help message
- --version, -v Show version number
+ Provider Overrides:
+ -p, --provider <type> Override provider (openai, claude, gemini, github, claudecode, custom)
+ -m, --model <name> Override model
+ -k, --api-key <key> Override API key
+ --base-url <url> Override base URL (custom providers)
+
+ Behavior:
+ -c, --copy Copy command to clipboard
+ --no-copy Don't copy to clipboard
+ --history Include shell history context
+ --no-history Exclude shell history context
+ --history-count <n> History commands to include (implies --history)
+
+ Output:
+ -x, --exec Execute generated command (with confirmation)
+ -e, --explain Show command explanation on stderr
+ -r, --raw Suppress all non-command output
+
+ Debugging:
+ -n, --dry-run Show prompt without making API call
+ -V, --verbose Show diagnostics (provider, latency, tokens)
+ --retry <n> Override retry count (default: 2)
+
+ General:
+ -h, --help Show this help message
+ -v, --version Show version number
+
+ Use -- to separate flags from description:
+ owo -- -rf delete these files

  Examples:
  owo list all files larger than 100MB
- owo find and replace foo with bar in all js files
- owo compress all png files in current directory
+ owo -p claude -m claude-sonnet-4-20250514 find large log files
+ owo --dry-run find files modified today
+ owo --copy find all zombie processes
+ owo -x delete all .tmp files older than 7 days
+ echo "find large files" | owo

  Providers: OpenAI, Custom, Claude, Gemini, GitHub, ClaudeCode

@@ -24655,20 +24873,148 @@ function handleConfigSubcommand(args) {
  console.error("Usage: owo config <path|show|set>");
  process.exit(1);
  }
+ function createReadlineInterface() {
+ return readline.createInterface({
+ input: process.stdin,
+ output: process.stderr
+ });
+ }
+ function prompt(rl, question) {
+ return new Promise((resolve) => rl.question(question, resolve));
+ }
+ var DEFAULT_MODELS = {
+ OpenAI: "gpt-4.1",
+ Custom: "llama3",
+ Claude: "claude-sonnet-4-20250514",
+ Gemini: "gemini-pro",
+ GitHub: "openai/gpt-4.1-nano",
+ ClaudeCode: "sonnet"
+ };
+ async function handleSetupSubcommand() {
+ const rl = createReadlineInterface();
+ console.error(`
+ owo setup - Interactive Configuration Wizard
+ `);
+ const providers = ["OpenAI", "Claude", "Gemini", "GitHub", "ClaudeCode", "Custom"];
+ console.error("Available providers:");
+ providers.forEach((p2, i2) => console.error(` ${i2 + 1}. ${p2}`));
+ let providerIdx;
+ while (true) {
+ const answer = await prompt(rl, `
+ Select provider [1-${providers.length}]: `);
+ providerIdx = parseInt(answer, 10) - 1;
+ if (providerIdx >= 0 && providerIdx < providers.length)
+ break;
+ console.error("Invalid selection. Please enter a number.");
+ }
+ const providerType = providers[providerIdx];
+ let apiKey = "";
+ if (providerType !== "ClaudeCode") {
+ const envVar = {
+ OpenAI: "OPENAI_API_KEY",
+ Custom: "OPENAI_API_KEY",
+ Claude: "ANTHROPIC_API_KEY",
+ Gemini: "GOOGLE_API_KEY",
+ GitHub: "GITHUB_TOKEN"
+ }[providerType] || "OPENAI_API_KEY";
+ apiKey = await prompt(rl, `
+ API key (or press Enter to use $${envVar}): `);
+ }
+ const defaultModel = DEFAULT_MODELS[providerType];
+ const modelAnswer = await prompt(rl, `
+ Model [${defaultModel}]: `);
+ const model = modelAnswer.trim() || defaultModel;
+ let baseURL;
+ if (providerType === "Custom") {
+ const urlAnswer = await prompt(rl, `
+ Base URL (e.g., http://localhost:11434/v1): `);
+ baseURL = urlAnswer.trim() || undefined;
+ }
+ const clipAnswer = await prompt(rl, `
+ Auto-copy commands to clipboard? [y/N]: `);
+ const clipboard = clipAnswer.trim().toLowerCase() === "y";
+ const histAnswer = await prompt(rl, "Include shell history context? [y/N]: ");
+ const historyEnabled = histAnswer.trim().toLowerCase() === "y";
+ rl.close();
+ const newConfig = {
+ type: providerType,
+ apiKey,
+ model,
+ clipboard,
+ context: {
+ enabled: historyEnabled,
+ maxHistoryCommands: 10
+ }
+ };
+ if (baseURL)
+ newConfig.baseURL = baseURL;
+ const paths = envPaths("owo", { suffix: "" });
+ const configPath = path4.join(paths.config, "config.json");
+ fs3.mkdirSync(paths.config, { recursive: true });
+ fs3.writeFileSync(configPath, JSON.stringify(newConfig, null, 2));
+ console.error(`
+ Configuration saved to: ${configPath}`);
+ console.error(`You're all set! Try: owo list all files larger than 100MB
+ `);
+ process.exit(0);
+ }
  var rawArgs = process.argv.slice(2);
- if (rawArgs.includes("--help") || rawArgs.includes("-h")) {
+ var parsed = parseArgs(rawArgs);
+ if (parsed.help) {
  printHelp();
  process.exit(0);
  }
- if (rawArgs.includes("--version") || rawArgs.includes("-v")) {
+ if (parsed.version) {
  console.log(VERSION3);
  process.exit(0);
  }
- if (rawArgs[0] === "config") {
- handleConfigSubcommand(rawArgs.slice(1));
+ if (parsed.subcommand === "config") {
+ handleConfigSubcommand(parsed.subcommandArgs);
+ }
+ if (parsed.subcommand === "setup") {
+ await handleSetupSubcommand();
+ }
+ var commandDescription = parsed.description;
+ if (!commandDescription && !process.stdin.isTTY) {
+ const chunks = [];
+ for await (const chunk of process.stdin) {
+ chunks.push(chunk);
+ }
+ commandDescription = Buffer.concat(chunks).toString("utf-8").trim();
  }
  var config = getConfig();
- var commandDescription = rawArgs.join(" ").trim();
+ if (parsed.provider) {
+ const normalized = parsed.provider.toLowerCase();
+ const mapped = PROVIDER_ALIASES[normalized];
+ if (!mapped) {
+ console.error(`Error: Unknown provider "${parsed.provider}".`);
+ console.error(`Valid providers: ${Object.keys(PROVIDER_ALIASES).join(", ")}`);
+ process.exit(1);
+ }
+ config.type = mapped;
+ }
+ if (parsed.model)
+ config.model = parsed.model;
+ if (parsed.apiKey)
+ config.apiKey = parsed.apiKey;
+ if (parsed.baseUrl)
+ config.baseURL = parsed.baseUrl;
+ if (parsed.copy !== undefined)
+ config.clipboard = parsed.copy;
+ if (parsed.history !== undefined) {
+ if (!config.context)
+ config.context = { ...DEFAULT_CONTEXT_CONFIG };
+ config.context.enabled = parsed.history;
+ }
+ if (parsed.historyCount !== undefined) {
+ if (!config.context)
+ config.context = { ...DEFAULT_CONTEXT_CONFIG };
+ config.context.maxHistoryCommands = parsed.historyCount;
+ }
+ if (!parsed.apiKey && !config.apiKey) {
+ config.apiKey = getEnvApiKey(config.type);
+ }
+ validateConfig(config);
  if (!commandDescription) {
  console.error("Error: No command description provided.");
  console.error("Usage: owo <command description>");
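
The overrides above amount to a simple per-run precedence rule: CLI flag, then `config.json`, then the provider's environment variable for the API key. A condensed restatement using the same names that appear in the bundle (a sketch, not the verbatim code):

```js
// Condensed restatement of the per-invocation override flow above.
const config = getConfig();                        // values from config.json (or defaults)
if (parsed.provider) config.type = PROVIDER_ALIASES[parsed.provider.toLowerCase()];
if (parsed.model) config.model = parsed.model;     // -m / --model
if (parsed.apiKey) config.apiKey = parsed.apiKey;  // -k / --api-key wins outright
if (!parsed.apiKey && !config.apiKey) {
  // Neither flag nor config.json supplied a key: fall back to the
  // provider's environment variable (e.g. OPENAI_API_KEY, ANTHROPIC_API_KEY).
  config.apiKey = getEnvApiKey(config.type);
}
validateConfig(config);
```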
@@ -24702,7 +25048,7 @@ function sanitizeResponse(content) {
  }
  return lines.at(-1)?.trim() || "";
  }
- async function generateCommand(config2, commandDescription2) {
+ function buildPrompts(config2, commandDescription2, explain) {
  const envContext = `
  Operating System: ${os5.type()} ${os5.release()} (${os5.platform()} - ${os5.arch()})
  Node.js Version: ${process.version}
@@ -24728,7 +25074,26 @@ Free Memory: ${(os5.freemem() / 1024 / 1024).toFixed(0)} MB
  }
  const contextConfig = config2.context || DEFAULT_CONTEXT_CONFIG;
  const historyContext = buildContextHistory(contextConfig);
- const systemPrompt = `
+ let systemPrompt;
+ if (explain) {
+ systemPrompt = `
+ You live in a developer's CLI, helping them convert natural language into CLI commands.
+ Based on the description of the command given, generate the command and a brief explanation.
+ Make sure to escape characters when appropriate. The result of \`${lsCommand}\` is given with the command.
+ Output your response in exactly this format:
+ COMMAND: <the command>
+ EXPLANATION: <brief explanation of what the command does>
+ Do not wrap the command in quotes.
+
+ --- ENVIRONMENT CONTEXT ---
+ ${envContext}
+ --- END ENVIRONMENT CONTEXT ---
+
+ Result of \`${lsCommand}\` in working directory:
+ ${lsResult}
+ ${historyContext}`;
+ } else {
+ systemPrompt = `
  You live in a developer's CLI, helping them convert natural language into CLI commands.
  Based on the description of the command given, generate the command. Output only the command and nothing else.
  Make sure to escape characters when appropriate. The result of \`${lsCommand}\` is given with the command.
@@ -24742,6 +25107,11 @@ ${envContext}
  Result of \`${lsCommand}\` in working directory:
  ${lsResult}
  ${historyContext}`;
+ }
+ return { systemPrompt, userMessage: `Command description: ${commandDescription2}` };
+ }
+ async function generateCommand(config2, commandDescription2, explain) {
+ const { systemPrompt, userMessage } = buildPrompts(config2, commandDescription2, explain);
  if (config2.type !== "ClaudeCode" && !config2.apiKey) {
  const envVar = {
  OpenAI: "OPENAI_API_KEY",
@@ -24765,14 +25135,11 @@ ${historyContext}`;
  model: config2.model,
  messages: [
  { role: "system", content: systemPrompt },
- {
- role: "user",
- content: `Command description: ${commandDescription2}`
- }
+ { role: "user", content: userMessage }
  ]
  });
  const raw = response?.choices?.[0]?.message?.content ?? "";
- return sanitizeResponse(String(raw));
+ return explain ? String(raw) : sanitizeResponse(String(raw));
  }
  case "Claude": {
  const anthropic = new sdk_default({ apiKey: config2.apiKey });
@@ -24781,26 +25148,23 @@ ${historyContext}`;
  system: systemPrompt,
  max_tokens: CLAUDE_MAX_TOKENS,
  messages: [
- {
- role: "user",
- content: `Command description: ${commandDescription2}`
- }
+ { role: "user", content: userMessage }
  ]
  });
  const firstBlock = response.content?.[0];
  const raw = (firstBlock && firstBlock.type === "text" ? firstBlock.text : "") ?? "";
- return sanitizeResponse(String(raw));
+ return explain ? String(raw) : sanitizeResponse(String(raw));
  }
  case "Gemini": {
  const genAI = new GoogleGenerativeAI(config2.apiKey);
  const model = genAI.getGenerativeModel({ model: config2.model });
- const prompt = `${systemPrompt}
+ const fullPrompt = `${systemPrompt}

- Command description: ${commandDescription2}`;
- const result = await model.generateContent(prompt);
+ ${userMessage}`;
+ const result = await model.generateContent(fullPrompt);
  const response = await result.response;
  const raw = await response.text();
- return sanitizeResponse(String(raw));
+ return explain ? String(raw) : sanitizeResponse(String(raw));
  }
  case "GitHub": {
  const endpoint = config2.baseURL ? config2.baseURL : "https://models.github.ai/inference";
@@ -24810,7 +25174,7 @@ Command description: ${commandDescription2}`;
  body: {
  messages: [
  { role: "system", content: systemPrompt },
- { role: "user", content: `Command description: ${commandDescription2}` }
+ { role: "user", content: userMessage }
  ],
  model
  }
@@ -24819,7 +25183,7 @@ Command description: ${commandDescription2}`;
  throw response.body.error;
  }
  const content = response.body.choices?.[0]?.message?.content;
- return sanitizeResponse(String(content ?? ""));
+ return explain ? String(content ?? "") : sanitizeResponse(String(content ?? ""));
  }
  case "ClaudeCode": {
  try {
@@ -24841,29 +25205,84 @@ Command description: ${commandDescription2}`;
  if (config2.model) {
  args.push("--model", config2.model);
  }
- args.push(JSON.stringify(`Command description: ${commandDescription2}`));
+ args.push(JSON.stringify(userMessage));
  const result = execSync(args.join(" "), {
  encoding: "utf-8",
  timeout: 30000
  });
- return sanitizeResponse(result);
+ return explain ? result : sanitizeResponse(result);
  }
  default:
  console.error(`Error: Unknown provider type "${config2.type}" in config.json.`);
  process.exit(1);
  }
  }
+ if (parsed.dryRun) {
+ const { systemPrompt, userMessage } = buildPrompts(config, commandDescription, parsed.explain);
+ console.log("--- SYSTEM PROMPT ---");
+ console.log(systemPrompt);
+ console.log("--- USER MESSAGE ---");
+ console.log(userMessage);
+ console.log("---");
+ console.log(`Provider: ${config.type}`);
+ console.log(`Model: ${config.model}`);
+ process.exit(0);
+ }
+ var retryCount = parsed.retry !== undefined ? parsed.retry : 2;
  try {
- const command = await withRetry(() => generateCommand(config, commandDescription));
+ if (parsed.verbose) {
+ console.error(`Provider: ${config.type}`);
+ console.error(`Model: ${config.model}`);
+ if (config.baseURL)
+ console.error(`Base URL: ${config.baseURL}`);
+ }
+ const startTime = Date.now();
+ const rawResult = await withRetry(() => generateCommand(config, commandDescription, parsed.explain), retryCount);
+ const elapsed = Date.now() - startTime;
+ let command;
+ let explanation;
+ if (parsed.explain) {
+ const commandMatch = rawResult.match(/COMMAND:\s*(.+)/);
+ const explainMatch = rawResult.match(/EXPLANATION:\s*([\s\S]+)/);
+ command = commandMatch ? sanitizeResponse(commandMatch[1]) : sanitizeResponse(rawResult);
+ explanation = explainMatch ? explainMatch[1].trim() : undefined;
+ } else {
+ command = rawResult;
+ }
+ if (parsed.verbose) {
+ console.error(`Latency: ${elapsed}ms`);
+ }
  if (config.clipboard) {
  try {
  await copyToClipboard(command);
+ if (!parsed.raw)
+ console.error("Copied to clipboard.");
  } catch (clipboardError) {
- console.error("Warning: Failed to copy to clipboard:", clipboardError.message);
+ if (!parsed.raw)
+ console.error("Warning: Failed to copy to clipboard:", clipboardError.message);
  }
  }
+ if (explanation && !parsed.raw) {
+ console.error(`
+ Explanation: ${explanation}`);
+ }
  console.log(command);
+ if (parsed.exec) {
+ const rl = createReadlineInterface();
+ const answer = await prompt(rl, `
+ Execute? [y/N]: `);
+ rl.close();
+ if (answer.trim().toLowerCase() === "y") {
+ try {
+ execSync(command, { stdio: "inherit" });
+ } catch (execError) {
+ process.exit(execError.status ?? 1);
+ }
+ }
+ }
  } catch (error2) {
- console.error(`Error generating command (provider: ${config.type}, model: ${config.model}):`, error2.message);
+ if (!parsed.raw) {
+ console.error(`Error generating command (provider: ${config.type}, model: ${config.model}):`, error2.message);
+ }
  process.exit(1);
  }
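
The `--explain` path depends on the model honoring the `COMMAND:` / `EXPLANATION:` format requested in the system prompt; the two regexes above then split the reply back apart. A small self-contained illustration (the reply string is invented, the regexes are the ones from the bundle):

```js
// Illustrative only: how the regexes above split an --explain style reply.
// The reply text is a made-up example, not real model output.
const rawResult = [
  "COMMAND: find . -type f -size +100M",
  "EXPLANATION: Lists files under the current directory larger than 100 MB."
].join("\n");

const commandMatch = rawResult.match(/COMMAND:\s*(.+)/);
const explainMatch = rawResult.match(/EXPLANATION:\s*([\s\S]+)/);

console.log(commandMatch?.[1]);
// -> "find . -type f -size +100M"
console.log(explainMatch?.[1].trim());
// -> "Lists files under the current directory larger than 100 MB."
```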
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "owo-cli",
- "version": "1.2.0",
+ "version": "1.3.0",
  "description": "Natural language to shell commands using AI",
  "type": "module",
  "bin": {