@atharvaspatil5/aish 1.0.1 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +17 -17
  2. package/package.json +2 -2
  3. package/src/llm.js +20 -31
package/README.md CHANGED
@@ -1,6 +1,20 @@
1
1
  # AI.sh
2
2
 
3
- AI.sh is a CLI tool that converts natural language into safe shell commands.
3
+ AI.sh is a CLI tool that converts natural language into safe shell commands, powered by Groq LLM.
4
+
5
+ ## Install
6
+
7
+ ```bash
8
+ npm install -g @atharvaspatil5/aish
9
+ ```
10
+
11
+ ## Setup
12
+
13
+ Get a free API key at [console.groq.com](https://console.groq.com), then create a `.env` file in your working directory:
14
+
15
+ ```env
16
+ GROQ_API_KEY=your_groq_api_key
17
+ ```
4
18
 
5
19
  ## Usage
6
20
 
@@ -37,22 +51,9 @@ ai "delete everything"
37
51
 
38
52
  ## Safety
39
53
 
40
- * Blocks critical system-level commands
41
54
  * Warns on risky operations (like file deletion)
42
55
  * Requires confirmation before execution
43
-
44
- ## Setup
45
-
46
- ```bash
47
- npm install
48
- npm link
49
- ```
50
-
51
- Create a `.env` file:
52
-
53
- ```env
54
- GROQ_API_KEY=your_api_key
55
- ```
56
+ * Dangerous commands always prompt, even with `-y`
56
57
 
57
58
  ## Structure
58
59
 
@@ -60,11 +61,10 @@ GROQ_API_KEY=your_api_key
60
61
  bin/ CLI entry
61
62
  src/ core logic
62
63
  llm.js
63
- validator.js
64
64
  executor.js
65
65
  index.js
66
66
  ```
67
67
 
68
68
  ## Notes
69
69
 
70
- This is a local CLI tool. Use carefully when executing destructive commands.
70
+ Use carefully when executing destructive commands. The `.env` file must be present in the directory where you run `ai`.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@atharvaspatil5/aish",
3
- "version": "1.0.1",
3
+ "version": "1.0.2",
4
4
  "description": "AI-powered CLI that converts natural language to shell commands",
5
5
  "main": "index.js",
6
6
  "scripts": {
@@ -10,7 +10,7 @@
10
10
  "author": "athPATIL247",
11
11
  "license": "ISC",
12
12
  "bin": {
13
- "ai": "./bin/ai.js"
13
+ "ai": "bin/ai.js"
14
14
  },
15
15
  "type": "module",
16
16
  "dependencies": {
package/src/llm.js CHANGED
@@ -1,38 +1,24 @@
1
1
  import Groq from "groq-sdk";
2
- import { readFileSync } from 'fs';
3
- import { fileURLToPath } from 'url';
4
- import { dirname, join } from 'path';
5
-
6
- const __filename = fileURLToPath(import.meta.url);
7
- const __dirname = dirname(__filename);
8
-
9
- const envPath = join(__dirname, '..', '.env');
10
- const envContent = readFileSync(envPath, 'utf8');
11
- const envLines = envContent.split('\n');
12
- for (const line of envLines) {
13
- const [key, ...valueParts] = line.split('=');
14
- if (key && valueParts.length) {
15
- process.env[key.trim()] = valueParts.join('=').trim();
16
- }
17
- }
2
+ import 'dotenv/config';
18
3
 
19
4
  const groq = new Groq({
20
5
  apiKey: process.env.GROQ_API_KEY,
21
6
  });
22
7
 
23
- export async function getCommand(query){
8
+ export async function getCommand(query) {
24
9
  const response = await groq.chat.completions.create({
25
10
  model: "llama-3.1-8b-instant",
26
11
  messages: [
27
12
  {
28
13
  role: "system",
29
- content: `You are a Linux shell expert. Convert user instructions into a single shell command and assess its risk level.
14
+ content: `You are a shell command generator. Your ONLY output must be a single raw JSON object. No markdown, no code blocks, no backticks, no explanations, no extra text — just the JSON.
30
15
 
31
- Output ONLY a JSON object with exactly these fields:
32
- - command: the shell command (string)
33
- - risk_level: "safe", "warning", or "danger" (string)
16
+ The JSON must have exactly these two fields:
17
+ - "command": the shell command as a string
18
+ - "risk_level": one of "safe", "warning", or "danger"
34
19
 
35
- Do not output any other text, explanations, code blocks, or notes. Just the raw JSON.`,
20
+ Example output:
21
+ {"command":"ls -lh","risk_level":"safe"}`,
36
22
  },
37
23
  {
38
24
  role: "user",
@@ -42,22 +28,25 @@ Do not output any other text, explanations, code blocks, or notes. Just the raw
42
28
  });
43
29
 
44
30
  const content = response.choices[0].message.content.trim();
45
- // Remove markdown code blocks if present
31
+
32
+ // Strip markdown code blocks if model misbehaves
46
33
  let jsonString = content;
47
- if (jsonString.startsWith('```json')) {
48
- jsonString = jsonString.replace(/^```json\s*/, '').replace(/\s*```$/, '');
49
- } else if (jsonString.startsWith('```')) {
50
- jsonString = jsonString.replace(/^```\s*/, '').replace(/\s*```$/, '');
34
+ if (jsonString.startsWith('```')) {
35
+ jsonString = jsonString.replace(/^```[a-z]*\s*/i, '').replace(/\s*```$/, '');
51
36
  }
37
+
38
+ // Extract first JSON object if there's extra text
39
+ const match = jsonString.match(/\{[\s\S]*?\}/);
40
+ if (match) {
41
+ jsonString = match[0];
42
+ }
43
+
52
44
  try {
53
- const result = JSON.parse(jsonString);
54
- return result;
45
+ return JSON.parse(jsonString);
55
46
  } catch (e) {
56
- // Fallback: assume safe if parsing fails
57
47
  return {
58
48
  command: content,
59
49
  risk_level: "safe",
60
- reason: "Unable to assess risk"
61
50
  };
62
51
  }
63
52
  }