@devangkumar/dvai 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -0
- package/bin/index.js +3 -0
- package/package.json +29 -0
- package/scratch/list_models.js +18 -0
- package/src/index.js +93 -0
package/README.md
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# MyAI Terminal 🚀
|
|
2
|
+
|
|
3
|
+
A stunning, powerful AI assistant built directly into your terminal. Powered by Gemini.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
- ✨ **Beautiful UI**: Rich terminal colors and smooth animations.
|
|
7
|
+
- 💬 **Interactive Chat**: Conversational AI experience.
|
|
8
|
+
- ⚡ **Quick Questions**: One-shot answers from your command line.
|
|
9
|
+
- 🆓 **Free to use**: Just add your Gemini API key.
|
|
10
|
+
|
|
11
|
+
## Installation
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
npm install -g @devangkumar/dvai
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
## Setup
|
|
18
|
+
To make this work, you need to add your **Google Gemini API Key** in the source code (as requested for a hardcoded experience) or set it up in `src/index.js`.
|
|
19
|
+
|
|
20
|
+
1. Get a free key from [Google AI Studio](https://aistudio.google.com/).
|
|
21
|
+
2. Edit `src/index.js` and replace `REPLACE_WITH_YOUR_GEMINI_API_KEY` with your actual key.
|
|
22
|
+
|
|
23
|
+
## Usage
|
|
24
|
+
|
|
25
|
+
### Quick Question
|
|
26
|
+
```bash
|
|
27
|
+
myai "How do I reverse a string in JavaScript?"
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
### Interactive Mode
|
|
31
|
+
```bash
|
|
32
|
+
myai chat
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
## Development
|
|
36
|
+
To test locally:
|
|
37
|
+
```bash
|
|
38
|
+
npm install
|
|
39
|
+
npm link
|
|
40
|
+
myai chat
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
## License
|
|
44
|
+
ISC
|
package/bin/index.js
ADDED
package/package.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@devangkumar/dvai",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "A powerful AI assistant for your terminal",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "src/index.js",
|
|
7
|
+
"bin": {
|
|
8
|
+
"myai": "./bin/index.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"start": "node bin/index.js"
|
|
12
|
+
},
|
|
13
|
+
"keywords": [
|
|
14
|
+
"ai",
|
|
15
|
+
"cli",
|
|
16
|
+
"terminal",
|
|
17
|
+
"gemini"
|
|
18
|
+
],
|
|
19
|
+
"author": "",
|
|
20
|
+
"license": "ISC",
|
|
21
|
+
"dependencies": {
|
|
22
|
+
"@google/generative-ai": "^0.24.1",
|
|
23
|
+
"chalk": "^5.6.2",
|
|
24
|
+
"commander": "^14.0.3",
|
|
25
|
+
"devang-super-ai-terminal-v1": "^1.0.0",
|
|
26
|
+
"inquirer": "^8.2.4",
|
|
27
|
+
"ora": "^5.4.1"
|
|
28
|
+
}
|
|
29
|
+
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import { GoogleGenerativeAI } from "@google/generative-ai";

// SECURITY: a real API key was previously hardcoded here and published to the
// public registry — it must be considered compromised and revoked. Read the
// key from the environment instead; never commit secrets to source.
const API_KEY = process.env.GEMINI_API_KEY;
const genAI = new GoogleGenerativeAI(API_KEY);

/**
 * List every model the configured API key can access, one per line.
 * Scratch/diagnostic script: errors are reported to stderr, not rethrown.
 *
 * @returns {Promise<void>}
 */
async function listModels() {
  // Fail fast with a clear message rather than an opaque auth error.
  if (!API_KEY) {
    console.error("Set the GEMINI_API_KEY environment variable first.");
    return;
  }
  try {
    const result = await genAI.listModels();
    console.log("Available Models:");
    result.models.forEach((model) => {
      console.log(`- ${model.name} (${model.displayName})`);
    });
  } catch (error) {
    console.error("Error listing models:", error.message);
  }
}

listModels();
|
package/src/index.js
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
import { GoogleGenerativeAI } from "@google/generative-ai";
import { Command } from "commander";
import ora from "ora";
import inquirer from "inquirer";

// --- CONFIGURATION ---
// SECURITY: a real API key was previously hardcoded on this line and shipped
// in the published package — that key is compromised and must be revoked.
// Read the key from the environment; the placeholder matches the README's
// setup instructions for users who choose to edit this file directly.
const HARDCODED_API_KEY =
  process.env.GEMINI_API_KEY ?? "REPLACE_WITH_YOUR_GEMINI_API_KEY";
const genAI = new GoogleGenerativeAI(HARDCODED_API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });

const program = new Command();

program
  .name("myai")
  .description("AI Assistant")
  .version("1.0.0");
|
17
|
+
|
|
18
|
+
/**
 * Send a prompt to Gemini and return the reply text.
 *
 * @param {string} prompt - User prompt to send to the model.
 * @param {Array<{role: string, parts: Array<{text: string}>}>} [chatHistory=[]]
 *   Prior conversation turns; when non-empty, a chat session is started so the
 *   model sees the conversational context.
 * @returns {Promise<string|null>} The model's reply text, or null on failure
 *   (the error is logged to stderr rather than rethrown so the CLI keeps running).
 */
async function getAIResponse(prompt, chatHistory = []) {
  const spinner = ora({
    text: "",
    spinner: "dots",
    isSilent: true, // Keep it quiet
  }).start();

  try {
    // With history, use a stateful chat session; otherwise a one-shot call.
    // Both SDK paths yield the same { response } envelope.
    const result =
      chatHistory.length > 0
        ? await model.startChat({ history: chatHistory }).sendMessage(prompt)
        : await model.generateContent(prompt);
    return result.response.text();
  } catch (error) {
    console.error("Error:", error.message);
    return null;
  } finally {
    // Guarantee the spinner stops on every path; the original repeated
    // spinner.stop() in three separate places, which is easy to miss when
    // a new exit path is added.
    spinner.stop();
  }
}
|
|
44
|
+
|
|
45
|
+
/**
 * Default command: answer a single one-shot question.
 * With no prompt argument the command is a no-op (commander still
 * handles --help/--version separately).
 */
program
  .argument("[prompt]", "Prompt to send to AI")
  .action(async (prompt) => {
    // Nothing to ask — exit quietly.
    if (!prompt) return;

    const answer = await getAIResponse(prompt);
    if (answer) {
      process.stdout.write(`${answer}\n`);
    }
  });
|
|
60
|
+
|
|
61
|
+
/**
 * `myai chat`: interactive REPL-style conversation with the model.
 * Conversation turns are accumulated so each reply has full context;
 * typing "exit", "quit", or "bye" ends the session.
 */
program
  .command("chat")
  .description("Start chat session")
  .action(async () => {
    const history = [];

    for (;;) {
      const { userInput } = await inquirer.prompt([
        {
          type: "input",
          name: "userInput",
          message: ">",
        },
      ]);

      // Any of these words (case-insensitive) ends the loop.
      const lowered = userInput.toLowerCase();
      if (["exit", "quit", "bye"].includes(lowered)) break;

      const reply = await getAIResponse(userInput, history);

      if (reply) {
        process.stdout.write(`${reply}\n\n`);

        // Record both sides of the exchange so the model keeps context.
        history.push({ role: "user", parts: [{ text: userInput }] });
        history.push({ role: "model", parts: [{ text: reply }] });
      }
    }
  });

program.parse();
|