trace.ai-cli 1.1.7 → 1.1.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.markdown +150 -150
- package/cli/traceAI.js +30 -5
- package/index.js +8 -4
- package/package.json +45 -45
- package/services/aiService.js +24 -9
- package/services/fileAnalyzer.js +59 -59
- package/services/folderAnalyzer.js +163 -163
- package/services/imageService.js +16 -2
- package/services/systemInfoService.js +956 -956
- package/utils/encryption.js +41 -41
- package/utils/fileUtils.js +100 -100
- package/utils/markdown.js +168 -0
package/README.markdown
CHANGED
@@ -1,151 +1,151 @@
All 150 lines of the README were removed and re-added with identical text (only line 151, `MIT`, remains as unchanged context); the README content follows:

# Trace.Ai CLI

A powerful AI-powered command-line tool powered by mixkey for analyzing files, folders, images, and system information. Trace.AI CLI leverages artificial intelligence to provide code analysis, project structure insights, text extraction from images, and detailed system information, making it an essential tool for developers and analysts.

## Features

- **File Analysis**: Analyze code files (e.g., JavaScript, Python, Java) for structure, quality, and potential improvements.
- **Folder Structure Analysis**: Get insights into project architecture, technology stack, and file organization, including a simple file tree.
- **Image Text Extraction**: Extract text from images (e.g., PNG, JPEG) using AI-powered OCR.
- **System Information**: Get detailed information about your system's hardware, software, and configuration.
- **Context Management**: Add text or file-based context to enhance AI responses.
- **Interactive CLI**: Easy-to-use interface with commands like `/file`, `/folder`, `/image`, `/system`, and more.
- **Extensible**: Supports a wide range of file types and languages, with customizable queries.

## Installation

### Prerequisites

- **Node.js**: Version 14.0.0 or higher.
- **npm**: Included with Node.js.

### Install via npm

Install Trace.AI CLI globally:

```bash
npm install -g trace.ai-cli
```

If you encounter permission issues, try one of the following:

- **With sudo**:

  ```bash
  sudo npm install -g trace.ai-cli
  ```

- **With a user-owned directory**:

  ```bash
  mkdir -p ~/.npm-global
  npm config set prefix '~/.npm-global'
  echo 'export PATH=~/.npm-global/bin:$PATH' >> ~/.zshrc # or ~/.bashrc
  source ~/.zshrc
  npm install -g trace.ai-cli
  ```

Verify the installation:

```bash
trace-ai --version
```

## Usage

### Interactive Mode

Start Trace.AI CLI in interactive mode:

```bash
trace-ai
```

This launches the interactive CLI interface where you can enter commands and queries.

### Commands

- **File Analysis**:
  ```
  /file <file_path> [query]
  ```
  Example: `/file src/app.js explain the main function`

- **Folder Analysis**:
  ```
  /folder <folder_path> [query]
  ```
  Example: `/folder src/ what is the architecture?`

- **Image Analysis**:
  ```
  /image <image_path> [query]
  ```
  Example: `/image diagram.png explain this flowchart`

- **System Information**:
  ```
  /system [query]
  ```
  The system information feature supports natural language queries that automatically determine which information to return.

  Example: `/system How much RAM do I have available?`

- **Add Context**:
  ```
  /context <text>
  ```
  Example: `/context This is a React project using TypeScript`

- **Add File as Context**:
  ```
  /context-file <file_path>
  ```
  Example: `/context-file README.md`

- **View Context**:
  ```
  /view-context
  ```

- **Clear Context or Screen**:
  ```
  /clear [type]
  ```
  Example: `/clear context` or `/clear screen`

- **Help**:
  ```
  /help
  ```

- **Statistics**:
  ```
  /stats
  ```

- **Exit**:
  ```
  /exit
  ```

### Direct Queries

You can also ask questions directly without using commands:

```
What is the difference between let and const in JavaScript?
```

```
How do I implement a binary search tree in Python?
```

```
What's the current CPU usage on my system?
```

## License

MIT
package/cli/traceAI.js
CHANGED
@@ -7,6 +7,7 @@ const { analyzeFile } = require('../services/fileAnalyzer');
 const { analyzeFolderStructure } = require('../services/folderAnalyzer');
 const { extractTextFromImage } = require('../services/imageService');
 const { processWithAI } = require('../services/aiService');
+const { markdownToAnsi } = require('../utils/markdown');

 class TraceAI {
   constructor() {
@@ -18,6 +19,7 @@ class TraceAI {
     this.contexts = [];
     this.sessionStartTime = new Date();
     this.queryCount = 0;
+    this.mode = 2; // 1: Fast, 2: Balanced (default), 3: Think
   }

   // Enhanced startup with animated loading
@@ -146,6 +148,12 @@ class TraceAI {
        example: '/system cpu',
        icon: '💻'
      },
+     {
+       cmd: '/mode <1|2|3>',
+       desc: 'Set AI mode: 1=Fast, 2=Balanced, 3=Think',
+       example: '/mode 2',
+       icon: '⚙️'
+     },
      {
        cmd: '/exit',
        desc: 'Exit the application',
@@ -177,11 +185,13 @@ class TraceAI {
   displayStatusBar() {
     const uptime = Math.floor((new Date() - this.sessionStartTime) / 1000);
     const contextCount = this.contexts.length;
+    const modeLabel = this.mode === 1 ? 'Fast' : this.mode === 2 ? 'Balanced' : 'Think';

     console.log(chalk.gray('\n┌─ SESSION INFO ') + chalk.gray('─'.repeat(34)));
     console.log(chalk.gray('│ ') + chalk.white('Uptime: ') + chalk.green(`${uptime}s`) +
       chalk.gray(' │ ') + chalk.white('Contexts: ') + chalk.cyan(contextCount) +
-      chalk.gray(' │ ') + chalk.white('Queries: ') + chalk.blueBright(this.queryCount)
+      chalk.gray(' │ ') + chalk.white('Queries: ') + chalk.blueBright(this.queryCount) +
+      chalk.gray(' │ ') + chalk.white('Mode: ') + chalk.magenta(modeLabel));
     console.log(chalk.gray('└─') + chalk.gray('─'.repeat(48)));
   }

@@ -220,6 +230,7 @@ class TraceAI {
       '/help': () => this.displayHelp(),
       '/stats': () => this.displayStats(),
       '/system': () => this.handleSystemCommand(trimmedInput),
+      '/mode': () => this.handleModeCommand(trimmedInput),
       '/exit': () => this.close()
     };

@@ -275,7 +286,7 @@ class TraceAI {
     await this.executeWithSpinner(
       `Analyzing image: ${path.basename(imagePath)}`,
       async () => {
-        const result = await extractTextFromImage(path.resolve(imagePath), query);
+        const result = await extractTextFromImage(path.resolve(imagePath), query, this.mode);
         this.displayResult('Image Analysis', result, imagePath);
       }
     );
@@ -399,7 +410,7 @@ class TraceAI {
     await this.executeWithSpinner(
       'Retrieving system information',
       async () => {
-        const result = await processWithAI(query ? `Get system information about ${query}` : 'Get system information');
+        const result = await processWithAI(query ? `Get system information about ${query}` : 'Get system information', '', this.mode);
         this.displayResult('System Information', result);
       }
     );
@@ -410,12 +421,25 @@ class TraceAI {
       'Processing your query',
       async () => {
         const context = this.contexts.map(ctx => ctx.content).join('\n\n');
-        const result = await processWithAI(input, context);
+        const result = await processWithAI(input, context, this.mode);
         this.displayResult('Trace.Ai Response', result.text || result);
       }
     );
   }

+  handleModeCommand(input) {
+    const arg = input.substring('/mode'.length).trim();
+    const value = Number(arg);
+    if (![1, 2, 3].includes(value)) {
+      console.log(chalk.cyan('Usage: /mode <1|2|3> (1=Fast, 2=Balanced, 3=Think)'));
+      return;
+    }
+    this.mode = value;
+    const label = this.mode === 1 ? 'Fast' : this.mode === 2 ? 'Balanced' : 'Think';
+    console.log(chalk.green(`✓ Mode set to ${label} (${this.mode})`));
+    this.displayStatusBar();
+  }
+
   // Utility methods
   parseCommand(input) {
     return input.match(/(?:[^\s"]+|"[^"]*")+/g) || [];
@@ -463,7 +487,8 @@ class TraceAI {
       console.log(chalk.gray(`📍 Source: ${filePath}`));
     }
     console.log(chalk.blueBright('═'.repeat(60)));
-
+    const rendered = markdownToAnsi(String(content || ''));
+    console.log(rendered);
     console.log(chalk.blueBright('═'.repeat(60)));
     console.log();
   }
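
`traceAI.js` now requires `markdownToAnsi` from the new `package/utils/markdown.js` (+168 lines), whose contents are not shown in this diff. As a rough illustration only — not the package's actual implementation — a helper with that call signature could look something like the following, using `chalk` (already a dependency) to style a few common Markdown constructs for the terminal:

```js
// Hypothetical sketch of a markdownToAnsi(text) helper; the real
// package/utils/markdown.js is not included in this diff and may differ.
const chalk = require('chalk');

function markdownToAnsi(markdown) {
  return String(markdown)
    .split('\n')
    .map((line) => {
      // "# Heading" -> bold cyan text without the hashes
      const heading = line.match(/^(#{1,6})\s+(.*)$/);
      if (heading) return chalk.bold.cyan(heading[2]);

      // "- item" / "* item" -> bullet glyph
      const bullet = line.match(/^\s*[-*]\s+(.*)$/);
      let out = bullet ? `  • ${bullet[1]}` : line;

      // **bold** and `inline code`
      out = out.replace(/\*\*(.+?)\*\*/g, (_, text) => chalk.bold(text));
      out = out.replace(/`([^`]+)`/g, (_, code) => chalk.yellow(code));
      return out;
    })
    .join('\n');
}

module.exports = { markdownToAnsi };
```

With a helper along these lines, `displayResult` can hand raw model output to `markdownToAnsi(String(content || ''))` and print styled text instead of literal Markdown markup.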
package/index.js
CHANGED
@@ -5,6 +5,7 @@ const { analyzeFile } = require('./services/fileAnalyzer');
 const { analyzeFolderStructure } = require('./services/folderAnalyzer');
 const { extractTextFromImage } = require('./services/imageService');
 const { processWithAI } = require('./services/aiService');
+const { markdownToAnsi } = require('./utils/markdown');

 // Handle CLI arguments
 async function handleCliArgs() {
@@ -27,7 +28,8 @@ async function handleCliArgs() {
       const result = await analyzeFile(filePath, query);
       console.log('\n📄 File Analysis:');
       console.log('=' .repeat(50));
-      console.log(result.text || result);
+      console.log(markdownToAnsi(String(result.text || result || '')));
+
       console.log('=' .repeat(50));
     } catch (error) {
       console.error('❌ Error:', error.message);
@@ -42,7 +44,8 @@ async function handleCliArgs() {
       const result = await analyzeFolderStructure(folderPath, query);
       console.log('\n📁 Folder Analysis:');
       console.log('=' .repeat(50));
-      console.log(result.text);
+      console.log(markdownToAnsi(String(result.text || '')));
+
       console.log('=' .repeat(50));
     } catch (error) {
       console.error('❌ Error:', error.message);
@@ -56,7 +59,8 @@ async function handleCliArgs() {
       const result = await extractTextFromImage(imagePath, question);
       console.log('\n🖼️ Image Analysis:');
       console.log('=' .repeat(50));
-      console.log(result);
+      console.log(markdownToAnsi(String(result || '')));
+
       console.log('=' .repeat(50));
     } catch (error) {
       console.error('❌ Error:', error.message);
@@ -74,7 +78,7 @@ async function handleCliArgs() {
       const result = await processWithAI(query);
       console.log('\nTrace.Ai Response:');
       console.log('=' .repeat(50));
-      console.log(result);
+      console.log(markdownToAnsi(String(result || '')));
       console.log('=' .repeat(50));
     } catch (error) {
       console.error('❌ Error:', error.message);
package/package.json
CHANGED
@@ -1,45 +1,45 @@
-{
-  "name": "trace.ai-cli",
-  "version": "1.1.7",
-  "description": "A powerful AI-powered CLI tool",
-  "main": "index.js",
-  "bin": {
-    "trace-ai": "index.js"
-  },
-  "scripts": {
-    "start": "node index.js",
-    "test": "echo \"Error: no test specified\" && exit 1"
-  },
-  "keywords": [
-    "ai",
-    "cli",
-    "code-analysis",
-    "file-analysis",
-    "code-review",
-    "bug-detection",
-    "text-processing",
-    "image-text-extraction",
-    "ocr",
-    "artificial-intelligence",
-    "command-line",
-    "tool",
-    "developer-tools",
-    "code-quality",
-    "project-analysis"
-  ],
-  "author": "Dukeindustries7",
-  "license": "MIT",
-  "dependencies": {
-    "chalk": "^4.1.2",
-    "cli-spinners": "^2.9.2",
-    "figlet": "^1.7.0",
-    "node-fetch": "^2.6.7",
-    "ora": "^5.4.1",
-    "os-utils": "^0.0.14",
-    "systeminformation": "^5.27.1",
-    "trace.ai-cli": "^1.1.
-  },
-  "engines": {
-    "node": ">=14.0.0"
-  }
-}
+{
+  "name": "trace.ai-cli",
+  "version": "1.1.9",
+  "description": "A powerful AI-powered CLI tool",
+  "main": "index.js",
+  "bin": {
+    "trace-ai": "index.js"
+  },
+  "scripts": {
+    "start": "node index.js",
+    "test": "echo \"Error: no test specified\" && exit 1"
+  },
+  "keywords": [
+    "ai",
+    "cli",
+    "code-analysis",
+    "file-analysis",
+    "code-review",
+    "bug-detection",
+    "text-processing",
+    "image-text-extraction",
+    "ocr",
+    "artificial-intelligence",
+    "command-line",
+    "tool",
+    "developer-tools",
+    "code-quality",
+    "project-analysis"
+  ],
+  "author": "Dukeindustries7",
+  "license": "MIT",
+  "dependencies": {
+    "chalk": "^4.1.2",
+    "cli-spinners": "^2.9.2",
+    "figlet": "^1.7.0",
+    "node-fetch": "^2.6.7",
+    "ora": "^5.4.1",
+    "os-utils": "^0.0.14",
+    "systeminformation": "^5.27.1",
+    "trace.ai-cli": "^1.1.8"
+  },
+  "engines": {
+    "node": ">=14.0.0"
+  }
+}
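
`package.json` keeps the `"bin": { "trace-ai": "index.js" }` mapping that turns the package into the `trace-ai` command after `npm install -g`. On Unix-like systems that launcher only works if the entry file begins with a Node shebang (not visible in this diff), along these lines:

```js
#!/usr/bin/env node
// Expected first line of index.js so the "bin" mapping yields an executable
// `trace-ai` command; npm creates the launcher, Node runs the script.
```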
package/services/aiService.js
CHANGED
@@ -2,6 +2,20 @@ const fetch = require('node-fetch');
 const { encryptData, decryptData } = require('../utils/encryption');
 const { getSystemInfo, formatBytes } = require('./systemInfoService');

+// Model selection helpers
+function getTextModels(mode) {
+  switch (Number(mode)) {
+    case 1: // Fast
+      return ['g1'];
+    case 2: // Balanced
+      return ['sct', 'g1', 'gptoss20'];
+    case 3: // Think
+      return ['kimi', 'mvrk', 'gma3', 'dsv3', 'sct', 'gptoss120', 'gptoss20', 'll70b', 'qw3', 'nlm3'];
+    default:
+      return ['sct', 'g1', 'gptoss20'];
+  }
+}
+
 /**
  * Use AI to determine what system information is being requested
  * @param {string} prompt - The user's prompt
@@ -40,7 +54,7 @@ Examples:
 Only respond with the category names, nothing else. Be precise and focus on what the user actually needs.`;

   // Use multiple AI models to analyze the query
-  const models =
+  const models = getTextModels(2); // Balanced by default

   const modelRequests = models.map(async (model) => {
     try {
@@ -250,7 +264,7 @@ Make the response helpful and easy to understand for the user.`;
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: encryptData({
-      a:
+      a: getTextModels(2)[0], // Use the first Balanced text model for formatting
       q: formattingPrompt,
       r: [],
       i: [],
@@ -518,7 +532,7 @@ function formatSystemInfoManually(sysInfo, prompt) {
     response += `\n`;
   }

-  response
+  response ;
   return response;
 }

@@ -558,7 +572,7 @@ function getHelpMessage(prompt) {
     `*User: m0v0dga_walmart*`;
 }

-async function processWithAI(prompt, context = '') {
+async function processWithAI(prompt, context = '', mode = 2) {
   try {

     // Enhanced system query detection
@@ -575,7 +589,6 @@ async function processWithAI(prompt, context = '') {

     if (isSystemQuery) {
       try {
-        console.log(`[${new Date().toISOString()}] Processing system information query...`);

         // First get basic system info to provide context
         const basicInfo = await getSystemInfo('basic');
@@ -589,7 +602,6 @@ async function processWithAI(prompt, context = '') {
         // Format the response using AI
         const formattedResponse = await formatSystemInfoResponse(systemInfo, prompt);

-        console.log(`[${new Date().toISOString()}] System info query completed successfully`);
         return formattedResponse;
       } catch (sysError) {
         console.error(`[${new Date().toISOString()}] ❌ System info error:`, sysError.message);
@@ -599,8 +611,7 @@ async function processWithAI(prompt, context = '') {
     }

     // Regular AI processing for non-system queries
-
-    const models = ['kimi', 'mvrk', 'gma3', 'dsv3', 'qw32b', 'ms24b', 'll70b', 'qw3', 'nlm3'];
+    const models = getTextModels(mode); // Use selected mode

     const modelRequests = models.map(async (model) => {
       try {
@@ -632,6 +643,11 @@ async function processWithAI(prompt, context = '') {
       return 'I apologize, but I was unable to process your request at this time. Please try again.';
     }

+    // Fast mode: return the first model response without aggregation
+    if (Number(mode) === 1) {
+      return responseTexts[0];
+    }
+
     const finalResponse = await fetch('https://traceai.dukeindustries7.workers.dev/', {
       method: 'POST',
       headers: { 'Content-Type': 'application/json' },
@@ -647,7 +663,6 @@ async function processWithAI(prompt, context = '') {
     const encryptedResult = await finalResponse.text();
     const decryptedResult = decryptData(encryptedResult);

-    console.log(`[${new Date().toISOString()}] Query completed successfully`);
     return decryptedResult.text || 'No response generated';
   } catch (error) {
     console.error(`[${new Date().toISOString()}] ❌ Processing error:`, error.message);
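
Taken together, the new `mode` parameter controls how many models `processWithAI` fans a prompt out to, and Fast mode skips the final aggregation request entirely. A short sketch of that selection logic in isolation, mirroring the `getTextModels` switch shown above (the driver loop at the bottom is illustrative only):

```js
// Mirrors the getTextModels() mapping added in aiService.js for 1.1.9.
function getTextModels(mode) {
  switch (Number(mode)) {
    case 1: // Fast: a single model; the first response is returned as-is
      return ['g1'];
    case 2: // Balanced (default)
      return ['sct', 'g1', 'gptoss20'];
    case 3: // Think: widest fan-out before aggregation
      return ['kimi', 'mvrk', 'gma3', 'dsv3', 'sct', 'gptoss120', 'gptoss20', 'll70b', 'qw3', 'nlm3'];
    default:
      return ['sct', 'g1', 'gptoss20'];
  }
}

// In the CLI, /mode <1|2|3> sets this.mode, which is then passed as
// processWithAI(prompt, context, this.mode). This loop just shows the fan-out size.
for (const mode of [1, 2, 3]) {
  console.log(`mode ${mode}: queries ${getTextModels(mode).length} model(s)`);
}
```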