notoken-core 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config/file-hints.json +255 -0
- package/config/hosts.json +14 -0
- package/config/intents.json +3920 -0
- package/config/playbooks.json +112 -0
- package/config/rules.json +100 -0
- package/dist/agents/agentSpawner.d.ts +56 -0
- package/dist/agents/agentSpawner.js +180 -0
- package/dist/agents/planner.d.ts +40 -0
- package/dist/agents/planner.js +175 -0
- package/dist/agents/playbookRunner.d.ts +45 -0
- package/dist/agents/playbookRunner.js +120 -0
- package/dist/agents/taskRunner.d.ts +61 -0
- package/dist/agents/taskRunner.js +142 -0
- package/dist/context/history.d.ts +36 -0
- package/dist/context/history.js +115 -0
- package/dist/conversation/coreference.d.ts +27 -0
- package/dist/conversation/coreference.js +147 -0
- package/dist/conversation/secrets.d.ts +43 -0
- package/dist/conversation/secrets.js +129 -0
- package/dist/conversation/store.d.ts +94 -0
- package/dist/conversation/store.js +184 -0
- package/dist/execution/git.d.ts +11 -0
- package/dist/execution/git.js +146 -0
- package/dist/execution/ssh.d.ts +2 -0
- package/dist/execution/ssh.js +17 -0
- package/dist/handlers/executor.d.ts +8 -0
- package/dist/handlers/executor.js +216 -0
- package/dist/healing/claudeHealer.d.ts +17 -0
- package/dist/healing/claudeHealer.js +300 -0
- package/dist/healing/patchPromoter.d.ts +25 -0
- package/dist/healing/patchPromoter.js +118 -0
- package/dist/healing/ruleBuilder.d.ts +5 -0
- package/dist/healing/ruleBuilder.js +111 -0
- package/dist/healing/ruleRepairer.d.ts +8 -0
- package/dist/healing/ruleRepairer.js +29 -0
- package/dist/healing/ruleValidator.d.ts +22 -0
- package/dist/healing/ruleValidator.js +145 -0
- package/dist/healing/runHealer.d.ts +11 -0
- package/dist/healing/runHealer.js +74 -0
- package/dist/index.d.ts +51 -0
- package/dist/index.js +62 -0
- package/dist/intents/catalog.d.ts +4 -0
- package/dist/intents/catalog.js +7 -0
- package/dist/nlp/disambiguate.d.ts +2 -0
- package/dist/nlp/disambiguate.js +46 -0
- package/dist/nlp/fuzzyResolver.d.ts +14 -0
- package/dist/nlp/fuzzyResolver.js +108 -0
- package/dist/nlp/llmFallback.d.ts +63 -0
- package/dist/nlp/llmFallback.js +338 -0
- package/dist/nlp/llmParser.d.ts +8 -0
- package/dist/nlp/llmParser.js +118 -0
- package/dist/nlp/multiClassifier.d.ts +39 -0
- package/dist/nlp/multiClassifier.js +181 -0
- package/dist/nlp/parseIntent.d.ts +2 -0
- package/dist/nlp/parseIntent.js +34 -0
- package/dist/nlp/ruleParser.d.ts +2 -0
- package/dist/nlp/ruleParser.js +234 -0
- package/dist/nlp/semantic.d.ts +104 -0
- package/dist/nlp/semantic.js +419 -0
- package/dist/nlp/uncertainty.d.ts +42 -0
- package/dist/nlp/uncertainty.js +103 -0
- package/dist/parsers/apacheParser.d.ts +50 -0
- package/dist/parsers/apacheParser.js +152 -0
- package/dist/parsers/bindParser.d.ts +40 -0
- package/dist/parsers/bindParser.js +189 -0
- package/dist/parsers/envFile.d.ts +39 -0
- package/dist/parsers/envFile.js +128 -0
- package/dist/parsers/fileFinder.d.ts +30 -0
- package/dist/parsers/fileFinder.js +226 -0
- package/dist/parsers/index.d.ts +27 -0
- package/dist/parsers/index.js +193 -0
- package/dist/parsers/jsonParser.d.ts +16 -0
- package/dist/parsers/jsonParser.js +57 -0
- package/dist/parsers/nginxParser.d.ts +47 -0
- package/dist/parsers/nginxParser.js +161 -0
- package/dist/parsers/passwd.d.ts +25 -0
- package/dist/parsers/passwd.js +41 -0
- package/dist/parsers/shadow.d.ts +23 -0
- package/dist/parsers/shadow.js +50 -0
- package/dist/parsers/yamlParser.d.ts +13 -0
- package/dist/parsers/yamlParser.js +54 -0
- package/dist/policy/confirm.d.ts +2 -0
- package/dist/policy/confirm.js +29 -0
- package/dist/policy/safety.d.ts +4 -0
- package/dist/policy/safety.js +32 -0
- package/dist/types/intent.d.ts +205 -0
- package/dist/types/intent.js +32 -0
- package/dist/types/rules.d.ts +237 -0
- package/dist/types/rules.js +50 -0
- package/dist/utils/analysis.d.ts +25 -0
- package/dist/utils/analysis.js +307 -0
- package/dist/utils/autoBackup.d.ts +43 -0
- package/dist/utils/autoBackup.js +144 -0
- package/dist/utils/config.d.ts +11 -0
- package/dist/utils/config.js +32 -0
- package/dist/utils/dirAnalysis.d.ts +23 -0
- package/dist/utils/dirAnalysis.js +192 -0
- package/dist/utils/explain.d.ts +8 -0
- package/dist/utils/explain.js +145 -0
- package/dist/utils/logger.d.ts +5 -0
- package/dist/utils/logger.js +29 -0
- package/dist/utils/output.d.ts +2 -0
- package/dist/utils/output.js +26 -0
- package/dist/utils/paths.d.ts +26 -0
- package/dist/utils/paths.js +47 -0
- package/dist/utils/permissions.d.ts +64 -0
- package/dist/utils/permissions.js +298 -0
- package/dist/utils/platform.d.ts +53 -0
- package/dist/utils/platform.js +253 -0
- package/dist/utils/smartFile.d.ts +29 -0
- package/dist/utils/smartFile.js +188 -0
- package/dist/utils/spinner.d.ts +53 -0
- package/dist/utils/spinner.js +140 -0
- package/dist/utils/verbose.d.ts +27 -0
- package/dist/utils/verbose.js +131 -0
- package/dist/utils/wslPaths.d.ts +31 -0
- package/dist/utils/wslPaths.js +145 -0
- package/package.json +39 -0
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import { runRemoteCommand, runLocalCommand } from "../execution/ssh.js";
|
|
2
|
+
import { getIntentDef } from "../utils/config.js";
|
|
3
|
+
/**
 * Fuzzy file resolver.
 *
 * For intents that declare `fuzzyResolve` fields, try to locate the real
 * file path on the target server using a combination of exact-name search,
 * partial-name (fuzzy) search, and the locate database.
 *
 * Returns a copy of the intent with any resolved absolute paths filled in;
 * fields that cannot be resolved are left untouched.
 */
export async function resolveFuzzyFields(intent) {
    const def = getIntentDef(intent.intent);
    const fuzzyFields = def?.fuzzyResolve ?? [];
    if (fuzzyFields.length === 0)
        return intent;
    const environment = intent.fields.environment ?? "dev";
    const updated = { ...intent.fields };
    for (const name of fuzzyFields) {
        const value = updated[name];
        // Skip empty values and anything that already looks like an absolute path.
        if (!value || value.startsWith("/"))
            continue;
        const found = await fuzzyFindFile(value, environment, def.execution);
        if (found)
            updated[name] = found;
    }
    return { ...intent, fields: updated };
}
|
|
36
|
+
/**
 * Try to resolve a bare filename to a full path on the target host.
 *
 * Three strategies in order: exact-name `find` over common locations,
 * case-insensitive partial-name `find` on the extensionless name, then the
 * `locate` database. Returns the best-ranked match, or null when nothing
 * is found (command failures are treated as "no match").
 */
async function fuzzyFindFile(filename, environment, execution) {
    const run = execution === "local"
        ? runLocalCommand
        : (cmd) => runRemoteCommand(environment, cmd);
    const searchPaths = ["/etc", "/var/log", "/var", "/srv", "/opt", "/tmp", "/root", "/home"];
    // Run one search command; return the single/ranked best hit, or null.
    const attempt = async (cmd) => {
        try {
            const output = (await run(cmd)).trim();
            if (!output)
                return null;
            const candidates = output.split("\n").filter(Boolean);
            if (candidates.length === 0)
                return null;
            return candidates.length === 1 ? candidates[0] : rankMatches(filename, candidates);
        }
        catch {
            return null;
        }
    };
    // Strategy 1: find by exact name in common locations.
    const exact = await attempt(`find ${searchPaths.join(" ")} -maxdepth 4 -name '${sanitizeForShell(filename)}' 2>/dev/null | head -10`);
    if (exact)
        return exact;
    // Strategy 2: fuzzy — case-insensitive partial match on the extensionless name.
    const baseName = filename.replace(/\.[^.]+$/, "");
    const partial = await attempt(`find ${searchPaths.join(" ")} -maxdepth 4 -iname '*${sanitizeForShell(baseName)}*' 2>/dev/null | head -10`);
    if (partial)
        return partial;
    // Strategy 3: the locate database (if available).
    return attempt(`locate -i '${sanitizeForShell(filename)}' 2>/dev/null | head -10`);
}
|
|
80
|
+
/**
 * Rank file path matches by similarity to the query.
 * Prefers: exact basename match > shorter path > alphabetical
 *
 * @param {string} query    the filename the user asked for
 * @param {string[]} matches candidate absolute paths (must be non-empty)
 * @returns {string} the best-scoring candidate path
 */
function rankMatches(query, matches) {
    const queryLower = query.toLowerCase();
    const scored = matches.map((m) => {
        const basename = m.split("/").pop()?.toLowerCase() ?? "";
        let score = 0;
        // Exact basename match
        if (basename === queryLower)
            score += 100;
        // Basename starts with query
        else if (basename.startsWith(queryLower))
            score += 50;
        // Basename contains query
        else if (basename.includes(queryLower))
            score += 25;
        // Prefer shorter paths (more specific location)
        score -= m.split("/").length;
        return { path: m, score };
    });
    // Fix: break score ties alphabetically, as documented above. Previously
    // ties silently kept input order (Array.sort is stable), so the
    // "alphabetical" part of the contract was never honored.
    scored.sort((a, b) => b.score - a.score || a.path.localeCompare(b.path));
    return scored[0].path;
}
|
|
105
|
+
/**
 * Strip characters that are unsafe to interpolate into a shell command.
 * Keeps only alphanumerics plus dot, underscore, glob star, and dash.
 */
function sanitizeForShell(value) {
    const allowed = /[a-zA-Z0-9._*\-]/;
    let out = "";
    for (const ch of value) {
        if (allowed.test(ch))
            out += ch;
    }
    return out;
}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
 * LLM fallback for unrecognized prompts.
 *
 * ONLY fires when an LLM is actually configured:
 * - MYCLI_LLM_ENDPOINT env var is set (for API), OR
 * - MYCLI_LLM_CLI=claude|chatgpt is set (for CLI tools)
 *
 * Otherwise returns null immediately — no noise, no "trying fallback" messages.
 */
export interface LLMFallbackResult {
    /** Whether the LLM could make sense of the prompt at all. */
    understood: boolean;
    /** Plain-English restatement of what the user appears to want. */
    restatement: string;
    /** Candidate intents, each with extracted fields and a confidence score. */
    suggestedIntents: Array<{
        intent: string;
        fields: Record<string, unknown>;
        confidence: number;
        reasoning: string;
    }>;
    /** Optional step-by-step plan when no single intent covers the request. */
    todoSteps?: Array<{
        step: number;
        description: string;
        intent?: string;
        command?: string;
    }>;
    /** Questions the LLM would need answered before acting. */
    missingInfo?: string[];
}
/**
 * Check if any LLM is available.
 * Order: explicit config → auto-detect Ollama → nothing.
 */
export declare function isLLMConfigured(): boolean;
/** Which LLM backend is active? */
export declare function getLLMBackend(): string | null;
/**
 * Ask the LLM to interpret an unrecognized prompt.
 * Returns null immediately if no LLM is available.
 *
 * Priority: CLI (claude/chatgpt) → API endpoint → Ollama (local) → null
 */
export declare function llmFallback(rawText: string, context: {
    recentIntents?: string[];
    knownEntities?: Array<{
        entity: string;
        type: string;
    }>;
    uncertainTokens?: string[];
}): Promise<LLMFallbackResult | null>;
/**
 * Check if Ollama is installed (not just running).
 * Used by doctor and interactive mode to offer installation.
 */
export declare function isOllamaInstalled(): boolean;
/**
 * Check if Ollama has any models pulled.
 */
export declare function getOllamaModels(): Promise<string[]>;
/**
 * Format an LLM fallback result for display.
 */
export declare function formatLLMFallback(result: LLMFallbackResult): string;
|
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM fallback for unrecognized prompts.
|
|
3
|
+
*
|
|
4
|
+
* ONLY fires when an LLM is actually configured:
|
|
5
|
+
* - MYCLI_LLM_ENDPOINT env var is set (for API), OR
|
|
6
|
+
* - MYCLI_LLM_CLI=claude|chatgpt is set (for CLI tools)
|
|
7
|
+
*
|
|
8
|
+
* Otherwise returns null immediately — no noise, no "trying fallback" messages.
|
|
9
|
+
*/
|
|
10
|
+
import { execSync } from "node:child_process";
|
|
11
|
+
import { loadIntents } from "../utils/config.js";
|
|
12
|
+
import { detectLocalPlatform } from "../utils/platform.js";
|
|
13
|
+
/**
 * Check if any LLM is available.
 * Order: explicit config (MYCLI_LLM_ENDPOINT or MYCLI_LLM_CLI env vars) →
 * auto-detected local Ollama → nothing.
 */
export function isLLMConfigured() {
    return !!(process.env.MYCLI_LLM_ENDPOINT || process.env.MYCLI_LLM_CLI || detectOllama());
}
|
|
23
|
+
/**
 * Which LLM backend is active?
 * Returns the CLI tool name, "api", "ollama", or null when nothing is set up.
 */
export function getLLMBackend() {
    const { MYCLI_LLM_CLI: cli, MYCLI_LLM_ENDPOINT: endpoint } = process.env;
    if (cli)
        return cli;
    if (endpoint)
        return "api";
    return detectOllama() ? "ollama" : null;
}
|
|
33
|
+
// Process-lifetime cache for the Ollama probe result.
let ollamaChecked = false;
let ollamaAvailable = false;
/**
 * Probe once for a usable local Ollama: the binary must be on PATH AND the
 * daemon must answer on its default port. The (possibly negative) result is
 * cached for the lifetime of the process.
 */
function detectOllama() {
    if (!ollamaChecked) {
        ollamaChecked = true;
        try {
            // Is the binary installed?
            execSync("command -v ollama", { timeout: 1000, stdio: "pipe" });
            // Is the daemon actually reachable?
            execSync("curl -sf --max-time 1 http://localhost:11434/api/tags >/dev/null 2>&1", { timeout: 2000, stdio: "pipe" });
            ollamaAvailable = true;
        }
        catch {
            ollamaAvailable = false;
        }
    }
    return ollamaAvailable;
}
|
|
49
|
+
/**
 * Ask the LLM to interpret an unrecognized prompt.
 * Returns null immediately if no LLM is available.
 *
 * Priority: CLI (claude/chatgpt) → API endpoint → Ollama (local) → null
 */
export async function llmFallback(rawText, context) {
    if (!isLLMConfigured())
        return null;
    // Ordered backend attempts; each gated on its own availability check.
    const backends = [
        [() => !!process.env.MYCLI_LLM_CLI, tryLLMCli],
        [() => !!process.env.MYCLI_LLM_ENDPOINT, tryApiEndpoint],
        [detectOllama, tryOllama],
    ];
    for (const [enabled, attempt] of backends) {
        if (!enabled())
            continue;
        const result = await attempt(rawText, context);
        if (result)
            return result;
    }
    return null;
}
|
|
78
|
+
/**
 * Run the configured CLI tool (claude or chatgpt) on the prompt.
 *
 * Returns the parsed structured result, or null when MYCLI_LLM_CLI is
 * unset/unsupported, the binary is missing, the call times out, or the
 * output cannot be parsed.
 */
async function tryLLMCli(rawText, context) {
    const cli = process.env.MYCLI_LLM_CLI;
    if (!cli)
        return null;
    try {
        // Fix: execSync is already imported at the top of this module; the
        // previous dynamic `await import("node:child_process")` here was
        // redundant and has been removed.
        const prompt = buildPrompt(rawText, context);
        let cmd;
        if (cli === "claude") {
            // Throws (→ null) when the binary is not on PATH.
            execSync("command -v claude", { stdio: "pipe" });
            cmd = `claude -p ${JSON.stringify(prompt)} --output-format json --max-turns 1 --no-session-persistence`;
        }
        else if (cli === "chatgpt") {
            execSync("command -v chatgpt", { stdio: "pipe" });
            cmd = `chatgpt ${JSON.stringify(prompt)}`;
        }
        else {
            // Unknown CLI value — nothing we know how to drive.
            return null;
        }
        const rawResult = execSync(cmd, {
            encoding: "utf-8",
            timeout: 60_000,
            stdio: ["pipe", "pipe", "pipe"],
        });
        // Claude CLI --output-format json wraps the response; extract the text content
        let result = rawResult;
        try {
            const parsed = JSON.parse(rawResult);
            // Claude CLI json format: { result: "...", ... } or { content: [{text: "..."}] }
            if (parsed.result && typeof parsed.result === "string") {
                result = parsed.result;
            }
            else if (Array.isArray(parsed)) {
                // Array of messages — find the assistant message
                const assistant = parsed.find((m) => m.role === "assistant");
                if (assistant?.content) {
                    if (typeof assistant.content === "string")
                        result = assistant.content;
                    else if (Array.isArray(assistant.content)) {
                        const textBlock = assistant.content.find((b) => b.type === "text");
                        if (textBlock?.text)
                            result = textBlock.text;
                    }
                }
            }
        }
        catch {
            // Not JSON — use as-is (text mode fallback)
        }
        return parseResponse(result);
    }
    catch {
        return null;
    }
}
|
|
133
|
+
/**
 * POST the prompt to the configured HTTP endpoint (OpenAI- or
 * Anthropic-style chat API). Returns the parsed result, or null on any
 * transport/HTTP/parse failure.
 */
async function tryApiEndpoint(rawText, context) {
    const endpoint = process.env.MYCLI_LLM_ENDPOINT;
    if (!endpoint)
        return null;
    const apiKey = process.env.MYCLI_LLM_API_KEY ?? "";
    const model = process.env.MYCLI_LLM_MODEL ?? "claude-sonnet-4-20250514";
    // Send both auth header styles so either API flavor authenticates.
    const headers = {
        "Content-Type": "application/json",
        ...(apiKey ? { Authorization: `Bearer ${apiKey}`, "x-api-key": apiKey } : {}),
    };
    const payload = {
        model,
        max_tokens: 1024,
        messages: [{ role: "user", content: buildPrompt(rawText, context) }],
    };
    try {
        const response = await fetch(endpoint, {
            method: "POST",
            headers,
            body: JSON.stringify(payload),
        });
        if (!response.ok)
            return null;
        const content = extractContent(await response.json());
        return content ? parseResponse(content) : null;
    }
    catch {
        return null;
    }
}
|
|
165
|
+
/**
 * Query the local Ollama daemon's /api/generate endpoint.
 * Returns the parsed result, or null when the daemon is unreachable or the
 * reply is empty/unparseable.
 */
async function tryOllama(rawText, context) {
    const body = JSON.stringify({
        model: process.env.MYCLI_OLLAMA_MODEL ?? "llama3.2",
        prompt: buildPrompt(rawText, context),
        stream: false,
        // Low temperature: we want deterministic, parseable JSON back.
        options: { temperature: 0.1, num_predict: 1024 },
    });
    try {
        const response = await fetch("http://localhost:11434/api/generate", {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body,
        });
        if (!response.ok)
            return null;
        const data = await response.json();
        return data.response ? parseResponse(data.response) : null;
    }
    catch {
        return null;
    }
}
|
|
191
|
+
/**
 * Check if Ollama is installed (not just running).
 * Used by doctor and interactive mode to offer installation.
 *
 * Fix: this module is ESM (it uses `import`/`export`), where `require` is
 * not defined — the previous `require("node:child_process")` always threw,
 * so this function unconditionally returned false even when Ollama was
 * installed. Use the execSync already imported at module top instead.
 */
export function isOllamaInstalled() {
    try {
        execSync("command -v ollama", { stdio: "pipe" });
        return true;
    }
    catch {
        return false;
    }
}
|
|
205
|
+
/**
 * Check if Ollama has any models pulled.
 * Returns the list of model names, or [] when the daemon is down or the
 * response is malformed.
 */
export async function getOllamaModels() {
    try {
        const response = await fetch("http://localhost:11434/api/tags");
        if (!response.ok)
            return [];
        const data = await response.json();
        const models = data.models ?? [];
        return models.map((m) => m.name);
    }
    catch {
        return [];
    }
}
|
|
220
|
+
/**
 * Build the interpretation prompt sent to whichever LLM backend is active.
 *
 * The prompt packs together: local platform facts, the raw user input, the
 * caller-supplied conversation context, and a one-line summary of every
 * known intent ("*" marks required fields), then pins the exact JSON reply
 * shape the backend must return (see parseResponse).
 */
function buildPrompt(rawText, context) {
    const intents = loadIntents();
    // One line per intent: "  name: description [field:type*, ...]".
    const intentSummary = intents.map((i) => {
        const fields = Object.entries(i.fields)
            .map(([k, v]) => `${k}:${v.type}${v.required ? "*" : ""}`)
            .join(", ");
        return `  ${i.name}: ${i.description} [${fields}]`;
    }).join("\n");
    const platform = detectLocalPlatform();
    return `You are a server operations CLI assistant. The user said something I couldn't parse with my rule-based system.

ENVIRONMENT:
OS: ${platform.distro}${platform.isWSL ? " (WSL)" : ""}
Kernel: ${platform.kernel}
Arch: ${platform.arch}
Shell: ${platform.shell}
Package manager: ${platform.packageManager}
Init system: ${platform.initSystem}

USER INPUT: "${rawText}"

CONTEXT:
${JSON.stringify(context, null, 2)}

AVAILABLE INTENTS (these are the tools I can execute):
${intentSummary}

Respond with ONLY a JSON object:
{
  "understood": true/false,
  "restatement": "In plain English, what the user wants to do",
  "suggestedIntents": [
    {
      "intent": "intent.name from list above",
      "fields": { "field": "value" },
      "confidence": 0.0-1.0,
      "reasoning": "why this intent"
    }
  ],
  "todoSteps": [
    { "step": 1, "description": "what to do first", "intent": "optional intent name" }
  ],
  "missingInfo": ["things I'd need to ask the user"]
}

Return ONLY JSON.`;
}
|
|
267
|
+
/**
 * Coerce an LLM reply into an LLMFallbackResult-shaped object.
 *
 * Tries, in order: the raw text as JSON (must carry an "understood" key),
 * the contents of a fenced ```json``` block, then the first {...} span
 * containing "understood". Returns null when nothing parses.
 */
function parseResponse(raw) {
    const tryJson = (text) => {
        try {
            return JSON.parse(text);
        }
        catch {
            return undefined;
        }
    };
    const direct = tryJson(raw.trim());
    if (direct != null && direct.understood !== undefined)
        return direct;
    const fenced = raw.match(/```(?:json)?\s*([\s\S]*?)```/);
    if (fenced) {
        const parsed = tryJson(fenced[1].trim());
        if (parsed !== undefined)
            return parsed;
    }
    const embedded = raw.match(/\{[\s\S]*"understood"[\s\S]*\}/);
    if (embedded) {
        const parsed = tryJson(embedded[0]);
        if (parsed !== undefined)
            return parsed;
    }
    return null;
}
|
|
290
|
+
/**
 * Pull the assistant text out of a chat-API response body.
 * Understands OpenAI ("choices"), Anthropic ("content" blocks), and a bare
 * { result: "..." } shape; returns null for anything else.
 */
function extractContent(data) {
    // OpenAI style: choices[0].message.content
    const choiceContent = Array.isArray(data.choices) ? data.choices[0]?.message?.content : undefined;
    if (choiceContent && typeof choiceContent === "string")
        return choiceContent;
    // Anthropic style: content[0].text
    const blockText = Array.isArray(data.content) ? data.content[0]?.text : undefined;
    if (blockText && typeof blockText === "string")
        return blockText;
    // Bare wrapper: { result: "..." }
    if (typeof data.result === "string")
        return data.result;
    return null;
}
|
|
305
|
+
/**
 * Format an LLM fallback result for display.
 *
 * Produces an ANSI-colored block: the restatement, suggested intents with
 * confidence and extracted fields, an optional step plan, and any
 * outstanding questions.
 */
export function formatLLMFallback(result) {
    const c = { reset: "\x1b[0m", bold: "\x1b[1m", dim: "\x1b[2m", cyan: "\x1b[36m", yellow: "\x1b[33m", green: "\x1b[32m" };
    const out = [];
    out.push(`${c.bold}${c.cyan}LLM Interpretation:${c.reset}`);
    out.push(` ${result.restatement}`);
    if (result.suggestedIntents.length > 0) {
        out.push(`\n${c.bold}Suggested actions:${c.reset}`);
        for (const suggestion of result.suggestedIntents) {
            const pct = (suggestion.confidence * 100).toFixed(0);
            out.push(` ${c.green}${suggestion.intent}${c.reset} (${pct}%) — ${suggestion.reasoning}`);
            const entries = Object.entries(suggestion.fields);
            if (entries.length > 0) {
                out.push(` ${entries.map(([k, v]) => `${k}=${v}`).join(", ")}`);
            }
        }
    }
    if (result.todoSteps?.length) {
        out.push(`\n${c.bold}Plan:${c.reset}`);
        for (const step of result.todoSteps) {
            const tag = step.intent ? ` ${c.dim}[${step.intent}]${c.reset}` : "";
            out.push(` ${c.cyan}${step.step}.${c.reset} ${step.description}${tag}`);
        }
    }
    if (result.missingInfo?.length) {
        out.push(`\n${c.yellow}Need more info:${c.reset}`);
        for (const question of result.missingInfo) {
            out.push(` ? ${question}`);
        }
    }
    return out.join("\n");
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import type { DynamicIntent } from "../types/intent.js";
/**
 * LLM-based fallback parser.
 *
 * Sends the raw text + context to an LLM and asks for structured JSON.
 * Set MYCLI_LLM_ENDPOINT and optionally MYCLI_LLM_API_KEY in env.
 *
 * @param rawText - the user's natural-language command
 * @returns the schema-validated intent, or null when no endpoint is
 *          configured, the request fails, or the reply does not validate
 */
export declare function parseByLLM(rawText: string): Promise<DynamicIntent | null>;
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
import { DynamicIntent as DynamicIntentSchema } from "../types/intent.js";
|
|
2
|
+
import { loadIntents } from "../utils/config.js";
|
|
3
|
+
import { loadRules } from "../utils/config.js";
|
|
4
|
+
/**
 * LLM-based fallback parser.
 *
 * Sends the raw text + context to an LLM and asks for structured JSON.
 * Set MYCLI_LLM_ENDPOINT and optionally MYCLI_LLM_API_KEY in env.
 *
 * Returns a schema-validated DynamicIntent, or null when no endpoint is
 * configured, the request fails, or the reply does not validate.
 */
export async function parseByLLM(rawText) {
    const endpoint = process.env.MYCLI_LLM_ENDPOINT;
    if (!endpoint)
        return null;
    const apiKey = process.env.MYCLI_LLM_API_KEY ?? "";
    const rules = loadRules();
    const intents = loadIntents();
    const systemPrompt = buildSystemPrompt(intents, rules);
    const userPrompt = `Parse this command into structured intent JSON:\n\n"${rawText}"`;
    try {
        const response = await fetch(endpoint, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                ...(apiKey ? { Authorization: `Bearer ${apiKey}`, "x-api-key": apiKey } : {}),
            },
            body: JSON.stringify({
                model: process.env.MYCLI_LLM_MODEL ?? "claude-sonnet-4-20250514",
                max_tokens: 512,
                messages: [
                    { role: "system", content: systemPrompt },
                    { role: "user", content: userPrompt },
                ],
            }),
        });
        if (!response.ok)
            return null;
        const data = (await response.json());
        const content = extractContent(data);
        if (!content)
            return null;
        const json = extractJSON(content);
        if (!json)
            return null;
        // Reshape LLM output into DynamicIntent.
        // Fix: the system prompt asks for {"intent", "confidence", "fields": {...}},
        // but the old reshape copied every non-meta top-level key — including the
        // nested "fields" object itself — yielding { fields: { fields: {...} } }.
        // Use the nested "fields" object when present; otherwise tolerate flat
        // replies by collecting top-level extras.
        const intent = json.intent;
        const confidence = json.confidence;
        let fields;
        if (json.fields && typeof json.fields === "object" && !Array.isArray(json.fields)) {
            fields = { ...json.fields };
        }
        else {
            fields = {};
            for (const [k, v] of Object.entries(json)) {
                if (!["intent", "confidence", "rawText", "fields"].includes(k)) {
                    fields[k] = v;
                }
            }
        }
        const parsed = DynamicIntentSchema.safeParse({ intent, confidence, rawText, fields });
        if (!parsed.success)
            return null;
        return parsed.data;
    }
    catch {
        return null;
    }
}
|
|
62
|
+
/**
 * Build the system prompt for the structured-parse request.
 *
 * Lists every supported intent with its field signature, plus the known
 * environment and service aliases from the rules config, and pins the
 * exact JSON reply shape that parseByLLM expects back.
 */
function buildSystemPrompt(intents, rules) {
    // "- name: description [field(type,required), ...]" per intent.
    const intentList = intents
        .map((i) => {
        const fields = Object.entries(i.fields)
            .map(([k, v]) => `${k}(${v.type}${v.required ? ",required" : ""})`)
            .join(", ");
        return `- ${i.name}: ${i.description} [${fields}]`;
    })
        .join("\n");
    const envs = Object.keys(rules.environmentAliases).join(", ");
    const services = Object.keys(rules.serviceAliases).join(", ");
    return `You are a command parser for a server operations CLI.
Parse the user's natural language command into a JSON object.

Supported intents:
${intentList}

Known environments: ${envs}
Known services: ${services}

Return ONLY valid JSON with:
- "intent": one of the intent names above, or "unknown"
- "confidence": 0.0 to 1.0
- "fields": object with all relevant fields for that intent

Example: {"intent": "service.restart", "confidence": 0.9, "fields": {"service": "nginx", "environment": "prod"}}

If you cannot determine the intent, return: {"intent": "unknown", "confidence": 0.1, "fields": {"reason": "..."}}
Return ONLY the JSON object, no markdown.`;
}
|
92
|
+
function extractContent(data) {
|
|
93
|
+
if (data.choices && Array.isArray(data.choices)) {
|
|
94
|
+
const msg = data.choices[0]?.message;
|
|
95
|
+
if (msg?.content && typeof msg.content === "string")
|
|
96
|
+
return msg.content;
|
|
97
|
+
}
|
|
98
|
+
if (data.content && Array.isArray(data.content)) {
|
|
99
|
+
const block = data.content[0];
|
|
100
|
+
if (block?.text && typeof block.text === "string")
|
|
101
|
+
return block.text;
|
|
102
|
+
}
|
|
103
|
+
return null;
|
|
104
|
+
}
|
|
105
|
+
/**
 * Best-effort JSON extraction: parse the whole text first, then fall back
 * to the contents of a fenced ```json``` block. Returns null when neither
 * candidate parses.
 */
function extractJSON(text) {
    const candidates = [text];
    const fence = text.match(/```(?:json)?\s*([\s\S]*?)```/);
    if (fence)
        candidates.push(fence[1]);
    for (const candidate of candidates) {
        try {
            return JSON.parse(candidate.trim());
        }
        catch {
            // keep trying the next candidate
        }
    }
    return null;
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
/**
 * Multi-classifier intent scorer.
 *
 * Instead of picking one parser, this runs multiple classifiers in parallel
 * and merges their scores. Each classifier votes on the likely intent.
 *
 * Classifiers:
 * 1. Synonym matcher — exact/substring synonym matching (fast, deterministic)
 * 2. Semantic classifier — uses compromise POS + dependency parse
 * 3. Context classifier — uses conversation history for likely intent
 * 4. Fuzzy classifier — keyboard-distance matching for typos
 */
export interface ClassifierVote {
    /** Name of the classifier that cast this vote. */
    classifier: string;
    /** The intent this classifier believes matches. */
    intent: string;
    /** Classifier-local confidence, in [0, 1]. */
    confidence: number;
    /** Human-readable justification for the vote. */
    reason: string;
}
export interface MultiClassifierResult {
    /** All votes from all classifiers */
    votes: ClassifierVote[];
    /** Final merged scores per intent, sorted by score */
    scores: Array<{
        intent: string;
        score: number;
        votes: number;
    }>;
    /** The winning intent (highest merged score) */
    best: {
        intent: string;
        score: number;
    } | null;
    /** Whether there was a close second (ambiguous) */
    ambiguous: boolean;
}
/**
 * Run all classifiers and merge results.
 *
 * @param rawText - the user's raw input
 * @param recentIntents - optional recent intent names, used by the context classifier
 */
export declare function classifyMulti(rawText: string, recentIntents?: string[]): MultiClassifierResult;
|