node-red-contrib-linux-copilot 1.2.11 → 1.2.12
This diff reflects the changes between publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/linux-copilot.html +59 -25
- package/linux-copilot.js +78 -52
- package/package.json +1 -1
package/linux-copilot.html
CHANGED
@@ -1,50 +1,84 @@
 <script type="text/javascript">
-    RED.nodes.registerType('linux-copilot',{
+    RED.nodes.registerType('linux-copilot', {
         category: 'advanced',
         color: '#E2D96E',
         defaults: {
-            name: {value:""},
-
-
-            prioGEM: {value:"
-
+            name: { value: "" },
+            chatId: { value: "" },
+            // On garde les priorités au cas où vous voudriez les ajuster plus tard
+            prioGEM: { value: "1" },
+            prioDS: { value: "2" },
+            prioOR: { value: "3" }
         },
         credentials: {
-
-
-
+            geminiKey: { type: "password" },
+            deepseekKey: { type: "password" },
+            openrouterKey: { type: "password" }
         },
-        inputs:1,
-        outputs:1,
+        inputs: 1,
+        outputs: 1,
         icon: "font-awesome/fa-terminal",
-        label: function() {
+        label: function() {
+            return this.name || "Linux Copilot (Multi-LLM)";
+        },
+        oneditprepare: function() {
+            // Initialisation si nécessaire
+        }
     });
 </script>

 <script type="text/html" data-template-name="linux-copilot">
     <div class="form-row">
         <label for="node-input-name"><i class="fa fa-tag"></i> Nom</label>
-        <input type="text" id="node-input-name" placeholder="
+        <input type="text" id="node-input-name" placeholder="Mon Expert SRE">
     </div>
+
     <div class="form-row">
         <label for="node-input-chatId"><i class="fa fa-comment"></i> Chat ID</label>
-        <input type="text" id="node-input-chatId">
+        <input type="text" id="node-input-chatId" placeholder="ID Telegram ou autre">
     </div>
-
-    <
+
+    <hr align="middle">
+    <h4><i class="fa fa-key"></i> Clés API & Modèles</h4>
+    <p style="font-size: 0.9em; color: #666;">
+        Le système tentera d'utiliser <b>Gemini/Gemma</b> en priorité, puis <b>DeepSeek</b>,
+        et enfin les modèles gratuits d'<b>OpenRouter</b> (Llama 3.3, Qwen, Olmo).
+    </p>
+
     <div class="form-row">
-        <label for="node-input-
-        <input type="
-        <input type="password" id="node-input-deepseekKey" placeholder="Clé API" style="width:200px">
+        <label for="node-input-geminiKey"><i class="fa fa-google"></i> Google AI</label>
+        <input type="password" id="node-input-geminiKey" placeholder="Clé API Gemini (Gemma 3)">
     </div>
+
     <div class="form-row">
-        <label for="node-input-
-        <input type="
-        <input type="password" id="node-input-openrouterKey" placeholder="Clé API" style="width:200px">
+        <label for="node-input-deepseekKey"><i class="fa fa-code"></i> DeepSeek</label>
+        <input type="password" id="node-input-deepseekKey" placeholder="Clé API DeepSeek">
     </div>
+
     <div class="form-row">
-        <label for="node-input-
-        <input type="
-
+        <label for="node-input-openrouterKey"><i class="fa fa-rocket"></i> OpenRouter</label>
+        <input type="password" id="node-input-openrouterKey" placeholder="Clé API OpenRouter (Modèles Free)">
+    </div>
+
+    <hr align="middle">
+    <div class="form-tips">
+        <b>Conseil :</b> Laissez un champ vide si vous n'avez pas la clé. Le nœud sautera automatiquement les modèles correspondants sans bloquer le diagnostic.
     </div>
 </script>
+
+<script type="text/html" data-help-name="linux-copilot">
+    <p>Un expert Linux SRE capable de diagnostiquer votre système en cascade.</p>
+    <h3>Fonctionnement</h3>
+    <ol>
+        <li>Analyse le texte ou le résultat de commande reçu.</li>
+        <li>Interroge le meilleur modèle disponible (Failover).</li>
+        <li>Exécute automatiquement des commandes de diagnostic sécurisées (ls, df, top, etc.).</li>
+        <li>Boucle jusqu'à résolution ou fin de séquence (5 itérations max).</li>
+    </ol>
+    <h3>Modèles inclus</h3>
+    <ul>
+        <li><b>Gemini 2.0 Flash / Gemma 3</b> : Priorité haute, rapide.</li>
+        <li><b>DeepSeek V3</b> : Analyse de code et logs.</li>
+        <li><b>Qwen/Llama/Olmo</b> : Relève via OpenRouter si nécessaire.</li>
+    </ul>
+</script>
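The help text added in 1.2.12 says the node analyzes whatever text or command output it receives, queries the best available model, and loops through safe diagnostic commands. For reference, here is a minimal sketch, not part of the package, of a message an upstream Function node could feed it, based on the input handling in linux-copilot.js below; the chat id value and the Function-node wiring are illustrative assumptions.

```javascript
// Hypothetical upstream Function node preparing input for linux-copilot.
// The node reads msg.payload.chatId and msg.payload.content; a plain string
// payload also works, and sending the literal text "reset" clears the stored
// conversation history.
msg.payload = {
    chatId: "123456789",   // e.g. a Telegram chat id (assumption)
    content: "The /var partition is at 98%, what should I check?"
};
return msg;
```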
package/linux-copilot.js
CHANGED
@@ -8,100 +8,126 @@ module.exports = function(RED) {

     const omniPrompt = `Tu es un Expert Linux SRE polyglotte.
 RÈGLES CRUCIALES :
-1. LANGUE :
+1. LANGUE : Réponds TOUJOURS dans la langue de l'utilisateur.
 2. ANALYSE : Analyse brièvement les résultats techniques.
-3. ACTION : Propose la commande suivante
-4. FORMAT JSON : {"speech": "ton explication
-
-    const formatHistory = (history) => history.map(h => ({
-        role: h.role === "model" ? "assistant" : h.role,
-        content: String(h.content).substring(0, 800)
-    }));
+3. ACTION : Propose la commande linux suivante.
+4. FORMAT JSON STRICT : {"speech": "ton explication", "cmd": "commande ou none"}`;

     const engines = {
-
-
-
+        // Moteur Google (Gemini 2.0 Flash & Gemma 3)
+        google: async (history, key, model) => {
+            const res = await axios.post(`https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${key.trim()}`, {
+                contents: history.map(h => ({
+                    role: h.role === "assistant" ? "model" : h.role,
+                    parts: [{ text: h.content }]
+                })),
                 system_instruction: { parts: [{ text: omniPrompt }] },
-                generationConfig: { responseMimeType: "application/json" }
-            }, { timeout:
+                generationConfig: { responseMimeType: "application/json", temperature: 0.1 }
+            }, { timeout: 12000 });
             return JSON.parse(res.data.candidates[0].content.parts[0].text);
         },
-
+        // Moteur OpenRouter (Pour tous les modèles gratuits : DeepSeek, Llama, Qwen, Olmo)
+        openrouter: async (history, key, model) => {
             const res = await axios.post('https://openrouter.ai/api/v1/chat/completions', {
-                model:
-                messages: [{ role: 'system', content: omniPrompt }, ...
-
-
-
-
-
-
-
-
-
-                return JSON.parse(res.data.choices[0].message.content);
+                model: model,
+                messages: [{ role: 'system', content: omniPrompt }, ...history.map(h => ({
+                    role: h.role === "model" ? "assistant" : h.role,
+                    content: String(h.content)
+                }))]
+            }, {
+                headers: { 'Authorization': `Bearer ${key.trim()}`, 'Content-Type': 'application/json' },
+                timeout: 20000
+            });
+            const content = res.data.choices[0].message.content;
+            const match = content.match(/\{[\s\S]*\}/);
+            return JSON.parse(match ? match[0] : content);
         }
     };

     node.on('input', async function(msg) {
-        const chatId = msg.payload.chatId || config.chatId
+        const chatId = msg.payload.chatId || config.chatId;
         let userText = msg.payload.content || (typeof msg.payload === 'string' ? msg.payload : "");
         let loopCount = msg.loopCount || 0;
-
-        if (loopCount >
+
+        if (loopCount > 5) return node.status({fill:"blue", text:"Séquence terminée"});

         let history = node.context().get('history') || [];
         if (userText.toLowerCase() === "reset") {
             node.context().set('history', []);
-            return node.send({ payload: { chatId,
+            return node.send({ payload: { chatId, content: "♻️ Historique effacé." } });
         }

         if (!userText) return;
         history.push({ role: "user", content: userText });

-
-
-            {
-            {
-
+        // LA LISTE DE SECOURS (ORDRE DE PRIORITÉ)
+        const queue = [
+            { type: 'google', m: "gemini-2.0-flash", n: "Gemini 2.0 Flash", k: node.credentials.geminiKey },
+            { type: 'openrouter', m: "deepseek/deepseek-chat", n: "DeepSeek V3", k: node.credentials.deepseekKey },
+            { type: 'openrouter', m: "qwen/qwen-2.5-coder-32b-instruct:free", n: "Qwen 2.5 Coder", k: node.credentials.openrouterKey },
+            { type: 'openrouter', m: "meta-llama/llama-3.3-70b-instruct:free", n: "Llama 3.3 Free", k: node.credentials.openrouterKey },
+            { type: 'openrouter', m: "google/gemini-2.0-flash-001", n: "Gemini (via OpenRouter)", k: node.credentials.openrouterKey },
+            { type: 'openrouter', m: "allenai/olmo-3-32b-instruct", n: "Olmo 3", k: node.credentials.openrouterKey }
+        ].filter(q => q.k);

         let aiData = null;
         let engineUsed = "";

         for (let e of queue) {
             try {
-                node.status({fill:"yellow", text: `
-                aiData = await engines[e.
-                if (aiData && (aiData.speech || aiData.cmd)) {
-
+                node.status({fill:"yellow", text: `Essai ${e.n}...`});
+                aiData = await engines[e.type](history, e.k, e.m);
+                if (aiData && (aiData.speech || aiData.cmd)) {
+                    engineUsed = e.n;
+                    break;
+                }
+            } catch (err) {
+                node.warn(`Échec ${e.n}: ${err.message}`);
+            }
         }

-        if (!aiData)
+        if (!aiData) {
+            node.status({fill:"red", text:"Tous les modèles ont échoué"});
+            return;
+        }

-
+        // Réponse à l'utilisateur
+        node.send({ payload: {
+            chatId,
+            type: "message",
+            content: `🤖 [${engineUsed}] : ${aiData.speech}`,
+            options: { parse_mode: "HTML" }
+        }});

+        // Gestion du Terminal
         let cmd = (aiData.cmd || "").trim();
-        const
+        const safeCommands = ['ls', 'df', 'free', 'uptime', 'ps', 'cat', 'grep', 'ss', 'ip', 'systemctl', 'journalctl', 'uname', 'docker', 'top -b -n 1'];

-        if (cmd && cmd !== "none" &&
-
-
-                let res = (stdout || stderr || "OK").substring(0, 800);
+        if (cmd && cmd !== "none" && safeCommands.some(w => cmd.includes(w.split(' ')[0]))) {
+            exec(cmd, { timeout: 12000 }, (err, stdout, stderr) => {
+                let res = (stdout || stderr || "Commande exécutée (sans retour)").substring(0, 1000);
                 node.send({ payload: { chatId, type: "message", content: `📟 <b>Terminal (${cmd})</b> :\n<pre>${res}</pre>`, options: { parse_mode: "HTML" } } });
+
+                // On renvoie le résultat à l'IA pour l'étape suivante
                 setTimeout(() => {
-                    node.emit("input", {
-
+                    node.emit("input", {
+                        payload: { chatId, content: `RÉSULTAT DE ${cmd} :\n${res}` },
+                        loopCount: loopCount + 1
+                    });
+                }, 1000);
             });
         }

-        history.push({ role: "assistant", content: aiData.speech
+        history.push({ role: "assistant", content: aiData.speech });
         node.context().set('history', history.slice(-10));
-        node.status({fill:"green", text:`
+        node.status({fill:"green", text:`Répondu par ${engineUsed}`});
     });
     }
     RED.nodes.registerType('linux-copilot', LinuxCopilotNode, {
-        credentials: {
+        credentials: {
+            geminiKey: {type:"password"},
+            openrouterKey: {type:"password"},
+            deepseekKey: {type:"password"}
+        }
     });
 }