@arka-labs/nemesis 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +668 -0
- package/lib/core/agent-launcher.js +193 -0
- package/lib/core/audit.js +210 -0
- package/lib/core/connexions.js +80 -0
- package/lib/core/flowmap/api.js +111 -0
- package/lib/core/flowmap/cli-helpers.js +80 -0
- package/lib/core/flowmap/machine.js +281 -0
- package/lib/core/flowmap/persistence.js +83 -0
- package/lib/core/generators.js +183 -0
- package/lib/core/inbox.js +275 -0
- package/lib/core/logger.js +20 -0
- package/lib/core/mission.js +109 -0
- package/lib/core/notewriter/config.js +36 -0
- package/lib/core/notewriter/cr.js +237 -0
- package/lib/core/notewriter/log.js +112 -0
- package/lib/core/notewriter/notes.js +168 -0
- package/lib/core/notewriter/paths.js +45 -0
- package/lib/core/notewriter/reader.js +121 -0
- package/lib/core/notewriter/registry.js +80 -0
- package/lib/core/odm.js +191 -0
- package/lib/core/profile-picker.js +323 -0
- package/lib/core/project.js +287 -0
- package/lib/core/registry.js +129 -0
- package/lib/core/secrets.js +137 -0
- package/lib/core/services.js +45 -0
- package/lib/core/team.js +287 -0
- package/lib/core/templates.js +80 -0
- package/lib/kairos/agent-runner.js +261 -0
- package/lib/kairos/claude-invoker.js +90 -0
- package/lib/kairos/context-injector.js +331 -0
- package/lib/kairos/context-loader.js +108 -0
- package/lib/kairos/context-writer.js +45 -0
- package/lib/kairos/dispatcher-router.js +173 -0
- package/lib/kairos/dispatcher.js +139 -0
- package/lib/kairos/event-bus.js +287 -0
- package/lib/kairos/event-router.js +131 -0
- package/lib/kairos/flowmap-bridge.js +120 -0
- package/lib/kairos/hook-handlers.js +351 -0
- package/lib/kairos/hook-installer.js +207 -0
- package/lib/kairos/hook-prompts.js +54 -0
- package/lib/kairos/leader-rules.js +94 -0
- package/lib/kairos/pid-checker.js +108 -0
- package/lib/kairos/situation-detector.js +123 -0
- package/lib/sync/fallback-engine.js +97 -0
- package/lib/sync/hcm-client.js +170 -0
- package/lib/sync/health.js +47 -0
- package/lib/sync/llm-client.js +387 -0
- package/lib/sync/nemesis-client.js +379 -0
- package/lib/sync/service-session.js +74 -0
- package/lib/sync/sync-engine.js +178 -0
- package/lib/ui/box.js +104 -0
- package/lib/ui/brand.js +42 -0
- package/lib/ui/colors.js +57 -0
- package/lib/ui/dashboard.js +580 -0
- package/lib/ui/error-hints.js +49 -0
- package/lib/ui/format.js +61 -0
- package/lib/ui/menu.js +306 -0
- package/lib/ui/note-card.js +198 -0
- package/lib/ui/note-colors.js +26 -0
- package/lib/ui/note-detail.js +297 -0
- package/lib/ui/note-filters.js +252 -0
- package/lib/ui/note-views.js +283 -0
- package/lib/ui/prompt.js +81 -0
- package/lib/ui/spinner.js +139 -0
- package/lib/ui/streambox.js +46 -0
- package/lib/ui/table.js +42 -0
- package/lib/ui/tree.js +33 -0
- package/package.json +53 -0
- package/src/cli.js +457 -0
- package/src/commands/_helpers.js +119 -0
- package/src/commands/audit.js +187 -0
- package/src/commands/auth.js +316 -0
- package/src/commands/doctor.js +243 -0
- package/src/commands/hcm.js +147 -0
- package/src/commands/inbox.js +333 -0
- package/src/commands/init.js +160 -0
- package/src/commands/kairos.js +216 -0
- package/src/commands/kars.js +134 -0
- package/src/commands/mission.js +275 -0
- package/src/commands/notes.js +316 -0
- package/src/commands/notewriter.js +296 -0
- package/src/commands/odm.js +329 -0
- package/src/commands/orch.js +68 -0
- package/src/commands/project.js +123 -0
- package/src/commands/run.js +123 -0
- package/src/commands/services.js +705 -0
- package/src/commands/status.js +231 -0
- package/src/commands/team.js +572 -0
- package/src/config.js +84 -0
- package/src/index.js +5 -0
- package/templates/project-context.json +10 -0
- package/templates/template_CONTRIB-NAME.json +22 -0
- package/templates/template_CR-ODM-NAME-000.exemple.json +32 -0
- package/templates/template_DEC-NAME-000.json +18 -0
- package/templates/template_INTV-NAME-000.json +15 -0
- package/templates/template_MISSION_CONTRACT.json +46 -0
- package/templates/template_ODM-NAME-000.json +89 -0
- package/templates/template_REGISTRY-PROJECT.json +26 -0
- package/templates/template_TXN-NAME-000.json +24 -0
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* HCM API client — vanilla fetch (Node 20+), zero deps.
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { existsSync, readFileSync } from 'node:fs';
|
|
6
|
+
import { join } from 'node:path';
|
|
7
|
+
import { homedir } from 'node:os';
|
|
8
|
+
import { debug } from '../core/logger.js';
|
|
9
|
+
|
|
10
|
+
/**
 * Read persisted auth settings from ~/.nemesis/config.json.
 * Returns an empty object when the file is missing or unparseable.
 */
function loadSavedAuth() {
  const configFile = join(homedir(), '.nemesis', 'config.json');
  if (existsSync(configFile)) {
    try {
      return JSON.parse(readFileSync(configFile, 'utf-8'));
    } catch (e) {
      debug(`loadSavedAuth: ${e.message}`);
    }
  }
  return {};
}
|
|
20
|
+
|
|
21
|
+
/**
 * Create a lightweight HCM API client (vanilla fetch, zero deps).
 *
 * Settings resolution order: explicit `opts`, then ~/.nemesis/config.json,
 * then environment variables, then built-in defaults.
 *
 * @param {object} [opts]
 * @param {string} [opts.baseUrl] - HCM server root URL.
 * @param {string} [opts.apiKey]  - API key sent as `X-API-Key`.
 * @param {number} [opts.timeout] - Per-request timeout in ms (default 10000).
 * @returns {object} Client object exposing typed helpers around `request()`.
 */
export function createHcmClient(opts = {}) {
  const saved = loadSavedAuth();
  const {
    baseUrl = saved.hcm_url || process.env.HCM_URL || 'https://hcm.arkalabs.app',
    apiKey = saved.hcm_api_key || process.env.HCM_API_KEY || '',
    timeout = 10000,
  } = opts;

  // Detect if a different fallback key exists (env vs config). Only armed when
  // the caller did not pin a key explicitly: on a 401 with the env key we can
  // retry once with the config-file key.
  const envKey = process.env.HCM_API_KEY || '';
  const configKey = saved.hcm_api_key || '';
  const fallbackKey = (!opts.apiKey && envKey && configKey && envKey !== configKey) ? configKey : '';

  // Sticky: once the fallback key succeeds it stays active for this client.
  let activeKey = apiKey;

  function buildHeaders(key) {
    return {
      'Content-Type': 'application/json',
      ...(key ? { 'X-API-Key': key } : {}),
    };
  }

  async function doFetch(method, url, key, body, signal) {
    const res = await fetch(url, {
      method,
      headers: buildHeaders(key),
      body: body ? JSON.stringify(body) : null,
      signal,
    });
    return res;
  }

  /**
   * Perform one HTTP request against the HCM API.
   * Unwraps `{ok: true, data}` envelopes; throws on non-2xx or timeout.
   */
  async function request(method, path, body = null) {
    const url = `${baseUrl}${path}`;
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeout);

    try {
      let res = await doFetch(method, url, activeKey, body, controller.signal);

      // 401 + fallback key available → retry with fallback
      if (res.status === 401 && fallbackKey && activeKey !== fallbackKey) {
        activeKey = fallbackKey;
        res = await doFetch(method, url, activeKey, body, controller.signal);
      }

      if (!res.ok) {
        const text = await res.text().catch(() => '');
        throw new Error(`HCM API ${res.status}: ${text || res.statusText}`);
      }

      const contentType = res.headers.get('content-type') || '';
      if (contentType.includes('application/json')) {
        const json = await res.json();
        // Unwrap standard success envelope {ok: true, data: ...}
        if (json && typeof json === 'object' && json.ok === true && 'data' in json) {
          return json.data;
        }
        return json;
      }
      return await res.text();
    } catch (err) {
      if (err.name === 'AbortError') {
        throw new Error(`HCM API timeout (${timeout}ms) : ${url}`, { cause: err });
      }
      throw err;
    } finally {
      // Single cleanup point; the timeout now also covers body consumption,
      // so a stalled response stream cannot hang the caller forever.
      clearTimeout(timer);
    }
  }

  return {
    baseUrl,

    /** GET /health — liveness check. */
    async health() {
      return request('GET', '/health');
    },

    /** GET /status — graph statistics. */
    async graphStats() {
      return request('GET', '/status');
    },

    /** Full-text document search. */
    async search(query, opts = {}) {
      return request('POST', '/api/v1/documents/search', {
        text_query: query,
        limit: opts.limit || 10,
        tags: opts.tags || [],
      });
    },

    /** List all profiles; always returns an array. */
    async listProfiles() {
      const res = await request('GET', '/profiles');
      return Array.isArray(res) ? res : (res?.profiles || []);
    },

    /** Compact profile listing; always returns an array. */
    async listProfilesSummary() {
      const res = await request('GET', '/profiles/summary');
      return Array.isArray(res) ? res : [];
    },

    /**
     * Fetch atoms for a profile, optionally filtered by sous-familles.
     * Values are URI-encoded so names with reserved characters ('&', '#',
     * spaces…) cannot corrupt the query string.
     */
    async getProfileAtoms(profileId, sousFamilles = []) {
      const params = sousFamilles.length > 0
        ? `?sous_famille=${sousFamilles.map(encodeURIComponent).join(',')}`
        : '';
      const res = await request('GET', `/profiles/${encodeURIComponent(profileId)}/atoms${params}`);
      // Handle {atoms: [...]} (envelope) or direct array (legacy)
      return Array.isArray(res) ? res : (res?.atoms || []);
    },

    /** Search atoms by sous-famille/type with an optional limit. */
    async searchAtoms(opts = {}) {
      const params = new URLSearchParams();
      if (opts.sousFamille) params.set('sous_famille', opts.sousFamille);
      if (opts.type) params.set('type', opts.type);
      if (opts.limit) params.set('limit', String(opts.limit));
      const qs = params.toString();
      const res = await request('GET', `/api/v1/atoms/search${qs ? '?' + qs : ''}`);
      return res?.atoms || (Array.isArray(res) ? res : []);
    },

    /** Search profiles by free text and/or sector. */
    async searchProfiles(q, sector) {
      const params = new URLSearchParams();
      if (q) params.set('q', q);
      if (sector) params.set('sector', sector);
      const qs = params.toString();
      return request('GET', `/profiles/search${qs ? '?' + qs : ''}`);
    },

    /** Build a Kairos context pack for a profile. */
    async buildKairosPack(profileId, opts = {}) {
      return request('POST', '/kairos/pack', {
        profileId,
        include_governance: opts.includeGovernance ?? true,
        sous_familles: opts.sousFamilles || [],
      });
    },

    /** Deposit a node into the graph. */
    async deposit(node) {
      return request('POST', '/api/v1/deposit', node);
    },

    /** Round-trip latency probe; never throws. */
    async ping() {
      const start = Date.now();
      try {
        await request('GET', '/health');
        return { ok: true, latency: Date.now() - start };
      } catch (err) {
        return { ok: false, latency: Date.now() - start, error: err.message };
      }
    },
  };
}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { createHcmClient } from './hcm-client.js';
|
|
2
|
+
|
|
3
|
+
/**
 * Run the full HCM connectivity diagnostic: reachability, auth, graph stats.
 * Stops early when a prerequisite check fails.
 *
 * @param {object} [opts] - Forwarded to createHcmClient.
 * @returns {Promise<{ok: boolean, checks: Array<{label, ok, detail}>, url: string}>}
 */
export async function diagnoseHcm(opts = {}) {
  const client = createHcmClient(opts);
  const checks = [];
  const report = (ok) => ({ ok, checks, url: client.baseUrl });

  // 1. Reachability
  const ping = await client.ping();
  checks.push({
    label: 'Connexion',
    ok: ping.ok,
    detail: ping.ok ? `${ping.latency}ms` : ping.error,
  });
  if (!ping.ok) return report(false);

  // 2. Authentication
  try {
    await client.health();
    checks.push({ label: 'Auth', ok: true, detail: 'API key valide' });
  } catch (err) {
    checks.push({ label: 'Auth', ok: false, detail: err.message });
    return report(false);
  }

  // 3. Graph statistics (non-fatal: failure only flips the overall verdict)
  try {
    const stats = await client.graphStats();
    checks.push({
      label: 'Graphe',
      ok: true,
      detail: `${stats.node_count ?? '?'} noeuds, ${stats.edge_count ?? '?'} edges`,
    });
  } catch (err) {
    checks.push({ label: 'Graphe', ok: false, detail: err.message });
  }

  return report(checks.every((c) => c.ok));
}
|
|
@@ -0,0 +1,387 @@
|
|
|
1
|
+
import { execFileSync } from 'node:child_process';
|
|
2
|
+
import { getConnexionCredential } from '../core/connexions.js';
|
|
3
|
+
import { debug } from '../core/logger.js';
|
|
4
|
+
|
|
5
|
+
// Per-provider connection defaults. `baseUrl: null` means there is no sensible
// default and the connexion must supply its own `endpoint` (or, for cli_tools,
// the provider is not URL-based at all).
// NOTE(review): `healthPath`/`method` look unused — the test/live helpers below
// hardcode their own paths and methods; confirm before relying on these fields.
const PROVIDER_DEFAULTS = {
  anthropic: { baseUrl: 'https://api.anthropic.com', healthPath: '/v1/messages', method: 'POST' },
  openai: { baseUrl: 'https://api.openai.com', healthPath: '/v1/models', method: 'GET' },
  gemini_genai: { baseUrl: 'https://generativelanguage.googleapis.com', healthPath: '/v1beta/models', method: 'GET' },
  google_cloud: { baseUrl: 'https://us-central1-aiplatform.googleapis.com', healthPath: null, method: 'GET' },
  aws_bedrock: { baseUrl: null, healthPath: null, method: 'GET' },
  ollama: { baseUrl: 'http://localhost:11434', healthPath: '/api/tags', method: 'GET' },
  groq: { baseUrl: 'https://api.groq.com', healthPath: '/openai/v1/models', method: 'GET' },
  custom: { baseUrl: null, healthPath: null, method: 'GET' },
  cli_tools: { baseUrl: null, healthPath: null, method: null },
};
|
|
16
|
+
|
|
17
|
+
/**
 * Cheap connectivity/auth probe for a connexion (no completion is requested).
 * Never throws; failures are reported in the returned object.
 *
 * @param {object} connexion - { id, provider, endpoint, model, ... }
 * @returns {Promise<{ok: boolean, latency: number, details?: string, error: string|null}>}
 */
export async function testConnection(connexion) {
  const start = Date.now();
  const elapsed = () => Date.now() - start;
  try {
    if (connexion.provider === 'cli_tools') return testCliTools();

    const credential = getConnexionCredential(connexion.id);
    const defaults = PROVIDER_DEFAULTS[connexion.provider] || PROVIDER_DEFAULTS.custom;
    const baseUrl = connexion.endpoint || defaults.baseUrl;
    if (!baseUrl) return { ok: false, latency: 0, error: 'Endpoint non configure' };

    const outcome = await testProvider(connexion.provider, baseUrl, credential, connexion.model);
    return { ok: outcome.ok, latency: elapsed(), details: outcome.details, error: outcome.error || null };
  } catch (err) {
    return { ok: false, latency: elapsed(), error: err.message };
  }
}
|
|
31
|
+
|
|
32
|
+
/** Check whether the `claude` CLI binary is resolvable on PATH. */
function testCliTools() {
  let available = true;
  try {
    execFileSync('which', ['claude'], { stdio: 'pipe' });
  } catch (e) {
    debug(`testCliTools: ${e.message}`);
    available = false;
  }
  return available
    ? { ok: true, latency: 0, details: 'claude CLI disponible' }
    : { ok: false, latency: 0, error: 'claude CLI introuvable' };
}
|
|
41
|
+
|
|
42
|
+
/** Route the connectivity probe to the provider-specific handler. */
async function testProvider(provider, baseUrl, credential, model) {
  const handlers = {
    anthropic: () => testAnthropic(baseUrl, credential, model),
    openai: () => testOpenAI(baseUrl, credential),
    groq: () => testOpenAI(`${baseUrl}/openai`, credential),
    gemini_genai: () => testGemini(baseUrl, credential),
    ollama: () => testOllama(baseUrl),
  };
  const handler = handlers[provider] ?? (() => testGeneric(baseUrl, credential));
  return handler();
}
|
|
52
|
+
|
|
53
|
+
/** Probe Anthropic auth with a 1-token message request. */
async function testAnthropic(baseUrl, apiKey, model) {
  const payload = {
    model: model || 'claude-haiku-4-5-20251001',
    max_tokens: 1,
    messages: [{ role: 'user', content: 'ping' }],
  };
  const res = await fetch(`${baseUrl}/v1/messages`, {
    method: 'POST',
    headers: {
      'x-api-key': apiKey || '',
      'anthropic-version': '2023-06-01',
      'content-type': 'application/json',
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(8000),
  });
  // 200 or 400 = auth OK (400 can be model not found but auth passed)
  const authOk = res.status === 200 || res.status === 400;
  return authOk
    ? { ok: true, details: `HTTP ${res.status}` }
    : { ok: false, error: `HTTP ${res.status}` };
}
|
|
74
|
+
|
|
75
|
+
/** Probe an OpenAI-compatible API by listing models (auth-gated, free). */
async function testOpenAI(baseUrl, apiKey) {
  const res = await fetch(`${baseUrl}/v1/models`, {
    method: 'GET',
    headers: { 'Authorization': `Bearer ${apiKey || ''}` },
    signal: AbortSignal.timeout(8000),
  });
  return res.ok
    ? { ok: true, details: `HTTP ${res.status}` }
    : { ok: false, error: `HTTP ${res.status}` };
}
|
|
84
|
+
|
|
85
|
+
/**
 * Probe the Gemini GenAI API by listing models.
 * The key travels as a query parameter (GenAI convention); it is URI-encoded
 * so reserved characters cannot truncate or corrupt the URL.
 */
async function testGemini(baseUrl, apiKey) {
  const res = await fetch(`${baseUrl}/v1beta/models?key=${encodeURIComponent(apiKey || '')}`, {
    method: 'GET',
    signal: AbortSignal.timeout(8000),
  });
  if (res.ok) return { ok: true, details: `HTTP ${res.status}` };
  return { ok: false, error: `HTTP ${res.status}` };
}
|
|
93
|
+
|
|
94
|
+
/** Probe a local Ollama daemon via its tag listing (no auth). */
async function testOllama(baseUrl) {
  const res = await fetch(`${baseUrl}/api/tags`, {
    method: 'GET',
    signal: AbortSignal.timeout(5000),
  });
  return res.ok
    ? { ok: true, details: `HTTP ${res.status}` }
    : { ok: false, error: `HTTP ${res.status}` };
}
|
|
102
|
+
|
|
103
|
+
/** Fallback probe: GET the base URL, bearer auth only when a credential exists. */
async function testGeneric(baseUrl, credential) {
  const headers = credential ? { 'Authorization': `Bearer ${credential}` } : {};
  const res = await fetch(baseUrl, {
    method: 'GET',
    headers,
    signal: AbortSignal.timeout(8000),
  });
  return res.ok
    ? { ok: true, details: `HTTP ${res.status}` }
    : { ok: false, error: `HTTP ${res.status}` };
}
|
|
114
|
+
|
|
115
|
+
// ── Live test — real LLM call ───────────────────────────────────────

// Prompt sent verbatim to the provider (French, asks for the exact reply
// "Nemesis OK"). Runtime string — do not translate.
const LIVE_PROMPT = 'Reponds exactement : "Nemesis OK"';
// Network providers get 15 s per call; a local CLI round-trip (process spawn
// plus model call) is allowed up to 60 s.
const LIVE_TIMEOUT = 15000;
const CLI_LIVE_TIMEOUT = 60000;
|
|
120
|
+
|
|
121
|
+
/**
 * Issue a real (billable) LLM round-trip to verify a connexion end-to-end.
 * Never throws; failures are reported in the returned object.
 *
 * @param {object} connexion - { id, provider, endpoint, model, ... }
 * @returns {Promise<{ok, latency, response, model?, error}>}
 */
export async function liveTestConnection(connexion) {
  const start = Date.now();
  const elapsed = () => Date.now() - start;
  try {
    if (connexion.provider === 'cli_tools') return liveTestCli(connexion);

    const credential = getConnexionCredential(connexion.id);
    const defaults = PROVIDER_DEFAULTS[connexion.provider] || PROVIDER_DEFAULTS.custom;
    const baseUrl = connexion.endpoint || defaults.baseUrl;
    if (!baseUrl) return { ok: false, latency: 0, response: null, error: 'Endpoint non configure' };

    const outcome = await liveCallProvider(connexion.provider, baseUrl, credential, connexion.model);
    return {
      ok: outcome.ok,
      latency: elapsed(),
      response: outcome.response || null,
      model: outcome.model || connexion.model || null,
      error: outcome.error || null,
    };
  } catch (err) {
    return { ok: false, latency: elapsed(), response: null, error: err.message };
  }
}
|
|
142
|
+
|
|
143
|
+
/** Route the live completion call to the provider-specific handler. */
async function liveCallProvider(provider, baseUrl, credential, model) {
  const handlers = {
    anthropic: () => liveAnthropic(baseUrl, credential, model),
    openai: () => liveOpenAI(baseUrl, credential, model),
    groq: () => liveOpenAI(`${baseUrl}/openai`, credential, model),
    gemini_genai: () => liveGemini(baseUrl, credential, model),
    ollama: () => liveOllama(baseUrl, model),
  };
  const handler = handlers[provider];
  if (!handler) return { ok: false, error: 'Live test non supporte pour ce provider' };
  return handler();
}
|
|
153
|
+
|
|
154
|
+
/** Live completion against Anthropic Messages API; returns the reply text. */
async function liveAnthropic(baseUrl, apiKey, model) {
  const payload = {
    model: model || 'claude-haiku-4-5-20251001',
    max_tokens: 50,
    messages: [{ role: 'user', content: LIVE_PROMPT }],
  };
  const res = await fetch(`${baseUrl}/v1/messages`, {
    method: 'POST',
    headers: {
      'x-api-key': apiKey || '',
      'anthropic-version': '2023-06-01',
      'content-type': 'application/json',
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(LIVE_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    return { ok: false, error: `HTTP ${res.status} — ${body.slice(0, 200)}` };
  }
  const data = await res.json();
  return { ok: true, response: data.content?.[0]?.text || '', model: data.model || model };
}
|
|
177
|
+
|
|
178
|
+
/** Live completion against an OpenAI-compatible chat API; returns the reply text. */
async function liveOpenAI(baseUrl, apiKey, model) {
  const payload = {
    model: model || 'gpt-4o-mini',
    max_tokens: 50,
    messages: [{ role: 'user', content: LIVE_PROMPT }],
  };
  const res = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey || ''}`,
      'content-type': 'application/json',
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(LIVE_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    return { ok: false, error: `HTTP ${res.status} — ${body.slice(0, 200)}` };
  }
  const data = await res.json();
  return { ok: true, response: data.choices?.[0]?.message?.content || '', model: data.model || model };
}
|
|
200
|
+
|
|
201
|
+
/**
 * Live completion against Gemini GenAI; returns the reply text.
 * Model (path segment) and API key (query parameter) are URI-encoded so
 * reserved characters cannot corrupt the request URL.
 */
async function liveGemini(baseUrl, apiKey, model) {
  const m = model || 'gemini-2.0-flash';
  const url = `${baseUrl}/v1beta/models/${encodeURIComponent(m)}:generateContent?key=${encodeURIComponent(apiKey || '')}`;
  const res = await fetch(url, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({
      contents: [{ parts: [{ text: LIVE_PROMPT }] }],
    }),
    signal: AbortSignal.timeout(LIVE_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    return { ok: false, error: `HTTP ${res.status} — ${body.slice(0, 200)}` };
  }
  const data = await res.json();
  const text = data.candidates?.[0]?.content?.parts?.[0]?.text || '';
  return { ok: true, response: text, model: m };
}
|
|
219
|
+
|
|
220
|
+
/** Live completion against a local Ollama daemon; returns the reply text. */
async function liveOllama(baseUrl, model) {
  const payload = {
    model: model || 'llama3',
    prompt: LIVE_PROMPT,
    stream: false,
  };
  const res = await fetch(`${baseUrl}/api/generate`, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(LIVE_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    return { ok: false, error: `HTTP ${res.status} — ${body.slice(0, 200)}` };
  }
  const data = await res.json();
  return { ok: true, response: data.response || '', model: data.model || model };
}
|
|
238
|
+
|
|
239
|
+
// ── API call — system + user prompt ─────────────────────────────────

// Upper bound (ms) for one full completion round-trip in callLlmApi.
const API_CALL_TIMEOUT = 30000;
|
|
242
|
+
|
|
243
|
+
/**
 * Make an actual LLM API call with system + user prompts.
 * Used by createServiceCaller for NoteWriter integration.
 * @param {object} connexion - { provider, endpoint, ... }
 * @param {string} credential - API key
 * @param {string} model - Model identifier
 * @param {string} systemPrompt
 * @param {string} userPrompt
 * @returns {Promise<string>} Response text
 * @throws {Error} On unsupported providers, missing endpoint, or HTTP errors
 *   (the latter carry a `status` property).
 */
export async function callLlmApi(connexion, credential, model, systemPrompt, userPrompt) {
  const defaults = PROVIDER_DEFAULTS[connexion.provider] || PROVIDER_DEFAULTS.custom;
  const baseUrl = connexion.endpoint || defaults.baseUrl;
  if (!baseUrl) throw new Error('Endpoint non configure');

  const handlers = {
    anthropic: () => callAnthropic(baseUrl, credential, model, systemPrompt, userPrompt),
    openai: () => callOpenAI(baseUrl, credential, model, systemPrompt, userPrompt),
    groq: () => callOpenAI(`${baseUrl}/openai`, credential, model || 'llama-3.3-70b-versatile', systemPrompt, userPrompt),
    gemini_genai: () => callGemini(baseUrl, credential, model, systemPrompt, userPrompt),
    ollama: () => callOllama(baseUrl, model, systemPrompt, userPrompt),
  };
  const handler = handlers[connexion.provider];
  if (!handler) throw new Error(`Provider "${connexion.provider}" non supporte pour les appels LLM`);
  return handler();
}
|
|
267
|
+
|
|
268
|
+
/** Full completion call against Anthropic; throws (with `status`) on HTTP errors. */
async function callAnthropic(baseUrl, apiKey, model, systemPrompt, userPrompt) {
  const payload = {
    model: model || 'claude-haiku-4-5-20251001',
    max_tokens: 4096,
    system: systemPrompt,
    messages: [{ role: 'user', content: userPrompt }],
  };
  const res = await fetch(`${baseUrl}/v1/messages`, {
    method: 'POST',
    headers: {
      'x-api-key': apiKey || '',
      'anthropic-version': '2023-06-01',
      'content-type': 'application/json',
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(API_CALL_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    const err = new Error(`HTTP ${res.status} — ${body.slice(0, 200)}`);
    err.status = res.status;
    throw err;
  }
  const data = await res.json();
  return data.content?.[0]?.text || '';
}
|
|
293
|
+
|
|
294
|
+
/** Full completion call against an OpenAI-compatible API; throws (with `status`) on HTTP errors. */
async function callOpenAI(baseUrl, apiKey, model, systemPrompt, userPrompt) {
  const payload = {
    model: model || 'gpt-4o-mini',
    max_tokens: 4096,
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: userPrompt },
    ],
  };
  const res = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey || ''}`,
      'content-type': 'application/json',
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(API_CALL_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    const err = new Error(`HTTP ${res.status} — ${body.slice(0, 200)}`);
    err.status = res.status;
    throw err;
  }
  const data = await res.json();
  return data.choices?.[0]?.message?.content || '';
}
|
|
320
|
+
|
|
321
|
+
/**
 * Full completion call against Gemini GenAI; throws (with `status`) on HTTP errors.
 * Model (path segment) and API key (query parameter) are URI-encoded so
 * reserved characters cannot corrupt the request URL.
 */
async function callGemini(baseUrl, apiKey, model, systemPrompt, userPrompt) {
  const m = model || 'gemini-2.0-flash';
  const url = `${baseUrl}/v1beta/models/${encodeURIComponent(m)}:generateContent?key=${encodeURIComponent(apiKey || '')}`;
  const res = await fetch(url, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({
      systemInstruction: { parts: [{ text: systemPrompt }] },
      contents: [{ parts: [{ text: userPrompt }] }],
    }),
    signal: AbortSignal.timeout(API_CALL_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    const err = new Error(`HTTP ${res.status} — ${body.slice(0, 200)}`);
    err.status = res.status;
    throw err;
  }
  const data = await res.json();
  return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
}
|
|
341
|
+
|
|
342
|
+
/** Full completion call against a local Ollama daemon; throws (with `status`) on HTTP errors. */
async function callOllama(baseUrl, model, systemPrompt, userPrompt) {
  const payload = {
    model: model || 'llama3',
    system: systemPrompt,
    prompt: userPrompt,
    stream: false,
  };
  const res = await fetch(`${baseUrl}/api/generate`, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(API_CALL_TIMEOUT),
  });
  if (!res.ok) {
    const body = await res.text().catch(() => '');
    const err = new Error(`HTTP ${res.status} — ${body.slice(0, 200)}`);
    err.status = res.status;
    throw err;
  }
  const data = await res.json();
  return data.response || '';
}
|
|
363
|
+
|
|
364
|
+
/**
 * Live test via the local `claude` CLI (synchronous process spawn).
 * Fix: latency is now actually measured — the original hard-coded `latency: 0`
 * on both success and failure, which defeats the purpose of a live test.
 * @param {object} [connexion] - { model?, ... }
 * @returns {{ok: boolean, latency: number, response: string|null, model?: string, error?: string}}
 */
function liveTestCli(connexion = {}) {
  const start = Date.now();
  try {
    const args = [
      '-p', LIVE_PROMPT,
      '--output-format', 'json',
      '--dangerously-skip-permissions',
    ];
    if (connexion.model) args.push('--model', connexion.model);

    const raw = execFileSync('claude', args, {
      stdio: ['pipe', 'pipe', 'pipe'],
      timeout: CLI_LIVE_TIMEOUT,
      encoding: 'utf-8',
    });

    // CLI emits JSON with the reply under `result` (or `text`); fall back to
    // the raw output when parsing fails.
    let text = raw;
    try {
      const parsed = JSON.parse(raw);
      text = parsed.result || parsed.text || raw;
    } catch (e) { debug(`callCliTool parse: ${e.message}`); }

    return { ok: true, latency: Date.now() - start, response: text, model: connexion.model || 'claude-cli' };
  } catch (err) {
    return { ok: false, latency: Date.now() - start, response: null, error: err.message };
  }
}
|