@gishubperu/ghp 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env +13 -0
- package/domain/contracts/IAgent.js +3 -0
- package/domain/contracts/IMemoryStore.js +3 -0
- package/domain/contracts/IModelProvider.js +3 -0
- package/domain/contracts/IToolset.js +3 -0
- package/domain/dtos/requests/RunAgentRequest.js +8 -0
- package/domain/dtos/responses/AgentResponse.js +8 -0
- package/domain/dtos/responses/ToolResult.js +8 -0
- package/domain/entities/AgentDefinition.js +11 -0
- package/domain/entities/Message.js +10 -0
- package/domain/entities/ModelInfo.js +11 -0
- package/index.js +66 -0
- package/infrastructure/agents/agent-factory.js +79 -0
- package/infrastructure/agents/agriculture_agent.js +13 -0
- package/infrastructure/agents/base_agent.js +19 -0
- package/infrastructure/agents/deforestacion_agent.js +13 -0
- package/infrastructure/agents/general_agent.js +13 -0
- package/infrastructure/agents/minning_agent.js +13 -0
- package/infrastructure/constants/agent-config.js +48 -0
- package/infrastructure/constants/agent-type.js +20 -0
- package/infrastructure/constants/index.js +8 -0
- package/infrastructure/constants/model-config.js +37 -0
- package/infrastructure/constants/system-prompts.js +88 -0
- package/infrastructure/constants/task-type.js +22 -0
- package/infrastructure/core/orchestrator.js +155 -0
- package/infrastructure/core/queue.js +53 -0
- package/infrastructure/core/workers.js +67 -0
- package/infrastructure/memory/store.js +115 -0
- package/infrastructure/providers/anthropic.js +59 -0
- package/infrastructure/providers/gateway.js +140 -0
- package/infrastructure/providers/gemini.js +50 -0
- package/infrastructure/providers/ollama.js +92 -0
- package/infrastructure/providers/openai.js +83 -0
- package/infrastructure/router/router.js +115 -0
- package/infrastructure/skills/gis.skill.js +105 -0
- package/infrastructure/tools/arcgis.js +187 -0
- package/infrastructure/tools/tool-formatters.js +110 -0
- package/package.json +32 -0
- package/presentation/console/procedures/App.js +424 -0
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
// infrastructure/core/orchestrator.js
|
|
2
|
+
|
|
3
|
+
import { AgentType, SystemPrompts, DEFAULT_SYSTEM_PROMPT, TaskType } from '../constants/index.js';
|
|
4
|
+
|
|
5
|
+
export class Orchestrator {

  #queue; #workers; #router; #memory; #toolset;

  /**
   * @param {object} deps
   * @param {object} deps.queue - priority TaskQueue for planned subtasks
   * @param {object} deps.workerPool - WorkerPool that runs subtasks in parallel
   * @param {object} deps.router - model router: chat({ taskType, messages, tools, systemPrompt, signal })
   * @param {object} deps.memory - MemoryStore (conversation buffer + persistence)
   * @param {object} deps.toolset - tool registry: { declarations, execute(name, args) }
   */
  constructor({ queue, workerPool, router, memory, toolset }) {
    this.#queue = queue;
    this.#workers = workerPool;
    this.#router = router;
    this.#memory = memory;
    this.#toolset = toolset;
  }

  /**
   * Entry point. Heuristically classifies the request and dispatches to the
   * single-agent path ('simple') or the plan → parallel subtasks → synthesis
   * path ('complex').
   */
  async run(userInput, { onProgress, agentId = AgentType.AUTO, signal = null } = {}) {
    const systemPrompt = SystemPrompts[agentId] ?? DEFAULT_SYSTEM_PROMPT;
    const complexity = this.#assessComplexity(userInput);

    if (complexity === 'simple')
      return this.#runSimple(userInput, { onProgress, systemPrompt, signal });
    return this.#runPlanned(userInput, { onProgress, systemPrompt, signal });
  }

  // Single agent with conversation history.
  async #runSimple(input, { onProgress, systemPrompt, signal }) {
    const history = this.#memory.getBuffer();
    const messages = [...history, { role: 'user', content: input }];

    this.#memory.pushTurn({ role: 'user', content: input });

    const result = await this.#chatWithTools({
      taskType: 'fast', messages, systemPrompt, onProgress, signal,
    });

    this.#memory.pushTurn({ role: 'assistant', content: result.text });
    await this.#memory.save({ input, output: result.text, type: 'simple' });
    return result;
  }

  // Splits into parallel subtasks → synthesizes a single answer.
  async #runPlanned(input, { onProgress, systemPrompt, signal }) {
    onProgress?.({ type: 'planning' });

    const plan = await this.#buildPlan(input);
    onProgress?.({ type: 'plan', tasks: plan });

    const taskIds = plan.map(t => this.#queue.enqueue({ ...t, parentInput: input }));
    const results = await this.#workers.runAll(taskIds, { onProgress, signal });

    onProgress?.({ type: 'synthesizing' });

    const history = this.#memory.getBuffer();
    const synthesis = await this.#chatWithTools({
      taskType: 'fast',
      systemPrompt: systemPrompt + '\n\nConsolida los resultados en una respuesta clara y directa.',
      messages: [
        ...history,
        {
          role: 'user',
          content: `Pregunta original: ${input}\n\nResultados de subtareas paralelas:\n${
            results.map(t => `[${t.type}${t.error ? ' ✗' : ''}]: ${t.result}`).join('\n\n')
          }`,
        },
      ],
      onProgress, signal,
    });

    this.#memory.pushTurn({ role: 'user', content: input });
    this.#memory.pushTurn({ role: 'assistant', content: synthesis.text });
    await this.#memory.save({ input, output: synthesis.text, type: 'planned' });
    return synthesis;
  }

  // Iterative tool-call loop: lets the model invoke tools until it produces a
  // final text answer (or the iteration budget is exhausted).
  async #chatWithTools({ taskType, messages, systemPrompt, onProgress, signal }) {
    const tools = this.#toolset?.declarations ?? [];
    const msgs = [...messages];
    let maxIter = 8; // hard cap so a looping model cannot spin forever

    while (maxIter-- > 0) {
      if (signal?.aborted) throw new DOMException('Cancelled by user', 'AbortError');

      const res = await this.#router.chat({ taskType, messages: msgs, tools, systemPrompt, signal });

      if (!res.toolCalls?.length)
        return { text: res.text, model: res.model };

      // FIX: push the assistant turn (with all its tool calls) exactly ONCE
      // per round. The previous version pushed it inside the per-tool loop,
      // so a model issuing N parallel tool calls duplicated the assistant
      // message N times in the transcript sent back to the model.
      msgs.push({ role: 'assistant', content: res.rawContent ?? '', toolCalls: res.toolCalls });

      for (const toolCall of res.toolCalls) {
        onProgress?.({ type: 'tool_start', toolName: toolCall.name, toolArgs: toolCall.arguments });

        let toolResult;
        try { toolResult = await this.#toolset.execute(toolCall.name, toolCall.arguments ?? {}); }
        catch (err) { toolResult = { error: err.message }; }

        onProgress?.({ type: 'tool_done', toolName: toolCall.name, toolResult });

        msgs.push({ role: 'tool', tool_call_id: toolCall.id, content: JSON.stringify(toolResult) });
      }
    }

    return { text: 'Límite de iteraciones de herramientas alcanzado.', model: 'system' };
  }

  // LLM-based task planner — asks the model for a JSON array of subtasks.
  async #buildPlan(input) {
    const res = await this.#router.chat({
      taskType: 'fast',
      systemPrompt: PLANNER_PROMPT,
      messages: [{ role: 'user', content: input }],
    });
    try {
      const raw = res.text;
      // Accept a ```json fence, a bare JSON array, or the raw text itself.
      const json = raw.match(/```json\n?([\s\S]*?)\n?```/)?.[1]
        ?? raw.match(/(\[[\s\S]*\])/)?.[1] ?? raw;
      return JSON.parse(json);
    } catch {
      // Unparseable plan → degrade to a single general-purpose task.
      return [{ id: '1', type: 'general', description: input, priority: 1 }];
    }
  }

  // Heuristic that decides simple vs parallel execution from the input text.
  #assessComplexity(input) {
    if (input.length < 60) return 'simple';

    const multiTaskPatterns = [
      /compara.+y.+|versus|vs\b/i,
      /(piura|cajamarca|cusco|lima|junín|ayacucho).+(y|,).+(piura|cajamarca|cusco|lima|junín|ayacucho)/i,
      /analiz.+y.+(genera|crea|report)/i,
      /por (cada|todos los) (departamento|region|cultivo)/i,
      /(informe completo|reporte detallado|análisis nacional|resumen nacional)/i,
      /(datos|estadísticas).+(reporte|pdf|informe)/i,
    ];

    if (multiTaskPatterns.some(pattern => pattern.test(input))) return 'complex';

    // Long imperative requests also go through the planned path.
    if (input.length > 120 &&
      /\b(analiza|genera|crea|compara|investiga|planifica|extrae|procesa|reporte|informe)\b/i.test(input))
      return 'complex';

    return 'simple';
  }
}
|
|
146
|
+
|
|
147
|
+
const PLANNER_PROMPT = `Eres un planificador de tareas paralelas. Divide la solicitud en subtareas INDEPENDIENTES que puedan ejecutarse en paralelo.
|
|
148
|
+
Responde ÚNICAMENTE con un JSON array, sin texto adicional:
|
|
149
|
+
[
|
|
150
|
+
{"id":"1","type":"agriculture","description":"consulta específica 1","priority":1},
|
|
151
|
+
{"id":"2","type":"deforestation","description":"análisis específico 2","priority":1}
|
|
152
|
+
]
|
|
153
|
+
Tipos disponibles: agriculture, minning, deforestation, general.
|
|
154
|
+
Máximo 4 subtareas. Cada subtarea debe ser autónoma y específica.
|
|
155
|
+
Si la tarea no tiene subtareas paralelas claras, devuelve solo 1 tarea de tipo general.`;
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
// infrastructure/core/queue.js
|
|
2
|
+
// Priority task queue — min-heap by priority
|
|
3
|
+
|
|
4
|
+
/**
 * Binary min-heap keyed on `task.priority`: lower numbers dequeue first
 * (1 before 2 before 3). Ties are broken arbitrarily — the heap is not stable.
 */
export class TaskQueue {

  #heap = [];

  /** Insert a task; stamps `enqueuedAt` and returns the stored item. */
  enqueue(task) {
    const entry = { ...task, enqueuedAt: Date.now() };
    this.#heap.push(entry);
    this.#siftUp(this.#heap.length - 1);
    return entry;
  }

  /** Remove and return the highest-urgency task, or null when empty. */
  dequeue() {
    const heap = this.#heap;
    if (heap.length === 0) return null;
    const head = heap[0];
    const tail = heap.pop();
    if (heap.length > 0) {
      heap[0] = tail;
      this.#siftDown(0);
    }
    return head;
  }

  peek() { return this.#heap[0] ?? null; }
  size() { return this.#heap.length; }
  isEmpty() { return this.#heap.length === 0; }

  // Restore heap order upwards from index i after an insertion.
  #siftUp(i) {
    const heap = this.#heap;
    let child = i;
    while (child > 0) {
      const parent = (child - 1) >> 1;
      if (heap[parent].priority <= heap[child].priority) return;
      [heap[parent], heap[child]] = [heap[child], heap[parent]];
      child = parent;
    }
  }

  // Restore heap order downwards from index i after a root replacement.
  #siftDown(i) {
    const heap = this.#heap;
    const size = heap.length;
    let node = i;
    for (;;) {
      let best = node;
      const left = 2 * node + 1;
      const right = left + 1;
      if (left < size && heap[left].priority < heap[best].priority) best = left;
      if (right < size && heap[right].priority < heap[best].priority) best = right;
      if (best === node) return;
      [heap[best], heap[node]] = [heap[node], heap[best]];
      node = best;
    }
  }
}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
// infrastructure/core/workers.js
|
|
2
|
+
// Worker pool — runs subtasks in parallel with concurrency limit
|
|
3
|
+
|
|
4
|
+
import { AgentFactory } from '../agents/agent-factory.js';
|
|
5
|
+
import { AgentType } from '../constants/index.js';
|
|
6
|
+
|
|
7
|
+
// Identity map of the agent types the pool instantiates at construction time;
// frozen so the worker set cannot be mutated at runtime. Keys and values are
// both AgentType tokens (WorkerPool only iterates the values).
const AGENT_REGISTRY = Object.freeze({
  [AgentType.AGRICULTURE]: AgentType.AGRICULTURE,
  [AgentType.MINNING]: AgentType.MINNING,
  [AgentType.DEFORESTATION]: AgentType.DEFORESTATION,
  [AgentType.GENERAL]: AgentType.GENERAL,
});
|
|
13
|
+
|
|
14
|
+
/**
 * Executes planned subtasks in parallel, capped at #maxConcurrent per batch.
 * One agent per registered type is pre-built in the constructor; tasks with
 * an unknown type fall back to the GENERAL agent.
 */
export class WorkerPool {

  #maxConcurrent;
  #router;
  #memory;
  #agents;

  constructor({ workers = 4, router, memory, toolset }) {
    this.#maxConcurrent = workers;
    this.#router = router;
    this.#memory = memory;
    this.#agents = {};
    for (const type of Object.values(AGENT_REGISTRY)) {
      this.#agents[type] = AgentFactory.create(type, { router, memory, toolset });
    }
  }

  /**
   * Run every task, #maxConcurrent at a time. A failing task never rejects
   * the whole run — it is reported as an `{ error: true }` result entry.
   */
  async runAll(tasks, { onProgress, signal } = {}) {
    const collected = [];
    for (let start = 0; start < tasks.length; start += this.#maxConcurrent) {
      const slice = tasks.slice(start, start + this.#maxConcurrent);
      const settled = await Promise.allSettled(
        slice.map(task => this.#executeTask(task, { onProgress, signal }))
      );
      settled.forEach((outcome, idx) => {
        if (outcome.status === 'fulfilled') {
          collected.push(outcome.value);
          return;
        }
        collected.push({
          ...slice[idx],
          result: `Error: ${outcome.reason?.message ?? 'unknown'}`,
          error: true,
        });
      });
    }
    return collected;
  }

  // Dispatch one subtask to its typed agent and normalize the outcome shape.
  async #executeTask(task, { onProgress, signal }) {
    onProgress?.({ type: 'task_start', task: task.id, taskType: task.type });
    const agent = this.#agents[task.type] ?? this.#agents[AgentType.GENERAL];
    const outcome = await agent.run(task.description, { signal });
    onProgress?.({ type: 'task_done', task: task.id, taskType: task.type });
    return {
      id: task.id,
      type: task.type,
      input: task.description,
      result: outcome.text,
      model: outcome.model,
      priority: task.priority,
    };
  }
}
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
// infrastructure/memory/store.js
|
|
2
|
+
// Two-layer memory:
|
|
3
|
+
// 1. conversationBuffer — last N turns in RAM (immediate context, always included)
|
|
4
|
+
// 2. entries (JSON file) — persistent history, keyword search
|
|
5
|
+
|
|
6
|
+
import fs from 'fs/promises';
|
|
7
|
+
import path from 'path';
|
|
8
|
+
|
|
9
|
+
const MEMORY_FILE = path.resolve(process.cwd(), '.agent_memory.json');
const MAX_ENTRIES = 500;
const BUFFER_LIMIT = 12; // last 12 turns (~6 exchanges)

/**
 * Two-layer memory:
 *  1. in-RAM conversation buffer — last BUFFER_LIMIT turns, always included
 *     as immediate model context;
 *  2. JSON-file entry log — persistent history with keyword search, capped
 *     at MAX_ENTRIES.
 */
export class MemoryStore {

  #data = { entries: [], sessions: [] };
  #buffer = [];

  /** Load the persistent file; recreate it fresh when missing or corrupt. */
  async init() {
    try {
      this.#data = JSON.parse(await fs.readFile(MEMORY_FILE, 'utf8'));
    } catch {
      this.#data = { entries: [], sessions: [] };
      await this.#flush();
    }
  }

  /**
   * Append one turn to the active conversation. When over the limit, the
   * oldest user+assistant PAIR is dropped so role alternation stays valid.
   */
  pushTurn({ role, content }) {
    this.#buffer.push({ role, content });
    if (this.#buffer.length > BUFFER_LIMIT) this.#buffer.splice(0, 2);
  }

  /** Snapshot of the active conversation, oldest turn first. */
  getBuffer() {
    return [...this.#buffer];
  }

  /** Drop the in-RAM conversation (new session / `/clear` command). */
  clearBuffer() {
    this.#buffer = [];
  }

  /** Persist one completed interaction; returns the stored entry. */
  async save({ input, output, type = 'general', tasks = [] }) {
    const entry = {
      id: Date.now().toString(36),
      timestamp: new Date().toISOString(),
      input,
      output,
      type,
      tasks,
      keywords: this.#extractKeywords(input + ' ' + output),
    };
    this.#data.entries.unshift(entry);
    if (this.#data.entries.length > MAX_ENTRIES)
      this.#data.entries = this.#data.entries.slice(0, MAX_ENTRIES);
    await this.#flush();
    return entry;
  }

  /**
   * Keyword-overlap search over past entries. Returns up to `limit` entries
   * (score and keywords stripped), best match first, newest breaking ties.
   */
  async recall(query, { limit = 3 } = {}) {
    const queryWords = this.#extractKeywords(query);
    const scored = this.#data.entries
      .map(e => ({ ...e, score: this.#similarity(queryWords, e.keywords) }))
      .filter(e => e.score > 0.15);
    scored.sort((a, b) => b.score - a.score || new Date(b.timestamp) - new Date(a.timestamp));
    return scored.slice(0, limit).map(({ score, keywords, ...rest }) => rest);
  }

  /** Most recent persisted entries, newest first. */
  async recent(limit = 10) { return this.#data.entries.slice(0, limit); }

  /** Wipe both layers and rewrite the file. */
  async clear() {
    this.#data = { entries: [], sessions: [] };
    this.#buffer = [];
    await this.#flush();
  }

  /** Quick counters for status displays. */
  stats() {
    return {
      total: this.#data.entries.length,
      buffer: this.#buffer.length,
      oldest: this.#data.entries.at(-1)?.timestamp ?? null,
      newest: this.#data.entries.at(0)?.timestamp ?? null,
    };
  }

  // Write the whole store to disk (pretty-printed for manual inspection).
  async #flush() {
    await fs.writeFile(MEMORY_FILE, JSON.stringify(this.#data, null, 2), 'utf8');
  }

  // Lowercased, deduped content words: length > 3 and not a stop word.
  #extractKeywords(text) {
    const stopWords = new Set([
      'de','la','el','en','y','a','que','the','of','and','to','is',
      'los','las','con','por','para','del','un','una','este','esta',
      'eso','ello','hay','son','está','estoy','tiene','tengo','puedo','puede','sería',
    ]);
    const words = text
      .toLowerCase()
      .replace(/[^\w\sáéíóúñü]/g, ' ')
      .split(/\s+/)
      .filter(w => w.length > 3 && !stopWords.has(w));
    return [...new Set(words)];
  }

  // Overlap ratio: shared words / size of the larger keyword list (0..1).
  #similarity(a, b) {
    const lookup = new Set(b);
    let shared = 0;
    for (const word of a) if (lookup.has(word)) shared += 1;
    return shared / Math.max(a.length, b.length, 1);
  }
}
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
// models/providers/anthropic.js
|
|
2
|
+
// Anthropic Claude provider implementation
|
|
3
|
+
|
|
4
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
5
|
+
|
|
6
|
+
/**
 * Anthropic Claude provider. Stays unavailable (isAvailable() === false)
 * until init() finds ANTHROPIC_API_KEY in the environment.
 */
export class AnthropicProvider {

  #client;
  #available = false;

  /** Create the SDK client when an API key is configured; no-op otherwise. */
  async init() {
    if (!process.env.ANTHROPIC_API_KEY) return;
    this.#client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
    this.#available = true;
  }

  async isAvailable() { return this.#available; }

  models() { return ['claude-sonnet-4-5', 'claude-haiku-4-5-20251001']; }

  /**
   * One chat completion. Returns { text, rawContent, toolCalls, model }.
   * FIX: collect EVERY `tool_use` and `text` content block — Claude can
   * return several of each (parallel tool use); the previous version kept
   * only the first of each and silently dropped the rest.
   */
  async chat({ model = 'claude-sonnet-4-5', messages, tools = [], systemPrompt = '' }) {
    const params = {
      model,
      max_tokens: 4096,
      messages: messages.map(m => ({ role: m.role, content: m.content })),
      ...(systemPrompt ? { system: systemPrompt } : {}),
      ...(tools.length ? { tools: this.#formatTools(tools) } : {}),
    };

    const response = await this.#client.messages.create(params);

    const text = response.content
      .filter(b => b.type === 'text')
      .map(b => b.text)
      .join('');

    const toolCalls = response.content
      .filter(b => b.type === 'tool_use')
      .map(b => ({
        id: b.id,
        name: b.name,
        arguments: b.input ?? {},
      }));

    return {
      text,
      rawContent: '',
      toolCalls,
      model: `anthropic/${model}`,
    };
  }

  // Map internal tool declarations to Anthropic's input_schema tool format.
  #formatTools(tools) {
    return tools.map(t => ({
      name: t.name,
      description: t.description,
      input_schema: t.parameters ?? { type: 'object', properties: {} },
    }));
  }
}
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
// models/providers/gateway.js
|
|
2
|
+
// GHP Gateway — Kilo AI Gateway (OpenAI-compatible)
|
|
3
|
+
// Modelos :free funcionan SIN API key (200 req/hora anónimo)
|
|
4
|
+
|
|
5
|
+
import { KNOWN_FREE_MODELS } from '../constants/index.js';
|
|
6
|
+
|
|
7
|
+
const BASE_URL = 'https://api.kilo.ai/api/gateway';
|
|
8
|
+
|
|
9
|
+
/**
 * GHP Gateway — Kilo AI Gateway (OpenAI-compatible REST API).
 * `:free` models work WITHOUT an API key (rate-limited anonymous access),
 * so this provider always ends up available after init().
 */
export class GatewayProvider {

  #apiKey = null;
  #available = false;
  #modelList = [];

  /**
   * Fetch the live model catalog; fall back to the bundled KNOWN_FREE_MODELS
   * list when the request fails, times out, or returns non-2xx.
   */
  async init() {
    this.#apiKey = process.env.GH_PPA_KEY ?? process.env.KILO_API_KEY ?? null;
    try {
      const res = await fetch(`${BASE_URL}/models`, { signal: AbortSignal.timeout(6_000) });
      if (res.ok) {
        const data = await res.json();
        const raw = Array.isArray(data) ? data : (data.data ?? []);
        this.#modelList = raw
          .map(m => ({
            id: m.id,
            name: m.name ?? m.id,
            provider: m.id.split('/')[0] ?? 'unknown',
            free: m.id.endsWith(':free') || !m.pricing?.prompt || m.pricing?.prompt === '0',
            context: m.context_length ?? null,
            gateway: true,
          }))
          // Free models first, then alphabetical by display name.
          .sort((a, b) => (b.free - a.free) || a.name.localeCompare(b.name));
      } else {
        this.#modelList = KNOWN_FREE_MODELS;
      }
    } catch {
      this.#modelList = KNOWN_FREE_MODELS;
    }
    this.#available = true;
  }

  async isAvailable() { return this.#available; }
  getModelList() { return this.#modelList; }
  models() { return this.#modelList.map(m => m.id); }

  /**
   * One chat completion on the OpenAI wire format.
   * Returns { text, model } — or { text, model, rawContent, toolCalls } when
   * the model requests tool execution. Throws retryable Errors on HTTP or
   * application-level failures so the router can fail over.
   */
  async chat({ model, messages, tools = [], systemPrompt = '', signal = null }) {
    const headers = { 'Content-Type': 'application/json' };
    if (this.#apiKey) headers['Authorization'] = `Bearer ${this.#apiKey}`;

    // Convertir messages con tool results al formato OpenAI
    const formattedMessages = [
      ...(systemPrompt ? [{ role: 'system', content: systemPrompt }] : []),
      ...messages.map(m => {
        if (m.role === 'tool') {
          return { role: 'tool', tool_call_id: m.tool_call_id, content: m.content };
        }
        if (m.toolCalls) {
          return {
            role: 'assistant',
            content: m.content ?? null,
            tool_calls: m.toolCalls.map(tc => ({
              id: tc.id,
              type: 'function',
              function: { name: tc.name, arguments: JSON.stringify(tc.arguments ?? {}) },
            })),
          };
        }
        return { role: m.role, content: m.content };
      }),
    ];

    const body = { model, messages: formattedMessages };

    // Solo agregar tools si hay declaraciones
    if (tools.length > 0) {
      body.tools = tools.map(t => ({
        type: 'function',
        function: {
          name: t.name,
          description: t.description,
          parameters: t.parameters ?? { type: 'object', properties: {} },
        },
      }));
      body.tool_choice = 'auto';
    }

    const res = await fetch(`${BASE_URL}/chat/completions`, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
      signal: signal ?? AbortSignal.timeout(60_000),
    });

    if (!res.ok) {
      const text = await res.text().catch(() => String(res.status));
      throw this.#retryableError(`GHP Gateway [${model}] → ${res.status}: ${text}`);
    }

    const data = await res.json();

    if (data.error) {
      throw this.#retryableError(`GHP Gateway: ${data.error.message ?? JSON.stringify(data.error)}`);
    }

    const choice = data.choices?.[0];
    const message = choice?.message;

    if (!message) {
      throw this.#retryableError(`GHP Gateway [${model}] no retornó contenido`);
    }

    // Si el modelo quiere usar tools
    if (message.tool_calls?.length > 0) {
      return {
        text: message.content ?? '',
        model: `gateway/${model}`,
        rawContent: message.content,
        toolCalls: message.tool_calls.map(tc => ({
          id: tc.id,
          name: tc.function.name,
          arguments: this.#parseArgs(tc.function.arguments),
        })),
      };
    }

    const text = message.content;
    if (!text) {
      throw this.#retryableError(`GHP Gateway [${model}] respuesta vacía`);
    }

    return { text, model: `gateway/${model}` };
  }

  // Build an Error flagged retryable so the router can fail over.
  #retryableError(message) {
    const err = new Error(message);
    err.retryable = true;
    return err;
  }

  // FIX: guard against malformed JSON in tool-call argument strings — a model
  // emitting broken JSON previously threw from the bare JSON.parse and
  // crashed the whole chat turn. Degrade to {} instead.
  #parseArgs(raw) {
    try { return JSON.parse(raw || '{}'); }
    catch { return {}; }
  }
}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
// models/providers/gemini.js
|
|
2
|
+
// Google Gemini provider implementation
|
|
3
|
+
|
|
4
|
+
import { GoogleGenerativeAI } from '@google/generative-ai';
|
|
5
|
+
|
|
6
|
+
/**
 * Google Gemini provider. Stays unavailable until init() finds
 * GEMINI_API_KEY in the environment.
 */
export class GeminiProvider {

  #client;
  #available = false;

  /** Build the SDK client when an API key is configured; no-op otherwise. */
  async init() {
    if (!process.env.GEMINI_API_KEY) return;
    this.#client = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
    this.#available = true;
  }

  async isAvailable() { return this.#available; }

  models() { return ['gemini-2.5-flash', 'gemini-2.5-pro']; }

  /**
   * One chat turn. All but the last message become Gemini chat history
   * (assistant → 'model', everything else → 'user'); the final message is
   * sent as the prompt. Returns { text, rawContent, toolCalls, model }.
   */
  async chat({ model = 'gemini-2.5-flash', messages, tools = [], systemPrompt = '' }) {
    const geminiModel = this.#client.getGenerativeModel({
      model,
      ...(systemPrompt ? { systemInstruction: systemPrompt } : {}),
      ...(tools.length ? { tools: [{ functionDeclarations: tools }] } : {}),
    });

    const priorTurns = messages.slice(0, -1);
    const history = priorTurns.map(turn => ({
      role: turn.role === 'assistant' ? 'model' : 'user',
      parts: [{ text: turn.content }],
    }));

    const session = geminiModel.startChat({ history });
    const latest = messages.at(-1);
    const result = await session.sendMessage(latest.content);

    // functionCalls is a method on the SDK response; may be absent.
    const calls = result.response.functionCalls?.() ?? [];
    const toolCalls = calls.map(fc => ({
      // Gemini supplies no call id — synthesize a unique one.
      id: `gemini-${Date.now()}-${Math.random().toString(36).slice(2)}`,
      name: fc.name,
      arguments: fc.args ?? {},
    }));

    return {
      text: result.response.text(),
      rawContent: '',
      toolCalls,
      model: `gemini/${model}`,
    };
  }
}
|