@olane/o-tool-registry 0.6.2 → 0.6.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/auth/index.d.ts +4 -0
- package/dist/auth/index.d.ts.map +1 -0
- package/dist/auth/index.js +3 -0
- package/dist/auth/interfaces/index.d.ts +4 -0
- package/dist/auth/interfaces/index.d.ts.map +1 -0
- package/dist/auth/interfaces/index.js +3 -0
- package/dist/auth/interfaces/oAuth-tokens.interface.d.ts +9 -0
- package/dist/auth/interfaces/oAuth-tokens.interface.d.ts.map +1 -0
- package/dist/auth/interfaces/oAuth-tokens.interface.js +1 -0
- package/dist/auth/interfaces/oAuth-user-info.interface.d.ts +8 -0
- package/dist/auth/interfaces/oAuth-user-info.interface.d.ts.map +1 -0
- package/dist/auth/interfaces/oAuth-user-info.interface.js +1 -0
- package/dist/auth/interfaces/oAuth.config.d.ts +14 -0
- package/dist/auth/interfaces/oAuth.config.d.ts.map +1 -0
- package/dist/auth/interfaces/oAuth.config.js +1 -0
- package/dist/auth/methods/auth.methods.d.ts +5 -0
- package/dist/auth/methods/auth.methods.d.ts.map +1 -0
- package/dist/auth/methods/auth.methods.js +302 -0
- package/dist/auth/oAuth.tool.d.ts +20 -0
- package/dist/auth/oAuth.tool.d.ts.map +1 -0
- package/dist/auth/oAuth.tool.js +419 -0
- package/dist/embeddings/embeddings.tool.d.ts +6 -0
- package/dist/embeddings/embeddings.tool.d.ts.map +1 -0
- package/dist/embeddings/embeddings.tool.js +11 -0
- package/dist/embeddings/huggingface-text-embeddings.tool.d.ts +9 -0
- package/dist/embeddings/huggingface-text-embeddings.tool.d.ts.map +1 -0
- package/dist/embeddings/huggingface-text-embeddings.tool.js +21 -0
- package/dist/embeddings/index.d.ts +4 -0
- package/dist/embeddings/index.d.ts.map +1 -0
- package/dist/embeddings/index.js +3 -0
- package/dist/embeddings/methods/text-embeddings.method.d.ts +5 -0
- package/dist/embeddings/methods/text-embeddings.method.d.ts.map +1 -0
- package/dist/embeddings/methods/text-embeddings.method.js +29 -0
- package/dist/embeddings/text-embeddings.tool.d.ts +9 -0
- package/dist/embeddings/text-embeddings.tool.d.ts.map +1 -0
- package/dist/embeddings/text-embeddings.tool.js +13 -0
- package/dist/index.d.ts +7 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +6 -0
- package/dist/init.d.ts +3 -0
- package/dist/init.d.ts.map +1 -0
- package/dist/init.js +36 -0
- package/dist/intelligence/anthropic-intelligence.tool.d.ts +224 -0
- package/dist/intelligence/anthropic-intelligence.tool.d.ts.map +1 -0
- package/dist/intelligence/anthropic-intelligence.tool.js +477 -0
- package/dist/intelligence/gemini-intelligence.tool.d.ts +29 -0
- package/dist/intelligence/gemini-intelligence.tool.d.ts.map +1 -0
- package/dist/intelligence/gemini-intelligence.tool.js +267 -0
- package/dist/intelligence/index.d.ts +6 -0
- package/dist/intelligence/index.d.ts.map +1 -0
- package/dist/intelligence/index.js +5 -0
- package/dist/intelligence/intelligence.tool.d.ts +11 -0
- package/dist/intelligence/intelligence.tool.d.ts.map +1 -0
- package/dist/intelligence/intelligence.tool.js +120 -0
- package/dist/intelligence/methods/intelligence.methods.d.ts +5 -0
- package/dist/intelligence/methods/intelligence.methods.d.ts.map +1 -0
- package/dist/intelligence/methods/intelligence.methods.js +98 -0
- package/dist/intelligence/ollama-intelligence.tool.d.ts +36 -0
- package/dist/intelligence/ollama-intelligence.tool.d.ts.map +1 -0
- package/dist/intelligence/ollama-intelligence.tool.js +312 -0
- package/dist/intelligence/openai-intelligence.tool.d.ts +30 -0
- package/dist/intelligence/openai-intelligence.tool.d.ts.map +1 -0
- package/dist/intelligence/openai-intelligence.tool.js +255 -0
- package/dist/intelligence/perplexity-intelligence.tool.d.ts +27 -0
- package/dist/intelligence/perplexity-intelligence.tool.d.ts.map +1 -0
- package/dist/intelligence/perplexity-intelligence.tool.js +309 -0
- package/dist/mcp/index.d.ts +3 -0
- package/dist/mcp/index.d.ts.map +1 -0
- package/dist/mcp/index.js +2 -0
- package/dist/mcp/mcp-bridge.tool.d.ts +12 -0
- package/dist/mcp/mcp-bridge.tool.d.ts.map +1 -0
- package/dist/mcp/mcp-bridge.tool.js +126 -0
- package/dist/mcp/mcp.tool.d.ts +14 -0
- package/dist/mcp/mcp.tool.d.ts.map +1 -0
- package/dist/mcp/mcp.tool.js +83 -0
- package/dist/mcp/methods/mcp-bridge.methods.d.ts +5 -0
- package/dist/mcp/methods/mcp-bridge.methods.d.ts.map +1 -0
- package/dist/mcp/methods/mcp-bridge.methods.js +79 -0
- package/dist/nlp/index.d.ts +2 -0
- package/dist/nlp/index.d.ts.map +1 -0
- package/dist/nlp/index.js +1 -0
- package/dist/nlp/methods/nlp.methods.d.ts +5 -0
- package/dist/nlp/methods/nlp.methods.d.ts.map +1 -0
- package/dist/nlp/methods/nlp.methods.js +15 -0
- package/dist/nlp/ner.tool.d.ts +7 -0
- package/dist/nlp/ner.tool.d.ts.map +1 -0
- package/dist/nlp/ner.tool.js +25 -0
- package/dist/src/intelligence/anthropic-intelligence.tool.d.ts +224 -0
- package/dist/src/intelligence/anthropic-intelligence.tool.d.ts.map +1 -0
- package/dist/src/intelligence/anthropic-intelligence.tool.js +477 -0
- package/dist/src/intelligence/gemini-intelligence.tool.d.ts +29 -0
- package/dist/src/intelligence/gemini-intelligence.tool.d.ts.map +1 -0
- package/dist/src/intelligence/gemini-intelligence.tool.js +267 -0
- package/dist/src/intelligence/index.d.ts +6 -0
- package/dist/src/intelligence/index.d.ts.map +1 -0
- package/dist/src/intelligence/index.js +5 -0
- package/dist/src/intelligence/intelligence.tool.d.ts +11 -0
- package/dist/src/intelligence/intelligence.tool.d.ts.map +1 -0
- package/dist/src/intelligence/intelligence.tool.js +134 -0
- package/dist/src/intelligence/methods/intelligence.methods.d.ts +5 -0
- package/dist/src/intelligence/methods/intelligence.methods.d.ts.map +1 -0
- package/dist/src/intelligence/methods/intelligence.methods.js +132 -0
- package/dist/src/intelligence/ollama-intelligence.tool.d.ts +36 -0
- package/dist/src/intelligence/ollama-intelligence.tool.d.ts.map +1 -0
- package/dist/src/intelligence/ollama-intelligence.tool.js +312 -0
- package/dist/src/intelligence/openai-intelligence.tool.d.ts +30 -0
- package/dist/src/intelligence/openai-intelligence.tool.d.ts.map +1 -0
- package/dist/src/intelligence/openai-intelligence.tool.js +255 -0
- package/dist/src/intelligence/perplexity-intelligence.tool.d.ts +28 -0
- package/dist/src/intelligence/perplexity-intelligence.tool.d.ts.map +1 -0
- package/dist/src/intelligence/perplexity-intelligence.tool.js +310 -0
- package/dist/src/mcp/index.d.ts +3 -0
- package/dist/src/mcp/index.d.ts.map +1 -0
- package/dist/src/mcp/index.js +2 -0
- package/dist/src/mcp/mcp-bridge.tool.d.ts +12 -0
- package/dist/src/mcp/mcp-bridge.tool.d.ts.map +1 -0
- package/dist/src/mcp/mcp-bridge.tool.js +107 -0
- package/dist/src/mcp/mcp.tool.d.ts +15 -0
- package/dist/src/mcp/mcp.tool.d.ts.map +1 -0
- package/dist/src/mcp/mcp.tool.js +85 -0
- package/dist/src/mcp/methods/mcp-bridge.methods.d.ts +5 -0
- package/dist/src/mcp/methods/mcp-bridge.methods.d.ts.map +1 -0
- package/dist/src/mcp/methods/mcp-bridge.methods.js +58 -0
- package/dist/vector-store/index.d.ts +3 -0
- package/dist/vector-store/index.d.ts.map +1 -0
- package/dist/vector-store/index.js +2 -0
- package/dist/vector-store/langchain-memory.vector-store.tool.d.ts +14 -0
- package/dist/vector-store/langchain-memory.vector-store.tool.d.ts.map +1 -0
- package/dist/vector-store/langchain-memory.vector-store.tool.js +60 -0
- package/dist/vector-store/methods/vector-store.methods.d.ts +5 -0
- package/dist/vector-store/methods/vector-store.methods.d.ts.map +1 -0
- package/dist/vector-store/methods/vector-store.methods.js +60 -0
- package/dist/vector-store/vector-memory.tool.d.ts +11 -0
- package/dist/vector-store/vector-memory.tool.d.ts.map +1 -0
- package/dist/vector-store/vector-memory.tool.js +13 -0
- package/package.json +8 -8
@@ -0,0 +1,267 @@
+import { oAddress } from '@olane/o-core';
+import { oVirtualTool } from '@olane/o-tool';
+import { INTELLIGENCE_PARAMS } from './methods/intelligence.methods.js';
+export class GeminiIntelligenceTool extends oVirtualTool {
+    constructor(config) {
+        super({
+            ...config,
+            address: new oAddress('o://gemini'),
+            description: 'Intelligence tool using Google Gemini suite of models',
+            methods: INTELLIGENCE_PARAMS,
+            dependencies: [],
+        });
+    }
+    /**
+     * Chat completion with Gemini
+     */
+    async _tool_completion(request) {
+        try {
+            const params = request.params;
+            const { model = this.defaultModel, messages, ...options } = params;
+            if (!this.apiKey) {
+                return {
+                    success: false,
+                    error: 'Gemini API key is required',
+                };
+            }
+            if (!messages || !Array.isArray(messages)) {
+                return {
+                    success: false,
+                    error: '"messages" array is required',
+                };
+            }
+            // Convert messages to Gemini format
+            const contents = messages.map((msg) => ({
+                role: msg.role === 'assistant' ? 'model' : 'user',
+                parts: [{ text: msg.content }],
+            }));
+            const chatRequest = {
+                contents,
+                generationConfig: {
+                    temperature: options.temperature,
+                    topK: options.topK,
+                    topP: options.topP,
+                    maxOutputTokens: options.maxOutputTokens,
+                    stopSequences: options.stopSequences,
+                },
+                safetySettings: options.safetySettings,
+            };
+            const response = await fetch(`${this.baseUrl}/models/${model}:generateContent?key=${this.apiKey}`, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                },
+                body: JSON.stringify(chatRequest),
+            });
+            if (!response.ok) {
+                const errorText = await response.text();
+                return {
+                    success: false,
+                    error: `Gemini API error: ${response.status} - ${errorText}`,
+                };
+            }
+            const result = (await response.json());
+            if (!result.candidates || result.candidates.length === 0) {
+                return {
+                    success: false,
+                    error: 'No response generated from Gemini',
+                };
+            }
+            return {
+                success: true,
+                response: result.candidates[0].content.parts[0]?.text || '',
+                model: model,
+                usage: result.usageMetadata,
+                finish_reason: result.candidates[0].finishReason,
+                safety_ratings: result.candidates[0].safetyRatings,
+            };
+        }
+        catch (error) {
+            return {
+                success: false,
+                error: `Failed to complete chat: ${error.message}`,
+            };
+        }
+    }
+    /**
+     * Generate text with Gemini
+     */
+    async _tool_generate(request) {
+        try {
+            const params = request.params;
+            const { model = this.defaultModel, prompt, system, ...options } = params;
+            if (!this.apiKey) {
+                return {
+                    success: false,
+                    error: 'Gemini API key is required',
+                };
+            }
+            if (!prompt) {
+                return {
+                    success: false,
+                    error: 'Prompt is required',
+                };
+            }
+            // Combine system and user prompt
+            const fullPrompt = system ? `${system}\n\n${prompt}` : prompt;
+            const generateRequest = {
+                contents: [
+                    {
+                        parts: [{ text: fullPrompt }],
+                    },
+                ],
+                generationConfig: {
+                    temperature: options.temperature,
+                    topK: options.topK,
+                    topP: options.topP,
+                    maxOutputTokens: options.maxOutputTokens,
+                    stopSequences: options.stopSequences,
+                },
+                safetySettings: options.safetySettings,
+            };
+            const response = await fetch(`${this.baseUrl}/models/${model}:generateContent?key=${this.apiKey}`, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                },
+                body: JSON.stringify(generateRequest),
+            });
+            if (!response.ok) {
+                const errorText = await response.text();
+                return {
+                    success: false,
+                    error: `Gemini API error: ${response.status} - ${errorText}`,
+                };
+            }
+            const result = (await response.json());
+            if (!result.candidates || result.candidates.length === 0) {
+                return {
+                    success: false,
+                    error: 'No response generated from Gemini',
+                };
+            }
+            return {
+                success: true,
+                response: result.candidates[0].content.parts[0]?.text || '',
+                model: model,
+                usage: result.usageMetadata,
+                finish_reason: result.candidates[0].finishReason,
+                safety_ratings: result.candidates[0].safetyRatings,
+            };
+        }
+        catch (error) {
+            return {
+                success: false,
+                error: `Failed to generate text: ${error.message}`,
+            };
+        }
+    }
+    /**
+     * List available models
+     */
+    async _tool_list_models(request) {
+        try {
+            if (!this.apiKey) {
+                return {
+                    success: false,
+                    error: 'Gemini API key is required',
+                };
+            }
+            const response = await fetch(`${this.baseUrl}/models?key=${this.apiKey}`, {
+                method: 'GET',
+                headers: {
+                    'Content-Type': 'application/json',
+                },
+            });
+            if (!response.ok) {
+                const errorText = await response.text();
+                return {
+                    success: false,
+                    error: `Gemini API error: ${response.status} - ${errorText}`,
+                };
+            }
+            const result = (await response.json());
+            return {
+                success: true,
+                models: result.models,
+            };
+        }
+        catch (error) {
+            return {
+                success: false,
+                error: `Failed to list models: ${error.message}`,
+            };
+        }
+    }
+    /**
+     * Get model information
+     */
+    async _tool_model_info(request) {
+        try {
+            const params = request.params;
+            const { model = this.defaultModel } = params;
+            if (!this.apiKey) {
+                return {
+                    success: false,
+                    error: 'Gemini API key is required',
+                };
+            }
+            const response = await fetch(`${this.baseUrl}/models/${model}?key=${this.apiKey}`, {
+                method: 'GET',
+                headers: {
+                    'Content-Type': 'application/json',
+                },
+            });
+            if (!response.ok) {
+                const errorText = await response.text();
+                return {
+                    success: false,
+                    error: `Gemini API error: ${response.status} - ${errorText}`,
+                };
+            }
+            const result = (await response.json());
+            return {
+                success: true,
+                model_info: result,
+            };
+        }
+        catch (error) {
+            return {
+                success: false,
+                error: `Failed to get model info: ${error.message}`,
+            };
+        }
+    }
+    /**
+     * Check Gemini API status
+     */
+    async _tool_status(request) {
+        try {
+            if (!this.apiKey) {
+                return {
+                    success: false,
+                    status: 'offline',
+                    error: 'Gemini API key is required',
+                };
+            }
+            const response = await fetch(`${this.baseUrl}/models?key=${this.apiKey}`, {
+                method: 'GET',
+                headers: {
+                    'Content-Type': 'application/json',
+                },
+            });
+            return {
+                success: response.ok,
+                status: response.ok ? 'online' : 'offline',
+                status_code: response.status,
+            };
+        }
+        catch (error) {
+            return {
+                success: false,
+                status: 'offline',
+                error: `Connection failed: ${error.message}`,
+            };
+        }
+    }
+}
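
For orientation, a minimal sketch of how the completion method above could be driven. It assumes `apiKey`, `baseUrl`, and `defaultModel` have already been populated on the instance by the surrounding o-core runtime or config (they are read by the methods but not assigned in the constructor shown in this hunk), and it treats the request as a plain object exposing `params`, mirroring how the method reads `request.params`. Nothing here is part of the package itself.

// Hypothetical driver for an already-constructed GeminiIntelligenceTool instance.
// Assumption: gemini.apiKey, gemini.baseUrl, gemini.defaultModel are set elsewhere.
async function askGemini(gemini, text) {
  const result = await gemini._tool_completion({
    // request is modeled as a plain object with a `params` bag
    params: {
      messages: [{ role: 'user', content: text }],
      temperature: 0.2,
    },
  });
  if (!result.success) {
    throw new Error(result.error);
  }
  return result.response;
}
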
@@ -0,0 +1,6 @@
+export * from './ollama-intelligence.tool.js';
+export * from './openai-intelligence.tool.js';
+export * from './anthropic-intelligence.tool.js';
+export * from './gemini-intelligence.tool.js';
+export * from './intelligence.tool.js';
+//# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/intelligence/index.ts"],"names":[],"mappings":"AAAA,cAAc,+BAA+B,CAAC;AAC9C,cAAc,+BAA+B,CAAC;AAC9C,cAAc,kCAAkC,CAAC;AACjD,cAAc,+BAA+B,CAAC;AAC9C,cAAc,wBAAwB,CAAC"}
@@ -0,0 +1,11 @@
+import { oToolConfig, oVirtualTool } from '@olane/o-tool';
+import { oRequest } from '@olane/o-core';
+import { ToolResult } from '@olane/o-tool';
+export declare class IntelligenceTool extends oVirtualTool {
+    private roundRobinIndex;
+    constructor(config: oToolConfig);
+    requestMissingData(): Promise<ToolResult>;
+    chooseIntelligence(request: oRequest): Promise<ToolResult>;
+    _tool_prompt(request: oRequest): Promise<ToolResult>;
+}
+//# sourceMappingURL=intelligence.tool.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"intelligence.tool.d.ts","sourceRoot":"","sources":["../../../src/intelligence/intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,YAAY,EAAE,MAAM,eAAe,CAAC;AAE1D,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AACzC,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAO3C,qBAAa,gBAAiB,SAAQ,YAAY;IAChD,OAAO,CAAC,eAAe,CAAK;gBAChB,MAAM,EAAE,WAAW;IAmDzB,kBAAkB,IAAI,OAAO,CAAC,UAAU,CAAC;IAoCzC,kBAAkB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAqC1D,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAiB3D"}
@@ -0,0 +1,134 @@
+import { oVirtualTool } from '@olane/o-tool';
+import { oAddress } from '@olane/o-core';
+import { AnthropicIntelligenceTool } from './anthropic-intelligence.tool.js';
+import { OpenAIIntelligenceTool } from './openai-intelligence.tool.js';
+import { OllamaIntelligenceTool } from './ollama-intelligence.tool.js';
+import { PerplexityIntelligenceTool } from './perplexity-intelligence.tool.js';
+import { INTELLIGENCE_PARAMS } from './methods/intelligence.methods.js';
+export class IntelligenceTool extends oVirtualTool {
+    constructor(config) {
+        super({
+            ...config,
+            address: new oAddress('o://intelligence'),
+            methods: INTELLIGENCE_PARAMS,
+            description: config.description ||
+                'Tool to help route LLM requests to the best intelligence tool',
+            dependencies: [
+                {
+                    address: 'o://setup',
+                    parameters: [
+                        {
+                            name: 'intelligence',
+                            type: 'string',
+                            description: 'The intelligence tool to use',
+                        },
+                    ],
+                },
+            ],
+        });
+        this.roundRobinIndex = 0;
+        this.addChildNode(new AnthropicIntelligenceTool({
+            ...config,
+            parent: null,
+            leader: null,
+        }));
+        this.addChildNode(new OpenAIIntelligenceTool({
+            ...config,
+            parent: null,
+            leader: null,
+        }));
+        this.addChildNode(new OllamaIntelligenceTool({
+            ...config,
+            parent: null,
+            leader: null,
+        }));
+        this.addChildNode(new PerplexityIntelligenceTool({
+            ...config,
+            parent: null,
+            leader: null,
+        }));
+    }
+    async requestMissingData() {
+        // check to see if the anthropic key is provided in the ENV vars
+        if (process.env.ANTHROPIC_API_KEY) {
+            return {
+                choice: 'o://anthropic',
+                apiKey: process.env.ANTHROPIC_API_KEY,
+            };
+        }
+        // if the anthropic key is not in the vault, ask the human
+        this.logger.info('Anthropic API key not found in vault, asking human');
+        const humanResponse = await this.use(new oAddress('o://human'), {
+            method: 'question',
+            params: {
+                question: 'Enter the anthropic api key',
+            },
+        });
+        // process the human response
+        const { answer } = humanResponse.result.data;
+        this.logger.info('Human answer: ', answer);
+        await this.use(new oAddress('o://memory'), {
+            method: 'put',
+            params: {
+                key: 'anthropic-api-key',
+                value: answer,
+            },
+        });
+        return {
+            choice: 'o://anthropic',
+            apiKey: answer,
+        };
+    }
+    async chooseIntelligence(request) {
+        // check to see if anthropic key is in vault
+        const preference = await this.use(new oAddress('o://memory'), {
+            method: 'get',
+            params: {
+                key: 'intelligence-preference',
+            },
+        });
+        const { value } = preference.result.data;
+        if (value) {
+            return {
+                choice: value,
+                apiKey: '',
+            };
+        }
+        const response = await this.use(new oAddress('o://memory'), {
+            method: 'get',
+            params: {
+                key: 'anthropic-api-key',
+            },
+        });
+        // if the anthropic key is in the vault, use it
+        if (response.result.data) {
+            const { value } = response.result.data;
+            if (value) {
+                return {
+                    choice: 'o://anthropic',
+                    apiKey: value,
+                };
+            }
+        }
+        const result = await this.requestMissingData();
+        return result;
+    }
+    // we cannot wrap this tool use in a plan because it is a core dependency in all planning
+    async _tool_prompt(request) {
+        const { prompt } = request.params;
+        const intelligence = await this.chooseIntelligence(request);
+        const response = await this.use(new oAddress(intelligence.choice), {
+            method: 'completion',
+            params: {
+                apiKey: intelligence.apiKey,
+                messages: [
+                    {
+                        role: 'user',
+                        content: prompt,
+                    },
+                ],
+            },
+        });
+        return response.result.data;
+    }
+}
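
As the hunk above shows, `_tool_prompt` resolves a provider address with `chooseIntelligence` and then forwards a `completion` call to it. A hedged sketch of how another node in the same network might call this router, reusing the `this.use(address, { method, params })` pattern visible in the file; it assumes an `o://intelligence` node is already registered and that `node` is any o-tool instance exposing `.use()`:

// Hypothetical caller, mirroring the this.use(...) pattern above.
import { oAddress } from '@olane/o-core';

async function askNetwork(node, prompt) {
  const response = await node.use(new oAddress('o://intelligence'), {
    method: 'prompt',
    params: { prompt },
  });
  // _tool_prompt returns response.result.data from the chosen provider
  return response.result.data;
}
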
@@ -0,0 +1 @@
+{"version":3,"file":"intelligence.methods.d.ts","sourceRoot":"","sources":["../../../../src/intelligence/methods/intelligence.methods.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAE5C,eAAO,MAAM,mBAAmB,EAAE;IAAE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAA;CAmIzD,CAAC"}
@@ -0,0 +1,132 @@
+export const INTELLIGENCE_PARAMS = {
+    completion: {
+        name: 'completion',
+        description: 'Completion',
+        dependencies: [],
+        parameters: [
+            {
+                name: 'model',
+                type: 'string',
+                value: 'string',
+                description: 'The model to use for generation',
+                required: false,
+            },
+            {
+                name: 'messages',
+                type: 'array',
+                value: 'string[]',
+                description: 'The messages to use for generation',
+            },
+            {
+                name: 'options',
+                type: 'object',
+                value: 'object',
+                description: 'The options to use for generation',
+                required: false,
+            },
+        ],
+    },
+    generate: {
+        name: 'generate',
+        description: 'Generate',
+        dependencies: [],
+        parameters: [
+            {
+                name: 'model',
+                type: 'string',
+                value: 'string',
+                description: 'The model to use for generation',
+            },
+        ],
+    },
+    list_models: {
+        name: 'list_models',
+        description: 'List models',
+        dependencies: [],
+        parameters: [],
+    },
+    pull_model: {
+        name: 'pull_model',
+        description: 'Pull model',
+        dependencies: [],
+        parameters: [
+            {
+                name: 'model',
+                type: 'string',
+                value: 'string',
+                description: 'The model to pull',
+            },
+            {
+                name: 'insecure',
+                type: 'boolean',
+                value: 'boolean',
+                description: 'Whether to allow insecure connections',
+            },
+        ],
+    },
+    delete_model: {
+        name: 'delete_model',
+        description: 'Delete model',
+        dependencies: [],
+        parameters: [
+            {
+                name: 'model',
+                type: 'string',
+                value: 'string',
+                description: 'The model to delete',
+            },
+        ],
+    },
+    model_info: {
+        name: 'model_info',
+        description: 'Model info',
+        dependencies: [],
+        parameters: [
+            {
+                name: 'model',
+                type: 'string',
+                value: 'string',
+                description: 'The model to get info for',
+            },
+        ],
+    },
+    status: {
+        name: 'status',
+        description: 'Status',
+        dependencies: [],
+        parameters: [],
+    },
+    prompt: {
+        name: 'prompt',
+        description: 'Generate a response using AI based on a prompt',
+        dependencies: [],
+        parameters: [
+            {
+                name: 'prompt',
+                type: 'string',
+                value: 'string',
+                description: 'The prompt to send to the AI model',
+            },
+        ],
+    },
+    search: {
+        name: 'search',
+        description: 'Search for information using AI search capabilities',
+        dependencies: [],
+        parameters: [
+            {
+                name: 'query',
+                type: 'string',
+                value: 'string',
+                description: 'The search query to execute',
+            },
+            {
+                name: 'focus',
+                type: 'string',
+                value: 'string',
+                description: 'The focus area for the search',
+                required: false,
+            },
+        ],
+    },
+};
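
Each entry in `INTELLIGENCE_PARAMS` maps a method name to its metadata (`name`, `description`, `dependencies`, `parameters`), and the tools in this diff pass the whole object as `methods` in their constructor config. A small sketch of an additional method description in the same shape; the `translate` method and all of its fields are illustrative only and not part of this package:

// Hypothetical method entry following the INTELLIGENCE_PARAMS shape above.
export const EXTRA_INTELLIGENCE_PARAMS = {
  translate: {
    name: 'translate',
    description: 'Translate text to a target language',
    dependencies: [],
    parameters: [
      {
        name: 'text',
        type: 'string',
        value: 'string',
        description: 'The text to translate',
      },
      {
        name: 'target_language',
        type: 'string',
        value: 'string',
        description: 'The language to translate into',
        required: false,
      },
    ],
  },
};
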
@@ -0,0 +1,36 @@
+import { oRequest } from '@olane/o-core';
+import { oToolConfig, oVirtualTool, ToolResult } from '@olane/o-tool';
+export declare class OllamaIntelligenceTool extends oVirtualTool {
+    static defaultModel: string;
+    static defaultUrl: string;
+    constructor(config: oToolConfig);
+    /**
+     * Chat completion with Ollama
+     */
+    _tool_completion(request: oRequest): Promise<ToolResult>;
+    /**
+     * Generate text with Ollama
+     */
+    _tool_generate(request: oRequest): Promise<ToolResult>;
+    /**
+     * List available models
+     */
+    _tool_list_models(request: oRequest): Promise<ToolResult>;
+    /**
+     * Pull a model from Ollama library
+     */
+    _tool_pull_model(request: oRequest): Promise<ToolResult>;
+    /**
+     * Delete a model
+     */
+    _tool_delete_model(request: oRequest): Promise<ToolResult>;
+    /**
+     * Get model information
+     */
+    _tool_model_info(request: oRequest): Promise<ToolResult>;
+    /**
+     * Check Ollama server status
+     */
+    _tool_status(request: oRequest): Promise<ToolResult>;
+}
+//# sourceMappingURL=ollama-intelligence.tool.d.ts.map
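
The declaration above advertises the Ollama tool's method surface. A hedged sketch of a pull-then-generate sequence against an already-constructed instance; the parameter names come from the `pull_model` and `generate` entries in `INTELLIGENCE_PARAMS`, while the `prompt`/`response` fields mirror the Gemini implementation earlier in this diff and may differ in the Ollama method bodies, which are not shown here:

// Hypothetical sequence against an OllamaIntelligenceTool instance `ollama`.
// Assumption: the result shape is { success, error, response, ... } as in the
// Gemini tool above; the Ollama bodies are not included in this hunk.
async function refreshAndGenerate(ollama) {
  const pulled = await ollama._tool_pull_model({
    params: { model: 'llama3', insecure: false },
  });
  if (!pulled.success) {
    return pulled;
  }
  return ollama._tool_generate({
    params: { model: 'llama3', prompt: 'Summarize this package in one line.' },
  });
}
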
@@ -0,0 +1 @@
+{"version":3,"file":"ollama-intelligence.tool.d.ts","sourceRoot":"","sources":["../../../src/intelligence/ollama-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAgB,MAAM,eAAe,CAAC;AACjE,OAAO,EAAS,WAAW,EAAE,YAAY,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AA+G7E,qBAAa,sBAAuB,SAAQ,YAAY;IACtD,MAAM,CAAC,YAAY,SAAoB;IACvC,MAAM,CAAC,UAAU,SAA4B;gBAEjC,MAAM,EAAE,WAAW;IAY/B;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4D9D;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAgE5D;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAmC/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiE9D;;OAEG;IACG,kBAAkB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA6ChE;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAwC9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAyB3D"}