@olane/o-tool-registry 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/apple/apple.tool.d.ts +16 -16
- package/dist/apple/apple.tool.d.ts.map +1 -1
- package/dist/apple/apple.tool.js +8 -5
- package/dist/apple/index.d.ts +1 -0
- package/dist/apple/index.js +2 -1
- package/dist/apple/notes.tool.d.ts +16 -16
- package/dist/apple/notes.tool.d.ts.map +1 -1
- package/dist/apple/notes.tool.js +36 -9
- package/dist/auth/index.js +19 -4
- package/dist/auth/interfaces/index.js +19 -4
- package/dist/auth/interfaces/oAuth-tokens.interface.js +2 -2
- package/dist/auth/interfaces/oAuth-user-info.interface.js +2 -2
- package/dist/auth/interfaces/oAuth.config.js +2 -2
- package/dist/auth/methods/auth.methods.js +4 -2
- package/dist/auth/oAuth.tool.js +23 -10
- package/dist/embeddings/embeddings.tool.d.ts +16 -16
- package/dist/embeddings/embeddings.tool.d.ts.map +1 -1
- package/dist/embeddings/embeddings.tool.js +8 -5
- package/dist/embeddings/huggingface-text-embeddings.tool.d.ts.map +1 -1
- package/dist/embeddings/huggingface-text-embeddings.tool.js +9 -6
- package/dist/embeddings/index.js +19 -4
- package/dist/embeddings/methods/text-embeddings.method.js +4 -2
- package/dist/embeddings/text-embeddings.tool.d.ts +16 -16
- package/dist/embeddings/text-embeddings.tool.d.ts.map +1 -1
- package/dist/embeddings/text-embeddings.tool.js +10 -7
- package/dist/index.js +22 -7
- package/dist/init.d.ts.map +1 -1
- package/dist/init.js +15 -12
- package/dist/intelligence/anthropic-intelligence.tool.d.ts +46 -16
- package/dist/intelligence/anthropic-intelligence.tool.d.ts.map +1 -1
- package/dist/intelligence/anthropic-intelligence.tool.js +45 -11
- package/dist/intelligence/gemini-intelligence.tool.d.ts +31 -16
- package/dist/intelligence/gemini-intelligence.tool.d.ts.map +1 -1
- package/dist/intelligence/gemini-intelligence.tool.js +31 -11
- package/dist/intelligence/index.js +21 -6
- package/dist/intelligence/intelligence.tool.d.ts +16 -16
- package/dist/intelligence/intelligence.tool.d.ts.map +1 -1
- package/dist/intelligence/intelligence.tool.js +26 -18
- package/dist/intelligence/methods/intelligence.methods.js +4 -2
- package/dist/intelligence/ollama-intelligence.tool.d.ts +37 -16
- package/dist/intelligence/ollama-intelligence.tool.d.ts.map +1 -1
- package/dist/intelligence/ollama-intelligence.tool.js +39 -11
- package/dist/intelligence/openai-intelligence.tool.d.ts +31 -16
- package/dist/intelligence/openai-intelligence.tool.d.ts.map +1 -1
- package/dist/intelligence/openai-intelligence.tool.js +32 -11
- package/dist/intelligence/perplexity-intelligence.tool.d.ts +31 -16
- package/dist/intelligence/perplexity-intelligence.tool.d.ts.map +1 -1
- package/dist/intelligence/perplexity-intelligence.tool.js +33 -11
- package/dist/mcp/index.js +18 -3
- package/dist/mcp/mcp-bridge.tool.d.ts +16 -16
- package/dist/mcp/mcp-bridge.tool.d.ts.map +1 -1
- package/dist/mcp/mcp-bridge.tool.js +36 -19
- package/dist/mcp/mcp.tool.d.ts +14 -14
- package/dist/mcp/mcp.tool.d.ts.map +1 -1
- package/dist/mcp/mcp.tool.js +15 -7
- package/dist/mcp/methods/mcp-bridge.methods.js +4 -2
- package/dist/nlp/index.js +17 -2
- package/dist/nlp/methods/nlp.methods.js +4 -2
- package/dist/nlp/ner.tool.d.ts +16 -16
- package/dist/nlp/ner.tool.d.ts.map +1 -1
- package/dist/nlp/ner.tool.js +11 -8
- package/dist/vector-store/index.js +18 -3
- package/dist/vector-store/langchain-memory.vector-store.tool.d.ts.map +1 -1
- package/dist/vector-store/langchain-memory.vector-store.tool.js +13 -10
- package/dist/vector-store/methods/vector-store.methods.js +4 -2
- package/dist/vector-store/vector-memory.tool.d.ts +16 -16
- package/dist/vector-store/vector-memory.tool.d.ts.map +1 -1
- package/dist/vector-store/vector-memory.tool.js +10 -7
- package/package.json +14 -21
- package/dist/apple/apple.tool.js.map +0 -1
- package/dist/apple/index.js.map +0 -1
- package/dist/apple/notes.tool.js.map +0 -1
- package/dist/auth/index.js.map +0 -1
- package/dist/auth/interfaces/index.js.map +0 -1
- package/dist/auth/interfaces/oAuth-tokens.interface.js.map +0 -1
- package/dist/auth/interfaces/oAuth-user-info.interface.js.map +0 -1
- package/dist/auth/interfaces/oAuth.config.js.map +0 -1
- package/dist/auth/methods/auth.methods.js.map +0 -1
- package/dist/auth/oAuth.tool.js.map +0 -1
- package/dist/embeddings/embeddings.tool.js.map +0 -1
- package/dist/embeddings/huggingface-text-embeddings.tool.js.map +0 -1
- package/dist/embeddings/index.js.map +0 -1
- package/dist/embeddings/methods/text-embeddings.method.js.map +0 -1
- package/dist/embeddings/text-embeddings.tool.js.map +0 -1
- package/dist/index.js.map +0 -1
- package/dist/init.js.map +0 -1
- package/dist/intelligence/anthropic-intelligence.tool.js.map +0 -1
- package/dist/intelligence/gemini-intelligence.tool.js.map +0 -1
- package/dist/intelligence/index.js.map +0 -1
- package/dist/intelligence/intelligence.tool.js.map +0 -1
- package/dist/intelligence/methods/intelligence.methods.js.map +0 -1
- package/dist/intelligence/ollama-intelligence.tool.js.map +0 -1
- package/dist/intelligence/openai-intelligence.tool.js.map +0 -1
- package/dist/intelligence/perplexity-intelligence.tool.js.map +0 -1
- package/dist/mcp/index.js.map +0 -1
- package/dist/mcp/mcp-bridge.tool.js.map +0 -1
- package/dist/mcp/mcp.tool.js.map +0 -1
- package/dist/mcp/methods/mcp-bridge.methods.js.map +0 -1
- package/dist/nlp/index.js.map +0 -1
- package/dist/nlp/methods/nlp.methods.js.map +0 -1
- package/dist/nlp/ner.tool.js.map +0 -1
- package/dist/tsconfig.tsbuildinfo +0 -1
- package/dist/vector-store/index.js.map +0 -1
- package/dist/vector-store/langchain-memory.vector-store.tool.js.map +0 -1
- package/dist/vector-store/methods/vector-store.methods.js.map +0 -1
- package/dist/vector-store/vector-memory.tool.js.map +0 -1
package/dist/intelligence/gemini-intelligence.tool.js

@@ -1,19 +1,25 @@
-
-
-
-
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GeminiIntelligenceTool = void 0;
+const o_core_1 = require("@olane/o-core");
+const o_tool_1 = require("@olane/o-tool");
+const intelligence_methods_1 = require("./methods/intelligence.methods");
+class GeminiIntelligenceTool extends (0, o_tool_1.oTool)(o_core_1.oVirtualNode) {
     apiKey;
     baseUrl;
     defaultModel;
     constructor(config) {
         super({
             ...config,
-            address: new oAddress('o://gemini'),
+            address: new o_core_1.oAddress('o://gemini'),
             description: 'Intelligence tool using Google Gemini suite of models',
-            methods: INTELLIGENCE_PARAMS,
+            methods: intelligence_methods_1.INTELLIGENCE_PARAMS,
             dependencies: [],
         });
     }
+    /**
+     * Chat completion with Gemini
+     */
     async _tool_completion(request) {
         try {
             const params = request.params;
@@ -30,6 +36,7 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
                     error: '"messages" array is required',
                 };
             }
+            // Convert messages to Gemini format
             const contents = messages.map((msg) => ({
                 role: msg.role === 'assistant' ? 'model' : 'user',
                 parts: [{ text: msg.content }],
@@ -59,7 +66,7 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Gemini API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             if (!result.candidates || result.candidates.length === 0) {
                 return {
                     success: false,
@@ -82,6 +89,9 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Generate text with Gemini
+     */
     async _tool_generate(request) {
         try {
             const params = request.params;
@@ -98,6 +108,7 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
                     error: 'Prompt is required',
                 };
             }
+            // Combine system and user prompt
             const fullPrompt = system ? `${system}\n\n${prompt}` : prompt;
             const generateRequest = {
                 contents: [
@@ -128,7 +139,7 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Gemini API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             if (!result.candidates || result.candidates.length === 0) {
                 return {
                     success: false,
@@ -151,6 +162,9 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * List available models
+     */
     async _tool_list_models(request) {
         try {
             if (!this.apiKey) {
@@ -172,7 +186,7 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Gemini API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             return {
                 success: true,
                 models: result.models,
@@ -185,6 +199,9 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Get model information
+     */
     async _tool_model_info(request) {
         try {
             const params = request.params;
@@ -208,7 +225,7 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Gemini API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             return {
                 success: true,
                 model_info: result,
@@ -221,6 +238,9 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Check Gemini API status
+     */
     async _tool_status(request) {
         try {
             if (!this.apiKey) {
@@ -251,4 +271,4 @@ export class GeminiIntelligenceTool extends oTool(oVirtualNode) {
         }
     }
 }
-
+exports.GeminiIntelligenceTool = GeminiIntelligenceTool;
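The substantive change above is the move from the ESM output referenced in the hunk headers (`export class GeminiIntelligenceTool extends oTool(oVirtualNode)`) to a CommonJS emit, with `oAddress` and `INTELLIGENCE_PARAMS` now resolved through `require()` namespaces. As a rough illustration of the call surface this file exposes, the sketch below exercises the completion method directly; the config object, the request shape, and the root-barrel re-export are assumptions, since only `request.params` usage is visible in this diff.

```ts
// Hypothetical usage sketch (not from the package docs).
// Assumes the root barrel re-exports GeminiIntelligenceTool and that an
// oRequest can be approximated by a plain object carrying `params`.
import { GeminiIntelligenceTool } from '@olane/o-tool-registry';

const gemini = new GeminiIntelligenceTool({} as any); // real oToolConfig fields are not shown in this diff

const result = await gemini._tool_completion({
  params: {
    messages: [{ role: 'user', content: 'Hello, Gemini' }],
  },
} as any);

// Per the compiled code, failures come back as { success: false, error: string }
// rather than thrown exceptions, so callers branch on the returned object.
console.log(result);
```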
package/dist/intelligence/index.js

@@ -1,6 +1,21 @@
-
-
-
-
-
-
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./ollama-intelligence.tool"), exports);
+__exportStar(require("./openai-intelligence.tool"), exports);
+__exportStar(require("./anthropic-intelligence.tool"), exports);
+__exportStar(require("./gemini-intelligence.tool"), exports);
+__exportStar(require("./intelligence.tool"), exports);
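The `__createBinding`/`__exportStar` helpers above are TypeScript's standard CommonJS emit for `export * from '...'` barrels, so the re-exported tool classes become enumerable properties of `module.exports`. A minimal sketch, assuming the package's root entry in turn re-exports this folder (that wiring is not shown in this diff):

```ts
// Sketch only: with the CommonJS emit shown above, the barrel's names are
// reachable with a plain require(). Assumes the root index re-exports
// dist/intelligence and that "main" points at the dist/ output.
const registry = require('@olane/o-tool-registry');

console.log(Object.keys(registry)); // expect GeminiIntelligenceTool, OllamaIntelligenceTool, ...
```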
package/dist/intelligence/intelligence.tool.d.ts

@@ -8,13 +8,13 @@ declare const IntelligenceTool_base: (new (...args: any[]) => {
     initialize(): Promise<void>;
     use(address: oAddress, data: {
         [key: string]: unknown;
-    }, config?: import("@olane/o-core").UseOptions): Promise<oResponse>;
-    handleStream(streamData: import("@
-    execute(req: oRequest, stream?: import("@
-    run(request: oRequest, stream?: import("@
+    }, config?: import("@olane/o-core").UseOptions | undefined): Promise<oResponse>;
+    handleStream(streamData: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).IncomingStreamData): Promise<void>;
+    execute(req: oRequest, stream?: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).Stream | undefined): Promise<import("@olane/o-tool").RunResult>;
+    run(request: oRequest, stream?: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).Stream | undefined): Promise<import("@olane/o-tool").RunResult>;
     myTools(): string[];
     myToolParams(tool: string): Record<string, any>;
-    callMyTool(request: oRequest, stream?: import("@
+    callMyTool(request: oRequest, stream?: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).Stream | undefined): Promise<ToolResult>;
     index(): Promise<{
         provider: string;
         summary: string;
@@ -27,31 +27,31 @@ declare const IntelligenceTool_base: (new (...args: any[]) => {
         tools: string[];
         description: string;
     }>;
-    findMissingParams(methodName: string, params: any): import("@olane/o-protocol
+    findMissingParams(methodName: string, params: any): import("@olane/o-protocol").oParameter[];
     readonly config: import("@olane/o-core").CoreConfig;
-    p2pNode: import("@
+    p2pNode: import("libp2p", { with: { "resolution-mode": "import" } }).Libp2p<import("@libp2p/interface", { with: { "resolution-mode": "import" } }).ServiceMap>;
     logger: import("@olane/o-core").Logger;
-    networkConfig: import("@olane/o-config
+    networkConfig: import("@olane/o-config").Libp2pConfig;
     address: oAddress;
     readonly staticAddress: oAddress;
-    peerId: import("@
+    peerId: import("@libp2p/interface-peer-id", { with: { "resolution-mode": "import" } }).PeerId;
     state: import("@olane/o-core").NodeState;
     errors: Error[];
     connectionManager: import("@olane/o-core").oConnectionManager;
-    leaders: import("@
+    leaders: import("@multiformats/multiaddr", { with: { "resolution-mode": "import" } }).Multiaddr[];
     addressResolution: import("@olane/o-core").oAddressResolution;
     readonly description: string;
     dependencies: import("@olane/o-core").oDependency[];
     methods: {
-        [key: string]: import("@olane/o-protocol
+        [key: string]: import("@olane/o-protocol").oMethod;
     };
     successCount: number;
     errorCount: number;
-
-
-
-
-    getTransports(address: oAddress): import("@
+    readonly type: import("@olane/o-core").NodeType;
+    readonly transports: string[];
+    readonly parent: oAddress | null;
+    readonly parentTransports: import("@multiformats/multiaddr", { with: { "resolution-mode": "import" } }).Multiaddr[];
+    getTransports(address: oAddress): import("@multiformats/multiaddr", { with: { "resolution-mode": "import" } }).Multiaddr[];
     handleStaticAddressTranslation(addressInput: oAddress): Promise<oAddress>;
     translateAddress(addressWithLeaderTransports: oAddress): Promise<{
         nextHopAddress: oAddress;
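The declaration changes above follow one pattern: inherited members whose old types referenced `import(...)` expressions (cut off in this rendering) are regenerated with explicit import attributes, `import("pkg", { with: { "resolution-mode": "import" } })`, and optional parameters gain an explicit `| undefined`. The attribute form is how a declaration file that is otherwise resolved as CommonJS can still name types from ESM-only packages such as libp2p. An illustrative fragment, not taken from the package, using the same syntax:

```ts
// Illustrative only - the same import-attribute syntax (TypeScript 5.3+) used
// in the regenerated declarations: resolve "@libp2p/interface" as an ES module
// even when the surrounding .d.ts is treated as CommonJS.
declare function onIncomingStream(
  data: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).IncomingStreamData
): Promise<void>;
```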
package/dist/intelligence/intelligence.tool.d.ts.map

@@ -1 +1 @@
-{"version":3,"file":"intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/intelligence/intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAS,WAAW,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,QAAQ,EAAE,SAAS,EAAE,YAAY,EAAE,MAAM,eAAe,CAAC;AAClE,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AACzC,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC
+{"version":3,"file":"intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/intelligence/intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAS,WAAW,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,QAAQ,EAAE,SAAS,EAAE,YAAY,EAAE,MAAM,eAAe,CAAC;AAClE,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AACzC,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAM3C,qBAAa,gBAAiB,SAAQ,qBAAmB;IACvD,OAAO,CAAC,eAAe,CAAK;gBAChB,MAAM,EAAE,WAAW;IAkDzB,kBAAkB,IAAI,OAAO,CAAC,UAAU,CAAC;IA2BzC,kBAAkB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAwB1D,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAkB3D"}
package/dist/intelligence/intelligence.tool.js

@@ -1,15 +1,18 @@
-
-
-
-
-
-
-
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.IntelligenceTool = void 0;
+const o_tool_1 = require("@olane/o-tool");
+const o_core_1 = require("@olane/o-core");
+const anthropic_intelligence_tool_1 = require("./anthropic-intelligence.tool");
+const openai_intelligence_tool_1 = require("./openai-intelligence.tool");
+const ollama_intelligence_tool_1 = require("./ollama-intelligence.tool");
+const perplexity_intelligence_tool_1 = require("./perplexity-intelligence.tool");
+class IntelligenceTool extends (0, o_tool_1.oTool)(o_core_1.oVirtualNode) {
     roundRobinIndex = 0;
     constructor(config) {
         super({
             ...config,
-            address: new oAddress('o://intelligence'),
+            address: new o_core_1.oAddress('o://intelligence'),
             description: config.description ||
                 'Tool to help route LLM requests to the best intelligence tool',
             dependencies: [
@@ -25,38 +28,40 @@ export class IntelligenceTool extends oTool(oVirtualNode) {
                 },
             ],
         });
-        this.addChildNode(new AnthropicIntelligenceTool({
+        this.addChildNode(new anthropic_intelligence_tool_1.AnthropicIntelligenceTool({
            ...config,
            parent: null,
            leader: null,
        }));
-        this.addChildNode(new OpenAIIntelligenceTool({
+        this.addChildNode(new openai_intelligence_tool_1.OpenAIIntelligenceTool({
            ...config,
            parent: null,
            leader: null,
        }));
-        this.addChildNode(new OllamaIntelligenceTool({
+        this.addChildNode(new ollama_intelligence_tool_1.OllamaIntelligenceTool({
            ...config,
            parent: null,
            leader: null,
        }));
-        this.addChildNode(new PerplexityIntelligenceTool({
+        this.addChildNode(new perplexity_intelligence_tool_1.PerplexityIntelligenceTool({
            ...config,
            parent: null,
            leader: null,
        }));
     }
     async requestMissingData() {
+        // if the anthropic key is not in the vault, ask the human
         this.logger.info('Anthropic API key not found in vault, asking human');
-        const humanResponse = await this.use(new oAddress('o://human'), {
+        const humanResponse = await this.use(new o_core_1.oAddress('o://human'), {
             method: 'question',
             params: {
                 question: 'Enter the anthropic api key',
             },
         });
+        // process the human response
         const { answer } = humanResponse.result.data;
         this.logger.info('Human answer: ', answer);
-        await this.use(new oAddress('o://memory'), {
+        await this.use(new o_core_1.oAddress('o://memory'), {
             method: 'put',
             params: {
                 key: 'anthropic-api-key',
@@ -69,12 +74,14 @@ export class IntelligenceTool extends oTool(oVirtualNode) {
         };
     }
     async chooseIntelligence(request) {
-
+        // check to see if anthropic key is in vault
+        const response = await this.use(new o_core_1.oAddress('o://memory'), {
             method: 'get',
             params: {
                 key: 'anthropic-api-key',
             },
         });
+        // if the anthropic key is in the vault, use it
         if (response.result.data) {
             const { value } = response.result.data;
             if (value) {
@@ -87,13 +94,14 @@ export class IntelligenceTool extends oTool(oVirtualNode) {
         const result = await this.requestMissingData();
         return result;
     }
+    // we cannot wrap this tool use in a plan because it is a core dependency in all planning
     async _tool_prompt(request) {
         const { prompt } = request.params;
         const intelligence = await this.chooseIntelligence(request);
-        const response = await this.use(new oAddress(intelligence.choice), {
+        const response = await this.use(new o_core_1.oAddress(intelligence.choice), {
             method: 'completion',
             params: {
-                model: 'claude-
+                model: 'claude-sonnet-4-20250514',
                 apiKey: intelligence.apiKey,
                 messages: [
                     {
@@ -106,4 +114,4 @@ export class IntelligenceTool extends oTool(oVirtualNode) {
         return response.result.data;
     }
 }
-
+exports.IntelligenceTool = IntelligenceTool;
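Functionally, the hunks above show the routing logic unchanged apart from the CommonJS namespaces and a pinned model id: `chooseIntelligence` looks up `anthropic-api-key` in `o://memory`, falls back to asking `o://human`, and `_tool_prompt` then forwards a `completion` call to the chosen provider with `claude-sonnet-4-20250514`. Below is a hedged caller sketch mirroring the same `use(new oAddress(...), { method, params })` pattern the file itself relies on; the existence of a connected `node` with this API and a registered `o://intelligence` address are assumptions, not shown in this diff.

```ts
// Hypothetical caller sketch; `node` stands in for any connected Olane node.
import { oAddress } from '@olane/o-core';

declare const node: {
  use(addr: oAddress, req: { method: string; params: Record<string, unknown> }): Promise<any>;
};

const response = await node.use(new oAddress('o://intelligence'), {
  method: 'prompt',
  params: { prompt: 'Summarize the 0.1.1 changes in one sentence.' },
});

// Per the compiled code, _tool_prompt returns response.result.data from the
// selected provider's completion call (Anthropic when a key is found in o://memory).
console.log(response);
```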
package/dist/intelligence/methods/intelligence.methods.js

@@ -1,4 +1,7 @@
-
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.INTELLIGENCE_PARAMS = void 0;
+exports.INTELLIGENCE_PARAMS = {
     completion: {
         name: 'completion',
         description: 'Completion',
@@ -96,4 +99,3 @@ export const INTELLIGENCE_PARAMS = {
         parameters: [],
     },
 };
-//# sourceMappingURL=intelligence.methods.js.map
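`INTELLIGENCE_PARAMS` is the method manifest the provider tools pass as `methods:` in their constructors; the visible entries are plain descriptors such as `completion: { name: 'completion', description: 'Completion', ..., parameters: [] }`. A small sketch of reading it from the CommonJS build; the deep `dist/...` require path is an assumption, since only the export name appears in this diff:

```ts
// Sketch: inspecting the shared method manifest from the CJS output.
// The deep path below is assumed and may be blocked by the package's "exports" map.
const { INTELLIGENCE_PARAMS } = require('@olane/o-tool-registry/dist/intelligence/methods/intelligence.methods');

console.log(INTELLIGENCE_PARAMS.completion.name);        // 'completion'
console.log(INTELLIGENCE_PARAMS.completion.description); // 'Completion'
```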
package/dist/intelligence/ollama-intelligence.tool.d.ts

@@ -6,13 +6,13 @@ declare const OllamaIntelligenceTool_base: (new (...args: any[]) => {
     initialize(): Promise<void>;
     use(address: oAddress, data: {
         [key: string]: unknown;
-    }, config?: import("@olane/o-core").UseOptions): Promise<import("@olane/o-core").oResponse>;
-    handleStream(streamData: import("@
-    execute(req: oRequest, stream?: import("@
-    run(request: oRequest, stream?: import("@
+    }, config?: import("@olane/o-core").UseOptions | undefined): Promise<import("@olane/o-core").oResponse>;
+    handleStream(streamData: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).IncomingStreamData): Promise<void>;
+    execute(req: oRequest, stream?: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).Stream | undefined): Promise<import("@olane/o-tool").RunResult>;
+    run(request: oRequest, stream?: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).Stream | undefined): Promise<import("@olane/o-tool").RunResult>;
     myTools(): string[];
     myToolParams(tool: string): Record<string, any>;
-    callMyTool(request: oRequest, stream?: import("@
+    callMyTool(request: oRequest, stream?: import("@libp2p/interface", { with: { "resolution-mode": "import" } }).Stream | undefined): Promise<ToolResult>;
     index(): Promise<{
         provider: string;
         summary: string;
@@ -25,31 +25,31 @@ declare const OllamaIntelligenceTool_base: (new (...args: any[]) => {
         tools: string[];
         description: string;
     }>;
-    findMissingParams(methodName: string, params: any): import("@olane/o-protocol
+    findMissingParams(methodName: string, params: any): import("@olane/o-protocol").oParameter[];
     readonly config: import("@olane/o-core").CoreConfig;
-    p2pNode: import("@
+    p2pNode: import("libp2p", { with: { "resolution-mode": "import" } }).Libp2p<import("@libp2p/interface", { with: { "resolution-mode": "import" } }).ServiceMap>;
     logger: import("@olane/o-core").Logger;
-    networkConfig: import("@olane/o-config
+    networkConfig: import("@olane/o-config").Libp2pConfig;
     address: oAddress;
     readonly staticAddress: oAddress;
-    peerId: import("@
+    peerId: import("@libp2p/interface-peer-id", { with: { "resolution-mode": "import" } }).PeerId;
     state: import("@olane/o-core").NodeState;
     errors: Error[];
     connectionManager: import("@olane/o-core").oConnectionManager;
-    leaders: import("@
+    leaders: import("@multiformats/multiaddr", { with: { "resolution-mode": "import" } }).Multiaddr[];
     addressResolution: import("@olane/o-core").oAddressResolution;
     readonly description: string;
     dependencies: import("@olane/o-core").oDependency[];
     methods: {
-        [key: string]: import("@olane/o-protocol
+        [key: string]: import("@olane/o-protocol").oMethod;
     };
     successCount: number;
     errorCount: number;
-
-
-
-
-    getTransports(address: oAddress): import("@
+    readonly type: import("@olane/o-core").NodeType;
+    readonly transports: string[];
+    readonly parent: oAddress | null;
+    readonly parentTransports: import("@multiformats/multiaddr", { with: { "resolution-mode": "import" } }).Multiaddr[];
+    getTransports(address: oAddress): import("@multiformats/multiaddr", { with: { "resolution-mode": "import" } }).Multiaddr[];
     handleStaticAddressTranslation(addressInput: oAddress): Promise<oAddress>;
     translateAddress(addressWithLeaderTransports: oAddress): Promise<{
         nextHopAddress: oAddress;
@@ -67,12 +67,33 @@ export declare class OllamaIntelligenceTool extends OllamaIntelligenceTool_base
     static defaultModel: string;
     static defaultUrl: string;
     constructor(config: oToolConfig);
+    /**
+     * Chat completion with Ollama
+     */
     _tool_completion(request: oRequest): Promise<ToolResult>;
+    /**
+     * Generate text with Ollama
+     */
     _tool_generate(request: oRequest): Promise<ToolResult>;
+    /**
+     * List available models
+     */
     _tool_list_models(request: oRequest): Promise<ToolResult>;
+    /**
+     * Pull a model from Ollama library
+     */
     _tool_pull_model(request: oRequest): Promise<ToolResult>;
+    /**
+     * Delete a model
+     */
     _tool_delete_model(request: oRequest): Promise<ToolResult>;
+    /**
+     * Get model information
+     */
     _tool_model_info(request: oRequest): Promise<ToolResult>;
+    /**
+     * Check Ollama server status
+     */
     _tool_status(request: oRequest): Promise<ToolResult>;
 }
 export {};
package/dist/intelligence/ollama-intelligence.tool.d.ts.map

@@ -1 +1 @@
-{"version":3,"file":"ollama-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/intelligence/ollama-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,QAAQ,EAAE,YAAY,EAAE,MAAM,eAAe,CAAC;AACjE,OAAO,EAAS,WAAW,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC
+{"version":3,"file":"ollama-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/intelligence/ollama-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,QAAQ,EAAE,YAAY,EAAE,MAAM,eAAe,CAAC;AACjE,OAAO,EAAS,WAAW,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+G/D,qBAAa,sBAAuB,SAAQ,2BAAmB;IAC7D,MAAM,CAAC,YAAY,SAAqB;IACxC,MAAM,CAAC,UAAU,SAA4B;gBAEjC,MAAM,EAAE,WAAW;IAY/B;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4D9D;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAgE5D;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAmC/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiE9D;;OAEG;IACG,kBAAkB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA6ChE;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAwC9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAyB3D"}
package/dist/intelligence/ollama-intelligence.tool.js

@@ -1,22 +1,31 @@
-
-
-
-
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OllamaIntelligenceTool = void 0;
+const o_core_1 = require("@olane/o-core");
+const o_tool_1 = require("@olane/o-tool");
+const intelligence_methods_1 = require("./methods/intelligence.methods");
+class OllamaIntelligenceTool extends (0, o_tool_1.oTool)(o_core_1.oVirtualNode) {
     static defaultModel = 'llama3.2:latest';
     static defaultUrl = 'http://localhost:11434';
     constructor(config) {
         super({
             ...config,
-            address: new oAddress('o://ollama'),
+            address: new o_core_1.oAddress('o://ollama'),
             description: 'Intelligence tool using Ollama LLM suite of models',
-            methods: INTELLIGENCE_PARAMS,
+            methods: intelligence_methods_1.INTELLIGENCE_PARAMS,
             dependencies: [],
         });
+        // this.baseUrl = config.ollamaUrl || 'http://localhost:11434';
+        // this.defaultModel = config.defaultModel || 'llama2';
     }
+    /**
+     * Chat completion with Ollama
+     */
     async _tool_completion(request) {
         try {
             const params = request.params;
             const { model = OllamaIntelligenceTool.defaultModel, messages, options = {}, } = params;
+            // let's validate the params and ask for ones that are missing
             if (!messages || !Array.isArray(messages)) {
                 return {
                     success: false,
@@ -43,7 +52,7 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Ollama API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             return {
                 message: result.message.content,
                 model: result.model,
@@ -59,6 +68,9 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Generate text with Ollama
+     */
     async _tool_generate(request) {
         try {
             const params = request.params;
@@ -90,7 +102,7 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Ollama API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             return {
                 success: true,
                 response: result.response,
@@ -108,6 +120,9 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * List available models
+     */
     async _tool_list_models(request) {
         try {
             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/tags`, {
@@ -123,7 +138,7 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Ollama API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             return {
                 success: true,
                 models: result.models,
@@ -136,6 +151,9 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Pull a model from Ollama library
+     */
     async _tool_pull_model(request) {
         try {
             const params = request.params;
@@ -163,6 +181,7 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Ollama API error: ${response.status} - ${errorText}`,
                 };
             }
+            // For pull operations, we need to handle streaming response
             const reader = response.body?.getReader();
             if (!reader) {
                 return {
@@ -191,6 +210,9 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Delete a model
+     */
     async _tool_delete_model(request) {
         try {
             const params = request.params;
@@ -229,6 +251,9 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Get model information
+     */
     async _tool_model_info(request) {
         try {
             const params = request.params;
@@ -249,7 +274,7 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
                     error: `Ollama API error: ${response.status} - ${errorText}`,
                 };
             }
-            const result = await response.json();
+            const result = (await response.json());
             return {
                 success: true,
                 model_info: result,
@@ -262,6 +287,9 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
             };
         }
     }
+    /**
+     * Check Ollama server status
+     */
     async _tool_status(request) {
         try {
             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/tags`, {
@@ -285,4 +313,4 @@ export class OllamaIntelligenceTool extends oTool(oVirtualNode) {
         }
     }
 }
-
+exports.OllamaIntelligenceTool = OllamaIntelligenceTool;
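The Ollama tool keeps its hard-coded defaults (`llama3.2:latest`, `http://localhost:11434`) and gains the same `(await response.json())` casts and JSDoc comments as the other providers. As a final hedged sketch, here is one way the completion method could be exercised against a locally running Ollama server; the config object and request shape are assumptions, as above.

```ts
// Hypothetical sketch only; requires a local Ollama server on the default URL.
import { OllamaIntelligenceTool } from '@olane/o-tool-registry';

const ollama = new OllamaIntelligenceTool({} as any); // oToolConfig fields not shown in this diff

const chat = await ollama._tool_completion({
  params: {
    model: OllamaIntelligenceTool.defaultModel, // 'llama3.2:latest'
    messages: [{ role: 'user', content: 'ping' }],
  },
} as any);

// On success the compiled code returns { message, model, ... } built from the
// Ollama response; on failure it returns { success: false, error }.
console.log(chat);
```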