@olane/o-intelligence 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +11 -0
  2. package/dist/src/anthropic-intelligence.tool.d.ts +224 -0
  3. package/dist/src/anthropic-intelligence.tool.d.ts.map +1 -0
  4. package/dist/src/anthropic-intelligence.tool.js +477 -0
  5. package/dist/src/enums/intelligence-storage-keys.enum.d.ts +5 -0
  6. package/dist/src/enums/intelligence-storage-keys.enum.d.ts.map +1 -0
  7. package/dist/src/enums/intelligence-storage-keys.enum.js +5 -0
  8. package/dist/src/enums/llm-providers.enum.d.ts +8 -0
  9. package/dist/src/enums/llm-providers.enum.d.ts.map +1 -0
  10. package/dist/src/enums/llm-providers.enum.js +8 -0
  11. package/dist/src/gemini-intelligence.tool.d.ts +29 -0
  12. package/dist/src/gemini-intelligence.tool.d.ts.map +1 -0
  13. package/dist/src/gemini-intelligence.tool.js +267 -0
  14. package/dist/src/grok-intelligence.tool.d.ts +13 -0
  15. package/dist/src/grok-intelligence.tool.d.ts.map +1 -0
  16. package/dist/src/grok-intelligence.tool.js +214 -0
  17. package/dist/src/index.d.ts +7 -0
  18. package/dist/src/index.d.ts.map +1 -0
  19. package/dist/src/index.js +6 -0
  20. package/dist/src/intelligence.tool.d.ts +21 -0
  21. package/dist/src/intelligence.tool.d.ts.map +1 -0
  22. package/dist/src/intelligence.tool.js +201 -0
  23. package/dist/src/methods/intelligence.methods.d.ts +5 -0
  24. package/dist/src/methods/intelligence.methods.d.ts.map +1 -0
  25. package/dist/src/methods/intelligence.methods.js +132 -0
  26. package/dist/src/ollama-intelligence.tool.d.ts +36 -0
  27. package/dist/src/ollama-intelligence.tool.d.ts.map +1 -0
  28. package/dist/src/ollama-intelligence.tool.js +312 -0
  29. package/dist/src/openai-intelligence.tool.d.ts +29 -0
  30. package/dist/src/openai-intelligence.tool.d.ts.map +1 -0
  31. package/dist/src/openai-intelligence.tool.js +261 -0
  32. package/dist/src/perplexity-intelligence.tool.d.ts +28 -0
  33. package/dist/src/perplexity-intelligence.tool.d.ts.map +1 -0
  34. package/dist/src/perplexity-intelligence.tool.js +310 -0
  35. package/dist/test/basic.spec.d.ts +1 -0
  36. package/dist/test/basic.spec.d.ts.map +1 -0
  37. package/dist/test/basic.spec.js +1 -0
  38. package/package.json +70 -0
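The first hunk below is the compiled router, package/dist/src/intelligence.tool.js. As a point of reference before reading it, here is a minimal sketch of how another olane node might invoke the tool once it is running. The o://intelligence address, the prompt method, and the use(address, { method, params }) call shape all come from the diff itself; the caller class is hypothetical, and how a real node is wired into a network is not documented by this package.

    import { oAddress } from '@olane/o-core';
    import { oVirtualTool } from '@olane/o-tool';

    // Hypothetical caller: a sketch assuming oVirtualTool's `use` helper,
    // which the compiled code below relies on throughout.
    class ExampleCaller extends oVirtualTool {
      async askIntelligence(prompt: string) {
        // `o://intelligence` and the `prompt` method appear in the diff below.
        const response = await this.use(new oAddress('o://intelligence'), {
          method: 'prompt',
          params: { prompt },
        });
        return response.result.data;
      }
    }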
package/dist/src/intelligence.tool.js
@@ -0,0 +1,201 @@
+ import { oVirtualTool } from '@olane/o-tool';
+ import { oAddress } from '@olane/o-core';
+ import { AnthropicIntelligenceTool } from './anthropic-intelligence.tool.js';
+ import { OpenAIIntelligenceTool } from './openai-intelligence.tool.js';
+ import { OllamaIntelligenceTool } from './ollama-intelligence.tool.js';
+ import { PerplexityIntelligenceTool } from './perplexity-intelligence.tool.js';
+ import { GrokIntelligenceTool } from './grok-intelligence.tool.js';
+ import { INTELLIGENCE_PARAMS } from './methods/intelligence.methods.js';
+ import { IntelligenceStorageKeys } from './enums/intelligence-storage-keys.enum.js';
+ import { LLMProviders } from './enums/llm-providers.enum.js';
+ export class IntelligenceTool extends oVirtualTool {
+     constructor(config) {
+         super({
+             ...config,
+             address: new oAddress('o://intelligence'),
+             methods: INTELLIGENCE_PARAMS,
+             description: config.description ||
+                 'Tool to help route LLM requests to the best intelligence tool',
+             dependencies: [
+                 {
+                     address: 'o://setup',
+                     parameters: [
+                         {
+                             name: 'intelligence',
+                             type: 'string',
+                             description: 'The intelligence tool to use',
+                         },
+                     ],
+                 },
+             ],
+         });
+         this.roundRobinIndex = 0;
+         this.addChildNode(new AnthropicIntelligenceTool({
+             ...config,
+             parent: null,
+             leader: null,
+         }));
+         this.addChildNode(new OpenAIIntelligenceTool({
+             ...config,
+             parent: null,
+             leader: null,
+         }));
+         this.addChildNode(new OllamaIntelligenceTool({
+             ...config,
+             parent: null,
+             leader: null,
+         }));
+         this.addChildNode(new PerplexityIntelligenceTool({
+             ...config,
+             parent: null,
+             leader: null,
+         }));
+         this.addChildNode(new GrokIntelligenceTool({
+             ...config,
+             parent: null,
+             leader: null,
+         }));
+     }
+     async getModelProvider() {
+         // check ENV vars for override
+         if (process.env.MODEL_PROVIDER_CHOICE) {
+             if (Object.values(LLMProviders).includes(process.env.MODEL_PROVIDER_CHOICE)) {
+                 return {
+                     provider: process.env.MODEL_PROVIDER_CHOICE,
+                 };
+             }
+             throw new Error('Invalid model provider choice, please set the MODEL_PROVIDER_CHOICE environment variable to a valid model provider');
+         }
+         // check secure storage for preference
+         const config = await this.use(new oAddress('o://secure-storage'), {
+             method: 'get',
+             params: {
+                 key: IntelligenceStorageKeys.MODEL_PROVIDER_PREFERENCE,
+             },
+         });
+         const payload = config.result.data;
+         if (payload && payload.value) {
+             const modelProvider = payload.value;
+             return {
+                 provider: modelProvider,
+             };
+         }
+         // we need to ask the human for the model provider
+         this.logger.info('Asking human for model selection');
+         const modelResponse = await this.use(new oAddress('o://human'), {
+             method: 'question',
+             params: {
+                 question: 'Which AI model do you want to use? (anthropic, openai, ollama, perplexity, grok)',
+             },
+         });
+         // process the human response
+         const { answer: model } = modelResponse.result.data;
+         await this.use(new oAddress('o://secure-storage'), {
+             method: 'put',
+             params: {
+                 key: IntelligenceStorageKeys.MODEL_PROVIDER_PREFERENCE,
+                 value: model,
+             },
+         });
+         return {
+             provider: model,
+         };
+     }
+     async getProviderApiKey(provider) {
+         // leverage the ENV vars first
+         const ENV_KEYS = [
+             {
+                 key: process.env.ANTHROPIC_API_KEY,
+                 address: 'o://anthropic',
+                 name: 'anthropic',
+             },
+             {
+                 key: process.env.OPENAI_API_KEY,
+                 address: 'o://openai',
+                 name: 'openai',
+             },
+             {
+                 key: process.env.SONAR_API_KEY,
+                 address: 'o://sonar',
+                 name: 'sonar',
+             },
+             {
+                 key: process.env.GEMINI_API_KEY,
+                 address: 'o://gemini',
+                 name: 'gemini',
+             },
+             {
+                 key: process.env.GROK_API_KEY,
+                 address: 'o://grok',
+                 name: 'grok',
+             },
+         ];
+         const modelEnvConfig = ENV_KEYS.find((key) => key.name === provider);
+         if (modelEnvConfig && !!modelEnvConfig.key) {
+             return {
+                 apiKey: modelEnvConfig.key,
+             };
+         }
+         // check secure storage 2nd
+         const config = await this.use(new oAddress('o://secure-storage'), {
+             method: 'get',
+             params: {
+                 key: `${provider}-${IntelligenceStorageKeys.API_KEY_SUFFIX}`,
+             },
+         });
+         const payload = config.result.data;
+         if (payload && payload.value) {
+             const apiKey = payload.value;
+             return {
+                 apiKey,
+             };
+         }
+         // we need to ask the human for the api key
+         const keyResponse = await this.use(new oAddress('o://human'), {
+             method: 'question',
+             params: {
+                 question: `What is the API key for the ${provider} model?`,
+             },
+         });
+         // process the human response
+         const { answer: key } = keyResponse.result.data;
+         await this.use(new oAddress('o://secure-storage'), {
+             method: 'put',
+             params: {
+                 key: `${provider}-${IntelligenceStorageKeys.API_KEY_SUFFIX}`,
+                 value: key,
+             },
+         });
+         return {
+             apiKey: key,
+         };
+     }
+     async chooseIntelligence(request) {
+         // check to see if anthropic key is in vault
+         const { provider } = await this.getModelProvider();
+         const { apiKey } = await this.getProviderApiKey(provider);
+         return {
+             choice: new oAddress(`o://${provider}`),
+             apiKey,
+         };
+     }
+     // we cannot wrap this tool use in a plan because it is a core dependency in all planning
+     async _tool_prompt(request) {
+         const { prompt } = request.params;
+         const intelligence = await this.chooseIntelligence(request);
+         this.logger.debug('Using AI provider: ', intelligence.choice);
+         const response = await this.use(intelligence.choice, {
+             method: 'completion',
+             params: {
+                 apiKey: intelligence.apiKey,
+                 messages: [
+                     {
+                         role: 'user',
+                         content: prompt,
+                     },
+                 ],
+             },
+         });
+         return response.result.data;
+     }
+ }
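getModelProvider() and getProviderApiKey() above resolve configuration in the same order: environment variable first, then o://secure-storage, then a question routed to o://human. A sketch of the fully non-interactive path follows; the variable names come straight from the hunk, the values are placeholders, and it assumes 'anthropic' is a member of the LLMProviders enum (the enum body is not shown in this diff).

    // Non-interactive configuration sketch. MODEL_PROVIDER_CHOICE must
    // match a value of the LLMProviders enum or getModelProvider() throws.
    process.env.MODEL_PROVIDER_CHOICE = 'anthropic';
    // Checked first by getProviderApiKey(), before secure storage.
    process.env.ANTHROPIC_API_KEY = '<your-key-here>';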
package/dist/src/methods/intelligence.methods.d.ts
@@ -0,0 +1,5 @@
+ import { oMethod } from '@olane/o-protocol';
+ export declare const INTELLIGENCE_PARAMS: {
+     [key: string]: oMethod;
+ };
+ //# sourceMappingURL=intelligence.methods.d.ts.map
package/dist/src/methods/intelligence.methods.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"intelligence.methods.d.ts","sourceRoot":"","sources":["../../../src/methods/intelligence.methods.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAE5C,eAAO,MAAM,mBAAmB,EAAE;IAAE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAA;CAmIzD,CAAC"}
package/dist/src/methods/intelligence.methods.js
@@ -0,0 +1,132 @@
+ export const INTELLIGENCE_PARAMS = {
+     completion: {
+         name: 'completion',
+         description: 'Completion',
+         dependencies: [],
+         parameters: [
+             {
+                 name: 'model',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The model to use for generation',
+                 required: false,
+             },
+             {
+                 name: 'messages',
+                 type: 'array',
+                 value: 'string[]',
+                 description: 'The messages to use for generation',
+             },
+             {
+                 name: 'options',
+                 type: 'object',
+                 value: 'object',
+                 description: 'The options to use for generation',
+                 required: false,
+             },
+         ],
+     },
+     generate: {
+         name: 'generate',
+         description: 'Generate',
+         dependencies: [],
+         parameters: [
+             {
+                 name: 'model',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The model to use for generation',
+             },
+         ],
+     },
+     list_models: {
+         name: 'list_models',
+         description: 'List models',
+         dependencies: [],
+         parameters: [],
+     },
+     pull_model: {
+         name: 'pull_model',
+         description: 'Pull model',
+         dependencies: [],
+         parameters: [
+             {
+                 name: 'model',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The model to pull',
+             },
+             {
+                 name: 'insecure',
+                 type: 'boolean',
+                 value: 'boolean',
+                 description: 'Whether to allow insecure connections',
+             },
+         ],
+     },
+     delete_model: {
+         name: 'delete_model',
+         description: 'Delete model',
+         dependencies: [],
+         parameters: [
+             {
+                 name: 'model',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The model to delete',
+             },
+         ],
+     },
+     model_info: {
+         name: 'model_info',
+         description: 'Model info',
+         dependencies: [],
+         parameters: [
+             {
+                 name: 'model',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The model to get info for',
+             },
+         ],
+     },
+     status: {
+         name: 'status',
+         description: 'Status',
+         dependencies: [],
+         parameters: [],
+     },
+     prompt: {
+         name: 'prompt',
+         description: 'Generate a response using AI based on a prompt',
+         dependencies: [],
+         parameters: [
+             {
+                 name: 'prompt',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The prompt to send to the AI model',
+             },
+         ],
+     },
+     search: {
+         name: 'search',
+         description: 'Search for information using AI search capabilities',
+         dependencies: [],
+         parameters: [
+             {
+                 name: 'query',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The search query to execute',
+             },
+             {
+                 name: 'focus',
+                 type: 'string',
+                 value: 'string',
+                 description: 'The focus area for the search',
+                 required: false,
+             },
+         ],
+     },
+ };
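The completion schema above marks messages as the only required parameter; model and options are optional. A sketch of a conforming call against the Ollama tool defined later in this diff; the wrapper class, message content, and options values are illustrative, while the address, method name, parameter names, and message shape are taken from the diff (the router's _tool_prompt sends messages as { role, content } objects).

    import { oAddress } from '@olane/o-core';
    import { oVirtualTool } from '@olane/o-tool';

    // Hypothetical wrapper showing a `completion` call that satisfies the
    // schema above.
    class CompletionExample extends oVirtualTool {
      async run() {
        return this.use(new oAddress('o://ollama'), {
          method: 'completion',
          params: {
            model: 'llama3.2:latest', // optional; the Ollama tool's default
            messages: [{ role: 'user', content: 'Hello' }], // required
            options: { temperature: 0.2 }, // optional, provider-specific
          },
        });
      }
    }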
package/dist/src/ollama-intelligence.tool.d.ts
@@ -0,0 +1,36 @@
+ import { oRequest } from '@olane/o-core';
+ import { oToolConfig, oVirtualTool, ToolResult } from '@olane/o-tool';
+ export declare class OllamaIntelligenceTool extends oVirtualTool {
+     static defaultModel: string;
+     static defaultUrl: string;
+     constructor(config: oToolConfig);
+     /**
+      * Chat completion with Ollama
+      */
+     _tool_completion(request: oRequest): Promise<ToolResult>;
+     /**
+      * Generate text with Ollama
+      */
+     _tool_generate(request: oRequest): Promise<ToolResult>;
+     /**
+      * List available models
+      */
+     _tool_list_models(request: oRequest): Promise<ToolResult>;
+     /**
+      * Pull a model from Ollama library
+      */
+     _tool_pull_model(request: oRequest): Promise<ToolResult>;
+     /**
+      * Delete a model
+      */
+     _tool_delete_model(request: oRequest): Promise<ToolResult>;
+     /**
+      * Get model information
+      */
+     _tool_model_info(request: oRequest): Promise<ToolResult>;
+     /**
+      * Check Ollama server status
+      */
+     _tool_status(request: oRequest): Promise<ToolResult>;
+ }
+ //# sourceMappingURL=ollama-intelligence.tool.d.ts.map
package/dist/src/ollama-intelligence.tool.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"ollama-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/ollama-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAgB,MAAM,eAAe,CAAC;AACjE,OAAO,EAAS,WAAW,EAAE,YAAY,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AA+G7E,qBAAa,sBAAuB,SAAQ,YAAY;IACtD,MAAM,CAAC,YAAY,SAAqB;IACxC,MAAM,CAAC,UAAU,SAA4B;gBAEjC,MAAM,EAAE,WAAW;IAY/B;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4D9D;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAgE5D;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAmC/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiE9D;;OAEG;IACG,kBAAkB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA6ChE;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAwC9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAyB3D"}
package/dist/src/ollama-intelligence.tool.js
@@ -0,0 +1,312 @@
+ import { oAddress } from '@olane/o-core';
+ import { oVirtualTool } from '@olane/o-tool';
+ import { INTELLIGENCE_PARAMS } from './methods/intelligence.methods.js';
+ export class OllamaIntelligenceTool extends oVirtualTool {
+     constructor(config) {
+         super({
+             ...config,
+             address: new oAddress('o://ollama'),
+             description: 'Intelligence tool using Ollama LLM suite of models',
+             methods: INTELLIGENCE_PARAMS,
+             dependencies: [],
+         });
+         // this.baseUrl = config.ollamaUrl || 'http://localhost:11434';
+         // this.defaultModel = config.defaultModel || 'llama2';
+     }
+     /**
+      * Chat completion with Ollama
+      */
+     async _tool_completion(request) {
+         try {
+             const params = request.params;
+             const { model = OllamaIntelligenceTool.defaultModel, messages, options = {}, } = params;
+             // let's validate the params and ask for ones that are missing
+             if (!messages || !Array.isArray(messages)) {
+                 return {
+                     success: false,
+                     error: '"messages" array is required',
+                 };
+             }
+             const chatRequest = {
+                 model: model,
+                 messages: messages,
+                 stream: false,
+                 options: options,
+             };
+             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/chat`, {
+                 method: 'POST',
+                 headers: {
+                     'Content-Type': 'application/json',
+                 },
+                 body: JSON.stringify(chatRequest),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 return {
+                     success: false,
+                     error: `Ollama API error: ${response.status} - ${errorText}`,
+                 };
+             }
+             const result = (await response.json());
+             return {
+                 message: result.message.content,
+                 model: result.model,
+                 total_duration: result.total_duration,
+                 eval_count: result.eval_count,
+                 eval_duration: result.eval_duration,
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 error: `Failed to complete chat: ${error.message}`,
+             };
+         }
+     }
+     /**
+      * Generate text with Ollama
+      */
+     async _tool_generate(request) {
+         try {
+             const params = request.params;
+             const { model = OllamaIntelligenceTool.defaultModel, prompt, system, options = {}, } = params;
+             if (!prompt) {
+                 return {
+                     success: false,
+                     error: 'Prompt is required',
+                 };
+             }
+             const generateRequest = {
+                 model: model,
+                 prompt: prompt,
+                 system: system,
+                 stream: false,
+                 options: options,
+             };
+             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/generate`, {
+                 method: 'POST',
+                 headers: {
+                     'Content-Type': 'application/json',
+                 },
+                 body: JSON.stringify(generateRequest),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 return {
+                     success: false,
+                     error: `Ollama API error: ${response.status} - ${errorText}`,
+                 };
+             }
+             const result = (await response.json());
+             return {
+                 success: true,
+                 response: result.response,
+                 model: result.model,
+                 done: result.done,
+                 total_duration: result.total_duration,
+                 eval_count: result.eval_count,
+                 eval_duration: result.eval_duration,
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 error: `Failed to generate text: ${error.message}`,
+             };
+         }
+     }
+     /**
+      * List available models
+      */
+     async _tool_list_models(request) {
+         try {
+             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/tags`, {
+                 method: 'GET',
+                 headers: {
+                     'Content-Type': 'application/json',
+                 },
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 return {
+                     success: false,
+                     error: `Ollama API error: ${response.status} - ${errorText}`,
+                 };
+             }
+             const result = (await response.json());
+             return {
+                 success: true,
+                 models: result.models,
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 error: `Failed to list models: ${error.message}`,
+             };
+         }
+     }
+     /**
+      * Pull a model from Ollama library
+      */
+     async _tool_pull_model(request) {
+         try {
+             const params = request.params;
+             const { model, insecure = false } = params;
+             if (!model) {
+                 return {
+                     success: false,
+                     error: 'Model name is required',
+                 };
+             }
+             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/pull`, {
+                 method: 'POST',
+                 headers: {
+                     'Content-Type': 'application/json',
+                 },
+                 body: JSON.stringify({
+                     name: model,
+                     insecure: insecure,
+                 }),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 return {
+                     success: false,
+                     error: `Ollama API error: ${response.status} - ${errorText}`,
+                 };
+             }
+             // For pull operations, we need to handle streaming response
+             const reader = response.body?.getReader();
+             if (!reader) {
+                 return {
+                     success: false,
+                     error: 'Failed to read response stream',
+                 };
+             }
+             let result = '';
+             while (true) {
+                 const { done, value } = await reader.read();
+                 if (done)
+                     break;
+                 const chunk = new TextDecoder().decode(value);
+                 result += chunk;
+             }
+             return {
+                 success: true,
+                 message: `Model ${model} pulled successfully`,
+                 details: result,
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 error: `Failed to pull model: ${error.message}`,
+             };
+         }
+     }
+     /**
+      * Delete a model
+      */
+     async _tool_delete_model(request) {
+         try {
+             const params = request.params;
+             const { model } = params;
+             if (!model) {
+                 return {
+                     success: false,
+                     error: 'Model name is required',
+                 };
+             }
+             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/delete`, {
+                 method: 'DELETE',
+                 headers: {
+                     'Content-Type': 'application/json',
+                 },
+                 body: JSON.stringify({
+                     name: model,
+                 }),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 return {
+                     success: false,
+                     error: `Ollama API error: ${response.status} - ${errorText}`,
+                 };
+             }
+             return {
+                 success: true,
+                 message: `Model ${model} deleted successfully`,
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 error: `Failed to delete model: ${error.message}`,
+             };
+         }
+     }
+     /**
+      * Get model information
+      */
+     async _tool_model_info(request) {
+         try {
+             const params = request.params;
+             const { model = OllamaIntelligenceTool.defaultModel } = params;
+             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/show`, {
+                 method: 'POST',
+                 headers: {
+                     'Content-Type': 'application/json',
+                 },
+                 body: JSON.stringify({
+                     name: model,
+                 }),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 return {
+                     success: false,
+                     error: `Ollama API error: ${response.status} - ${errorText}`,
+                 };
+             }
+             const result = (await response.json());
+             return {
+                 success: true,
+                 model_info: result,
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 error: `Failed to get model info: ${error.message}`,
+             };
+         }
+     }
+     /**
+      * Check Ollama server status
+      */
+     async _tool_status(request) {
+         try {
+             const response = await fetch(`${OllamaIntelligenceTool.defaultUrl}/api/tags`, {
+                 method: 'GET',
+                 headers: {
+                     'Content-Type': 'application/json',
+                 },
+             });
+             return {
+                 success: response.ok,
+                 status: response.ok ? 'online' : 'offline',
+                 status_code: response.status,
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 status: 'offline',
+                 error: `Connection failed: ${error.message}`,
+             };
+         }
+     }
+ }
+ OllamaIntelligenceTool.defaultModel = 'llama3.2:latest';
+ OllamaIntelligenceTool.defaultUrl = 'http://localhost:11434';
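Each _tool_* method above wraps a single Ollama REST endpoint, always against the static defaultUrl: /api/chat, /api/generate, /api/tags (used by both list_models and status), /api/pull, /api/delete, and /api/show. A standalone sketch of the same status probe follows, assuming a local Ollama server on the default port; the function name is illustrative, the endpoint and URL come from the code above.

    // Mirrors _tool_status above with a bare fetch (Node 18+ global fetch).
    // Assumes an Ollama server on localhost:11434; adjust as needed.
    const base = 'http://localhost:11434';

    async function ollamaStatus() {
      try {
        const res = await fetch(`${base}/api/tags`, {
          headers: { 'Content-Type': 'application/json' },
        });
        return { status: res.ok ? 'online' : 'offline', status_code: res.status };
      } catch (error) {
        return { status: 'offline', error: (error as Error).message };
      }
    }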