@olane/o-intelligence 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. package/README.md +11 -0
  2. package/dist/src/anthropic-intelligence.tool.d.ts +224 -0
  3. package/dist/src/anthropic-intelligence.tool.d.ts.map +1 -0
  4. package/dist/src/anthropic-intelligence.tool.js +477 -0
  5. package/dist/src/enums/intelligence-storage-keys.enum.d.ts +5 -0
  6. package/dist/src/enums/intelligence-storage-keys.enum.d.ts.map +1 -0
  7. package/dist/src/enums/intelligence-storage-keys.enum.js +5 -0
  8. package/dist/src/enums/llm-providers.enum.d.ts +8 -0
  9. package/dist/src/enums/llm-providers.enum.d.ts.map +1 -0
  10. package/dist/src/enums/llm-providers.enum.js +8 -0
  11. package/dist/src/gemini-intelligence.tool.d.ts +29 -0
  12. package/dist/src/gemini-intelligence.tool.d.ts.map +1 -0
  13. package/dist/src/gemini-intelligence.tool.js +267 -0
  14. package/dist/src/grok-intelligence.tool.d.ts +13 -0
  15. package/dist/src/grok-intelligence.tool.d.ts.map +1 -0
  16. package/dist/src/grok-intelligence.tool.js +214 -0
  17. package/dist/src/index.d.ts +7 -0
  18. package/dist/src/index.d.ts.map +1 -0
  19. package/dist/src/index.js +6 -0
  20. package/dist/src/intelligence.tool.d.ts +21 -0
  21. package/dist/src/intelligence.tool.d.ts.map +1 -0
  22. package/dist/src/intelligence.tool.js +201 -0
  23. package/dist/src/methods/intelligence.methods.d.ts +5 -0
  24. package/dist/src/methods/intelligence.methods.d.ts.map +1 -0
  25. package/dist/src/methods/intelligence.methods.js +132 -0
  26. package/dist/src/ollama-intelligence.tool.d.ts +36 -0
  27. package/dist/src/ollama-intelligence.tool.d.ts.map +1 -0
  28. package/dist/src/ollama-intelligence.tool.js +312 -0
  29. package/dist/src/openai-intelligence.tool.d.ts +29 -0
  30. package/dist/src/openai-intelligence.tool.d.ts.map +1 -0
  31. package/dist/src/openai-intelligence.tool.js +261 -0
  32. package/dist/src/perplexity-intelligence.tool.d.ts +28 -0
  33. package/dist/src/perplexity-intelligence.tool.d.ts.map +1 -0
  34. package/dist/src/perplexity-intelligence.tool.js +310 -0
  35. package/dist/test/basic.spec.d.ts +1 -0
  36. package/dist/test/basic.spec.d.ts.map +1 -0
  37. package/dist/test/basic.spec.js +1 -0
  38. package/package.json +70 -0
@@ -0,0 +1,267 @@
1
+ import { oAddress } from '@olane/o-core';
2
+ import { oVirtualTool } from '@olane/o-tool';
3
+ import { INTELLIGENCE_PARAMS } from './methods/intelligence.methods.js';
4
export class GeminiIntelligenceTool extends oVirtualTool {
    constructor(config) {
        super({
            ...config,
            address: new oAddress('o://gemini'),
            description: 'Intelligence tool using Google Gemini suite of models',
            methods: INTELLIGENCE_PARAMS,
            dependencies: [],
        });
        // FIX: every method below reads this.baseUrl / this.defaultModel /
        // this.apiKey, but nothing in this file ever assigned them, so each
        // call either failed the "API key is required" guard or built a fetch
        // URL containing "undefined". Initialize them here, following the
        // constructor pattern used by GrokIntelligenceTool.
        this.baseUrl =
            config.baseUrl || 'https://generativelanguage.googleapis.com/v1beta';
        this.defaultModel = config.defaultModel || 'gemini-1.5-flash';
        // NOTE(review): env-var name mirrors the sibling tool's GROK_API_KEY
        // convention — confirm GEMINI_API_KEY against callers/docs.
        this.apiKey = config.apiKey || process.env.GEMINI_API_KEY;
    }
    /**
     * Map the caller-supplied option names onto Gemini's generationConfig
     * shape. Options the caller did not set remain undefined and are
     * dropped by JSON.stringify.
     */
    buildGenerationConfig(options) {
        return {
            temperature: options.temperature,
            topK: options.topK,
            topP: options.topP,
            maxOutputTokens: options.maxOutputTokens,
            stopSequences: options.stopSequences,
        };
    }
    /**
     * POST `body` to the model's generateContent endpoint.
     * @returns the raw fetch Response; the caller inspects `.ok`.
     */
    postGenerateContent(model, key, body) {
        return fetch(`${this.baseUrl}/models/${model}:generateContent?key=${key}`, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify(body),
        });
    }
    /**
     * Shape the first candidate of a generateContent payload into the
     * ToolResult returned by completion/generate.
     */
    extractCandidateResult(result, model) {
        const candidate = result.candidates[0];
        return {
            success: true,
            response: candidate.content.parts[0]?.text || '',
            model: model,
            usage: result.usageMetadata,
            finish_reason: candidate.finishReason,
            safety_ratings: candidate.safetyRatings,
        };
    }
    /**
     * Chat completion with Gemini.
     * Accepts OpenAI-style { role, content } messages and converts them to
     * Gemini's contents/parts format. An `apiKey` request param overrides
     * the key configured on the instance (consistent with the Grok tool).
     */
    async _tool_completion(request) {
        try {
            const params = request.params;
            const { model = this.defaultModel, messages, apiKey, ...options } = params;
            const key = apiKey || this.apiKey;
            if (!key) {
                return {
                    success: false,
                    error: 'Gemini API key is required',
                };
            }
            if (!messages || !Array.isArray(messages)) {
                return {
                    success: false,
                    error: '"messages" array is required',
                };
            }
            // Gemini only knows the roles "user" and "model"; assistant turns
            // map to "model", everything else to "user".
            const contents = messages.map((msg) => ({
                role: msg.role === 'assistant' ? 'model' : 'user',
                parts: [{ text: msg.content }],
            }));
            const chatRequest = {
                contents,
                generationConfig: this.buildGenerationConfig(options),
                safetySettings: options.safetySettings,
            };
            const response = await this.postGenerateContent(model, key, chatRequest);
            if (!response.ok) {
                const errorText = await response.text();
                return {
                    success: false,
                    error: `Gemini API error: ${response.status} - ${errorText}`,
                };
            }
            const result = await response.json();
            if (!result.candidates || result.candidates.length === 0) {
                return {
                    success: false,
                    error: 'No response generated from Gemini',
                };
            }
            return this.extractCandidateResult(result, model);
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to complete chat: ${error.message}`,
            };
        }
    }
    /**
     * Single-shot text generation. An optional `system` string is prepended
     * to the prompt, since this request shape carries no separate system
     * role. An `apiKey` request param overrides the configured key.
     */
    async _tool_generate(request) {
        try {
            const params = request.params;
            const { model = this.defaultModel, prompt, system, apiKey, ...options } = params;
            const key = apiKey || this.apiKey;
            if (!key) {
                return {
                    success: false,
                    error: 'Gemini API key is required',
                };
            }
            if (!prompt) {
                return {
                    success: false,
                    error: 'Prompt is required',
                };
            }
            // Combine system and user prompt into one text part.
            const fullPrompt = system ? `${system}\n\n${prompt}` : prompt;
            const generateRequest = {
                contents: [
                    {
                        parts: [{ text: fullPrompt }],
                    },
                ],
                generationConfig: this.buildGenerationConfig(options),
                safetySettings: options.safetySettings,
            };
            const response = await this.postGenerateContent(model, key, generateRequest);
            if (!response.ok) {
                const errorText = await response.text();
                return {
                    success: false,
                    error: `Gemini API error: ${response.status} - ${errorText}`,
                };
            }
            const result = await response.json();
            if (!result.candidates || result.candidates.length === 0) {
                return {
                    success: false,
                    error: 'No response generated from Gemini',
                };
            }
            return this.extractCandidateResult(result, model);
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to generate text: ${error.message}`,
            };
        }
    }
    /** List the models available to this API key. */
    async _tool_list_models(request) {
        try {
            if (!this.apiKey) {
                return {
                    success: false,
                    error: 'Gemini API key is required',
                };
            }
            const response = await fetch(`${this.baseUrl}/models?key=${this.apiKey}`, {
                method: 'GET',
                headers: {
                    'Content-Type': 'application/json',
                },
            });
            if (!response.ok) {
                const errorText = await response.text();
                return {
                    success: false,
                    error: `Gemini API error: ${response.status} - ${errorText}`,
                };
            }
            const result = await response.json();
            return {
                success: true,
                models: result.models,
            };
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to list models: ${error.message}`,
            };
        }
    }
    /** Fetch metadata for one model (defaults to this.defaultModel). */
    async _tool_model_info(request) {
        try {
            const params = request.params;
            const { model = this.defaultModel } = params;
            if (!this.apiKey) {
                return {
                    success: false,
                    error: 'Gemini API key is required',
                };
            }
            const response = await fetch(`${this.baseUrl}/models/${model}?key=${this.apiKey}`, {
                method: 'GET',
                headers: {
                    'Content-Type': 'application/json',
                },
            });
            if (!response.ok) {
                const errorText = await response.text();
                return {
                    success: false,
                    error: `Gemini API error: ${response.status} - ${errorText}`,
                };
            }
            const result = await response.json();
            return {
                success: true,
                model_info: result,
            };
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to get model info: ${error.message}`,
            };
        }
    }
    /**
     * Liveness probe: hits the models listing endpoint and reports
     * online/offline based on the HTTP status.
     */
    async _tool_status(request) {
        try {
            if (!this.apiKey) {
                return {
                    success: false,
                    status: 'offline',
                    error: 'Gemini API key is required',
                };
            }
            const response = await fetch(`${this.baseUrl}/models?key=${this.apiKey}`, {
                method: 'GET',
                headers: {
                    'Content-Type': 'application/json',
                },
            });
            return {
                success: response.ok,
                status: response.ok ? 'online' : 'offline',
                status_code: response.status,
            };
        }
        catch (error) {
            return {
                success: false,
                status: 'offline',
                error: `Connection failed: ${error.message}`,
            };
        }
    }
}
@@ -0,0 +1,13 @@
1
+ import { oRequest } from '@olane/o-core';
2
+ import { oToolConfig, oVirtualTool, ToolResult } from '@olane/o-tool';
3
/**
 * Intelligence tool that fronts the xAI Grok chat-completions API.
 * The API key is taken from the request params or the GROK_API_KEY
 * environment variable (see grok-intelligence.tool.js).
 */
export declare class GrokIntelligenceTool extends oVirtualTool {
    /** Base URL of the xAI REST API ('https://api.x.ai/v1'). */
    private baseUrl;
    /** Model used when a request does not name one ('grok-3-mini'). */
    private defaultModel;
    constructor(config: oToolConfig);
    /** Chat completion over caller-supplied { role, content } messages. */
    _tool_completion(request: oRequest): Promise<ToolResult>;
    /** One-shot text generation from a prompt plus optional system message. */
    _tool_generate(request: oRequest): Promise<ToolResult>;
    /** Lists the models available to the API key. */
    _tool_list_models(request: oRequest): Promise<ToolResult>;
    /** Fetches metadata for a single model. */
    _tool_model_info(request: oRequest): Promise<ToolResult>;
    /** Liveness probe against the /models endpoint. */
    _tool_status(request: oRequest): Promise<ToolResult>;
}
13
+ //# sourceMappingURL=grok-intelligence.tool.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"grok-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/grok-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,WAAW,EAAE,YAAY,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AA8CtE,qBAAa,oBAAqB,SAAQ,YAAY;IACpD,OAAO,CAAC,OAAO,CAAiC;IAChD,OAAO,CAAC,YAAY,CAAyB;gBAEjC,MAAM,EAAE,WAAW;IAUzB,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiExD,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAqEtD,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiCzD,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAgCxD,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CA+B3D"}
@@ -0,0 +1,214 @@
1
+ import { oAddress } from '@olane/o-core';
2
+ import { oVirtualTool } from '@olane/o-tool';
3
+ import { INTELLIGENCE_PARAMS } from './methods/intelligence.methods.js';
4
export class GrokIntelligenceTool extends oVirtualTool {
    constructor(config) {
        super({
            ...config,
            address: new oAddress('o://grok'),
            description: 'Intelligence tool using xAI Grok suite of models',
            methods: INTELLIGENCE_PARAMS,
            dependencies: [],
        });
        this.baseUrl = 'https://api.x.ai/v1';
        this.defaultModel = 'grok-3-mini';
    }
    /** Resolve the key: explicit request param wins, then GROK_API_KEY. */
    resolveApiKey(apiKey) {
        return apiKey || process.env.GROK_API_KEY;
    }
    /**
     * Build an OpenAI-style chat request, copying only the tuning options
     * the caller actually supplied. (Previously duplicated in completion
     * and generate.)
     */
    buildChatRequest(model, messages, options) {
        const chatRequest = {
            model: model,
            messages: messages,
            stream: false,
        };
        if (options.max_tokens !== undefined)
            chatRequest.max_tokens = options.max_tokens;
        if (options.temperature !== undefined)
            chatRequest.temperature = options.temperature;
        if (options.top_p !== undefined)
            chatRequest.top_p = options.top_p;
        return chatRequest;
    }
    /**
     * POST a chat request to /chat/completions.
     * @returns { result } with the parsed JSON body on success, or
     *          { error } with the formatted API error string.
     */
    async postChat(key, chatRequest) {
        const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${key}`,
            },
            body: JSON.stringify(chatRequest),
        });
        if (!response.ok) {
            const errorText = await response.text();
            return {
                error: `Grok API error: ${response.status} - ${errorText}`,
            };
        }
        return { result: await response.json() };
    }
    /** Authenticated GET against the xAI API; returns the raw Response. */
    getApi(path, key) {
        return fetch(`${this.baseUrl}${path}`, {
            method: 'GET',
            headers: { Authorization: `Bearer ${key}` },
        });
    }
    /** Chat completion over caller-supplied messages. */
    async _tool_completion(request) {
        try {
            const params = request.params;
            const { model = this.defaultModel, messages, apiKey, ...options } = params;
            const key = this.resolveApiKey(apiKey);
            if (!key) {
                return { success: false, error: 'Grok API key is required' };
            }
            if (!messages || !Array.isArray(messages)) {
                return { success: false, error: '"messages" array is required' };
            }
            const { result, error } = await this.postChat(key, this.buildChatRequest(model, messages, options));
            if (error) {
                return { success: false, error };
            }
            return {
                success: true,
                message: result.choices?.[0]?.message?.content || '',
                model: result.model,
                usage: result.usage,
                finish_reason: result.choices?.[0]?.finish_reason,
            };
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to complete chat: ${error.message}`,
            };
        }
    }
    /** One-shot generation from a prompt plus optional system message. */
    async _tool_generate(request) {
        try {
            const params = request.params;
            const { model = this.defaultModel, prompt, system, apiKey, ...options } = params;
            const key = this.resolveApiKey(apiKey);
            if (!key) {
                return { success: false, error: 'Grok API key is required' };
            }
            if (!prompt) {
                return { success: false, error: 'Prompt is required' };
            }
            const messages = [];
            if (system)
                messages.push({ role: 'system', content: system });
            messages.push({ role: 'user', content: prompt });
            const { result, error } = await this.postChat(key, this.buildChatRequest(model, messages, options));
            if (error) {
                return { success: false, error };
            }
            return {
                success: true,
                response: result.choices?.[0]?.message?.content || '',
                model: result.model,
                usage: result.usage,
                finish_reason: result.choices?.[0]?.finish_reason,
            };
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to generate text: ${error.message}`,
            };
        }
    }
    /** List models visible to this API key. */
    async _tool_list_models(request) {
        try {
            const { apiKey } = request.params;
            const key = this.resolveApiKey(apiKey);
            if (!key) {
                return { success: false, error: 'Grok API key is required' };
            }
            const response = await this.getApi('/models', key);
            if (!response.ok) {
                const errorText = await response.text();
                return {
                    success: false,
                    error: `Grok API error: ${response.status} - ${errorText}`,
                };
            }
            const result = await response.json();
            return { success: true, models: result.data };
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to list models: ${error.message}`,
            };
        }
    }
    /** Metadata for a single model (defaults to this.defaultModel). */
    async _tool_model_info(request) {
        try {
            const { model = this.defaultModel, apiKey } = request.params;
            const key = this.resolveApiKey(apiKey);
            if (!key) {
                return { success: false, error: 'Grok API key is required' };
            }
            const response = await this.getApi(`/models/${model}`, key);
            if (!response.ok) {
                const errorText = await response.text();
                return {
                    success: false,
                    error: `Grok API error: ${response.status} - ${errorText}`,
                };
            }
            const result = await response.json();
            return { success: true, model_info: result };
        }
        catch (error) {
            return {
                success: false,
                error: `Failed to get model info: ${error.message}`,
            };
        }
    }
    /** Liveness probe via the /models endpoint. */
    async _tool_status(request) {
        try {
            const { apiKey } = request.params;
            const key = this.resolveApiKey(apiKey);
            if (!key) {
                return {
                    success: false,
                    status: 'no_api_key',
                    error: 'Grok API key is required',
                };
            }
            const response = await this.getApi('/models', key);
            return {
                success: response.ok,
                status: response.ok ? 'online' : 'offline',
                status_code: response.status,
            };
        }
        catch (error) {
            return {
                success: false,
                status: 'offline',
                error: `Connection failed: ${error.message}`,
            };
        }
    }
}
@@ -0,0 +1,7 @@
1
+ export * from './ollama-intelligence.tool.js';
2
+ export * from './openai-intelligence.tool.js';
3
+ export * from './anthropic-intelligence.tool.js';
4
+ export * from './gemini-intelligence.tool.js';
5
+ export * from './grok-intelligence.tool.js';
6
+ export * from './intelligence.tool.js';
7
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,+BAA+B,CAAC;AAC9C,cAAc,+BAA+B,CAAC;AAC9C,cAAc,kCAAkC,CAAC;AACjD,cAAc,+BAA+B,CAAC;AAC9C,cAAc,6BAA6B,CAAC;AAC5C,cAAc,wBAAwB,CAAC"}
@@ -0,0 +1,6 @@
1
+ export * from './ollama-intelligence.tool.js';
2
+ export * from './openai-intelligence.tool.js';
3
+ export * from './anthropic-intelligence.tool.js';
4
+ export * from './gemini-intelligence.tool.js';
5
+ export * from './grok-intelligence.tool.js';
6
+ export * from './intelligence.tool.js';
@@ -0,0 +1,21 @@
1
+ import { oToolConfig, oVirtualTool } from '@olane/o-tool';
2
+ import { oAddress } from '@olane/o-core';
3
+ import { oRequest } from '@olane/o-core';
4
+ import { ToolResult } from '@olane/o-tool';
5
+ import { LLMProviders } from './enums/llm-providers.enum.js';
6
/**
 * Router tool that selects one of the concrete provider tools and
 * forwards prompts to it. Behavior described here is inferred from the
 * method names and return shapes — confirm against intelligence.tool.js.
 */
export declare class IntelligenceTool extends oVirtualTool {
    /** Rotating cursor — presumably round-robins across providers; verify in the implementation. */
    private roundRobinIndex;
    constructor(config: oToolConfig);
    /** Resolves which LLM provider should serve the next request. */
    getModelProvider(): Promise<{
        provider: LLMProviders;
    }>;
    /** Looks up the API key for the given provider. */
    getProviderApiKey(provider: LLMProviders): Promise<{
        apiKey: string;
    }>;
    /** Picks the provider tool address plus API key for this request. */
    chooseIntelligence(request: oRequest): Promise<{
        choice: oAddress;
        apiKey: string;
    }>;
    /** Tool entry point: handles a prompt request via the chosen provider. */
    _tool_prompt(request: oRequest): Promise<ToolResult>;
}
21
+ //# sourceMappingURL=intelligence.tool.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,YAAY,EAAE,MAAM,eAAe,CAAC;AAC1D,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AACzC,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AACzC,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAQ3C,OAAO,EAAE,YAAY,EAAE,MAAM,+BAA+B,CAAC;AAE7D,qBAAa,gBAAiB,SAAQ,YAAY;IAChD,OAAO,CAAC,eAAe,CAAK;gBAChB,MAAM,EAAE,WAAW;IA0DzB,gBAAgB,IAAI,OAAO,CAAC;QAAE,QAAQ,EAAE,YAAY,CAAA;KAAE,CAAC;IAsDvD,iBAAiB,CAAC,QAAQ,EAAE,YAAY,GAAG,OAAO,CAAC;QAAE,MAAM,EAAE,MAAM,CAAA;KAAE,CAAC;IAuEtE,kBAAkB,CACtB,OAAO,EAAE,QAAQ,GAChB,OAAO,CAAC;QAAE,MAAM,EAAE,QAAQ,CAAC;QAAC,MAAM,EAAE,MAAM,CAAA;KAAE,CAAC;IAW1C,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAkB3D"}