@olane/o-intelligence 0.7.1 → 0.7.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md
CHANGED
@@ -1,11 +1,902 @@
# o-intelligence
- Important configuration options below:

Multi-provider AI intelligence router for Olane OS that provides a unified interface to LLM providers, including Anthropic, OpenAI, Ollama, Perplexity, Grok, and Gemini.

## Quick Start

```bash
# Installation
npm install @olane/o-intelligence
```

```typescript
// Basic usage with automatic provider selection
import { IntelligenceTool } from '@olane/o-intelligence';
import { oAddress } from '@olane/o-core';

const intelligence = new IntelligenceTool({
  address: new oAddress('o://intelligence')
});

await intelligence.start();

// Send a prompt (automatically routes to configured provider)
const response = await intelligence.use(new oAddress('o://intelligence'), {
  method: 'prompt',
  params: {
    prompt: 'Explain quantum computing in simple terms'
  }
});

console.log(response.result.data);
```

## How It Works {#how-it-works}

`o-intelligence` is a **complex node** that acts as a smart router for LLM requests. It:

1. **Manages multiple AI providers** as child nodes (Anthropic, OpenAI, Ollama, Perplexity, Grok, Gemini)
2. **Handles provider selection** via configuration, environment variables, or interactive prompts
3. **Securely stores API keys** using `o://secure` for credential management
4. **Routes requests** to the appropriate provider based on configuration
5. **Provides a unified interface** - one API for all providers

```
┌─────────────────────────────────────────────────┐
│ o://intelligence (Router/Coordinator)           │
│ • Manages provider selection                    │
│ • Handles API key retrieval                     │
│ • Routes requests to child nodes                │
└─────────────────────────────────────────────────┘
                     ⬇ routes to
┌───────────┬───────────┬───────────┬────────────┬──────────┬──────────┐
⬇           ⬇           ⬇           ⬇            ⬇          ⬇
┌─────────┐ ┌─────────┐ ┌─────────┐ ┌──────────┐ ┌──────┐ ┌────────┐
│anthropic│ │ openai  │ │ ollama  │ │perplexity│ │ grok │ │ gemini │
└─────────┘ └─────────┘ └─────────┘ └──────────┘ └──────┘ └────────┘
```
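
The routing is transparent to callers: you can send a prompt to the router and let it pick the configured provider, or address a provider child node directly. A minimal sketch of both paths, assuming the `intelligence` instance from Quick Start is running and `ANTHROPIC_API_KEY` is set:

```typescript
// Route through the coordinator - the provider is chosen from configuration
const routed = await intelligence.use(new oAddress('o://intelligence'), {
  method: 'prompt',
  params: { prompt: 'Summarize the Olane OS architecture' }
});

// Or bypass routing and talk to one provider child node directly
const direct = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'generate',
  params: {
    apiKey: process.env.ANTHROPIC_API_KEY,
    prompt: 'Summarize the Olane OS architecture'
  }
});
```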

## Tools {#tools}

### Router Tools (o://intelligence)

#### `prompt` - Simple AI Prompting

Send a single prompt and get a response. The router automatically selects the configured provider.

**Parameters:**
- `prompt` (string, required): The prompt to send to the AI model

**Returns:** LLM response with message text

**Example:**
```typescript
const result = await intelligence.use(new oAddress('o://intelligence'), {
  method: 'prompt',
  params: {
    prompt: 'Write a haiku about coding'
  }
});

console.log(result.result.data.message);
// Outputs the generated haiku
```

#### `configure` - Set Provider Preferences

Configure which AI provider to use and store preferences securely.

**Parameters:**
- `modelProvider` (string, optional): Provider to use (`anthropic`, `openai`, `ollama`, `perplexity`, `grok`)
- `hostingProvider` (string, optional): Where models are hosted (`olane`, `local`)
- `accessToken` (string, optional): Access token for hosted models
- `address` (string, optional): Custom address for hosted models

**Returns:** Success confirmation

**Example:**
```typescript
await intelligence.use(new oAddress('o://intelligence'), {
  method: 'configure',
  params: {
    modelProvider: 'anthropic',
    hostingProvider: 'local'
  }
});
```

### Provider Tools (Child Nodes)

Each provider node (`o://anthropic`, `o://openai`, etc.) exposes these tools:

#### `completion` - Multi-Turn Conversation

Generate responses with conversation history and system prompts.

**Parameters:**
- `messages` (array, required): Conversation history
  - `role` (string): `user` or `assistant`
  - `content` (string | array): Message content
- `model` (string, optional): Specific model to use
- `system` (string, optional): System message for behavior control
- `max_tokens` (number, optional): Maximum tokens to generate (default: 1000)
- `temperature` (number, optional): Randomness control (0-1)
- `top_p` (number, optional): Nucleus sampling (0-1)
- `apiKey` (string, optional): Override API key

**Returns:** Response with message, model info, and token usage

**Example:**
```typescript
const response = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'completion',
  params: {
    apiKey: process.env.ANTHROPIC_API_KEY,
    messages: [
      { role: 'user', content: 'What is TypeScript?' },
      { role: 'assistant', content: 'TypeScript is a typed superset of JavaScript...' },
      { role: 'user', content: 'Can you give me an example?' }
    ],
    system: 'You are a helpful programming tutor',
    max_tokens: 500
  }
});

console.log(response.result.data);
// {
//   message: 'Here\'s a TypeScript example...',
//   model: 'claude-sonnet-4-5-20250929',
//   usage: { input_tokens: 45, output_tokens: 120 }
// }
```

#### `generate` - Simple Text Generation

Generate text from a single prompt (simpler than `completion`).

**Parameters:**
- `prompt` (string, required): Input prompt
- `model` (string, optional): Model to use
- `system` (string, optional): System message
- `max_tokens` (number, optional): Token limit
- `temperature` (number, optional): Randomness
- `apiKey` (string, optional): Override API key

**Returns:** Generated text response

**Example:**
```typescript
const result = await intelligence.use(new oAddress('o://openai'), {
  method: 'generate',
  params: {
    apiKey: process.env.OPENAI_API_KEY,
    prompt: 'Explain REST APIs in one paragraph',
    max_tokens: 200
  }
});
```

#### `list_models` - List Available Models

Get a list of all available models from the provider.

**Parameters:**
- `apiKey` (string, optional): Override API key

**Returns:** Array of model objects with details

**Example:**
```typescript
const models = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'list_models',
  params: {
    apiKey: process.env.ANTHROPIC_API_KEY
  }
});

console.log(models.result.data.models);
// [
//   { id: 'claude-sonnet-4-5-20250929', name: 'Claude Sonnet 4.5', ... },
//   { id: 'claude-3-opus-20240229', name: 'Claude 3 Opus', ... }
// ]
```

#### `model_info` - Get Model Details

Retrieve detailed information about a specific model.

**Parameters:**
- `model` (string, optional): Model ID (defaults to provider default)
- `apiKey` (string, optional): Override API key

**Returns:** Model details including context length, pricing, description

**Example:**
```typescript
const info = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'model_info',
  params: {
    model: 'claude-sonnet-4-5-20250929',
    apiKey: process.env.ANTHROPIC_API_KEY
  }
});

console.log(info.result.data.model);
// {
//   id: 'claude-sonnet-4-5-20250929',
//   display_name: 'Claude Sonnet 4.5',
//   context_length: 200000,
//   pricing: { prompt: '$3/MTok', completion: '$15/MTok' }
// }
```

#### `status` - Check Provider Health

Verify that the provider API is accessible and working.

**Parameters:**
- `apiKey` (string, optional): Override API key

**Returns:** Status information

**Example:**
```typescript
const status = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'status',
  params: {
    apiKey: process.env.ANTHROPIC_API_KEY
  }
});

console.log(status.result.data);
// { success: true, status: 'ok', response: 'Anthropic API is accessible' }
```

## Configuration {#configuration}

### Environment Variables

Set provider API keys via environment variables (highest priority):

```bash
# Provider API Keys
export ANTHROPIC_API_KEY="sk-ant-..."
export OPENAI_API_KEY="sk-..."
export GEMINI_API_KEY="..."
export GROK_API_KEY="..."
export SONAR_API_KEY="..." # Perplexity

# Provider Selection
export MODEL_PROVIDER_CHOICE="anthropic" # anthropic, openai, ollama, perplexity, grok
```

### Secure Storage

If API keys aren't in environment variables, `o-intelligence` will do the following (sketched in code after the key list below):
1. Check `o://secure` storage for saved keys
2. Prompt the user interactively if not found
3. Save the key to secure storage for future use

**Stored keys:**
- `anthropic-api-key`
- `openai-api-key`
- `ollama-api-key`
- `perplexity-api-key`
- `grok-api-key`
- `gemini-api-key`
- `model-provider-preference`
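
The resolution order can be approximated as follows. This is a simplified sketch, not the package's internal code: `store` and `promptUser` are hypothetical stand-ins for the node's calls to `o://secure` and `o://human`.

```typescript
type KeyStore = {
  get(key: string): Promise<string | undefined>;
  set(key: string, value: string): Promise<void>;
};

async function resolveApiKey(
  provider: string,
  store: KeyStore,                                   // stand-in for o://secure
  promptUser: (question: string) => Promise<string>  // stand-in for o://human
): Promise<string> {
  // 1. Environment variable wins (e.g. ANTHROPIC_API_KEY)
  const envKey = process.env[`${provider.toUpperCase()}_API_KEY`];
  if (envKey) return envKey;

  // 2. Fall back to the key saved in o://secure (e.g. 'anthropic-api-key')
  const stored = await store.get(`${provider}-api-key`);
  if (stored) return stored;

  // 3. Ask interactively, then persist the answer for future use
  const entered = await promptUser(`What is the API key for the ${provider} model?`);
  await store.set(`${provider}-api-key`, entered);
  return entered;
}
```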

### Interactive Configuration

If no configuration is found, users will be prompted:

```bash
# Terminal output when no provider is configured:
? Which AI model do you want to use? (anthropic, openai, ollama, perplexity, grok)
> anthropic

? What is the API key for the anthropic model?
> sk-ant-...

# Saved to secure storage for future use
```

## Common Use Cases {#common-use-cases}

### Use Case 1: Simple Chatbot

Build a conversational AI that maintains context.

```typescript
import { IntelligenceTool } from '@olane/o-intelligence';
import { oAddress } from '@olane/o-core';

const intelligence = new IntelligenceTool({
  address: new oAddress('o://intelligence')
});

await intelligence.start();

// Typed so message objects can be pushed under strict TypeScript
const conversationHistory: { role: 'user' | 'assistant'; content: string }[] = [];

async function chat(userMessage: string) {
  // Add user message to history
  conversationHistory.push({
    role: 'user',
    content: userMessage
  });

  // Get AI response
  const response = await intelligence.use(new oAddress('o://anthropic'), {
    method: 'completion',
    params: {
      messages: conversationHistory,
      system: 'You are a helpful assistant',
      max_tokens: 1000
    }
  });

  const aiMessage = response.result.data.message;

  // Add AI response to history
  conversationHistory.push({
    role: 'assistant',
    content: aiMessage
  });

  return aiMessage;
}

// Example conversation
await chat('Hello! What can you help me with?');
await chat('Can you explain async/await in JavaScript?');
await chat('Can you give me an example?');
```

### Use Case 2: Multi-Provider Fallback

Try multiple providers with fallback logic.

```typescript
import { IntelligenceTool } from '@olane/o-intelligence';
import { oAddress } from '@olane/o-core';

const intelligence = new IntelligenceTool({
  address: new oAddress('o://intelligence')
});

await intelligence.start();

async function generateWithFallback(prompt: string) {
  const providers = [
    { address: 'o://anthropic', key: process.env.ANTHROPIC_API_KEY },
    { address: 'o://openai', key: process.env.OPENAI_API_KEY },
    { address: 'o://grok', key: process.env.GROK_API_KEY }
  ];

  for (const provider of providers) {
    try {
      const response = await intelligence.use(new oAddress(provider.address), {
        method: 'generate',
        params: {
          apiKey: provider.key,
          prompt: prompt
        }
      });

      if (response.result.data.success !== false) {
        return response.result.data;
      }
    } catch (error) {
      console.warn(`${provider.address} failed, trying next provider...`);
      continue;
    }
  }

  throw new Error('All providers failed');
}

const result = await generateWithFallback('Explain machine learning');
```
### Use Case 3: Local Model with Ollama

Run AI models locally using Ollama (no API key required).

```typescript
import { IntelligenceTool } from '@olane/o-intelligence';
import { oAddress } from '@olane/o-core';

// Assumes an `intelligence` instance has been created and started,
// as shown in Quick Start

// Configure to use Ollama (local)
await intelligence.use(new oAddress('o://intelligence'), {
  method: 'configure',
  params: {
    modelProvider: 'ollama',
    hostingProvider: 'local'
  }
});

// Use local model
const response = await intelligence.use(new oAddress('o://intelligence'), {
  method: 'prompt',
  params: {
    prompt: 'What is the capital of France?'
  }
});

// Runs on local Ollama instance (typically http://localhost:11434)
```

### Use Case 4: Model Comparison

Compare responses from different providers.

```typescript
async function compareProviders(prompt: string) {
  const providers = ['anthropic', 'openai', 'grok'];
  const results = [];

  for (const provider of providers) {
    const response = await intelligence.use(new oAddress(`o://${provider}`), {
      method: 'generate',
      params: {
        prompt: prompt,
        max_tokens: 200
      }
    });

    results.push({
      provider: provider,
      response: response.result.data.response,
      model: response.result.data.model,
      tokens: response.result.data.usage
    });
  }

  return results;
}

const comparison = await compareProviders('Explain blockchain in simple terms');
comparison.forEach(r => {
  console.log(`\n${r.provider} (${r.model}):`);
  console.log(r.response);
  console.log(`Tokens: ${JSON.stringify(r.tokens)}`);
});
```

### Use Case 5: Streaming Responses (Advanced)

Handle long-form generation with streaming.

```typescript
// Note: Streaming support varies by provider
// This example shows the pattern for providers that support it

async function streamGeneration(prompt: string) {
  // Check provider capabilities first
  const status = await intelligence.use(new oAddress('o://anthropic'), {
    method: 'status',
    params: {}
  });

  if (status.result.data.status !== 'ok') {
    throw new Error('Provider not available');
  }

  // Generate with streaming (if supported)
  const response = await intelligence.use(new oAddress('o://anthropic'), {
    method: 'completion',
    params: {
      messages: [{ role: 'user', content: prompt }],
      max_tokens: 2000,
      stream: true // Enable streaming if provider supports it
    }
  });

  return response;
}
```

## API Reference {#api-reference}

### IntelligenceTool Class

Main router class that extends `oLaneTool`.

```typescript
class IntelligenceTool extends oLaneTool
```

**Constructor:**
```typescript
constructor(config: oNodeToolConfig)
```

**Config Options:**
- `address`: oAddress for the intelligence router (typically `o://intelligence`)
- `description`: Optional description override
- All standard `oNodeToolConfig` options

**Methods:**

#### `getModelProvider()`

Determines which AI provider to use.

**Priority:**
1. `MODEL_PROVIDER_CHOICE` environment variable
2. Stored preference in `o://secure`
3. Interactive user prompt

**Returns:** `Promise<{ provider: LLMProviders }>`

#### `getProviderApiKey(provider: LLMProviders)`

Retrieves the API key for the specified provider.

**Priority:**
1. Environment variable (e.g., `ANTHROPIC_API_KEY`)
2. Stored key in `o://secure`
3. Interactive user prompt

**Returns:** `Promise<{ apiKey: string }>`

#### `chooseIntelligence(request: PromptRequest)`

Selects the appropriate intelligence provider and retrieves its API key.

**Returns:** `Promise<{ choice: oAddress, apiKey: string, options: any }>`
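
A short sketch of calling the selection helpers directly, assuming the running `intelligence` instance from Quick Start (whether these methods are intended as public API is not stated here; either call may fall back to an interactive prompt if nothing is configured):

```typescript
// Resolve the configured provider, then fetch its API key
const { provider } = await intelligence.getModelProvider();
const { apiKey } = await intelligence.getProviderApiKey(provider);

console.log(`Using provider: ${provider}, key ${apiKey ? 'resolved' : 'missing'}`);
```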

### Provider Classes

Each provider (Anthropic, OpenAI, Ollama, Perplexity, Grok, Gemini) extends `oLaneTool`:

```typescript
class AnthropicIntelligenceTool extends oLaneTool
class OpenAIIntelligenceTool extends oLaneTool
class OllamaIntelligenceTool extends oLaneTool
class PerplexityIntelligenceTool extends oLaneTool
class GrokIntelligenceTool extends oLaneTool
class GeminiIntelligenceTool extends oLaneTool
```

**Default Models:**
- Anthropic: `claude-sonnet-4-5-20250929`
- OpenAI: `gpt-4-turbo-preview` (or latest GPT-4)
- Ollama: `llama2` (or configured local model)
- Perplexity: `sonar-medium-chat`
- Grok: `grok-1` (or latest)
- Gemini: `gemini-pro`
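
These defaults apply only when no `model` parameter is passed; any ID returned by `list_models` can be supplied per request. For example, again assuming the running instance from Quick Start:

```typescript
const response = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'completion',
  params: {
    apiKey: process.env.ANTHROPIC_API_KEY,
    model: 'claude-3-opus-20240229', // override the provider default
    messages: [{ role: 'user', content: 'Hello!' }],
    max_tokens: 100
  }
});
```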

## Enumerations {#enumerations}

### LLMProviders

Available AI model providers.

```typescript
enum LLMProviders {
  ANTHROPIC = 'anthropic',
  OPENAI = 'openai',
  OLLAMA = 'ollama',
  PERPLEXITY = 'perplexity',
  GROK = 'grok',
  OLANE = 'olane'
}
```

### HostModelProvider

Where AI models are hosted.

```typescript
enum HostModelProvider {
  OLANE = 'olane', // Hosted on Olane infrastructure
  LOCAL = 'local'  // Hosted locally (e.g., Ollama)
}
```

### IntelligenceStorageKeys

Keys used for secure storage.

```typescript
enum IntelligenceStorageKeys {
  MODEL_PROVIDER_PREFERENCE = 'model-provider-preference',
  HOSTING_PROVIDER_PREFERENCE = 'hosting-provider-preference',
  API_KEY_SUFFIX = 'api-key',
  ACCESS_TOKEN = 'access-token',
  OLANE_ADDRESS = 'olane-address'
}
```
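
The stored key names listed under Secure Storage appear to combine the provider name with `API_KEY_SUFFIX`. A small sketch of that convention, assuming both enums are exported from the package entry point (the helper itself is illustrative, not part of the package):

```typescript
import { LLMProviders, IntelligenceStorageKeys } from '@olane/o-intelligence';

// e.g. 'anthropic' + 'api-key' -> 'anthropic-api-key'
function storageKeyFor(provider: LLMProviders): string {
  return `${provider}-${IntelligenceStorageKeys.API_KEY_SUFFIX}`;
}

console.log(storageKeyFor(LLMProviders.ANTHROPIC)); // 'anthropic-api-key'
```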

## Architecture {#architecture}

`o-intelligence` follows the **Coordinator + Specialists** pattern:

```
┌──────────────────────────────┐
│ IntelligenceTool             │
│ o://intelligence             │
│ (Router/Coordinator)         │
│                              │
│ Responsibilities:            │
│ • Provider selection         │
│ • API key management         │
│ • Request routing            │
└──────────────────────────────┘
            ⬇ manages
┌─────────────┬───────────┬───────────┬────────────┬──────────┬────────────┐
⬇             ⬇           ⬇           ⬇            ⬇          ⬇
┌─────────────┐ ┌─────────┐ ┌─────────┐ ┌──────────┐ ┌──────┐ ┌────────┐
│ Anthropic   │ │ OpenAI  │ │ Ollama  │ │Perplexity│ │ Grok │ │ Gemini │
│o://anthropic│ │o://     │ │o://     │ │o://sonar │ │o://  │ │o://    │
│             │ │openai   │ │ollama   │ │          │ │grok  │ │gemini  │
│Tools:       │ │         │ │         │ │          │ │      │ │        │
│• completion │ │Same     │ │Same     │ │Same      │ │Same  │ │Same    │
│• generate   │ │tools    │ │tools    │ │tools     │ │tools │ │tools   │
│• list_models│ │         │ │         │ │          │ │      │ │        │
│• model_info │ │         │ │         │ │          │ │      │ │        │
│• status     │ │         │ │         │ │          │ │      │ │        │
└─────────────┘ └─────────┘ └─────────┘ └──────────┘ └──────┘ └────────┘
```

**Design Benefits:**
- **Unified Interface**: Single API for all providers
- **Provider Abstraction**: Switch providers without code changes
- **Automatic Fallback**: Can implement multi-provider strategies
- **Secure Credentials**: API keys managed centrally via `o://secure`
- **Extensible**: Add new providers by creating child nodes

## Troubleshooting {#troubleshooting}

### Error: API key is required

**Cause:** No API key found in environment variables or secure storage

**Solution:** Set the appropriate environment variable:
```bash
export ANTHROPIC_API_KEY="sk-ant-..."
# or
export OPENAI_API_KEY="sk-..."
```

Or configure interactively:
```typescript
await intelligence.use(new oAddress('o://intelligence'), {
  method: 'configure',
  params: {
    modelProvider: 'anthropic'
  }
});
// Will prompt for API key
```

---

### Error: Invalid model provider choice

**Cause:** `MODEL_PROVIDER_CHOICE` environment variable set to an invalid value

**Solution:** Use a valid provider:
```bash
export MODEL_PROVIDER_CHOICE="anthropic" # anthropic, openai, ollama, perplexity, grok
```

---

### Error: Provider API is not accessible

**Cause:** Network issue or invalid API key

**Solution:** Check API key validity and network connectivity:
```typescript
// Test provider status
const status = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'status',
  params: {
    apiKey: process.env.ANTHROPIC_API_KEY
  }
});

console.log(status.result.data);
```

---

### Error: Model not found

**Cause:** Specified model ID doesn't exist for the provider

**Solution:** List available models:
```typescript
const models = await intelligence.use(new oAddress('o://anthropic'), {
  method: 'list_models',
  params: {}
});

console.log(models.result.data.models.map(m => m.id));
```

---

### Ollama Connection Failed

**Cause:** Ollama is not running locally

**Solution:** Start the Ollama service:
```bash
# Install Ollama (if not installed)
curl -fsSL https://ollama.ai/install.sh | sh

# Start Ollama service
ollama serve

# Pull a model
ollama pull llama2
```

---

### High Token Usage

**Cause:** Conversation history growing too large

**Solution:** Limit conversation history:
```typescript
// Keep only the last N messages
// (declare the history with `let` so it can be reassigned)
const MAX_HISTORY = 10;
if (conversationHistory.length > MAX_HISTORY) {
  conversationHistory = conversationHistory.slice(-MAX_HISTORY);
}
```

Or use the `max_tokens` parameter:
```typescript
await intelligence.use(new oAddress('o://anthropic'), {
  method: 'completion',
  params: {
    messages: conversationHistory,
    max_tokens: 500 // Limit response length
  }
});
```

## Dependencies {#dependencies}

**Peer Dependencies:**
- `@olane/o-core@^0.7.2` - Core Olane types and utilities
- `@olane/o-config@^0.7.2` - Configuration management
- `@olane/o-protocol@^0.7.2` - Protocol definitions
- `@olane/o-tool@^0.7.2` - Tool base classes
- `@olane/o-lane@^0.7.2` - Lane capability loop
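
npm 7+ installs peer dependencies automatically; on older npm versions they can be added explicitly alongside the package, with the versions listed above:

```bash
npm install @olane/o-core@^0.7.2 @olane/o-config@^0.7.2 \
  @olane/o-protocol@^0.7.2 @olane/o-tool@^0.7.2 @olane/o-lane@^0.7.2
```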

**Required Nodes:**
- `o://secure` - Secure credential storage
- `o://human` - Human interaction for prompts (optional)
- `o://setup` - Setup and configuration (optional)

**External APIs:**
- Anthropic API: `https://api.anthropic.com/v1/`
- OpenAI API: `https://api.openai.com/v1/`
- Ollama: `http://localhost:11434` (local)
- Perplexity API: Provider-specific endpoint
- Grok API: Provider-specific endpoint
- Gemini API: `https://generativelanguage.googleapis.com/v1/`

## Package Information {#package-information}

- **Name**: `@olane/o-intelligence`
- **Version**: 0.7.3
- **License**: MIT OR Apache-2.0
- **Type**: ESM (ES Module)
- **Repository**: https://github.com/olane-labs/olane

## Related {#related}

- **Concept**: [Tools, Nodes, and Applications](/concepts/tools-nodes-applications)
- **Package**: [@olane/o-lane - Lane Capability Loop](/packages/o-lane)
- **Package**: [@olane/o-node - Node Foundation](/packages/o-node)
- **Package**: [@olane/o-tool - Tool Base Classes](/packages/o-tool)
- **Node**: [o://secure - Secure Storage](/nodes/secure)
- **Node**: [o://human - Human Interaction](/nodes/human)

## Contributing {#contributing}

### Adding a New Provider

To add a new LLM provider:

1. Create a new provider tool (e.g., `new-provider-intelligence.tool.ts`):

```typescript
import { oLaneTool } from '@olane/o-lane';
import { oAddress, oRequest } from '@olane/o-core';
import { ToolResult } from '@olane/o-tool';
// Assumed import location for oNodeToolConfig; adjust to wherever
// your Olane version exports it
import { oNodeToolConfig } from '@olane/o-config';
import { LLM_PARAMS } from './methods/llm.methods.js';

export class NewProviderIntelligenceTool extends oLaneTool {
  private defaultModel = 'model-name';
  private apiKey: string = process.env.NEW_PROVIDER_API_KEY || '';

  constructor(config: oNodeToolConfig) {
    super({
      ...config,
      address: new oAddress('o://new-provider'),
      description: 'Intelligence tool using New Provider',
      methods: LLM_PARAMS,
      dependencies: []
    });
  }

  async _tool_completion(request: oRequest): Promise<ToolResult> {
    // Implement completion logic
  }

  async _tool_generate(request: oRequest): Promise<ToolResult> {
    // Implement generation logic
  }

  async _tool_list_models(request: oRequest): Promise<ToolResult> {
    // Implement model listing
  }

  async _tool_model_info(request: oRequest): Promise<ToolResult> {
    // Implement model info retrieval
  }

  async _tool_status(request: oRequest): Promise<ToolResult> {
    // Implement status check
  }
}
```

2. Update the `LLMProviders` enum:

```typescript
export enum LLMProviders {
  // ... existing providers
  NEW_PROVIDER = 'new-provider'
}
```

3. Register in `IntelligenceTool.initialize()`:

```typescript
async initialize(): Promise<void> {
  await super.initialize();
  // ... existing providers

  const newProviderTool = new NewProviderIntelligenceTool({
    ...this.config,
    parent: this.address,
    leader: this.leader
  });
  await newProviderTool.start();
  this.addChildNode(newProviderTool);
}
```

4. Export in `index.ts`:

```typescript
export * from './new-provider-intelligence.tool.js';
```

5. Update `getProviderApiKey()` in `o-intelligence.tool.ts` to include the new provider's environment variable.
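
The exact shape of `getProviderApiKey()` isn't shown here; conceptually the change adds one environment-variable mapping entry, along these lines (a hypothetical sketch only - the real method also consults `o://secure` and `o://human`):

```typescript
// Map each provider name to its environment variable value
const ENV_VAR_BY_PROVIDER: Record<string, string | undefined> = {
  anthropic: process.env.ANTHROPIC_API_KEY,
  openai: process.env.OPENAI_API_KEY,
  // ... existing providers
  'new-provider': process.env.NEW_PROVIDER_API_KEY, // <- add this entry
};
```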

## Best Practices {#best-practices}

1. **Always handle API keys securely** - Never commit keys to version control
2. **Set `max_tokens` appropriately** - Control costs and response length
3. **Implement error handling** - API calls can fail, so always catch errors
4. **Use system prompts** - Guide model behavior for consistent results
5. **Monitor token usage** - Track usage to optimize costs
6. **Cache responses** - Consider caching for repeated prompts (see the sketch after this list)
7. **Test locally first** - Use Ollama for development before using paid APIs
8. **Version your prompts** - Track prompt changes like code changes
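
For example, practice 6 can be as simple as an in-memory map in front of the router. A minimal sketch, assuming the `intelligence` instance from Quick Start; production code would also want TTLs and a size limit:

```typescript
const promptCache = new Map<string, string>();

async function cachedPrompt(prompt: string): Promise<string> {
  // Return the cached answer for an identical prompt
  const hit = promptCache.get(prompt);
  if (hit !== undefined) return hit;

  const response = await intelligence.use(new oAddress('o://intelligence'), {
    method: 'prompt',
    params: { prompt }
  });

  const message = response.result.data.message;
  promptCache.set(prompt, message);
  return message;
}
```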
## License {#license}

Licensed under (MIT OR Apache-2.0) - Copyright (c) oLane Inc.

package/dist/src/anthropic-intelligence.tool.d.ts
CHANGED

@@ -26,202 +26,5 @@ export declare class AnthropicIntelligenceTool extends oLaneTool {
     * Check API status
     */
    _tool_status(request: oRequest): Promise<ToolResult>;
-   /**
-    * Parameter definitions for completion
-    */
-   _params_completion(): {
-     type: string;
-     properties: {
-       model: {
-         type: string;
-         description: string;
-         default: string;
-       };
-       messages: {
-         type: string;
-         description: string;
-         items: {
-           type: string;
-           properties: {
-             role: {
-               type: string;
-               enum: string[];
-               description: string;
-             };
-             content: {
-               oneOf: ({
-                 type: string;
-                 description: string;
-                 items?: undefined;
-               } | {
-                 type: string;
-                 description: string;
-                 items: {
-                   type: string;
-                   properties: {
-                     type: {
-                       type: string;
-                       enum: string[];
-                     };
-                     text: {
-                       type: string;
-                     };
-                     source: {
-                       type: string;
-                       properties: {
-                         type: {
-                           type: string;
-                           enum: string[];
-                         };
-                         media_type: {
-                           type: string;
-                         };
-                         data: {
-                           type: string;
-                         };
-                       };
-                     };
-                   };
-                 };
-               })[];
-             };
-           };
-           required: string[];
-         };
-       };
-       system: {
-         type: string;
-         description: string;
-       };
-       max_tokens: {
-         type: string;
-         description: string;
-         default: number;
-       };
-       temperature: {
-         type: string;
-         description: string;
-         minimum: number;
-         maximum: number;
-       };
-       top_p: {
-         type: string;
-         description: string;
-         minimum: number;
-         maximum: number;
-       };
-       top_k: {
-         type: string;
-         description: string;
-         minimum: number;
-       };
-       stop_sequences: {
-         type: string;
-         description: string;
-         items: {
-           type: string;
-         };
-       };
-       metadata: {
-         type: string;
-         description: string;
-         properties: {
-           user_id: {
-             type: string;
-             description: string;
-           };
-         };
-       };
-     };
-     required: string[];
-   };
-   /**
-    * Parameter definitions for generate
-    */
-   _params_generate(): {
-     type: string;
-     properties: {
-       model: {
-         type: string;
-         description: string;
-         default: string;
-       };
-       prompt: {
-         type: string;
-         description: string;
-       };
-       system: {
-         type: string;
-         description: string;
-       };
-       max_tokens: {
-         type: string;
-         description: string;
-         default: number;
-       };
-       temperature: {
-         type: string;
-         description: string;
-         minimum: number;
-         maximum: number;
-       };
-       top_p: {
-         type: string;
-         description: string;
-         minimum: number;
-         maximum: number;
-       };
-       top_k: {
-         type: string;
-         description: string;
-         minimum: number;
-       };
-       stop_sequences: {
-         type: string;
-         description: string;
-         items: {
-           type: string;
-         };
-       };
-       metadata: {
-         type: string;
-         description: string;
-         properties: {
-           user_id: {
-             type: string;
-             description: string;
-           };
-         };
-       };
-     };
-     required: string[];
-   };
-   /**
-    * Parameter definitions for list_models
-    */
-   _params_list_models(): {
-     type: string;
-     properties: {};
-   };
-   /**
-    * Parameter definitions for model_info
-    */
-   _params_model_info(): {
-     type: string;
-     properties: {
-       model: {
-         type: string;
-         description: string;
-         default: string;
-       };
-     };
-   };
-   /**
-    * Parameter definitions for status
-    */
-   _params_status(): {
-     type: string;
-     properties: {};
-   };
 }
 //# sourceMappingURL=anthropic-intelligence.tool.d.ts.map

package/dist/src/anthropic-intelligence.tool.d.ts.map
CHANGED

@@ -1 +1 @@
- {"version":3,"file":"anthropic-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/anthropic-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAAe,eAAe,EAAE,MAAM,eAAe,CAAC;AAkG7D,qBAAa,yBAA0B,SAAQ,SAAS;IACtD,OAAO,CAAC,YAAY,CAAgC;IACpD,OAAO,CAAC,MAAM,CAA+C;gBAEjD,MAAM,EAAE,eAAe;IAWnC;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAqE9D;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA+E5D;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4C/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAoD9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;
+ {"version":3,"file":"anthropic-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/anthropic-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAAe,eAAe,EAAE,MAAM,eAAe,CAAC;AAkG7D,qBAAa,yBAA0B,SAAQ,SAAS;IACtD,OAAO,CAAC,YAAY,CAAgC;IACpD,OAAO,CAAC,MAAM,CAA+C;gBAEjD,MAAM,EAAE,eAAe;IAWnC;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAqE9D;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA+E5D;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4C/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAoD9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAuC3D"}

package/dist/src/anthropic-intelligence.tool.js
CHANGED

@@ -266,213 +266,4 @@ export class AnthropicIntelligenceTool extends oLaneTool {
        };
      }
    }
-   /**
-    * Parameter definitions for completion
-    */
-   _params_completion() {
-     return {
-       type: 'object',
-       properties: {
-         model: {
-           type: 'string',
-           description: 'The model to use for completion',
-           default: this.defaultModel,
-         },
-         messages: {
-           type: 'array',
-           description: 'Array of messages in the conversation',
-           items: {
-             type: 'object',
-             properties: {
-               role: {
-                 type: 'string',
-                 enum: ['user', 'assistant'],
-                 description: 'The role of the message sender',
-               },
-               content: {
-                 oneOf: [
-                   {
-                     type: 'string',
-                     description: 'Text content',
-                   },
-                   {
-                     type: 'array',
-                     description: 'Array of content blocks',
-                     items: {
-                       type: 'object',
-                       properties: {
-                         type: {
-                           type: 'string',
-                           enum: ['text', 'image'],
-                         },
-                         text: {
-                           type: 'string',
-                         },
-                         source: {
-                           type: 'object',
-                           properties: {
-                             type: {
-                               type: 'string',
-                               enum: ['base64'],
-                             },
-                             media_type: {
-                               type: 'string',
-                             },
-                             data: {
-                               type: 'string',
-                             },
-                           },
-                         },
-                       },
-                     },
-                   },
-                 ],
-               },
-             },
-             required: ['role', 'content'],
-           },
-         },
-         system: {
-           type: 'string',
-           description: 'System message to set the behavior of the assistant',
-         },
-         max_tokens: {
-           type: 'number',
-           description: 'Maximum number of tokens to generate',
-           default: 1000,
-         },
-         temperature: {
-           type: 'number',
-           description: 'Controls randomness in the response',
-           minimum: 0,
-           maximum: 1,
-         },
-         top_p: {
-           type: 'number',
-           description: 'Controls diversity via nucleus sampling',
-           minimum: 0,
-           maximum: 1,
-         },
-         top_k: {
-           type: 'number',
-           description: 'Controls diversity via top-k sampling',
-           minimum: 0,
-         },
-         stop_sequences: {
-           type: 'array',
-           description: 'Sequences that will stop generation',
-           items: {
-             type: 'string',
-           },
-         },
-         metadata: {
-           type: 'object',
-           description: 'Optional metadata',
-           properties: {
-             user_id: {
-               type: 'string',
-               description: 'User ID for tracking',
-             },
-           },
-         },
-       },
-       required: ['messages'],
-     };
-   }
-   /**
-    * Parameter definitions for generate
-    */
-   _params_generate() {
-     return {
-       type: 'object',
-       properties: {
-         model: {
-           type: 'string',
-           description: 'The model to use for generation',
-           default: this.defaultModel,
-         },
-         prompt: {
-           type: 'string',
-           description: 'The prompt to generate text from',
-         },
-         system: {
-           type: 'string',
-           description: 'System message to set the behavior of the assistant',
-         },
-         max_tokens: {
-           type: 'number',
-           description: 'Maximum number of tokens to generate',
-           default: 1000,
-         },
-         temperature: {
-           type: 'number',
-           description: 'Controls randomness in the response',
-           minimum: 0,
-           maximum: 1,
-         },
-         top_p: {
-           type: 'number',
-           description: 'Controls diversity via nucleus sampling',
-           minimum: 0,
-           maximum: 1,
-         },
-         top_k: {
-           type: 'number',
-           description: 'Controls diversity via top-k sampling',
-           minimum: 0,
-         },
-         stop_sequences: {
-           type: 'array',
-           description: 'Sequences that will stop generation',
-           items: {
-             type: 'string',
-           },
-         },
-         metadata: {
-           type: 'object',
-           description: 'Optional metadata',
-           properties: {
-             user_id: {
-               type: 'string',
-               description: 'User ID for tracking',
-             },
-           },
-         },
-       },
-       required: ['prompt'],
-     };
-   }
-   /**
-    * Parameter definitions for list_models
-    */
-   _params_list_models() {
-     return {
-       type: 'object',
-       properties: {},
-     };
-   }
-   /**
-    * Parameter definitions for model_info
-    */
-   _params_model_info() {
-     return {
-       type: 'object',
-       properties: {
-         model: {
-           type: 'string',
-           description: 'The model to get information about',
-           default: this.defaultModel,
-         },
-       },
-     };
-   }
-   /**
-    * Parameter definitions for status
-    */
-   _params_status() {
-     return {
-       type: 'object',
-       properties: {},
-     };
-   }
 }

package/package.json
CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@olane/o-intelligence",
-   "version": "0.7.1",
+   "version": "0.7.3",
    "type": "module",
    "main": "dist/src/index.js",
    "types": "dist/src/index.d.ts",
@@ -33,7 +33,7 @@
      "url": "git+https://github.com/olane-labs/olane.git"
    },
    "author": "oLane Inc.",
-   "license": "
+   "license": "(MIT OR Apache-2.0)",
    "description": "oLane intelligence tool",
    "devDependencies": {
      "@eslint/eslintrc": "^3.3.1",
@@ -56,11 +56,11 @@
      "typescript": "5.4.5"
    },
    "peerDependencies": {
-     "@olane/o-config": "^0.7.
-     "@olane/o-core": "^0.7.
-     "@olane/o-
-     "@olane/o-
-     "@olane/o-
+     "@olane/o-config": "^0.7.2",
+     "@olane/o-core": "^0.7.2",
+     "@olane/o-lane": "^0.7.2",
+     "@olane/o-protocol": "^0.7.2",
+     "@olane/o-tool": "^0.7.2"
    },
    "dependencies": {
      "debug": "^4.4.1",
|