@majkapp/plugin-kit 3.5.4 → 3.5.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +67 -0
- package/bin/promptable-cli.js +6 -0
- package/dist/generator/cli.js +0 -0
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/majk-interface-types.d.ts +380 -0
- package/dist/majk-interface-types.d.ts.map +1 -1
- package/dist/majk-interface-types.js +15 -0
- package/dist/transports.d.ts +59 -0
- package/dist/transports.d.ts.map +1 -0
- package/dist/transports.js +171 -0
- package/docs/AI.md +830 -0
- package/docs/FULL.md +148 -6
- package/docs/INDEX.md +1 -0
- package/package.json +1 -1
package/docs/AI.md
ADDED

@@ -0,0 +1,830 @@

# AI API - Complete Guide

Build AI-powered plugins with MAJK's unified AI interface. Access multiple AI providers (OpenAI, Anthropic, Bedrock, etc.) with a consistent, provider-agnostic API.

## Table of Contents

- [Quick Start](#quick-start)
- [Core Concepts](#core-concepts)
- [Provider Management](#provider-management)
- [LLM Operations](#llm-operations)
- [Streaming Responses](#streaming-responses)
- [Advanced Features](#advanced-features)
- [Error Handling](#error-handling)
- [Best Practices](#best-practices)
- [Complete Examples](#complete-examples)

## Quick Start

### Basic LLM Usage

```typescript
import { definePlugin, PluginContext } from '@majkapp/plugin-kit';

export default definePlugin('ai-plugin', 'AI Plugin', '1.0.0')
  .pluginRoot(__dirname)

  .onReady(async (ctx: PluginContext) => {
    // Get the default LLM
    const llm = ctx.majk.ai.getDefaultLLM();

    // Send a prompt
    const result = await llm.prompt({
      messages: [
        { role: 'system', content: 'You are a helpful coding assistant' },
        { role: 'user', content: 'Explain what a promise is in JavaScript' }
      ]
    });

    ctx.logger.info(`Response: ${result.content}`);
    ctx.logger.info(`Tokens used: ${result.usage?.totalTokens}`);
  })

  .build();
```

### Provider-Specific Usage

```typescript
.onReady(async (ctx) => {
  // Use a specific provider
  const bedrock = ctx.majk.ai.getProvider('bedrock');

  if (bedrock) {
    const claude = bedrock.getLLM('anthropic.claude-3-5-sonnet-20241022-v2:0');

    const result = await claude.prompt({
      messages: [{ role: 'user', content: 'Hello!' }]
    });
  }
})
```

## Core Concepts

### AI Providers

Providers are plugins that offer AI capabilities. Common providers:

- **bedrock**: AWS Bedrock (Claude, etc.)
- **openai**: OpenAI GPT models
- **anthropic**: Anthropic Claude direct
- **local**: Local LLM servers

Each provider has:

- Unique ID and name
- Declared capabilities
- One or more available models
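
Together, that amounts to roughly the shape below. This is an illustrative sketch inferred from the examples in this guide, not the package's actual type definitions:

```typescript
// Illustrative shape only, inferred from the usage shown in this guide;
// the real exported types in @majkapp/plugin-kit may differ.
interface AIProviderSketch {
  id: string;
  name: string;
  capabilities: Record<string, boolean>;
  listModels(): Promise<Array<{ id: string }>>;
  getLLM(modelId?: string): LLMInterface; // see "LLM Interface" below
}
```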

### LLM Interface

An LLM represents a specific model from a provider. It offers:

- `prompt()` - Standard prompting
- `promptStream()` - Streaming responses
- `promptForJson()` - Structured JSON output
- `functionCall()` - Function calling support
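
As a rough sketch, with parameter and result shapes inferred from the examples later in this guide (consult the exported typings for the real signatures):

```typescript
// Illustrative signatures only; not copied from the package's definitions.
interface LLMSketch {
  prompt(params: {
    messages: AIMessage[];
    temperature?: number;
    maxTokens?: number;
    stopSequences?: string[];
  }): Promise<{ content: string; usage?: { totalTokens: number } }>;

  promptStream(params: { messages: AIMessage[] }): AsyncIterable<{ type: string; content?: string }>;

  promptForJson<T>(params: { messages: AIMessage[]; schema: object }): Promise<T>;

  functionCall(params: {
    messages: AIMessage[];
    functions: AIFunctionDefinition[];
    functionCall?: 'auto';
  }): Promise<{ functionCall?: { name: string; arguments: Record<string, unknown> } }>;
}
```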

### Capabilities

Providers declare what they support:

- `llm` - Core language model (always true)
- `streaming` - Real-time response streaming
- `functionCalling` - Function/tool invocation
- `structuredOutput` - JSON schema enforcement
- `imageGeneration` - DALL-E, Stable Diffusion, etc.
- `embeddings` - Text embeddings for search
- `audioTranscription` - Speech-to-text
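
These map naturally onto a flags object, roughly like this (a sketch; the real type may differ):

```typescript
// Illustrative capability flags, mirroring the list above.
interface AICapabilitiesSketch {
  llm: true;                    // always true
  streaming?: boolean;
  functionCalling?: boolean;
  structuredOutput?: boolean;
  imageGeneration?: boolean;
  embeddings?: boolean;
  audioTranscription?: boolean;
}
```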

## Provider Management

### List Available Providers

```typescript
const providers = ctx.majk.ai.listProviders();

for (const provider of providers) {
  ctx.logger.info(`Provider: ${provider.name} (${provider.id})`);
  ctx.logger.info(`  Capabilities:`, provider.capabilities);

  const models = await provider.listModels();
  ctx.logger.info(`  Models: ${models.map(m => m.id).join(', ')}`);
}
```

### Get Specific Provider

```typescript
const openai = ctx.majk.ai.getProvider('openai');

if (!openai) {
  ctx.logger.warn('OpenAI provider not available');
  return;
}

// Use OpenAI
const gpt4 = openai.getLLM('gpt-4');
```

### Default Provider

```typescript
// Get current default
const defaultProvider = ctx.majk.ai.getDefaultProvider();
ctx.logger.info(`Default: ${defaultProvider.name}`);

// Set new default
await ctx.majk.ai.setDefaultProvider('bedrock');

// Convenience: use default provider + default model
const llm = ctx.majk.ai.getDefaultLLM();
```

### Query by Capability

```typescript
// Find all providers that can generate images
const imageProviders = ctx.majk.ai.getProvidersWithCapability('imageGeneration');

if (imageProviders.length > 0) {
  ctx.logger.info(`Found ${imageProviders.length} image generation provider(s)`);
}

// Find streaming-capable providers
const streamingProviders = ctx.majk.ai.getProvidersWithCapability('streaming');
```

### Check Provider Status

```typescript
const status = await ctx.majk.ai.getProviderStatus('bedrock');

if (!status.available) {
  ctx.logger.error('Bedrock not available:', status.lastError);
}

if (!status.authenticated) {
  ctx.logger.error('Bedrock not authenticated');
}

if (status.rateLimitRemaining !== undefined) {
  ctx.logger.info(`Rate limit remaining: ${status.rateLimitRemaining}`);
}
```

## LLM Operations

### Basic Prompting

```typescript
const llm = ctx.majk.ai.getDefaultLLM();

const result = await llm.prompt({
  messages: [
    { role: 'system', content: 'You are a helpful assistant' },
    { role: 'user', content: 'What is TypeScript?' }
  ],
  temperature: 0.7,        // Creativity (0-1)
  maxTokens: 500,          // Max response length
  stopSequences: ['###']   // Stop generation at these strings
});

// Access response
console.log(result.content);
console.log(`Tokens: ${result.usage?.totalTokens}`);
console.log(`Stop reason: ${result.stopReason}`);
```

### Multi-turn Conversations

```typescript
const messages: AIMessage[] = [
  { role: 'system', content: 'You are a coding tutor' }
];

// First exchange
messages.push({ role: 'user', content: 'What is a closure?' });

const response1 = await llm.prompt({ messages });
messages.push({ role: 'assistant', content: response1.content });

// Second exchange
messages.push({ role: 'user', content: 'Can you show an example?' });

const response2 = await llm.prompt({ messages });
messages.push({ role: 'assistant', content: response2.content });
```

### Structured JSON Output

```typescript
// Define a schema
const schema = {
  type: 'object',
  properties: {
    sentiment: {
      type: 'string',
      enum: ['positive', 'negative', 'neutral']
    },
    confidence: {
      type: 'number',
      minimum: 0,
      maximum: 1
    },
    summary: {
      type: 'string'
    }
  },
  required: ['sentiment', 'confidence']
};

// Get structured response
interface SentimentResult {
  sentiment: 'positive' | 'negative' | 'neutral';
  confidence: number;
  summary?: string;
}

const result = await llm.promptForJson<SentimentResult>({
  messages: [
    {
      role: 'user',
      content: 'Analyze the sentiment of: "This product is amazing!"'
    }
  ],
  schema
});

console.log(result.sentiment); // Type-safe access
console.log(result.confidence);
```

## Streaming Responses

Stream responses in real-time:

```typescript
const stream = llm.promptStream({
  messages: [
    { role: 'user', content: 'Write a short story about a robot' }
  ]
});

let fullResponse = '';

for await (const chunk of stream) {
  switch (chunk.type) {
    case 'message_start':
      ctx.logger.info('Stream started');
      break;

    case 'content_delta':
      // Incremental content
      process.stdout.write(chunk.content);
      fullResponse += chunk.content;
      break;

    case 'content_end':
      ctx.logger.info('\nContent complete');
      break;

    case 'message_end':
      // Final message with metadata
      ctx.logger.info(`Tokens: ${chunk.message?.usage?.totalTokens}`);
      break;
  }
}

ctx.logger.info(`Full response: ${fullResponse}`);
```

### Streaming with Progress Tracking

```typescript
let tokenCount = 0;
let chunkCount = 0;

for await (const chunk of stream) {
  if (chunk.type === 'content_delta') {
    chunkCount++;
    tokenCount += chunk.content?.split(' ').length || 0;

    // Update UI or progress indicator
    ctx.logger.debug(`Progress: ${chunkCount} chunks, ~${tokenCount} tokens`);
  }
}
```

## Advanced Features

### Function Calling

Let the LLM invoke functions:

```typescript
// Define available functions
const functions: AIFunctionDefinition[] = [
  {
    name: 'getWeather',
    description: 'Get current weather for a location',
    parameters: {
      type: 'object',
      properties: {
        location: { type: 'string', description: 'City name' },
        unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
      },
      required: ['location']
    }
  },
  {
    name: 'searchDatabase',
    description: 'Search the database for records',
    parameters: {
      type: 'object',
      properties: {
        query: { type: 'string' },
        limit: { type: 'number', default: 10 }
      },
      required: ['query']
    }
  }
];

// Make request with functions
const result = await llm.functionCall({
  messages: [
    { role: 'user', content: 'What is the weather in Paris?' }
  ],
  functions,
  functionCall: 'auto' // Let model decide
});

// Check if model wants to call a function
if (result.functionCall) {
  const { name, arguments: args } = result.functionCall;

  ctx.logger.info(`Model wants to call: ${name}`);
  ctx.logger.info(`With arguments:`, args);

  // Execute the function
  let functionResult;

  if (name === 'getWeather') {
    functionResult = await getWeather(args.location, args.unit);
  } else if (name === 'searchDatabase') {
    functionResult = await searchDatabase(args.query, args.limit);
  }

  // Send result back to model
  const finalResult = await llm.prompt({
    messages: [
      ...result.messages,
      {
        role: 'assistant',
        content: JSON.stringify(result.functionCall)
      },
      {
        role: 'user',
        content: JSON.stringify(functionResult)
      }
    ]
  });

  ctx.logger.info(`Final response: ${finalResult.content}`);
}
```

### Image Generation

```typescript
const imageProviders = ctx.majk.ai.getProvidersWithCapability('imageGeneration');

if (imageProviders.length === 0) {
  ctx.logger.warn('No image generation providers available');
  return;
}

const provider = imageProviders[0];

const imageResult = await provider.generateImage?.({
  prompt: 'A serene mountain landscape at sunset',
  negativePrompt: 'people, buildings, cars',
  size: '1024x1024',
  n: 2, // Generate 2 images
  quality: 'hd'
});

if (imageResult) {
  ctx.logger.info(`Generated ${imageResult.images.length} images`);

  for (const imageUrl of imageResult.images) {
    ctx.logger.info(`Image URL: ${imageUrl}`);
  }

  if (imageResult.revisedPrompt) {
    ctx.logger.info(`Revised prompt: ${imageResult.revisedPrompt}`);
  }
}
```

### Text Embeddings

```typescript
const embeddingProviders = ctx.majk.ai.getProvidersWithCapability('embeddings');

if (embeddingProviders.length > 0) {
  const provider = embeddingProviders[0];

  const embedding = await provider.generateEmbedding?.(
    'What is the meaning of life?'
  );

  if (embedding) {
    ctx.logger.info(`Embedding dimension: ${embedding.length}`);
    ctx.logger.info(`First 5 values: ${embedding.slice(0, 5)}`);

    // Use for semantic search, clustering, etc.
  }
}
```
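
"Use for semantic search" typically means comparing embeddings with cosine similarity. This helper is plain arithmetic and independent of any provider:

```typescript
// Cosine similarity between two equal-length embedding vectors.
// Values near 1 indicate semantically similar texts.
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}
```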

### Audio Transcription

```typescript
import { promises as fs } from 'fs';

const transcriptionProviders = ctx.majk.ai.getProvidersWithCapability('audioTranscription');

if (transcriptionProviders.length > 0) {
  const provider = transcriptionProviders[0];

  const audioBuffer = await fs.readFile('recording.mp3');

  const transcription = await provider.transcribeAudio?.({
    audio: audioBuffer,
    format: 'mp3',
    language: 'en',
    timestamps: true
  });

  if (transcription) {
    ctx.logger.info(`Transcription: ${transcription.text}`);
    ctx.logger.info(`Language: ${transcription.language}`);
    ctx.logger.info(`Confidence: ${transcription.confidence}`);

    if (transcription.timestamps) {
      for (const segment of transcription.timestamps) {
        ctx.logger.info(`${segment.start}s - ${segment.end}s: ${segment.word}`);
      }
    }
  }
}
```

## Error Handling

### Try-Catch Pattern

```typescript
import { AIProviderError } from '@majkapp/plugin-kit';

try {
  const llm = ctx.majk.ai.getDefaultLLM();

  const result = await llm.prompt({
    messages: [{ role: 'user', content: 'Hello' }]
  });

} catch (error) {
  if (error instanceof AIProviderError) {
    ctx.logger.error(`AI Error [${error.code}]: ${error.message}`);
    ctx.logger.error(`Provider: ${error.providerId}`);

    if (error.code === 'RATE_LIMIT_EXCEEDED') {
      ctx.logger.warn('Rate limit hit, waiting before retry...');
      await new Promise(resolve => setTimeout(resolve, 60000));
    }

    if (error.code === 'AUTHENTICATION_FAILED') {
      ctx.logger.error('Authentication failed - check credentials');
    }
  } else {
    ctx.logger.error('Unexpected error:', error);
  }
}
```

### Error Codes

Common `AIProviderError` codes:

- `PROVIDER_NOT_FOUND` - Provider doesn't exist
- `NO_PROVIDERS` - No providers registered
- `CAPABILITY_NOT_SUPPORTED` - Feature not available
- `MODEL_NOT_FOUND` - Model doesn't exist
- `AUTHENTICATION_FAILED` - Auth error
- `RATE_LIMIT_EXCEEDED` - Rate limit hit
- `REQUEST_FAILED` - General request failure
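
One practical use of these codes is separating retryable failures from permanent ones. An illustrative helper, not part of the kit:

```typescript
// Rate limits and transient request failures are worth retrying;
// the other codes indicate problems a retry won't fix.
function isRetryable(error: AIProviderError): boolean {
  return error.code === 'RATE_LIMIT_EXCEEDED' || error.code === 'REQUEST_FAILED';
}
```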

### Graceful Degradation

```typescript
async function generateWithFallback(ctx: PluginContext, prompt: string): Promise<string> {
  // Try preferred provider first
  try {
    const bedrock = ctx.majk.ai.getProvider('bedrock');
    if (bedrock) {
      const result = await bedrock.getLLM().prompt({
        messages: [{ role: 'user', content: prompt }]
      });
      return result.content;
    }
  } catch (error) {
    ctx.logger.warn('Bedrock failed, trying fallback');
  }

  // Fallback to default
  try {
    const llm = ctx.majk.ai.getDefaultLLM();
    const result = await llm.prompt({
      messages: [{ role: 'user', content: prompt }]
    });
    return result.content;
  } catch (error) {
    throw new Error('All AI providers failed');
  }
}
```

## Best Practices

### 1. Check Provider Availability

```typescript
// ❌ Don't assume providers exist
const llm = ctx.majk.ai.getProvider('openai')!.getLLM();

// ✅ Check availability
const provider = ctx.majk.ai.getProvider('openai');
if (!provider) {
  ctx.logger.warn('OpenAI not available');
  return;
}

const llm = provider.getLLM();
```

### 2. Use Appropriate Temperature

```typescript
// Low temperature (0.0-0.3) for deterministic tasks
const result = await llm.prompt({
  messages: [{ role: 'user', content: 'Calculate 2+2' }],
  temperature: 0.1
});

// High temperature (0.7-1.0) for creative tasks
const story = await llm.prompt({
  messages: [{ role: 'user', content: 'Write a creative story' }],
  temperature: 0.9
});
```

### 3. Limit Token Usage

```typescript
const result = await llm.prompt({
  messages: [{ role: 'user', content: 'Explain quantum computing' }],
  maxTokens: 200 // Limit response length
});

// Monitor usage
if (result.usage) {
  const { inputTokens, outputTokens, totalTokens } = result.usage;
  ctx.logger.info(`Used ${totalTokens} tokens (${inputTokens} in, ${outputTokens} out)`);
}
```

### 4. Cache Responses

```typescript
// Use plugin storage for caching
const cacheKey = `ai-response:${prompt}`;
let response = await ctx.storage.get<string>(cacheKey);

if (!response) {
  const result = await llm.prompt({
    messages: [{ role: 'user', content: prompt }]
  });
  response = result.content;

  // Cache the response (pass a TTL here if the storage API supports one)
  await ctx.storage.set(cacheKey, response);
}

return response;
```
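
Raw prompts make awkward storage keys once they get long. Hashing them first is a common refinement; a sketch using Node's built-in `crypto` module:

```typescript
import { createHash } from 'crypto';

// Derive a short, stable key from an arbitrarily long prompt.
const promptHash = createHash('sha256').update(prompt).digest('hex');
const cacheKey = `ai-response:${promptHash}`;
```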

### 5. Handle Rate Limits

```typescript
async function promptWithRetry(
  ctx: PluginContext,
  llm: LLMInterface,
  params: PromptParams,
  maxRetries = 3
) {
  for (let i = 0; i < maxRetries; i++) {
    try {
      return await llm.prompt(params);
    } catch (error) {
      if (error instanceof AIProviderError && error.code === 'RATE_LIMIT_EXCEEDED') {
        const delay = Math.pow(2, i) * 1000; // Exponential backoff
        ctx.logger.warn(`Rate limited, retrying in ${delay}ms...`);
        await new Promise(resolve => setTimeout(resolve, delay));
      } else {
        throw error;
      }
    }
  }
  throw new Error('Max retries exceeded');
}
```

## Complete Examples

### AI-Powered Tool

```typescript
import { definePlugin, ToolHandler } from '@majkapp/plugin-kit';

const analyzeSentiment: ToolHandler = async (input, ctx) => {
  const { text } = input;

  try {
    const llm = ctx.majk.ai.getDefaultLLM();

    const result = await llm.promptForJson({
      messages: [
        {
          role: 'system',
          content: 'You are a sentiment analysis expert. Respond with JSON only.'
        },
        {
          role: 'user',
          content: `Analyze the sentiment of: "${text}"`
        }
      ],
      schema: {
        type: 'object',
        properties: {
          sentiment: { type: 'string', enum: ['positive', 'negative', 'neutral'] },
          confidence: { type: 'number' },
          keywords: { type: 'array', items: { type: 'string' } }
        },
        required: ['sentiment', 'confidence']
      }
    });

    return {
      success: true,
      analysis: result
    };

  } catch (error) {
    ctx.logger.error('Sentiment analysis failed:', error);
    return {
      success: false,
      error: error.message
    };
  }
};

export default definePlugin('sentiment-analyzer', 'Sentiment Analyzer', '1.0.0')
  .pluginRoot(__dirname)

  .tool('global', {
    name: 'analyzeSentiment',
    description: 'Analyzes text sentiment using AI. Returns sentiment classification and confidence score.',
    inputSchema: {
      type: 'object',
      properties: {
        text: { type: 'string' }
      },
      required: ['text']
    }
  }, analyzeSentiment)

  .build();
```

### Multi-Provider Service

```typescript
import { definePlugin } from '@majkapp/plugin-kit';

export default definePlugin('ai-service', 'AI Service', '1.0.0')
  .pluginRoot(__dirname)

  .apiRoute({
    method: 'POST',
    path: '/api/generate',
    name: 'Generate Content',
    description: 'Generates AI content using the best available provider. Handles provider selection and fallbacks.',
    handler: async (req, res, { majk, logger }) => {
      const { prompt, provider: preferredProvider, model } = req.body;

      if (!prompt) {
        return res.status(400).json({ error: 'Prompt required' });
      }

      try {
        let llm;

        // Try preferred provider if specified
        if (preferredProvider) {
          const provider = majk.ai.getProvider(preferredProvider);
          if (provider) {
            llm = model ? provider.getLLM(model) : provider.getLLM();
          }
        }

        // Fallback to default
        if (!llm) {
          llm = majk.ai.getDefaultLLM();
        }

        const result = await llm.prompt({
          messages: [{ role: 'user', content: prompt }]
        });

        return res.json({
          content: result.content,
          provider: llm.provider,
          model: llm.model,
          usage: result.usage
        });

      } catch (error) {
        logger.error('Generation failed:', error);
        return res.status(500).json({
          error: error.message,
          code: error.code
        });
      }
    }
  })

  .build();
```

### Streaming Chat API

```typescript
export default definePlugin('chat-api', 'Chat API', '1.0.0')
  .pluginRoot(__dirname)

  .apiRoute({
    method: 'POST',
    path: '/api/chat/stream',
    name: 'Stream Chat',
    description: 'Streams chat responses in real-time. Returns server-sent events for progressive display.',
    handler: async (req, res, { majk, logger }) => {
      const { messages } = req.body;

      // Set up SSE headers
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        const llm = majk.ai.getDefaultLLM();
        const stream = llm.promptStream({ messages });

        for await (const chunk of stream) {
          if (chunk.type === 'content_delta') {
            // Write an SSE event (write, not send: send would end the response)
            res.write(`data: ${JSON.stringify({ content: chunk.content })}\n\n`);
          } else if (chunk.type === 'message_end') {
            res.write(`data: ${JSON.stringify({ done: true, usage: chunk.message?.usage })}\n\n`);
          }
        }

        res.write('data: [DONE]\n\n');

      } catch (error) {
        logger.error('Stream failed:', error);
        res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`);
      }

      res.end();
    }
  })

  .build();
```
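
On the client side, `EventSource` only supports GET, so a POST endpoint like this one is usually consumed with `fetch` and a stream reader. A minimal sketch (it assumes the event payloads shown above and ignores SSE events split across chunk boundaries):

```typescript
const response = await fetch('/api/chat/stream', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ messages: [{ role: 'user', content: 'Hi!' }] })
});

const reader = response.body!.getReader();
const decoder = new TextDecoder();
let text = '';

while (true) {
  const { done, value } = await reader.read();
  if (done) break;

  // Each read may contain one or more "data: ..." SSE lines
  for (const line of decoder.decode(value, { stream: true }).split('\n')) {
    if (!line.startsWith('data: ') || line === 'data: [DONE]') continue;
    const event = JSON.parse(line.slice('data: '.length));
    if (event.content) text += event.content;
  }
}

console.log(text);
```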

---

## Summary

The MAJK AI API provides:

✅ **Provider-agnostic** - Works with any AI provider
✅ **Type-safe** - Full TypeScript support
✅ **Streaming** - Real-time responses
✅ **Function calling** - Let LLMs invoke functions
✅ **Structured output** - JSON schema enforcement
✅ **Multi-modal** - Images, audio, embeddings
✅ **Error handling** - Standardized error types
✅ **Capability discovery** - Query by features

Build AI-powered plugins with confidence!