@push.rocks/smartai 0.5.4 → 0.5.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +8 -8
- package/readme.md +274 -104
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.5.4",
+  "version": "0.5.5",
   "private": false,
   "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
   "main": "dist_ts/index.js",
@@ -9,24 +9,24 @@
   "author": "Task Venture Capital GmbH",
   "license": "MIT",
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.
-    "@git.zone/tsbundle": "^2.
+    "@git.zone/tsbuild": "^2.6.4",
+    "@git.zone/tsbundle": "^2.5.1",
     "@git.zone/tsrun": "^1.3.3",
-    "@git.zone/tstest": "^
+    "@git.zone/tstest": "^2.3.2",
     "@push.rocks/qenv": "^6.1.0",
     "@push.rocks/tapbundle": "^6.0.3",
     "@types/node": "^22.15.17"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.
+    "@anthropic-ai/sdk": "^0.57.0",
     "@push.rocks/smartarray": "^1.1.0",
-    "@push.rocks/smartfile": "^11.2.
+    "@push.rocks/smartfile": "^11.2.5",
     "@push.rocks/smartpath": "^5.0.18",
     "@push.rocks/smartpdf": "^3.2.2",
     "@push.rocks/smartpromise": "^4.2.3",
     "@push.rocks/smartrequest": "^2.1.0",
     "@push.rocks/webstream": "^1.0.10",
-    "openai": "^
+    "openai": "^5.10.2"
   },
   "repository": {
     "type": "git",
@@ -76,7 +76,7 @@
     "provider switching"
   ],
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --web --verbose)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "(tsdoc)"
   }
```
package/readme.md
CHANGED
````diff
@@ -1,222 +1,392 @@
 # @push.rocks/smartai
 
-SmartAi is a TypeScript library
+SmartAi is a powerful TypeScript library that provides a unified interface for integrating with multiple AI providers including OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, and Exo. It offers comprehensive support for chat interactions, streaming conversations, text-to-speech, document analysis, and vision processing.
 
 ## Install
 
-To install SmartAi into your project,
+To install SmartAi into your project, use pnpm:
 
 ```bash
-
+pnpm install @push.rocks/smartai
 ```
 
-This command will add the SmartAi library to your project's dependencies, making it available for use in your TypeScript application.
-
 ## Usage
 
-SmartAi
+SmartAi provides a clean, consistent API across all supported AI providers. This documentation covers all features with practical examples for each provider and capability.
 
 ### Initialization
 
-
+First, initialize SmartAi with the API tokens and configuration for the providers you want to use:
 
 ```typescript
 import { SmartAi } from '@push.rocks/smartai';
 
 const smartAi = new SmartAi({
-
-
-
-
-
+  // OpenAI - for GPT models, DALL-E, and TTS
+  openaiToken: 'your-openai-api-key',
+
+  // Anthropic - for Claude models
+  anthropicToken: 'your-anthropic-api-key',
+
+  // Perplexity - for research-focused AI
+  perplexityToken: 'your-perplexity-api-key',
+
+  // Groq - for fast inference
+  groqToken: 'your-groq-api-key',
+
+  // XAI - for Grok models
+  xaiToken: 'your-xai-api-key',
+
+  // Ollama - for local models
   ollama: {
     baseUrl: 'http://localhost:11434',
-    model: 'llama2',
-    visionModel: 'llava'
+    model: 'llama2', // default model for chat
+    visionModel: 'llava' // default model for vision
   },
+
+  // Exo - for distributed inference
   exo: {
     baseUrl: 'http://localhost:8080/v1',
-    apiKey: 'your-api-key'
+    apiKey: 'your-exo-api-key'
   }
 });
 
+// Start the SmartAi instance
 await smartAi.start();
 ```
 
-
````
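The tokens in the new readme are hard-coded for clarity. A minimal sketch of pulling them from environment variables instead (the variable names and the `requireEnv` helper are illustrative, not part of the library):

```typescript
import { SmartAi } from '@push.rocks/smartai';

// Illustrative helper: fail fast if a required variable is missing.
function requireEnv(name: string): string {
  const value = process.env[name];
  if (!value) throw new Error(`Missing environment variable: ${name}`);
  return value;
}

const smartAi = new SmartAi({
  openaiToken: requireEnv('OPENAI_API_KEY'),
  anthropicToken: requireEnv('ANTHROPIC_API_KEY'),
});

await smartAi.start();
```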
````diff
+## Supported Providers
+
+SmartAi supports the following AI providers:
+
+| Provider | Use Case | Key Features |
+|----------|----------|--------------|
+| **OpenAI** | General purpose, GPT models | Chat, streaming, TTS, vision, documents |
+| **Anthropic** | Claude models, safety-focused | Chat, streaming, vision, documents |
+| **Perplexity** | Research and factual queries | Chat, streaming, documents |
+| **Groq** | Fast inference | Chat, streaming |
+| **XAI** | Grok models | Chat, streaming |
+| **Ollama** | Local models | Chat, streaming, vision |
+| **Exo** | Distributed inference | Chat, streaming |
+
+## Core Features
+
+### 1. Chat Interactions
 
-
+SmartAi provides both synchronous and streaming chat capabilities across all supported providers.
 
-####
+#### Synchronous Chat
 
-
+Simple request-response interactions with any provider:
 
 ```typescript
-
+// OpenAI Example
+const openAiResponse = await smartAi.openaiProvider.chat({
   systemMessage: 'You are a helpful assistant.',
   userMessage: 'What is the capital of France?',
-  messageHistory: []
+  messageHistory: []
 });
+console.log(openAiResponse.message); // "The capital of France is Paris."
 
-
+// Anthropic Example
+const anthropicResponse = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a knowledgeable historian.',
+  userMessage: 'Tell me about the French Revolution',
+  messageHistory: []
+});
+console.log(anthropicResponse.message);
+
+// Using message history for context
+const contextualResponse = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a math tutor.',
+  userMessage: 'What about multiplication?',
+  messageHistory: [
+    { role: 'user', content: 'Can you teach me math?' },
+    { role: 'assistant', content: 'Of course! What would you like to learn?' }
+  ]
+});
 ```
 
````
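Since `messageHistory` is plain data, multi-turn context can be carried with a small wrapper. A sketch under that assumption (`askWithMemory` is our name, not a library API):

```typescript
type ChatMessage = { role: 'user' | 'assistant'; content: string };

// Ask a question and record both sides of the turn for the next call.
async function askWithMemory(history: ChatMessage[], userMessage: string): Promise<string> {
  const response = await smartAi.openaiProvider.chat({
    systemMessage: 'You are a helpful assistant.',
    userMessage,
    messageHistory: history,
  });
  history.push({ role: 'user', content: userMessage });
  history.push({ role: 'assistant', content: response.message });
  return response.message;
}

const history: ChatMessage[] = [];
await askWithMemory(history, 'Can you teach me math?');
console.log(await askWithMemory(history, 'What about multiplication?'));
```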
````diff
-####
+#### Streaming Chat
 
-For
+For real-time, token-by-token responses:
 
 ```typescript
-
-const
-
-// Establish a transform stream
-const { writable, readable } = new TransformStream();
+// Create a readable stream for input
+const { readable, writable } = new TransformStream();
 const writer = writable.getWriter();
 
-
+// Send a message
+const encoder = new TextEncoder();
+await writer.write(encoder.encode(JSON.stringify({
   role: 'user',
-  content: '
-};
+  content: 'Write a haiku about programming'
+})));
+await writer.close();
 
-
-
-
-const
-const reader = stream.getReader();
+// Get streaming response
+const responseStream = await smartAi.openaiProvider.chatStream(readable);
+const reader = responseStream.getReader();
+const decoder = new TextDecoder();
 
+// Read the stream
 while (true) {
   const { done, value } = await reader.read();
   if (done) break;
-
+  process.stdout.write(value); // Print each chunk as it arrives
 }
 ```
 
````
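When the full text is needed as well as live output, the chunks can be accumulated while printing. A sketch assuming `chatStream` yields string or byte chunks, as the example above suggests:

```typescript
// Drain a response stream, echoing chunks live and returning the full text.
async function collectStream(stream: ReadableStream): Promise<string> {
  const reader = stream.getReader();
  const decoder = new TextDecoder();
  let fullText = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // Normalize: chunks may be strings or encoded bytes.
    const text = typeof value === 'string' ? value : decoder.decode(value, { stream: true });
    fullText += text;
    process.stdout.write(text);
  }
  return fullText;
}

const fullAnswer = await collectStream(await smartAi.openaiProvider.chatStream(readable));
```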
````diff
-### Audio Generation
+### 2. Text-to-Speech (Audio Generation)
 
-
+Convert text to natural-sounding speech (currently supported by OpenAI):
 
 ```typescript
+import * as fs from 'fs';
+
+// Generate speech from text
 const audioStream = await smartAi.openaiProvider.audio({
-  message: 'This is a test
+  message: 'Hello world! This is a test of the text-to-speech system.'
 });
 
-//
+// Save to file
+const writeStream = fs.createWriteStream('output.mp3');
+audioStream.pipe(writeStream);
+
+// Or use in your application directly
+audioStream.on('data', (chunk) => {
+  // Process audio chunks
+});
 ```
 
````
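Since the example relies on Node stream events (`.pipe`, `.on('data')`), the audio can also be buffered in memory, e.g. to return from an HTTP handler. A sketch under that assumption:

```typescript
// Collect a Node readable stream into a single Buffer.
function streamToBuffer(stream: NodeJS.ReadableStream): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    stream.on('data', (chunk: Buffer) => chunks.push(chunk));
    stream.on('end', () => resolve(Buffer.concat(chunks)));
    stream.on('error', reject);
  });
}

const audioBuffer = await streamToBuffer(audioStream);
console.log(`Received ${audioBuffer.length} bytes of audio`);
```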
````diff
-###
+### 3. Vision Processing
 
-
+Analyze images and get detailed descriptions:
 
 ```typescript
-
-
-
-
-
-
+import * as fs from 'fs';
+
+// Read an image file
+const imageBuffer = fs.readFileSync('image.jpg');
+
+// OpenAI Vision
+const openAiVision = await smartAi.openaiProvider.vision({
+  image: imageBuffer,
+  prompt: 'What is in this image? Describe in detail.'
 });
+console.log('OpenAI:', openAiVision);
 
-
+// Anthropic Vision
+const anthropicVision = await smartAi.anthropicProvider.vision({
+  image: imageBuffer,
+  prompt: 'Analyze this image and identify any text or objects.'
+});
+console.log('Anthropic:', anthropicVision);
+
+// Ollama Vision (using local model)
+const ollamaVision = await smartAi.ollamaProvider.vision({
+  image: imageBuffer,
+  prompt: 'Describe the colors and composition of this image.'
+});
+console.log('Ollama:', ollamaVision);
 ```
 
-
````
````diff
+### 4. Document Analysis
+
+Process and analyze PDF documents with AI:
 
 ```typescript
-
-
-
+import * as fs from 'fs';
+
+// Read PDF documents
+const pdfBuffer = fs.readFileSync('document.pdf');
+
+// Analyze with OpenAI
+const openAiAnalysis = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a document analyst. Extract key information.',
+  userMessage: 'Summarize this document and list the main points.',
+  messageHistory: [],
+  pdfDocuments: [pdfBuffer]
+});
+console.log('OpenAI Analysis:', openAiAnalysis.message);
+
+// Analyze with Anthropic
+const anthropicAnalysis = await smartAi.anthropicProvider.document({
+  systemMessage: 'You are a legal expert.',
+  userMessage: 'Identify any legal terms or implications in this document.',
   messageHistory: [],
   pdfDocuments: [pdfBuffer]
 });
+console.log('Anthropic Analysis:', anthropicAnalysis.message);
+
+// Process multiple documents
+const doc1 = fs.readFileSync('contract1.pdf');
+const doc2 = fs.readFileSync('contract2.pdf');
 
-
+const comparison = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a contract analyst.',
+  userMessage: 'Compare these two contracts and highlight the differences.',
+  messageHistory: [],
+  pdfDocuments: [doc1, doc2]
+});
+console.log('Comparison:', comparison.message);
 ```
 
````
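Because `pdfDocuments` takes plain Buffers, batch processing is a straightforward loop. A sketch summarizing every PDF in a folder (the directory path and prompt are illustrative):

```typescript
import * as fs from 'fs';
import * as path from 'path';

const dir = './pdfs'; // illustrative path
for (const fileName of fs.readdirSync(dir).filter((f) => f.endsWith('.pdf'))) {
  const analysis = await smartAi.openaiProvider.document({
    systemMessage: 'You are a document analyst.',
    userMessage: 'Summarize this document in three bullet points.',
    messageHistory: [],
    pdfDocuments: [fs.readFileSync(path.join(dir, fileName))],
  });
  console.log(`${fileName}: ${analysis.message}`);
}
```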
````diff
-###
+### 5. Conversation Management
 
-
+Create persistent conversation sessions with any provider:
 
 ```typescript
-
+// Create a conversation with OpenAI
+const conversation = smartAi.createConversation('openai');
 
-//
-
-  image: imageBuffer,
-  prompt: 'Describe the image.'
-});
+// Set the system message
+await conversation.setSystemMessage('You are a helpful coding assistant.');
 
-
-
+// Get input and output streams
+const inputWriter = conversation.getInputStreamWriter();
+const outputStream = conversation.getOutputStream();
 
-
+// Set up output reader
+const reader = outputStream.getReader();
+const decoder = new TextDecoder();
 
-
-
-  image: imageBuffer,
-  prompt: 'Detailed analysis required.'
-});
+// Send messages
+await inputWriter.write('How do I create a REST API in Node.js?');
 
-
+// Read responses
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  console.log('Assistant:', decoder.decode(value));
+}
+
+// Continue the conversation
+await inputWriter.write('Can you show me an example with Express?');
+
+// Create conversations with different providers
+const anthropicConversation = smartAi.createConversation('anthropic');
+const groqConversation = smartAi.createConversation('groq');
 ```
 
````
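The input/output streams above lend themselves to an interactive loop. A self-contained sketch wiring a terminal prompt to a conversation (it creates its own conversation rather than reusing the example's already-locked reader; only the conversation API shown above is assumed):

```typescript
import * as readline from 'node:readline/promises';

const conversation = smartAi.createConversation('openai');
await conversation.setSystemMessage('You are a helpful coding assistant.');

// Echo assistant output as it streams in.
(async () => {
  const reader = conversation.getOutputStream().getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    process.stdout.write(typeof value === 'string' ? value : decoder.decode(value));
  }
})();

// Forward terminal input into the conversation until the user quits.
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
const inputWriter = conversation.getInputStreamWriter();
while (true) {
  const line = await rl.question('> ');
  if (line === '/quit') break;
  await inputWriter.write(line);
}
rl.close();
```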
````diff
+## Advanced Usage
+
 ### Error Handling
 
-
+Always wrap AI operations in try-catch blocks for robust error handling:
 
 ```typescript
 try {
-  const response = await smartAi.
-  systemMessage: '
-  userMessage: '
+  const response = await smartAi.openaiProvider.chat({
+    systemMessage: 'You are an assistant.',
+    userMessage: 'Hello!',
     messageHistory: []
   });
   console.log(response.message);
-} catch (error
-
+} catch (error) {
+  if (error.code === 'rate_limit_exceeded') {
+    console.error('Rate limit hit, please retry later');
+  } else if (error.code === 'invalid_api_key') {
+    console.error('Invalid API key provided');
+  } else {
+    console.error('Unexpected error:', error.message);
+  }
 }
 ```
 
````
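Building on the error codes above, a retry wrapper with exponential backoff is a common hardening step. This sketch is plain TypeScript, not a SmartAi API, and assumes the `error.code` values shown in the example:

```typescript
// Retry a call with exponential backoff on rate-limit errors.
async function withRetry<T>(fn: () => Promise<T>, maxAttempts = 3): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (error: any) {
      if (error?.code !== 'rate_limit_exceeded' || attempt >= maxAttempts) throw error;
      const delayMs = 500 * 2 ** attempt; // 1s after the first failure, then 2s, ...
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
}

const response = await withRetry(() =>
  smartAi.openaiProvider.chat({
    systemMessage: 'You are an assistant.',
    userMessage: 'Hello!',
    messageHistory: [],
  }),
);
```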
````diff
-###
+### Streaming with Custom Processing
 
-
+Implement custom transformations on streaming responses:
 
 ```typescript
-
-
-
-
-
-
-    visionModel: 'llava'
+// Create a custom transform stream
+const customTransform = new TransformStream({
+  transform(chunk, controller) {
+    // Example: Add timestamps to each chunk
+    const timestamp = new Date().toISOString();
+    controller.enqueue(`[${timestamp}] ${chunk}`);
   }
 });
 
-
+// Apply to streaming chat
+const inputStream = new ReadableStream({
+  start(controller) {
+    controller.enqueue(new TextEncoder().encode(JSON.stringify({
+      role: 'user',
+      content: 'Tell me a story'
+    })));
+    controller.close();
+  }
+});
+
+const responseStream = await smartAi.openaiProvider.chatStream(inputStream);
+const processedStream = responseStream.pipeThrough(customTransform);
+
+// Read processed stream
+const reader = processedStream.getReader();
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  console.log(value);
+}
 ```
 
````
````diff
-###
+### Provider-Specific Features
 
-
+Each provider may have unique capabilities. Here's how to leverage them:
 
 ```typescript
-
-
-
-
-
+// OpenAI - Use specific models
+const gpt4Response = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a helpful assistant.',
+  userMessage: 'Explain quantum computing',
+  messageHistory: []
 });
 
-
-const
+// Anthropic - Use Claude's strength in analysis
+const codeReview = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a code reviewer.',
+  userMessage: 'Review this code for security issues: ...',
+  messageHistory: []
+});
 
-
-
-
-
-
+// Perplexity - Best for research and current events
+const research = await smartAi.perplexityProvider.chat({
+  systemMessage: 'You are a research assistant.',
+  userMessage: 'What are the latest developments in renewable energy?',
+  messageHistory: []
+});
+
+// Groq - Optimized for speed
+const quickResponse = await smartAi.groqProvider.chat({
+  systemMessage: 'You are a quick helper.',
+  userMessage: 'Give me a one-line summary of photosynthesis',
+  messageHistory: []
+});
 ```
 
````
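Since every provider exposes the same `chat()` shape, falling back across providers needs no special support. A sketch with an arbitrary provider order:

```typescript
// Try providers in order until one succeeds; rethrow the last error otherwise.
async function chatWithFallback(options: {
  systemMessage: string;
  userMessage: string;
  messageHistory: { role: string; content: string }[];
}) {
  const providers = [smartAi.openaiProvider, smartAi.anthropicProvider, smartAi.groqProvider];
  let lastError: unknown;
  for (const provider of providers) {
    try {
      return await provider.chat(options);
    } catch (error) {
      lastError = error;
    }
  }
  throw lastError;
}
```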
````diff
-
+### Performance Optimization
+
+Tips for optimal performance:
+
+```typescript
+// 1. Reuse providers instead of creating new instances
+const smartAi = new SmartAi({ /* config */ });
+await smartAi.start(); // Initialize once
+
+// 2. Use streaming for long responses
+// Streaming reduces time-to-first-token and memory usage
+
+// 3. Batch operations when possible
+const promises = [
+  smartAi.openaiProvider.chat({ /* ... */ }),
+  smartAi.anthropicProvider.chat({ /* ... */ })
+];
+const results = await Promise.all(promises);
+
+// 4. Clean up resources
+await smartAi.stop(); // When done
+```
 
 ## License and Legal Information
 
````