@push.rocks/smartai 0.4.2 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist_ts/00_commitinfo_data.js +3 -3
- package/npmextra.json +18 -5
- package/package.json +19 -6
- package/readme.md +99 -199
- package/ts/00_commitinfo_data.ts +2 -2
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
*/
|
|
4
4
|
export const commitinfo = {
|
|
5
5
|
name: '@push.rocks/smartai',
|
|
6
|
-
version: '0.
|
|
7
|
-
description: '
|
|
6
|
+
version: '0.5.0',
|
|
7
|
+
description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
|
|
8
8
|
};
|
|
9
|
-
//# sourceMappingURL=data:application/json;base64,
|
|
9
|
+
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiMDBfY29tbWl0aW5mb19kYXRhLmpzIiwic291cmNlUm9vdCI6IiIsInNvdXJjZXMiOlsiLi4vdHMvMDBfY29tbWl0aW5mb19kYXRhLnRzIl0sIm5hbWVzIjpbXSwibWFwcGluZ3MiOiJBQUFBOztHQUVHO0FBQ0gsTUFBTSxDQUFDLE1BQU0sVUFBVSxHQUFHO0lBQ3hCLElBQUksRUFBRSxxQkFBcUI7SUFDM0IsT0FBTyxFQUFFLE9BQU87SUFDaEIsV0FBVyxFQUFFLGtOQUFrTjtDQUNoTyxDQUFBIn0=
|
package/npmextra.json
CHANGED
|
@@ -5,20 +5,33 @@
|
|
|
5
5
|
"githost": "code.foss.global",
|
|
6
6
|
"gitscope": "push.rocks",
|
|
7
7
|
"gitrepo": "smartai",
|
|
8
|
-
"description": "
|
|
8
|
+
"description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
|
|
9
9
|
"npmPackagename": "@push.rocks/smartai",
|
|
10
10
|
"license": "MIT",
|
|
11
11
|
"projectDomain": "push.rocks",
|
|
12
12
|
"keywords": [
|
|
13
13
|
"AI integration",
|
|
14
|
-
"chatbot",
|
|
15
14
|
"TypeScript",
|
|
15
|
+
"chatbot",
|
|
16
16
|
"OpenAI",
|
|
17
17
|
"Anthropic",
|
|
18
|
-
"multi-model
|
|
19
|
-
"audio
|
|
18
|
+
"multi-model",
|
|
19
|
+
"audio generation",
|
|
20
20
|
"text-to-speech",
|
|
21
|
-
"
|
|
21
|
+
"document processing",
|
|
22
|
+
"vision processing",
|
|
23
|
+
"streaming chat",
|
|
24
|
+
"API",
|
|
25
|
+
"multiple providers",
|
|
26
|
+
"AI models",
|
|
27
|
+
"synchronous chat",
|
|
28
|
+
"asynchronous chat",
|
|
29
|
+
"real-time interaction",
|
|
30
|
+
"content analysis",
|
|
31
|
+
"image description",
|
|
32
|
+
"document classification",
|
|
33
|
+
"AI toolkit",
|
|
34
|
+
"provider switching"
|
|
22
35
|
]
|
|
23
36
|
}
|
|
24
37
|
},
|
package/package.json
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@push.rocks/smartai",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.5.0",
|
|
4
4
|
"private": false,
|
|
5
|
-
"description": "
|
|
5
|
+
"description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
|
|
6
6
|
"main": "dist_ts/index.js",
|
|
7
7
|
"typings": "dist_ts/index.d.ts",
|
|
8
8
|
"type": "module",
|
|
@@ -53,14 +53,27 @@
|
|
|
53
53
|
],
|
|
54
54
|
"keywords": [
|
|
55
55
|
"AI integration",
|
|
56
|
-
"chatbot",
|
|
57
56
|
"TypeScript",
|
|
57
|
+
"chatbot",
|
|
58
58
|
"OpenAI",
|
|
59
59
|
"Anthropic",
|
|
60
|
-
"multi-model
|
|
61
|
-
"audio
|
|
60
|
+
"multi-model",
|
|
61
|
+
"audio generation",
|
|
62
62
|
"text-to-speech",
|
|
63
|
-
"
|
|
63
|
+
"document processing",
|
|
64
|
+
"vision processing",
|
|
65
|
+
"streaming chat",
|
|
66
|
+
"API",
|
|
67
|
+
"multiple providers",
|
|
68
|
+
"AI models",
|
|
69
|
+
"synchronous chat",
|
|
70
|
+
"asynchronous chat",
|
|
71
|
+
"real-time interaction",
|
|
72
|
+
"content analysis",
|
|
73
|
+
"image description",
|
|
74
|
+
"document classification",
|
|
75
|
+
"AI toolkit",
|
|
76
|
+
"provider switching"
|
|
64
77
|
],
|
|
65
78
|
"scripts": {
|
|
66
79
|
"test": "(tstest test/ --web)",
|
package/readme.md
CHANGED
|
@@ -1,144 +1,38 @@
|
|
|
1
1
|
# @push.rocks/smartai
|
|
2
2
|
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
- [Features](#features)
|
|
10
|
-
- [Installation](#installation)
|
|
11
|
-
- [Supported AI Providers](#supported-ai-providers)
|
|
12
|
-
- [Quick Start](#quick-start)
|
|
13
|
-
- [Usage Examples](#usage-examples)
|
|
14
|
-
- [Chat Interactions](#chat-interactions)
|
|
15
|
-
- [Streaming Chat](#streaming-chat)
|
|
16
|
-
- [Audio Generation](#audio-generation)
|
|
17
|
-
- [Document Processing](#document-processing)
|
|
18
|
-
- [Vision Processing](#vision-processing)
|
|
19
|
-
- [Error Handling](#error-handling)
|
|
20
|
-
- [Development](#development)
|
|
21
|
-
- [Running Tests](#running-tests)
|
|
22
|
-
- [Building the Project](#building-the-project)
|
|
23
|
-
- [Contributing](#contributing)
|
|
24
|
-
- [License](#license)
|
|
25
|
-
- [Legal Information](#legal-information)
|
|
26
|
-
|
|
27
|
-
## Features
|
|
28
|
-
|
|
29
|
-
- **Unified API:** Seamlessly integrate multiple AI providers with a consistent interface.
|
|
30
|
-
- **Chat & Streaming:** Support for both synchronous and real-time streaming chat interactions.
|
|
31
|
-
- **Audio & Vision:** Generate audio responses and perform detailed image analysis.
|
|
32
|
-
- **Document Processing:** Analyze PDFs and other documents using vision models.
|
|
33
|
-
- **Extensible:** Easily extend the library to support additional AI providers.
|
|
34
|
-
|
|
35
|
-
## Installation
|
|
36
|
-
|
|
37
|
-
To install SmartAi, run the following command:
|
|
3
|
+
SmartAi is a TypeScript library providing a unified interface for integrating and interacting with multiple AI models, supporting chat interactions, audio and document processing, and vision tasks.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
To install SmartAi into your project, you need to run the following command in your terminal:
|
|
38
8
|
|
|
39
9
|
```bash
|
|
40
10
|
npm install @push.rocks/smartai
|
|
41
11
|
```
|
|
42
12
|
|
|
43
|
-
This will add the
|
|
44
|
-
|
|
45
|
-
## Supported AI Providers
|
|
46
|
-
|
|
47
|
-
SmartAi supports multiple AI providers. Configure each provider with its corresponding token or settings:
|
|
48
|
-
|
|
49
|
-
### OpenAI
|
|
50
|
-
|
|
51
|
-
- **Models:** GPT-4, GPT-3.5-turbo, GPT-4-vision-preview
|
|
52
|
-
- **Features:** Chat, Streaming, Audio Generation, Vision, Document Processing
|
|
53
|
-
- **Configuration Example:**
|
|
54
|
-
|
|
55
|
-
```typescript
|
|
56
|
-
openaiToken: 'your-openai-token'
|
|
57
|
-
```
|
|
58
|
-
|
|
59
|
-
### X.AI
|
|
60
|
-
|
|
61
|
-
- **Models:** Grok-2-latest
|
|
62
|
-
- **Features:** Chat, Streaming, Document Processing
|
|
63
|
-
- **Configuration Example:**
|
|
64
|
-
|
|
65
|
-
```typescript
|
|
66
|
-
xaiToken: 'your-xai-token'
|
|
67
|
-
```
|
|
68
|
-
|
|
69
|
-
### Anthropic
|
|
70
|
-
|
|
71
|
-
- **Models:** Claude-3-opus-20240229
|
|
72
|
-
- **Features:** Chat, Streaming, Vision, Document Processing
|
|
73
|
-
- **Configuration Example:**
|
|
74
|
-
|
|
75
|
-
```typescript
|
|
76
|
-
anthropicToken: 'your-anthropic-token'
|
|
77
|
-
```
|
|
78
|
-
|
|
79
|
-
### Perplexity
|
|
80
|
-
|
|
81
|
-
- **Models:** Mixtral-8x7b-instruct
|
|
82
|
-
- **Features:** Chat, Streaming
|
|
83
|
-
- **Configuration Example:**
|
|
84
|
-
|
|
85
|
-
```typescript
|
|
86
|
-
perplexityToken: 'your-perplexity-token'
|
|
87
|
-
```
|
|
88
|
-
|
|
89
|
-
### Groq
|
|
90
|
-
|
|
91
|
-
- **Models:** Llama-3.3-70b-versatile
|
|
92
|
-
- **Features:** Chat, Streaming
|
|
93
|
-
- **Configuration Example:**
|
|
94
|
-
|
|
95
|
-
```typescript
|
|
96
|
-
groqToken: 'your-groq-token'
|
|
97
|
-
```
|
|
98
|
-
|
|
99
|
-
### Ollama
|
|
100
|
-
|
|
101
|
-
- **Models:** Configurable (default: llama2; use llava for vision/document tasks)
|
|
102
|
-
- **Features:** Chat, Streaming, Vision, Document Processing
|
|
103
|
-
- **Configuration Example:**
|
|
104
|
-
|
|
105
|
-
```typescript
|
|
106
|
-
ollama: {
|
|
107
|
-
baseUrl: 'http://localhost:11434', // Optional
|
|
108
|
-
model: 'llama2', // Optional
|
|
109
|
-
visionModel: 'llava' // Optional for vision and document tasks
|
|
110
|
-
}
|
|
111
|
-
```
|
|
13
|
+
This command will add the SmartAi library to your project's dependencies, making it available for use in your TypeScript application.
|
|
112
14
|
|
|
113
|
-
|
|
15
|
+
## Usage
|
|
114
16
|
|
|
115
|
-
|
|
116
|
-
- **Features:** Chat, Streaming
|
|
117
|
-
- **Configuration Example:**
|
|
118
|
-
|
|
119
|
-
```typescript
|
|
120
|
-
exo: {
|
|
121
|
-
baseUrl: 'http://localhost:8080/v1', // Optional
|
|
122
|
-
apiKey: 'your-api-key' // Optional for local deployments
|
|
123
|
-
}
|
|
124
|
-
```
|
|
17
|
+
SmartAi is designed to provide a comprehensive and unified API for working seamlessly with multiple AI providers like OpenAI, Anthropic, Perplexity, and others. Below we will delve into how to make the most out of this library, illustrating the setup and functionality with in-depth examples. Our scenarios will explore synchronous and streaming interactions, audio generation, document handling, and vision tasks with different AI providers.
|
|
125
18
|
|
|
126
|
-
|
|
19
|
+
### Initialization
|
|
127
20
|
|
|
128
|
-
|
|
21
|
+
Initialization is the first step before using any AI functionalities. You should provide API tokens for each provider you plan to utilize.
|
|
129
22
|
|
|
130
23
|
```typescript
|
|
131
24
|
import { SmartAi } from '@push.rocks/smartai';
|
|
132
25
|
|
|
133
26
|
const smartAi = new SmartAi({
|
|
134
27
|
openaiToken: 'your-openai-token',
|
|
135
|
-
xaiToken: 'your-xai-token',
|
|
136
28
|
anthropicToken: 'your-anthropic-token',
|
|
137
29
|
perplexityToken: 'your-perplexity-token',
|
|
30
|
+
xaiToken: 'your-xai-token',
|
|
138
31
|
groqToken: 'your-groq-token',
|
|
139
32
|
ollama: {
|
|
140
33
|
baseUrl: 'http://localhost:11434',
|
|
141
|
-
model: 'llama2'
|
|
34
|
+
model: 'llama2',
|
|
35
|
+
visionModel: 'llava'
|
|
142
36
|
},
|
|
143
37
|
exo: {
|
|
144
38
|
baseUrl: 'http://localhost:8080/v1',
|
|
@@ -149,31 +43,33 @@ const smartAi = new SmartAi({
|
|
|
149
43
|
await smartAi.start();
|
|
150
44
|
```
|
|
151
45
|
|
|
152
|
-
## Usage Examples
|
|
153
|
-
|
|
154
46
|
### Chat Interactions
|
|
155
47
|
|
|
156
|
-
|
|
48
|
+
Interaction through chat is a key feature. SmartAi caters to both synchronous and asynchronous (streaming) chats across several AI models.
|
|
49
|
+
|
|
50
|
+
#### Regular Synchronous Chat
|
|
51
|
+
|
|
52
|
+
Connect with AI models via straightforward request-response interactions.
|
|
157
53
|
|
|
158
54
|
```typescript
|
|
159
|
-
const
|
|
55
|
+
const syncResponse = await smartAi.openaiProvider.chat({
|
|
160
56
|
systemMessage: 'You are a helpful assistant.',
|
|
161
57
|
userMessage: 'What is the capital of France?',
|
|
162
|
-
messageHistory: [] //
|
|
58
|
+
messageHistory: [] // Could include context or preceding messages
|
|
163
59
|
});
|
|
164
60
|
|
|
165
|
-
console.log(
|
|
61
|
+
console.log(syncResponse.message); // Outputs: "The capital of France is Paris."
|
|
166
62
|
```
|
|
167
63
|
|
|
168
|
-
|
|
64
|
+
#### Real-Time Streaming Chat
|
|
169
65
|
|
|
170
|
-
|
|
66
|
+
For continuous interaction and lower latency, engage in streaming chat.
|
|
171
67
|
|
|
172
68
|
```typescript
|
|
173
69
|
const textEncoder = new TextEncoder();
|
|
174
70
|
const textDecoder = new TextDecoder();
|
|
175
71
|
|
|
176
|
-
//
|
|
72
|
+
// Establish a transform stream
|
|
177
73
|
const { writable, readable } = new TransformStream();
|
|
178
74
|
const writer = writable.getWriter();
|
|
179
75
|
|
|
@@ -184,7 +80,7 @@ const message = {
|
|
|
184
80
|
|
|
185
81
|
writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));
|
|
186
82
|
|
|
187
|
-
//
|
|
83
|
+
// Initiate streaming
|
|
188
84
|
const stream = await smartAi.openaiProvider.chatStream(readable);
|
|
189
85
|
const reader = stream.getReader();
|
|
190
86
|
|
|
@@ -197,133 +93,137 @@ while (true) {
|
|
|
197
93
|
|
|
198
94
|
### Audio Generation
|
|
199
95
|
|
|
200
|
-
|
|
96
|
+
Audio generation from textual input is possible using providers like OpenAI.
|
|
201
97
|
|
|
202
98
|
```typescript
|
|
203
99
|
const audioStream = await smartAi.openaiProvider.audio({
|
|
204
|
-
message: '
|
|
100
|
+
message: 'This is a test message for generating speech.'
|
|
205
101
|
});
|
|
206
102
|
|
|
207
|
-
//
|
|
103
|
+
// Use the audioStream e.g., playing or saving it.
|
|
208
104
|
```
|
|
209
105
|
|
|
210
|
-
### Document
|
|
106
|
+
### Document Analysis
|
|
211
107
|
|
|
212
|
-
|
|
108
|
+
SmartAi can ingest and process documents, extracting meaningful information or performing classifications.
|
|
213
109
|
|
|
214
110
|
```typescript
|
|
215
|
-
|
|
216
|
-
const
|
|
217
|
-
systemMessage: '
|
|
218
|
-
userMessage: '
|
|
111
|
+
const pdfBuffer = await fetchPdf('https://example.com/document.pdf');
|
|
112
|
+
const documentRes = await smartAi.openaiProvider.document({
|
|
113
|
+
systemMessage: 'Determine the nature of the document.',
|
|
114
|
+
userMessage: 'Classify this document.',
|
|
219
115
|
messageHistory: [],
|
|
220
|
-
pdfDocuments: [pdfBuffer]
|
|
116
|
+
pdfDocuments: [pdfBuffer]
|
|
221
117
|
});
|
|
118
|
+
|
|
119
|
+
console.log(documentRes.message); // Outputs: classified document type
|
|
222
120
|
```
|
|
223
121
|
|
|
224
|
-
|
|
122
|
+
SmartAi allows easy switching between providers, thus giving developers flexibility:
|
|
225
123
|
|
|
226
124
|
```typescript
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
userMessage: 'Extract key information from this document',
|
|
125
|
+
const anthopicRes = await smartAi.anthropicProvider.document({
|
|
126
|
+
systemMessage: 'Analyze this document.',
|
|
127
|
+
userMessage: 'Extract core points.',
|
|
231
128
|
messageHistory: [],
|
|
232
129
|
pdfDocuments: [pdfBuffer]
|
|
233
130
|
});
|
|
234
|
-
```
|
|
235
131
|
|
|
236
|
-
|
|
237
|
-
// Using Anthropic for document processing
|
|
238
|
-
const anthropicResult = await smartAi.anthropicProvider.document({
|
|
239
|
-
systemMessage: 'Analyze the document',
|
|
240
|
-
userMessage: 'Please extract the main points',
|
|
241
|
-
messageHistory: [],
|
|
242
|
-
pdfDocuments: [pdfBuffer]
|
|
243
|
-
});
|
|
132
|
+
console.log(anthopicRes.message); // Outputs: summarized core points
|
|
244
133
|
```
|
|
245
134
|
|
|
246
135
|
### Vision Processing
|
|
247
136
|
|
|
248
|
-
|
|
137
|
+
Engage AI models in analyzing and describing images:
|
|
249
138
|
|
|
250
139
|
```typescript
|
|
251
|
-
|
|
252
|
-
const imageDescription = await smartAi.openaiProvider.vision({
|
|
253
|
-
image: imageBuffer, // Uint8Array containing image data
|
|
254
|
-
prompt: 'What do you see in this image?'
|
|
255
|
-
});
|
|
140
|
+
const imageBuffer = await fetchImage('path/to/image.jpg');
|
|
256
141
|
|
|
257
|
-
// Using
|
|
258
|
-
const
|
|
142
|
+
// Using OpenAI's vision capabilities
|
|
143
|
+
const visionOutput = await smartAi.openaiProvider.vision({
|
|
259
144
|
image: imageBuffer,
|
|
260
|
-
prompt: '
|
|
145
|
+
prompt: 'Describe the image.'
|
|
261
146
|
});
|
|
262
147
|
|
|
263
|
-
//
|
|
264
|
-
|
|
148
|
+
console.log(visionOutput); // Outputs: image description
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
Use other providers for more varied analysis:
|
|
152
|
+
|
|
153
|
+
```typescript
|
|
154
|
+
const ollamaOutput = await smartAi.ollamaProvider.vision({
|
|
265
155
|
image: imageBuffer,
|
|
266
|
-
prompt: '
|
|
156
|
+
prompt: 'Detailed analysis required.'
|
|
267
157
|
});
|
|
158
|
+
|
|
159
|
+
console.log(ollamaOutput); // Outputs: detailed analysis results
|
|
268
160
|
```
|
|
269
161
|
|
|
270
|
-
|
|
162
|
+
### Error Handling
|
|
271
163
|
|
|
272
|
-
|
|
164
|
+
Due to the nature of external integrations, be sure to wrap AI calls within try-catch blocks.
|
|
273
165
|
|
|
274
166
|
```typescript
|
|
275
167
|
try {
|
|
276
|
-
const response = await smartAi.
|
|
277
|
-
systemMessage: '
|
|
278
|
-
userMessage: '
|
|
168
|
+
const response = await smartAi.anthropicProvider.chat({
|
|
169
|
+
systemMessage: 'Hello!',
|
|
170
|
+
userMessage: 'Help me out.',
|
|
279
171
|
messageHistory: []
|
|
280
172
|
});
|
|
281
173
|
console.log(response.message);
|
|
282
174
|
} catch (error: any) {
|
|
283
|
-
console.error('
|
|
175
|
+
console.error('Encountered an error:', error.message);
|
|
284
176
|
}
|
|
285
177
|
```
|
|
286
178
|
|
|
287
|
-
|
|
179
|
+
### Providers and Customization
|
|
288
180
|
|
|
289
|
-
|
|
181
|
+
The library supports provider-specific customization, enabling tailored interactions:
|
|
290
182
|
|
|
291
|
-
|
|
183
|
+
```typescript
|
|
184
|
+
const smartAi = new SmartAi({
|
|
185
|
+
openaiToken: 'your-openai-token',
|
|
186
|
+
anthropicToken: 'your-anthropic-token',
|
|
187
|
+
ollama: {
|
|
188
|
+
baseUrl: 'http://localhost:11434',
|
|
189
|
+
model: 'llama2',
|
|
190
|
+
visionModel: 'llava'
|
|
191
|
+
}
|
|
192
|
+
});
|
|
292
193
|
|
|
293
|
-
|
|
294
|
-
npm run test
|
|
194
|
+
await smartAi.start();
|
|
295
195
|
```
|
|
296
196
|
|
|
297
|
-
|
|
197
|
+
### Advanced Streaming Customization
|
|
298
198
|
|
|
299
|
-
|
|
199
|
+
Developers can implement real-time processing pipelines with custom transformations:
|
|
300
200
|
|
|
301
|
-
|
|
201
|
+
```typescript
|
|
202
|
+
const customProcessingStream = new TransformStream({
|
|
203
|
+
transform(chunk, controller) {
|
|
204
|
+
const processed = chunk.toUpperCase(); // Example transformation
|
|
205
|
+
controller.enqueue(processed);
|
|
206
|
+
}
|
|
207
|
+
});
|
|
302
208
|
|
|
303
|
-
|
|
304
|
-
|
|
209
|
+
const processedStream = stream.pipeThrough(customProcessingStream);
|
|
210
|
+
const processedReader = processedStream.getReader();
|
|
211
|
+
|
|
212
|
+
while (true) {
|
|
213
|
+
const { done, value } = await processedReader.read();
|
|
214
|
+
if (done) break;
|
|
215
|
+
console.log('Processed Output:', value);
|
|
216
|
+
}
|
|
305
217
|
```
|
|
306
218
|
|
|
307
|
-
This
|
|
219
|
+
This approach can facilitate adaptive content processing workflows.
|
|
220
|
+
|
|
221
|
+
### Conclusion
|
|
308
222
|
|
|
309
|
-
|
|
223
|
+
SmartAi is a powerful toolkit for multi-faceted AI integration, offering robust solutions for chat, media, and document processing. Developers can enjoy a consistent API experience while leveraging the strengths of each supported AI model.
|
|
310
224
|
|
|
311
|
-
|
|
225
|
+
For further exploration, developers might consider perusing individual providers' documentation to understand specific capabilities and limitations.
|
|
312
226
|
|
|
313
|
-
1. Fork the repository.
|
|
314
|
-
2. Create a feature branch:
|
|
315
|
-
```bash
|
|
316
|
-
git checkout -b feature/my-feature
|
|
317
|
-
```
|
|
318
|
-
3. Commit your changes with clear messages:
|
|
319
|
-
```bash
|
|
320
|
-
git commit -m 'Add new feature'
|
|
321
|
-
```
|
|
322
|
-
4. Push your branch to your fork:
|
|
323
|
-
```bash
|
|
324
|
-
git push origin feature/my-feature
|
|
325
|
-
```
|
|
326
|
-
5. Open a Pull Request with a detailed description of your changes.
|
|
327
227
|
|
|
328
228
|
## License and Legal Information
|
|
329
229
|
|
|
@@ -342,4 +242,4 @@ Registered at District court Bremen HRB 35230 HB, Germany
|
|
|
342
242
|
|
|
343
243
|
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
|
|
344
244
|
|
|
345
|
-
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
|
|
245
|
+
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
|
package/ts/00_commitinfo_data.ts
CHANGED
|
@@ -3,6 +3,6 @@
|
|
|
3
3
|
*/
|
|
4
4
|
export const commitinfo = {
|
|
5
5
|
name: '@push.rocks/smartai',
|
|
6
|
-
version: '0.
|
|
7
|
-
description: '
|
|
6
|
+
version: '0.5.0',
|
|
7
|
+
description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
|
|
8
8
|
}
|