@memberjunction/ai-mistral 2.43.0 → 2.44.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +3 -3
  2. package/readme.md +201 -19
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@memberjunction/ai-mistral",
3
- "version": "2.43.0",
3
+ "version": "2.44.0",
4
4
  "description": "MemberJunction Wrapper for Mistral AI's AI Models",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -19,8 +19,8 @@
19
19
  "typescript": "^5.4.5"
20
20
  },
21
21
  "dependencies": {
22
- "@memberjunction/ai": "2.43.0",
23
- "@memberjunction/global": "2.43.0",
22
+ "@memberjunction/ai": "2.44.0",
23
+ "@memberjunction/global": "2.44.0",
24
24
  "@mistralai/mistralai": "^1.6.0",
25
25
  "axios-retry": "4.3.0"
26
26
  }
package/readme.md CHANGED
@@ -1,15 +1,18 @@
1
1
  # @memberjunction/ai-mistral
2
2
 
3
- A comprehensive wrapper for Mistral AI's models, enabling seamless integration with the MemberJunction AI framework for natural language processing tasks.
3
+ A comprehensive wrapper for Mistral AI's models, enabling seamless integration with the MemberJunction AI framework for natural language processing and embedding tasks.
4
4
 
5
5
  ## Features
6
6
 
7
- - **Mistral AI Integration**: Connect to Mistral's powerful language models
8
- - **Standardized Interface**: Implements MemberJunction's BaseLLM abstract class
7
+ - **Mistral AI Integration**: Connect to Mistral's powerful language models and embedding models
8
+ - **Standardized Interface**: Implements MemberJunction's BaseLLM and BaseEmbeddings abstract classes
9
+ - **Streaming Support**: Full support for streaming chat completions
9
10
  - **Token Usage Tracking**: Automatic tracking of prompt and completion tokens
10
11
  - **Response Format Control**: Support for standard text and JSON response formats
12
+ - **Multi-Modal Support**: Handles text, images, and documents in chat messages
11
13
  - **Error Handling**: Robust error handling with detailed reporting
12
14
  - **Chat Completion**: Full support for chat-based interactions with Mistral models
15
+ - **Text Embeddings**: Generate vector embeddings for text using Mistral's embedding models
13
16
 
14
17
  ## Installation
15
18
 
@@ -20,8 +23,9 @@ npm install @memberjunction/ai-mistral
20
23
  ## Requirements
21
24
 
22
25
  - Node.js 16+
26
+ - TypeScript 5.4.5+
23
27
  - A Mistral AI API key
24
- - MemberJunction Core libraries
28
+ - MemberJunction Core libraries (@memberjunction/ai, @memberjunction/global)
25
29
 
26
30
  ## Usage
27
31
 
@@ -37,14 +41,14 @@ const mistralLLM = new MistralLLM('your-mistral-api-key');
37
41
  ### Chat Completion
38
42
 
39
43
  ```typescript
40
- import { ChatParams } from '@memberjunction/ai';
44
+ import { ChatParams, ChatMessageRole } from '@memberjunction/ai';
41
45
 
42
46
  // Create chat parameters
43
47
  const chatParams: ChatParams = {
44
48
  model: 'mistral-large-latest', // or other models like 'open-mistral-7b', 'mistral-small-latest'
45
49
  messages: [
46
- { role: 'system', content: 'You are a helpful assistant.' },
47
- { role: 'user', content: 'What are the main principles of machine learning?' }
50
+ { role: ChatMessageRole.system, content: 'You are a helpful assistant.' },
51
+ { role: ChatMessageRole.user, content: 'What are the main principles of machine learning?' }
48
52
  ],
49
53
  temperature: 0.7,
50
54
  maxOutputTokens: 1000
@@ -72,8 +76,8 @@ try {
72
76
  const jsonParams: ChatParams = {
73
77
  model: 'mistral-large-latest',
74
78
  messages: [
75
- { role: 'system', content: 'You are a helpful assistant.' },
76
- { role: 'user', content: 'Give me data about the top 3 machine learning algorithms in JSON format' }
79
+ { role: ChatMessageRole.system, content: 'You are a helpful assistant that responds in JSON format.' },
80
+ { role: ChatMessageRole.user, content: 'Give me data about the top 3 machine learning algorithms in JSON format' }
77
81
  ],
78
82
  maxOutputTokens: 1000,
79
83
  responseFormat: 'JSON' // This will add the appropriate response_format parameter
@@ -88,6 +92,101 @@ if (jsonResponse.success) {
88
92
  }
89
93
  ```
90
94
 
95
+ ### Streaming Chat Completion
96
+
97
+ ```typescript
98
+ // Mistral supports streaming responses
99
+ const streamParams: ChatParams = {
100
+ model: 'mistral-large-latest',
101
+ messages: [
102
+ { role: ChatMessageRole.system, content: 'You are a helpful assistant.' },
103
+ { role: ChatMessageRole.user, content: 'Write a short story about AI' }
104
+ ],
105
+ maxOutputTokens: 1000,
106
+ stream: true, // Enable streaming
107
+ streamCallback: (content: string) => {
108
+ // Handle each chunk of streamed content
109
+ process.stdout.write(content);
110
+ }
111
+ };
112
+
113
+ const streamResponse = await mistralLLM.ChatCompletion(streamParams);
114
+ console.log('\nStreaming complete!');
115
+ console.log('Total tokens:', streamResponse.data.usage);
116
+ ```
117
+
118
+ ### Multi-Modal Messages
119
+
120
+ ```typescript
121
+ // Mistral supports images and documents in messages
122
+ const multiModalParams: ChatParams = {
123
+ model: 'mistral-large-latest',
124
+ messages: [
125
+ {
126
+ role: ChatMessageRole.user,
127
+ content: [
128
+ { type: 'text', content: 'What do you see in this image?' },
129
+ { type: 'image_url', content: 'https://example.com/image.jpg' }
130
+ ]
131
+ }
132
+ ],
133
+ maxOutputTokens: 1000
134
+ };
135
+
136
+ // For documents
137
+ const documentParams: ChatParams = {
138
+ model: 'mistral-large-latest',
139
+ messages: [
140
+ {
141
+ role: ChatMessageRole.user,
142
+ content: [
143
+ { type: 'text', content: 'Summarize this document' },
144
+ { type: 'file_url', content: 'https://example.com/document.pdf' } // Converted to document_url for Mistral
145
+ ]
146
+ }
147
+ ],
148
+ maxOutputTokens: 1000
149
+ };
150
+ ```
151
+
152
+ ### Text Embeddings
153
+
154
+ ```typescript
155
+ import { MistralEmbedding } from '@memberjunction/ai-mistral';
156
+ import { EmbedTextParams, EmbedTextsParams } from '@memberjunction/ai';
157
+
158
+ // Initialize the embedding client
159
+ const mistralEmbedding = new MistralEmbedding('your-mistral-api-key');
160
+
161
+ // Embed a single text
162
+ const embedParams: EmbedTextParams = {
163
+ text: 'Machine learning is a subset of artificial intelligence.',
164
+ model: 'mistral-embed' // Optional, defaults to 'mistral-embed'
165
+ };
166
+
167
+ const embedResult = await mistralEmbedding.EmbedText(embedParams);
168
+ console.log('Embedding vector dimensions:', embedResult.vector.length); // 1024 dimensions
169
+ console.log('Token usage:', embedResult.ModelUsage);
170
+
171
+ // Embed multiple texts
172
+ const multiEmbedParams: EmbedTextsParams = {
173
+ texts: [
174
+ 'Natural language processing enables computers to understand human language.',
175
+ 'Deep learning uses neural networks with multiple layers.',
176
+ 'Computer vision allows machines to interpret visual information.'
177
+ ],
178
+ model: 'mistral-embed'
179
+ };
180
+
181
+ const multiEmbedResult = await mistralEmbedding.EmbedTexts(multiEmbedParams);
182
+ console.log('Number of embeddings:', multiEmbedResult.vectors.length);
183
+ console.log('Total token usage:', multiEmbedResult.ModelUsage);
184
+
185
+ // Get available embedding models
186
+ const embeddingModels = await mistralEmbedding.GetEmbeddingModels();
187
+ console.log('Available models:', embeddingModels);
188
+ ```
189
+
91
190
  ### Direct Access to Mistral Client
92
191
 
93
192
  ```typescript
@@ -95,7 +194,7 @@ if (jsonResponse.success) {
95
194
  const mistralClient = mistralLLM.Client;
96
195
 
97
196
  // Use the client directly if needed
98
- const modelList = await mistralClient.listModels();
197
+ const modelList = await mistralClient.models.list();
99
198
  console.log('Available models:', modelList);
100
199
  ```
101
200
 
@@ -126,13 +225,34 @@ new MistralLLM(apiKey: string)
126
225
  #### Properties
127
226
 
128
227
  - `Client`: (read-only) Returns the underlying Mistral client instance
228
+ - `SupportsStreaming`: (read-only) Returns `true` - Mistral supports streaming
129
229
 
130
230
  #### Methods
131
231
 
132
- - `ChatCompletion(params: ChatParams): Promise<ChatResult>` - Perform a chat completion
232
+ - `ChatCompletion(params: ChatParams): Promise<ChatResult>` - Perform a chat completion (supports both streaming and non-streaming)
133
233
  - `SummarizeText(params: SummarizeParams): Promise<SummarizeResult>` - Not implemented yet
134
234
  - `ClassifyText(params: ClassifyParams): Promise<ClassifyResult>` - Not implemented yet
135
235
 
236
+ ### MistralEmbedding Class
237
+
238
+ A class that extends BaseEmbeddings to provide Mistral embedding functionality.
239
+
240
+ #### Constructor
241
+
242
+ ```typescript
243
+ new MistralEmbedding(apiKey: string)
244
+ ```
245
+
246
+ #### Properties
247
+
248
+ - `Client`: (read-only) Returns the underlying Mistral client instance
249
+
250
+ #### Methods
251
+
252
+ - `EmbedText(params: EmbedTextParams): Promise<EmbedTextResult>` - Generate embedding for a single text
253
+ - `EmbedTexts(params: EmbedTextsParams): Promise<EmbedTextsResult>` - Generate embeddings for multiple texts
254
+ - `GetEmbeddingModels(): Promise<any>` - Get list of available embedding models
255
+
136
256
  ## Response Format Control
137
257
 
138
258
  The wrapper supports different response formats:
@@ -181,23 +301,85 @@ if (response.success) {
181
301
  }
182
302
  ```
183
303
 
304
+ ## Special Behaviors
305
+
306
+ ### Message Formatting
307
+ - The wrapper automatically ensures Mistral's requirement that the last message must be from 'user' or 'tool'
308
+ - If the last message is not from a user, a placeholder user message "ok" is automatically appended
309
+
310
+ ### Multi-Modal Content
311
+ - Image URLs are passed through as `image_url` type
312
+ - File URLs are converted to `document_url` type for Mistral compatibility
313
+ - Unsupported content types are filtered out with a warning
314
+
184
315
  ## Limitations
185
316
 
186
317
  Currently, the wrapper implements:
187
- - Chat completion functionality with token usage tracking
318
+ - Chat completion functionality with full streaming support
319
+ - Text embedding functionality with single and batch processing
320
+ - Token usage tracking for both chat and embeddings
188
321
 
189
- Future implementations may include:
322
+ Not yet implemented:
190
323
  - `SummarizeText` functionality
191
324
  - `ClassifyText` functionality
192
- - Streaming responses
325
+ - `effortLevel`/`reasoning_effort` parameter (not currently supported by Mistral API)
326
+
327
+ ## Integration with MemberJunction
328
+
329
+ This package is designed to work seamlessly with the MemberJunction AI framework:
330
+
331
+ ### Class Registration
332
+ Both `MistralLLM` and `MistralEmbedding` are automatically registered with the MemberJunction class factory using the `@RegisterClass` decorator:
333
+
334
+ ```typescript
335
+ // Classes are registered and can be instantiated via the class factory
336
+ import { ClassFactory } from '@memberjunction/global';
337
+
338
+ const mistralLLM = ClassFactory.CreateInstance<BaseLLM>(BaseLLM, 'MistralLLM', apiKey);
339
+ const mistralEmbedding = ClassFactory.CreateInstance<BaseEmbeddings>(BaseEmbeddings, 'MistralEmbedding', apiKey);
340
+ ```
341
+
342
+ ### Tree-Shaking Prevention
343
+ The package exports loader functions to prevent tree-shaking:
344
+
345
+ ```typescript
346
+ import { LoadMistralLLM, LoadMistralEmbedding } from '@memberjunction/ai-mistral';
347
+
348
+ // Call these in your application initialization to ensure classes are registered
349
+ LoadMistralLLM();
350
+ LoadMistralEmbedding();
351
+ ```
193
352
 
194
353
  ## Dependencies
195
354
 
196
- - `@mistralai/mistralai`: Official Mistral AI Node.js SDK
197
- - `@memberjunction/ai`: MemberJunction AI core framework
198
- - `@memberjunction/global`: MemberJunction global utilities
199
- - `axios-retry`: Retry mechanism for API calls
355
+ - `@mistralai/mistralai`: ^1.6.0 - Official Mistral AI Node.js SDK
356
+ - `@memberjunction/ai`: 2.44.0 - MemberJunction AI core framework
357
+ - `@memberjunction/global`: 2.44.0 - MemberJunction global utilities
358
+ - `axios-retry`: 4.3.0 - Retry mechanism for API calls
359
+
360
+ ## Development
361
+
362
+ ### Building
363
+
364
+ ```bash
365
+ npm run build
366
+ ```
367
+
368
+ ### Development Mode
369
+
370
+ ```bash
371
+ npm start
372
+ ```
200
373
 
201
374
  ## License
202
375
 
203
- ISC
376
+ ISC
377
+
378
+ ## Contributing
379
+
380
+ When contributing to this package:
381
+ 1. Follow the MemberJunction code style guide
382
+ 2. Ensure all TypeScript types are properly defined
383
+ 3. Add appropriate error handling
384
+ 4. Update documentation for any new features
385
+ 5. Test with various Mistral models