@mistralai/mistralai 0.0.5 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,43 +1,24 @@
  # Mistral Javascript Client
 
- You can use the Mistral Javascript client to interact with the Mistral AI API
+ You can use the Mistral Javascript client to interact with the Mistral AI API.
 
  ## Installing
 
  You can install the library in your project using:
 
- `npm install mistralai`
+ `npm install @mistralai/mistralai`
 
- ## Usage
-
- ### Chat
-
- The simplest use case is to chat with Mistral AI models:
-
- ```javascript
- const client = require("mistralai");
-
- const response = client.chat('le-tiny', [{role: 'user', content: 'What is your favourite French food, and why is it mayonnaise?'}])
-
- ```
-
- You can also use `client.chatStream` for streaming results.
-
- ### Embeddings
-
- To use our embedding API you can use the following code:
+ ## Run examples
 
- ```javascript
- const client = require('mistralai');
+ You can run the examples in the examples directory by installing them locally:
 
- const response = client.embed('le-embed', 'My favourite place to eat mayonnaise is embed');
+ ```bash
+ cd examples
+ npm install .
  ```
 
- ## Run examples
-
- Examples can be found in the `examples/` directory you can run them using:
+ You can then run the examples using node:
 
  ```bash
- node [example.js]
-
+ MISTRAL_API_KEY=XXXX node chat_with_streaming.js
  ```
@@ -0,0 +1,12 @@
+ import MistralClient from '@mistralai/mistralai';
+
+ const apiKey = process.env.MISTRAL_API_KEY;
+
+ const client = new MistralClient(apiKey);
+
+ const chatResponse = await client.chat({
+   model: 'mistral-tiny',
+   messages: [{role: 'user', content: 'What is the best French cheese?'}],
+ });
+
+ console.log('Chat:', chatResponse.choices[0].message.content);
@@ -0,0 +1,18 @@
+ import MistralClient from '@mistralai/mistralai';
+
+ const apiKey = process.env.MISTRAL_API_KEY;
+
+ const client = new MistralClient(apiKey);
+
+ const chatStreamResponse = await client.chatStream({
+   model: 'mistral-tiny',
+   messages: [{role: 'user', content: 'What is the best French cheese?'}],
+ });
+
+ console.log('Chat Stream:');
+ for await (const chunk of chatStreamResponse) {
+   if (chunk.choices[0].delta.content !== undefined) {
+     const streamText = chunk.choices[0].delta.content;
+     process.stdout.write(streamText);
+   }
+ }
@@ -0,0 +1,17 @@
+ import MistralClient from '@mistralai/mistralai';
+
+ const apiKey = process.env.MISTRAL_API_KEY;
+
+ const client = new MistralClient(apiKey);
+
+ const input = [];
+ for (let i = 0; i < 1; i++) {
+   input.push('What is the best French cheese?');
+ }
+
+ const embeddingsBatchResponse = await client.embeddings({
+   model: 'mistral-embed',
+   input: input,
+ });
+
+ console.log('Embeddings Batch:', embeddingsBatchResponse.data);
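
The loop above pushes only a single string, but `input` is an array, so the same call can embed a whole batch. A minimal sketch of a multi-string request, assuming each response entry carries `index` and `embedding` fields (the response shape is not shown in this diff):

```javascript
import MistralClient from '@mistralai/mistralai';

const client = new MistralClient(process.env.MISTRAL_API_KEY);

// Several inputs in one request; one embedding is returned per input.
const batch = [
  'What is the best French cheese?',
  'What is the best French wine?',
];

const response = await client.embeddings({model: 'mistral-embed', input: batch});

// Assumed response shape: data entries with index and embedding fields.
response.data.forEach((entry) => {
  console.log(entry.index, entry.embedding.length);
});
```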
@@ -0,0 +1,11 @@
+ import MistralClient from '@mistralai/mistralai';
+
+ const apiKey = process.env.MISTRAL_API_KEY;
+
+ const client = new MistralClient(apiKey);
+
+ const listModelsResponse = await client.listModels();
+
+ listModelsResponse.data.forEach((model) => {
+   console.log('Model:', model);
+ });
@@ -0,0 +1,33 @@
+ {
+   "name": "mistralai client examples",
+   "version": "1.0.0",
+   "lockfileVersion": 3,
+   "requires": true,
+   "packages": {
+     "": {
+       "name": "mistralai client examples",
+       "version": "1.0.0",
+       "dependencies": {
+         "@mistralai/mistralai": "file:../"
+       }
+     },
+     "..": {
+       "name": "@mistralai/mistralai",
+       "version": "0.0.1",
+       "license": "ISC",
+       "dependencies": {
+         "axios": "^1.6.2",
+         "axios-retry": "^4.0.0"
+       },
+       "devDependencies": {
+         "eslint": "^8.55.0",
+         "eslint-config-google": "^0.14.0",
+         "prettier": "2.8.8"
+       }
+     },
+     "node_modules/@mistralai/mistralai": {
+       "resolved": "..",
+       "link": true
+     }
+   }
+ }
@@ -0,0 +1,10 @@
+ {
+   "name": "mistralai client examples",
+   "version": "1.0.0",
+   "description": "",
+   "type": "module",
+   "dependencies": {
+     "@mistralai/mistralai": "file:../"
+   },
+   "keywords": []
+ }
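
Note the `"type": "module"` field: the example scripts rely on top-level `await`, which Node.js only permits in ES modules, so the examples would not run without it.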
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@mistralai/mistralai",
-   "version": "0.0.5",
+   "version": "0.0.6",
    "description": "",
    "author": "bam4d@mistral.ai",
    "license": "ISC",
package/src/client.js CHANGED
@@ -2,7 +2,7 @@ import axios from 'axios';
  import axiosRetry from 'axios-retry';
 
  const RETRY_STATUS_CODES = [429, 500, 502, 503, 504];
- const ENDPOINT = 'http://api.mistral.ai';
+ const ENDPOINT = 'https://api.mistral.ai';
 
  /**
   * MistralClient
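
The scheme fix is more than cosmetic: the API key accompanies every request, and under 0.0.5's `http://` endpoint it would have left the machine unencrypted unless the server forced a redirect first.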
@@ -10,11 +10,12 @@ const ENDPOINT = 'http://api.mistral.ai';
   */
  class MistralClient {
    /**
-    *
-    * @param {*} apiKey
-    * @param {*} endpoint
+    * A simple and lightweight client for the Mistral API
+    * @param {*} apiKey can be set as an environment variable MISTRAL_API_KEY,
+    * or provided in this parameter
+    * @param {*} endpoint defaults to https://api.mistral.ai
     */
-   constructor(apiKey, endpoint = ENDPOINT) {
+   constructor(apiKey = process.env.MISTRAL_API_KEY, endpoint = ENDPOINT) {
      this.endpoint = endpoint;
      this.apiKey = apiKey;
 
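
The constructor's new default makes the key argument optional. A minimal sketch of the two equivalent ways to build a client after this change:

```javascript
import MistralClient from '@mistralai/mistralai';

// Explicit key, as in 0.0.5 and earlier.
const explicitClient = new MistralClient('your-api-key');

// New in 0.0.6: with no argument, the constructor falls back to
// process.env.MISTRAL_API_KEY.
const envClient = new MistralClient();
```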
@@ -82,11 +83,11 @@ class MistralClient {
        model: model,
        messages: messages,
        temperature: temperature ?? undefined,
-       maxTokens: maxTokens ?? undefined,
-       topP: topP ?? undefined,
-       randomSeed: randomSeed ?? undefined,
+       max_tokens: maxTokens ?? undefined,
+       top_p: topP ?? undefined,
+       random_seed: randomSeed ?? undefined,
        stream: stream ?? undefined,
-       safeMode: safeMode ?? undefined,
+       safe_prompt: safeMode ?? undefined,
      };
    };
 
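
The client keeps camelCase option names but now serialises them to the snake_case fields the HTTP API expects. A sketch of the request body `_makeChatCompletionRequest` produces when every option is set (the values are illustrative):

```javascript
// Body built for the chat completion request (illustrative values):
const body = {
  model: 'mistral-tiny',
  messages: [{role: 'user', content: 'What is the best French cheese?'}],
  temperature: 0.5,
  max_tokens: 100, // maxTokens on the client side
  top_p: 0.9, // topP
  random_seed: 42, // randomSeed
  stream: false,
  safe_prompt: true, // safeMode
};
```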
@@ -97,24 +98,25 @@ class MistralClient {
    };
 
    /**
-    * Chat
-    * @param {*} model
-    * @param {*} messages
-    * @param {*} temperature
-    * @param {*} maxTokens
-    * @param {*} topP
-    * @param {*} randomSeed
-    * @param {*} safeMode
+    * A chat endpoint without streaming
+    * @param {*} model the name of the model to chat with, e.g. mistral-tiny
+    * @param {*} messages an array of messages to chat with, e.g.
+    * [{role: 'user', content: 'What is the best French cheese?'}]
+    * @param {*} temperature the temperature to use for sampling, e.g. 0.5
+    * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
+    * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
+    * @param {*} randomSeed the random seed to use for sampling, e.g. 42
+    * @param {*} safeMode whether to use safe mode, e.g. true
     * @return {Promise<Object>}
     */
-   chat = async function(
+   chat = async function({
      model,
      messages,
      temperature,
      maxTokens,
      topP,
      randomSeed,
-     safeMode) {
+     safeMode}) {
      const request = this._makeChatCompletionRequest(
        model,
        messages,
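
`chat` now destructures a single options object instead of taking positional parameters, so optional settings can be passed by name. A sketch of the old and new call styles:

```javascript
import MistralClient from '@mistralai/mistralai';

const client = new MistralClient(); // key read from MISTRAL_API_KEY

// 0.0.5 (positional): skipping an option meant passing undefined, e.g.
// await client.chat('mistral-tiny', messages, undefined, 100);

// 0.0.6 (named options):
const response = await client.chat({
  model: 'mistral-tiny',
  messages: [{role: 'user', content: 'What is the best French cheese?'}],
  maxTokens: 100,
});

console.log(response.choices[0].message.content);
```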
@@ -132,24 +134,25 @@ class MistralClient {
    };
 
    /**
-    * Chat with streaming
-    * @param {*} model
-    * @param {*} messages
-    * @param {*} temperature
-    * @param {*} maxTokens
-    * @param {*} topP
-    * @param {*} randomSeed
-    * @param {*} safeMode
+    * A chat endpoint that streams responses.
+    * @param {*} model the name of the model to chat with, e.g. mistral-tiny
+    * @param {*} messages an array of messages to chat with, e.g.
+    * [{role: 'user', content: 'What is the best French cheese?'}]
+    * @param {*} temperature the temperature to use for sampling, e.g. 0.5
+    * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
+    * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
+    * @param {*} randomSeed the random seed to use for sampling, e.g. 42
+    * @param {*} safeMode whether to use safe mode, e.g. true
     * @return {Promise<Object>}
     */
-   chatStream = async function* (
+   chatStream = async function* ({
      model,
      messages,
      temperature,
      maxTokens,
      topP,
      randomSeed,
-     safeMode) {
+     safeMode}) {
      const request = this._makeChatCompletionRequest(
        model,
        messages,
@@ -166,22 +169,30 @@ class MistralClient {
 
      for await (const chunk of response) {
        const chunkString = this.textDecoder.decode(chunk);
-       if (chunkString.startsWith('data:')) {
-         const chunkData = chunkString.substring(6).trim();
-         if (chunkData !== '[DONE]') {
-           yield JSON.parse(chunkData);
+       // split the chunks by new line
+       const chunkLines = chunkString.split('\n');
+       // Iterate through the lines
+       for (const chunkLine of chunkLines) {
+         // If the line starts with data: then it is a chunk
+         if (chunkLine.startsWith('data:')) {
+           const chunkData = chunkLine.substring(6).trim();
+           if (chunkData !== '[DONE]') {
+             yield JSON.parse(chunkData);
+           }
          }
        }
      }
    };
 
    /**
-    * Embeddings
-    * @param {*} model
-    * @param {*} input
+    * An embeddings endpoint that returns embeddings for a single input,
+    * or a batch of inputs
+    * @param {*} model The embedding model to use, e.g. mistral-embed
+    * @param {*} input The input to embed,
+    * e.g. ['What is the best French cheese?']
     * @return {Promise<Object>}
     */
-   embeddings = async function(model, input) {
+   embeddings = async function({model, input}) {
      const request = {
        model: model,
        input: input,
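
The old stream parser treated each decoded network chunk as a single `data:` event; when the server packs several SSE lines into one chunk, `JSON.parse` on the combined string throws. A self-contained sketch of the new splitting logic on a simulated two-event chunk (payloads are illustrative):

```javascript
// One network chunk carrying two server-sent events.
const chunk = new TextEncoder().encode(
    'data: {"id":"a"}\n\ndata: {"id":"b"}\n\n',
);

const parsed = [];
for (const line of new TextDecoder().decode(chunk).split('\n')) {
  if (line.startsWith('data:')) {
    const data = line.substring(6).trim();
    if (data !== '[DONE]') {
      parsed.push(JSON.parse(data));
    }
  }
}

// 0.0.5 would have called JSON.parse on the whole decoded string and thrown;
// the per-line loop recovers both events.
console.log(parsed); // [ { id: 'a' }, { id: 'b' } ]
```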
@@ -1,51 +0,0 @@
- import MistralClient from '../client.js';
-
- const apiKey = process.env.MISTRAL_API_KEY;
-
- const client = new MistralClient(apiKey);
-
- // LIST MODELS
- const listModelsResponse = await client.listModels();
-
- listModelsResponse.data.forEach((model) => {
-   console.log('Model:', model);
- });
-
- // CHAT
- const chatResponse = await client.chat(
-   'le-tiny-v2312',
-   [{role: 'user', content: 'hello world'}],
- );
-
- console.log('Chat:', chatResponse);
-
- // CHAT STREAM
- const chatStreamResponse = await client.chatStream(
-   'le-tiny-v2312', [{role: 'user', content: 'hello world'}],
- );
-
- for await (const chunk of chatStreamResponse) {
-   console.log('Chat Stream:', '' + chunk);
- }
-
- // chatStreamResponse.data.on('data', (data) => {
- //   console.log('Chat Stream:', '' + data);
- // });
-
- // EMBEDDINGS
- const embeddingsResponse = await client.embeddings('le-embed', 'hello world');
-
- console.log('Embeddings:', embeddingsResponse.data);
-
-
- // EMBEDDINGS BATCH
-
- // Create 100 strings to embed
- const input = [];
- for (let i = 0; i < 10; i++) {
-   input.push('hello world');
- }
-
- const embeddingsBatchResponse = await client.embeddings('le-embed', input);
-
- console.log('Embeddings Batch:', embeddingsBatchResponse.data);
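
Updating a 0.0.5-era script like the one deleted above is mechanical: wrap the positional arguments in an options object and switch to the released model names. A sketch under those assumptions:

```javascript
// Before (0.0.5): positional arguments, pre-release model name.
// const res = await client.embeddings('le-embed', 'hello world');

// After (0.0.6): options object, released model name.
const res = await client.embeddings({
  model: 'mistral-embed',
  input: ['hello world'],
});
```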