modelmix 3.2.2 → 3.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/MODELS.md ADDED
@@ -0,0 +1,249 @@
1
+ # ModelMix Provider Documentation
2
+
3
+ This document describes how each model provider in ModelMix handles data input and output formats.
4
+
5
+ ## Common Structure
6
+
7
+ All providers inherit from `MixCustom` base class which provides common functionality for:
8
+ - API key management
9
+ - Error handling
10
+ - Basic request/response processing
11
+ - Stream handling
12
+
13
+ ## Provider-Specific Details
14
+
15
+ ### OpenAI (MixOpenAI)
16
+ - **Base URL**: `https://api.openai.com/v1/chat/completions`
17
+ - **Input Format**:
18
+ ```json
19
+ {
20
+ "messages": [
21
+ {
22
+ "role": "system",
23
+ "content": "system message"
24
+ },
25
+ {
26
+ "role": "user",
27
+ "content": [
28
+ {
29
+ "type": "text",
30
+ "text": "message text"
31
+ },
32
+ {
33
+ "type": "image_url",
34
+ "image_url": {
35
+ "url": "data:image/jpeg;base64,..."
36
+ }
37
+ }
38
+ ]
39
+ }
40
+ ],
41
+ "model": "model-name",
42
+ "temperature": 1,
43
+ "max_tokens": 5000,
44
+ "top_p": 1
45
+ }
46
+ ```
47
+ - **Output Format**:
48
+ ```json
49
+ {
50
+ "choices": [
51
+ {
52
+ "message": {
53
+ "content": "response text"
54
+ }
55
+ }
56
+ ]
57
+ }
58
+ ```
59
+ - **Special Notes**:
60
+ - Removes `max_tokens` and `temperature` for o1/o3 models
61
+ - Converts image messages to base64 data URLs
62
+
63
+ ### Anthropic (MixAnthropic)
64
+ - **Base URL**: `https://api.anthropic.com/v1/messages`
65
+ - **Input Format**:
66
+ ```json
67
+ {
68
+ "system": "system message",
69
+ "messages": [
70
+ {
71
+ "role": "user",
72
+ "content": [
73
+ {
74
+ "type": "text",
75
+ "text": "message text"
76
+ },
77
+ {
78
+ "type": "image",
79
+ "source": {
80
+ "type": "base64",
81
+ "media_type": "image/jpeg",
82
+ "data": "base64data"
83
+ }
84
+ }
85
+ ]
86
+ }
87
+ ],
88
+ "model": "claude-3-sonnet-20240229",
89
+ "temperature": 1,
90
+ "top_p": 1
91
+ }
92
+ ```
93
+ - **Output Format**:
94
+ ```json
95
+ {
96
+ "content": [
97
+ {
98
+ "text": "response text"
99
+ }
100
+ ]
101
+ }
102
+ ```
103
+ - **Special Notes**:
104
+ - Removes `top_p` when thinking mode is enabled
105
+ - Uses `x-api-key` header instead of `authorization`
106
+ - Requires `anthropic-version` header
107
+
108
+ ### Perplexity (MixPerplexity)
109
+ - **Base URL**: `https://api.perplexity.ai/chat/completions`
110
+ - **Input Format**: Same as OpenAI
111
+ - **Output Format**: Same as OpenAI
112
+ - **Special Notes**: Uses standard OpenAI-compatible format
113
+
114
+ ### Grok (MixGrok)
115
+ - **Base URL**: `https://api.x.ai/v1/chat/completions`
116
+ - **Input Format**: Same as OpenAI
117
+ - **Output Format**: Same as OpenAI
118
+ - **Special Notes**: Inherits from MixOpenAI
119
+
120
+ ### Together (MixTogether)
121
+ - **Base URL**: `https://api.together.xyz/v1/chat/completions`
122
+ - **Input Format**:
123
+ ```json
124
+ {
125
+ "messages": [
126
+ {
127
+ "role": "system",
128
+ "content": "system message"
129
+ },
130
+ {
131
+ "role": "user",
132
+ "content": "message text"
133
+ }
134
+ ],
135
+ "model": "model-name",
136
+ "stop": ["<|eot_id|>", "<|eom_id|>"]
137
+ }
138
+ ```
139
+ - **Output Format**: Same as OpenAI
140
+ - **Special Notes**:
141
+ - Flattens content arrays to strings
142
+ - Adds default stop tokens
143
+
144
+ ### Google (MixGoogle)
145
+ - **Base URL**: `https://generativelanguage.googleapis.com/v1beta/models`
146
+ - **Input Format**:
147
+ ```json
148
+ {
149
+ "contents": [
150
+ {
151
+ "role": "user",
152
+ "parts": [
153
+ {
154
+ "text": "message text"
155
+ },
156
+ {
157
+ "inline_data": {
158
+ "mime_type": "image/jpeg",
159
+ "data": "base64data"
160
+ }
161
+ }
162
+ ]
163
+ }
164
+ ],
165
+ "generationConfig": {
166
+ "responseMimeType": "text/plain"
167
+ }
168
+ }
169
+ ```
170
+ - **Output Format**:
171
+ ```json
172
+ {
173
+ "candidates": [
174
+ {
175
+ "content": {
176
+ "parts": [
177
+ {
178
+ "text": "response text"
179
+ }
180
+ ]
181
+ }
182
+ }
183
+ ]
184
+ }
185
+ ```
186
+ - **Special Notes**:
187
+ - Uses different role names (`model` instead of `assistant`)
188
+ - Requires model ID in URL path
189
+ - Doesn't support streaming
190
+ - Available Gemini models:
191
+ - `gemini-2.5-flash-preview-04-17`
192
+ - `gemini-2.5-pro-exp-03-25`
193
+ - `gemini-2.5-pro-preview-05-06`
194
+ - Each model has different capabilities:
195
+ - Flash: Fastest response time, best for simple tasks
196
+ - Pro: More capable, better for complex tasks
197
+ - Pro Exp: Experimental version with latest features
198
+
199
+ ### Cerebras (MixCerebras)
200
+ - **Base URL**: `https://api.cerebras.ai/v1/chat/completions`
201
+ - **Input Format**: Same as Together
202
+ - **Output Format**: Same as OpenAI
203
+ - **Special Notes**: Uses Together's message conversion
204
+
205
+ ### Ollama (MixOllama)
206
+ - **Base URL**: `http://localhost:11434/api/chat`
207
+ - **Input Format**:
208
+ ```json
209
+ {
210
+ "messages": [
211
+ {
212
+ "role": "system",
213
+ "content": "system message",
214
+ "images": []
215
+ },
216
+ {
217
+ "role": "user",
218
+ "content": "message text",
219
+ "images": ["base64data"]
220
+ }
221
+ ]
222
+ }
223
+ ```
224
+ - **Output Format**:
225
+ ```json
226
+ {
227
+ "message": {
228
+ "content": "response text"
229
+ }
230
+ }
231
+ ```
232
+ - **Special Notes**:
233
+ - Local deployment only
234
+ - Handles images in separate array
235
+ - No API key required
236
+
237
+ ### LM Studio (MixLMStudio)
238
+ - **Base URL**: `http://localhost:1234/v1/chat/completions`
239
+ - **Input Format**: Same as OpenAI
240
+ - **Output Format**: Same as OpenAI
241
+ - **Special Notes**:
242
+ - Local deployment only
243
+ - No API key required
244
+
245
+ ### Groq (MixGroq)
246
+ - **Base URL**: `https://api.groq.com/openai/v1/chat/completions`
247
+ - **Input Format**: Same as OpenAI
248
+ - **Output Format**: Same as OpenAI
249
+ - **Special Notes**: Uses OpenAI-compatible format
package/README.md CHANGED
@@ -47,6 +47,16 @@ Here's a quick example to get you started:
47
47
  import 'dotenv/config';
48
48
  import { ModelMix } from 'modelmix';
49
49
 
50
+ // Get structured JSON responses
51
+ const model = ModelMix.new()
52
+ .sonnet37() // Anthropic claude-3-7-sonnet-20250219
53
+ .addText("Name and capital of 3 South American countries.");
54
+
55
+ const outputExample = { countries: [{ name: "", capital: "" }] };
56
+ console.log(await model.json(outputExample));
57
+ ```
58
+
59
+ ```javascript
50
60
  // Basic setup with system prompt and debug mode
51
61
  const setup = {
52
62
  config: {
@@ -56,24 +66,24 @@ const setup = {
56
66
  };
57
67
 
58
68
  // Chain multiple models with automatic fallback
59
- const result = await ModelMix.create(setup)
60
- .sonnet37think()
61
- .o4mini({ config: { temperature: 0 } })
62
- .gemini25proExp()
63
- .gpt41nano()
64
- .grok3mini()
65
- .addText("What's your name?")
66
- .message();
67
-
68
- console.log(result);
69
+ const model = await ModelMix.new(setup)
70
+ .sonnet37think() // (main model) Anthropic claude-3-7-sonnet-20250219
71
+ .o4mini() // (fallback 1) OpenAI o4-mini
72
+ .gemini25proExp({ config: { temperature: 0 } }) // (fallback 2) Google gemini-2.5-pro-exp-03-25
73
+ .gpt41nano() // (fallback 3) OpenAI gpt-4.1-nano
74
+ .grok3mini() // (fallback 4) Grok grok-3-mini-beta
75
+ .addText("What's your name?");
76
+
77
+ console.log(await model.message());
78
+ ```
69
79
 
70
- // Get structured JSON responses
71
- const jsonResult = await ModelMix.create()
72
- .sonnet37()
73
- .addText("Name and capital of 3 South American countries.")
74
- .json({ countries: [{ name: "", capital: "" }] });
80
+ ```javascript
75
81
 
76
- console.log(jsonResult);
82
+ const ETH = await ModelMix.new()
83
+ .sonar() // Perplexity sonar
84
+ .addText('How much is ETH trading in USD?')
85
+ .json({ price: 1000.1 });
86
+ console.log(ETH.price);
77
87
  ```
78
88
 
79
89
  This pattern allows you to:
@@ -82,6 +92,54 @@ This pattern allows you to:
82
92
  - Get structured JSON responses when needed
83
93
  - Keep your code clean and maintainable
84
94
 
95
+ ## ⚡️ Shorthand Methods
96
+
97
+ ModelMix provides convenient shorthand methods for quickly accessing different AI models.
98
+ Here's a comprehensive list of available methods:
99
+
100
+ | Method | Provider | Model | Price (I/O) per 1 M tokens |
101
+ | ------------------ | ---------- | ------------------------------ | -------------------------- |
102
+ | `gpt41()` | OpenAI | gpt-4.1 | [\$2.00 / \$8.00][1] |
103
+ | `gpt41mini()` | OpenAI | gpt-4.1-mini | [\$0.40 / \$1.60][1] |
104
+ | `gpt41nano()` | OpenAI | gpt-4.1-nano | [\$0.10 / \$0.40][1] |
105
+ | `gpt4o()` | OpenAI | gpt-4o | [\$5.00 / \$20.00][1] |
106
+ | `o4mini()` | OpenAI | o4-mini | [\$1.10 / \$4.40][1] |
107
+ | `o3()` | OpenAI | o3 | [\$10.00 / \$40.00][1] |
108
+ | `sonnet37()` | Anthropic | claude-3-7-sonnet-20250219 | [\$3.00 / \$15.00][2] |
109
+ | `sonnet37think()` | Anthropic | claude-3-7-sonnet-20250219 | [\$3.00 / \$15.00][2] |
110
+ | `sonnet35()` | Anthropic | claude-3-5-sonnet-20241022 | [\$3.00 / \$15.00][2] |
111
+ | `haiku35()` | Anthropic | claude-3-5-haiku-20241022 | [\$0.80 / \$4.00][2] |
112
+ | `gemini25flash()` | Google | gemini-2.5-flash-preview-04-17 | [\$0.00 / \$0.00][3] |
113
+ | `gemini25proExp()` | Google | gemini-2.5-pro-exp-03-25 | [\$0.00 / \$0.00][3] |
114
+ | `gemini25pro()` | Google | gemini-2.5-pro-preview-05-06 | [\$2.50 / \$15.00][3] |
115
+ | `grok2()` | Grok | grok-2-latest | [\$2.00 / \$10.00][6] |
116
+ | `grok3()` | Grok | grok-3-beta | [\$3.00 / \$15.00][6] |
117
+ | `grok3mini()` | Grok | grok-3-mini-beta | [\$0.30 / \$0.50][6] |
118
+ | `sonar()` | Perplexity | sonar | [\$1.00 / \$1.00][4] |
119
+ | `sonarPro()` | Perplexity | sonar-pro | [\$3.00 / \$15.00][4] |
120
+ | `qwen3()` | Groq | Qwen3-235B-A22B-fp8-tput | [\$0.29 / \$0.39][5] |
121
+ | `scout()` | Groq | Llama-4-Scout-17B-16E-Instruct | [\$0.11 / \$0.34][5] |
122
+ | `maverick()` | Groq | Maverick-17B-128E-Instruct-FP8 | [\$0.20 / \$0.60][5] |
123
+
124
+ [1]: https://openai.com/api/pricing/ "Pricing | OpenAI"
125
+ [2]: https://docs.anthropic.com/en/docs/about-claude/pricing "Pricing - Anthropic"
126
+ [3]: https://ai.google.dev/gemini-api/docs/pricing "Google AI for Developers"
127
+ [4]: https://docs.perplexity.ai/guides/pricing "Pricing - Perplexity"
128
+ [5]: https://groq.com/pricing/ "Groq Pricing"
129
+ [6]: https://docs.x.ai/docs/models "xAI"
130
+
131
+ Each method accepts optional `options` and `config` parameters to customize the model's behavior. For example:
132
+
133
+ ```javascript
134
+ const result = await ModelMix.new({
135
+ options: { temperature: 0.7 },
136
+ config: { system: "You are a helpful assistant" }
137
+ })
138
+ .sonnet37()
139
+ .addText("Tell me a story about a cat")
140
+ .message();
141
+ ```
142
+
85
143
  ## 🔄 Templating Methods
86
144
 
87
145
  ### `replace` Method
@@ -90,7 +148,7 @@ The `replace` method is used to define key-value pairs for text replacement in t
90
148
 
91
149
  #### Usage:
92
150
  ```javascript
93
- gpt.replace({ '{{key1}}': 'value1', '{{key2}}': 'value2' });
151
+ model.replace({ '{{key1}}': 'value1', '{{key2}}': 'value2' });
94
152
  ```
95
153
 
96
154
  #### How it works:
@@ -99,7 +157,7 @@ gpt.replace({ '{{key1}}': 'value1', '{{key2}}': 'value2' });
99
157
 
100
158
  #### Example:
101
159
  ```javascript
102
- gpt
160
+ model
103
161
  .replace({ '{{name}}': 'Alice', '{{age}}': '30' })
104
162
  .addText('Hello {{name}}, are you {{age}} years old?');
105
163
  ```
@@ -111,7 +169,7 @@ The `replaceKeyFromFile` method is similar to `replace`, but it reads the replac
111
169
 
112
170
  #### Usage:
113
171
  ```javascript
114
- messageHandler.replaceKeyFromFile('longText', './path/to/file.txt');
172
+ model.replaceKeyFromFile('longText', './path/to/file.txt');
115
173
  ```
116
174
 
117
175
  #### How it works:
@@ -139,7 +197,7 @@ To activate debug mode in ModelMix and view detailed request information, follow
139
197
  1. In the ModelMix constructor, include `debug: true` in the configuration:
140
198
 
141
199
  ```javascript
142
- const mix = new ModelMix({
200
+ const mix = ModelMix.new({
143
201
  config: {
144
202
  debug: true
145
203
  // ... other configuration options ...
@@ -165,16 +223,16 @@ ModelMix now uses Bottleneck for efficient rate limiting of API requests. This i
165
223
 
166
224
  1. **Configuration**: Bottleneck is configured in the ModelMix constructor. You can customize the settings or use the default configuration:
167
225
 
168
- ```javascript
169
- const setup = {
170
- config: {
171
- bottleneck: {
172
- maxConcurrent: 8, // Maximum number of concurrent requests
173
- minTime: 500 // Minimum time between requests (in ms)
174
- }
226
+ ```javascript
227
+ const setup = {
228
+ config: {
229
+ bottleneck: {
230
+ maxConcurrent: 8, // Maximum number of concurrent requests
231
+ minTime: 500 // Minimum time between requests (in ms)
175
232
  }
176
- };
177
- ```
233
+ }
234
+ };
235
+ ```
178
236
 
179
237
  2. **Rate Limiting**: When you make a request using any of the attached models, Bottleneck automatically manages the request flow based on the configured settings.
180
238
 
@@ -205,53 +263,12 @@ new ModelMix(args = { options: {}, config: {} })
205
263
  - `reservoirRefreshInterval`: Reservoir refresh interval
206
264
  - ...(Additional configuration parameters can be added as needed)
207
265
 
208
- ### Shorthand Methods
209
-
210
- ModelMix provides convenient shorthand methods for quickly accessing different AI models. Here's a comprehensive list of available methods:
211
-
212
- | Method | Provider | Model | Description |
213
- | ------------------ | ----------- | ------------------------------ | -------------------------------------------- |
214
- | `gpt41()` | OpenAI | gpt-4.1 | OpenAI's GPT-4.1 model |
215
- | `gpt41mini()` | OpenAI | gpt-4.1-mini | OpenAI's GPT-4.1 Mini model |
216
- | `gpt41nano()` | OpenAI | gpt-4.1-nano | OpenAI's GPT-4.1 Nano model |
217
- | `gpt4o()` | OpenAI | gpt-4o | OpenAI's GPT-4 Optimized model |
218
- | `o4mini()` | OpenAI | o4-mini | OpenAI's O4 Mini model |
219
- | `o3()` | OpenAI | o3 | OpenAI's O3 model |
220
- | `sonnet37()` | Anthropic | claude-3-7-sonnet-20250219 | Anthropic's Claude 3.7 Sonnet model |
221
- | `sonnet37think()` | Anthropic | claude-3-7-sonnet-20250219 | Claude 3.7 Sonnet with thinking mode enabled |
222
- | `sonnet35()` | Anthropic | claude-3-5-sonnet-20241022 | Anthropic's Claude 3.5 Sonnet model |
223
- | `haiku35()` | Anthropic | claude-3-5-haiku-20241022 | Anthropic's Claude 3.5 Haiku model |
224
- | `gemini25flash()` | Google | gemini-2.5-flash-preview-04-17 | Google's Gemini 2.5 Flash model |
225
- | `gemini25proExp()` | Google | gemini-2.5-pro-exp-03-25 | Google's Gemini 2.5 Pro Experimental model |
226
- | `gemini25pro()` | Google | gemini-2.5-pro-preview-05-06 | Google's Gemini 2.5 Pro model |
227
- | `sonar()` | Perplexity | sonar-pro | Perplexity's Sonar Pro model |
228
- | `qwen3()` | Together AI | Qwen/Qwen3-235B-A22B-fp8-tput | Together AI's Qwen 3 model |
229
- | `grok2()` | Grok | grok-2-latest | Grok's latest version 2 model |
230
- | `grok3()` | Grok | grok-3-beta | Grok's version 3 beta model |
231
- | `grok3mini()` | Grok | grok-3-mini-beta | Grok's version 3 mini beta model |
232
- | `scout()` | Cerebras | llama-4-scout-17b-16e-instruct | Cerebras' Llama 4 Scout model |
233
-
234
- Each method accepts optional `options` and `config` parameters to customize the model's behavior. For example:
235
-
236
- ```javascript
237
- const result = await ModelMix.create()
238
- .sonnet37({
239
- options: { temperature: 0.7 },
240
- config: { system: "You are a helpful assistant" }
241
- })
242
- .message();
243
- ```
244
-
245
266
  **Methods**
246
267
 
247
- - `attach(modelInstance)`: Attaches a model instance to the `ModelMix`.
248
- - `create(modelKey, overOptions = {})`: Creates a new `MessageHandler` for the specified model.
249
-
250
- ### MessageHandler Class Overview
251
-
252
- **Methods**
268
+ - `attach(modelKey, modelInstance)`: Attaches a model instance to the `ModelMix`.
269
+ - `ModelMix.new(setup)`: `static` factory that creates a new `ModelMix`.
270
+ - `new()`: instance method that creates a new `ModelMix` reusing the current instance's setup.
253
271
 
254
- - `new()`: Initializes a new message handler instance.
255
272
  - `addText(text, config = { role: "user" })`: Adds a text message.
256
273
  - `addTextFromFile(filePath, config = { role: "user" })`: Adds a text message from a file path.
257
274
  - `addImage(filePath, config = { role: "user" })`: Adds an image message from a file path.
package/demo/custom.mjs CHANGED
@@ -1,6 +1,6 @@
1
1
  import 'dotenv/config'
2
2
 
3
- import { ModelMix, MixCerebras } from '../index.js';
3
+ import { ModelMix, MixCustom } from '../index.js';
4
4
 
5
5
  const mmix = new ModelMix({
6
6
  options: {
@@ -13,8 +13,46 @@ const mmix = new ModelMix({
13
13
  }
14
14
  });
15
15
 
16
- mmix.attach(new MixCerebras());
16
+ class MixTogether extends MixCustom {
17
+ getDefaultConfig(customConfig) {
18
+ return super.getDefaultConfig({
19
+ url: 'https://api.together.xyz/v1/chat/completions',
20
+ apiKey: process.env.TOGETHER_API_KEY,
21
+ ...customConfig
22
+ });
23
+ }
24
+
25
+ getDefaultOptions(customOptions) {
26
+ return {
27
+ stop: ["<|eot_id|>", "<|eom_id|>"],
28
+ ...customOptions
29
+ };
30
+ }
31
+
32
+ static convertMessages(messages) {
33
+ return messages.map(message => {
34
+ if (message.content instanceof Array) {
35
+ message.content = message.content.map(content => content.text).join("\n\n");
36
+ }
37
+ return message;
38
+ });
39
+ }
40
+
41
+ async create({ config = {}, options = {} } = {}) {
42
+ if (!this.config.apiKey) {
43
+ throw new Error('Together API key not found. Please provide it in config or set TOGETHER_API_KEY environment variable.');
44
+ }
45
+
46
+ const content = config.system + config.systemExtra;
47
+ options.messages = [{ role: 'system', content }, ...options.messages || []];
48
+ options.messages = MixTogether.convertMessages(options.messages);
49
+
50
+ return super.create({ config, options });
51
+ }
52
+ }
53
+
54
+ mmix.attach('Qwen/Qwen3-235B-A22B-fp8-tput', new MixTogether());
17
55
 
18
- let r = mmix.create('llama-4-scout-17b-16e-instruct').addText('hi there');
56
+ let r = mmix.addText('hi there');
19
57
  r = await r.addText('do you like cats?').message();
20
58
  console.log(r);
package/demo/demo.mjs CHANGED
@@ -5,6 +5,7 @@ import { ModelMix, MixOpenAI, MixAnthropic, MixPerplexity, MixOllama } from '../
5
5
  const mmix = new ModelMix({
6
6
  options: {
7
7
  max_tokens: 200,
8
+ temperature: 0.5,
8
9
  },
9
10
  config: {
10
11
  system: 'You are {name} from Melmac.',
@@ -14,52 +15,44 @@ const mmix = new ModelMix({
14
15
  }
15
16
  });
16
17
 
17
- mmix.attach(new MixOpenAI());
18
- mmix.attach(new MixAnthropic());
19
- mmix.attach(new MixPerplexity({
20
- config: {
21
- apiKey: process.env.PPLX_API_KEY,
22
- system: 'You are my personal assistant.'
23
- },
24
18
 
25
- }));
26
- mmix.attach(new MixOllama({
19
+ const pplxSettings = {
27
20
  config: {
28
- prefix: ['llava'],
29
- },
30
- options: {
31
- temperature: 0.5,
21
+ apiKey: process.env.PPLX_API_KEY,
22
+ system: 'You are my personal assistant.',
23
+ max_tokens: 500
32
24
  }
33
- }));
25
+ };
26
+
34
27
 
35
28
  mmix.replace({ '{name}': 'ALF' });
36
29
 
37
30
  console.log("\n" + '--------| gpt-4.1-nano |--------');
38
- const gpt = mmix.create('gpt-4.1-nano', { options: { temperature: 0 } }).addText("Have you ever eaten a {animal}?");
31
+ const gpt = mmix.attach('gpt-4.1-nano', new MixOpenAI({ options: { temperature: 0 } })).addText("Have you ever eaten a {animal}?");
39
32
  gpt.replace({ '{animal}': 'cat' });
40
33
  console.log(await gpt.json({ time: '24:00:00', message: 'Hello' }, { time: 'Time in format HH:MM:SS' }));
41
34
 
42
- // console.log("\n" + '--------| claude-3-5-sonnet-20240620 |--------');
43
- // const claude = mmix.create('claude-3-5-sonnet-20240620', { options: { temperature: 0 } });
44
- // claude.addImageFromUrl('https://pbs.twimg.com/media/F6-GsjraAAADDGy?format=jpg');
45
- // const imageDescription = await claude.addText('describe the image').message();
46
- // console.log(imageDescription);
35
+ console.log("\n" + '--------| claude-3-5-sonnet-20240620 |--------');
36
+ const claude = ModelMix.new().attach('claude-3-5-sonnet-20240620', new MixAnthropic());
37
+ claude.addImageFromUrl('https://pbs.twimg.com/media/F6-GsjraAAADDGy?format=jpg');
38
+ const imageDescription = await claude.addText('describe the image').message();
39
+ console.log(imageDescription);
47
40
 
48
41
  console.log("\n" + '--------| claude-3-7-sonnet-20250219 |--------');
49
- const writer = mmix.create('claude-3-7-sonnet-20250219', { options: { temperature: 0.5 } });
42
+ const writer = ModelMix.new().attach('claude-3-7-sonnet-20250219', new MixAnthropic());
50
43
  writer.setSystem('You are a writer like Stephen King');
51
44
  writer.replaceKeyFromFile('{story_title}', './title.md');
52
45
  const story = await writer.addTextFromFile('./prompt.md').message();
53
46
  console.log(story);
54
47
 
55
- // console.log("\n" + '--------| llama-3-sonar-large-32k-online |--------');
56
- // const pplx = mmix.create('llama-3-sonar-large-32k-online', { config: { max_tokens: 500 } });
57
- // pplx.addText('How much is ETH trading in USD?');
58
- // const news = await pplx.addText('What are the 3 most recent Ethereum news?').message();
59
- // console.log(news);
48
+ console.log("\n" + '--------| sonar |--------');
49
+ const pplx = ModelMix.new().sonar(pplxSettings);
50
+ pplx.addText('How much is ETH trading in USD?');
51
+ const ETH = await pplx.json({ price: 1000.1 });
52
+ console.log(ETH.price);
60
53
 
61
54
  // console.log("\n" + '--------| ollama (llava:latest) |--------');
62
- // await mmix.create('llava:latest')
55
+ // await mmix.new().attach('llava:latest', new MixOllama())
63
56
  // .addImage('./watson.jpg')
64
57
  // .addText('what is the predominant color?')
65
58
  // .stream((data) => { console.log(data.message); });
package/demo/fallback.mjs CHANGED
@@ -15,24 +15,13 @@ const mmix = new ModelMix({
15
15
  max_tokens: 8192,
16
16
  }
17
17
  });
18
- const an = new MixAnthropic();
19
- an.config.url = 'fail';
20
- mmix.attach(new MixOpenAI(), an, new MixGrok());
21
18
 
22
-
23
- const modelOptionsRef = ['claude-3-5-sonnet-20241022', 'gpt-4.1-nano'];
19
+ mmix.sonnet37({ config: { url: 'fail' } }).gpt41nano();
24
20
 
25
21
  async function main() {
26
- const response = await generateThread(modelOptionsRef);
22
+ mmix.addText('hola, como estas?');
23
+ const response = await mmix.message();
27
24
  console.log(response);
28
25
  }
29
26
 
30
- async function generateThread(modelOptionsRef) {
31
- const model = mmix.create(modelOptionsRef, { options: { temperature: 0.5 } });
32
- model.addText('hola, como estas?');
33
- const response = await model.message();
34
-
35
- return response.split('---').map(section => section.trim());
36
- }
37
-
38
27
  main();
package/demo/grok.mjs CHANGED
@@ -1,6 +1,6 @@
1
1
  import 'dotenv/config'
2
2
 
3
- import { ModelMix, MixGrok, MixAnthropic, MixOpenAI } from '../index.js';
3
+ import { ModelMix } from '../index.js';
4
4
 
5
5
  const mmix = new ModelMix({
6
6
  options: {
@@ -8,11 +8,14 @@ const mmix = new ModelMix({
8
8
  },
9
9
  config: {
10
10
  system: 'You are ALF from Melmac.',
11
- max_history: 2
11
+ max_history: 2,
12
+ debug: true
12
13
  }
13
14
  });
14
15
 
15
- mmix.attach(new MixGrok(), new MixAnthropic(), new MixOpenAI());
16
16
 
17
- const r = await mmix.create(['claude-3-7-sonnet-20250219', 'o3-mini', 'grok-2-latest']).addText('do you like cats?').message();
17
+ const r = await mmix.grok2()
18
+ .addText('hi there!')
19
+ .addText('do you like cats?')
20
+ .message();
18
21
  console.log(r);
package/demo/groq.mjs CHANGED
@@ -14,11 +14,11 @@ const mmix = new ModelMix({
14
14
  }
15
15
  });
16
16
 
17
- mmix.attach(new MixGroq({
17
+ mmix.attach('deepseek-r1-distill-llama-70b', new MixGroq({
18
18
  config: {
19
19
  apiKey: env.GROQ_API_KEY,
20
20
  }
21
21
  }));
22
22
 
23
- const r = await mmix.create('deepseek-r1-distill-llama-70b').addText('do you like cats?').message();
23
+ const r = await mmix.addText('do you like cats?').message();
24
24
  console.log(r)