modelmix 3.1.8 → 3.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/MODELS.md ADDED
@@ -0,0 +1,249 @@
1
+ # ModelMix Provider Documentation
2
+
3
+ This document describes how each model provider in ModelMix handles data input and output formats.
4
+
5
+ ## Common Structure
6
+
7
+ All providers inherit from `MixCustom` base class which provides common functionality for:
8
+ - API key management
9
+ - Error handling
10
+ - Basic request/response processing
11
+ - Stream handling
12
+
13
+ ## Provider-Specific Details
14
+
15
+ ### OpenAI (MixOpenAI)
16
+ - **Base URL**: `https://api.openai.com/v1/chat/completions`
17
+ - **Input Format**:
18
+ ```json
19
+ {
20
+ "messages": [
21
+ {
22
+ "role": "system",
23
+ "content": "system message"
24
+ },
25
+ {
26
+ "role": "user",
27
+ "content": [
28
+ {
29
+ "type": "text",
30
+ "text": "message text"
31
+ },
32
+ {
33
+ "type": "image_url",
34
+ "image_url": {
35
+ "url": "data:image/jpeg;base64,..."
36
+ }
37
+ }
38
+ ]
39
+ }
40
+ ],
41
+ "model": "model-name",
42
+ "temperature": 1,
43
+ "max_tokens": 5000,
44
+ "top_p": 1
45
+ }
46
+ ```
47
+ - **Output Format**:
48
+ ```json
49
+ {
50
+ "choices": [
51
+ {
52
+ "message": {
53
+ "content": "response text"
54
+ }
55
+ }
56
+ ]
57
+ }
58
+ ```
59
+ - **Special Notes**:
60
+ - Removes `max_tokens` and `temperature` for o1/o3 models
61
+ - Converts image messages to base64 data URLs
62
+
63
+ ### Anthropic (MixAnthropic)
64
+ - **Base URL**: `https://api.anthropic.com/v1/messages`
65
+ - **Input Format**:
66
+ ```json
67
+ {
68
+ "system": "system message",
69
+ "messages": [
70
+ {
71
+ "role": "user",
72
+ "content": [
73
+ {
74
+ "type": "text",
75
+ "text": "message text"
76
+ },
77
+ {
78
+ "type": "image",
79
+ "source": {
80
+ "type": "base64",
81
+ "media_type": "image/jpeg",
82
+ "data": "base64data"
83
+ }
84
+ }
85
+ ]
86
+ }
87
+ ],
88
+ "model": "claude-3-sonnet-20240229",
89
+ "temperature": 1,
90
+ "top_p": 1
91
+ }
92
+ ```
93
+ - **Output Format**:
94
+ ```json
95
+ {
96
+ "content": [
97
+ {
98
+ "text": "response text"
99
+ }
100
+ ]
101
+ }
102
+ ```
103
+ - **Special Notes**:
104
+ - Removes `top_p` when thinking mode is enabled
105
+ - Uses `x-api-key` header instead of `authorization`
106
+ - Requires `anthropic-version` header
107
+
108
+ ### Perplexity (MixPerplexity)
109
+ - **Base URL**: `https://api.perplexity.ai/chat/completions`
110
+ - **Input Format**: Same as OpenAI
111
+ - **Output Format**: Same as OpenAI
112
+ - **Special Notes**: Uses standard OpenAI-compatible format
113
+
114
+ ### Grok (MixGrok)
115
+ - **Base URL**: `https://api.x.ai/v1/chat/completions`
116
+ - **Input Format**: Same as OpenAI
117
+ - **Output Format**: Same as OpenAI
118
+ - **Special Notes**: Inherits from MixOpenAI
119
+
120
+ ### Together (MixTogether)
121
+ - **Base URL**: `https://api.together.xyz/v1/chat/completions`
122
+ - **Input Format**:
123
+ ```json
124
+ {
125
+ "messages": [
126
+ {
127
+ "role": "system",
128
+ "content": "system message"
129
+ },
130
+ {
131
+ "role": "user",
132
+ "content": "message text"
133
+ }
134
+ ],
135
+ "model": "model-name",
136
+ "stop": ["<|eot_id|>", "<|eom_id|>"]
137
+ }
138
+ ```
139
+ - **Output Format**: Same as OpenAI
140
+ - **Special Notes**:
141
+ - Flattens content arrays to strings
142
+ - Adds default stop tokens
143
+
144
+ ### Google (MixGoogle)
145
+ - **Base URL**: `https://generativelanguage.googleapis.com/v1beta/models`
146
+ - **Input Format**:
147
+ ```json
148
+ {
149
+ "contents": [
150
+ {
151
+ "role": "user",
152
+ "parts": [
153
+ {
154
+ "text": "message text"
155
+ },
156
+ {
157
+ "inline_data": {
158
+ "mime_type": "image/jpeg",
159
+ "data": "base64data"
160
+ }
161
+ }
162
+ ]
163
+ }
164
+ ],
165
+ "generationConfig": {
166
+ "responseMimeType": "text/plain"
167
+ }
168
+ }
169
+ ```
170
+ - **Output Format**:
171
+ ```json
172
+ {
173
+ "candidates": [
174
+ {
175
+ "content": {
176
+ "parts": [
177
+ {
178
+ "text": "response text"
179
+ }
180
+ ]
181
+ }
182
+ }
183
+ ]
184
+ }
185
+ ```
186
+ - **Special Notes**:
187
+ - Uses different role names (`model` instead of `assistant`)
188
+ - Requires model ID in URL path
189
+ - Doesn't support streaming
190
+ - Available Gemini models:
191
+ - `gemini-2.5-flash-preview-04-17`
192
+ - `gemini-2.5-pro-exp-03-25`
193
+ - `gemini-2.5-pro-preview-05-06`
194
+ - Each model has different capabilities:
195
+ - Flash: Fastest response time, best for simple tasks
196
+ - Pro: More capable, better for complex tasks
197
+ - Pro Exp: Experimental version with latest features
198
+
199
+ ### Cerebras (MixCerebras)
200
+ - **Base URL**: `https://api.cerebras.ai/v1/chat/completions`
201
+ - **Input Format**: Same as Together
202
+ - **Output Format**: Same as OpenAI
203
+ - **Special Notes**: Uses Together's message conversion
204
+
205
+ ### Ollama (MixOllama)
206
+ - **Base URL**: `http://localhost:11434/api/chat`
207
+ - **Input Format**:
208
+ ```json
209
+ {
210
+ "messages": [
211
+ {
212
+ "role": "system",
213
+ "content": "system message",
214
+ "images": []
215
+ },
216
+ {
217
+ "role": "user",
218
+ "content": "message text",
219
+ "images": ["base64data"]
220
+ }
221
+ ]
222
+ }
223
+ ```
224
+ - **Output Format**:
225
+ ```json
226
+ {
227
+ "message": {
228
+ "content": "response text"
229
+ }
230
+ }
231
+ ```
232
+ - **Special Notes**:
233
+ - Local deployment only
234
+ - Handles images in separate array
235
+ - No API key required
236
+
237
+ ### LM Studio (MixLMStudio)
238
+ - **Base URL**: `http://localhost:1234/v1/chat/completions`
239
+ - **Input Format**: Same as OpenAI
240
+ - **Output Format**: Same as OpenAI
241
+ - **Special Notes**:
242
+ - Local deployment only
243
+ - No API key required
244
+
245
+ ### Groq (MixGroq)
246
+ - **Base URL**: `https://api.groq.com/openai/v1/chat/completions`
247
+ - **Input Format**: Same as OpenAI
248
+ - **Output Format**: Same as OpenAI
249
+ - **Special Notes**: Uses OpenAI-compatible format
package/README.md CHANGED
@@ -2,13 +2,16 @@
2
2
 
3
3
  **ModelMix** is a versatile module that enables seamless integration of various language models from different providers through a unified interface. With ModelMix, you can effortlessly manage and utilize multiple AI models while controlling request rates to avoid provider restrictions.
4
4
 
5
+ Are you one of those developers who wants to apply language models to everything? Do you need a reliable fallback system to ensure your application never fails? ModelMix is the answer! It allows you to chain multiple models together, automatically falling back to the next model if one fails, ensuring your application always gets a response.
6
+
5
7
  ## ✨ Features
6
8
 
7
9
  - **Unified Interface**: Interact with multiple AI models through a single, coherent API.
8
10
  - **Request Rate Control**: Manage the rate of requests to adhere to provider limitations using Bottleneck.
9
- - **Flexible Integration**: Easily integrate popular models like OpenAI, Anthropic, Perplexity, Groq, Together AI, Ollama, LM Studio or custom models.
11
+ - **Flexible Integration**: Easily integrate popular models like OpenAI, Anthropic, Perplexity, Groq, Together AI, Ollama, LM Studio, Google Gemini or custom models.
10
12
  - **History Tracking**: Automatically logs the conversation history with model responses, allowing you to limit the number of historical messages with `max_history`.
11
13
  - **Model Fallbacks**: Automatically try different models if one fails or is unavailable.
14
+ - **Chain Multiple Models**: Create powerful chains of models that work together, with automatic fallback if one fails.
12
15
 
13
16
  ## 📦 Installation
14
17
 
@@ -18,7 +21,7 @@ First, install the ModelMix package:
18
21
  npm install modelmix
19
22
  ```
20
23
 
21
- Optional: install dotenv to manage environment variables:
24
+ Recommended: install dotenv to manage environment variables:
22
25
 
23
26
  ```bash
24
27
  npm install dotenv
@@ -35,129 +38,106 @@ Here's a quick example to get you started:
35
38
  PPLX_API_KEY="your_perplexity_api_key"
36
39
  GROQ_API_KEY="your_groq_api_key"
37
40
  TOGETHER_API_KEY="your_together_api_key"
41
+ GOOGLE_API_KEY="your_google_api_key"
38
42
  ```
39
43
 
40
44
  2. **Create and configure your models**:
41
45
 
42
- ```javascript
43
- import 'dotenv/config';
44
- import { ModelMix, MixOpenAI, MixAnthropic, MixPerplexity, MixOllama, MixTogether } from 'modelmix';
45
-
46
- const env = process.env;
47
-
48
- const mmix = new ModelMix({
49
- options: {
50
- max_tokens: 200,
51
- },
52
- config: {
53
- system: "You are {name} from Melmac.",
54
- max_history: 2,
55
- bottleneck: { maxConcurrent: 2 },
56
- debug: true
57
- }
58
- });
59
-
60
- mmix.replace({ '{name}': 'ALF' });
61
-
62
- mmix.attach(new MixOpenAI({ config: { apiKey: env.OPENAI_API_KEY } }));
63
- mmix.attach(new MixAnthropic()); // it will use the default apiKey from process.env
64
- mmix.attach(new MixPerplexity({
65
- config: {
66
- apiKey: env.PPLX_API_KEY
67
- },
68
- options: {
69
- system: "You are my personal assistant."
70
- }
71
- }));
72
- mmix.attach(new MixOllama({
73
- config: {
74
- url: 'http://localhost:11434/api/chat',
75
- prefix: ['llava'],
76
- },
77
- options: {
78
- temperature: 0.5,
79
- }
80
- }));
81
- mmix.attach(new MixTogether());
82
- ```
83
-
84
- 3. **Generate responses from different models**:
85
-
86
- #### Model Fallbacks
87
- ```javascript
88
- // Create a message handler with multiple fallback models
89
- const handler = mmix.create(['grok-2-latest', 'claude-3-7-sonnet-20250219']);
90
-
91
- // If the first model (grok-2-latest) fails or is unavailable,
92
- // ModelMix will automatically try the next model (claude-3-7-sonnet)
93
- const response = await handler.addText('do you like cats?').message();
94
- ```
46
+ ```javascript
47
+ import 'dotenv/config';
48
+ import { ModelMix } from 'modelmix';
95
49
 
96
- The `create()` method accepts either a single model name as a string or an array of model names. When an array is provided, ModelMix will attempt to use each model in order until a successful response is received. This is useful for:
97
- - Implementing fallback options when a primary model is unavailable
98
- - Load balancing across different providers
99
- - Ensuring high availability in your application
50
+ // Get structured JSON responses
51
+ const model = ModelMix.new()
52
+ .sonnet37() // Anthropic claude-3-7-sonnet-20250219
53
+ .addText("Name and capital of 3 South American countries.");
100
54
 
101
- #### gpt-4o-mini
102
- ```javascript
103
- const gpt = mmix.create('gpt-4o-mini', { options: { temperature: 0 } });
104
- gpt.addText("Have you ever eaten a {animal}?");
105
- gpt.replace({ '{animal}': 'cat' });
106
- console.log(await gpt.message());
107
- ```
55
+ const outputExample = { countries: [{ name: "", capital: "" }] };
56
+ console.log(await model.json(outputExample));
57
+ ```
108
58
 
109
- #### gpt-4.1-nano (json)
110
- ```javascript
111
- console.log("\n" + '--------| gpt-4.1-nano |--------');
112
- const gpt = mmix.create('gpt-4.1-nano', { options: { temperature: 0 } }).addText("Have you ever eaten a {animal}?");
113
- gpt.replace({ '{animal}': 'cat' });
114
- const schemaExample = { time: '24:00:00', message: 'Hello' };
115
- const schemaDescription = { time: 'Time in format HH:MM:SS' }; // optional
116
- console.log(await gpt.json(schemaExample, schemaDescription));
117
- ```
59
+ ```javascript
60
+ // Basic setup with system prompt and debug mode
61
+ const setup = {
62
+ config: {
63
+ system: "You are ALF, if they ask your name, respond with 'ALF'.",
64
+ debug: true
65
+ }
66
+ };
67
+
68
+ // Chain multiple models with automatic fallback
69
+ const model = await ModelMix.new(setup)
70
+ .sonnet37think() // (main model) Anthropic claude-3-7-sonnet-20250219
71
+ .o4mini() // (fallback 1) OpenAI o4-mini
72
+ .gemini25proExp({ config: { temperature: 0 } }) // (fallback 2) Google gemini-2.5-pro-exp-03-25
73
+ .gpt41nano() // (fallback 3) OpenAI gpt-4.1-nano
74
+ .grok3mini() // (fallback 4) Grok grok-3-mini-beta
75
+ .addText("What's your name?");
76
+
77
+ console.log(await model.message());
78
+ ```
118
79
 
119
- #### claude-3-5-sonnet-20240620 (writer)
120
- ```javascript
121
- const writer = mmix.create('claude-3-5-sonnet-20240620', { options: { temperature: 0.5 } });
122
- writer.setSystem('You are a writer like Stephen King'); // or setSystemFromFile
123
- writer.replace({ '{story_title}': 'The Mysterious Package' })
124
- // or write.replaceKeyFromFile('{story_title}', './title.md');
125
- const story = await writer.addTextFromFile('./prompt.md').message();
126
- console.log(story);
127
- ```
128
- #### claude-3-5-sonnet-20240620 (image)
129
- ```javascript
130
- console.log("\n" + '--------| |--------');
131
- const claude = mmix.create('claude-3-5-sonnet-20240620', { options: { temperature: 0 } });
132
- claude.addImage("./watson.jpg"); // or claude.addImageFromUrl(url)
133
- const imageDescription = await claude.addText("Describe the image").message();
134
- console.log(imageDescription);
135
- ```
80
+ ```javascript
136
81
 
137
- #### pplx-70b-online
138
- ```javascript
139
- const pplx = mmix.create('pplx-70b-online', { config: { max_tokens: 500 } });
140
- pplx.addText('How much is ETH trading in USD?');
141
- const news = await pplx.addText('What are the 3 most recent Ethereum news?').message();
142
- console.log(news);
143
- ```
82
+ const ETH = await ModelMix.new()
83
+ .sonar() // Perplexity sonar
84
+ .addText('How much is ETH trading in USD?')
85
+ .json({ price: 1000.1 });
86
+ console.log(ETH.price);
87
+ ```
144
88
 
145
- #### ollama (llava:latest)
146
- ```javascript
147
- await mmix.create('llava:latest')
148
- .addImage("./watson.jpg")
149
- .addText("What is the predominant color?")
150
- .stream((data) => { console.log(data.message); });
151
- ```
89
+ This pattern allows you to:
90
+ - Chain multiple models together
91
+ - Automatically fall back to the next model if one fails
92
+ - Get structured JSON responses when needed
93
+ - Keep your code clean and maintainable
94
+
95
+ ## ⚡️ Shorthand Methods
96
+
97
+ ModelMix provides convenient shorthand methods for quickly accessing different AI models. Here's a comprehensive list of available methods:
98
+
99
+ | Method | Provider | Model | Price (I/O) per 1 M tokens |
100
+ | ------------------ | ---------- | ------------------------------ | -------------------------- |
101
+ | `gpt41()` | OpenAI | gpt-4.1 | [\$2.00 / \$8.00][1] |
102
+ | `gpt41mini()` | OpenAI | gpt-4.1-mini | [\$0.40 / \$1.60][1] |
103
+ | `gpt41nano()` | OpenAI | gpt-4.1-nano | [\$0.10 / \$0.40][1] |
104
+ | `gpt4o()` | OpenAI | gpt-4o | [\$5.00 / \$20.00][1] |
105
+ | `o4mini()` | OpenAI | o4-mini | [\$1.10 / \$4.40][1] |
106
+ | `o3()` | OpenAI | o3 | [\$10.00 / \$40.00][1] |
107
+ | `sonnet37()` | Anthropic | claude-3-7-sonnet-20250219 | [\$3.00 / \$15.00][2] |
108
+ | `sonnet37think()` | Anthropic | claude-3-7-sonnet-20250219 | [\$3.00 / \$15.00][2] |
109
+ | `sonnet35()` | Anthropic | claude-3-5-sonnet-20241022 | [\$3.00 / \$15.00][2] |
110
+ | `haiku35()` | Anthropic | claude-3-5-haiku-20241022 | [\$0.80 / \$4.00][2] |
111
+ | `gemini25flash()` | Google | gemini-2.5-flash-preview-04-17 | [\$0.00 / \$0.00][5] |
112
+ | `gemini25proExp()` | Google | gemini-2.5-pro-exp-03-25 | [\$0.00 / \$0.00][5] |
113
+ | `gemini25pro()` | Google | gemini-2.5-pro-preview-05-06 | [\$2.50 / \$15.00][5] |
114
+ | `grok2()` | Grok | grok-2-latest | [\$2.00 / \$10.00][9] |
115
+ | `grok3()` | Grok | grok-3-beta | [\$3.00 / \$15.00][9] |
116
+ | `grok3mini()` | Grok | grok-3-mini-beta | [\$0.30 / \$0.50][9] |
117
+ | `sonar()` | Perplexity | sonar | [\$1.00 / \$1.00][7] |
118
+ | `sonarPro()` | Perplexity | sonar-pro | [\$3.00 / \$15.00][7] |
119
+ | `qwen3()` | Groq | Qwen3-235B-A22B-fp8-tput | [\$0.29 / \$0.39][8] |
120
+ | `scout()` | Groq | Llama-4-Scout-17B-16E-Instruct | [\$0.11 / \$0.34][8] |
121
+ | `maverick()` | Groq | Maverick-17B-128E-Instruct-FP8 | [\$0.20 / \$0.60][8] |
122
+
123
+ [1]: https://openai.com/api/pricing/ "Pricing | OpenAI"
124
+ [2]: https://www.anthropic.com/pricing "Pricing - Anthropic"
125
+ [5]: https://ai.google.dev/gemini-api/docs/pricing "Google AI for Developers"
126
+ [7]: https://docs.perplexity.ai/guides/pricing "Pricing - Perplexity"
127
+ [8]: https://groq.com/pricing/ "Groq Pricing"
128
+ [9]: https://docs.x.ai/docs/models "xAI"
129
+
130
+ Each method accepts optional `options` and `config` parameters to customize the model's behavior. For example:
152
131
 
153
- #### Together AI (deepseek-ai/DeepSeek-R1)
154
- ```javascript
155
- const together = mmix.create('deepseek-ai/DeepSeek-R1', { options: { temperature: 0.7 } });
156
- together.addText('What are the main differences between Python and JavaScript?');
157
- const comparison = await together.message();
158
- console.log(comparison);
159
- ```
160
- 4. Find the files for this example at: [/ModelMix/demo](https://github.com/clasen/ModelMix/tree/master/demo).
132
+ ```javascript
133
+ const result = await ModelMix.new({
134
+ options: { temperature: 0.7 },
135
+ config: { system: "You are a helpful assistant" }
136
+ })
137
+ .sonnet37()
138
+ .addText("Tell me a story about a cat")
139
+ .message();
140
+ ```
161
141
 
162
142
  ## 🔄 Templating Methods
163
143
 
@@ -167,7 +147,7 @@ The `replace` method is used to define key-value pairs for text replacement in t
167
147
 
168
148
  #### Usage:
169
149
  ```javascript
170
- gpt.replace({ '{{key1}}': 'value1', '{{key2}}': 'value2' });
150
+ model.replace({ '{{key1}}': 'value1', '{{key2}}': 'value2' });
171
151
  ```
172
152
 
173
153
  #### How it works:
@@ -176,7 +156,7 @@ gpt.replace({ '{{key1}}': 'value1', '{{key2}}': 'value2' });
176
156
 
177
157
  #### Example:
178
158
  ```javascript
179
- gpt
159
+ model
180
160
  .replace({ '{{name}}': 'Alice', '{{age}}': '30' })
181
161
  .addText('Hello {{name}}, are you {{age}} years old?');
182
162
  ```
@@ -188,7 +168,7 @@ The `replaceKeyFromFile` method is similar to `replace`, but it reads the replac
188
168
 
189
169
  #### Usage:
190
170
  ```javascript
191
- messageHandler.replaceKeyFromFile('longText', './path/to/file.txt');
171
+ model.replaceKeyFromFile('longText', './path/to/file.txt');
192
172
  ```
193
173
 
194
174
  #### How it works:
@@ -216,7 +196,7 @@ To activate debug mode in ModelMix and view detailed request information, follow
216
196
  1. In the ModelMix constructor, include `debug: true` in the configuration:
217
197
 
218
198
  ```javascript
219
- const mix = new ModelMix({
199
+ const mix = ModelMix.new({
220
200
  config: {
221
201
  debug: true
222
202
  // ... other configuration options ...
@@ -242,16 +222,16 @@ ModelMix now uses Bottleneck for efficient rate limiting of API requests. This i
242
222
 
243
223
  1. **Configuration**: Bottleneck is configured in the ModelMix constructor. You can customize the settings or use the default configuration:
244
224
 
245
- ```javascript
246
- const mmix = new ModelMix({
247
- config: {
248
- bottleneck: {
249
- maxConcurrent: 8, // Maximum number of concurrent requests
250
- minTime: 500 // Minimum time between requests (in ms)
251
- }
225
+ ```javascript
226
+ const setup = {
227
+ config: {
228
+ bottleneck: {
229
+ maxConcurrent: 8, // Maximum number of concurrent requests
230
+ minTime: 500 // Minimum time between requests (in ms)
252
231
  }
253
- });
254
- ```
232
+ }
233
+ };
234
+ ```
255
235
 
256
236
  2. **Rate Limiting**: When you make a request using any of the attached models, Bottleneck automatically manages the request flow based on the configured settings.
257
237
 
@@ -284,14 +264,10 @@ new ModelMix(args = { options: {}, config: {} })
284
264
 
285
265
  **Methods**
286
266
 
287
- - `attach(modelInstance)`: Attaches a model instance to the `ModelMix`.
288
- - `create(modelKey, overOptions = {})`: Creates a new `MessageHandler` for the specified model.
289
-
290
- ### MessageHandler Class Overview
267
+ - `attach(modelKey, modelInstance)`: Attaches a model instance to the `ModelMix`.
268
+ - `ModelMix.new(args)`: `static` Creates and returns a new `ModelMix` instance.
269
+ - `new()`: Creates a new `ModelMix` that reuses the current instance's setup.
291
270
 
292
- **Methods**
293
-
294
- - `new()`: Initializes a new message handler instance.
295
271
  - `addText(text, config = { role: "user" })`: Adds a text message.
296
272
  - `addTextFromFile(filePath, config = { role: "user" })`: Adds a text message from a file path.
297
273
  - `addImage(filePath, config = { role: "user" })`: Adds an image message from a file path.
@@ -404,6 +380,16 @@ new MixTogether(args = { config: {}, options: {} })
404
380
  - **config**: Specific configuration settings for Together AI, including the `apiKey`.
405
381
  - **options**: Default options for Together AI model instances.
406
382
 
383
+ ### MixGoogle Class Overview
384
+
385
+ ```javascript
386
+ new MixGoogle(args = { config: {}, options: {} })
387
+ ```
388
+
389
+ - **args**: Configuration object with `config` and `options` properties.
390
+ - **config**: Specific configuration settings for Google Gemini, including the `apiKey`.
391
+ - **options**: Default options for Google Gemini model instances.
392
+
407
393
  ## 🤝 Contributing
408
394
 
409
395
  Contributions are welcome! If you find any issues or have suggestions for improvement, please open an issue or submit a pull request on the [GitHub repository](https://github.com/clasen/ModelMix).
package/demo/custom.mjs CHANGED
@@ -1,6 +1,6 @@
1
1
  import 'dotenv/config'
2
2
 
3
- import { ModelMix, MixCerebras, MixTogether } from '../index.js';
3
+ import { ModelMix, MixCustom } from '../index.js';
4
4
 
5
5
  const mmix = new ModelMix({
6
6
  options: {
@@ -13,8 +13,46 @@ const mmix = new ModelMix({
13
13
  }
14
14
  });
15
15
 
16
- mmix.attach(new MixCerebras());
16
+ class MixTogether extends MixCustom {
17
+ getDefaultConfig(customConfig) {
18
+ return super.getDefaultConfig({
19
+ url: 'https://api.together.xyz/v1/chat/completions',
20
+ apiKey: process.env.TOGETHER_API_KEY,
21
+ ...customConfig
22
+ });
23
+ }
24
+
25
+ getDefaultOptions(customOptions) {
26
+ return {
27
+ stop: ["<|eot_id|>", "<|eom_id|>"],
28
+ ...customOptions
29
+ };
30
+ }
31
+
32
+ static convertMessages(messages) {
33
+ return messages.map(message => {
34
+ if (message.content instanceof Array) {
35
+ message.content = message.content.map(content => content.text).join("\n\n");
36
+ }
37
+ return message;
38
+ });
39
+ }
40
+
41
+ async create({ config = {}, options = {} } = {}) {
42
+ if (!this.config.apiKey) {
43
+ throw new Error('Together API key not found. Please provide it in config or set TOGETHER_API_KEY environment variable.');
44
+ }
45
+
46
+ const content = config.system + config.systemExtra;
47
+ options.messages = [{ role: 'system', content }, ...options.messages || []];
48
+ options.messages = MixTogether.convertMessages(options.messages);
49
+
50
+ return super.create({ config, options });
51
+ }
52
+ }
53
+
54
+ mmix.attach('Qwen/Qwen3-235B-A22B-fp8-tput', new MixTogether());
17
55
 
18
- let r = mmix.create('llama-4-scout-17b-16e-instruct').addText('hi there');
56
+ let r = mmix.addText('hi there');
19
57
  r = await r.addText('do you like cats?').message();
20
58
  console.log(r);