modelmix 3.3.2 → 3.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +18 -17
  2. package/index.js +29 -15
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -94,7 +94,8 @@ This pattern allows you to:
94
94
 
95
95
  ## ⚡️ Shorthand Methods
96
96
 
97
- ModelMix provides convenient shorthand methods for quickly accessing different AI models. Here's a comprehensive list of available methods:
97
+ ModelMix provides convenient shorthand methods for quickly accessing different AI models.
98
+ Here's a comprehensive list of available methods:
98
99
 
99
100
  | Method | Provider | Model | Price (I/O) per 1 M tokens |
100
101
  | ------------------ | ---------- | ------------------------------ | -------------------------- |
@@ -108,24 +109,24 @@ ModelMix provides convenient shorthand methods for quickly accessing different A
108
109
  | `sonnet37think()` | Anthropic | claude-3-7-sonnet-20250219 | [\$3.00 / \$15.00][2] |
109
110
  | `sonnet35()` | Anthropic | claude-3-5-sonnet-20241022 | [\$3.00 / \$15.00][2] |
110
111
  | `haiku35()` | Anthropic | claude-3-5-haiku-20241022 | [\$0.80 / \$4.00][2] |
111
- | `gemini25flash()` | Google | gemini-2.5-flash-preview-04-17 | [\$0.00 / \$0.00][5] |
112
- | `gemini25proExp()` | Google | gemini-2.5-pro-exp-03-25 | [\$0.00 / \$0.00][5] |
113
- | `gemini25pro()` | Google | gemini-2.5-pro-preview-05-06 | [\$2.50 / \$15.00][5] |
114
- | `grok2()` | Grok | grok-2-latest | [\$2.00 / \$10.00][9] |
115
- | `grok3()` | Grok | grok-3-beta | [\$3.00 / \$15.00][9] |
116
- | `grok3mini()` | Grok | grok-3-mini-beta | [\$0.30 / \$0.50][9] |
117
- | `sonar()` | Perplexity | sonar | [\$1.00 / \$1.00][7] |
118
- | `sonarPro()` | Perplexity | sonar-pro | [\$3.00 / \$15.00][7] |
119
- | `qwen3()` | Groq | Qwen3-235B-A22B-fp8-tput | [\$0.29 / \$0.39][8] |
120
- | `scout()` | Groq | Llama-4-Scout-17B-16E-Instruct | [\$0.11 / \$0.34][8] |
121
- | `maverick()` | Groq | Maverick-17B-128E-Instruct-FP8 | [\$0.20 / \$0.60][8] |
112
+ | `gemini25flash()` | Google | gemini-2.5-flash-preview-04-17 | [\$0.00 / \$0.00][3] |
113
+ | `gemini25proExp()` | Google | gemini-2.5-pro-exp-03-25 | [\$0.00 / \$0.00][3] |
114
+ | `gemini25pro()` | Google | gemini-2.5-pro-preview-05-06 | [\$2.50 / \$15.00][3] |
115
+ | `grok2()` | Grok | grok-2-latest | [\$2.00 / \$10.00][6] |
116
+ | `grok3()` | Grok | grok-3-beta | [\$3.00 / \$15.00][6] |
117
+ | `grok3mini()` | Grok | grok-3-mini-beta | [\$0.30 / \$0.50][6] |
118
+ | `sonar()` | Perplexity | sonar | [\$1.00 / \$1.00][4] |
119
+ | `sonarPro()` | Perplexity | sonar-pro | [\$3.00 / \$15.00][4] |
120
+ | `qwen3()` | Groq | Qwen3-235B-A22B-fp8-tput | [\$0.29 / \$0.39][5] |
121
+ | `scout()` | Groq | Llama-4-Scout-17B-16E-Instruct | [\$0.11 / \$0.34][5] |
122
+ | `maverick()` | Groq | Maverick-17B-128E-Instruct-FP8 | [\$0.20 / \$0.60][5] |
122
123
 
123
124
  [1]: https://openai.com/api/pricing/ "Pricing | OpenAI"
124
- [2]: https://www.anthropic.com/pricing "Pricing - Anthropic"
125
- [5]: https://ai.google.dev/gemini-api/docs/pricing "Google AI for Developers"
126
- [7]: https://docs.perplexity.ai/guides/pricing "Pricing - Perplexity"
127
- [8]: https://groq.com/pricing/ "Groq Pricing"
128
- [9]: https://docs.x.ai/docs/models "xAI"
125
+ [2]: https://docs.anthropic.com/en/docs/about-claude/pricing "Pricing - Anthropic"
126
+ [3]: https://ai.google.dev/gemini-api/docs/pricing "Google AI for Developers"
127
+ [4]: https://docs.perplexity.ai/guides/pricing "Pricing - Perplexity"
128
+ [5]: https://groq.com/pricing/ "Groq Pricing"
129
+ [6]: https://docs.x.ai/docs/models "xAI"
129
130
 
130
131
  Each method accepts optional `options` and `config` parameters to customize the model's behavior. For example:
131
132
 
package/index.js CHANGED
@@ -37,7 +37,7 @@ class ModelMix {
37
37
  }
38
38
 
39
39
  replace(keyValues) {
40
- this.config.replace = keyValues;
40
+ this.config.replace = { ...this.config.replace, ...keyValues };
41
41
  return this;
42
42
  }
43
43
 
@@ -140,7 +140,7 @@ class ModelMix {
140
140
  if (mix.together) this.attach('meta-llama/Llama-4-Scout-17B-16E-Instruct', new MixTogether({ options, config }));
141
141
  if (mix.cerebras) this.attach('llama-4-scout-17b-16e-instruct', new MixCerebras({ options, config }));
142
142
  return this;
143
- }
143
+ }
144
144
  maverick({ options = {}, config = {}, mix = { groq: true, together: false } } = {}) {
145
145
  if (mix.groq) this.attach('meta-llama/llama-4-maverick-17b-128e-instruct', new MixGroq({ options, config }));
146
146
  if (mix.together) this.attach('meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8', new MixTogether({ options, config }));
@@ -568,15 +568,23 @@ class MixCustom {
568
568
  }
569
569
 
570
570
  extractMessage(data) {
571
- if (data.choices && data.choices[0].message.content) return data.choices[0].message.content;
571
+ if (data.choices && data.choices[0].message.content) return data.choices[0].message.content.trim();
572
572
  return '';
573
573
  }
574
574
 
575
575
  processResponse(response) {
576
- return {
577
- response: response.data,
578
- message: this.extractMessage(response.data)
579
- };
576
+ let message = this.extractMessage(response.data);
577
+
578
+ if (message.startsWith('<think>')) {
579
+ const endTagIndex = message.indexOf('</think>');
580
+ if (endTagIndex !== -1) {
581
+ const think = message.substring(7, endTagIndex).trim();
582
+ message = message.substring(endTagIndex + 8).trim();
583
+ return { response: response.data, message, think };
584
+ }
585
+ }
586
+
587
+ return { response: response.data, message };
580
588
  }
581
589
  }
582
590
 
@@ -665,18 +673,24 @@ class MixAnthropic extends MixCustom {
665
673
  return '';
666
674
  }
667
675
 
668
- extractMessage(data) {
669
- if (data.content) {
670
- // thinking
671
- if (data.content?.[1]?.text) {
672
- return data.content[1].text;
676
+ processResponse(response) {
677
+ if (response.data.content) {
678
+
679
+ if (response.data.content?.[1]?.text) {
680
+ return {
681
+ think: response.data.content[0]?.thinking,
682
+ message: response.data.content[1].text,
683
+ response: response.data
684
+ }
673
685
  }
674
686
 
675
- if (data.content[0].text) {
676
- return data.content[0].text;
687
+ if (response.data.content[0].text) {
688
+ return {
689
+ message: response.data.content[0].text,
690
+ response: response.data
691
+ }
677
692
  }
678
693
  }
679
- return '';
680
694
  }
681
695
  }
682
696
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "modelmix",
3
- "version": "3.3.2",
3
+ "version": "3.3.6",
4
4
  "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
5
5
  "main": "index.js",
6
6
  "repository": {