modelmix 4.4.20 → 4.4.24

This diff represents the changes between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective registries.
package/index.js CHANGED
@@ -1,6 +1,7 @@
1
1
  const axios = require('axios');
2
2
  const fs = require('fs');
3
- const { fromBuffer } = require('file-type');
3
+ const fileType = require('file-type');
4
+ const detectFileTypeFromBuffer = fileType.fileTypeFromBuffer || fileType.fromBuffer;
4
5
  const { inspect } = require('util');
5
6
  const log = require('lemonlog')('ModelMix');
6
7
  const Bottleneck = require('bottleneck');
@@ -633,11 +634,14 @@ class ModelMix {
633
634
 
634
635
  // Detect mimeType if not provided
635
636
  if (!mimeType) {
636
- const fileType = await fromBuffer(buffer);
637
- if (!fileType || !fileType.mime.startsWith('image/')) {
637
+ if (typeof detectFileTypeFromBuffer !== 'function') {
638
+ throw new Error('file-type module does not expose a buffer detector');
639
+ }
640
+ const detectedType = await detectFileTypeFromBuffer(buffer);
641
+ if (!detectedType || !detectedType.mime.startsWith('image/')) {
638
642
  throw new Error(`Invalid image - unable to detect valid image format`);
639
643
  }
640
- mimeType = fileType.mime;
644
+ mimeType = detectedType.mime;
641
645
  }
642
646
 
643
647
  // Update the content with processed image
@@ -1334,7 +1338,34 @@ class MixCustom {
1334
1338
  }
1335
1339
 
1336
1340
  static extractMessage(data) {
1337
- const message = data.choices[0].message?.content?.trim() || '';
1341
+ const choice = data?.choices?.[0] || {};
1342
+ const messageObj = choice.message || {};
1343
+ const finishReason = choice.finish_reason;
1344
+
1345
+ if (typeof messageObj.refusal === 'string' && messageObj.refusal.trim().length > 0) {
1346
+ throw new Error(`OpenAI model refused to process this request: ${messageObj.refusal}`);
1347
+ }
1348
+
1349
+ if (finishReason === 'content_filter') {
1350
+ throw new Error('OpenAI response was blocked by content_filter.');
1351
+ }
1352
+
1353
+ let message = '';
1354
+ if (typeof messageObj.content === 'string') {
1355
+ message = messageObj.content.trim();
1356
+ } else if (Array.isArray(messageObj.content)) {
1357
+ const refusalPart = messageObj.content.find(part => part?.type === 'refusal' || (typeof part?.refusal === 'string' && part.refusal.trim().length > 0));
1358
+ if (refusalPart) {
1359
+ const refusalText = typeof refusalPart.refusal === 'string' ? refusalPart.refusal : 'No refusal text provided.';
1360
+ throw new Error(`OpenAI model refused to process this request: ${refusalText}`);
1361
+ }
1362
+ message = messageObj.content
1363
+ .filter(part => typeof part?.text === 'string')
1364
+ .map(part => part.text)
1365
+ .join('')
1366
+ .trim();
1367
+ }
1368
+
1338
1369
  const endTagIndex = message.indexOf('</think>');
1339
1370
  if (message.startsWith('<think>') && endTagIndex !== -1) {
1340
1371
  return message.substring(endTagIndex + 8).trim();
@@ -2020,10 +2051,25 @@ class MixAnthropic extends MixCustom {
2020
2051
  }
2021
2052
 
2022
2053
  static extractMessage(data) {
2023
- if (data.content?.[1]?.text) {
2024
- return data.content[1].text;
2054
+ const content = Array.isArray(data?.content) ? data.content : [];
2055
+
2056
+ // Anthropic can return text in different positions depending on thinking/tool blocks.
2057
+ const textBlock = content.find(block => typeof block?.text === 'string' && block.text.trim().length > 0);
2058
+ if (textBlock) {
2059
+ return textBlock.text;
2060
+ }
2061
+
2062
+ // Empty/non-text content is often due to safety refusal or token limits.
2063
+ const stopReason = data?.stop_reason;
2064
+ const contentTypes = content.map(block => block?.type || 'unknown').join(', ') || 'none';
2065
+
2066
+ if (stopReason === 'refusal') {
2067
+ throw new Error('Anthropic refused to process this request (content policy). Try different wording or a fallback model.');
2068
+ }
2069
+ if (!content.length) {
2070
+ throw new Error(`Anthropic returned empty content (stop_reason: ${stopReason ?? 'unknown'}).`);
2025
2071
  }
2026
- return data.content[0].text;
2072
+ throw new Error(`Anthropic content blocks are missing .text (stop_reason: ${stopReason ?? 'unknown'}, content_types: ${contentTypes}).`);
2027
2073
  }
2028
2074
 
2029
2075
  static extractThink(data) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "modelmix",
3
- "version": "4.4.20",
3
+ "version": "4.4.24",
4
4
  "description": "🧬 Reliable interface with automatic fallback for AI LLMs.",
5
5
  "main": "index.js",
6
6
  "repository": {
@@ -82,5 +82,39 @@ describe('Image Processing and Multimodal Support Tests', () => {
82
82
  expect(response).to.include('small PNG test image');
83
83
  });
84
84
 
85
+ it('should detect image mime type from buffer when content-type header is missing', async () => {
86
+ const imageUrl = 'https://assets.example.com/test-image';
87
+ const pngBase64 = 'iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8z8BQz0AEYBxVSF+FABJADveWkH6oAAAAAElFTkSuQmCC';
88
+ const pngBuffer = Buffer.from(pngBase64, 'base64');
89
+
90
+ model.sonnet46()
91
+ .addText('Describe this image')
92
+ .addImageFromUrl(imageUrl);
93
+
94
+ // No content-type header on purpose: this forces buffer-based detection.
95
+ nock('https://assets.example.com')
96
+ .get('/test-image')
97
+ .reply(200, pngBuffer);
98
+
99
+ nock('https://api.anthropic.com')
100
+ .post('/v1/messages')
101
+ .reply(function (uri, body) {
102
+ const userMsg = body.messages.find(m => m.role === 'user');
103
+ expect(userMsg).to.exist;
104
+ const imageContent = userMsg.content.find(c => c.type === 'image');
105
+ expect(imageContent).to.exist;
106
+ expect(imageContent.source.type).to.equal('base64');
107
+ expect(imageContent.source.media_type).to.equal('image/png');
108
+ expect(imageContent.source.data).to.equal(pngBase64);
109
+ return [200, {
110
+ content: [{ type: "text", text: "Image received." }],
111
+ role: "assistant"
112
+ }];
113
+ });
114
+
115
+ const response = await model.message();
116
+ expect(response).to.include('Image received.');
117
+ });
118
+
85
119
  });
86
120
  });
@@ -1,12 +0,0 @@
1
- {
2
- "permissions": {
3
- "allow": [
4
- "Bash(node:*)",
5
- "Bash(npm install:*)",
6
- "Bash(mkdir:*)",
7
- "Bash(npm test)",
8
- "Bash(npm test:*)"
9
- ],
10
- "deny": []
11
- }
12
- }