modelmix 4.4.2 → 4.4.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/demo/json.js +12 -10
- package/index.js +52 -12
- package/package.json +1 -1
- package/schema.js +46 -5
- package/test/bottleneck.test.js +4 -0
- package/test/history.test.js +572 -0
- package/test/json.test.js +185 -0
package/demo/json.js
CHANGED
|
@@ -11,17 +11,19 @@ const model = await ModelMix.new({ options: { max_tokens: 10000 }, config: { deb
|
|
|
11
11
|
// .gemini25flash()
|
|
12
12
|
.addText("Name and capital of 3 South American countries.")
|
|
13
13
|
|
|
14
|
-
const jsonResult = await model.json({
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
capital: "BUENOS AIRES"
|
|
18
|
-
}]
|
|
14
|
+
const jsonResult = await model.json([{
|
|
15
|
+
name: "Argentina",
|
|
16
|
+
capital: "BUENOS AIRES"
|
|
19
17
|
}, {
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
18
|
+
name: "Brazil",
|
|
19
|
+
capital: "BRASILIA"
|
|
20
|
+
}, {
|
|
21
|
+
name: "Colombia",
|
|
22
|
+
capital: "BOGOTA"
|
|
23
|
+
}], [{
|
|
24
|
+
name: { description: "name of the country", enum: ["Perú", "Colombia", "Argentina"] },
|
|
25
|
+
capital: "capital of the country in uppercase"
|
|
26
|
+
}], { addNote: true });
|
|
25
27
|
|
|
26
28
|
console.log(jsonResult);
|
|
27
29
|
console.log(model.lastRaw.tokens);
|
package/index.js
CHANGED
|
@@ -52,6 +52,7 @@ const MODEL_PRICING = {
|
|
|
52
52
|
// MiniMax
|
|
53
53
|
'MiniMax-M2.1': [0.30, 1.20],
|
|
54
54
|
'MiniMax-M2.5': [0.30, 1.20],
|
|
55
|
+
'fireworks/minimax-m2p5': [0.30, 1.20],
|
|
55
56
|
// Perplexity
|
|
56
57
|
'sonar': [1.00, 1.00],
|
|
57
58
|
'sonar-pro': [3.00, 15.00],
|
|
@@ -111,7 +112,7 @@ class ModelMix {
|
|
|
111
112
|
|
|
112
113
|
this.config = {
|
|
113
114
|
system: 'You are an assistant.',
|
|
114
|
-
max_history:
|
|
115
|
+
max_history: 0, // 0=no history (stateless), N=keep last N messages, -1=unlimited
|
|
115
116
|
debug: 0, // 0=silent, 1=minimal, 2=readable summary, 3=full (no truncate), 4=verbose (raw details)
|
|
116
117
|
bottleneck: defaultBottleneckConfig,
|
|
117
118
|
roundRobin: false, // false=fallback mode, true=round robin rotation
|
|
@@ -279,14 +280,14 @@ class ModelMix {
|
|
|
279
280
|
opus46think({ options = {}, config = {} } = {}) {
|
|
280
281
|
options = { ...MixAnthropic.thinkingOptions, ...options };
|
|
281
282
|
return this.attach('claude-opus-4-6', new MixAnthropic({ options, config }));
|
|
282
|
-
}
|
|
283
|
+
}
|
|
283
284
|
opus45think({ options = {}, config = {} } = {}) {
|
|
284
285
|
options = { ...MixAnthropic.thinkingOptions, ...options };
|
|
285
286
|
return this.attach('claude-opus-4-5-20251101', new MixAnthropic({ options, config }));
|
|
286
|
-
}
|
|
287
|
+
}
|
|
287
288
|
opus46({ options = {}, config = {} } = {}) {
|
|
288
289
|
return this.attach('claude-opus-4-6', new MixAnthropic({ options, config }));
|
|
289
|
-
}
|
|
290
|
+
}
|
|
290
291
|
opus45({ options = {}, config = {} } = {}) {
|
|
291
292
|
return this.attach('claude-opus-4-5-20251101', new MixAnthropic({ options, config }));
|
|
292
293
|
}
|
|
@@ -414,7 +415,7 @@ class ModelMix {
|
|
|
414
415
|
if (mix.fireworks) this.attach('accounts/fireworks/models/kimi-k2p5', new MixFireworks({ options, config }));
|
|
415
416
|
if (mix.openrouter) this.attach('moonshotai/kimi-k2.5', new MixOpenRouter({ options, config }));
|
|
416
417
|
return this;
|
|
417
|
-
}
|
|
418
|
+
}
|
|
418
419
|
|
|
419
420
|
kimiK2think({ options = {}, config = {}, mix = { together: true } } = {}) {
|
|
420
421
|
mix = { ...this.mix, ...mix };
|
|
@@ -438,8 +439,11 @@ class ModelMix {
|
|
|
438
439
|
return this;
|
|
439
440
|
}
|
|
440
441
|
|
|
441
|
-
minimaxM25({ options = {}, config = {} } = {}) {
|
|
442
|
-
|
|
442
|
+
minimaxM25({ options = {}, config = {}, mix = { minimax: true } } = {}) {
|
|
443
|
+
mix = { ...this.mix, ...mix };
|
|
444
|
+
if (mix.minimax) this.attach('MiniMax-M2.5', new MixMiniMax({ options, config }));
|
|
445
|
+
if (mix.fireworks) this.attach('fireworks/minimax-m2p5', new MixFireworks({ options, config }));
|
|
446
|
+
return this;
|
|
443
447
|
}
|
|
444
448
|
|
|
445
449
|
minimaxM2Stable({ options = {}, config = {} } = {}) {
|
|
@@ -636,6 +640,15 @@ class ModelMix {
|
|
|
636
640
|
|
|
637
641
|
async json(schemaExample = null, schemaDescription = {}, { type = 'json_object', addExample = false, addSchema = true, addNote = false } = {}) {
|
|
638
642
|
|
|
643
|
+
let isArrayWrap = false;
|
|
644
|
+
if (Array.isArray(schemaExample)) {
|
|
645
|
+
isArrayWrap = true;
|
|
646
|
+
schemaExample = { out: schemaExample };
|
|
647
|
+
if (Array.isArray(schemaDescription)) {
|
|
648
|
+
schemaDescription = { out: schemaDescription };
|
|
649
|
+
}
|
|
650
|
+
}
|
|
651
|
+
|
|
639
652
|
let options = {
|
|
640
653
|
response_format: { type },
|
|
641
654
|
stream: false,
|
|
@@ -662,7 +675,8 @@ class ModelMix {
|
|
|
662
675
|
}
|
|
663
676
|
}
|
|
664
677
|
const { message } = await this.execute({ options, config });
|
|
665
|
-
|
|
678
|
+
const parsed = JSON.parse(this._extractBlock(message));
|
|
679
|
+
return isArrayWrap ? parsed.out : parsed;
|
|
666
680
|
}
|
|
667
681
|
|
|
668
682
|
_extractBlock(response) {
|
|
@@ -756,7 +770,8 @@ class ModelMix {
|
|
|
756
770
|
await this.processImages();
|
|
757
771
|
this.applyTemplate();
|
|
758
772
|
|
|
759
|
-
// Smart message slicing
|
|
773
|
+
// Smart message slicing based on max_history:
|
|
774
|
+
// 0 = no history (stateless), N = keep last N messages, -1 = unlimited
|
|
760
775
|
if (this.config.max_history > 0) {
|
|
761
776
|
let sliceStart = Math.max(0, this.messages.length - this.config.max_history);
|
|
762
777
|
|
|
@@ -776,6 +791,8 @@ class ModelMix {
|
|
|
776
791
|
|
|
777
792
|
this.messages = this.messages.slice(sliceStart);
|
|
778
793
|
}
|
|
794
|
+
// max_history = -1: unlimited, no slicing
|
|
795
|
+
// max_history = 0: no history, messages only contain what was added since last call
|
|
779
796
|
|
|
780
797
|
this.messages = this.groupByRoles(this.messages);
|
|
781
798
|
this.options.messages = this.messages;
|
|
@@ -937,6 +954,29 @@ class ModelMix {
|
|
|
937
954
|
if (currentConfig.debug >= 1) console.log('');
|
|
938
955
|
|
|
939
956
|
this.lastRaw = result;
|
|
957
|
+
|
|
958
|
+
// Manage conversation history based on max_history setting
|
|
959
|
+
if (this.config.max_history === 0) {
|
|
960
|
+
// Stateless: clear messages so next call starts fresh
|
|
961
|
+
this.messages = [];
|
|
962
|
+
} else if (result.message) {
|
|
963
|
+
// Persist assistant response for multi-turn conversations
|
|
964
|
+
if (result.signature) {
|
|
965
|
+
this.messages.push({
|
|
966
|
+
role: "assistant", content: [{
|
|
967
|
+
type: "thinking",
|
|
968
|
+
thinking: result.think,
|
|
969
|
+
signature: result.signature
|
|
970
|
+
}, {
|
|
971
|
+
type: "text",
|
|
972
|
+
text: result.message
|
|
973
|
+
}]
|
|
974
|
+
});
|
|
975
|
+
} else {
|
|
976
|
+
this.addText(result.message, { role: "assistant" });
|
|
977
|
+
}
|
|
978
|
+
}
|
|
979
|
+
|
|
940
980
|
return result;
|
|
941
981
|
|
|
942
982
|
} catch (error) {
|
|
@@ -1039,7 +1079,7 @@ class ModelMix {
|
|
|
1039
1079
|
return;
|
|
1040
1080
|
}
|
|
1041
1081
|
|
|
1042
|
-
if (this.config.max_history < 3) {
|
|
1082
|
+
if (this.config.max_history >= 0 && this.config.max_history < 3) {
|
|
1043
1083
|
log.warn(`MCP ${key} requires at least 3 max_history. Setting to 3.`);
|
|
1044
1084
|
this.config.max_history = 3;
|
|
1045
1085
|
}
|
|
@@ -1075,7 +1115,7 @@ class ModelMix {
|
|
|
1075
1115
|
|
|
1076
1116
|
addTool(toolDefinition, callback) {
|
|
1077
1117
|
|
|
1078
|
-
if (this.config.max_history < 3) {
|
|
1118
|
+
if (this.config.max_history >= 0 && this.config.max_history < 3) {
|
|
1079
1119
|
log.warn(`MCP ${toolDefinition.name} requires at least 3 max_history. Setting to 3.`);
|
|
1080
1120
|
this.config.max_history = 3;
|
|
1081
1121
|
}
|
|
@@ -2133,7 +2173,7 @@ class MixGoogle extends MixCustom {
|
|
|
2133
2173
|
}
|
|
2134
2174
|
|
|
2135
2175
|
const options = {};
|
|
2136
|
-
|
|
2176
|
+
|
|
2137
2177
|
// Solo incluir tools si el array no está vacío
|
|
2138
2178
|
if (functionDeclarations.length > 0) {
|
|
2139
2179
|
options.tools = [{
|
package/package.json
CHANGED
package/schema.js
CHANGED
|
@@ -1,3 +1,29 @@
|
|
|
1
|
+
const META_KEYS = new Set(['description', 'required', 'enum', 'default']);
|
|
2
|
+
|
|
3
|
+
function isDescriptor(value) {
|
|
4
|
+
if (!value || typeof value !== 'object' || Array.isArray(value)) return false;
|
|
5
|
+
const keys = Object.keys(value);
|
|
6
|
+
return keys.length > 0 && keys.every(k => META_KEYS.has(k));
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
function makeNullable(fieldSchema) {
|
|
10
|
+
if (!fieldSchema.type) return fieldSchema;
|
|
11
|
+
if (Array.isArray(fieldSchema.type)) {
|
|
12
|
+
if (!fieldSchema.type.includes('null')) fieldSchema.type.push('null');
|
|
13
|
+
} else {
|
|
14
|
+
fieldSchema.type = [fieldSchema.type, 'null'];
|
|
15
|
+
}
|
|
16
|
+
return fieldSchema;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
function getNestedDescriptions(desc) {
|
|
20
|
+
if (!desc) return {};
|
|
21
|
+
if (typeof desc === 'string') return {};
|
|
22
|
+
if (Array.isArray(desc)) return desc[0] || {};
|
|
23
|
+
if (isDescriptor(desc)) return {};
|
|
24
|
+
return desc;
|
|
25
|
+
}
|
|
26
|
+
|
|
1
27
|
function generateJsonSchema(example, descriptions = {}) {
|
|
2
28
|
function detectType(key, value) {
|
|
3
29
|
if (value === null) return { type: 'null' };
|
|
@@ -32,7 +58,7 @@ function generateJsonSchema(example, descriptions = {}) {
|
|
|
32
58
|
if (typeof value[0] === 'object' && !Array.isArray(value[0])) {
|
|
33
59
|
return {
|
|
34
60
|
type: 'array',
|
|
35
|
-
items: generateJsonSchema(value[0], descriptions[key]
|
|
61
|
+
items: generateJsonSchema(value[0], getNestedDescriptions(descriptions[key]))
|
|
36
62
|
};
|
|
37
63
|
} else {
|
|
38
64
|
return {
|
|
@@ -42,7 +68,7 @@ function generateJsonSchema(example, descriptions = {}) {
|
|
|
42
68
|
}
|
|
43
69
|
}
|
|
44
70
|
if (typeof value === 'object') {
|
|
45
|
-
return generateJsonSchema(value, descriptions[key]
|
|
71
|
+
return generateJsonSchema(value, getNestedDescriptions(descriptions[key]));
|
|
46
72
|
}
|
|
47
73
|
return {};
|
|
48
74
|
}
|
|
@@ -65,13 +91,28 @@ function generateJsonSchema(example, descriptions = {}) {
|
|
|
65
91
|
|
|
66
92
|
for (const key in example) {
|
|
67
93
|
const fieldSchema = detectType(key, example[key]);
|
|
94
|
+
const desc = descriptions[key];
|
|
95
|
+
let isRequired = true;
|
|
68
96
|
|
|
69
|
-
if (
|
|
70
|
-
|
|
97
|
+
if (desc) {
|
|
98
|
+
if (typeof desc === 'string') {
|
|
99
|
+
fieldSchema.description = desc;
|
|
100
|
+
} else if (typeof desc === 'object' && !Array.isArray(desc) && isDescriptor(desc)) {
|
|
101
|
+
if (desc.description) fieldSchema.description = desc.description;
|
|
102
|
+
if (desc.enum) fieldSchema.enum = desc.enum;
|
|
103
|
+
if (desc.default !== undefined) fieldSchema.default = desc.default;
|
|
104
|
+
if (desc.required === false) {
|
|
105
|
+
isRequired = false;
|
|
106
|
+
makeNullable(fieldSchema);
|
|
107
|
+
}
|
|
108
|
+
if (desc.enum && desc.enum.includes(null)) {
|
|
109
|
+
makeNullable(fieldSchema);
|
|
110
|
+
}
|
|
111
|
+
}
|
|
71
112
|
}
|
|
72
113
|
|
|
73
114
|
schema.properties[key] = fieldSchema;
|
|
74
|
-
schema.required.push(key);
|
|
115
|
+
if (isRequired) schema.required.push(key);
|
|
75
116
|
}
|
|
76
117
|
|
|
77
118
|
return schema;
|
package/test/bottleneck.test.js
CHANGED
|
@@ -115,6 +115,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
|
|
|
115
115
|
model = ModelMix.new({
|
|
116
116
|
config: {
|
|
117
117
|
debug: false,
|
|
118
|
+
max_history: -1, // concurrent calls need history to preserve queued messages
|
|
118
119
|
bottleneck: {
|
|
119
120
|
maxConcurrent: 2,
|
|
120
121
|
minTime: 50
|
|
@@ -335,6 +336,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
|
|
|
335
336
|
const model = ModelMix.new({
|
|
336
337
|
config: {
|
|
337
338
|
debug: false,
|
|
339
|
+
max_history: -1,
|
|
338
340
|
bottleneck: {
|
|
339
341
|
maxConcurrent: 5,
|
|
340
342
|
minTime: 100,
|
|
@@ -385,6 +387,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
|
|
|
385
387
|
const model = ModelMix.new({
|
|
386
388
|
config: {
|
|
387
389
|
debug: false,
|
|
390
|
+
max_history: -1,
|
|
388
391
|
bottleneck: {
|
|
389
392
|
maxConcurrent: 1,
|
|
390
393
|
minTime: 200
|
|
@@ -432,6 +435,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
|
|
|
432
435
|
const model = ModelMix.new({
|
|
433
436
|
config: {
|
|
434
437
|
debug: false,
|
|
438
|
+
max_history: -1,
|
|
435
439
|
bottleneck: {
|
|
436
440
|
maxConcurrent: 2,
|
|
437
441
|
minTime: 100,
|
|
@@ -0,0 +1,572 @@
|
|
|
1
|
+
const { expect } = require('chai');
|
|
2
|
+
const sinon = require('sinon');
|
|
3
|
+
const nock = require('nock');
|
|
4
|
+
const { ModelMix } = require('../index.js');
|
|
5
|
+
|
|
6
|
+
describe('Conversation History Tests', () => {
|
|
7
|
+
|
|
8
|
+
if (global.setupTestHooks) {
|
|
9
|
+
global.setupTestHooks();
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
afterEach(() => {
|
|
13
|
+
nock.cleanAll();
|
|
14
|
+
sinon.restore();
|
|
15
|
+
});
|
|
16
|
+
|
|
17
|
+
describe('Assistant Response Persistence', () => {
|
|
18
|
+
let model;
|
|
19
|
+
|
|
20
|
+
beforeEach(() => {
|
|
21
|
+
model = ModelMix.new({
|
|
22
|
+
config: { debug: false, max_history: 10 }
|
|
23
|
+
});
|
|
24
|
+
});
|
|
25
|
+
|
|
26
|
+
it('should add assistant response to message history after message()', async () => {
|
|
27
|
+
model.gpt5mini().addText('Hello');
|
|
28
|
+
|
|
29
|
+
nock('https://api.openai.com')
|
|
30
|
+
.post('/v1/chat/completions')
|
|
31
|
+
.reply(200, {
|
|
32
|
+
choices: [{
|
|
33
|
+
message: {
|
|
34
|
+
role: 'assistant',
|
|
35
|
+
content: 'Hi there!'
|
|
36
|
+
}
|
|
37
|
+
}]
|
|
38
|
+
});
|
|
39
|
+
|
|
40
|
+
await model.message();
|
|
41
|
+
|
|
42
|
+
// After the call, messages should contain both user and assistant
|
|
43
|
+
expect(model.messages).to.have.length(2);
|
|
44
|
+
expect(model.messages[0].role).to.equal('user');
|
|
45
|
+
expect(model.messages[1].role).to.equal('assistant');
|
|
46
|
+
expect(model.messages[1].content[0].text).to.equal('Hi there!');
|
|
47
|
+
});
|
|
48
|
+
|
|
49
|
+
it('should add assistant response to message history after raw()', async () => {
|
|
50
|
+
model.sonnet4().addText('Hello');
|
|
51
|
+
|
|
52
|
+
nock('https://api.anthropic.com')
|
|
53
|
+
.post('/v1/messages')
|
|
54
|
+
.reply(200, {
|
|
55
|
+
content: [{
|
|
56
|
+
type: 'text',
|
|
57
|
+
text: 'Hi from Claude!'
|
|
58
|
+
}]
|
|
59
|
+
});
|
|
60
|
+
|
|
61
|
+
await model.raw();
|
|
62
|
+
|
|
63
|
+
expect(model.messages).to.have.length(2);
|
|
64
|
+
expect(model.messages[0].role).to.equal('user');
|
|
65
|
+
expect(model.messages[1].role).to.equal('assistant');
|
|
66
|
+
expect(model.messages[1].content[0].text).to.equal('Hi from Claude!');
|
|
67
|
+
});
|
|
68
|
+
});
|
|
69
|
+
|
|
70
|
+
describe('Multi-turn Conversations', () => {
|
|
71
|
+
|
|
72
|
+
it('should include previous assistant response in second API call (OpenAI)', async () => {
|
|
73
|
+
const model = ModelMix.new({
|
|
74
|
+
config: { debug: false, max_history: 10 }
|
|
75
|
+
});
|
|
76
|
+
model.gpt5mini();
|
|
77
|
+
|
|
78
|
+
// First turn
|
|
79
|
+
model.addText('Capital of France?');
|
|
80
|
+
|
|
81
|
+
nock('https://api.openai.com')
|
|
82
|
+
.post('/v1/chat/completions')
|
|
83
|
+
.reply(200, {
|
|
84
|
+
choices: [{
|
|
85
|
+
message: {
|
|
86
|
+
role: 'assistant',
|
|
87
|
+
content: 'The capital of France is Paris.'
|
|
88
|
+
}
|
|
89
|
+
}]
|
|
90
|
+
});
|
|
91
|
+
|
|
92
|
+
await model.message();
|
|
93
|
+
|
|
94
|
+
// Second turn - capture the request body to verify history
|
|
95
|
+
let capturedBody;
|
|
96
|
+
nock('https://api.openai.com')
|
|
97
|
+
.post('/v1/chat/completions', (body) => {
|
|
98
|
+
capturedBody = body;
|
|
99
|
+
return true;
|
|
100
|
+
})
|
|
101
|
+
.reply(200, {
|
|
102
|
+
choices: [{
|
|
103
|
+
message: {
|
|
104
|
+
role: 'assistant',
|
|
105
|
+
content: 'The capital of Germany is Berlin.'
|
|
106
|
+
}
|
|
107
|
+
}]
|
|
108
|
+
});
|
|
109
|
+
|
|
110
|
+
model.addText('Capital of Germany?');
|
|
111
|
+
await model.message();
|
|
112
|
+
|
|
113
|
+
// Verify the second request includes system + user + assistant + user
|
|
114
|
+
expect(capturedBody.messages).to.have.length(4); // system + 3 conversation messages
|
|
115
|
+
expect(capturedBody.messages[0].role).to.equal('system');
|
|
116
|
+
expect(capturedBody.messages[1].role).to.equal('user');
|
|
117
|
+
expect(capturedBody.messages[2].role).to.equal('assistant');
|
|
118
|
+
// OpenAI content is an array of {type, text} objects
|
|
119
|
+
const assistantContent = capturedBody.messages[2].content;
|
|
120
|
+
const assistantText = Array.isArray(assistantContent)
|
|
121
|
+
? assistantContent[0].text
|
|
122
|
+
: assistantContent;
|
|
123
|
+
expect(assistantText).to.include('Paris');
|
|
124
|
+
expect(capturedBody.messages[3].role).to.equal('user');
|
|
125
|
+
});
|
|
126
|
+
|
|
127
|
+
it('should include previous assistant response in second API call (Anthropic)', async () => {
|
|
128
|
+
const model = ModelMix.new({
|
|
129
|
+
config: { debug: false, max_history: 10 }
|
|
130
|
+
});
|
|
131
|
+
model.sonnet4();
|
|
132
|
+
|
|
133
|
+
// First turn
|
|
134
|
+
model.addText('Capital of France?');
|
|
135
|
+
|
|
136
|
+
nock('https://api.anthropic.com')
|
|
137
|
+
.post('/v1/messages')
|
|
138
|
+
.reply(200, {
|
|
139
|
+
content: [{
|
|
140
|
+
type: 'text',
|
|
141
|
+
text: 'The capital of France is Paris.'
|
|
142
|
+
}]
|
|
143
|
+
});
|
|
144
|
+
|
|
145
|
+
await model.message();
|
|
146
|
+
|
|
147
|
+
// Second turn - capture the request body
|
|
148
|
+
let capturedBody;
|
|
149
|
+
nock('https://api.anthropic.com')
|
|
150
|
+
.post('/v1/messages', (body) => {
|
|
151
|
+
capturedBody = body;
|
|
152
|
+
return true;
|
|
153
|
+
})
|
|
154
|
+
.reply(200, {
|
|
155
|
+
content: [{
|
|
156
|
+
type: 'text',
|
|
157
|
+
text: 'The capital of Germany is Berlin.'
|
|
158
|
+
}]
|
|
159
|
+
});
|
|
160
|
+
|
|
161
|
+
model.addText('Capital of Germany?');
|
|
162
|
+
await model.message();
|
|
163
|
+
|
|
164
|
+
// Anthropic: system is separate, messages should be user/assistant/user
|
|
165
|
+
expect(capturedBody.messages).to.have.length(3);
|
|
166
|
+
expect(capturedBody.messages[0].role).to.equal('user');
|
|
167
|
+
expect(capturedBody.messages[1].role).to.equal('assistant');
|
|
168
|
+
expect(capturedBody.messages[1].content[0].text).to.include('Paris');
|
|
169
|
+
expect(capturedBody.messages[2].role).to.equal('user');
|
|
170
|
+
});
|
|
171
|
+
|
|
172
|
+
it('should not merge consecutive user messages when assistant response is between them', async () => {
|
|
173
|
+
const model = ModelMix.new({
|
|
174
|
+
config: { debug: false, max_history: 10 }
|
|
175
|
+
});
|
|
176
|
+
model.gpt5mini();
|
|
177
|
+
|
|
178
|
+
model.addText('First question');
|
|
179
|
+
|
|
180
|
+
nock('https://api.openai.com')
|
|
181
|
+
.post('/v1/chat/completions')
|
|
182
|
+
.reply(200, {
|
|
183
|
+
choices: [{
|
|
184
|
+
message: {
|
|
185
|
+
role: 'assistant',
|
|
186
|
+
content: 'First answer'
|
|
187
|
+
}
|
|
188
|
+
}]
|
|
189
|
+
});
|
|
190
|
+
|
|
191
|
+
await model.message();
|
|
192
|
+
|
|
193
|
+
// Capture second request
|
|
194
|
+
let capturedBody;
|
|
195
|
+
nock('https://api.openai.com')
|
|
196
|
+
.post('/v1/chat/completions', (body) => {
|
|
197
|
+
capturedBody = body;
|
|
198
|
+
return true;
|
|
199
|
+
})
|
|
200
|
+
.reply(200, {
|
|
201
|
+
choices: [{
|
|
202
|
+
message: {
|
|
203
|
+
role: 'assistant',
|
|
204
|
+
content: 'Second answer'
|
|
205
|
+
}
|
|
206
|
+
}]
|
|
207
|
+
});
|
|
208
|
+
|
|
209
|
+
model.addText('Second question');
|
|
210
|
+
await model.message();
|
|
211
|
+
|
|
212
|
+
// The two user messages must NOT be merged into one
|
|
213
|
+
const userMessages = capturedBody.messages.filter(m => m.role === 'user');
|
|
214
|
+
expect(userMessages).to.have.length(2);
|
|
215
|
+
expect(userMessages[0].content[0].text).to.equal('First question');
|
|
216
|
+
expect(userMessages[1].content[0].text).to.equal('Second question');
|
|
217
|
+
});
|
|
218
|
+
|
|
219
|
+
it('should maintain correct alternating roles across 3 turns', async () => {
|
|
220
|
+
const model = ModelMix.new({
|
|
221
|
+
config: { debug: false, max_history: 20 }
|
|
222
|
+
});
|
|
223
|
+
model.gpt5mini();
|
|
224
|
+
|
|
225
|
+
const turns = [
|
|
226
|
+
{ user: 'Question 1', assistant: 'Answer 1' },
|
|
227
|
+
{ user: 'Question 2', assistant: 'Answer 2' },
|
|
228
|
+
{ user: 'Question 3', assistant: 'Answer 3' },
|
|
229
|
+
];
|
|
230
|
+
|
|
231
|
+
let capturedBody;
|
|
232
|
+
|
|
233
|
+
for (const turn of turns) {
|
|
234
|
+
model.addText(turn.user);
|
|
235
|
+
|
|
236
|
+
nock('https://api.openai.com')
|
|
237
|
+
.post('/v1/chat/completions', (body) => {
|
|
238
|
+
capturedBody = body;
|
|
239
|
+
return true;
|
|
240
|
+
})
|
|
241
|
+
.reply(200, {
|
|
242
|
+
choices: [{
|
|
243
|
+
message: {
|
|
244
|
+
role: 'assistant',
|
|
245
|
+
content: turn.assistant
|
|
246
|
+
}
|
|
247
|
+
}]
|
|
248
|
+
});
|
|
249
|
+
|
|
250
|
+
await model.message();
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
// After 3 turns, the last request should have system + 5 messages (u/a/u/a/u)
|
|
254
|
+
const msgs = capturedBody.messages.filter(m => m.role !== 'system');
|
|
255
|
+
expect(msgs).to.have.length(5);
|
|
256
|
+
expect(msgs.map(m => m.role)).to.deep.equal([
|
|
257
|
+
'user', 'assistant', 'user', 'assistant', 'user'
|
|
258
|
+
]);
|
|
259
|
+
});
|
|
260
|
+
});
|
|
261
|
+
|
|
262
|
+
describe('max_history Limits', () => {
|
|
263
|
+
|
|
264
|
+
it('should be stateless with max_history=0 (default)', async () => {
|
|
265
|
+
const model = ModelMix.new({
|
|
266
|
+
config: { debug: false } // max_history defaults to 0
|
|
267
|
+
});
|
|
268
|
+
model.gpt5mini();
|
|
269
|
+
|
|
270
|
+
model.addText('Question 1');
|
|
271
|
+
nock('https://api.openai.com')
|
|
272
|
+
.post('/v1/chat/completions')
|
|
273
|
+
.reply(200, {
|
|
274
|
+
choices: [{
|
|
275
|
+
message: { role: 'assistant', content: 'Answer 1' }
|
|
276
|
+
}]
|
|
277
|
+
});
|
|
278
|
+
await model.message();
|
|
279
|
+
|
|
280
|
+
// After call, messages should be cleared (stateless)
|
|
281
|
+
expect(model.messages).to.have.length(0);
|
|
282
|
+
});
|
|
283
|
+
|
|
284
|
+
it('should not send history on second call with max_history=0', async () => {
|
|
285
|
+
const model = ModelMix.new({
|
|
286
|
+
config: { debug: false, max_history: 0 }
|
|
287
|
+
});
|
|
288
|
+
model.gpt5mini();
|
|
289
|
+
|
|
290
|
+
// First turn
|
|
291
|
+
model.addText('Question 1');
|
|
292
|
+
nock('https://api.openai.com')
|
|
293
|
+
.post('/v1/chat/completions')
|
|
294
|
+
.reply(200, {
|
|
295
|
+
choices: [{
|
|
296
|
+
message: { role: 'assistant', content: 'Answer 1' }
|
|
297
|
+
}]
|
|
298
|
+
});
|
|
299
|
+
await model.message();
|
|
300
|
+
|
|
301
|
+
// Second turn - capture request
|
|
302
|
+
let capturedBody;
|
|
303
|
+
model.addText('Question 2');
|
|
304
|
+
nock('https://api.openai.com')
|
|
305
|
+
.post('/v1/chat/completions', (body) => {
|
|
306
|
+
capturedBody = body;
|
|
307
|
+
return true;
|
|
308
|
+
})
|
|
309
|
+
.reply(200, {
|
|
310
|
+
choices: [{
|
|
311
|
+
message: { role: 'assistant', content: 'Answer 2' }
|
|
312
|
+
}]
|
|
313
|
+
});
|
|
314
|
+
await model.message();
|
|
315
|
+
|
|
316
|
+
// Only system + current user message, no history from turn 1
|
|
317
|
+
const msgs = capturedBody.messages.filter(m => m.role !== 'system');
|
|
318
|
+
expect(msgs).to.have.length(1);
|
|
319
|
+
expect(msgs[0].role).to.equal('user');
|
|
320
|
+
expect(msgs[0].content[0].text).to.equal('Question 2');
|
|
321
|
+
});
|
|
322
|
+
|
|
323
|
+
it('should trim old messages when max_history is reached', async () => {
|
|
324
|
+
const model = ModelMix.new({
|
|
325
|
+
config: { debug: false, max_history: 2 }
|
|
326
|
+
});
|
|
327
|
+
model.gpt5mini();
|
|
328
|
+
|
|
329
|
+
// Turn 1
|
|
330
|
+
model.addText('Question 1');
|
|
331
|
+
nock('https://api.openai.com')
|
|
332
|
+
.post('/v1/chat/completions')
|
|
333
|
+
.reply(200, {
|
|
334
|
+
choices: [{
|
|
335
|
+
message: { role: 'assistant', content: 'Answer 1' }
|
|
336
|
+
}]
|
|
337
|
+
});
|
|
338
|
+
await model.message();
|
|
339
|
+
|
|
340
|
+
// Turn 2 - capture request
|
|
341
|
+
let capturedBody;
|
|
342
|
+
model.addText('Question 2');
|
|
343
|
+
nock('https://api.openai.com')
|
|
344
|
+
.post('/v1/chat/completions', (body) => {
|
|
345
|
+
capturedBody = body;
|
|
346
|
+
return true;
|
|
347
|
+
})
|
|
348
|
+
.reply(200, {
|
|
349
|
+
choices: [{
|
|
350
|
+
message: { role: 'assistant', content: 'Answer 2' }
|
|
351
|
+
}]
|
|
352
|
+
});
|
|
353
|
+
await model.message();
|
|
354
|
+
|
|
355
|
+
// With max_history=2, only the last 2 messages should be sent (assistant + user)
|
|
356
|
+
const msgs = capturedBody.messages.filter(m => m.role !== 'system');
|
|
357
|
+
expect(msgs.length).to.be.at.most(2);
|
|
358
|
+
// The last message should be the current user question
|
|
359
|
+
expect(msgs[msgs.length - 1].role).to.equal('user');
|
|
360
|
+
expect(msgs[msgs.length - 1].content[0].text).to.equal('Question 2');
|
|
361
|
+
});
|
|
362
|
+
|
|
363
|
+
it('should keep full history when max_history is large enough', async () => {
|
|
364
|
+
const model = ModelMix.new({
|
|
365
|
+
config: { debug: false, max_history: 100 }
|
|
366
|
+
});
|
|
367
|
+
model.gpt5mini();
|
|
368
|
+
|
|
369
|
+
// Turn 1
|
|
370
|
+
model.addText('Q1');
|
|
371
|
+
nock('https://api.openai.com')
|
|
372
|
+
.post('/v1/chat/completions')
|
|
373
|
+
.reply(200, {
|
|
374
|
+
choices: [{
|
|
375
|
+
message: { role: 'assistant', content: 'A1' }
|
|
376
|
+
}]
|
|
377
|
+
});
|
|
378
|
+
await model.message();
|
|
379
|
+
|
|
380
|
+
// Turn 2
|
|
381
|
+
let capturedBody;
|
|
382
|
+
model.addText('Q2');
|
|
383
|
+
nock('https://api.openai.com')
|
|
384
|
+
.post('/v1/chat/completions', (body) => {
|
|
385
|
+
capturedBody = body;
|
|
386
|
+
return true;
|
|
387
|
+
})
|
|
388
|
+
.reply(200, {
|
|
389
|
+
choices: [{
|
|
390
|
+
message: { role: 'assistant', content: 'A2' }
|
|
391
|
+
}]
|
|
392
|
+
});
|
|
393
|
+
await model.message();
|
|
394
|
+
|
|
395
|
+
// All 3 messages should be present (user, assistant, user)
|
|
396
|
+
const msgs = capturedBody.messages.filter(m => m.role !== 'system');
|
|
397
|
+
expect(msgs).to.have.length(3);
|
|
398
|
+
});
|
|
399
|
+
|
|
400
|
+
it('should handle max_history=-1 (unlimited)', async () => {
|
|
401
|
+
const model = ModelMix.new({
|
|
402
|
+
config: { debug: false, max_history: -1 }
|
|
403
|
+
});
|
|
404
|
+
model.gpt5mini();
|
|
405
|
+
|
|
406
|
+
for (let i = 1; i <= 5; i++) {
|
|
407
|
+
model.addText(`Question ${i}`);
|
|
408
|
+
nock('https://api.openai.com')
|
|
409
|
+
.post('/v1/chat/completions')
|
|
410
|
+
.reply(200, {
|
|
411
|
+
choices: [{
|
|
412
|
+
message: { role: 'assistant', content: `Answer ${i}` }
|
|
413
|
+
}]
|
|
414
|
+
});
|
|
415
|
+
await model.message();
|
|
416
|
+
}
|
|
417
|
+
|
|
418
|
+
// After 5 turns, all 10 messages should be in history (5 user + 5 assistant)
|
|
419
|
+
expect(model.messages).to.have.length(10);
|
|
420
|
+
});
|
|
421
|
+
});
|
|
422
|
+
|
|
423
|
+
describe('Cross-provider History', () => {
|
|
424
|
+
|
|
425
|
+
it('should maintain history when using Anthropic provider', async () => {
|
|
426
|
+
const model = ModelMix.new({
|
|
427
|
+
config: { debug: false, max_history: 10 }
|
|
428
|
+
});
|
|
429
|
+
model.haiku35();
|
|
430
|
+
|
|
431
|
+
model.addText('Hello');
|
|
432
|
+
nock('https://api.anthropic.com')
|
|
433
|
+
.post('/v1/messages')
|
|
434
|
+
.reply(200, {
|
|
435
|
+
content: [{ type: 'text', text: 'Hi there!' }]
|
|
436
|
+
});
|
|
437
|
+
await model.message();
|
|
438
|
+
|
|
439
|
+
let capturedBody;
|
|
440
|
+
model.addText('How are you?');
|
|
441
|
+
nock('https://api.anthropic.com')
|
|
442
|
+
.post('/v1/messages', (body) => {
|
|
443
|
+
capturedBody = body;
|
|
444
|
+
return true;
|
|
445
|
+
})
|
|
446
|
+
.reply(200, {
|
|
447
|
+
content: [{ type: 'text', text: 'I am well!' }]
|
|
448
|
+
});
|
|
449
|
+
await model.message();
|
|
450
|
+
|
|
451
|
+
// Anthropic sends system separately; messages should be u/a/u
|
|
452
|
+
expect(capturedBody.messages).to.have.length(3);
|
|
453
|
+
expect(capturedBody.messages[0].role).to.equal('user');
|
|
454
|
+
expect(capturedBody.messages[1].role).to.equal('assistant');
|
|
455
|
+
expect(capturedBody.messages[1].content[0].text).to.equal('Hi there!');
|
|
456
|
+
expect(capturedBody.messages[2].role).to.equal('user');
|
|
457
|
+
});
|
|
458
|
+
|
|
459
|
+
it('should maintain history when using Google provider', async () => {
|
|
460
|
+
const model = ModelMix.new({
|
|
461
|
+
config: { debug: false, max_history: 10 }
|
|
462
|
+
});
|
|
463
|
+
model.gemini25flash();
|
|
464
|
+
|
|
465
|
+
model.addText('Hello');
|
|
466
|
+
nock('https://generativelanguage.googleapis.com')
|
|
467
|
+
.post(/.*generateContent/)
|
|
468
|
+
.reply(200, {
|
|
469
|
+
candidates: [{
|
|
470
|
+
content: {
|
|
471
|
+
parts: [{ text: 'Hi from Gemini!' }]
|
|
472
|
+
}
|
|
473
|
+
}]
|
|
474
|
+
});
|
|
475
|
+
await model.message();
|
|
476
|
+
|
|
477
|
+
let capturedBody;
|
|
478
|
+
model.addText('How are you?');
|
|
479
|
+
nock('https://generativelanguage.googleapis.com')
|
|
480
|
+
.post(/.*generateContent/, (body) => {
|
|
481
|
+
capturedBody = body;
|
|
482
|
+
return true;
|
|
483
|
+
})
|
|
484
|
+
.reply(200, {
|
|
485
|
+
candidates: [{
|
|
486
|
+
content: {
|
|
487
|
+
parts: [{ text: 'Great, thanks!' }]
|
|
488
|
+
}
|
|
489
|
+
}]
|
|
490
|
+
});
|
|
491
|
+
await model.message();
|
|
492
|
+
|
|
493
|
+
// Google sends messages in contents array with user/model roles
|
|
494
|
+
const userMsgs = capturedBody.contents.filter(m => m.role === 'user');
|
|
495
|
+
const modelMsgs = capturedBody.contents.filter(m => m.role === 'model');
|
|
496
|
+
expect(userMsgs).to.have.length(2);
|
|
497
|
+
expect(modelMsgs).to.have.length(1);
|
|
498
|
+
});
|
|
499
|
+
});
|
|
500
|
+
|
|
501
|
+
describe('Edge Cases', () => {

    // Shared factory: every edge-case test uses the same quiet config
    // with a 10-entry history window.
    const freshModel = () => ModelMix.new({
        config: { debug: false, max_history: 10 }
    });

    // Stub one OpenAI chat completion that answers with `content`;
    // optionally captures the outgoing request body via `onBody`.
    const stubCompletion = (content, onBody) => {
        const scope = nock('https://api.openai.com');
        const interceptor = onBody
            ? scope.post('/v1/chat/completions', (body) => {
                onBody(body);
                return true;
            })
            : scope.post('/v1/chat/completions');
        interceptor.reply(200, {
            choices: [{
                message: { role: 'assistant', content }
            }]
        });
    };

    it('should handle single turn without breaking', async () => {
        const mix = freshModel();
        mix.gpt5mini().addText('Just one question');

        stubCompletion('Just one answer');

        const reply = await mix.message();
        expect(reply).to.equal('Just one answer');
        // One user turn + one assistant turn recorded in history.
        expect(mix.messages).to.have.length(2);
    });

    it('should handle empty assistant response gracefully', async () => {
        const mix = freshModel();
        mix.gpt5mini().addText('Hello');

        stubCompletion('');

        const reply = await mix.message();
        // Empty string is falsy, so assistant message should NOT be added
        expect(reply).to.equal('');
    });

    it('should handle multiple addText before first message()', async () => {
        const mix = freshModel();
        mix.gpt5mini();

        mix.addText('Part 1');
        mix.addText('Part 2');

        let requestBody;
        stubCompletion('Response', (body) => {
            requestBody = body;
        });

        await mix.message();

        // Two consecutive user messages should be grouped into one by groupByRoles
        const userTurns = requestBody.messages.filter((m) => m.role === 'user');
        expect(userTurns).to.have.length(1);
        expect(userTurns[0].content).to.have.length(2);
        expect(userTurns[0].content[0].text).to.equal('Part 1');
        expect(userTurns[0].content[1].text).to.equal('Part 2');
    });
});
|
|
572
|
+
});
|
package/test/json.test.js
CHANGED
|
@@ -181,6 +181,163 @@ describe('JSON Schema and Structured Output Tests', () => {
|
|
|
181
181
|
});
|
|
182
182
|
});
|
|
183
183
|
|
|
184
|
+
describe('Enhanced Descriptor Descriptions', () => {

    it('should support required: false (not in required + nullable)', () => {
        // A descriptor with required:false should drop the key from
        // `required` and widen its type to include 'null'.
        const sample = { name: 'Alice', nickname: 'Ali' };
        const specs = {
            name: 'Full name',
            nickname: { description: 'Optional nickname', required: false }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.required).to.deep.equal(['name']);
        expect(result.properties.nickname).to.deep.equal({
            type: ['string', 'null'],
            description: 'Optional nickname'
        });
        expect(result.properties.name).to.deep.equal({
            type: 'string',
            description: 'Full name'
        });
    });

    it('should support enum', () => {
        const sample = { status: 'active' };
        const specs = {
            status: { description: 'Account status', enum: ['active', 'inactive', 'banned'] }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.properties.status).to.deep.equal({
            type: 'string',
            description: 'Account status',
            enum: ['active', 'inactive', 'banned']
        });
        expect(result.required).to.deep.equal(['status']);
    });

    it('should make type nullable when enum includes null', () => {
        // `null` inside the enum list implies the field itself is nullable.
        const sample = { sex: 'm' };
        const specs = {
            sex: { description: 'Gender', enum: ['m', 'f', null] }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.properties.sex).to.deep.equal({
            type: ['string', 'null'],
            description: 'Gender',
            enum: ['m', 'f', null]
        });
    });

    it('should support default value', () => {
        const sample = { theme: 'light' };
        const specs = {
            theme: { description: 'UI theme', default: 'light', enum: ['light', 'dark'] }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.properties.theme).to.deep.equal({
            type: 'string',
            description: 'UI theme',
            default: 'light',
            enum: ['light', 'dark']
        });
    });

    it('should mix string and descriptor descriptions', () => {
        // Plain-string descriptions and descriptor objects may coexist
        // in the same descriptions map.
        const sample = { name: 'martin', age: 22, sex: 'm' };
        const specs = {
            name: { description: 'Name of the actor', required: false },
            age: 'Age of the actor',
            sex: { description: 'Gender', enum: ['m', 'f', null], default: 'm' }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.required).to.deep.equal(['age', 'sex']);
        expect(result.properties.name).to.deep.equal({
            type: ['string', 'null'],
            description: 'Name of the actor'
        });
        expect(result.properties.age).to.deep.equal({
            type: 'integer',
            description: 'Age of the actor'
        });
        expect(result.properties.sex).to.deep.equal({
            type: ['string', 'null'],
            description: 'Gender',
            enum: ['m', 'f', null],
            default: 'm'
        });
    });

    it('should not apply descriptor as nested descriptions for objects', () => {
        // A descriptor on an object-valued key documents the object
        // itself; it must not leak into the child properties.
        const sample = { user: { name: 'Alice', age: 30 } };
        const specs = {
            user: { description: 'User details', required: false }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.required).to.deep.equal([]);
        expect(result.properties.user.description).to.equal('User details');
        expect(result.properties.user.properties.name).to.deep.equal({ type: 'string' });
        expect(result.properties.user.properties.age).to.deep.equal({ type: 'integer' });
    });

    it('should pass nested descriptions correctly for objects', () => {
        const sample = { user: { name: 'Alice', age: 30 } };
        const specs = {
            user: { name: 'User name', age: 'User age' }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.properties.user.properties.name).to.deep.equal({
            type: 'string',
            description: 'User name'
        });
        expect(result.properties.user.properties.age).to.deep.equal({
            type: 'integer',
            description: 'User age'
        });
    });

    it('should handle array descriptions in array format', () => {
        // Descriptions for array items mirror the example's array shape.
        const sample = {
            countries: [{ name: 'France', capital: 'Paris' }]
        };
        const specs = {
            countries: [{ name: 'Country name', capital: 'Capital city' }]
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.properties.countries.type).to.equal('array');
        expect(result.properties.countries.items.properties.name.description).to.equal('Country name');
        expect(result.properties.countries.items.properties.capital.description).to.equal('Capital city');
    });

    it('should handle descriptor for array field itself', () => {
        const sample = { tags: ['admin'] };
        const specs = {
            tags: { description: 'User tags', required: false }
        };
        const result = generateJsonSchema(sample, specs);

        expect(result.properties.tags.description).to.equal('User tags');
        expect(result.required).to.deep.equal([]);
    });

    it('should not double-add null to type', () => {
        // required:false AND a null enum entry both imply nullability;
        // 'null' must still appear in the type array exactly once.
        const sample = { status: 'active' };
        const specs = {
            status: { required: false, enum: ['active', null] }
        };
        const result = generateJsonSchema(sample, specs);

        const nullEntries = result.properties.status.type.filter((t) => t === 'null');
        expect(nullEntries.length).to.equal(1);
    });
});
|
|
340
|
+
|
|
184
341
|
describe('ModelMix JSON Output', () => {
|
|
185
342
|
let model;
|
|
186
343
|
|
|
@@ -291,5 +448,33 @@ describe('JSON Schema and Structured Output Tests', () => {
|
|
|
291
448
|
expect(error.message).to.include('JSON');
|
|
292
449
|
}
|
|
293
450
|
});
|
|
451
|
+
|
|
452
|
+
it('should auto-wrap top-level array and unwrap on return', async () => {
    model.gpt41().addText('List 3 countries');

    // The library wraps a top-level array schema in an { out: [...] }
    // envelope before sending; the provider therefore answers with that
    // envelope, and json() is expected to unwrap it transparently.
    const wrappedPayload = {
        out: [
            { name: 'France' },
            { name: 'Germany' },
            { name: 'Spain' }
        ]
    };

    nock('https://api.openai.com')
        .post('/v1/chat/completions')
        .reply(200, {
            choices: [{
                message: {
                    role: 'assistant',
                    content: JSON.stringify(wrappedPayload)
                }
            }]
        });

    const countries = await model.json([{ name: 'France' }]);

    expect(countries).to.be.an('array');
    expect(countries).to.have.length(3);
    expect(countries[0]).to.have.property('name', 'France');
    expect(countries[2]).to.have.property('name', 'Spain');
});
|
|
294
479
|
});
|
|
295
480
|
});
|