holosphere 2.0.0-alpha1 → 2.0.0-alpha2
This diff reflects the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and shows the changes between versions as they appear in their respective public registries.
- package/dist/cjs/holosphere.cjs +2 -0
- package/dist/cjs/holosphere.cjs.map +1 -0
- package/dist/esm/holosphere.js +56 -0
- package/dist/esm/holosphere.js.map +1 -0
- package/dist/index-CDfIuXew.js +15974 -0
- package/dist/index-CDfIuXew.js.map +1 -0
- package/dist/index-ifOgtDvd.cjs +3 -0
- package/dist/index-ifOgtDvd.cjs.map +1 -0
- package/dist/indexeddb-storage-CMW4qRQS.js +96 -0
- package/dist/indexeddb-storage-CMW4qRQS.js.map +1 -0
- package/dist/indexeddb-storage-DLZOgetM.cjs +2 -0
- package/dist/indexeddb-storage-DLZOgetM.cjs.map +1 -0
- package/dist/memory-storage-DQzcAZlf.js +47 -0
- package/dist/memory-storage-DQzcAZlf.js.map +1 -0
- package/dist/memory-storage-DmePEP2q.cjs +2 -0
- package/dist/memory-storage-DmePEP2q.cjs.map +1 -0
- package/dist/secp256k1-CP0ZkpAx.cjs +13 -0
- package/dist/secp256k1-CP0ZkpAx.cjs.map +1 -0
- package/dist/secp256k1-vOXp40Fx.js +2281 -0
- package/dist/secp256k1-vOXp40Fx.js.map +1 -0
- package/docs/FOSDEM_PROPOSAL.md +388 -0
- package/docs/LOCALFIRST.md +266 -0
- package/docs/contracts/api-interface.md +793 -0
- package/docs/data-model.md +476 -0
- package/docs/gun-async-usage.md +338 -0
- package/docs/plan.md +349 -0
- package/docs/quickstart.md +674 -0
- package/docs/research.md +362 -0
- package/docs/spec.md +244 -0
- package/docs/storage-backends.md +326 -0
- package/docs/tasks.md +947 -0
- package/package.json +1 -1
- package/tests/unit/ai/aggregation.test.js +0 -295
- package/tests/unit/ai/breakdown.test.js +0 -446
- package/tests/unit/ai/classifier.test.js +0 -294
- package/tests/unit/ai/council.test.js +0 -262
- package/tests/unit/ai/embeddings.test.js +0 -384
- package/tests/unit/ai/federation-ai.test.js +0 -344
- package/tests/unit/ai/h3-ai.test.js +0 -458
- package/tests/unit/ai/index.test.js +0 -304
- package/tests/unit/ai/json-ops.test.js +0 -307
- package/tests/unit/ai/llm-service.test.js +0 -390
- package/tests/unit/ai/nl-query.test.js +0 -383
- package/tests/unit/ai/relationships.test.js +0 -311
- package/tests/unit/ai/schema-extractor.test.js +0 -384
- package/tests/unit/ai/spatial.test.js +0 -279
- package/tests/unit/ai/tts.test.js +0 -279
- package/tests/unit/content.test.js +0 -332
- package/tests/unit/contract/core.test.js +0 -88
- package/tests/unit/contract/crypto.test.js +0 -198
- package/tests/unit/contract/data.test.js +0 -223
- package/tests/unit/contract/federation.test.js +0 -181
- package/tests/unit/contract/hierarchical.test.js +0 -113
- package/tests/unit/contract/schema.test.js +0 -114
- package/tests/unit/contract/social.test.js +0 -217
- package/tests/unit/contract/spatial.test.js +0 -110
- package/tests/unit/contract/subscriptions.test.js +0 -128
- package/tests/unit/contract/utils.test.js +0 -159
- package/tests/unit/core.test.js +0 -152
- package/tests/unit/crypto.test.js +0 -328
- package/tests/unit/federation.test.js +0 -234
- package/tests/unit/gun-async.test.js +0 -252
- package/tests/unit/hierarchical.test.js +0 -399
- package/tests/unit/integration/scenario-01-geographic-storage.test.js +0 -74
- package/tests/unit/integration/scenario-02-federation.test.js +0 -76
- package/tests/unit/integration/scenario-03-subscriptions.test.js +0 -102
- package/tests/unit/integration/scenario-04-validation.test.js +0 -129
- package/tests/unit/integration/scenario-05-hierarchy.test.js +0 -125
- package/tests/unit/integration/scenario-06-social.test.js +0 -135
- package/tests/unit/integration/scenario-07-persistence.test.js +0 -130
- package/tests/unit/integration/scenario-08-authorization.test.js +0 -161
- package/tests/unit/integration/scenario-09-cross-dimensional.test.js +0 -139
- package/tests/unit/integration/scenario-10-cross-holosphere-capabilities.test.js +0 -357
- package/tests/unit/integration/scenario-11-cross-holosphere-federation.test.js +0 -410
- package/tests/unit/integration/scenario-12-capability-federated-read.test.js +0 -719
- package/tests/unit/performance/benchmark.test.js +0 -85
- package/tests/unit/schema.test.js +0 -213
- package/tests/unit/spatial.test.js +0 -158
- package/tests/unit/storage.test.js +0 -195
- package/tests/unit/subscriptions.test.js +0 -328
- package/tests/unit/test-data-permanence-debug.js +0 -197
- package/tests/unit/test-data-permanence.js +0 -340
- package/tests/unit/test-key-persistence-fixed.js +0 -148
- package/tests/unit/test-key-persistence.js +0 -172
- package/tests/unit/test-relay-permanence.js +0 -376
- package/tests/unit/test-second-node.js +0 -95
- package/tests/unit/test-simple-write.js +0 -89
package/tests/unit/ai/llm-service.test.js
@@ -1,390 +0,0 @@
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-import { LLMService } from '../../../src/ai/llm-service.js';
-
-// Mock OpenAI
-vi.mock('openai', () => ({
-  default: vi.fn().mockImplementation(() => ({
-    chat: {
-      completions: {
-        create: vi.fn()
-      }
-    }
-  }))
-}));
-
-describe('Unit: LLMService', () => {
-  let llm;
-  let mockOpenAI;
-
-  beforeEach(() => {
-    vi.clearAllMocks();
-    llm = new LLMService('test-api-key');
-    mockOpenAI = llm.openai;
-  });
-
-  describe('Constructor', () => {
-    it('should throw error if API key is not provided', () => {
-      expect(() => new LLMService()).toThrow('OpenAI API key is required');
-      expect(() => new LLMService('')).toThrow('OpenAI API key is required');
-      expect(() => new LLMService(null)).toThrow('OpenAI API key is required');
-    });
-
-    it('should initialize with default options', () => {
-      const service = new LLMService('test-key');
-      expect(service.model).toBe('gpt-4o-mini');
-      expect(service.maxTokens).toBe(2000);
-      expect(service.temperature).toBe(0.7);
-    });
-
-    it('should accept custom options', () => {
-      const service = new LLMService('test-key', {
-        model: 'gpt-4',
-        maxTokens: 1000,
-        temperature: 0.5
-      });
-      expect(service.model).toBe('gpt-4');
-      expect(service.maxTokens).toBe(1000);
-      expect(service.temperature).toBe(0.5);
-    });
-  });
-
-  describe('sendMessage', () => {
-    it('should send message with system and user prompts', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Test response' } }]
-      });
-
-      const result = await llm.sendMessage('You are helpful', 'Hello');
-
-      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
-        model: 'gpt-4o-mini',
-        messages: [
-          { role: 'system', content: 'You are helpful' },
-          { role: 'user', content: 'Hello' }
-        ],
-        max_tokens: 2000,
-        temperature: 0.7
-      });
-      expect(result).toBe('Test response');
-    });
-
-    it('should allow option overrides', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Response' } }]
-      });
-
-      await llm.sendMessage('System', 'User', {
-        model: 'gpt-4',
-        maxTokens: 500,
-        temperature: 0.3
-      });
-
-      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith(
-        expect.objectContaining({
-          model: 'gpt-4',
-          max_tokens: 500,
-          temperature: 0.3
-        })
-      );
-    });
-
-    it('should handle empty response', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: null } }]
-      });
-
-      const result = await llm.sendMessage('System', 'User');
-      expect(result).toBe('');
-    });
-
-    it('should throw error on API failure', async () => {
-      mockOpenAI.chat.completions.create.mockRejectedValue(new Error('API Error'));
-
-      await expect(llm.sendMessage('System', 'User'))
-        .rejects.toThrow('LLM request failed: API Error');
-    });
-
-    it('should trim whitespace from response', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: ' Response with spaces ' } }]
-      });
-
-      const result = await llm.sendMessage('System', 'User');
-      expect(result).toBe('Response with spaces');
-    });
-  });
-
-  describe('chat', () => {
-    it('should send conversation messages', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Chat response' } }]
-      });
-
-      const messages = [
-        { role: 'system', content: 'You are helpful' },
-        { role: 'user', content: 'Hello' },
-        { role: 'assistant', content: 'Hi there!' },
-        { role: 'user', content: 'How are you?' }
-      ];
-
-      const result = await llm.chat(messages);
-
-      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith(
-        expect.objectContaining({ messages })
-      );
-      expect(result).toBe('Chat response');
-    });
-
-    it('should throw error on chat failure', async () => {
-      mockOpenAI.chat.completions.create.mockRejectedValue(new Error('Chat failed'));
-
-      await expect(llm.chat([]))
-        .rejects.toThrow('LLM chat failed: Chat failed');
-    });
-  });
-
-  describe('summarize', () => {
-    it('should summarize text', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Summary of text' } }]
-      });
-
-      const result = await llm.summarize('Long text to summarize');
-
-      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith(
-        expect.objectContaining({
-          temperature: 0.5
-        })
-      );
-      expect(result).toBe('Summary of text');
-    });
-
-    it('should include maxLength in prompt when specified', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Short summary' } }]
-      });
-
-      await llm.summarize('Text', { maxLength: 50 });
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.messages[0].content).toContain('50 words');
-    });
-
-    it('should include style in prompt when specified', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Bullet summary' } }]
-      });
-
-      await llm.summarize('Text', { style: 'bullet points' });
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.messages[0].content).toContain('bullet points');
-    });
-  });
-
-  describe('analyze', () => {
-    it('should analyze text from specified perspective', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Sentiment analysis result' } }]
-      });
-
-      const result = await llm.analyze('Happy text', 'sentiment');
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.messages[0].content).toContain('sentiment');
-      expect(call.temperature).toBe(0.3);
-      expect(result).toBe('Sentiment analysis result');
-    });
-  });
-
-  describe('extractKeywords', () => {
-    it('should extract keywords as JSON array', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '["keyword1", "keyword2", "keyword3"]' } }]
-      });
-
-      const result = await llm.extractKeywords('Some text about keywords');
-
-      expect(result).toEqual(['keyword1', 'keyword2', 'keyword3']);
-    });
-
-    it('should handle non-JSON response by extracting array', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Here are keywords: ["a", "b"]' } }]
-      });
-
-      const result = await llm.extractKeywords('Text');
-      expect(result).toEqual(['a', 'b']);
-    });
-
-    it('should fall back to comma splitting if JSON fails', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'keyword1, keyword2, keyword3' } }]
-      });
-
-      const result = await llm.extractKeywords('Text');
-      expect(result).toEqual(['keyword1', 'keyword2', 'keyword3']);
-    });
-
-    it('should respect maxKeywords parameter', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '["a", "b", "c"]' } }]
-      });
-
-      await llm.extractKeywords('Text', 5);
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.messages[0].content).toContain('5');
-    });
-  });
-
-  describe('categorize', () => {
-    it('should categorize text into one of the categories', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '{"category": "sports", "confidence": 0.9, "reasoning": "Text is about sports"}' } }]
-      });
-
-      const result = await llm.categorize('Football game', ['sports', 'politics', 'tech']);
-
-      expect(result).toEqual({
-        category: 'sports',
-        confidence: 0.9,
-        reasoning: 'Text is about sports'
-      });
-    });
-
-    it('should handle non-JSON response', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'The category is {"category": "tech", "confidence": 0.8, "reasoning": "about tech"}' } }]
-      });
-
-      const result = await llm.categorize('AI text', ['tech']);
-      expect(result.category).toBe('tech');
-    });
-
-    it('should fall back to first category on parse failure', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Unable to parse' } }]
-      });
-
-      const result = await llm.categorize('Text', ['first', 'second']);
-      expect(result.category).toBe('first');
-      expect(result.confidence).toBe(0.5);
-    });
-  });
-
-  describe('translate', () => {
-    it('should translate text to target language', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Bonjour le monde' } }]
-      });
-
-      const result = await llm.translate('Hello world', 'French');
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.messages[0].content).toContain('French');
-      expect(result).toBe('Bonjour le monde');
-    });
-  });
-
-  describe('generateQuestions', () => {
-    it('should generate questions as JSON array', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '["Question 1?", "Question 2?"]' } }]
-      });
-
-      const result = await llm.generateQuestions('Some content');
-      expect(result).toEqual(['Question 1?', 'Question 2?']);
-    });
-
-    it('should handle non-JSON by extracting array', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Questions: ["Q1?", "Q2?"]' } }]
-      });
-
-      const result = await llm.generateQuestions('Content');
-      expect(result).toEqual(['Q1?', 'Q2?']);
-    });
-
-    it('should fall back to line splitting for questions', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'What is this?\nWhy does it matter?' } }]
-      });
-
-      const result = await llm.generateQuestions('Content');
-      expect(result).toContain('What is this?');
-      expect(result).toContain('Why does it matter?');
-    });
-
-    it('should respect count parameter', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '["Q1?", "Q2?", "Q3?"]' } }]
-      });
-
-      await llm.generateQuestions('Content', 3);
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.messages[0].content).toContain('3');
-    });
-  });
-
-  describe('getJSON', () => {
-    it('should return parsed JSON from response', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '{"key": "value", "number": 42}' } }]
-      });
-
-      const result = await llm.getJSON('System prompt', 'User message');
-      expect(result).toEqual({ key: 'value', number: 42 });
-    });
-
-    it('should extract JSON from non-pure JSON response', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Here is the result: {"data": [1, 2, 3]}' } }]
-      });
-
-      const result = await llm.getJSON('System', 'User');
-      expect(result).toEqual({ data: [1, 2, 3] });
-    });
-
-    it('should extract JSON array from response', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Result: [1, 2, 3]' } }]
-      });
-
-      const result = await llm.getJSON('System', 'User');
-      expect(result).toEqual([1, 2, 3]);
-    });
-
-    it('should throw error on invalid JSON', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: 'Not valid JSON at all' } }]
-      });
-
-      await expect(llm.getJSON('System', 'User'))
-        .rejects.toThrow('Failed to parse JSON response');
-    });
-
-    it('should use low temperature by default', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '{}' } }]
-      });
-
-      await llm.getJSON('System', 'User');
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.temperature).toBe(0.2);
-    });
-
-    it('should append JSON instruction to system prompt', async () => {
-      mockOpenAI.chat.completions.create.mockResolvedValue({
-        choices: [{ message: { content: '{}' } }]
-      });
-
-      await llm.getJSON('Original prompt', 'User');
-
-      const call = mockOpenAI.chat.completions.create.mock.calls[0][0];
-      expect(call.messages[0].content).toContain('Return ONLY valid JSON');
-    });
-  });
-});