mcp-rubber-duck 1.2.5 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/.eslintrc.json +1 -0
  2. package/CHANGELOG.md +12 -0
  3. package/README.md +116 -2
  4. package/dist/config/types.d.ts +78 -0
  5. package/dist/config/types.d.ts.map +1 -1
  6. package/dist/server.d.ts.map +1 -1
  7. package/dist/server.js +150 -0
  8. package/dist/server.js.map +1 -1
  9. package/dist/services/consensus.d.ts +28 -0
  10. package/dist/services/consensus.d.ts.map +1 -0
  11. package/dist/services/consensus.js +257 -0
  12. package/dist/services/consensus.js.map +1 -0
  13. package/dist/tools/duck-debate.d.ts +16 -0
  14. package/dist/tools/duck-debate.d.ts.map +1 -0
  15. package/dist/tools/duck-debate.js +272 -0
  16. package/dist/tools/duck-debate.js.map +1 -0
  17. package/dist/tools/duck-iterate.d.ts +14 -0
  18. package/dist/tools/duck-iterate.d.ts.map +1 -0
  19. package/dist/tools/duck-iterate.js +195 -0
  20. package/dist/tools/duck-iterate.js.map +1 -0
  21. package/dist/tools/duck-judge.d.ts +15 -0
  22. package/dist/tools/duck-judge.d.ts.map +1 -0
  23. package/dist/tools/duck-judge.js +208 -0
  24. package/dist/tools/duck-judge.js.map +1 -0
  25. package/dist/tools/duck-vote.d.ts +14 -0
  26. package/dist/tools/duck-vote.d.ts.map +1 -0
  27. package/dist/tools/duck-vote.js +46 -0
  28. package/dist/tools/duck-vote.js.map +1 -0
  29. package/package.json +1 -1
  30. package/src/config/types.ts +92 -0
  31. package/src/server.ts +154 -0
  32. package/src/services/consensus.ts +324 -0
  33. package/src/tools/duck-debate.ts +383 -0
  34. package/src/tools/duck-iterate.ts +253 -0
  35. package/src/tools/duck-judge.ts +301 -0
  36. package/src/tools/duck-vote.ts +87 -0
  37. package/tests/consensus.test.ts +282 -0
  38. package/tests/duck-debate.test.ts +286 -0
  39. package/tests/duck-iterate.test.ts +249 -0
  40. package/tests/duck-judge.test.ts +296 -0
  41. package/tests/duck-vote.test.ts +250 -0
@@ -0,0 +1,286 @@
1
+ import { describe, it, expect, jest, beforeEach } from '@jest/globals';
2
+
3
+ // Mock OpenAI BEFORE importing the provider
4
+ const mockCreate = jest.fn();
5
+ jest.mock('openai', () => {
6
+ const MockOpenAI = jest.fn().mockImplementation(() => ({
7
+ chat: {
8
+ completions: {
9
+ create: mockCreate,
10
+ },
11
+ },
12
+ }));
13
+ return {
14
+ __esModule: true,
15
+ default: MockOpenAI,
16
+ };
17
+ });
18
+
19
+ // Mock config manager and logger
20
+ jest.mock('../src/config/config');
21
+ jest.mock('../src/utils/logger');
22
+
23
+ import { duckDebateTool } from '../src/tools/duck-debate';
24
+ import { ProviderManager } from '../src/providers/manager';
25
+ import { ConfigManager } from '../src/config/config';
26
+
27
+ describe('duckDebateTool', () => {
28
+ let mockProviderManager: ProviderManager;
29
+ let mockConfigManager: jest.Mocked<ConfigManager>;
30
+
31
+ beforeEach(() => {
32
+ jest.clearAllMocks();
33
+
34
+ mockConfigManager = {
35
+ getConfig: jest.fn().mockReturnValue({
36
+ providers: {
37
+ openai: {
38
+ api_key: 'key1',
39
+ base_url: 'https://api.openai.com/v1',
40
+ default_model: 'gpt-4',
41
+ nickname: 'GPT-4',
42
+ models: ['gpt-4'],
43
+ },
44
+ gemini: {
45
+ api_key: 'key2',
46
+ base_url: 'https://api.gemini.com/v1',
47
+ default_model: 'gemini-pro',
48
+ nickname: 'Gemini',
49
+ models: ['gemini-pro'],
50
+ },
51
+ },
52
+ default_provider: 'openai',
53
+ cache_ttl: 300,
54
+ enable_failover: true,
55
+ default_temperature: 0.7,
56
+ }),
57
+ } as any;
58
+
59
+ mockProviderManager = new ProviderManager(mockConfigManager);
60
+
61
+ // Override the client method on all providers
62
+ const provider1 = mockProviderManager.getProvider('openai');
63
+ const provider2 = mockProviderManager.getProvider('gemini');
64
+ provider1['client'].chat.completions.create = mockCreate;
65
+ provider2['client'].chat.completions.create = mockCreate;
66
+ });
67
+
68
+ it('should throw error when prompt is missing', async () => {
69
+ await expect(
70
+ duckDebateTool(mockProviderManager, { format: 'oxford' })
71
+ ).rejects.toThrow('Prompt/topic is required');
72
+ });
73
+
74
+ it('should throw error when format is invalid', async () => {
75
+ await expect(
76
+ duckDebateTool(mockProviderManager, { prompt: 'Test', format: 'invalid' })
77
+ ).rejects.toThrow('Format must be "oxford", "socratic", or "adversarial"');
78
+ });
79
+
80
+ it('should throw error when rounds out of range', async () => {
81
+ await expect(
82
+ duckDebateTool(mockProviderManager, { prompt: 'Test', format: 'oxford', rounds: 0 })
83
+ ).rejects.toThrow('Rounds must be between 1 and 10');
84
+
85
+ await expect(
86
+ duckDebateTool(mockProviderManager, { prompt: 'Test', format: 'oxford', rounds: 11 })
87
+ ).rejects.toThrow('Rounds must be between 1 and 10');
88
+ });
89
+
90
+ it('should throw error when less than 2 providers', async () => {
91
+ await expect(
92
+ duckDebateTool(mockProviderManager, { prompt: 'Test', format: 'oxford', providers: ['openai'] })
93
+ ).rejects.toThrow('At least 2 providers are required');
94
+ });
95
+
96
+ it('should throw error when provider does not exist', async () => {
97
+ await expect(
98
+ duckDebateTool(mockProviderManager, { prompt: 'Test', format: 'oxford', providers: ['openai', 'nonexistent'] })
99
+ ).rejects.toThrow('Provider "nonexistent" not found');
100
+ });
101
+
102
+ it('should perform oxford debate', async () => {
103
+ // Round 1: 2 participants
104
+ mockCreate
105
+ .mockResolvedValueOnce({
106
+ choices: [{ message: { content: 'PRO argument round 1' }, finish_reason: 'stop' }],
107
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
108
+ model: 'gpt-4',
109
+ })
110
+ .mockResolvedValueOnce({
111
+ choices: [{ message: { content: 'CON argument round 1' }, finish_reason: 'stop' }],
112
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
113
+ model: 'gemini-pro',
114
+ })
115
+ // Round 2
116
+ .mockResolvedValueOnce({
117
+ choices: [{ message: { content: 'PRO argument round 2' }, finish_reason: 'stop' }],
118
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
119
+ model: 'gpt-4',
120
+ })
121
+ .mockResolvedValueOnce({
122
+ choices: [{ message: { content: 'CON argument round 2' }, finish_reason: 'stop' }],
123
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
124
+ model: 'gemini-pro',
125
+ })
126
+ // Synthesis
127
+ .mockResolvedValueOnce({
128
+ choices: [{ message: { content: 'Debate synthesis: Both sides made valid points.' }, finish_reason: 'stop' }],
129
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
130
+ model: 'gpt-4',
131
+ });
132
+
133
+ const result = await duckDebateTool(mockProviderManager, {
134
+ prompt: 'Should we use microservices?',
135
+ format: 'oxford',
136
+ rounds: 2,
137
+ });
138
+
139
+ expect(result.content).toHaveLength(1);
140
+ const text = result.content[0].text;
141
+
142
+ expect(text).toContain('Oxford Debate');
143
+ expect(text).toContain('microservices');
144
+ expect(text).toContain('ROUND 1');
145
+ expect(text).toContain('ROUND 2');
146
+ expect(text).toContain('[PRO]');
147
+ expect(text).toContain('[CON]');
148
+ expect(text).toContain('Synthesis');
149
+ });
150
+
151
+ it('should perform socratic debate', async () => {
152
+ mockCreate
153
+ .mockResolvedValueOnce({
154
+ choices: [{ message: { content: 'Philosophical question 1' }, finish_reason: 'stop' }],
155
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
156
+ model: 'gpt-4',
157
+ })
158
+ .mockResolvedValueOnce({
159
+ choices: [{ message: { content: 'Philosophical response 1' }, finish_reason: 'stop' }],
160
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
161
+ model: 'gemini-pro',
162
+ })
163
+ .mockResolvedValueOnce({
164
+ choices: [{ message: { content: 'Synthesis of Socratic dialogue' }, finish_reason: 'stop' }],
165
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
166
+ model: 'gpt-4',
167
+ });
168
+
169
+ const result = await duckDebateTool(mockProviderManager, {
170
+ prompt: 'What is knowledge?',
171
+ format: 'socratic',
172
+ rounds: 1,
173
+ });
174
+
175
+ const text = result.content[0].text;
176
+ expect(text).toContain('Socratic Debate');
177
+ expect(text).toContain('[NEUTRAL]');
178
+ });
179
+
180
+ it('should perform adversarial debate', async () => {
181
+ mockCreate
182
+ .mockResolvedValueOnce({
183
+ choices: [{ message: { content: 'Defender argument' }, finish_reason: 'stop' }],
184
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
185
+ model: 'gpt-4',
186
+ })
187
+ .mockResolvedValueOnce({
188
+ choices: [{ message: { content: 'Challenger attack' }, finish_reason: 'stop' }],
189
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
190
+ model: 'gemini-pro',
191
+ })
192
+ .mockResolvedValueOnce({
193
+ choices: [{ message: { content: 'Adversarial synthesis' }, finish_reason: 'stop' }],
194
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
195
+ model: 'gpt-4',
196
+ });
197
+
198
+ const result = await duckDebateTool(mockProviderManager, {
199
+ prompt: 'AI will surpass human intelligence',
200
+ format: 'adversarial',
201
+ rounds: 1,
202
+ });
203
+
204
+ const text = result.content[0].text;
205
+ expect(text).toContain('Adversarial Debate');
206
+ });
207
+
208
+ it('should use all providers when none specified', async () => {
209
+ // 2 providers, 1 round = 2 arguments + 1 synthesis = 3 calls
210
+ mockCreate
211
+ .mockResolvedValueOnce({
212
+ choices: [{ message: { content: 'Arg 1' }, finish_reason: 'stop' }],
213
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
214
+ model: 'gpt-4',
215
+ })
216
+ .mockResolvedValueOnce({
217
+ choices: [{ message: { content: 'Arg 2' }, finish_reason: 'stop' }],
218
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
219
+ model: 'gemini-pro',
220
+ })
221
+ .mockResolvedValueOnce({
222
+ choices: [{ message: { content: 'Synthesis' }, finish_reason: 'stop' }],
223
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
224
+ model: 'gpt-4',
225
+ });
226
+
227
+ const result = await duckDebateTool(mockProviderManager, {
228
+ prompt: 'Test topic',
229
+ format: 'oxford',
230
+ rounds: 1,
231
+ });
232
+
233
+ // Should have used both providers
234
+ const text = result.content[0].text;
235
+ expect(text).toContain('GPT-4');
236
+ expect(text).toContain('Gemini');
237
+ });
238
+
239
+ it('should use specified synthesizer', async () => {
240
+ mockCreate
241
+ .mockResolvedValueOnce({
242
+ choices: [{ message: { content: 'Arg 1' }, finish_reason: 'stop' }],
243
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
244
+ model: 'gpt-4',
245
+ })
246
+ .mockResolvedValueOnce({
247
+ choices: [{ message: { content: 'Arg 2' }, finish_reason: 'stop' }],
248
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
249
+ model: 'gemini-pro',
250
+ })
251
+ .mockResolvedValueOnce({
252
+ choices: [{ message: { content: 'Gemini synthesis' }, finish_reason: 'stop' }],
253
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
254
+ model: 'gemini-pro',
255
+ });
256
+
257
+ const result = await duckDebateTool(mockProviderManager, {
258
+ prompt: 'Test',
259
+ format: 'oxford',
260
+ rounds: 1,
261
+ synthesizer: 'gemini',
262
+ });
263
+
264
+ const text = result.content[0].text;
265
+ expect(text).toContain('by gemini');
266
+ });
267
+
268
+ it('should handle default rounds', async () => {
269
+ // Default is 3 rounds, 2 participants = 6 arguments + 1 synthesis = 7 calls
270
+ for (let i = 0; i < 7; i++) {
271
+ mockCreate.mockResolvedValueOnce({
272
+ choices: [{ message: { content: `Response ${i}` }, finish_reason: 'stop' }],
273
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
274
+ model: 'gpt-4',
275
+ });
276
+ }
277
+
278
+ const result = await duckDebateTool(mockProviderManager, {
279
+ prompt: 'Test',
280
+ format: 'oxford',
281
+ });
282
+
283
+ const text = result.content[0].text;
284
+ expect(text).toContain('3 rounds completed');
285
+ });
286
+ });
@@ -0,0 +1,249 @@
1
+ import { describe, it, expect, jest, beforeEach } from '@jest/globals';
2
+
3
+ // Mock OpenAI BEFORE importing the provider
4
+ const mockCreate = jest.fn();
5
+ jest.mock('openai', () => {
6
+ const MockOpenAI = jest.fn().mockImplementation(() => ({
7
+ chat: {
8
+ completions: {
9
+ create: mockCreate,
10
+ },
11
+ },
12
+ }));
13
+ return {
14
+ __esModule: true,
15
+ default: MockOpenAI,
16
+ };
17
+ });
18
+
19
+ // Mock config manager and logger
20
+ jest.mock('../src/config/config');
21
+ jest.mock('../src/utils/logger');
22
+
23
+ import { duckIterateTool } from '../src/tools/duck-iterate';
24
+ import { ProviderManager } from '../src/providers/manager';
25
+ import { ConfigManager } from '../src/config/config';
26
+
27
+ describe('duckIterateTool', () => {
28
+ let mockProviderManager: ProviderManager;
29
+ let mockConfigManager: jest.Mocked<ConfigManager>;
30
+
31
+ beforeEach(() => {
32
+ jest.clearAllMocks();
33
+
34
+ mockConfigManager = {
35
+ getConfig: jest.fn().mockReturnValue({
36
+ providers: {
37
+ openai: {
38
+ api_key: 'key1',
39
+ base_url: 'https://api.openai.com/v1',
40
+ default_model: 'gpt-4',
41
+ nickname: 'GPT-4',
42
+ models: ['gpt-4'],
43
+ },
44
+ gemini: {
45
+ api_key: 'key2',
46
+ base_url: 'https://api.gemini.com/v1',
47
+ default_model: 'gemini-pro',
48
+ nickname: 'Gemini',
49
+ models: ['gemini-pro'],
50
+ },
51
+ },
52
+ default_provider: 'openai',
53
+ cache_ttl: 300,
54
+ enable_failover: true,
55
+ default_temperature: 0.7,
56
+ }),
57
+ } as any;
58
+
59
+ mockProviderManager = new ProviderManager(mockConfigManager);
60
+
61
+ // Override the client method on all providers
62
+ const provider1 = mockProviderManager.getProvider('openai');
63
+ const provider2 = mockProviderManager.getProvider('gemini');
64
+ provider1['client'].chat.completions.create = mockCreate;
65
+ provider2['client'].chat.completions.create = mockCreate;
66
+ });
67
+
68
+ it('should throw error when prompt is missing', async () => {
69
+ await expect(
70
+ duckIterateTool(mockProviderManager, { providers: ['openai', 'gemini'], mode: 'refine' })
71
+ ).rejects.toThrow('Prompt is required');
72
+ });
73
+
74
+ it('should throw error when providers count is not 2', async () => {
75
+ await expect(
76
+ duckIterateTool(mockProviderManager, { prompt: 'Test', providers: ['openai'], mode: 'refine' })
77
+ ).rejects.toThrow('Exactly 2 providers are required');
78
+
79
+ await expect(
80
+ duckIterateTool(mockProviderManager, { prompt: 'Test', providers: ['openai', 'gemini', 'another'], mode: 'refine' })
81
+ ).rejects.toThrow('Exactly 2 providers are required');
82
+ });
83
+
84
+ it('should throw error when mode is invalid', async () => {
85
+ await expect(
86
+ duckIterateTool(mockProviderManager, { prompt: 'Test', providers: ['openai', 'gemini'], mode: 'invalid' })
87
+ ).rejects.toThrow('Mode must be either "refine" or "critique-improve"');
88
+ });
89
+
90
+ it('should throw error when iterations out of range', async () => {
91
+ await expect(
92
+ duckIterateTool(mockProviderManager, { prompt: 'Test', providers: ['openai', 'gemini'], mode: 'refine', iterations: 0 })
93
+ ).rejects.toThrow('Iterations must be between 1 and 10');
94
+
95
+ await expect(
96
+ duckIterateTool(mockProviderManager, { prompt: 'Test', providers: ['openai', 'gemini'], mode: 'refine', iterations: 11 })
97
+ ).rejects.toThrow('Iterations must be between 1 and 10');
98
+ });
99
+
100
+ it('should throw error when provider does not exist', async () => {
101
+ await expect(
102
+ duckIterateTool(mockProviderManager, { prompt: 'Test', providers: ['openai', 'nonexistent'], mode: 'refine' })
103
+ ).rejects.toThrow('Provider "nonexistent" not found');
104
+ });
105
+
106
+ it('should perform refine iteration', async () => {
107
+ mockCreate
108
+ .mockResolvedValueOnce({
109
+ choices: [{ message: { content: 'Initial response about sorting' }, finish_reason: 'stop' }],
110
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
111
+ model: 'gpt-4',
112
+ })
113
+ .mockResolvedValueOnce({
114
+ choices: [{ message: { content: 'Refined response with better explanation' }, finish_reason: 'stop' }],
115
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
116
+ model: 'gemini-pro',
117
+ })
118
+ .mockResolvedValueOnce({
119
+ choices: [{ message: { content: 'Further refined with examples' }, finish_reason: 'stop' }],
120
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
121
+ model: 'gpt-4',
122
+ });
123
+
124
+ const result = await duckIterateTool(mockProviderManager, {
125
+ prompt: 'Write a sorting algorithm',
126
+ providers: ['openai', 'gemini'],
127
+ mode: 'refine',
128
+ iterations: 3,
129
+ });
130
+
131
+ expect(result.content).toHaveLength(1);
132
+ expect(result.content[0].type).toBe('text');
133
+
134
+ const text = result.content[0].text;
135
+ expect(text).toContain('Iterative Refinement');
136
+ expect(text).toContain('refine');
137
+ expect(text).toContain('Round 1');
138
+ expect(text).toContain('Round 2');
139
+ expect(text).toContain('Round 3');
140
+ expect(text).toContain('Final Response');
141
+ });
142
+
143
+ it('should perform critique-improve iteration', async () => {
144
+ mockCreate
145
+ .mockResolvedValueOnce({
146
+ choices: [{ message: { content: 'Initial implementation' }, finish_reason: 'stop' }],
147
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
148
+ model: 'gpt-4',
149
+ })
150
+ .mockResolvedValueOnce({
151
+ choices: [{ message: { content: 'Critique: Missing edge cases, no error handling' }, finish_reason: 'stop' }],
152
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
153
+ model: 'gemini-pro',
154
+ })
155
+ .mockResolvedValueOnce({
156
+ choices: [{ message: { content: 'Improved with edge cases and error handling' }, finish_reason: 'stop' }],
157
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
158
+ model: 'gpt-4',
159
+ });
160
+
161
+ const result = await duckIterateTool(mockProviderManager, {
162
+ prompt: 'Write a function',
163
+ providers: ['openai', 'gemini'],
164
+ mode: 'critique-improve',
165
+ iterations: 3,
166
+ });
167
+
168
+ const text = result.content[0].text;
169
+ expect(text).toContain('critique-improve');
170
+ expect(text).toContain('generator');
171
+ expect(text).toContain('critic');
172
+ });
173
+
174
+ it('should use default iterations when not specified', async () => {
175
+ mockCreate
176
+ .mockResolvedValueOnce({
177
+ choices: [{ message: { content: 'Response 1' }, finish_reason: 'stop' }],
178
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
179
+ model: 'gpt-4',
180
+ })
181
+ .mockResolvedValueOnce({
182
+ choices: [{ message: { content: 'Response 2' }, finish_reason: 'stop' }],
183
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
184
+ model: 'gemini-pro',
185
+ })
186
+ .mockResolvedValueOnce({
187
+ choices: [{ message: { content: 'Response 3' }, finish_reason: 'stop' }],
188
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
189
+ model: 'gpt-4',
190
+ });
191
+
192
+ const result = await duckIterateTool(mockProviderManager, {
193
+ prompt: 'Test prompt',
194
+ providers: ['openai', 'gemini'],
195
+ mode: 'refine',
196
+ });
197
+
198
+ // Default is 3 iterations
199
+ expect(mockCreate).toHaveBeenCalledTimes(3);
200
+ expect(result.content[0].text).toContain('3 rounds completed');
201
+ });
202
+
203
+ it('should detect convergence and stop early', async () => {
204
+ // Return very similar responses to trigger convergence
205
+ const similarResponse = 'This is the exact same response content that will be repeated to trigger convergence detection.';
206
+
207
+ mockCreate
208
+ .mockResolvedValueOnce({
209
+ choices: [{ message: { content: similarResponse }, finish_reason: 'stop' }],
210
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
211
+ model: 'gpt-4',
212
+ })
213
+ .mockResolvedValueOnce({
214
+ choices: [{ message: { content: similarResponse }, finish_reason: 'stop' }],
215
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
216
+ model: 'gemini-pro',
217
+ });
218
+
219
+ const result = await duckIterateTool(mockProviderManager, {
220
+ prompt: 'Test',
221
+ providers: ['openai', 'gemini'],
222
+ mode: 'refine',
223
+ iterations: 5,
224
+ });
225
+
226
+ const text = result.content[0].text;
227
+ expect(text).toContain('converged');
228
+ // Should stop at 2 rounds due to convergence, not 5
229
+ expect(mockCreate).toHaveBeenCalledTimes(2);
230
+ });
231
+
232
+ it('should handle single iteration', async () => {
233
+ mockCreate.mockResolvedValueOnce({
234
+ choices: [{ message: { content: 'Single response' }, finish_reason: 'stop' }],
235
+ usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
236
+ model: 'gpt-4',
237
+ });
238
+
239
+ const result = await duckIterateTool(mockProviderManager, {
240
+ prompt: 'Test',
241
+ providers: ['openai', 'gemini'],
242
+ mode: 'refine',
243
+ iterations: 1,
244
+ });
245
+
246
+ expect(mockCreate).toHaveBeenCalledTimes(1);
247
+ expect(result.content[0].text).toContain('1 rounds completed');
248
+ });
249
+ });