@probelabs/probe-chat 0.6.0-rc100

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,269 @@
1
+ import { streamText as originalStreamText } from 'ai';
2
+ import { z } from 'zod';
3
+
4
// Mock LLM Provider for testing.
//
// Holds a queue of canned responses and replays them round-robin through a
// mock model object that mimics the AI SDK's streaming (`doStream`) and
// non-streaming (`doGenerate`) call shapes. Every call's params are captured
// so tests can assert on what the "LLM" was asked.
export class MockLLMProvider {
  /**
   * @param {object} [options]
   * @param {Array<object>} [options.responses] - Canned responses, replayed round-robin.
   * @param {number} [options.failAfter] - Throw once this many calls have been made (0 = fail immediately).
   * @param {number} [options.streamDelay] - Delay in ms between streamed chunks (0 is honored).
   * @param {string|null} [options.throwError] - If set, every call throws an Error with this message.
   */
  constructor(options = {}) {
    this.responses = options.responses || [];
    this.currentResponseIndex = 0;
    // `??` (not `||`) so an explicit 0 is respected instead of being
    // silently replaced by the default.
    this.failAfter = options.failAfter ?? Infinity;
    this.callCount = 0;
    this.capturedCalls = [];
    this.streamDelay = options.streamDelay ?? 10; // ms between chunks
    this.throwError = options.throwError || null;
  }

  // Reset mutable state between tests (configured responses are kept).
  reset() {
    this.currentResponseIndex = 0;
    this.callCount = 0;
    this.capturedCalls = [];
  }

  // Append a response to the round-robin queue.
  addResponse(response) {
    this.responses.push(response);
  }

  /**
   * Return the next canned response (round-robin over `responses`).
   * @returns {object} the next response object
   * @throws {Error} when configured to fail (`failAfter` reached or
   *   `throwError` set), or when no responses were configured.
   */
  getNextResponse() {
    if (this.callCount >= this.failAfter) {
      throw new Error('Mock provider configured to fail after ' + this.failAfter + ' calls');
    }

    if (this.throwError) {
      throw new Error(this.throwError);
    }

    if (this.responses.length === 0) {
      // Previously this fell through to `undefined` (and a NaN index from
      // `% 0`), producing a confusing TypeError downstream; fail fast with
      // a clear message instead.
      throw new Error('MockLLMProvider has no responses configured');
    }

    const response = this.responses[this.currentResponseIndex];
    this.currentResponseIndex = (this.currentResponseIndex + 1) % this.responses.length;
    return response;
  }

  /**
   * Create a mock model object exposing the AI SDK's `doStream` /
   * `doGenerate` call shapes, backed by this provider's response queue.
   */
  createMockModel() {
    const provider = this;

    return {
      // Streaming path: yields text-delta chunks, then any tool calls,
      // then a single finish event.
      doStream: async function*(params) {
        provider.callCount++;
        // Deep copy messages so later mutation by the caller cannot
        // change what was captured.
        provider.capturedCalls.push({
          ...params,
          messages: params.messages ? JSON.parse(JSON.stringify(params.messages)) : []
        });

        const response = provider.getNextResponse();

        // Simulate streaming: split the text into roughly 5 word-chunks
        // with a small delay between them for realism.
        if (response.text) {
          const words = response.text.split(' ');
          const chunkSize = Math.max(1, Math.floor(words.length / 5)); // At least 5 chunks

          for (let i = 0; i < words.length; i += chunkSize) {
            const isLast = i + chunkSize >= words.length;
            const chunk = words.slice(i, i + chunkSize).join(' ');
            await new Promise(resolve => setTimeout(resolve, provider.streamDelay));
            yield {
              type: 'text-delta',
              // A trailing space re-joins consecutive chunks; the last
              // chunk gets none so the reassembled text matches exactly.
              textDelta: isLast ? chunk : chunk + ' '
            };
          }
        }

        // Emit any configured tool calls after the text.
        if (response.toolCalls) {
          for (const toolCall of response.toolCalls) {
            yield {
              type: 'tool-call',
              toolCallId: toolCall.toolCallId || 'mock-tool-call-' + Date.now(),
              toolName: toolCall.toolName,
              args: toolCall.args
            };
          }
        }

        // Terminate the stream with a finish event and token usage.
        yield {
          type: 'finish',
          finishReason: response.finishReason || 'stop',
          usage: {
            promptTokens: response.promptTokens || 100,
            completionTokens: response.completionTokens || 50
          }
        };
      },

      // Non-streaming path (generateText): returns the whole response at once.
      doGenerate: async function(params) {
        provider.callCount++;
        // Deep copy messages to avoid reference issues (see doStream).
        provider.capturedCalls.push({
          ...params,
          messages: params.messages ? JSON.parse(JSON.stringify(params.messages)) : []
        });

        const response = provider.getNextResponse();

        return {
          text: response.text || '',
          toolCalls: response.toolCalls || [],
          finishReason: response.finishReason || 'stop',
          usage: {
            promptTokens: response.promptTokens || 100,
            completionTokens: response.completionTokens || 50
          }
        };
      }
    };
  }
}
131
+
132
// Mock the AI SDK's streamText function.
//
// Returns an async function with the same call shape as `streamText`: it
// drains the provider's mock model once, buffers the chunks, and exposes
// them through replay generators so `textStream` and `fullStream` can each
// be consumed independently.
export function createMockStreamText(provider) {
  return async function mockStreamText(options) {
    const { messages, tools, toolChoice, maxTokens, temperature, system } = options;

    const mockModel = provider.createMockModel();

    // Drain the stream eagerly and buffer every chunk; the generators
    // below replay this buffer.
    const collected = [];
    const params = { messages, tools, toolChoice, maxTokens, temperature, system };
    for await (const part of mockModel.doStream(params)) {
      collected.push(part);
    }

    // Replays only the text content of the buffered chunks.
    async function* replayText() {
      for (const part of collected) {
        if (part.type === 'text-delta') {
          yield part.textDelta;
        }
      }
    }

    // Replays every buffered chunk (text, tool calls, finish).
    async function* replayAll() {
      yield* collected;
    }

    return {
      textStream: replayText(),
      fullStream: replayAll(),
      // Mock response for testing.
      toAIStreamResponse: () => new Response('mock stream response')
    };
  };
}
172
+
173
// Predefined response scenarios for common test cases.
// Each entry is a canned response object consumed by MockLLMProvider.
export const mockResponses = {
  // Plain text, no tool usage.
  simpleText: {
    text: 'This is a simple text response from the mock LLM.'
  },

  // Text followed by a single search tool call.
  withToolCall: {
    text: 'Let me search for that information.',
    toolCalls: [{
      toolName: 'probe_search',
      args: {
        query: 'test query',
        path: './src'
      }
    }]
  },

  // Text followed by two tool calls (search then extract).
  multipleToolCalls: {
    text: "I'll help you with multiple operations.",
    toolCalls: [
      {
        toolName: 'probe_search',
        args: {
          query: 'function definition',
          path: './src'
        }
      },
      {
        toolName: 'probe_extract',
        args: {
          location: 'src/main.rs:42'
        }
      }
    ]
  },

  // Text followed by an implement tool call.
  implementToolCall: {
    text: "I'll implement that feature for you.",
    toolCalls: [{
      toolName: 'implement',
      args: {
        request: 'Add a new function to calculate fibonacci numbers',
        files: ['src/math.js'],
        backend: 'mock'
      }
    }]
  },

  // Text response finishing with an error reason.
  errorResponse: {
    text: 'I encountered an error processing your request.',
    finishReason: 'error'
  },

  // Long text that will be split into several streamed chunks,
  // exercising the chat system's streaming path.
  longStreamingResponse: {
    text: 'This is a longer response that will be streamed in chunks. It simulates how a real LLM would stream content back to the user. Each chunk arrives with a small delay to mimic network latency. This helps test the streaming functionality of the chat system.'
  }
};
239
+
240
// Helper to create a mock provider with predefined responses.
//
// Known scenarios: 'simple', 'tools', 'implement', 'error', 'mixed'.
// Any other value falls back to a single simple text response.
export function createMockProvider(scenario = 'simple', options = {}) {
  // The error scenario configures the provider to throw instead of answer.
  if (scenario === 'error') {
    return new MockLLMProvider({ ...options, throwError: 'Simulated API error' });
  }

  // A Map (rather than a plain object) so unexpected scenario names such
  // as '__proto__' still hit the default branch.
  const scripts = new Map([
    ['simple', [mockResponses.simpleText]],
    ['tools', [mockResponses.withToolCall, mockResponses.simpleText]],
    ['implement', [mockResponses.implementToolCall, { text: "The implementation is complete!" }]],
    ['mixed', [
      mockResponses.simpleText,
      mockResponses.withToolCall,
      mockResponses.multipleToolCalls,
      mockResponses.implementToolCall
    ]]
  ]);

  const responses = scripts.get(scenario) ?? [mockResponses.simpleText];
  return new MockLLMProvider({ ...options, responses });
}
@@ -0,0 +1,90 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * Simple test script for the pluggable backend system
5
+ */
6
+
7
+ import { createImplementTool } from '../implement/core/ImplementTool.js';
8
+ import { listBackendNames, getBackendMetadata } from '../implement/backends/registry.js';
9
+
10
/**
 * Exercise the pluggable backend system end to end: list registered
 * backends with their metadata, initialize the implement tool, run a
 * dry-run implementation request, and clean up.
 *
 * Purely informational — results are printed to the console; failures in
 * individual steps are caught and logged rather than aborting the script.
 */
async function testBackends() {
  console.log('🧪 Testing Probe Chat Pluggable Backend System\n');

  // List available backends and their registry metadata.
  console.log('📋 Available Backends:');
  const backends = listBackendNames();
  for (const backend of backends) {
    const metadata = getBackendMetadata(backend);
    console.log(`\n  ${backend}:`);
    console.log(`    Version: ${metadata.version}`);
    console.log(`    Description: ${metadata.description}`);
    console.log(`    Languages: ${metadata.capabilities.supportsLanguages.join(', ')}`);
  }

  console.log('\n' + '='.repeat(50) + '\n');

  // Test backend initialization.
  console.log('🔧 Testing Backend Initialization:\n');

  const tool = createImplementTool({
    enabled: true,
    backendConfig: {
      defaultBackend: 'aider',
      fallbackBackends: ['claude-code']
    }
  });

  try {
    try {
      const info = await tool.getInfo();
      console.log('✅ Backend system initialized successfully');
      console.log(`   Default backend: ${info.defaultBackend}`);
      console.log(`   Fallback backends: ${info.fallbackBackends.join(', ')}`);
      console.log(`   Available backends: ${info.availableBackends.join(', ')}`);

      console.log('\n📊 Backend Health Status:');
      for (const [name, health] of Object.entries(info.health)) {
        console.log(`   ${name}: ${health.status} ${health.available ? '✅' : '❌'}`);
      }
    } catch (error) {
      console.error('❌ Failed to initialize backend system:', error.message);
    }

    console.log('\n' + '='.repeat(50) + '\n');

    // Run a simple implementation request in dry-run mode so nothing is
    // actually written.
    console.log('🚀 Testing Implementation Request (Dry Run):\n');

    const testRequest = {
      task: 'Create a simple hello world function in JavaScript',
      dryRun: true,
      sessionId: 'test-' + Date.now()
    };

    console.log('Request:', testRequest);

    try {
      console.log('\nExecuting request...\n');
      const result = await tool.execute(testRequest);

      if (result.success) {
        console.log('✅ Request executed successfully');
        console.log(`   Backend used: ${result.backend}`);
        console.log(`   Execution time: ${result.metrics?.executionTime}ms`);
        console.log('\nOutput preview:');
        console.log(result.output?.substring(0, 200) + '...');
      } else {
        console.log('❌ Request failed');
        console.log(`   Error: ${result.error}`);
      }
    } catch (error) {
      console.error('❌ Error executing request:', error.message);
    }
  } finally {
    // Always release backend resources, even if a step above threw
    // unexpectedly (previously cleanup could be skipped in that case).
    await tool.cleanup();
  }

  console.log('\n✅ Test completed');
}
88
+
89
// Run the test; report failures and reflect them in the process exit code
// so CI does not treat a crashed run as success.
testBackends().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});