flowquery 1.0.12 → 1.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/dist/flowquery.min.js +1 -1
- package/dist/parsing/functions/aggregate_function.d.ts +1 -1
- package/dist/parsing/functions/aggregate_function.d.ts.map +1 -1
- package/dist/parsing/functions/aggregate_function.js.map +1 -1
- package/dist/parsing/functions/async_function.d.ts +19 -16
- package/dist/parsing/functions/async_function.d.ts.map +1 -1
- package/dist/parsing/functions/async_function.js +20 -59
- package/dist/parsing/functions/async_function.js.map +1 -1
- package/dist/parsing/functions/function.d.ts +1 -1
- package/dist/parsing/functions/function.d.ts.map +1 -1
- package/dist/parsing/functions/function.js +1 -1
- package/dist/parsing/functions/function.js.map +1 -1
- package/dist/parsing/functions/function_factory.d.ts +2 -0
- package/dist/parsing/functions/function_factory.d.ts.map +1 -1
- package/dist/parsing/functions/function_factory.js +12 -11
- package/dist/parsing/functions/function_factory.js.map +1 -1
- package/dist/parsing/functions/function_metadata.d.ts +53 -24
- package/dist/parsing/functions/function_metadata.d.ts.map +1 -1
- package/dist/parsing/functions/function_metadata.js +55 -45
- package/dist/parsing/functions/function_metadata.js.map +1 -1
- package/dist/parsing/functions/predicate_function.d.ts +1 -1
- package/dist/parsing/functions/predicate_function.d.ts.map +1 -1
- package/dist/parsing/functions/predicate_function.js +1 -1
- package/dist/parsing/functions/predicate_function.js.map +1 -1
- package/dist/parsing/operations/load.d.ts +1 -0
- package/dist/parsing/operations/load.d.ts.map +1 -1
- package/dist/parsing/operations/load.js +3 -1
- package/dist/parsing/operations/load.js.map +1 -1
- package/dist/parsing/parser.d.ts.map +1 -1
- package/dist/parsing/parser.js +1 -2
- package/dist/parsing/parser.js.map +1 -1
- package/docs/flowquery.min.js +1 -1
- package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
- package/misc/apps/RAG/package.json +1 -2
- package/misc/apps/RAG/src/components/ChatContainer.tsx +33 -6
- package/misc/apps/RAG/src/components/ChatMessage.css +24 -0
- package/misc/apps/RAG/src/components/ChatMessage.tsx +51 -2
- package/misc/apps/RAG/src/components/FlowQueryAgent.ts +566 -286
- package/misc/apps/RAG/src/plugins/index.ts +3 -1
- package/misc/apps/RAG/src/plugins/loaders/CatFacts.ts +6 -5
- package/misc/apps/RAG/src/plugins/loaders/Form.ts +7 -7
- package/misc/apps/RAG/src/plugins/loaders/Llm.ts +14 -14
- package/misc/apps/RAG/src/plugins/loaders/MockData.ts +5 -5
- package/misc/apps/RAG/src/plugins/loaders/Table.ts +4 -4
- package/misc/apps/RAG/src/plugins/loaders/Weather.ts +126 -0
- package/misc/apps/RAG/src/prompts/FlowQuerySystemPrompt.ts +4 -0
- package/package.json +1 -1
- package/src/parsing/functions/aggregate_function.ts +1 -1
- package/src/parsing/functions/async_function.ts +20 -51
- package/src/parsing/functions/function.ts +2 -2
- package/src/parsing/functions/function_factory.ts +18 -9
- package/src/parsing/functions/function_metadata.ts +55 -47
- package/src/parsing/functions/predicate_function.ts +2 -2
- package/src/parsing/operations/load.ts +3 -1
- package/src/parsing/parser.ts +2 -1
- package/tests/extensibility.test.ts +22 -22
- package/tests/parsing/parser.test.ts +2 -2
- package/misc/apps/RAG/.env.example +0 -14
- package/misc/apps/RAG/src/plugins/loaders/FetchJson.ts +0 -66
package/misc/apps/RAG/src/components/FlowQueryAgent.tsx

````diff
--- package/misc/apps/RAG/src/components/FlowQueryAgent.ts (1.0.12)
+++ package/misc/apps/RAG/src/components/FlowQueryAgent.ts (1.0.14)
@@ -12,16 +12,13 @@ import { llm, llmStream, LlmOptions, LlmResponse } from '../plugins/loaders/Llm'
 import { FlowQueryExecutor, FlowQueryExecutionResult } from '../utils/FlowQueryExecutor';
 import { extractFlowQuery, FlowQueryExtraction } from '../utils/FlowQueryExtractor';
 import { isAdaptiveCard } from './AdaptiveCardRenderer';
-
-// Shared executor instance
-const flowQueryExecutor = new FlowQueryExecutor();
 import { generateInterpretationPrompt } from '../prompts';
 
 /**
  * Represents a step in the agent's execution process.
  */
 export interface AgentStep {
-  type: 'query_generation' | 'query_execution' | 'interpretation' | 'direct_response';
+  type: 'query_generation' | 'query_execution' | 'interpretation' | 'direct_response' | 'retry';
   content: string;
   timestamp: Date;
   metadata?: {
@@ -64,319 +61,509 @@ export interface FlowQueryAgentOptions {
   onStream?: AgentStreamCallback;
   /** Whether to show intermediate steps to the user */
   showIntermediateSteps?: boolean;
-  /** Maximum number of retry attempts for query execution */
+  /** Maximum number of retry attempts for query execution (default: 2) */
   maxRetries?: number;
 }
 
 /**
- *
+ * FlowQuery Agent class that orchestrates the multi-step query processing flow.
  *
- * @
- *
- *
+ * @example
+ * ```typescript
+ * const agent = new FlowQueryAgent();
+ * const result = await agent.processQuery("Show me all users", {
+ *   systemPrompt: "You are a helpful assistant..."
+ * });
+ * ```
  */
-export
-
-
-
-
-
-
-
-
-
-    ...llmOptions,
-    systemPrompt,
-    messages: conversationHistory,
-  });
+export class FlowQueryAgent {
+  private readonly flowQueryExecutor: FlowQueryExecutor;
+
+  /**
+   * Creates a new FlowQueryAgent instance.
+   * @param executor - Optional custom FlowQueryExecutor instance. If not provided, a new one will be created.
+   */
+  constructor(executor?: FlowQueryExecutor) {
+    this.flowQueryExecutor = executor ?? new FlowQueryExecutor();
+  }
 
-
-
-
-
-
-
-
+  /**
+   * Process a user query through the FlowQuery agent.
+   *
+   * @param userQuery - The natural language query from the user
+   * @param options - Agent configuration options
+   * @returns The agent result including final response and steps taken
+   */
+  async processQuery(
+    userQuery: string,
+    options: FlowQueryAgentOptions
+  ): Promise<AgentResult> {
+    const steps: AgentStep[] = [];
+    const { systemPrompt, llmOptions = {}, conversationHistory = [], onStream, showIntermediateSteps = true } = options;
+
+    try {
+      // Step 1: Generate FlowQuery from natural language
+      const generationResponse = await llm(userQuery, {
+        ...llmOptions,
+        systemPrompt,
+        messages: conversationHistory,
+      });
+
+      const generationContent = generationResponse.choices[0]?.message?.content || '';
+
+      steps.push({
+        type: 'query_generation',
+        content: generationContent,
+        timestamp: new Date(),
+      });
 
-
-
+      // Step 2: Extract the FlowQuery from the response
+      const extraction = extractFlowQuery(generationContent);
+
+      // If no query needed (direct response from LLM)
+      if (extraction.noQueryNeeded || !extraction.found) {
+        const directResponse = extraction.directResponse || generationContent;
+
+        steps.push({
+          type: 'direct_response',
+          content: directResponse,
+          timestamp: new Date(),
+          metadata: { extraction }
+        });
+
+        return {
+          finalResponse: directResponse,
+          steps,
+          success: true,
+        };
+      }
 
-
-
-      const directResponse = extraction.directResponse || generationContent;
+      // Step 3: Execute the FlowQuery
+      let executionResult = await this.flowQueryExecutor.execute(extraction.query!);
 
       steps.push({
-        type: '
-        content:
+        type: 'query_execution',
+        content: this.flowQueryExecutor.formatResult(executionResult),
         timestamp: new Date(),
-        metadata: {
+        metadata: {
+          query: extraction.query!,
+          executionResult,
+          extraction
+        }
       });
 
-
-
-
-
-
-
+      // If execution failed, attempt retry with error context
+      if (!executionResult.success) {
+        const maxRetries = options.maxRetries ?? 2;
+        let retryCount = 0;
+        let currentQuery = extraction.query!;
+        let currentError = executionResult.error;
+        let currentResult = executionResult;
+
+        while (!currentResult.success && retryCount < maxRetries) {
+          retryCount++;
+
+          steps.push({
+            type: 'retry',
+            content: `Retry ${retryCount}: Error was "${currentError}"`,
+            timestamp: new Date(),
+            metadata: {
+              query: currentQuery,
+              executionResult: currentResult
+            }
+          });
+
+          // Ask LLM to generate a corrected query
+          const correctedQuery = await this.generateCorrectedQuery(
+            userQuery,
+            currentQuery,
+            currentError || 'Unknown error',
+            steps,
+            options
+          );
+
+          if (!correctedQuery) {
+            // LLM couldn't generate a correction, fall back to error interpretation
+            const errorInterpretation = await this.interpretError(
+              userQuery,
+              currentQuery,
+              currentResult,
+              options
+            );
+
+            return {
+              finalResponse: errorInterpretation,
+              steps,
+              success: false,
+              error: currentResult.error
+            };
+          }
+
+          // Try executing the corrected query
+          currentQuery = correctedQuery;
+          currentResult = await this.flowQueryExecutor.execute(correctedQuery);
+          currentError = currentResult.error;
+
+          steps.push({
+            type: 'query_execution',
+            content: this.flowQueryExecutor.formatResult(currentResult),
+            timestamp: new Date(),
+            metadata: {
+              query: correctedQuery,
+              executionResult: currentResult
+            }
+          });
+        }
 
-
-
-
-
-
-
-
-
-
-
-
+        // If still failing after retries, interpret the error
+        if (!currentResult.success) {
+          const errorInterpretation = await this.interpretError(
+            userQuery,
+            currentQuery,
+            currentResult,
+            options
+          );
+
+          return {
+            finalResponse: errorInterpretation,
+            steps,
+            success: false,
+            error: currentResult.error
+          };
+        }
+
+        // Update for interpretation phase
+        executionResult = currentResult;
+        extraction.query = currentQuery;
       }
-      });
 
-
-
-      const errorInterpretation = await interpretError(
+      // Step 4: Send results to LLM for interpretation
+      const interpretationPrompt = this.buildInterpretationPrompt(
         userQuery,
         extraction.query!,
-        executionResult
-        options
+        executionResult
       );
+
+      let finalResponse = '';
+
+      if (onStream) {
+        // Stream the interpretation response
+        for await (const chunk of llmStream(interpretationPrompt, {
+          ...llmOptions,
+          systemPrompt: generateInterpretationPrompt(),
+          messages: conversationHistory,
+        })) {
+          const deltaContent = chunk.choices?.[0]?.delta?.content || '';
+          if (deltaContent) {
+            finalResponse += deltaContent;
+            onStream(deltaContent, 'interpretation');
+          }
+        }
+      } else {
+        const interpretationResponse = await llm(interpretationPrompt, {
+          ...llmOptions,
+          systemPrompt: generateInterpretationPrompt(),
+          messages: conversationHistory,
+        });
+        finalResponse = interpretationResponse.choices[0]?.message?.content || '';
+      }
+
+      steps.push({
+        type: 'interpretation',
+        content: finalResponse,
+        timestamp: new Date(),
+      });
+
+      // Build the complete response with optional intermediate steps
+      let completeResponse = '';
+
+      if (showIntermediateSteps && extraction.explanation) {
+        completeResponse += extraction.explanation + '\n\n';
+      }
 
+      if (showIntermediateSteps) {
+        completeResponse += `**Query executed:**\n\`\`\`flowquery\n${extraction.query}\n\`\`\`\n\n`;
+      }
+
+      completeResponse += finalResponse;
+
       return {
-        finalResponse:
+        finalResponse: completeResponse,
+        steps,
+        success: true,
+      };
+    } catch (error) {
+      const errorMessage = error instanceof Error ? error.message : String(error);
+
+      return {
+        finalResponse: `⚠️ An error occurred: ${errorMessage}`,
         steps,
         success: false,
-        error:
+        error: errorMessage,
       };
     }
+  }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-        systemPrompt: generateInterpretationPrompt(),
-        messages: conversationHistory,
-      })) {
-        const deltaContent = chunk.choices?.[0]?.delta?.content || '';
-        if (deltaContent) {
-          finalResponse += deltaContent;
-          onStream(deltaContent, 'interpretation');
-        }
-      }
-    } else {
-      const interpretationResponse = await llm(interpretationPrompt, {
+  /**
+   * Process a query with streaming support for the final interpretation.
+   */
+  async *processQueryStream(
+    userQuery: string,
+    options: FlowQueryAgentOptions
+  ): AsyncGenerator<{ chunk: string; step: AgentStep['type']; done: boolean; steps?: AgentStep[]; adaptiveCard?: Record<string, unknown>; newMessage?: boolean }, void, unknown> {
+    const steps: AgentStep[] = [];
+    const { systemPrompt, llmOptions = {}, conversationHistory = [], showIntermediateSteps = true } = options;
+
+    try {
+      // Step 1: Generate FlowQuery from natural language (non-streaming for speed)
+      const generationResponse = await llm(userQuery, {
         ...llmOptions,
-        systemPrompt
+        systemPrompt,
         messages: conversationHistory,
       });
-      finalResponse = interpretationResponse.choices[0]?.message?.content || '';
-    }
-
-    steps.push({
-      type: 'interpretation',
-      content: finalResponse,
-      timestamp: new Date(),
-    });
-
-    // Build the complete response with optional intermediate steps
-    let completeResponse = '';
-
-    if (showIntermediateSteps && extraction.explanation) {
-      completeResponse += extraction.explanation + '\n\n';
-    }
-
-    if (showIntermediateSteps) {
-      completeResponse += `**Query executed:**\n\`\`\`flowquery\n${extraction.query}\n\`\`\`\n\n`;
-    }
-
-    completeResponse += finalResponse;
-
-    return {
-      finalResponse: completeResponse,
-      steps,
-      success: true,
-    };
-  } catch (error) {
-    const errorMessage = error instanceof Error ? error.message : String(error);
-
-    return {
-      finalResponse: `⚠️ An error occurred: ${errorMessage}`,
-      steps,
-      success: false,
-      error: errorMessage,
-    };
-  }
-}
 
-
-
-
-
-
-
-)
-  const steps: AgentStep[] = [];
-  const { systemPrompt, llmOptions = {}, conversationHistory = [], showIntermediateSteps = true } = options;
-
-  try {
-    // Step 1: Generate FlowQuery from natural language (non-streaming for speed)
-    const generationResponse = await llm(userQuery, {
-      ...llmOptions,
-      systemPrompt,
-      messages: conversationHistory,
-    });
+      const generationContent = generationResponse.choices[0]?.message?.content || '';
+
+      steps.push({
+        type: 'query_generation',
+        content: generationContent,
+        timestamp: new Date(),
+      });
 
-
-
-
-
-
-
-
+      // Step 2: Extract the FlowQuery
+      const extraction = extractFlowQuery(generationContent);
+
+      // If no query needed
+      if (extraction.noQueryNeeded || !extraction.found) {
+        const directResponse = extraction.directResponse || generationContent;
+
+        steps.push({
+          type: 'direct_response',
+          content: directResponse,
+          timestamp: new Date(),
+          metadata: { extraction }
+        });
+
+        yield { chunk: directResponse, step: 'direct_response', done: true, steps };
+        return;
+      }
 
-
-
+      // Emit intermediate step: show the query being executed
+      if (showIntermediateSteps) {
+        yield {
+          chunk: `\`\`\`flowquery\n${extraction.query}\n\`\`\`\n\n`,
+          step: 'query_generation',
+          done: false
+        };
+      }
 
-
-
-    const directResponse = extraction.directResponse || generationContent;
+      // Step 3: Execute the FlowQuery
+      let executionResult = await this.flowQueryExecutor.execute(extraction.query!);
 
       steps.push({
-        type: '
-        content:
+        type: 'query_execution',
+        content: this.flowQueryExecutor.formatResult(executionResult),
         timestamp: new Date(),
-        metadata: {
+        metadata: {
+          query: extraction.query!,
+          executionResult,
+          extraction
+        }
       });
 
-
-
-
+      // Handle execution errors with retry logic
+      if (!executionResult.success) {
+        const maxRetries = options.maxRetries ?? 2;
+        let retryCount = 0;
+        let currentQuery = extraction.query!;
+        let currentError = executionResult.error;
+        let currentResult = executionResult;
+
+        while (!currentResult.success && retryCount < maxRetries) {
+          retryCount++;
+
+          // Show the failure in the current message before marking it complete
+          yield {
+            chunk: `\n⚠️ **Query execution failed:** ${currentError}\n`,
+            step: 'query_execution',
+            done: false
+          };
+
+          // Complete the previous message before starting a new one
+          yield {
+            chunk: '',
+            step: 'query_execution',
+            done: true
+          };
+
+          // Notify user of retry attempt - start a new message for the retry
+          yield {
+            chunk: `🔄 Attempting to fix (retry ${retryCount}/${maxRetries})...\n\n`,
+            step: 'retry',
+            done: false,
+            newMessage: true
+          };
+
+          steps.push({
+            type: 'retry',
+            content: `Retry ${retryCount}: Error was "${currentError}"`,
+            timestamp: new Date(),
+            metadata: {
+              query: currentQuery,
+              executionResult: currentResult
+            }
+          });
+
+          // Ask LLM to generate a corrected query
+          const correctedQuery = await this.generateCorrectedQuery(
+            userQuery,
+            currentQuery,
+            currentError || 'Unknown error',
+            steps,
+            options
+          );
+
+          if (!correctedQuery) {
+            // LLM couldn't generate a correction
+            yield {
+              chunk: `Unable to generate a corrected query. Please try rephrasing your request.\n`,
+              step: 'retry',
+              done: true,
+              steps
+            };
+            return;
+          }
+
+          // Show the corrected query
+          if (showIntermediateSteps) {
+            yield {
+              chunk: `**Corrected query:**\n\`\`\`flowquery\n${correctedQuery}\n\`\`\`\n\n`,
+              step: 'retry',
+              done: false
+            };
+          }
+
+          // Try executing the corrected query
+          currentQuery = correctedQuery;
+          currentResult = await this.flowQueryExecutor.execute(correctedQuery);
+          currentError = currentResult.error;
+
+          steps.push({
+            type: 'query_execution',
+            content: this.flowQueryExecutor.formatResult(currentResult),
+            timestamp: new Date(),
+            metadata: {
+              query: correctedQuery,
+              executionResult: currentResult
+            }
+          });
+        }
 
-
-
-
-
-
-
-      };
-    }
+        // If still failing after retries, give up
+        if (!currentResult.success) {
+          const errorMessage = `⚠️ Query execution failed after ${maxRetries} retries: ${currentError}\n\nLast query attempted:\n\`\`\`flowquery\n${currentQuery}\n\`\`\``;
+          yield { chunk: errorMessage, step: 'query_execution', done: true, steps };
+          return;
+        }
 
-
-
-
-
-
-
-
-
-
-
-        extraction
+        // Mark the retry message as complete before proceeding to interpretation
+        yield {
+          chunk: '',
+          step: 'retry',
+          done: true
+        };
+
+        // Update executionResult for interpretation phase
+        executionResult = currentResult;
+        extraction.query = currentQuery;
       }
-      });
 
-
-
-      const errorMessage = `⚠️ Query execution failed: ${executionResult.error}\n\nQuery attempted:\n\`\`\`flowquery\n${extraction.query}\n\`\`\``;
-      yield { chunk: errorMessage, step: 'query_execution', done: true, steps };
-      return;
-    }
+      // Check if the result contains an Adaptive Card
+      const adaptiveCard = this.extractAdaptiveCardFromResults(executionResult.results);
 
-
-
-
-
-
-
-
-      executionResult,
-      !!adaptiveCard
-    );
-
-    let interpretationContent = '';
-
-    for await (const chunk of llmStream(interpretationPrompt, {
-      ...llmOptions,
-      systemPrompt: generateInterpretationPrompt(),
-      messages: conversationHistory,
-    })) {
-      const deltaContent = chunk.choices?.[0]?.delta?.content || '';
-      if (deltaContent) {
-        interpretationContent += deltaContent;
-        yield { chunk: deltaContent, step: 'interpretation', done: false };
-      }
-    }
+      // Step 4: Stream the interpretation
+      const interpretationPrompt = this.buildInterpretationPrompt(
+        userQuery,
+        extraction.query!,
+        executionResult,
+        !!adaptiveCard
+      );
 
-
-      type: 'interpretation',
-      content: interpretationContent,
-      timestamp: new Date(),
-    });
+      let interpretationContent = '';
 
-
-
-
-
-
-
-
-
-
-
-    }
+      for await (const chunk of llmStream(interpretationPrompt, {
+        ...llmOptions,
+        systemPrompt: generateInterpretationPrompt(),
+        messages: conversationHistory,
+      })) {
+        const deltaContent = chunk.choices?.[0]?.delta?.content || '';
+        if (deltaContent) {
+          interpretationContent += deltaContent;
+          yield { chunk: deltaContent, step: 'interpretation', done: false };
+        }
+      }
 
-
-
-
-
-
-
-
-
+      steps.push({
+        type: 'interpretation',
+        content: interpretationContent,
+        timestamp: new Date(),
+      });
+
+      yield { chunk: '', step: 'interpretation', done: true, steps, adaptiveCard };
+    } catch (error) {
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      yield {
+        chunk: `⚠️ An error occurred: ${errorMessage}`,
+        step: 'interpretation',
+        done: true,
+        steps
+      };
+    }
   }
 
-
-
-
-
+  /**
+   * Extract an Adaptive Card from the execution results.
+   * Checks if any result is an Adaptive Card (type: "AdaptiveCard") and returns it.
+   * Searches for Adaptive Cards at the top level or within any property of result objects.
+   */
+  private extractAdaptiveCardFromResults(results: unknown[] | undefined): Record<string, unknown> | undefined {
+    if (!results || !Array.isArray(results)) {
+      return undefined;
     }
-
-
-
-
-
-
-
+
+    for (const result of results) {
+      // Check if the result itself is an Adaptive Card
+      if (isAdaptiveCard(result)) {
+        return result;
+      }
+
+      // Check if any property of the result object is an Adaptive Card
+      if (typeof result === 'object' && result !== null) {
+        const obj = result as Record<string, unknown>;
+        for (const value of Object.values(obj)) {
+          if (isAdaptiveCard(value)) {
+            return value as Record<string, unknown>;
+          }
         }
       }
     }
-  }
 
-
-}
+    return undefined;
+  }
 
-/**
-
-
-
-
-
-
-
-): string {
-
-
-
-
+  /**
+   * Build the prompt for the interpretation phase.
+   */
+  private buildInterpretationPrompt(
+    originalQuery: string,
+    flowQuery: string,
+    executionResult: FlowQueryExecutionResult,
+    hasAdaptiveCard: boolean = false
+  ): string {
+    const resultsJson = JSON.stringify(executionResult.results, null, 2);
+    const resultCount = executionResult.results?.length || 0;
+
+    let prompt = `The user asked: "${originalQuery}"
 
 This was translated to the following FlowQuery:
 ```flowquery
@@ -391,25 +578,88 @@ ${resultsJson}
 
 `;
 
-
-
-
-
+    if (hasAdaptiveCard) {
+      prompt += `The result is an Adaptive Card that will be rendered automatically in the UI. Please provide a brief introduction or context for the data shown in the card, but do NOT recreate the table or list the data in your response since the card will display it visually.`;
+    } else {
+      prompt += `Please interpret these results and provide a helpful response to the user's original question.`;
+    }
+
+    return prompt;
   }
 
-
-
+  /**
+   * Generate a corrected FlowQuery by sending the error back to the LLM.
+   * This is used for retry logic when query execution fails.
+   */
+  private async generateCorrectedQuery(
+    originalQuery: string,
+    failedQuery: string,
+    errorMessage: string,
+    previousSteps: AgentStep[],
+    options: FlowQueryAgentOptions
+  ): Promise<string | null> {
+    const { systemPrompt, llmOptions = {}, conversationHistory = [] } = options;
+
+    // Build context from previous steps
+    const stepsContext = previousSteps
+      .filter(step => step.type === 'query_execution' || step.type === 'retry')
+      .map(step => {
+        if (step.type === 'retry') {
+          return `- Retry attempt: ${step.content}`;
+        }
+        return `- Query: \`${step.metadata?.query}\` → Error: ${step.metadata?.executionResult?.error || 'unknown'}`;
+      })
+      .join('\n');
 
-
-
-
-
-
-
-
-
-
-
+    const retryPrompt = `The user asked: "${originalQuery}"
+
+I generated the following FlowQuery:
+```flowquery
+${failedQuery}
+```
+
+However, the query failed with this error:
+${errorMessage}
+
+${stepsContext ? `Previous attempts:\n${stepsContext}\n\n` : ''}Please analyze the error and generate a CORRECTED FlowQuery that will work. Pay close attention to:
+- Syntax errors in the query
+- Incorrect loader names or function names
+- Missing or incorrect parameters
+- Data type mismatches
+
+Generate the corrected query using the same format as before (with explanation if needed).`;
+
+    try {
+      const response = await llm(retryPrompt, {
+        ...llmOptions,
+        systemPrompt,
+        messages: conversationHistory,
+      });
+
+      const responseContent = response.choices[0]?.message?.content || '';
+      const extraction = extractFlowQuery(responseContent);
+
+      if (extraction.found && extraction.query) {
+        return extraction.query;
+      }
+
+      return null;
+    } catch (error) {
+      console.error('Error generating corrected query:', error);
+      return null;
+    }
+  }
+
+  /**
+   * Handle execution errors by asking LLM to explain or suggest fixes.
+   */
+  private async interpretError(
+    originalQuery: string,
+    flowQuery: string,
+    executionResult: FlowQueryExecutionResult,
+    options: FlowQueryAgentOptions
+  ): Promise<string> {
+    const errorPrompt = `The user asked: "${originalQuery}"
 
 This was translated to the following FlowQuery:
 ```flowquery
@@ -421,14 +671,44 @@ ${executionResult.error}
 
 Please explain what went wrong in user-friendly terms and, if possible, suggest how to fix the issue or rephrase their request.`;
 
-
-
-
-
-
+    const response = await llm(errorPrompt, {
+      ...options.llmOptions,
+      systemPrompt: 'You are a helpful assistant explaining query errors. Be concise and helpful.',
+      messages: options.conversationHistory,
+    });
+
+    return response.choices[0]?.message?.content ||
+      `The query failed with error: ${executionResult.error}`;
+  }
+}
+
+// Create a default instance for convenience
+const defaultAgent = new FlowQueryAgent();
+
+/**
+ * Process a user query through the FlowQuery agent.
+ *
+ * @param userQuery - The natural language query from the user
+ * @param options - Agent configuration options
+ * @returns The agent result including final response and steps taken
+ * @deprecated Use `new FlowQueryAgent().processQuery()` instead
+ */
+export async function processQuery(
+  userQuery: string,
+  options: FlowQueryAgentOptions
+): Promise<AgentResult> {
+  return defaultAgent.processQuery(userQuery, options);
+}
 
-
-
+/**
+ * Process a query with streaming support for the final interpretation.
+ * @deprecated Use `new FlowQueryAgent().processQueryStream()` instead
+ */
+export function processQueryStream(
+  userQuery: string,
+  options: FlowQueryAgentOptions
+): AsyncGenerator<{ chunk: string; step: AgentStep['type']; done: boolean; steps?: AgentStep[]; adaptiveCard?: Record<string, unknown>; newMessage?: boolean }, void, unknown> {
+  return defaultAgent.processQueryStream(userQuery, options);
 }
 
-export default
+export default FlowQueryAgent;
````