flowquery 1.0.25 → 1.0.26
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/.github/workflows/release.yml +1 -0
- package/.husky/pre-commit +3 -2
- package/dist/flowquery.min.js +1 -1
- package/dist/parsing/parser.d.ts.map +1 -1
- package/dist/parsing/parser.js +1 -0
- package/dist/parsing/parser.js.map +1 -1
- package/docs/flowquery.min.js +1 -1
- package/flowquery-py/pyproject.toml +1 -1
- package/flowquery-py/src/parsing/parser.py +1 -0
- package/flowquery-py/tests/compute/test_runner.py +26 -0
- package/flowquery-py/tests/parsing/test_parser.py +18 -0
- package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
- package/jest.config.js +6 -9
- package/misc/apps/RAG/data/chats.json +302 -0
- package/misc/apps/RAG/data/emails.json +182 -0
- package/misc/apps/RAG/data/events.json +226 -0
- package/misc/apps/RAG/data/files.json +172 -0
- package/misc/apps/RAG/data/users.json +158 -0
- package/misc/apps/RAG/jest.config.js +21 -0
- package/misc/apps/RAG/package.json +9 -2
- package/misc/apps/RAG/src/App.tsx +5 -5
- package/misc/apps/RAG/src/components/ChatContainer.tsx +53 -124
- package/misc/apps/RAG/src/components/FlowQueryAgent.ts +151 -157
- package/misc/apps/RAG/src/components/index.ts +1 -1
- package/misc/apps/RAG/src/graph/index.ts +19 -0
- package/misc/apps/RAG/src/graph/initializeGraph.ts +254 -0
- package/misc/apps/RAG/src/index.tsx +25 -13
- package/misc/apps/RAG/src/prompts/FlowQuerySystemPrompt.ts +146 -231
- package/misc/apps/RAG/src/prompts/index.ts +4 -4
- package/misc/apps/RAG/src/tests/graph.test.ts +35 -0
- package/misc/apps/RAG/src/utils/FlowQueryExecutor.ts +20 -21
- package/misc/apps/RAG/src/utils/FlowQueryExtractor.ts +35 -30
- package/misc/apps/RAG/src/utils/Llm.ts +248 -0
- package/misc/apps/RAG/src/utils/index.ts +7 -4
- package/misc/apps/RAG/tsconfig.json +4 -3
- package/misc/apps/RAG/webpack.config.js +40 -40
- package/package.json +1 -1
- package/src/parsing/parser.ts +1 -0
- package/tests/compute/runner.test.ts +1 -1
- package/tests/parsing/parser.test.ts +16 -0
- package/misc/apps/RAG/src/plugins/README.md +0 -139
- package/misc/apps/RAG/src/plugins/index.ts +0 -72
- package/misc/apps/RAG/src/plugins/loaders/CatFacts.ts +0 -70
- package/misc/apps/RAG/src/plugins/loaders/FetchJson.ts +0 -65
- package/misc/apps/RAG/src/plugins/loaders/Form.ts +0 -594
- package/misc/apps/RAG/src/plugins/loaders/Llm.ts +0 -450
- package/misc/apps/RAG/src/plugins/loaders/MockData.ts +0 -101
- package/misc/apps/RAG/src/plugins/loaders/Table.ts +0 -274
- package/misc/apps/RAG/src/plugins/loaders/Weather.ts +0 -138
package/misc/apps/RAG/src/plugins/loaders/Llm.ts
@@ -1,450 +0,0 @@
-/**
- * OpenAI LLM Plugin: Call OpenAI-compatible APIs for chat completions.
- *
- * Usage in FlowQuery:
- * CALL llm('What is the capital of France?') YIELD choices
- *
- * With custom options:
- * CALL llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) YIELD choices, usage
- *
- * This class can also be used standalone outside of FlowQuery:
- * import { Llm } from './plugins/loaders/Llm';
- * const llmInstance = new Llm();
- * const response = await llmInstance.complete('What is 2+2?');
- * console.log(response.choices[0].message.content);
- */
-import { AsyncFunction, FunctionDef } from "flowquery/extensibility";
-
-// Default configuration - can be overridden via options
-const DEFAULT_CONFIG = {
-  apiUrl: "https://api.openai.com/v1/chat/completions",
-  model: "gpt-4o-mini",
-  temperature: 0.7,
-  maxTokens: undefined as number | undefined,
-};
-
-/**
- * Options for LLM requests.
- */
-export interface LlmOptions {
-  /** OpenAI API key. Configure in Settings or pass as option. */
-  apiKey?: string;
-  /** API endpoint URL. Defaults to OpenAI's chat completions endpoint. */
-  apiUrl?: string;
-  /** Model to use. Defaults to 'gpt-4o-mini'. */
-  model?: string;
-  /** Sampling temperature (0-2). Defaults to 0.7. */
-  temperature?: number;
-  /** Maximum tokens to generate. */
-  maxTokens?: number;
-  /** System prompt to set context for the conversation. */
-  systemPrompt?: string;
-  /** Additional messages to include in the conversation. */
-  messages?: Array<{ role: "system" | "user" | "assistant"; content: string }>;
-  /** Organization ID for OpenAI API. */
-  organizationId?: string;
-  /** Additional headers to include in the request. */
-  headers?: Record<string, string>;
-  /** Enable streaming response. */
-  stream?: boolean;
-  /** Additional body parameters to pass to the API. */
-  additionalParams?: Record<string, any>;
-}
-
-/**
- * OpenAI-compatible chat completion response.
- */
-export interface LlmResponse {
-  id: string;
-  object: string;
-  created: number;
-  model: string;
-  choices: Array<{
-    index: number;
-    message: {
-      role: string;
-      content: string;
-    };
-    finish_reason: string;
-  }>;
-  usage?: {
-    prompt_tokens: number;
-    completion_tokens: number;
-    total_tokens: number;
-  };
-}
-
-/**
- * Llm class - calls OpenAI-compatible APIs for chat completions.
- */
-@FunctionDef({
-  description:
-    "Calls OpenAI-compatible chat completion APIs. Supports GPT models and any OpenAI-compatible endpoint.",
-  category: "async",
-  parameters: [
-    {
-      name: "prompt",
-      description: "The user prompt to send to the LLM",
-      type: "string",
-      required: true,
-      example: "What is the capital of France?",
-    },
-    {
-      name: "options",
-      description: "Optional configuration for the LLM request",
-      type: "object",
-      required: false,
-      properties: {
-        apiKey: { description: "OpenAI API key", type: "string" },
-        apiUrl: {
-          description: "API endpoint URL (defaults to OpenAI chat completions)",
-          type: "string",
-        },
-        model: { description: "Model to use (defaults to gpt-4o-mini)", type: "string" },
-        temperature: {
-          description: "Sampling temperature 0-2 (defaults to 0.7)",
-          type: "number",
-        },
-        maxTokens: { description: "Maximum tokens to generate", type: "number" },
-        systemPrompt: { description: "System prompt to set context", type: "string" },
-        messages: { description: "Additional conversation messages", type: "array" },
-        organizationId: { description: "OpenAI organization ID", type: "string" },
-        headers: { description: "Additional request headers", type: "object" },
-        stream: { description: "Enable streaming response", type: "boolean" },
-        additionalParams: { description: "Additional API parameters", type: "object" },
-      },
-    },
-  ],
-  output: {
-    description: "OpenAI chat completion response",
-    type: "object",
-    properties: {
-      id: { description: "Unique identifier for the completion", type: "string" },
-      model: { description: "Model used for completion", type: "string" },
-      choices: {
-        description: "Array of completion choices",
-        type: "array",
-        example: [
-          { message: { role: "assistant", content: "Paris is the capital of France." } },
-        ],
-      },
-      usage: { description: "Token usage statistics", type: "object" },
-    },
-  },
-  examples: [
-    "CALL llm('What is 2+2?') YIELD choices",
-    "CALL llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) YIELD choices, usage",
-    "CALL llm('Write a haiku', { systemPrompt: 'You are a poet' }) YIELD choices",
-  ],
-  notes: "Requires API key configured in Settings or passed as apiKey option. Works with any OpenAI-compatible API by setting the apiUrl option.",
-})
-export class Llm extends AsyncFunction {
-  private readonly defaultOptions: Partial<LlmOptions>;
-
-  constructor(defaultOptions: Partial<LlmOptions> = {}) {
-    super();
-    this.defaultOptions = defaultOptions;
-  }
-
-  /**
-   * Get API key from options or localStorage (browser).
-   */
-  private getApiKey(options?: LlmOptions): string {
-    // First check options
-    if (options?.apiKey) {
-      return options.apiKey;
-    }
-
-    // Check default options
-    if (this.defaultOptions.apiKey) {
-      return this.defaultOptions.apiKey;
-    }
-
-    // In browser, check localStorage
-    if (typeof window !== "undefined" && typeof localStorage !== "undefined") {
-      const storedKey = localStorage.getItem("flowquery_openai_api_key");
-      if (storedKey) {
-        return storedKey;
-      }
-    }
-
-    throw new Error(
-      "OpenAI API key is required. Configure it in Settings or pass apiKey in options."
-    );
-  }
-
-  /**
-   * Get stored configuration from localStorage (browser only).
-   */
-  private getStoredConfig(): Partial<LlmOptions> {
-    if (typeof window === "undefined" || typeof localStorage === "undefined") {
-      return {};
-    }
-
-    return {
-      organizationId: localStorage.getItem("flowquery_openai_org_id") || undefined,
-      model: localStorage.getItem("flowquery_openai_model") || undefined,
-    };
-  }
-
-  /**
-   * Build the request body for the API call.
-   */
-  private buildRequestBody(prompt: string, options?: LlmOptions): Record<string, any> {
-    const messages: Array<{ role: string; content: string }> = [];
-
-    // Add system prompt if provided
-    if (options?.systemPrompt) {
-      messages.push({ role: "system", content: options.systemPrompt });
-    }
-
-    // Add any additional messages
-    if (options?.messages) {
-      messages.push(...options.messages);
-    }
-
-    // Add the user prompt
-    messages.push({ role: "user", content: prompt });
-
-    const body: Record<string, any> = {
-      model: options?.model || this.defaultOptions.model || DEFAULT_CONFIG.model,
-      messages,
-      temperature:
-        options?.temperature ??
-        this.defaultOptions.temperature ??
-        DEFAULT_CONFIG.temperature,
-      ...(options?.additionalParams || {}),
-    };
-
-    if (options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens) {
-      body.max_tokens =
-        options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens;
-    }
-
-    if (options?.stream) {
-      body.stream = true;
-    }
-
-    return body;
-  }
-
-  /**
-   * Build request headers.
-   */
-  private buildHeaders(apiKey: string, options?: LlmOptions): Record<string, string> {
-    const headers: Record<string, string> = {
-      "Content-Type": "application/json",
-      Authorization: `Bearer ${apiKey}`,
-      ...(options?.headers || {}),
-    };
-
-    if (options?.organizationId) {
-      headers["OpenAI-Organization"] = options.organizationId;
-    }
-
-    return headers;
-  }
-
-  /**
-   * Call the OpenAI-compatible API and return the full response.
-   *
-   * @param prompt - The user prompt to send to the LLM
-   * @param options - Optional configuration for the request
-   * @returns The full API response
-   *
-   * @example
-   * ```typescript
-   * const llmInstance = new Llm();
-   * const response = await llmInstance.complete('What is the capital of France?');
-   * console.log(response.choices[0].message.content);
-   * ```
-   */
-  async complete(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
-    // Merge stored config with provided options (options take precedence)
-    const storedConfig = this.getStoredConfig();
-    const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
-
-    const apiKey = this.getApiKey(mergedOptions);
-    const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
-    const headers = this.buildHeaders(apiKey, mergedOptions);
-    const body = this.buildRequestBody(prompt, mergedOptions);
-
-    const response = await fetch(apiUrl, {
-      method: "POST",
-      headers,
-      body: JSON.stringify(body),
-    });
-
-    if (!response.ok) {
-      const errorText = await response.text();
-      throw new Error(`LLM API error (${response.status}): ${errorText}`);
-    }
-
-    return response.json();
-  }
-
-  /**
-   * Call the OpenAI-compatible API with streaming and yield each chunk.
-   *
-   * @param prompt - The user prompt to send to the LLM
-   * @param options - Optional configuration for the request
-   * @yields Parsed SSE data chunks from the stream
-   *
-   * @example
-   * ```typescript
-   * const llmInstance = new Llm();
-   * for await (const chunk of llmInstance.stream('Tell me a story')) {
-   *   if (chunk.choices?.[0]?.delta?.content) {
-   *     process.stdout.write(chunk.choices[0].delta.content);
-   *   }
-   * }
-   * ```
-   */
-  async *stream(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
-    // Merge stored config with provided options (options take precedence)
-    const storedConfig = this.getStoredConfig();
-    const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
-
-    const apiKey = this.getApiKey(mergedOptions);
-    const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
-    const headers = this.buildHeaders(apiKey, mergedOptions);
-    const body = this.buildRequestBody(prompt, { ...mergedOptions, stream: true });
-
-    const response = await fetch(apiUrl, {
-      method: "POST",
-      headers,
-      body: JSON.stringify(body),
-    });
-
-    if (!response.ok) {
-      const errorText = await response.text();
-      throw new Error(`LLM API error (${response.status}): ${errorText}`);
-    }
-
-    if (!response.body) {
-      throw new Error("Response body is null");
-    }
-
-    const reader = response.body.getReader();
-    const decoder = new TextDecoder();
-    let buffer = "";
-
-    try {
-      while (true) {
-        const { done, value } = await reader.read();
-        if (done) break;
-
-        buffer += decoder.decode(value, { stream: true });
-        const lines = buffer.split("\n");
-        buffer = lines.pop() || "";
-
-        for (const line of lines) {
-          const trimmed = line.trim();
-          if (trimmed.startsWith("data: ")) {
-            const data = trimmed.slice(6);
-            if (data === "[DONE]") {
-              return;
-            }
-            try {
-              yield JSON.parse(data);
-            } catch {
-              // Skip invalid JSON chunks
-            }
-          }
-        }
-      }
-    } finally {
-      reader.releaseLock();
-    }
-  }
-
-  /**
-   * Async generator provider for FlowQuery LOAD operations.
-   */
-  async *generate(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
-    if (options?.stream) {
-      yield* this.stream(prompt, options);
-    } else {
-      const response = await this.complete(prompt, options);
-      yield response;
-    }
-  }
-
-  /**
-   * Extract just the text content from an LLM response.
-   * Convenience method for common use case.
-   *
-   * @param response - The LLM response object
-   * @returns The text content from the first choice
-   */
-  static extractContent(response: LlmResponse): string {
-    return response.choices?.[0]?.message?.content || "";
-  }
-}
-
-/**
- * Call the OpenAI-compatible API and return the full response.
- * This function can be used standalone outside of FlowQuery.
- *
- * @param prompt - The user prompt to send to the LLM
- * @param options - Optional configuration for the request
- * @returns The full API response
- *
- * @example
- * ```typescript
- * import { llm } from './plugins/loaders/Llm';
- *
- * // Simple usage
- * const response = await llm('What is the capital of France?');
- * console.log(response.choices[0].message.content);
- *
- * // With options
- * const response = await llm('Translate to Spanish: Hello', {
- *   model: 'gpt-4o',
- *   temperature: 0.3,
- *   systemPrompt: 'You are a professional translator.'
- * });
- * ```
- */
-export async function llm(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
-  return new Llm().complete(prompt, options);
-}
-
-/**
- * Call the OpenAI-compatible API with streaming and yield each chunk.
- * This function can be used standalone outside of FlowQuery.
- *
- * @param prompt - The user prompt to send to the LLM
- * @param options - Optional configuration for the request
- * @yields Parsed SSE data chunks from the stream
- *
- * @example
- * ```typescript
- * import { llmStream } from './plugins/loaders/Llm';
- *
- * for await (const chunk of llmStream('Tell me a story')) {
- *   if (chunk.choices?.[0]?.delta?.content) {
- *     process.stdout.write(chunk.choices[0].delta.content);
- *   }
- * }
- * ```
- */
-export async function* llmStream(
-  prompt: string,
-  options?: LlmOptions
-): AsyncGenerator<any, void, unknown> {
-  yield* new Llm().stream(prompt, options);
-}
-
-/**
- * Extract just the text content from an LLM response.
- * Convenience function for common use case.
- *
- * @param response - The LLM response object
- * @returns The text content from the first choice
- */
-export function extractContent(response: LlmResponse): string {
-  return Llm.extractContent(response);
-}
-
-export default Llm;
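For reference, the deleted module's documented standalone API fit together as below. This is a sketch assembled from the doc comments in the removed file above; the import path is the old plugin location, which no longer exists after this release, and the apiKey value is a placeholder:

    import { llm, llmStream, extractContent } from "./plugins/loaders/Llm";

    // One-shot completion using the documented defaults (gpt-4o-mini, temperature 0.7)
    const response = await llm("What is the capital of France?", { apiKey: "sk-..." });
    console.log(extractContent(response)); // text content of the first choice

    // Streaming: each yielded chunk is a parsed SSE payload, so text arrives under delta
    for await (const chunk of llmStream("Tell me a story", { apiKey: "sk-..." })) {
      if (chunk.choices?.[0]?.delta?.content) {
        process.stdout.write(chunk.choices[0].delta.content);
      }
    }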
package/misc/apps/RAG/src/plugins/loaders/MockData.ts
@@ -1,101 +0,0 @@
-import { AsyncFunction, FunctionDef } from "flowquery/extensibility";
-
-const usersData = [
-  { id: 1, name: "Alice Johnson", email: "alice@example.com", age: 28 },
-  { id: 2, name: "Bob Smith", email: "bob@example.com", age: 34 },
-  { id: 3, name: "Charlie Brown", email: "charlie@example.com", age: 22 },
-  { id: 4, name: "Diana Ross", email: "diana@example.com", age: 45 },
-  { id: 5, name: "Eve Wilson", email: "eve@example.com", age: 31 },
-];
-
-const productsData = [
-  { id: 101, name: "Laptop", price: 999.99, category: "Electronics", stock: 50 },
-  { id: 102, name: "Headphones", price: 149.99, category: "Electronics", stock: 200 },
-  { id: 103, name: "Coffee Mug", price: 12.99, category: "Kitchen", stock: 500 },
-  { id: 104, name: "Notebook", price: 8.99, category: "Office", stock: 300 },
-  { id: 105, name: "Backpack", price: 59.99, category: "Accessories", stock: 75 },
-];
-
-const ordersData = [
-  { id: 1001, userId: 1, productId: 101, quantity: 1, total: 999.99, date: "2025-12-01" },
-  { id: 1002, userId: 2, productId: 102, quantity: 2, total: 299.98, date: "2025-12-05" },
-  { id: 1003, userId: 1, productId: 103, quantity: 4, total: 51.96, date: "2025-12-10" },
-  { id: 1004, userId: 3, productId: 104, quantity: 10, total: 89.9, date: "2025-12-15" },
-  { id: 1005, userId: 4, productId: 105, quantity: 1, total: 59.99, date: "2025-12-20" },
-  { id: 1006, userId: 5, productId: 101, quantity: 1, total: 999.99, date: "2025-12-25" },
-];
-
-@FunctionDef({
-  description: "Returns mock user data",
-  category: "async",
-  parameters: [],
-  output: {
-    description: "User object",
-    type: "object",
-    properties: {
-      id: { description: "User ID", type: "number" },
-      name: { description: "User name", type: "string" },
-      email: { description: "User email", type: "string" },
-      age: { description: "User age", type: "number" },
-    },
-  },
-  examples: ["CALL users() YIELD id, name, email, age"],
-})
-export class Users extends AsyncFunction {
-  async *generate(): AsyncGenerator<any, void, unknown> {
-    for (const user of usersData) {
-      yield user;
-    }
-  }
-}
-
-@FunctionDef({
-  description: "Returns mock product data",
-  category: "async",
-  parameters: [],
-  output: {
-    description: "Product object",
-    type: "object",
-    properties: {
-      id: { description: "Product ID", type: "number" },
-      name: { description: "Product name", type: "string" },
-      price: { description: "Product price", type: "number" },
-      category: { description: "Product category", type: "string" },
-      stock: { description: "Stock quantity", type: "number" },
-    },
-  },
-  examples: ["CALL products() YIELD id, name, price, category, stock"],
-})
-export class Products extends AsyncFunction {
-  async *generate(): AsyncGenerator<any, void, unknown> {
-    for (const product of productsData) {
-      yield product;
-    }
-  }
-}
-
-@FunctionDef({
-  description: "Returns mock order data",
-  category: "async",
-  parameters: [],
-  output: {
-    description: "Order object",
-    type: "object",
-    properties: {
-      id: { description: "Order ID", type: "number" },
-      userId: { description: "User ID who placed the order", type: "number" },
-      productId: { description: "Product ID ordered", type: "number" },
-      quantity: { description: "Quantity ordered", type: "number" },
-      total: { description: "Order total", type: "number" },
-      date: { description: "Order date", type: "string" },
-    },
-  },
-  examples: ["CALL orders() YIELD id, userId, productId, quantity, total, date"],
-})
-export class Orders extends AsyncFunction {
-  async *generate(): AsyncGenerator<any, void, unknown> {
-    for (const order of ordersData) {
-      yield order;
-    }
-  }
-}
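The removed mock providers were thin async generators over static arrays, so they could be driven directly as well as through FlowQuery's CALL syntax. A minimal sketch based on the deleted classes above (the relative import path is the old plugin location, removed in this release):

    import { Users, Orders } from "./plugins/loaders/MockData";

    // Iterate the generator the same way the runtime would for CALL users()
    for await (const user of new Users().generate()) {
      console.log(user.id, user.name, user.email);
    }

    // Orders reference users via userId, per the FunctionDef output schema
    for await (const order of new Orders().generate()) {
      console.log(order.userId, order.total);
    }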