flowquery 1.0.14 → 1.0.16
This diff shows the contents of publicly released versions of this package, as published to a supported registry; it is provided for informational purposes only.
- package/.editorconfig +21 -0
- package/.husky/pre-commit +1 -0
- package/.prettierrc +22 -0
- package/dist/flowquery.min.js +1 -1
- package/dist/parsing/expressions/expression_map.d.ts +9 -0
- package/dist/parsing/expressions/expression_map.d.ts.map +1 -0
- package/dist/parsing/expressions/expression_map.js +24 -0
- package/dist/parsing/expressions/expression_map.js.map +1 -0
- package/dist/parsing/operations/call.d.ts +17 -0
- package/dist/parsing/operations/call.d.ts.map +1 -0
- package/dist/parsing/operations/call.js +105 -0
- package/dist/parsing/operations/call.js.map +1 -0
- package/dist/parsing/operations/load.d.ts +6 -6
- package/dist/parsing/operations/load.d.ts.map +1 -1
- package/dist/parsing/operations/load.js +8 -6
- package/dist/parsing/operations/load.js.map +1 -1
- package/dist/parsing/operations/operation.d.ts +1 -0
- package/dist/parsing/operations/operation.d.ts.map +1 -1
- package/dist/parsing/operations/operation.js +6 -5
- package/dist/parsing/operations/operation.js.map +1 -1
- package/dist/parsing/operations/projection.d.ts +1 -1
- package/dist/parsing/operations/projection.d.ts.map +1 -1
- package/dist/parsing/operations/projection.js.map +1 -1
- package/dist/parsing/parser.d.ts +1 -0
- package/dist/parsing/parser.d.ts.map +1 -1
- package/dist/parsing/parser.js +148 -99
- package/dist/parsing/parser.js.map +1 -1
- package/dist/parsing/token_to_node.d.ts +2 -2
- package/dist/parsing/token_to_node.d.ts.map +1 -1
- package/dist/parsing/token_to_node.js +12 -12
- package/dist/parsing/token_to_node.js.map +1 -1
- package/dist/tokenization/token.d.ts +5 -1
- package/dist/tokenization/token.d.ts.map +1 -1
- package/dist/tokenization/token.js +17 -5
- package/dist/tokenization/token.js.map +1 -1
- package/docs/flowquery.min.js +1 -1
- package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
- package/misc/apps/RAG/package.json +1 -1
- package/misc/apps/RAG/src/plugins/loaders/CatFacts.ts +21 -26
- package/misc/apps/RAG/src/plugins/loaders/FetchJson.ts +65 -0
- package/misc/apps/RAG/src/plugins/loaders/Form.ts +163 -147
- package/misc/apps/RAG/src/plugins/loaders/Llm.ts +106 -92
- package/misc/apps/RAG/src/plugins/loaders/MockData.ts +80 -58
- package/misc/apps/RAG/src/plugins/loaders/Table.ts +106 -103
- package/misc/apps/RAG/src/plugins/loaders/Weather.ts +50 -38
- package/misc/apps/RAG/src/prompts/FlowQuerySystemPrompt.ts +77 -78
- package/package.json +12 -2
- package/src/parsing/expressions/expression_map.ts +22 -0
- package/src/parsing/operations/call.ts +69 -0
- package/src/parsing/operations/load.ts +123 -120
- package/src/parsing/operations/operation.ts +14 -13
- package/src/parsing/operations/projection.ts +3 -3
- package/src/parsing/parser.ts +303 -239
- package/src/parsing/token_to_node.ts +67 -50
- package/src/tokenization/token.ts +29 -14
- package/tests/compute/runner.test.ts +277 -165
- package/tests/parsing/parser.test.ts +352 -303
- package/tests/tokenization/tokenizer.test.ts +17 -17
- package/vscode-settings.json.recommended +16 -0
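The most consequential source change in this release is the plugin contract: loader classes now extend AsyncFunction from flowquery/extensibility, call super() in their constructors, and implement an async *generate() method that the engine drives through the new CALL ... YIELD syntax (supported by the new call operation and expression_map modules). A minimal sketch of the new shape, using a hypothetical greet plugin rather than any class shipped in this package:

```typescript
import { AsyncFunction, FunctionDef } from "flowquery/extensibility";

// Hypothetical plugin illustrating the 1.0.16 contract seen in the diffs
// below: extend AsyncFunction, call super(), and implement generate() as
// an async generator whose yielded objects feed CALL ... YIELD.
@FunctionDef({
  description: "Yields a single greeting row (illustration only)",
  category: "async",
  parameters: [
    { name: "name", description: "Who to greet", type: "string", required: true },
  ],
  output: {
    description: "Greeting object",
    type: "object",
    properties: { message: { description: "Greeting text", type: "string" } },
  },
  examples: ["CALL greet('World') YIELD message"],
})
export class Greet extends AsyncFunction {
  constructor() {
    super();
  }

  async *generate(name: string): AsyncGenerator<any, void, unknown> {
    yield { message: `Hello, ${name}!` };
  }
}
```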
package/misc/apps/RAG/src/plugins/loaders/Llm.ts

@@ -1,27 +1,24 @@
 /**
  * OpenAI LLM Plugin: Call OpenAI-compatible APIs for chat completions.
- *
+ *
  * Usage in FlowQuery:
- *
- *
- *
+ * CALL llm('What is the capital of France?') YIELD choices
+ *
  * With custom options:
- *
- *
- *
+ * CALL llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) YIELD choices, usage
+ *
  * This class can also be used standalone outside of FlowQuery:
  * import { Llm } from './plugins/loaders/Llm';
  * const llmInstance = new Llm();
  * const response = await llmInstance.complete('What is 2+2?');
  * console.log(response.choices[0].message.content);
  */
-
-import { FunctionDef } from 'flowquery/extensibility';
+import { AsyncFunction, FunctionDef } from "flowquery/extensibility";
 
 // Default configuration - can be overridden via options
 const DEFAULT_CONFIG = {
-  apiUrl:
-  model:
+  apiUrl: "https://api.openai.com/v1/chat/completions",
+  model: "gpt-4o-mini",
   temperature: 0.7,
   maxTokens: undefined as number | undefined,
 };

@@ -43,7 +40,7 @@ export interface LlmOptions {
   /** System prompt to set context for the conversation. */
   systemPrompt?: string;
   /** Additional messages to include in the conversation. */
-  messages?: Array<{ role:
+  messages?: Array<{ role: "system" | "user" | "assistant"; content: string }>;
   /** Organization ID for OpenAI API. */
   organizationId?: string;
   /** Additional headers to include in the request. */

@@ -81,61 +78,71 @@ export interface LlmResponse {
  * Llm class - calls OpenAI-compatible APIs for chat completions.
  */
 @FunctionDef({
-  description:
-
+  description:
+    "Calls OpenAI-compatible chat completion APIs. Supports GPT models and any OpenAI-compatible endpoint.",
+  category: "async",
   parameters: [
     {
-      name:
-      description:
-      type:
+      name: "prompt",
+      description: "The user prompt to send to the LLM",
+      type: "string",
       required: true,
-      example:
+      example: "What is the capital of France?",
     },
     {
-      name:
-      description:
-      type:
+      name: "options",
+      description: "Optional configuration for the LLM request",
+      type: "object",
       required: false,
       properties: {
-        apiKey: { description:
-        apiUrl: {
-
-
-
-
-
-
-
-
-
-
+        apiKey: { description: "OpenAI API key", type: "string" },
+        apiUrl: {
+          description: "API endpoint URL (defaults to OpenAI chat completions)",
+          type: "string",
+        },
+        model: { description: "Model to use (defaults to gpt-4o-mini)", type: "string" },
+        temperature: {
+          description: "Sampling temperature 0-2 (defaults to 0.7)",
+          type: "number",
+        },
+        maxTokens: { description: "Maximum tokens to generate", type: "number" },
+        systemPrompt: { description: "System prompt to set context", type: "string" },
+        messages: { description: "Additional conversation messages", type: "array" },
+        organizationId: { description: "OpenAI organization ID", type: "string" },
+        headers: { description: "Additional request headers", type: "object" },
+        stream: { description: "Enable streaming response", type: "boolean" },
+        additionalParams: { description: "Additional API parameters", type: "object" },
+      },
+    },
   ],
   output: {
-    description:
-    type:
+    description: "OpenAI chat completion response",
+    type: "object",
     properties: {
-      id: { description:
-      model: { description:
-      choices: {
-        description:
-        type:
-        example: [
+      id: { description: "Unique identifier for the completion", type: "string" },
+      model: { description: "Model used for completion", type: "string" },
+      choices: {
+        description: "Array of completion choices",
+        type: "array",
+        example: [
+          { message: { role: "assistant", content: "Paris is the capital of France." } },
+        ],
       },
-      usage: { description:
-    }
+      usage: { description: "Token usage statistics", type: "object" },
+    },
   },
   examples: [
-    "
-    "
-    "
+    "CALL llm('What is 2+2?') YIELD choices",
+    "CALL llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) YIELD choices, usage",
+    "CALL llm('Write a haiku', { systemPrompt: 'You are a poet' }) YIELD choices",
   ],
-  notes:
+  notes: "Requires API key configured in Settings or passed as apiKey option. Works with any OpenAI-compatible API by setting the apiUrl option.",
 })
-export class Llm {
+export class Llm extends AsyncFunction {
   private readonly defaultOptions: Partial<LlmOptions>;
 
   constructor(defaultOptions: Partial<LlmOptions> = {}) {
+    super();
     this.defaultOptions = defaultOptions;
   }
 

@@ -147,22 +154,22 @@ export class Llm {
     if (options?.apiKey) {
       return options.apiKey;
     }
-
+
     // Check default options
     if (this.defaultOptions.apiKey) {
       return this.defaultOptions.apiKey;
     }
-
+
     // In browser, check localStorage
-    if (typeof window !==
-      const storedKey = localStorage.getItem(
+    if (typeof window !== "undefined" && typeof localStorage !== "undefined") {
+      const storedKey = localStorage.getItem("flowquery_openai_api_key");
       if (storedKey) {
         return storedKey;
       }
     }
-
+
     throw new Error(
-
+      "OpenAI API key is required. Configure it in Settings or pass apiKey in options."
     );
   }
 

@@ -170,13 +177,13 @@ export class Llm {
    * Get stored configuration from localStorage (browser only).
    */
   private getStoredConfig(): Partial<LlmOptions> {
-    if (typeof window ===
+    if (typeof window === "undefined" || typeof localStorage === "undefined") {
      return {};
     }
-
+
     return {
-      organizationId: localStorage.getItem(
-      model: localStorage.getItem(
+      organizationId: localStorage.getItem("flowquery_openai_org_id") || undefined,
+      model: localStorage.getItem("flowquery_openai_model") || undefined,
     };
   }
 

@@ -188,7 +195,7 @@ export class Llm {
 
     // Add system prompt if provided
     if (options?.systemPrompt) {
-      messages.push({ role:
+      messages.push({ role: "system", content: options.systemPrompt });
     }
 
     // Add any additional messages

@@ -197,17 +204,21 @@ export class Llm {
     }
 
     // Add the user prompt
-    messages.push({ role:
+    messages.push({ role: "user", content: prompt });
 
     const body: Record<string, any> = {
       model: options?.model || this.defaultOptions.model || DEFAULT_CONFIG.model,
       messages,
-      temperature:
+      temperature:
+        options?.temperature ??
+        this.defaultOptions.temperature ??
+        DEFAULT_CONFIG.temperature,
       ...(options?.additionalParams || {}),
     };
 
     if (options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens) {
-      body.max_tokens =
+      body.max_tokens =
+        options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens;
     }
 
     if (options?.stream) {

@@ -222,13 +233,13 @@ export class Llm {
    */
   private buildHeaders(apiKey: string, options?: LlmOptions): Record<string, string> {
     const headers: Record<string, string> = {
-
-
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${apiKey}`,
       ...(options?.headers || {}),
     };
 
     if (options?.organizationId) {
-      headers[
+      headers["OpenAI-Organization"] = options.organizationId;
     }
 
     return headers;

@@ -236,11 +247,11 @@ export class Llm {
 
   /**
    * Call the OpenAI-compatible API and return the full response.
-   *
+   *
    * @param prompt - The user prompt to send to the LLM
    * @param options - Optional configuration for the request
    * @returns The full API response
-   *
+   *
    * @example
    * ```typescript
    * const llmInstance = new Llm();

@@ -252,14 +263,14 @@ export class Llm {
     // Merge stored config with provided options (options take precedence)
     const storedConfig = this.getStoredConfig();
     const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
-
+
     const apiKey = this.getApiKey(mergedOptions);
     const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
     const headers = this.buildHeaders(apiKey, mergedOptions);
     const body = this.buildRequestBody(prompt, mergedOptions);
 
     const response = await fetch(apiUrl, {
-      method:
+      method: "POST",
       headers,
       body: JSON.stringify(body),
     });

@@ -274,11 +285,11 @@ export class Llm {
 
   /**
    * Call the OpenAI-compatible API with streaming and yield each chunk.
-   *
+   *
    * @param prompt - The user prompt to send to the LLM
    * @param options - Optional configuration for the request
    * @yields Parsed SSE data chunks from the stream
-   *
+   *
    * @example
    * ```typescript
    * const llmInstance = new Llm();

@@ -293,14 +304,14 @@ export class Llm {
     // Merge stored config with provided options (options take precedence)
     const storedConfig = this.getStoredConfig();
     const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
-
+
     const apiKey = this.getApiKey(mergedOptions);
     const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
     const headers = this.buildHeaders(apiKey, mergedOptions);
     const body = this.buildRequestBody(prompt, { ...mergedOptions, stream: true });
 
     const response = await fetch(apiUrl, {
-      method:
+      method: "POST",
       headers,
       body: JSON.stringify(body),
     });

@@ -311,12 +322,12 @@ export class Llm {
     }
 
     if (!response.body) {
-      throw new Error(
+      throw new Error("Response body is null");
     }
 
     const reader = response.body.getReader();
     const decoder = new TextDecoder();
-    let buffer =
+    let buffer = "";
 
     try {
       while (true) {

@@ -324,14 +335,14 @@ export class Llm {
         if (done) break;
 
         buffer += decoder.decode(value, { stream: true });
-        const lines = buffer.split(
-        buffer = lines.pop() ||
+        const lines = buffer.split("\n");
+        buffer = lines.pop() || "";
 
         for (const line of lines) {
           const trimmed = line.trim();
-          if (trimmed.startsWith(
+          if (trimmed.startsWith("data: ")) {
             const data = trimmed.slice(6);
-            if (data ===
+            if (data === "[DONE]") {
               return;
             }
             try {

@@ -350,7 +361,7 @@ export class Llm {
   /**
    * Async generator provider for FlowQuery LOAD operations.
    */
-  async *
+  async *generate(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
     if (options?.stream) {
       yield* this.stream(prompt, options);
     } else {

@@ -362,31 +373,31 @@ export class Llm {
   /**
    * Extract just the text content from an LLM response.
    * Convenience method for common use case.
-   *
+   *
    * @param response - The LLM response object
    * @returns The text content from the first choice
    */
   static extractContent(response: LlmResponse): string {
-    return response.choices?.[0]?.message?.content ||
+    return response.choices?.[0]?.message?.content || "";
   }
 }
 
 /**
  * Call the OpenAI-compatible API and return the full response.
  * This function can be used standalone outside of FlowQuery.
- *
+ *
  * @param prompt - The user prompt to send to the LLM
  * @param options - Optional configuration for the request
  * @returns The full API response
- *
+ *
  * @example
  * ```typescript
 * import { llm } from './plugins/loaders/Llm';
- *
+ *
 * // Simple usage
 * const response = await llm('What is the capital of France?');
 * console.log(response.choices[0].message.content);
- *
+ *
 * // With options
 * const response = await llm('Translate to Spanish: Hello', {
 *   model: 'gpt-4o',

@@ -402,15 +413,15 @@ export async function llm(prompt: string, options?: LlmOptions): Promise<LlmResp
 /**
  * Call the OpenAI-compatible API with streaming and yield each chunk.
  * This function can be used standalone outside of FlowQuery.
- *
+ *
  * @param prompt - The user prompt to send to the LLM
 * @param options - Optional configuration for the request
 * @yields Parsed SSE data chunks from the stream
- *
+ *
 * @example
 * ```typescript
 * import { llmStream } from './plugins/loaders/Llm';
- *
+ *
 * for await (const chunk of llmStream('Tell me a story')) {
 *   if (chunk.choices?.[0]?.delta?.content) {
 *     process.stdout.write(chunk.choices[0].delta.content);

@@ -418,14 +429,17 @@ export async function llm(prompt: string, options?: LlmOptions): Promise<LlmResp
 * }
 * ```
 */
-export async function* llmStream(
+export async function* llmStream(
+  prompt: string,
+  options?: LlmOptions
+): AsyncGenerator<any, void, unknown> {
   yield* new Llm().stream(prompt, options);
 }
 
 /**
  * Extract just the text content from an LLM response.
  * Convenience function for common use case.
- *
+ *
 * @param response - The LLM response object
 * @returns The text content from the first choice
 */
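As the doc comments in the diff above note, the module remains usable standalone: llm() returns the full response, llmStream() yields parsed SSE chunks, and Llm.extractContent() pulls the first choice's text. A usage sketch under the assumption that the API key is passed explicitly via options (the localStorage fallback only applies in browsers); the env-var name here is a placeholder:

```typescript
import { llm, llmStream, Llm } from "./plugins/loaders/Llm";

async function demo(): Promise<void> {
  // One-shot completion; OPENAI_API_KEY is an assumed env var, not part of the package.
  const response = await llm("What is the capital of France?", {
    apiKey: process.env.OPENAI_API_KEY ?? "",
    model: "gpt-4o-mini",
    temperature: 0.3,
  });
  console.log(Llm.extractContent(response));

  // Streaming: each chunk is a parsed SSE payload; deltas carry the text.
  for await (const chunk of llmStream("Tell me a story", {
    apiKey: process.env.OPENAI_API_KEY ?? "",
  })) {
    const delta = chunk.choices?.[0]?.delta?.content;
    if (delta) process.stdout.write(delta);
  }
}
```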
package/misc/apps/RAG/src/plugins/loaders/MockData.ts

@@ -1,54 +1,75 @@
 /**
  * Example plugin: Generate mock data for testing.
- *
+ *
  * Usage in FlowQuery:
- *
- * RETURN user.name, user.email
+ * CALL mockUsers(10) YIELD name, email
  */
-
-import { FunctionDef } from 'flowquery/extensibility';
+import { AsyncFunction, FunctionDef } from "flowquery/extensibility";
 
 /**
  * MockUsers class - generates mock user data for testing.
  */
 @FunctionDef({
-  description:
-  category:
+  description: "Generates mock user data for testing purposes",
+  category: "async",
   parameters: [
     {
-      name:
-      description:
-      type:
+      name: "count",
+      description: "Number of mock users to generate",
+      type: "number",
       required: false,
-      default: 5
-    }
+      default: 5,
+    },
   ],
   output: {
-    description:
-    type:
+    description: "Mock user object",
+    type: "object",
     properties: {
-      id: { description:
-      name: { description:
-      email: { description:
-      age: { description:
-      active: { description:
-    }
+      id: { description: "User ID", type: "number" },
+      name: { description: "Full name", type: "string" },
+      email: { description: "Email address", type: "string" },
+      age: { description: "Age in years", type: "number" },
+      active: { description: "Whether user is active", type: "boolean" },
+    },
   },
   examples: [
-    "
-    "
-  ]
+    "CALL mockUsers(10) YIELD name, email",
+    "CALL mockUsers(20) YIELD name, email, active WHERE active = true",
+  ],
 })
-export class MockUsers {
+export class MockUsers extends AsyncFunction {
   private readonly firstNames: string[];
   private readonly lastNames: string[];
   private readonly domains: string[];
 
   constructor(
-    firstNames: string[] = [
-
-
+    firstNames: string[] = [
+      "Alice",
+      "Bob",
+      "Charlie",
+      "Diana",
+      "Eve",
+      "Frank",
+      "Grace",
+      "Henry",
+      "Ivy",
+      "Jack",
+    ],
+    lastNames: string[] = [
+      "Smith",
+      "Johnson",
+      "Williams",
+      "Brown",
+      "Jones",
+      "Garcia",
+      "Miller",
+      "Davis",
+      "Rodriguez",
+      "Martinez",
+    ],
+    domains: string[] = ["example.com", "test.org", "demo.net"]
   ) {
+    super();
     this.firstNames = firstNames;
     this.lastNames = lastNames;
     this.domains = domains;

@@ -56,21 +77,21 @@ export class MockUsers {
 
   /**
    * Generates mock user data.
-   *
+   *
    * @param count - Number of mock users to generate
    */
-  async *
+  async *generate(count: number = 5): AsyncGenerator<any, void, unknown> {
     for (let i = 0; i < count; i++) {
       const firstName = this.firstNames[Math.floor(Math.random() * this.firstNames.length)];
       const lastName = this.lastNames[Math.floor(Math.random() * this.lastNames.length)];
       const domain = this.domains[Math.floor(Math.random() * this.domains.length)];
-
+
       yield {
         id: i + 1,
         name: `${firstName} ${lastName}`,
         email: `${firstName.toLowerCase()}.${lastName.toLowerCase()}@${domain}`,
         age: Math.floor(Math.random() * 50) + 18,
-        active: Math.random() > 0.3
+        active: Math.random() > 0.3,
       };
     }
   }

@@ -80,44 +101,45 @@ export class MockUsers {
  * MockProducts class - generates mock product data for testing.
  */
 @FunctionDef({
-  description:
-  category:
+  description: "Generates mock product data for testing purposes",
+  category: "async",
   parameters: [
     {
-      name:
-      description:
-      type:
+      name: "count",
+      description: "Number of mock products to generate",
+      type: "number",
       required: false,
-      default: 5
-    }
+      default: 5,
+    },
   ],
   output: {
-    description:
-    type:
+    description: "Mock product object",
+    type: "object",
     properties: {
-      id: { description:
-      name: { description:
-      category: { description:
-      price: { description:
-      inStock: { description:
-      rating: { description:
-    }
+      id: { description: "Product ID", type: "number" },
+      name: { description: "Product name", type: "string" },
+      category: { description: "Product category", type: "string" },
+      price: { description: "Price in dollars", type: "number" },
+      inStock: { description: "Whether product is in stock", type: "boolean" },
+      rating: { description: "Customer rating (0-5)", type: "number" },
+    },
   },
   examples: [
-    "
-    "
-  ]
+    "CALL mockProducts(10) YIELD name, price",
+    "CALL mockProducts(50) YIELD name, price, category WHERE category = 'Electronics'",
+  ],
 })
-export class MockProducts {
+export class MockProducts extends AsyncFunction {
   private readonly categories: string[];
   private readonly adjectives: string[];
   private readonly nouns: string[];
 
   constructor(
-    categories: string[] = [
-    adjectives: string[] = [
-    nouns: string[] = [
+    categories: string[] = ["Electronics", "Clothing", "Books", "Home", "Sports"],
+    adjectives: string[] = ["Premium", "Basic", "Pro", "Ultra", "Classic"],
+    nouns: string[] = ["Widget", "Gadget", "Item", "Product", "Thing"]
   ) {
+    super();
     this.categories = categories;
     this.adjectives = adjectives;
     this.nouns = nouns;

@@ -125,22 +147,22 @@ export class MockProducts {
 
   /**
    * Generates mock product data.
-   *
+   *
    * @param count - Number of mock products to generate
    */
-  async *
+  async *generate(count: number = 5): AsyncGenerator<any, void, unknown> {
     for (let i = 0; i < count; i++) {
       const adj = this.adjectives[Math.floor(Math.random() * this.adjectives.length)];
       const noun = this.nouns[Math.floor(Math.random() * this.nouns.length)];
       const category = this.categories[Math.floor(Math.random() * this.categories.length)];
-
+
       yield {
         id: i + 1,
         name: `${adj} ${noun} ${i + 1}`,
         category,
         price: Math.round(Math.random() * 1000 * 100) / 100,
         inStock: Math.random() > 0.2,
-        rating: Math.round(Math.random() * 50) / 10
+        rating: Math.round(Math.random() * 50) / 10,
       };
     }
   }