flowquery 1.0.15 → 1.0.16
- package/dist/flowquery.min.js +1 -1
- package/dist/parsing/expressions/expression_map.d.ts +1 -0
- package/dist/parsing/expressions/expression_map.d.ts.map +1 -1
- package/dist/parsing/expressions/expression_map.js +3 -0
- package/dist/parsing/expressions/expression_map.js.map +1 -1
- package/dist/parsing/operations/call.d.ts.map +1 -1
- package/dist/parsing/operations/call.js +3 -1
- package/dist/parsing/operations/call.js.map +1 -1
- package/docs/flowquery.min.js +1 -1
- package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
- package/misc/apps/RAG/package.json +1 -1
- package/misc/apps/RAG/src/plugins/loaders/CatFacts.ts +21 -26
- package/misc/apps/RAG/src/plugins/loaders/FetchJson.ts +24 -25
- package/misc/apps/RAG/src/plugins/loaders/Form.ts +163 -147
- package/misc/apps/RAG/src/plugins/loaders/Llm.ts +103 -90
- package/misc/apps/RAG/src/plugins/loaders/MockData.ts +74 -54
- package/misc/apps/RAG/src/plugins/loaders/Table.ts +104 -101
- package/misc/apps/RAG/src/plugins/loaders/Weather.ts +47 -36
- package/misc/apps/RAG/src/prompts/FlowQuerySystemPrompt.ts +77 -78
- package/package.json +1 -1
- package/src/parsing/expressions/expression_map.ts +3 -0
- package/src/parsing/operations/call.ts +3 -1
- package/tests/compute/runner.test.ts +6 -6
- package/tests/parsing/parser.test.ts +3 -6
- package/tests/tokenization/tokenizer.test.ts +17 -17
package/misc/apps/RAG/src/plugins/loaders/Llm.ts:

@@ -1,27 +1,24 @@
 /**
  * OpenAI LLM Plugin: Call OpenAI-compatible APIs for chat completions.
- *
+ *
  * Usage in FlowQuery:
- *
- *
- *
+ * CALL llm('What is the capital of France?') YIELD choices
+ *
  * With custom options:
- *
- *
- *
+ * CALL llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) YIELD choices, usage
+ *
  * This class can also be used standalone outside of FlowQuery:
  * import { Llm } from './plugins/loaders/Llm';
  * const llmInstance = new Llm();
  * const response = await llmInstance.complete('What is 2+2?');
  * console.log(response.choices[0].message.content);
  */
-
-import { FunctionDef, AsyncFunction } from 'flowquery/extensibility';
+import { AsyncFunction, FunctionDef } from "flowquery/extensibility";
 
 // Default configuration - can be overridden via options
 const DEFAULT_CONFIG = {
-  apiUrl: 'https://api.openai.com/v1/chat/completions',
-  model: 'gpt-4o-mini',
+  apiUrl: "https://api.openai.com/v1/chat/completions",
+  model: "gpt-4o-mini",
   temperature: 0.7,
   maxTokens: undefined as number | undefined,
 };
@@ -43,7 +40,7 @@ export interface LlmOptions {
   /** System prompt to set context for the conversation. */
   systemPrompt?: string;
   /** Additional messages to include in the conversation. */
-  messages?: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>;
+  messages?: Array<{ role: "system" | "user" | "assistant"; content: string }>;
   /** Organization ID for OpenAI API. */
   organizationId?: string;
   /** Additional headers to include in the request. */
@@ -81,56 +78,65 @@ export interface LlmResponse {
  * Llm class - calls OpenAI-compatible APIs for chat completions.
  */
 @FunctionDef({
-  description:
-    'Calls OpenAI-compatible chat completion APIs. Supports GPT models and any OpenAI-compatible endpoint.',
+  description:
+    "Calls OpenAI-compatible chat completion APIs. Supports GPT models and any OpenAI-compatible endpoint.",
+  category: "async",
   parameters: [
     {
-      name: 'prompt',
-      description: 'The user prompt to send to the LLM',
-      type: 'string',
+      name: "prompt",
+      description: "The user prompt to send to the LLM",
+      type: "string",
       required: true,
-      example: 'What is the capital of France?',
+      example: "What is the capital of France?",
     },
     {
-      name: 'options',
-      description: 'Optional configuration for the LLM request',
-      type: 'object',
+      name: "options",
+      description: "Optional configuration for the LLM request",
+      type: "object",
       required: false,
       properties: {
-        apiKey: { description: 'OpenAI API key', type: 'string' },
-        apiUrl: {
-
-
-
-
-
-
-
-
-
-
-
+        apiKey: { description: "OpenAI API key", type: "string" },
+        apiUrl: {
+          description: "API endpoint URL (defaults to OpenAI chat completions)",
+          type: "string",
+        },
+        model: { description: "Model to use (defaults to gpt-4o-mini)", type: "string" },
+        temperature: {
+          description: "Sampling temperature 0-2 (defaults to 0.7)",
+          type: "number",
+        },
+        maxTokens: { description: "Maximum tokens to generate", type: "number" },
+        systemPrompt: { description: "System prompt to set context", type: "string" },
+        messages: { description: "Additional conversation messages", type: "array" },
+        organizationId: { description: "OpenAI organization ID", type: "string" },
+        headers: { description: "Additional request headers", type: "object" },
+        stream: { description: "Enable streaming response", type: "boolean" },
+        additionalParams: { description: "Additional API parameters", type: "object" },
+      },
+    },
   ],
   output: {
-    description: 'OpenAI chat completion response',
-    type: 'object',
+    description: "OpenAI chat completion response",
+    type: "object",
     properties: {
-      id: { description: 'Unique identifier for the completion', type: 'string' },
-      model: { description: 'Model used for completion', type: 'string' },
-      choices: {
-        description: 'Array of completion choices',
-        type: 'array',
-        example: [{ message: { role: 'assistant', content: 'Paris is the capital of France.' } }],
+      id: { description: "Unique identifier for the completion", type: "string" },
+      model: { description: "Model used for completion", type: "string" },
+      choices: {
+        description: "Array of completion choices",
+        type: "array",
+        example: [
+          { message: { role: "assistant", content: "Paris is the capital of France." } },
+        ],
       },
-      usage: { description: 'Token usage statistics', type: 'object' }
-    }
+      usage: { description: "Token usage statistics", type: "object" },
+    },
   },
   examples: [
-    "
-    "
-    "
+    "CALL llm('What is 2+2?') YIELD choices",
+    "CALL llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) YIELD choices, usage",
+    "CALL llm('Write a haiku', { systemPrompt: 'You are a poet' }) YIELD choices",
   ],
-  notes: 'Requires API key configured in Settings or passed as apiKey option. Works with any OpenAI-compatible API by setting the apiUrl option.',
+  notes: "Requires API key configured in Settings or passed as apiKey option. Works with any OpenAI-compatible API by setting the apiUrl option.",
 })
 export class Llm extends AsyncFunction {
   private readonly defaultOptions: Partial<LlmOptions>;
@@ -148,22 +154,22 @@ export class Llm extends AsyncFunction {
     if (options?.apiKey) {
       return options.apiKey;
     }
-
+
     // Check default options
     if (this.defaultOptions.apiKey) {
       return this.defaultOptions.apiKey;
     }
-
+
     // In browser, check localStorage
-    if (typeof window !== 'undefined' && typeof localStorage !== 'undefined') {
-      const storedKey = localStorage.getItem('flowquery_openai_api_key');
+    if (typeof window !== "undefined" && typeof localStorage !== "undefined") {
+      const storedKey = localStorage.getItem("flowquery_openai_api_key");
       if (storedKey) {
         return storedKey;
       }
     }
-
+
     throw new Error(
-      'OpenAI API key is required. Configure it in Settings or pass apiKey in options.'
+      "OpenAI API key is required. Configure it in Settings or pass apiKey in options."
     );
   }
 
@@ -171,13 +177,13 @@ export class Llm extends AsyncFunction {
    * Get stored configuration from localStorage (browser only).
    */
   private getStoredConfig(): Partial<LlmOptions> {
-    if (typeof window === 'undefined' || typeof localStorage === 'undefined') {
+    if (typeof window === "undefined" || typeof localStorage === "undefined") {
       return {};
     }
-
+
     return {
-      organizationId: localStorage.getItem('flowquery_openai_org_id') || undefined,
-      model: localStorage.getItem('flowquery_openai_model') || undefined,
+      organizationId: localStorage.getItem("flowquery_openai_org_id") || undefined,
+      model: localStorage.getItem("flowquery_openai_model") || undefined,
     };
   }
 
@@ -189,7 +195,7 @@ export class Llm extends AsyncFunction {
 
     // Add system prompt if provided
     if (options?.systemPrompt) {
-      messages.push({ role: 'system', content: options.systemPrompt });
+      messages.push({ role: "system", content: options.systemPrompt });
     }
 
     // Add any additional messages
@@ -198,17 +204,21 @@ export class Llm extends AsyncFunction {
     }
 
     // Add the user prompt
-    messages.push({ role: 'user', content: prompt });
+    messages.push({ role: "user", content: prompt });
 
     const body: Record<string, any> = {
       model: options?.model || this.defaultOptions.model || DEFAULT_CONFIG.model,
       messages,
-      temperature: options?.temperature ?? this.defaultOptions.temperature ?? DEFAULT_CONFIG.temperature,
+      temperature:
+        options?.temperature ??
+        this.defaultOptions.temperature ??
+        DEFAULT_CONFIG.temperature,
       ...(options?.additionalParams || {}),
     };
 
     if (options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens) {
-      body.max_tokens = options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens;
+      body.max_tokens =
+        options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens;
     }
 
     if (options?.stream) {
@@ -223,13 +233,13 @@ export class Llm extends AsyncFunction {
    */
   private buildHeaders(apiKey: string, options?: LlmOptions): Record<string, string> {
     const headers: Record<string, string> = {
-      'Content-Type': 'application/json',
-      'Authorization': `Bearer ${apiKey}`,
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${apiKey}`,
       ...(options?.headers || {}),
     };
 
     if (options?.organizationId) {
-      headers['OpenAI-Organization'] = options.organizationId;
+      headers["OpenAI-Organization"] = options.organizationId;
     }
 
     return headers;
@@ -237,11 +247,11 @@ export class Llm extends AsyncFunction {
 
   /**
    * Call the OpenAI-compatible API and return the full response.
-   *
+   *
    * @param prompt - The user prompt to send to the LLM
   * @param options - Optional configuration for the request
   * @returns The full API response
-   *
+   *
   * @example
   * ```typescript
   * const llmInstance = new Llm();
@@ -253,14 +263,14 @@ export class Llm extends AsyncFunction {
     // Merge stored config with provided options (options take precedence)
     const storedConfig = this.getStoredConfig();
     const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
-
+
     const apiKey = this.getApiKey(mergedOptions);
     const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
     const headers = this.buildHeaders(apiKey, mergedOptions);
     const body = this.buildRequestBody(prompt, mergedOptions);
 
     const response = await fetch(apiUrl, {
-      method: 'POST',
+      method: "POST",
       headers,
       body: JSON.stringify(body),
     });
@@ -275,11 +285,11 @@ export class Llm extends AsyncFunction {
 
   /**
    * Call the OpenAI-compatible API with streaming and yield each chunk.
-   *
+   *
   * @param prompt - The user prompt to send to the LLM
   * @param options - Optional configuration for the request
   * @yields Parsed SSE data chunks from the stream
-   *
+   *
   * @example
   * ```typescript
   * const llmInstance = new Llm();
@@ -294,14 +304,14 @@ export class Llm extends AsyncFunction {
     // Merge stored config with provided options (options take precedence)
     const storedConfig = this.getStoredConfig();
     const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
-
+
     const apiKey = this.getApiKey(mergedOptions);
     const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
     const headers = this.buildHeaders(apiKey, mergedOptions);
     const body = this.buildRequestBody(prompt, { ...mergedOptions, stream: true });
 
     const response = await fetch(apiUrl, {
-      method: 'POST',
+      method: "POST",
       headers,
       body: JSON.stringify(body),
     });
@@ -312,12 +322,12 @@ export class Llm extends AsyncFunction {
     }
 
     if (!response.body) {
-      throw new Error('Response body is null');
+      throw new Error("Response body is null");
     }
 
     const reader = response.body.getReader();
     const decoder = new TextDecoder();
-    let buffer = '';
+    let buffer = "";
 
     try {
       while (true) {
@@ -325,14 +335,14 @@ export class Llm extends AsyncFunction {
         if (done) break;
 
         buffer += decoder.decode(value, { stream: true });
-        const lines = buffer.split('\n');
-        buffer = lines.pop() || '';
+        const lines = buffer.split("\n");
+        buffer = lines.pop() || "";
 
         for (const line of lines) {
           const trimmed = line.trim();
-          if (trimmed.startsWith('data: ')) {
+          if (trimmed.startsWith("data: ")) {
             const data = trimmed.slice(6);
-            if (data === '[DONE]') {
+            if (data === "[DONE]") {
               return;
             }
             try {
@@ -363,31 +373,31 @@ export class Llm extends AsyncFunction {
   /**
    * Extract just the text content from an LLM response.
    * Convenience method for common use case.
-   *
+   *
   * @param response - The LLM response object
   * @returns The text content from the first choice
   */
   static extractContent(response: LlmResponse): string {
-    return response.choices?.[0]?.message?.content || '';
+    return response.choices?.[0]?.message?.content || "";
   }
 }
 
 /**
  * Call the OpenAI-compatible API and return the full response.
  * This function can be used standalone outside of FlowQuery.
- *
+ *
 * @param prompt - The user prompt to send to the LLM
 * @param options - Optional configuration for the request
 * @returns The full API response
- *
+ *
 * @example
 * ```typescript
 * import { llm } from './plugins/loaders/Llm';
- *
+ *
 * // Simple usage
 * const response = await llm('What is the capital of France?');
 * console.log(response.choices[0].message.content);
- *
+ *
 * // With options
 * const response = await llm('Translate to Spanish: Hello', {
 *   model: 'gpt-4o',
@@ -403,15 +413,15 @@ export async function llm(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
 /**
  * Call the OpenAI-compatible API with streaming and yield each chunk.
  * This function can be used standalone outside of FlowQuery.
- *
+ *
 * @param prompt - The user prompt to send to the LLM
 * @param options - Optional configuration for the request
 * @yields Parsed SSE data chunks from the stream
- *
+ *
 * @example
 * ```typescript
 * import { llmStream } from './plugins/loaders/Llm';
- *
+ *
 * for await (const chunk of llmStream('Tell me a story')) {
 *   if (chunk.choices?.[0]?.delta?.content) {
 *     process.stdout.write(chunk.choices[0].delta.content);
@@ -419,14 +429,17 @@ export async function llm(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
 * }
 * ```
 */
-export async function* llmStream(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
+export async function* llmStream(
+  prompt: string,
+  options?: LlmOptions
+): AsyncGenerator<any, void, unknown> {
  yield* new Llm().stream(prompt, options);
 }
 
 /**
  * Extract just the text content from an LLM response.
  * Convenience function for common use case.
- *
+ *
 * @param response - The LLM response object
 * @returns The text content from the first choice
 */
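Nearly every Llm.ts hunk above is mechanical formatting (single to double quotes, trailing commas, Prettier-style line wrapping); the substantive changes are the rewritten usage docs and the new `category: "async"` metadata. The doc comments also describe a standalone API. A minimal sketch of that usage, following the examples in those comments; the import path and the `OPENAI_API_KEY` environment variable are illustrative assumptions, not part of the package:

```typescript
// Standalone use of the RAG app's LLM plugin, per the Llm.ts doc comments.
// Import path assumed from the file layout in the list above.
import { llm, llmStream, Llm } from "./plugins/loaders/Llm";

async function demo(): Promise<void> {
  // Full completion; apiKey would normally come from Settings or the
  // "flowquery_openai_api_key" localStorage entry in the browser.
  const response = await llm("Translate to French: Hello", {
    apiKey: process.env.OPENAI_API_KEY ?? "", // assumed env var, for illustration
    model: "gpt-4o",
    temperature: 0.3,
  });
  console.log(Llm.extractContent(response));

  // Streaming variant; each chunk is a parsed SSE data object.
  for await (const chunk of llmStream("Tell me a story", {
    apiKey: process.env.OPENAI_API_KEY ?? "",
  })) {
    if (chunk.choices?.[0]?.delta?.content) {
      process.stdout.write(chunk.choices[0].delta.content);
    }
  }
}

demo().catch(console.error);
```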
package/misc/apps/RAG/src/plugins/loaders/MockData.ts:

@@ -1,43 +1,41 @@
 /**
  * Example plugin: Generate mock data for testing.
- *
+ *
  * Usage in FlowQuery:
- *
- *   RETURN user.name, user.email
+ * CALL mockUsers(10) YIELD name, email
  */
-
-import { FunctionDef, AsyncFunction } from 'flowquery/extensibility';
+import { AsyncFunction, FunctionDef } from "flowquery/extensibility";
 
 /**
  * MockUsers class - generates mock user data for testing.
  */
 @FunctionDef({
-  description: 'Generates mock user data for testing purposes',
-  category: 'async',
+  description: "Generates mock user data for testing purposes",
+  category: "async",
   parameters: [
     {
-      name: 'count',
-      description: 'Number of mock users to generate',
-      type: 'number',
+      name: "count",
+      description: "Number of mock users to generate",
+      type: "number",
       required: false,
-      default: 5
-    }
+      default: 5,
+    },
   ],
   output: {
-    description: 'Mock user object',
-    type: 'object',
+    description: "Mock user object",
+    type: "object",
     properties: {
-      id: { description: 'User ID', type: 'number' },
-      name: { description: 'Full name', type: 'string' },
-      email: { description: 'Email address', type: 'string' },
-      age: { description: 'Age in years', type: 'number' },
-      active: { description: 'Whether user is active', type: 'boolean' }
-    }
+      id: { description: "User ID", type: "number" },
+      name: { description: "Full name", type: "string" },
+      email: { description: "Email address", type: "string" },
+      age: { description: "Age in years", type: "number" },
+      active: { description: "Whether user is active", type: "boolean" },
+    },
   },
   examples: [
-    "
-    "
-  ]
+    "CALL mockUsers(10) YIELD name, email",
+    "CALL mockUsers(20) YIELD name, email, active WHERE active = true",
+  ],
 })
 export class MockUsers extends AsyncFunction {
   private readonly firstNames: string[];
@@ -45,9 +43,31 @@ export class MockUsers extends AsyncFunction {
   private readonly domains: string[];
 
   constructor(
-    firstNames: string[] = ['Alice', 'Bob', 'Charlie', 'Diana', 'Eve', 'Frank', 'Grace', 'Henry', 'Ivy', 'Jack'],
-    lastNames: string[] = ['Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Garcia', 'Miller', 'Davis', 'Rodriguez', 'Martinez'],
-    domains: string[] = ['example.com', 'test.org', 'demo.net']
+    firstNames: string[] = [
+      "Alice",
+      "Bob",
+      "Charlie",
+      "Diana",
+      "Eve",
+      "Frank",
+      "Grace",
+      "Henry",
+      "Ivy",
+      "Jack",
+    ],
+    lastNames: string[] = [
+      "Smith",
+      "Johnson",
+      "Williams",
+      "Brown",
+      "Jones",
+      "Garcia",
+      "Miller",
+      "Davis",
+      "Rodriguez",
+      "Martinez",
+    ],
+    domains: string[] = ["example.com", "test.org", "demo.net"]
   ) {
     super();
     this.firstNames = firstNames;
@@ -57,7 +77,7 @@ export class MockUsers extends AsyncFunction {
 
   /**
    * Generates mock user data.
-   *
+   *
   * @param count - Number of mock users to generate
   */
   async *generate(count: number = 5): AsyncGenerator<any, void, unknown> {
@@ -65,13 +85,13 @@ export class MockUsers extends AsyncFunction {
       const firstName = this.firstNames[Math.floor(Math.random() * this.firstNames.length)];
       const lastName = this.lastNames[Math.floor(Math.random() * this.lastNames.length)];
       const domain = this.domains[Math.floor(Math.random() * this.domains.length)];
-
+
       yield {
         id: i + 1,
         name: `${firstName} ${lastName}`,
        email: `${firstName.toLowerCase()}.${lastName.toLowerCase()}@${domain}`,
         age: Math.floor(Math.random() * 50) + 18,
-        active: Math.random() > 0.3
+        active: Math.random() > 0.3,
       };
     }
   }
@@ -81,33 +101,33 @@ export class MockUsers extends AsyncFunction {
  * MockProducts class - generates mock product data for testing.
  */
 @FunctionDef({
-  description: 'Generates mock product data for testing purposes',
-  category: 'async',
+  description: "Generates mock product data for testing purposes",
+  category: "async",
   parameters: [
     {
-      name: 'count',
-      description: 'Number of mock products to generate',
-      type: 'number',
+      name: "count",
+      description: "Number of mock products to generate",
+      type: "number",
       required: false,
-      default: 5
-    }
+      default: 5,
+    },
   ],
   output: {
-    description: 'Mock product object',
-    type: 'object',
+    description: "Mock product object",
+    type: "object",
     properties: {
-      id: { description: 'Product ID', type: 'number' },
-      name: { description: 'Product name', type: 'string' },
-      category: { description: 'Product category', type: 'string' },
-      price: { description: 'Price in dollars', type: 'number' },
-      inStock: { description: 'Whether product is in stock', type: 'boolean' },
-      rating: { description: 'Customer rating (0-5)', type: 'number' }
-    }
+      id: { description: "Product ID", type: "number" },
+      name: { description: "Product name", type: "string" },
+      category: { description: "Product category", type: "string" },
+      price: { description: "Price in dollars", type: "number" },
+      inStock: { description: "Whether product is in stock", type: "boolean" },
+      rating: { description: "Customer rating (0-5)", type: "number" },
+    },
   },
   examples: [
-    "
-    "
-  ]
+    "CALL mockProducts(10) YIELD name, price",
+    "CALL mockProducts(50) YIELD name, price, category WHERE category = 'Electronics'",
+  ],
 })
 export class MockProducts extends AsyncFunction {
   private readonly categories: string[];
@@ -115,9 +135,9 @@ export class MockProducts extends AsyncFunction {
   private readonly nouns: string[];
 
   constructor(
-    categories: string[] = ['Electronics', 'Clothing', 'Books', 'Home', 'Sports'],
-    adjectives: string[] = ['Premium', 'Basic', 'Pro', 'Ultra', 'Classic'],
-    nouns: string[] = ['Widget', 'Gadget', 'Item', 'Product', 'Thing']
+    categories: string[] = ["Electronics", "Clothing", "Books", "Home", "Sports"],
+    adjectives: string[] = ["Premium", "Basic", "Pro", "Ultra", "Classic"],
+    nouns: string[] = ["Widget", "Gadget", "Item", "Product", "Thing"]
   ) {
     super();
     this.categories = categories;
@@ -127,7 +147,7 @@ export class MockProducts extends AsyncFunction {
 
   /**
    * Generates mock product data.
-   *
+   *
   * @param count - Number of mock products to generate
   */
   async *generate(count: number = 5): AsyncGenerator<any, void, unknown> {
@@ -135,14 +155,14 @@ export class MockProducts extends AsyncFunction {
       const adj = this.adjectives[Math.floor(Math.random() * this.adjectives.length)];
       const noun = this.nouns[Math.floor(Math.random() * this.nouns.length)];
       const category = this.categories[Math.floor(Math.random() * this.categories.length)];
-
+
       yield {
         id: i + 1,
         name: `${adj} ${noun} ${i + 1}`,
         category,
         price: Math.round(Math.random() * 1000 * 100) / 100,
         inStock: Math.random() > 0.2,
-        rating: Math.round(Math.random() * 50) / 10
+        rating: Math.round(Math.random() * 50) / 10,
       };
     }
   }