flowquery 1.0.5 → 1.0.6

Files changed (131)
  1. package/README.md +182 -0
  2. package/dist/extensibility.d.ts +9 -0
  3. package/dist/extensibility.d.ts.map +1 -0
  4. package/dist/extensibility.js +25 -0
  5. package/dist/extensibility.js.map +1 -0
  6. package/dist/flowquery.min.js +1 -1
  7. package/dist/parsing/functions/avg.d.ts.map +1 -1
  8. package/dist/parsing/functions/avg.js +20 -2
  9. package/dist/parsing/functions/avg.js.map +1 -1
  10. package/dist/parsing/functions/collect.d.ts.map +1 -1
  11. package/dist/parsing/functions/collect.js +20 -2
  12. package/dist/parsing/functions/collect.js.map +1 -1
  13. package/dist/parsing/functions/extensibility/index.d.ts +37 -0
  14. package/dist/parsing/functions/extensibility/index.d.ts.map +1 -0
  15. package/dist/parsing/functions/extensibility/index.js +50 -0
  16. package/dist/parsing/functions/extensibility/index.js.map +1 -0
  17. package/dist/parsing/functions/function_factory.d.ts +23 -0
  18. package/dist/parsing/functions/function_factory.d.ts.map +1 -1
  19. package/dist/parsing/functions/function_factory.js +44 -47
  20. package/dist/parsing/functions/function_factory.js.map +1 -1
  21. package/dist/parsing/functions/function_metadata.d.ts +57 -6
  22. package/dist/parsing/functions/function_metadata.d.ts.map +1 -1
  23. package/dist/parsing/functions/function_metadata.js +103 -153
  24. package/dist/parsing/functions/function_metadata.js.map +1 -1
  25. package/dist/parsing/functions/functions.d.ts.map +1 -1
  26. package/dist/parsing/functions/functions.js +37 -2
  27. package/dist/parsing/functions/functions.js.map +1 -1
  28. package/dist/parsing/functions/join.d.ts.map +1 -1
  29. package/dist/parsing/functions/join.js +21 -2
  30. package/dist/parsing/functions/join.js.map +1 -1
  31. package/dist/parsing/functions/predicate_function.d.ts +1 -0
  32. package/dist/parsing/functions/predicate_function.d.ts.map +1 -1
  33. package/dist/parsing/functions/predicate_function.js +3 -0
  34. package/dist/parsing/functions/predicate_function.js.map +1 -1
  35. package/dist/parsing/functions/predicate_sum.d.ts.map +1 -1
  36. package/dist/parsing/functions/predicate_sum.js +23 -2
  37. package/dist/parsing/functions/predicate_sum.js.map +1 -1
  38. package/dist/parsing/functions/rand.d.ts.map +1 -1
  39. package/dist/parsing/functions/rand.js +18 -2
  40. package/dist/parsing/functions/rand.js.map +1 -1
  41. package/dist/parsing/functions/range.d.ts.map +1 -1
  42. package/dist/parsing/functions/range.js +21 -2
  43. package/dist/parsing/functions/range.js.map +1 -1
  44. package/dist/parsing/functions/replace.d.ts.map +1 -1
  45. package/dist/parsing/functions/replace.js +22 -2
  46. package/dist/parsing/functions/replace.js.map +1 -1
  47. package/dist/parsing/functions/round.d.ts.map +1 -1
  48. package/dist/parsing/functions/round.js +20 -2
  49. package/dist/parsing/functions/round.js.map +1 -1
  50. package/dist/parsing/functions/size.d.ts.map +1 -1
  51. package/dist/parsing/functions/size.js +20 -2
  52. package/dist/parsing/functions/size.js.map +1 -1
  53. package/dist/parsing/functions/split.d.ts.map +1 -1
  54. package/dist/parsing/functions/split.js +21 -2
  55. package/dist/parsing/functions/split.js.map +1 -1
  56. package/dist/parsing/functions/stringify.d.ts.map +1 -1
  57. package/dist/parsing/functions/stringify.js +20 -2
  58. package/dist/parsing/functions/stringify.js.map +1 -1
  59. package/dist/parsing/functions/sum.d.ts.map +1 -1
  60. package/dist/parsing/functions/sum.js +20 -2
  61. package/dist/parsing/functions/sum.js.map +1 -1
  62. package/dist/parsing/functions/to_json.d.ts.map +1 -1
  63. package/dist/parsing/functions/to_json.js +20 -2
  64. package/dist/parsing/functions/to_json.js.map +1 -1
  65. package/dist/parsing/parser.d.ts.map +1 -1
  66. package/dist/parsing/parser.js +1 -2
  67. package/dist/parsing/parser.js.map +1 -1
  68. package/docs/flowquery.min.js +1 -1
  69. package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
  70. package/misc/apps/RAG/.env.example +14 -0
  71. package/misc/apps/RAG/README.md +0 -7
  72. package/misc/apps/RAG/package.json +16 -7
  73. package/misc/apps/RAG/public/index.html +18 -0
  74. package/misc/apps/RAG/src/App.css +42 -0
  75. package/misc/apps/RAG/src/App.tsx +50 -0
  76. package/misc/apps/RAG/src/components/ApiKeySettings.tsx +245 -0
  77. package/misc/apps/RAG/src/components/ChatContainer.css +67 -0
  78. package/misc/apps/RAG/src/components/ChatContainer.tsx +239 -0
  79. package/misc/apps/RAG/src/components/ChatInput.css +23 -0
  80. package/misc/apps/RAG/src/components/ChatInput.tsx +62 -0
  81. package/misc/apps/RAG/src/components/ChatMessage.css +136 -0
  82. package/misc/apps/RAG/src/components/ChatMessage.tsx +152 -0
  83. package/misc/apps/RAG/src/components/FlowQueryAgent.ts +390 -0
  84. package/misc/apps/RAG/src/components/FlowQueryRunner.css +104 -0
  85. package/misc/apps/RAG/src/components/FlowQueryRunner.tsx +332 -0
  86. package/misc/apps/RAG/src/components/index.ts +15 -0
  87. package/misc/apps/RAG/src/index.tsx +17 -0
  88. package/misc/apps/RAG/src/plugins/PluginRegistry.ts +136 -0
  89. package/misc/apps/RAG/src/plugins/README.md +139 -0
  90. package/misc/apps/RAG/src/plugins/index.ts +72 -0
  91. package/misc/apps/RAG/src/plugins/loaders/CatFacts.ts +79 -0
  92. package/misc/apps/RAG/src/plugins/loaders/FetchJson.ts +71 -0
  93. package/misc/apps/RAG/src/plugins/loaders/Llm.ts +441 -0
  94. package/misc/apps/RAG/src/plugins/loaders/MockData.ts +161 -0
  95. package/misc/apps/RAG/src/plugins/types.ts +52 -0
  96. package/misc/apps/RAG/src/prompts/FlowQuerySystemPrompt.ts +385 -0
  97. package/misc/apps/RAG/src/prompts/index.ts +10 -0
  98. package/misc/apps/RAG/src/utils/FlowQueryExecutor.ts +131 -0
  99. package/misc/apps/RAG/src/utils/FlowQueryExtractor.ts +203 -0
  100. package/misc/apps/RAG/src/utils/index.ts +9 -0
  101. package/misc/apps/RAG/tsconfig.json +4 -2
  102. package/misc/apps/RAG/webpack.config.js +23 -12
  103. package/package.json +7 -1
  104. package/src/extensibility.ts +9 -0
  105. package/src/parsing/functions/avg.ts +10 -0
  106. package/src/parsing/functions/collect.ts +10 -0
  107. package/src/parsing/functions/extensibility/index.ts +54 -0
  108. package/src/parsing/functions/function_factory.ts +51 -48
  109. package/src/parsing/functions/function_metadata.ts +132 -156
  110. package/src/parsing/functions/functions.ts +27 -0
  111. package/src/parsing/functions/join.ts +11 -0
  112. package/src/parsing/functions/predicate_function.ts +4 -0
  113. package/src/parsing/functions/predicate_sum.ts +13 -0
  114. package/src/parsing/functions/rand.ts +8 -0
  115. package/src/parsing/functions/range.ts +11 -0
  116. package/src/parsing/functions/replace.ts +12 -0
  117. package/src/parsing/functions/round.ts +10 -0
  118. package/src/parsing/functions/size.ts +10 -0
  119. package/src/parsing/functions/split.ts +11 -0
  120. package/src/parsing/functions/stringify.ts +10 -0
  121. package/src/parsing/functions/sum.ts +10 -0
  122. package/src/parsing/functions/to_json.ts +10 -0
  123. package/src/parsing/parser.ts +1 -2
  124. package/tests/parsing/function_plugins.test.ts +11 -11
  125. package/tsconfig.json +1 -0
  126. package/dist/parsing/functions/predicate_function_factory.d.ts +0 -6
  127. package/dist/parsing/functions/predicate_function_factory.d.ts.map +0 -1
  128. package/dist/parsing/functions/predicate_function_factory.js +0 -19
  129. package/dist/parsing/functions/predicate_function_factory.js.map +0 -1
  130. package/misc/apps/RAG/src/index.ts +0 -20
  131. package/src/parsing/functions/predicate_function_factory.ts +0 -15
package/misc/apps/RAG/src/plugins/loaders/Llm.ts
@@ -0,0 +1,441 @@
+/**
+ * OpenAI LLM Plugin: Call OpenAI-compatible APIs for chat completions.
+ *
+ * Usage in FlowQuery:
+ *   LOAD JSON FROM llm('What is the capital of France?') AS response
+ *   RETURN response.choices[0].message.content
+ *
+ * With custom options:
+ *   LOAD JSON FROM llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) AS response
+ *   RETURN response.choices[0].message.content
+ *
+ * This loader can also be used standalone outside of FlowQuery:
+ *   import { LlmLoader } from './plugins/loaders/Llm';
+ *   const loader = new LlmLoader();
+ *   const response = await loader.complete('What is 2+2?');
+ *   console.log(response.choices[0].message.content);
+ */
+
+import { AsyncLoaderPlugin } from '../types';
+
+// Default configuration - can be overridden via options
+const DEFAULT_CONFIG = {
+  apiUrl: 'https://api.openai.com/v1/chat/completions',
+  model: 'gpt-4o-mini',
+  temperature: 0.7,
+  maxTokens: undefined as number | undefined,
+};
+
+/**
+ * Options for LLM requests.
+ */
+export interface LlmOptions {
+  /** OpenAI API key. Configure in Settings or pass as option. */
+  apiKey?: string;
+  /** API endpoint URL. Defaults to OpenAI's chat completions endpoint. */
+  apiUrl?: string;
+  /** Model to use. Defaults to 'gpt-4o-mini'. */
+  model?: string;
+  /** Sampling temperature (0-2). Defaults to 0.7. */
+  temperature?: number;
+  /** Maximum tokens to generate. */
+  maxTokens?: number;
+  /** System prompt to set context for the conversation. */
+  systemPrompt?: string;
+  /** Additional messages to include in the conversation. */
+  messages?: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>;
+  /** Organization ID for OpenAI API. */
+  organizationId?: string;
+  /** Additional headers to include in the request. */
+  headers?: Record<string, string>;
+  /** Enable streaming response. */
+  stream?: boolean;
+  /** Additional body parameters to pass to the API. */
+  additionalParams?: Record<string, any>;
+}
+
+/**
+ * OpenAI-compatible chat completion response.
+ */
+export interface LlmResponse {
+  id: string;
+  object: string;
+  created: number;
+  model: string;
+  choices: Array<{
+    index: number;
+    message: {
+      role: string;
+      content: string;
+    };
+    finish_reason: string;
+  }>;
+  usage?: {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+  };
+}
+
+/**
+ * LLM Loader class - calls OpenAI-compatible APIs for chat completions.
+ */
+export class LlmLoader {
+  private readonly defaultOptions: Partial<LlmOptions>;
+
+  constructor(defaultOptions: Partial<LlmOptions> = {}) {
+    this.defaultOptions = defaultOptions;
+  }
+
+  /**
+   * Get API key from options or localStorage (browser).
+   */
+  private getApiKey(options?: LlmOptions): string {
+    // First check options
+    if (options?.apiKey) {
+      return options.apiKey;
+    }
+
+    // Check default options
+    if (this.defaultOptions.apiKey) {
+      return this.defaultOptions.apiKey;
+    }
+
+    // In browser, check localStorage
+    if (typeof window !== 'undefined' && typeof localStorage !== 'undefined') {
+      const storedKey = localStorage.getItem('flowquery_openai_api_key');
+      if (storedKey) {
+        return storedKey;
+      }
+    }
+
+    throw new Error(
+      'OpenAI API key is required. Configure it in Settings or pass apiKey in options.'
+    );
+  }
+
+  /**
+   * Get stored configuration from localStorage (browser only).
+   */
+  private getStoredConfig(): Partial<LlmOptions> {
+    if (typeof window === 'undefined' || typeof localStorage === 'undefined') {
+      return {};
+    }
+
+    return {
+      organizationId: localStorage.getItem('flowquery_openai_org_id') || undefined,
+      model: localStorage.getItem('flowquery_openai_model') || undefined,
+    };
+  }
+
+  /**
+   * Build the request body for the API call.
+   */
+  private buildRequestBody(prompt: string, options?: LlmOptions): Record<string, any> {
+    const messages: Array<{ role: string; content: string }> = [];
+
+    // Add system prompt if provided
+    if (options?.systemPrompt) {
+      messages.push({ role: 'system', content: options.systemPrompt });
+    }
+
+    // Add any additional messages
+    if (options?.messages) {
+      messages.push(...options.messages);
+    }
+
+    // Add the user prompt
+    messages.push({ role: 'user', content: prompt });
+
+    const body: Record<string, any> = {
+      model: options?.model || this.defaultOptions.model || DEFAULT_CONFIG.model,
+      messages,
+      temperature: options?.temperature ?? this.defaultOptions.temperature ?? DEFAULT_CONFIG.temperature,
+      ...(options?.additionalParams || {}),
+    };
+
+    if (options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens) {
+      body.max_tokens = options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens;
+    }
+
+    if (options?.stream) {
+      body.stream = true;
+    }
+
+    return body;
+  }
+
+  /**
+   * Build request headers.
+   */
+  private buildHeaders(apiKey: string, options?: LlmOptions): Record<string, string> {
+    const headers: Record<string, string> = {
+      'Content-Type': 'application/json',
+      'Authorization': `Bearer ${apiKey}`,
+      ...(options?.headers || {}),
+    };
+
+    if (options?.organizationId) {
+      headers['OpenAI-Organization'] = options.organizationId;
+    }
+
+    return headers;
+  }
+
+  /**
+   * Call the OpenAI-compatible API and return the full response.
+   *
+   * @param prompt - The user prompt to send to the LLM
+   * @param options - Optional configuration for the request
+   * @returns The full API response
+   *
+   * @example
+   * ```typescript
+   * const loader = new LlmLoader();
+   * const response = await loader.complete('What is the capital of France?');
+   * console.log(response.choices[0].message.content);
+   * ```
+   */
+  async complete(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
+    // Merge stored config with provided options (options take precedence)
+    const storedConfig = this.getStoredConfig();
+    const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
+
+    const apiKey = this.getApiKey(mergedOptions);
+    const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
+    const headers = this.buildHeaders(apiKey, mergedOptions);
+    const body = this.buildRequestBody(prompt, mergedOptions);
+
+    const response = await fetch(apiUrl, {
+      method: 'POST',
+      headers,
+      body: JSON.stringify(body),
+    });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`LLM API error (${response.status}): ${errorText}`);
+    }
+
+    return response.json();
+  }
+
+  /**
+   * Call the OpenAI-compatible API with streaming and yield each chunk.
+   *
+   * @param prompt - The user prompt to send to the LLM
+   * @param options - Optional configuration for the request
+   * @yields Parsed SSE data chunks from the stream
+   *
+   * @example
+   * ```typescript
+   * const loader = new LlmLoader();
+   * for await (const chunk of loader.stream('Tell me a story')) {
+   *   if (chunk.choices?.[0]?.delta?.content) {
+   *     process.stdout.write(chunk.choices[0].delta.content);
+   *   }
+   * }
+   * ```
+   */
+  async *stream(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
+    // Merge stored config with provided options (options take precedence)
+    const storedConfig = this.getStoredConfig();
+    const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };
+
+    const apiKey = this.getApiKey(mergedOptions);
+    const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
+    const headers = this.buildHeaders(apiKey, mergedOptions);
+    const body = this.buildRequestBody(prompt, { ...mergedOptions, stream: true });
+
+    const response = await fetch(apiUrl, {
+      method: 'POST',
+      headers,
+      body: JSON.stringify(body),
+    });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`LLM API error (${response.status}): ${errorText}`);
+    }
+
+    if (!response.body) {
+      throw new Error('Response body is null');
+    }
+
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+    let buffer = '';
+
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split('\n');
+        buffer = lines.pop() || '';
+
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (trimmed.startsWith('data: ')) {
+            const data = trimmed.slice(6);
+            if (data === '[DONE]') {
+              return;
+            }
+            try {
+              yield JSON.parse(data);
+            } catch {
+              // Skip invalid JSON chunks
+            }
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+
+  /**
+   * Async generator provider for FlowQuery LOAD operations.
+   */
+  async *fetch(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
+    if (options?.stream) {
+      yield* this.stream(prompt, options);
+    } else {
+      const response = await this.complete(prompt, options);
+      yield response;
+    }
+  }
+
+  /**
+   * Extract just the text content from an LLM response.
+   * Convenience method for the common use case.
+   *
+   * @param response - The LLM response object
+   * @returns The text content from the first choice
+   */
+  static extractContent(response: LlmResponse): string {
+    return response.choices?.[0]?.message?.content || '';
+  }
+}
+
+/**
+ * Call the OpenAI-compatible API and return the full response.
+ * This function can be used standalone outside of FlowQuery.
+ *
+ * @param prompt - The user prompt to send to the LLM
+ * @param options - Optional configuration for the request
+ * @returns The full API response
+ *
+ * @example
+ * ```typescript
+ * import { llm } from './plugins/loaders/Llm';
+ *
+ * // Simple usage
+ * const response = await llm('What is the capital of France?');
+ * console.log(response.choices[0].message.content);
+ *
+ * // With options
+ * const translated = await llm('Translate to Spanish: Hello', {
+ *   model: 'gpt-4o',
+ *   temperature: 0.3,
+ *   systemPrompt: 'You are a professional translator.'
+ * });
+ * ```
+ */
+export async function llm(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
+  return new LlmLoader().complete(prompt, options);
+}
+
+/**
+ * Call the OpenAI-compatible API with streaming and yield each chunk.
+ * This function can be used standalone outside of FlowQuery.
+ *
+ * @param prompt - The user prompt to send to the LLM
+ * @param options - Optional configuration for the request
+ * @yields Parsed SSE data chunks from the stream
+ *
+ * @example
+ * ```typescript
+ * import { llmStream } from './plugins/loaders/Llm';
+ *
+ * for await (const chunk of llmStream('Tell me a story')) {
+ *   if (chunk.choices?.[0]?.delta?.content) {
+ *     process.stdout.write(chunk.choices[0].delta.content);
+ *   }
+ * }
+ * ```
+ */
+export async function* llmStream(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
+  yield* new LlmLoader().stream(prompt, options);
+}
+
+/**
+ * Extract just the text content from an LLM response.
+ * Convenience function for the common use case.
+ *
+ * @param response - The LLM response object
+ * @returns The text content from the first choice
+ */
+export function extractContent(response: LlmResponse): string {
+  return LlmLoader.extractContent(response);
+}
+
+export const llmPlugin: AsyncLoaderPlugin = {
+  name: 'llm',
+  provider: (prompt: string, options?: LlmOptions) => new LlmLoader().fetch(prompt, options),
+  metadata: {
+    description: 'Calls OpenAI-compatible chat completion APIs. Supports GPT models and any OpenAI-compatible endpoint.',
+    category: 'ai',
+    parameters: [
+      {
+        name: 'prompt',
+        description: 'The user prompt to send to the LLM',
+        type: 'string',
+        required: true,
+        example: 'What is the capital of France?'
+      },
+      {
+        name: 'options',
+        description: 'Optional configuration for the LLM request',
+        type: 'object',
+        required: false,
+        properties: {
+          apiKey: { description: 'OpenAI API key', type: 'string' },
+          apiUrl: { description: 'API endpoint URL (defaults to OpenAI chat completions)', type: 'string' },
+          model: { description: 'Model to use (defaults to gpt-4o-mini)', type: 'string' },
+          temperature: { description: 'Sampling temperature 0-2 (defaults to 0.7)', type: 'number' },
+          maxTokens: { description: 'Maximum tokens to generate', type: 'number' },
+          systemPrompt: { description: 'System prompt to set context', type: 'string' },
+          messages: { description: 'Additional conversation messages', type: 'array' },
+          organizationId: { description: 'OpenAI organization ID', type: 'string' },
+          headers: { description: 'Additional request headers', type: 'object' },
+          stream: { description: 'Enable streaming response', type: 'boolean' },
+          additionalParams: { description: 'Additional API parameters', type: 'object' }
+        }
+      }
+    ],
+    output: {
+      description: 'OpenAI chat completion response',
+      type: 'object',
+      properties: {
+        id: { description: 'Unique identifier for the completion', type: 'string' },
+        model: { description: 'Model used for completion', type: 'string' },
+        choices: {
+          description: 'Array of completion choices',
+          type: 'array',
+          example: [{ message: { role: 'assistant', content: 'Paris is the capital of France.' } }]
+        },
+        usage: { description: 'Token usage statistics', type: 'object' }
+      }
+    },
+    examples: [
+      "LOAD JSON FROM llm('What is 2+2?') AS response RETURN response.choices[0].message.content",
+      "LOAD JSON FROM llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) AS response RETURN response.choices[0].message.content",
+      "LOAD JSON FROM llm('Write a haiku', { systemPrompt: 'You are a poet' }) AS response RETURN response.choices[0].message.content"
+    ],
+    notes: 'Requires API key configured in Settings or passed as apiKey option. Works with any OpenAI-compatible API by setting the apiUrl option.'
+  }
+};
+
+export default llmPlugin;
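
For context, the helpers added above (`llm`, `llmStream`, `extractContent`) are usable without a FlowQuery query. A minimal consumer sketch follows; the relative import path and the API key value are placeholders, not part of this diff:

```typescript
// Sketch only: exercises the exports added in Llm.ts above.
import { llm, llmStream, extractContent } from './plugins/loaders/Llm';

async function demo(): Promise<void> {
  // Non-streaming: one full LlmResponse, then pull out the first choice's text.
  const response = await llm('What is the capital of France?', {
    apiKey: 'sk-...', // placeholder; normally read from Settings/localStorage
    temperature: 0.2,
  });
  console.log(extractContent(response));

  // Streaming: each chunk is a parsed SSE payload carrying delta content.
  let story = '';
  for await (const chunk of llmStream('Tell me a two-line story', { apiKey: 'sk-...' })) {
    story += chunk.choices?.[0]?.delta?.content ?? '';
  }
  console.log(story);
}

demo().catch(console.error);
```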
package/misc/apps/RAG/src/plugins/loaders/MockData.ts
@@ -0,0 +1,161 @@
+/**
+ * Example plugin: Generate mock data for testing.
+ *
+ * Usage in FlowQuery:
+ *   LOAD JSON FROM mockUsers(10) AS user
+ *   RETURN user.name, user.email
+ */
+
+import { AsyncLoaderPlugin } from '../types';
+
+/**
+ * MockUsers loader class - generates mock user data for testing.
+ */
+export class MockUsersLoader {
+  private readonly firstNames: string[];
+  private readonly lastNames: string[];
+  private readonly domains: string[];
+
+  constructor(
+    firstNames: string[] = ['Alice', 'Bob', 'Charlie', 'Diana', 'Eve', 'Frank', 'Grace', 'Henry', 'Ivy', 'Jack'],
+    lastNames: string[] = ['Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Garcia', 'Miller', 'Davis', 'Rodriguez', 'Martinez'],
+    domains: string[] = ['example.com', 'test.org', 'demo.net']
+  ) {
+    this.firstNames = firstNames;
+    this.lastNames = lastNames;
+    this.domains = domains;
+  }
+
+  /**
+   * Generates mock user data.
+   *
+   * @param count - Number of mock users to generate
+   */
+  async *fetch(count: number = 5): AsyncGenerator<any, void, unknown> {
+    for (let i = 0; i < count; i++) {
+      const firstName = this.firstNames[Math.floor(Math.random() * this.firstNames.length)];
+      const lastName = this.lastNames[Math.floor(Math.random() * this.lastNames.length)];
+      const domain = this.domains[Math.floor(Math.random() * this.domains.length)];
+
+      yield {
+        id: i + 1,
+        name: `${firstName} ${lastName}`,
+        email: `${firstName.toLowerCase()}.${lastName.toLowerCase()}@${domain}`,
+        age: Math.floor(Math.random() * 50) + 18,
+        active: Math.random() > 0.3
+      };
+    }
+  }
+}
+
+/**
+ * MockProducts loader class - generates mock product data for testing.
+ */
+export class MockProductsLoader {
+  private readonly categories: string[];
+  private readonly adjectives: string[];
+  private readonly nouns: string[];
+
+  constructor(
+    categories: string[] = ['Electronics', 'Clothing', 'Books', 'Home', 'Sports'],
+    adjectives: string[] = ['Premium', 'Basic', 'Pro', 'Ultra', 'Classic'],
+    nouns: string[] = ['Widget', 'Gadget', 'Item', 'Product', 'Thing']
+  ) {
+    this.categories = categories;
+    this.adjectives = adjectives;
+    this.nouns = nouns;
+  }
+
+  /**
+   * Generates mock product data.
+   *
+   * @param count - Number of mock products to generate
+   */
+  async *fetch(count: number = 5): AsyncGenerator<any, void, unknown> {
+    for (let i = 0; i < count; i++) {
+      const adj = this.adjectives[Math.floor(Math.random() * this.adjectives.length)];
+      const noun = this.nouns[Math.floor(Math.random() * this.nouns.length)];
+      const category = this.categories[Math.floor(Math.random() * this.categories.length)];
+
+      yield {
+        id: i + 1,
+        name: `${adj} ${noun} ${i + 1}`,
+        category,
+        price: Math.round(Math.random() * 1000 * 100) / 100,
+        inStock: Math.random() > 0.2,
+        rating: Math.round(Math.random() * 50) / 10
+      };
+    }
+  }
+}
+
+export const mockUsersPlugin: AsyncLoaderPlugin = {
+  name: 'mockUsers',
+  provider: (count: number = 5) => new MockUsersLoader().fetch(count),
+  metadata: {
+    description: 'Generates mock user data for testing purposes',
+    category: 'testing',
+    parameters: [
+      {
+        name: 'count',
+        description: 'Number of mock users to generate',
+        type: 'number',
+        required: false,
+        default: 5
+      }
+    ],
+    output: {
+      description: 'Mock user object',
+      type: 'object',
+      properties: {
+        id: { description: 'User ID', type: 'number' },
+        name: { description: 'Full name', type: 'string' },
+        email: { description: 'Email address', type: 'string' },
+        age: { description: 'Age in years', type: 'number' },
+        active: { description: 'Whether user is active', type: 'boolean' }
+      }
+    },
+    examples: [
+      "LOAD JSON FROM mockUsers(10) AS user RETURN user.name, user.email",
+      "LOAD JSON FROM mockUsers(20) AS user RETURN user WHERE user.active = true"
+    ]
+  }
+};
+
+export const mockProductsPlugin: AsyncLoaderPlugin = {
+  name: 'mockProducts',
+  provider: (count: number = 5) => new MockProductsLoader().fetch(count),
+  metadata: {
+    description: 'Generates mock product data for testing purposes',
+    category: 'testing',
+    parameters: [
+      {
+        name: 'count',
+        description: 'Number of mock products to generate',
+        type: 'number',
+        required: false,
+        default: 5
+      }
+    ],
+    output: {
+      description: 'Mock product object',
+      type: 'object',
+      properties: {
+        id: { description: 'Product ID', type: 'number' },
+        name: { description: 'Product name', type: 'string' },
+        category: { description: 'Product category', type: 'string' },
+        price: { description: 'Price in dollars', type: 'number' },
+        inStock: { description: 'Whether product is in stock', type: 'boolean' },
+        rating: { description: 'Customer rating (0-5)', type: 'number' }
+      }
+    },
+    examples: [
+      "LOAD JSON FROM mockProducts(10) AS p RETURN p.name, p.price",
+      "LOAD JSON FROM mockProducts(50) AS p RETURN p WHERE p.category = 'Electronics'"
+    ]
+  }
+};
+
+export const plugins = [mockUsersPlugin, mockProductsPlugin];
+
+export default plugins;
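
Both mock loaders are plain async generators, so they can also be driven directly with `for await` outside a FlowQuery `LOAD` clause. A small sketch (the relative import path is an assumption):

```typescript
// Sketch: consuming the MockData generators added above, outside FlowQuery.
import { MockUsersLoader, MockProductsLoader } from './plugins/loaders/MockData';

async function preview(): Promise<void> {
  // Yields objects shaped { id, name, email, age, active }.
  for await (const user of new MockUsersLoader().fetch(3)) {
    console.log(`${user.id}: ${user.name} <${user.email}> active=${user.active}`);
  }
  // Yields objects shaped { id, name, category, price, inStock, rating }.
  for await (const product of new MockProductsLoader().fetch(2)) {
    console.log(`${product.name} (${product.category}): $${product.price}`);
  }
}

preview().catch(console.error);
```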
package/misc/apps/RAG/src/plugins/types.ts
@@ -0,0 +1,52 @@
+/**
+ * Plugin type definitions for FlowQuery async data loaders.
+ *
+ * Re-exports core types from the FlowQuery library for consistency.
+ */
+
+import {
+  ParameterSchema,
+  OutputSchema,
+  FunctionMetadata,
+  AsyncDataProvider
+} from 'flowquery';
+
+// Re-export core types from FlowQuery
+export { ParameterSchema, OutputSchema, FunctionMetadata, AsyncDataProvider };
+
+/**
+ * Alias for FunctionMetadata - used for plugin definitions.
+ * This type is identical to FunctionMetadata from FlowQuery.
+ */
+export type PluginMetadata = FunctionMetadata;
+
+/**
+ * A plugin definition for an async data loader.
+ */
+export interface AsyncLoaderPlugin {
+  /**
+   * The name of the function as it will be used in FlowQuery.
+   * Will be lowercased when registered.
+   */
+  name: string;
+
+  /**
+   * The async data provider function.
+   */
+  provider: AsyncDataProvider;
+
+  /**
+   * Optional metadata describing the function for LLM consumption.
+   */
+  metadata?: Omit<PluginMetadata, 'name'>;
+}
+
+/**
+ * Interface that plugin modules should export.
+ */
+export interface PluginModule {
+  /**
+   * Array of plugins defined in this module.
+   */
+  plugins: AsyncLoaderPlugin[];
+}
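
Taken together, `types.ts` defines the contract that `Llm.ts` and `MockData.ts` satisfy. A hypothetical third-party module conforming to `PluginModule` might look like the sketch below; the `greeter` loader is invented for illustration, and only the interfaces and the provider pattern come from this diff:

```typescript
// Sketch of a minimal plugin module; 'greeter' is a made-up example loader.
// Import path is an assumption based on the file layout in this diff.
import { AsyncLoaderPlugin, PluginModule } from './plugins/types';

// An async generator, mirroring the fetch() pattern used by the diff's loaders.
async function* greet(who: string = 'world'): AsyncGenerator<any, void, unknown> {
  yield { message: `Hello, ${who}!` };
}

const greeterPlugin: AsyncLoaderPlugin = {
  name: 'greeter', // lowercased when registered, per the AsyncLoaderPlugin docs
  provider: (who: string = 'world') => greet(who),
  metadata: {
    description: 'Yields a single greeting object',
    category: 'testing',
    parameters: [
      { name: 'who', description: 'Name to greet', type: 'string', required: false, default: 'world' }
    ],
    examples: ["LOAD JSON FROM greeter('FlowQuery') AS g RETURN g.message"]
  }
};

// The shape presumably consumed by the app's registry (src/plugins/PluginRegistry.ts).
const greeterModule: PluginModule = { plugins: [greeterPlugin] };
export default greeterModule;
```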