flowquery 1.0.5 → 1.0.7
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- package/README.md +74 -0
- package/dist/compute/runner.d.ts +1 -22
- package/dist/compute/runner.d.ts.map +1 -1
- package/dist/compute/runner.js.map +1 -1
- package/dist/extensibility.d.ts +35 -0
- package/dist/extensibility.d.ts.map +1 -0
- package/dist/extensibility.js +49 -0
- package/dist/extensibility.js.map +1 -0
- package/dist/flowquery.min.js +1 -1
- package/dist/index.browser.d.ts.map +1 -1
- package/dist/index.browser.js +0 -80
- package/dist/index.browser.js.map +1 -1
- package/dist/index.node.d.ts +3 -3
- package/dist/index.node.d.ts.map +1 -1
- package/dist/index.node.js +0 -80
- package/dist/index.node.js.map +1 -1
- package/dist/parsing/functions/avg.d.ts.map +1 -1
- package/dist/parsing/functions/avg.js +20 -2
- package/dist/parsing/functions/avg.js.map +1 -1
- package/dist/parsing/functions/collect.d.ts.map +1 -1
- package/dist/parsing/functions/collect.js +20 -2
- package/dist/parsing/functions/collect.js.map +1 -1
- package/dist/parsing/functions/function_factory.d.ts +26 -80
- package/dist/parsing/functions/function_factory.d.ts.map +1 -1
- package/dist/parsing/functions/function_factory.js +46 -168
- package/dist/parsing/functions/function_factory.js.map +1 -1
- package/dist/parsing/functions/function_metadata.d.ts +81 -20
- package/dist/parsing/functions/function_metadata.d.ts.map +1 -1
- package/dist/parsing/functions/function_metadata.js +154 -152
- package/dist/parsing/functions/function_metadata.js.map +1 -1
- package/dist/parsing/functions/functions.d.ts.map +1 -1
- package/dist/parsing/functions/functions.js +37 -2
- package/dist/parsing/functions/functions.js.map +1 -1
- package/dist/parsing/functions/join.d.ts.map +1 -1
- package/dist/parsing/functions/join.js +21 -2
- package/dist/parsing/functions/join.js.map +1 -1
- package/dist/parsing/functions/predicate_function.d.ts +1 -0
- package/dist/parsing/functions/predicate_function.d.ts.map +1 -1
- package/dist/parsing/functions/predicate_function.js +3 -0
- package/dist/parsing/functions/predicate_function.js.map +1 -1
- package/dist/parsing/functions/predicate_sum.d.ts.map +1 -1
- package/dist/parsing/functions/predicate_sum.js +23 -2
- package/dist/parsing/functions/predicate_sum.js.map +1 -1
- package/dist/parsing/functions/rand.d.ts.map +1 -1
- package/dist/parsing/functions/rand.js +18 -2
- package/dist/parsing/functions/rand.js.map +1 -1
- package/dist/parsing/functions/range.d.ts.map +1 -1
- package/dist/parsing/functions/range.js +21 -2
- package/dist/parsing/functions/range.js.map +1 -1
- package/dist/parsing/functions/replace.d.ts.map +1 -1
- package/dist/parsing/functions/replace.js +22 -2
- package/dist/parsing/functions/replace.js.map +1 -1
- package/dist/parsing/functions/round.d.ts.map +1 -1
- package/dist/parsing/functions/round.js +20 -2
- package/dist/parsing/functions/round.js.map +1 -1
- package/dist/parsing/functions/size.d.ts.map +1 -1
- package/dist/parsing/functions/size.js +20 -2
- package/dist/parsing/functions/size.js.map +1 -1
- package/dist/parsing/functions/split.d.ts.map +1 -1
- package/dist/parsing/functions/split.js +21 -2
- package/dist/parsing/functions/split.js.map +1 -1
- package/dist/parsing/functions/stringify.d.ts.map +1 -1
- package/dist/parsing/functions/stringify.js +20 -2
- package/dist/parsing/functions/stringify.js.map +1 -1
- package/dist/parsing/functions/sum.d.ts.map +1 -1
- package/dist/parsing/functions/sum.js +20 -2
- package/dist/parsing/functions/sum.js.map +1 -1
- package/dist/parsing/functions/to_json.d.ts.map +1 -1
- package/dist/parsing/functions/to_json.js +20 -2
- package/dist/parsing/functions/to_json.js.map +1 -1
- package/dist/parsing/parser.d.ts.map +1 -1
- package/dist/parsing/parser.js +1 -2
- package/dist/parsing/parser.js.map +1 -1
- package/docs/flowquery.min.js +1 -1
- package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
- package/misc/apps/RAG/.env.example +14 -0
- package/misc/apps/RAG/README.md +0 -7
- package/misc/apps/RAG/package.json +16 -7
- package/misc/apps/RAG/public/index.html +18 -0
- package/misc/apps/RAG/src/App.css +42 -0
- package/misc/apps/RAG/src/App.tsx +50 -0
- package/misc/apps/RAG/src/components/ApiKeySettings.tsx +245 -0
- package/misc/apps/RAG/src/components/ChatContainer.css +67 -0
- package/misc/apps/RAG/src/components/ChatContainer.tsx +239 -0
- package/misc/apps/RAG/src/components/ChatInput.css +23 -0
- package/misc/apps/RAG/src/components/ChatInput.tsx +62 -0
- package/misc/apps/RAG/src/components/ChatMessage.css +136 -0
- package/misc/apps/RAG/src/components/ChatMessage.tsx +152 -0
- package/misc/apps/RAG/src/components/FlowQueryAgent.ts +390 -0
- package/misc/apps/RAG/src/components/FlowQueryRunner.css +104 -0
- package/misc/apps/RAG/src/components/FlowQueryRunner.tsx +332 -0
- package/misc/apps/RAG/src/components/index.ts +15 -0
- package/misc/apps/RAG/src/index.tsx +17 -0
- package/misc/apps/RAG/src/plugins/README.md +139 -0
- package/misc/apps/RAG/src/plugins/index.ts +68 -0
- package/misc/apps/RAG/src/plugins/loaders/CatFacts.ts +75 -0
- package/misc/apps/RAG/src/plugins/loaders/FetchJson.ts +67 -0
- package/misc/apps/RAG/src/plugins/loaders/Llm.ts +437 -0
- package/misc/apps/RAG/src/plugins/loaders/MockData.ts +151 -0
- package/misc/apps/RAG/src/prompts/FlowQuerySystemPrompt.ts +385 -0
- package/misc/apps/RAG/src/prompts/index.ts +10 -0
- package/misc/apps/RAG/src/utils/FlowQueryExecutor.ts +131 -0
- package/misc/apps/RAG/src/utils/FlowQueryExtractor.ts +203 -0
- package/misc/apps/RAG/src/utils/index.ts +9 -0
- package/misc/apps/RAG/tsconfig.json +4 -2
- package/misc/apps/RAG/webpack.config.js +23 -12
- package/package.json +7 -1
- package/src/compute/runner.ts +1 -26
- package/src/extensibility.ts +45 -0
- package/src/index.browser.ts +2 -88
- package/src/index.node.ts +3 -92
- package/src/parsing/functions/avg.ts +10 -0
- package/src/parsing/functions/collect.ts +10 -0
- package/src/parsing/functions/function_factory.ts +56 -194
- package/src/parsing/functions/function_metadata.ts +187 -168
- package/src/parsing/functions/functions.ts +27 -0
- package/src/parsing/functions/join.ts +11 -0
- package/src/parsing/functions/predicate_function.ts +4 -0
- package/src/parsing/functions/predicate_sum.ts +13 -0
- package/src/parsing/functions/rand.ts +8 -0
- package/src/parsing/functions/range.ts +11 -0
- package/src/parsing/functions/replace.ts +12 -0
- package/src/parsing/functions/round.ts +10 -0
- package/src/parsing/functions/size.ts +10 -0
- package/src/parsing/functions/split.ts +11 -0
- package/src/parsing/functions/stringify.ts +10 -0
- package/src/parsing/functions/sum.ts +10 -0
- package/src/parsing/functions/to_json.ts +10 -0
- package/src/parsing/parser.ts +1 -2
- package/tests/extensibility.test.ts +563 -0
- package/tsconfig.json +1 -0
- package/dist/parsing/functions/predicate_function_factory.d.ts +0 -6
- package/dist/parsing/functions/predicate_function_factory.d.ts.map +0 -1
- package/dist/parsing/functions/predicate_function_factory.js +0 -19
- package/dist/parsing/functions/predicate_function_factory.js.map +0 -1
- package/misc/apps/RAG/src/index.ts +0 -20
- package/src/parsing/functions/predicate_function_factory.ts +0 -15
- package/tests/parsing/function_plugins.test.ts +0 -369
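The headline change in this release is the new flowquery/extensibility entry point, which exposes a @FunctionDef class decorator for describing custom provider functions; the removed predicate_function_factory sources and function_plugins tests appear to be superseded by this module and its new extensibility tests. The three example loaders whose diffs follow (CatFacts, FetchJson, Llm) all use the same pattern. As a minimal sketch of that pattern, here is a hypothetical loader that is not part of the release; the decorator options mirror those used in the diffs below, while the class itself and how the engine ultimately registers it are illustrative assumptions:

import { FunctionDef } from 'flowquery/extensibility';

// Hypothetical provider (not in this release), mirroring the decorator
// shape used by the CatFacts/FetchJson/Llm loaders below.
@FunctionDef({
    isAsyncProvider: true,
    description: 'Yields the integers 0..n-1 (illustrative only)',
    category: 'examples',
    parameters: [
        { name: 'n', description: 'How many integers to yield', type: 'number', required: false, default: 3 }
    ],
    output: { description: 'An integer', type: 'number' }
})
export class CountLoader {
    // The example loaders expose their data through an async-generator
    // fetch() method; this one just counts.
    async *fetch(n: number = 3): AsyncGenerator<number, void, unknown> {
        for (let i = 0; i < n; i++) {
            yield i;
        }
    }
}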
@@ -0,0 +1,75 @@ package/misc/apps/RAG/src/plugins/loaders/CatFacts.ts (new file)

/**
 * Example plugin: Fetch random cat facts from the Cat Facts API.
 *
 * Usage in FlowQuery:
 *   LOAD JSON FROM catFacts(5) AS fact
 *   RETURN fact.text
 */

import { FunctionDef } from 'flowquery/extensibility';

const CAT_FACTS_API = 'https://catfact.ninja/facts';

/**
 * CatFacts loader class - fetches random cat facts from the Cat Facts API.
 */
@FunctionDef({
    isAsyncProvider: true,
    description: 'Fetches random cat facts from the Cat Facts API (catfact.ninja)',
    category: 'examples',
    parameters: [
        {
            name: 'count',
            description: 'Number of cat facts to fetch',
            type: 'number',
            required: false,
            default: 1
        }
    ],
    output: {
        description: 'Cat fact object',
        type: 'object',
        properties: {
            text: { description: 'The cat fact text', type: 'string' },
            length: { description: 'Length of the fact text', type: 'number' }
        }
    },
    examples: [
        "LOAD JSON FROM catFacts() AS fact RETURN fact.text",
        "LOAD JSON FROM catFacts(5) AS fact RETURN fact.text, fact.length AS length"
    ]
})
export class CatFactsLoader {
    private readonly apiUrl: string;

    constructor(apiUrl: string = CAT_FACTS_API) {
        this.apiUrl = apiUrl;
    }

    /**
     * Fetches random cat facts from the Cat Facts API.
     *
     * @param count - Number of cat facts to fetch (default: 1)
     */
    async *fetch(count: number = 1): AsyncGenerator<any, void, unknown> {
        const url = `${this.apiUrl}?limit=${count}`;
        const response = await fetch(url);

        if (!response.ok) {
            throw new Error(`Failed to fetch cat facts: ${response.statusText}`);
        }

        const json = await response.json();
        const data = json.data || [];

        for (const item of data) {
            // Map 'fact' to 'text' for backwards compatibility with existing queries
            yield {
                text: item.fact,
                length: item.length
            };
        }
    }
}

export default CatFactsLoader;
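Outside of a LOAD clause, this loader can be driven directly as an async generator. A minimal standalone sketch, assuming a runtime with a global fetch (Node 18+ or a browser) and top-level await:

import CatFactsLoader from './plugins/loaders/CatFacts';

// fetch() is an async generator, so facts stream in as they are yielded.
const loader = new CatFactsLoader();
for await (const fact of loader.fetch(3)) {
    console.log(`${fact.text} (${fact.length} chars)`);
}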
@@ -0,0 +1,67 @@ package/misc/apps/RAG/src/plugins/loaders/FetchJson.ts (new file)

/**
 * Example plugin: Fetch JSON data from a URL.
 *
 * Usage in FlowQuery:
 *   LOAD JSON FROM fetchJson('https://api.example.com/data') AS item
 *   RETURN item.name, item.value
 */

import { FunctionDef } from 'flowquery/extensibility';

/**
 * FetchJson loader class - fetches JSON data from a URL and yields items.
 */
@FunctionDef({
    isAsyncProvider: true,
    description: 'Fetches JSON data from a URL. If the response is an array, yields each item individually.',
    category: 'data',
    parameters: [
        {
            name: 'url',
            description: 'The URL to fetch JSON from',
            type: 'string',
            required: true
        },
        {
            name: 'options',
            description: 'Optional fetch options (headers, method, etc.)',
            type: 'object',
            required: false
        }
    ],
    output: {
        description: 'JSON data items',
        type: 'object'
    },
    examples: [
        "LOAD JSON FROM fetchJson('https://api.example.com/users') AS user RETURN user.name",
        "LOAD JSON FROM fetchJson('https://api.example.com/data') AS item RETURN item WHERE item.active = true"
    ]
})
export class FetchJsonLoader {
    /**
     * Fetches JSON data from a URL and yields each item if array, or the object itself.
     *
     * @param url - The URL to fetch JSON from
     * @param options - Optional fetch options
     */
    async *fetch(url: string, options?: RequestInit): AsyncGenerator<any, void, unknown> {
        const response = await fetch(url, options);

        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${response.statusText}`);
        }

        const data = await response.json();

        if (Array.isArray(data)) {
            for (const item of data) {
                yield item;
            }
        } else {
            yield data;
        }
    }
}

export default FetchJsonLoader;
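Because the second argument is forwarded to fetch() as a RequestInit, custom headers work exactly as they would in a plain fetch call. A short sketch, with a placeholder URL and token:

import FetchJsonLoader from './plugins/loaders/FetchJson';

const loader = new FetchJsonLoader();
// Array responses are flattened: each element is yielded individually.
for await (const item of loader.fetch('https://api.example.com/users', {
    headers: { Authorization: 'Bearer <token>' },
})) {
    console.log(item);
}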
@@ -0,0 +1,437 @@ package/misc/apps/RAG/src/plugins/loaders/Llm.ts (new file)

/**
 * OpenAI LLM Plugin: Call OpenAI-compatible APIs for chat completions.
 *
 * Usage in FlowQuery:
 *   LOAD JSON FROM llm('What is the capital of France?') AS response
 *   RETURN response.choices[0].message.content
 *
 * With custom options:
 *   LOAD JSON FROM llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) AS response
 *   RETURN response.choices[0].message.content
 *
 * This loader can also be used standalone outside of FlowQuery:
 *   import { LlmLoader } from './plugins/loaders/Llm';
 *   const loader = new LlmLoader();
 *   const response = await loader.complete('What is 2+2?');
 *   console.log(response.choices[0].message.content);
 */

import { FunctionDef } from 'flowquery/extensibility';

// Default configuration - can be overridden via options
const DEFAULT_CONFIG = {
    apiUrl: 'https://api.openai.com/v1/chat/completions',
    model: 'gpt-4o-mini',
    temperature: 0.7,
    maxTokens: undefined as number | undefined,
};

/**
 * Options for LLM requests.
 */
export interface LlmOptions {
    /** OpenAI API key. Configure in Settings or pass as option. */
    apiKey?: string;
    /** API endpoint URL. Defaults to OpenAI's chat completions endpoint. */
    apiUrl?: string;
    /** Model to use. Defaults to 'gpt-4o-mini'. */
    model?: string;
    /** Sampling temperature (0-2). Defaults to 0.7. */
    temperature?: number;
    /** Maximum tokens to generate. */
    maxTokens?: number;
    /** System prompt to set context for the conversation. */
    systemPrompt?: string;
    /** Additional messages to include in the conversation. */
    messages?: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>;
    /** Organization ID for OpenAI API. */
    organizationId?: string;
    /** Additional headers to include in the request. */
    headers?: Record<string, string>;
    /** Enable streaming response. */
    stream?: boolean;
    /** Additional body parameters to pass to the API. */
    additionalParams?: Record<string, any>;
}

/**
 * OpenAI-compatible chat completion response.
 */
export interface LlmResponse {
    id: string;
    object: string;
    created: number;
    model: string;
    choices: Array<{
        index: number;
        message: {
            role: string;
            content: string;
        };
        finish_reason: string;
    }>;
    usage?: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
    };
}

/**
 * LLM Loader class - calls OpenAI-compatible APIs for chat completions.
 */
@FunctionDef({
    isAsyncProvider: true,
    description: 'Calls OpenAI-compatible chat completion APIs. Supports GPT models and any OpenAI-compatible endpoint.',
    category: 'ai',
    parameters: [
        {
            name: 'prompt',
            description: 'The user prompt to send to the LLM',
            type: 'string',
            required: true,
            example: 'What is the capital of France?'
        },
        {
            name: 'options',
            description: 'Optional configuration for the LLM request',
            type: 'object',
            required: false,
            properties: {
                apiKey: { description: 'OpenAI API key', type: 'string' },
                apiUrl: { description: 'API endpoint URL (defaults to OpenAI chat completions)', type: 'string' },
                model: { description: 'Model to use (defaults to gpt-4o-mini)', type: 'string' },
                temperature: { description: 'Sampling temperature 0-2 (defaults to 0.7)', type: 'number' },
                maxTokens: { description: 'Maximum tokens to generate', type: 'number' },
                systemPrompt: { description: 'System prompt to set context', type: 'string' },
                messages: { description: 'Additional conversation messages', type: 'array' },
                organizationId: { description: 'OpenAI organization ID', type: 'string' },
                headers: { description: 'Additional request headers', type: 'object' },
                stream: { description: 'Enable streaming response', type: 'boolean' },
                additionalParams: { description: 'Additional API parameters', type: 'object' }
            }
        }
    ],
    output: {
        description: 'OpenAI chat completion response',
        type: 'object',
        properties: {
            id: { description: 'Unique identifier for the completion', type: 'string' },
            model: { description: 'Model used for completion', type: 'string' },
            choices: {
                description: 'Array of completion choices',
                type: 'array',
                example: [{ message: { role: 'assistant', content: 'Paris is the capital of France.' } }]
            },
            usage: { description: 'Token usage statistics', type: 'object' }
        }
    },
    examples: [
        "LOAD JSON FROM llm('What is 2+2?') AS response RETURN response.choices[0].message.content",
        "LOAD JSON FROM llm('Translate to French: Hello', { model: 'gpt-4o', temperature: 0.3 }) AS response RETURN response.choices[0].message.content",
        "LOAD JSON FROM llm('Write a haiku', { systemPrompt: 'You are a poet' }) AS response RETURN response.choices[0].message.content"
    ],
    notes: 'Requires API key configured in Settings or passed as apiKey option. Works with any OpenAI-compatible API by setting the apiUrl option.'
})
export class LlmLoader {
    private readonly defaultOptions: Partial<LlmOptions>;

    constructor(defaultOptions: Partial<LlmOptions> = {}) {
        this.defaultOptions = defaultOptions;
    }

    /**
     * Get API key from options or localStorage (browser).
     */
    private getApiKey(options?: LlmOptions): string {
        // First check options
        if (options?.apiKey) {
            return options.apiKey;
        }

        // Check default options
        if (this.defaultOptions.apiKey) {
            return this.defaultOptions.apiKey;
        }

        // In browser, check localStorage
        if (typeof window !== 'undefined' && typeof localStorage !== 'undefined') {
            const storedKey = localStorage.getItem('flowquery_openai_api_key');
            if (storedKey) {
                return storedKey;
            }
        }

        throw new Error(
            'OpenAI API key is required. Configure it in Settings or pass apiKey in options.'
        );
    }

    /**
     * Get stored configuration from localStorage (browser only).
     */
    private getStoredConfig(): Partial<LlmOptions> {
        if (typeof window === 'undefined' || typeof localStorage === 'undefined') {
            return {};
        }

        return {
            organizationId: localStorage.getItem('flowquery_openai_org_id') || undefined,
            model: localStorage.getItem('flowquery_openai_model') || undefined,
        };
    }

    /**
     * Build the request body for the API call.
     */
    private buildRequestBody(prompt: string, options?: LlmOptions): Record<string, any> {
        const messages: Array<{ role: string; content: string }> = [];

        // Add system prompt if provided
        if (options?.systemPrompt) {
            messages.push({ role: 'system', content: options.systemPrompt });
        }

        // Add any additional messages
        if (options?.messages) {
            messages.push(...options.messages);
        }

        // Add the user prompt
        messages.push({ role: 'user', content: prompt });

        const body: Record<string, any> = {
            model: options?.model || this.defaultOptions.model || DEFAULT_CONFIG.model,
            messages,
            temperature: options?.temperature ?? this.defaultOptions.temperature ?? DEFAULT_CONFIG.temperature,
            ...(options?.additionalParams || {}),
        };

        if (options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens) {
            body.max_tokens = options?.maxTokens || this.defaultOptions.maxTokens || DEFAULT_CONFIG.maxTokens;
        }

        if (options?.stream) {
            body.stream = true;
        }

        return body;
    }

    /**
     * Build request headers.
     */
    private buildHeaders(apiKey: string, options?: LlmOptions): Record<string, string> {
        const headers: Record<string, string> = {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${apiKey}`,
            ...(options?.headers || {}),
        };

        if (options?.organizationId) {
            headers['OpenAI-Organization'] = options.organizationId;
        }

        return headers;
    }

    /**
     * Call the OpenAI-compatible API and return the full response.
     *
     * @param prompt - The user prompt to send to the LLM
     * @param options - Optional configuration for the request
     * @returns The full API response
     *
     * @example
     * ```typescript
     * const loader = new LlmLoader();
     * const response = await loader.complete('What is the capital of France?');
     * console.log(response.choices[0].message.content);
     * ```
     */
    async complete(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
        // Merge stored config with provided options (options take precedence)
        const storedConfig = this.getStoredConfig();
        const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };

        const apiKey = this.getApiKey(mergedOptions);
        const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
        const headers = this.buildHeaders(apiKey, mergedOptions);
        const body = this.buildRequestBody(prompt, mergedOptions);

        const response = await fetch(apiUrl, {
            method: 'POST',
            headers,
            body: JSON.stringify(body),
        });

        if (!response.ok) {
            const errorText = await response.text();
            throw new Error(`LLM API error (${response.status}): ${errorText}`);
        }

        return response.json();
    }

    /**
     * Call the OpenAI-compatible API with streaming and yield each chunk.
     *
     * @param prompt - The user prompt to send to the LLM
     * @param options - Optional configuration for the request
     * @yields Parsed SSE data chunks from the stream
     *
     * @example
     * ```typescript
     * const loader = new LlmLoader();
     * for await (const chunk of loader.stream('Tell me a story')) {
     *     if (chunk.choices?.[0]?.delta?.content) {
     *         process.stdout.write(chunk.choices[0].delta.content);
     *     }
     * }
     * ```
     */
    async *stream(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
        // Merge stored config with provided options (options take precedence)
        const storedConfig = this.getStoredConfig();
        const mergedOptions = { ...this.defaultOptions, ...storedConfig, ...options };

        const apiKey = this.getApiKey(mergedOptions);
        const apiUrl = mergedOptions?.apiUrl || DEFAULT_CONFIG.apiUrl;
        const headers = this.buildHeaders(apiKey, mergedOptions);
        const body = this.buildRequestBody(prompt, { ...mergedOptions, stream: true });

        const response = await fetch(apiUrl, {
            method: 'POST',
            headers,
            body: JSON.stringify(body),
        });

        if (!response.ok) {
            const errorText = await response.text();
            throw new Error(`LLM API error (${response.status}): ${errorText}`);
        }

        if (!response.body) {
            throw new Error('Response body is null');
        }

        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        let buffer = '';

        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';

                for (const line of lines) {
                    const trimmed = line.trim();
                    if (trimmed.startsWith('data: ')) {
                        const data = trimmed.slice(6);
                        if (data === '[DONE]') {
                            return;
                        }
                        try {
                            yield JSON.parse(data);
                        } catch {
                            // Skip invalid JSON chunks
                        }
                    }
                }
            }
        } finally {
            reader.releaseLock();
        }
    }

    /**
     * Async generator provider for FlowQuery LOAD operations.
     */
    async *fetch(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
        if (options?.stream) {
            yield* this.stream(prompt, options);
        } else {
            const response = await this.complete(prompt, options);
            yield response;
        }
    }

    /**
     * Extract just the text content from an LLM response.
     * Convenience method for common use case.
     *
     * @param response - The LLM response object
     * @returns The text content from the first choice
     */
    static extractContent(response: LlmResponse): string {
        return response.choices?.[0]?.message?.content || '';
    }
}

/**
 * Call the OpenAI-compatible API and return the full response.
 * This function can be used standalone outside of FlowQuery.
 *
 * @param prompt - The user prompt to send to the LLM
 * @param options - Optional configuration for the request
 * @returns The full API response
 *
 * @example
 * ```typescript
 * import { llm } from './plugins/loaders/Llm';
 *
 * // Simple usage
 * const response = await llm('What is the capital of France?');
 * console.log(response.choices[0].message.content);
 *
 * // With options
 * const response = await llm('Translate to Spanish: Hello', {
 *     model: 'gpt-4o',
 *     temperature: 0.3,
 *     systemPrompt: 'You are a professional translator.'
 * });
 * ```
 */
export async function llm(prompt: string, options?: LlmOptions): Promise<LlmResponse> {
    return new LlmLoader().complete(prompt, options);
}

/**
 * Call the OpenAI-compatible API with streaming and yield each chunk.
 * This function can be used standalone outside of FlowQuery.
 *
 * @param prompt - The user prompt to send to the LLM
 * @param options - Optional configuration for the request
 * @yields Parsed SSE data chunks from the stream
 *
 * @example
 * ```typescript
 * import { llmStream } from './plugins/loaders/Llm';
 *
 * for await (const chunk of llmStream('Tell me a story')) {
 *     if (chunk.choices?.[0]?.delta?.content) {
 *         process.stdout.write(chunk.choices[0].delta.content);
 *     }
 * }
 * ```
 */
export async function* llmStream(prompt: string, options?: LlmOptions): AsyncGenerator<any, void, unknown> {
    yield* new LlmLoader().stream(prompt, options);
}

/**
 * Extract just the text content from an LLM response.
 * Convenience function for common use case.
 *
 * @param response - The LLM response object
 * @returns The text content from the first choice
 */
export function extractContent(response: LlmResponse): string {
    return LlmLoader.extractContent(response);
}

export default LlmLoader;
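The standalone helpers exported at the bottom of the file make the loader usable without the engine. A short sketch combining them under Node; the OPENAI_API_KEY environment variable name is an assumption for illustration:

import { llm, llmStream, extractContent } from './plugins/loaders/Llm';

// One-shot completion, then just the text of the first choice.
const answer = await llm('What is 2+2?', { apiKey: process.env.OPENAI_API_KEY });
console.log(extractContent(answer));

// Streaming: print delta content as SSE chunks arrive.
for await (const chunk of llmStream('Tell me a story', { apiKey: process.env.OPENAI_API_KEY })) {
    const delta = chunk.choices?.[0]?.delta?.content;
    if (delta) process.stdout.write(delta);
}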