@fugood/bricks-project 2.22.0-beta.9 → 2.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/compile/action-name-map.ts +108 -1
- package/compile/index.ts +10 -1
- package/package.json +3 -3
- package/tools/postinstall.ts +16 -9
- package/types/animation.ts +2 -1
- package/types/brick-base.ts +79 -0
- package/types/bricks/3DViewer.ts +200 -0
- package/types/bricks/Camera.ts +195 -0
- package/types/bricks/Chart.ts +362 -0
- package/types/bricks/GenerativeMedia.ts +240 -0
- package/types/bricks/Icon.ts +93 -0
- package/types/bricks/Image.ts +104 -0
- package/types/bricks/Items.ts +461 -0
- package/types/bricks/Lottie.ts +159 -0
- package/types/bricks/QrCode.ts +112 -0
- package/types/bricks/Rect.ts +110 -0
- package/types/bricks/RichText.ts +123 -0
- package/types/bricks/Rive.ts +209 -0
- package/types/bricks/Slideshow.ts +155 -0
- package/types/bricks/Svg.ts +94 -0
- package/types/bricks/Text.ts +143 -0
- package/types/bricks/TextInput.ts +231 -0
- package/types/bricks/Video.ts +170 -0
- package/types/bricks/VideoStreaming.ts +107 -0
- package/types/bricks/WebRtcStream.ts +60 -0
- package/types/bricks/WebView.ts +157 -0
- package/types/bricks/index.ts +20 -0
- package/types/common.ts +8 -3
- package/types/data.ts +6 -0
- package/types/generators/AlarmClock.ts +102 -0
- package/types/generators/Assistant.ts +546 -0
- package/types/generators/BleCentral.ts +225 -0
- package/types/generators/BlePeripheral.ts +202 -0
- package/types/generators/CanvasMap.ts +57 -0
- package/types/generators/CastlesPay.ts +77 -0
- package/types/generators/DataBank.ts +123 -0
- package/types/generators/File.ts +351 -0
- package/types/generators/GraphQl.ts +124 -0
- package/types/generators/Http.ts +117 -0
- package/types/generators/HttpServer.ts +164 -0
- package/types/generators/Information.ts +97 -0
- package/types/generators/Intent.ts +107 -0
- package/types/generators/Iterator.ts +95 -0
- package/types/generators/Keyboard.ts +85 -0
- package/types/generators/LlmAnthropicCompat.ts +188 -0
- package/types/generators/LlmGgml.ts +719 -0
- package/types/generators/LlmOnnx.ts +184 -0
- package/types/generators/LlmOpenAiCompat.ts +206 -0
- package/types/generators/LlmQualcommAiEngine.ts +213 -0
- package/types/generators/Mcp.ts +294 -0
- package/types/generators/McpServer.ts +248 -0
- package/types/generators/MediaFlow.ts +142 -0
- package/types/generators/MqttBroker.ts +121 -0
- package/types/generators/MqttClient.ts +129 -0
- package/types/generators/Question.ts +395 -0
- package/types/generators/RealtimeTranscription.ts +180 -0
- package/types/generators/RerankerGgml.ts +153 -0
- package/types/generators/SerialPort.ts +141 -0
- package/types/generators/SoundPlayer.ts +86 -0
- package/types/generators/SoundRecorder.ts +113 -0
- package/types/generators/SpeechToTextGgml.ts +462 -0
- package/types/generators/SpeechToTextOnnx.ts +227 -0
- package/types/generators/SpeechToTextPlatform.ts +75 -0
- package/types/generators/SqLite.ts +118 -0
- package/types/generators/Step.ts +101 -0
- package/types/generators/TapToPayOnIPhone.ts +175 -0
- package/types/generators/Tcp.ts +120 -0
- package/types/generators/TcpServer.ts +137 -0
- package/types/generators/TextToSpeechGgml.ts +182 -0
- package/types/generators/TextToSpeechOnnx.ts +169 -0
- package/types/generators/TextToSpeechOpenAiLike.ts +113 -0
- package/types/generators/ThermalPrinter.ts +185 -0
- package/types/generators/Tick.ts +75 -0
- package/types/generators/Udp.ts +109 -0
- package/types/generators/VadGgml.ts +211 -0
- package/types/generators/VectorStore.ts +223 -0
- package/types/generators/Watchdog.ts +96 -0
- package/types/generators/WebCrawler.ts +97 -0
- package/types/generators/WebRtc.ts +165 -0
- package/types/generators/WebSocket.ts +142 -0
- package/types/generators/index.ts +51 -0
- package/types/system.ts +64 -0
- package/utils/data.ts +45 -0
- package/utils/event-props.ts +89 -0
- package/types/bricks.ts +0 -3168
- package/types/generators.ts +0 -7633
package/types/generators/LlmOnnx.ts
@@ -0,0 +1,184 @@
+import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
+import type { Data, DataLink } from '../data'
+import type {
+  Generator,
+  EventAction,
+  ActionWithDataParams,
+  ActionWithParams,
+  Action,
+  EventProperty,
+} from '../common'
+
+/* Load the model */
+export type GeneratorOnnxLLMActionLoadModel = Action & {
+  __actionName: 'GENERATOR_ONNX_LLM_LOAD_MODEL'
+}
+
+/* Inference */
+export type GeneratorOnnxLLMActionInfer = ActionWithParams & {
+  __actionName: 'GENERATOR_ONNX_LLM_INFER'
+  params?: Array<
+    | {
+        input: 'prompt'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'chat'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'images'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'tools'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'toolChoice'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Clean cache */
+export type GeneratorOnnxLLMActionCleanCache = Action & {
+  __actionName: 'GENERATOR_ONNX_LLM_CLEAN_CACHE'
+}
+
+/* Release context */
+export type GeneratorOnnxLLMActionReleaseContext = Action & {
+  __actionName: 'GENERATOR_ONNX_LLM_RELEASE_CONTEXT'
+}
+
+interface GeneratorOnnxLLMDef {
+  /*
+  Default property:
+  {
+    "modelType": "auto",
+    "toolCallParser": "llama3_json",
+    "toolChoice": "auto",
+    "maxNewTokens": 256,
+    "temperature": 0.7,
+    "topK": 50,
+    "topP": 0.9,
+    "repetitionPenalty": 1,
+    "noRepeatNgramSize": 0,
+    "numBeams": 1,
+    "doSample": true,
+    "executionMode": "sequential"
+  }
+  */
+  property?: {
+    /* Initialize the LLM context on generator initialization */
+    init?: boolean | DataLink
+    /* LLM model */
+    model?: string | DataLink
+    /* Model type */
+    modelType?: string | DataLink
+    /* Quantize type */
+    quantizeType?:
+      | 'auto'
+      | 'none'
+      | 'fp16'
+      | 'q8'
+      | 'int8'
+      | 'uint8'
+      | 'q4'
+      | 'bnb4'
+      | 'q4f16'
+      | DataLink
+    /* Prompt for inference */
+    prompt?: string | DataLink
+    /* Messages for inference */
+    messages?: Array<DataLink | {}> | DataLink
+    /* Images with messages for inference */
+    images?: Array<string | DataLink> | DataLink
+    /* Tool call parser */
+    toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
+    /* Tools for chat mode using OpenAI-compatible function calling format
+       Format: Array of objects with {type, function: {name, description, parameters}} structure
+       See: https://platform.openai.com/docs/guides/function-calling */
+    tools?: Array<{} | DataLink> | DataLink
+    /* Tool choice for chat mode */
+    toolChoice?: 'none' | 'auto' | DataLink
+    /* Max new tokens to generate */
+    maxNewTokens?: number | DataLink
+    /* Temperature */
+    temperature?: number | DataLink
+    /* Top k */
+    topK?: number | DataLink
+    /* Top p */
+    topP?: number | DataLink
+    /* Repetition penalty */
+    repetitionPenalty?: number | DataLink
+    /* No repeat ngram size */
+    noRepeatNgramSize?: number | DataLink
+    /* Number of beams */
+    numBeams?: number | DataLink
+    /* Do sampling */
+    doSample?: boolean | DataLink
+    /* Executor candidates, descending order of priority
+       Default will be xnnpack, wasm, cpu */
+    executors?:
+      | Array<'qnn' | 'dml' | 'nnapi' | 'xnnpack' | 'coreml' | 'cpu' | 'wasm' | 'webgpu' | DataLink>
+      | DataLink
+    /* Execution mode
+       Usually when the model has many branches, setting this option to `parallel` will give you better performance. */
+    executionMode?: 'sequential' | 'parallel' | DataLink
+    /* QNN backend */
+    qnnBackend?: 'HTP' | 'HTA' | 'DSP' | 'GPU' | 'CPU' | DataLink
+    /* Enable FP16 for QNN HTP */
+    qnnHtpEnableFp16?: boolean | DataLink
+    /* Enable QNN debug */
+    qnnEnableDebug?: boolean | DataLink
+  }
+  events?: {
+    /* Event triggered when the state changes */
+    onContextStateChange?: Array<EventAction>
+    /* Event triggered on a function call request */
+    onFunctionCall?: Array<EventAction>
+    /* Event triggered when completion finishes */
+    onCompletionFinished?: Array<EventAction>
+    /* Event triggered when an error occurs */
+    onError?: Array<EventAction>
+  }
+  outlets?: {
+    /* Context state */
+    contextState?: () => Data
+    /* Generated output */
+    generated?: () => Data
+    /* Full result of generation */
+    fullResult?: () => Data
+    /* Last function call */
+    lastFunctionCall?: () => Data
+  }
+}
+
+/* Local LLM inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
+   You can use any converted model on HuggingFace. */
+export type GeneratorOnnxLLM = Generator &
+  GeneratorOnnxLLMDef & {
+    templateKey: 'GENERATOR_ONNX_LLM'
+    switches: Array<
+      SwitchDef &
+        GeneratorOnnxLLMDef & {
+          conds?: Array<{
+            method: '==' | '!=' | '>' | '<' | '>=' | '<='
+            cond:
+              | SwitchCondInnerStateCurrentCanvas
+              | SwitchCondData
+              | {
+                  __typename: 'SwitchCondInnerStateOutlet'
+                  outlet: 'contextState' | 'generated' | 'fullResult' | 'lastFunctionCall'
+                  value: any
+                }
+          }>
+        }
+    >
+  }
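For orientation, here is a minimal sketch (not from the package) of a config object shaped like the new GeneratorOnnxLLM type; the model id is a hypothetical placeholder and the fields inherited from the base Generator type are elided:

const onnxLlmSketch = {
  templateKey: 'GENERATOR_ONNX_LLM' as const,
  property: {
    init: true, // load the model when the generator initializes
    model: 'onnx-community/Llama-3.2-1B-Instruct', // hypothetical HuggingFace model id
    modelType: 'auto',
    quantizeType: 'q4',
    maxNewTokens: 256,
    temperature: 0.7,
    topK: 50,
    topP: 0.9,
    executors: ['xnnpack', 'wasm', 'cpu'], // the documented default priority order
    executionMode: 'sequential',
  },
  events: {
    onCompletionFinished: [], // EventAction entries would go here
    onError: [],
  },
  switches: [],
}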
package/types/generators/LlmOpenAiCompat.ts
@@ -0,0 +1,206 @@
+import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
+import type { Data, DataLink } from '../data'
+import type {
+  Generator,
+  EventAction,
+  ActionWithDataParams,
+  ActionWithParams,
+  Action,
+  EventProperty,
+} from '../common'
+
+/* Run text completion */
+export type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
+  __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
+  params?: Array<
+    | {
+        input: 'messages'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'maxTokens'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'temperature'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'topP'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'frequencyPenalty'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'presencePenalty'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'stop'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'tools'
+        value?: {} | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'toolChoice'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'parallelToolCalls'
+        value?: boolean | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'responseFormat'
+        value?: {} | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Stop text completion */
+export type GeneratorOpenAILLMActionStopCompletion = Action & {
+  __actionName: 'GENERATOR_OPENAI_LLM_STOP_COMPLETION'
+}
+
+interface GeneratorOpenAILLMDef {
+  /*
+  Default property:
+  {
+    "apiEndpoint": "https://api.openai.com/v1",
+    "model": "gpt-4o",
+    "completionMessages": [
+      {
+        "role": "system",
+        "content": "You are a helpful assistant."
+      }
+    ],
+    "completionMaxTokens": 1024,
+    "completionTemperature": 1,
+    "completionTopP": 1,
+    "completionStop": []
+  }
+  */
+  property?: {
+    /* API endpoint URL */
+    apiEndpoint?: string | DataLink
+    /* API key */
+    apiKey?: string | DataLink
+    /* Model name (Default: gpt-4o-mini) */
+    model?: string | DataLink
+    /* Chat messages */
+    completionMessages?:
+      | Array<
+          | DataLink
+          | {
+              role?: string | DataLink
+              content?:
+                | string
+                | DataLink
+                | DataLink
+                | {
+                    type?: string | DataLink
+                    text?: string | DataLink
+                    image_url?: string | DataLink
+                  }
+                | DataLink
+            }
+        >
+      | DataLink
+    /* Tools for chat mode following OpenAI function calling format
+       Format: Array of objects with {type, function: {name, description, parameters}} structure
+       See: https://platform.openai.com/docs/guides/function-calling */
+    completionTools?: Array<{} | DataLink> | DataLink
+    /* Enable parallel tool calls */
+    completionParallelToolCalls?: boolean | DataLink
+    /* Tool choice for chat mode */
+    completionToolChoice?: 'none' | 'auto' | 'required' | DataLink
+    /* Response format */
+    completionResponseFormat?:
+      | DataLink
+      | {
+          type?: 'text' | 'json_schema' | 'json_object' | DataLink
+          json_schema?:
+            | DataLink
+            | {
+                strict?: boolean | DataLink
+                schema?: {} | DataLink
+              }
+        }
+    /* Maximum tokens to generate */
+    completionMaxTokens?: number | DataLink
+    /* Temperature */
+    completionTemperature?: number | DataLink
+    /* Top P sampling */
+    completionTopP?: number | DataLink
+    /* Frequency penalty */
+    completionFrequencyPenalty?: number | DataLink
+    /* Presence penalty */
+    completionPresencePenalty?: number | DataLink
+    /* Stop sequences */
+    completionStop?: Array<string | DataLink> | DataLink
+  }
+  events?: {
+    /* Error event */
+    onError?: Array<EventAction>
+    /* Completion event */
+    onCompletion?: Array<EventAction>
+    /* Completion finished event */
+    onCompletionFinished?: Array<EventAction>
+    /* Completion function call event */
+    onCompletionFunctionCall?: Array<EventAction>
+  }
+  outlets?: {
+    /* Evaluating outlet */
+    isEvaluating?: () => Data
+    /* Completion result outlet */
+    completionResult?: () => Data
+    /* Completion details outlet */
+    completionDetails?: () => Data
+  }
+}
+
+/* LLM inference using OpenAI-compatible API endpoints
+
+   ## Features
+   - Compatible with OpenAI API format
+   - Supports function calling
+   - Streaming responses
+   - Custom API endpoints, like
+     - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
+     - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
+     - Gemini API: https://ai.google.dev/gemini-api/docs/openai
+     - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server */
+export type GeneratorOpenAILLM = Generator &
+  GeneratorOpenAILLMDef & {
+    templateKey: 'GENERATOR_OPENAI_LLM'
+    switches: Array<
+      SwitchDef &
+        GeneratorOpenAILLMDef & {
+          conds?: Array<{
+            method: '==' | '!=' | '>' | '<' | '>=' | '<='
+            cond:
+              | SwitchCondInnerStateCurrentCanvas
+              | SwitchCondData
+              | {
+                  __typename: 'SwitchCondInnerStateOutlet'
+                  outlet: 'isEvaluating' | 'completionResult' | 'completionDetails'
+                  value: any
+                }
+          }>
+        }
+    >
+  }
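As a rough usage sketch (again not from the package; the endpoint and key are hypothetical placeholders, base Generator fields elided), the same property shape can point at any of the endpoints listed above, e.g. a local llama.cpp server:

const openAiLlmSketch = {
  templateKey: 'GENERATOR_OPENAI_LLM' as const,
  property: {
    apiEndpoint: 'http://localhost:8080/v1', // hypothetical llama.cpp server address
    apiKey: 'placeholder-key', // hypothetical placeholder
    model: 'gpt-4o-mini',
    completionMessages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Hello!' },
    ],
    completionMaxTokens: 1024,
    completionTemperature: 1,
    completionTopP: 1,
    completionStop: [],
  },
  events: {
    onCompletionFinished: [], // EventAction entries would go here
    onError: [],
  },
  switches: [],
}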
package/types/generators/LlmQualcommAiEngine.ts
@@ -0,0 +1,213 @@
+import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
+import type { Data, DataLink } from '../data'
+import type {
+  Generator,
+  EventAction,
+  ActionWithDataParams,
+  ActionWithParams,
+  Action,
+  EventProperty,
+} from '../common'
+
+/* Load the model */
+export type GeneratorQnnLlmActionLoadModel = Action & {
+  __actionName: 'GENERATOR_QNN_LLM_LOAD_MODEL'
+}
+
+/* Abort model download */
+export type GeneratorQnnLlmActionAbortModelDownload = Action & {
+  __actionName: 'GENERATOR_QNN_LLM_ABORT_MODEL_DOWNLOAD'
+}
+
+/* Pre-process the prompt, to prepare the KV cache */
+export type GeneratorQnnLlmActionProcess = ActionWithParams & {
+  __actionName: 'GENERATOR_QNN_LLM_PROCESS'
+  params?: Array<
+    | {
+        input: 'prompt'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'messages'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'tools'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Generate text */
+export type GeneratorQnnLlmActionGenerate = ActionWithParams & {
+  __actionName: 'GENERATOR_QNN_LLM_GENERATE'
+  params?: Array<
+    | {
+        input: 'prompt'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'messages'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'tools'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Abort generation */
+export type GeneratorQnnLlmActionAbortGeneration = Action & {
+  __actionName: 'GENERATOR_QNN_LLM_ABORT_GENERATION'
+}
+
+/* Release context */
+export type GeneratorQnnLlmActionReleaseContext = Action & {
+  __actionName: 'GENERATOR_QNN_LLM_RELEASE_CONTEXT'
+}
+
+interface GeneratorQnnLlmDef {
+  /*
+  Default property:
+  {
+    "modelType": "Llama 3.2 3B Chat",
+    "chatFormat": "Llama 3.x",
+    "toolsInUserMessage": true,
+    "toolCallParser": "llama3_json",
+    "toolChoice": "auto",
+    "parallelToolCalls": false,
+    "temperature": 0.8,
+    "seed": 42,
+    "topK": 40,
+    "topP": 0.95,
+    "greedy": false
+  }
+  */
+  property?: {
+    /* Load the model context when the generator is initialized */
+    init?: boolean | DataLink
+    /* Model type */
+    modelType?:
+      | 'Llama 3 8B Chat'
+      | 'Llama 3.1 8B Chat'
+      | 'Llama 3.2 3B Chat'
+      | 'Mistral 7B Instruct v0.3'
+      | 'Qwen 2 7B Chat'
+      | 'Phi 3.5 Mini'
+      | 'Granite v3.1 8B Instruct'
+      | 'Custom'
+      | DataLink
+    /* SOC model */
+    socModel?: 'X Elite' | 'X Plus' | '8 Elite' | '8 Gen 3' | 'QCS8550' | DataLink
+    /* Custom model base URL
+       The model should be bundled; for details see https://github.com/mybigday/node-qnn-llm?tab=readme-ov-file#bundled-file */
+    customModelUrl?: string | DataLink
+    /* Custom model MD5 */
+    customModelMd5?: string | DataLink
+    /* Chat format */
+    chatFormat?:
+      | 'Llama 2'
+      | 'Llama 3'
+      | 'Llama 3.x'
+      | 'Mistral v0.3'
+      | 'Qwen 2'
+      | 'Custom'
+      | DataLink
+    /* Custom chat format template */
+    customChatFormat?: string | DataLink
+    /* Put tools in the user message */
+    toolsInUserMessage?: boolean | DataLink
+    /* Prompt to generate from */
+    prompt?: string | DataLink
+    /* Chat messages */
+    messages?:
+      | Array<
+          | DataLink
+          | {
+              role?: string | DataLink
+              content?: string | DataLink
+            }
+        >
+      | DataLink
+    /* Stop words */
+    stopWords?: Array<string | DataLink> | DataLink
+    /* Tool call parser */
+    toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
+    /* Tools for chat mode using OpenAI-compatible function calling format
+       Format: Array of objects with {type, function: {name, description, parameters}} structure
+       See: https://platform.openai.com/docs/guides/function-calling */
+    tools?: Array<{} | DataLink> | DataLink
+    /* Tool choice for chat mode */
+    toolChoice?: 'none' | 'auto' | 'required' | DataLink
+    /* Enable parallel tool calls */
+    parallelToolCalls?: boolean | DataLink
+    /* Number of threads, -1 to use n-threads from the model config */
+    nThreads?: number | DataLink
+    /* Temperature, -1 to use temperature from the model config */
+    temperature?: number | DataLink
+    /* Seed, -1 to use seed from the model config */
+    seed?: number | DataLink
+    /* Top K, -1 to use top-k from the model config */
+    topK?: number | DataLink
+    /* Top P, -1 to use top-p from the model config */
+    topP?: number | DataLink
+    /* Use greedy sampling */
+    greedy?: boolean | DataLink
+  }
+  events?: {
+    /* Event triggered when the context state changes */
+    onContextStateChange?: Array<EventAction>
+    /* Event triggered when generation is done */
+    onGenerate?: Array<EventAction>
+    /* Event triggered on a function call request */
+    onFunctionCall?: Array<EventAction>
+    /* Event triggered when an error occurs */
+    onError?: Array<EventAction>
+  }
+  outlets?: {
+    /* Context state */
+    contextState?: () => Data
+    /* Generation result */
+    result?: () => Data
+    /* Full context (prompt + generation result) */
+    fullContext?: () => Data
+    /* Last function call details */
+    lastFunctionCall?: () => Data
+    /* Completion details */
+    completionDetails?: () => Data
+  }
+}
+
+/* Local LLM inference using the Qualcomm AI Engine */
+export type GeneratorQnnLlm = Generator &
+  GeneratorQnnLlmDef & {
+    templateKey: 'GENERATOR_QNN_LLM'
+    switches: Array<
+      SwitchDef &
+        GeneratorQnnLlmDef & {
+          conds?: Array<{
+            method: '==' | '!=' | '>' | '<' | '>=' | '<='
+            cond:
+              | SwitchCondInnerStateCurrentCanvas
+              | SwitchCondData
+              | {
+                  __typename: 'SwitchCondInnerStateOutlet'
+                  outlet:
+                    | 'contextState'
+                    | 'result'
+                    | 'fullContext'
+                    | 'lastFunctionCall'
+                    | 'completionDetails'
+                  value: any
+                }
+          }>
+        }
+    >
+  }
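And a comparable sketch for the Qualcomm AI Engine generator (not from the package; the SoC choice is a hypothetical assumption, base Generator fields elided), using a built-in preset and the defaults documented in the type:

const qnnLlmSketch = {
  templateKey: 'GENERATOR_QNN_LLM' as const,
  property: {
    init: true, // load the model context when the generator initializes
    modelType: 'Llama 3.2 3B Chat', // one of the built-in presets above
    socModel: '8 Elite', // hypothetical target device
    chatFormat: 'Llama 3.x',
    toolCallParser: 'llama3_json',
    toolChoice: 'auto',
    temperature: 0.8,
    seed: 42,
    topK: 40,
    topP: 0.95,
    greedy: false,
  },
  events: {
    onGenerate: [], // EventAction entries would go here
    onError: [],
  },
  switches: [],
}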