@fugood/bricks-ctor 2.24.0-beta.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129) hide show
  1. package/compile/action-name-map.ts +988 -0
  2. package/compile/index.ts +1245 -0
  3. package/compile/util.ts +358 -0
  4. package/index.ts +6 -0
  5. package/package.json +28 -0
  6. package/skills/bricks-design/LICENSE.txt +180 -0
  7. package/skills/bricks-design/SKILL.md +66 -0
  8. package/skills/bricks-project/SKILL.md +32 -0
  9. package/skills/bricks-project/rules/animation.md +159 -0
  10. package/skills/bricks-project/rules/architecture-patterns.md +69 -0
  11. package/skills/bricks-project/rules/automations.md +221 -0
  12. package/skills/bricks-project/rules/buttress.md +156 -0
  13. package/skills/bricks-project/rules/data-calculation.md +208 -0
  14. package/skills/bricks-project/rules/local-sync.md +129 -0
  15. package/skills/bricks-project/rules/media-flow.md +158 -0
  16. package/skills/bricks-project/rules/remote-data-bank.md +196 -0
  17. package/skills/bricks-project/rules/standby-transition.md +124 -0
  18. package/skills/rive-marketplace/SKILL.md +99 -0
  19. package/tools/deploy.ts +151 -0
  20. package/tools/icons/.gitattributes +1 -0
  21. package/tools/icons/fa6pro-glyphmap.json +4686 -0
  22. package/tools/icons/fa6pro-meta.json +3671 -0
  23. package/tools/mcp-server.ts +28 -0
  24. package/tools/mcp-tools/compile.ts +91 -0
  25. package/tools/mcp-tools/huggingface.ts +762 -0
  26. package/tools/mcp-tools/icons.ts +70 -0
  27. package/tools/mcp-tools/lottie.ts +102 -0
  28. package/tools/mcp-tools/media.ts +110 -0
  29. package/tools/postinstall.ts +229 -0
  30. package/tools/preview-main.mjs +293 -0
  31. package/tools/preview.ts +143 -0
  32. package/tools/pull.ts +116 -0
  33. package/tsconfig.json +16 -0
  34. package/types/animation.ts +100 -0
  35. package/types/automation.ts +235 -0
  36. package/types/brick-base.ts +80 -0
  37. package/types/bricks/Camera.ts +246 -0
  38. package/types/bricks/Chart.ts +372 -0
  39. package/types/bricks/GenerativeMedia.ts +276 -0
  40. package/types/bricks/Icon.ts +98 -0
  41. package/types/bricks/Image.ts +114 -0
  42. package/types/bricks/Items.ts +476 -0
  43. package/types/bricks/Lottie.ts +168 -0
  44. package/types/bricks/Maps.ts +262 -0
  45. package/types/bricks/QrCode.ts +117 -0
  46. package/types/bricks/Rect.ts +150 -0
  47. package/types/bricks/RichText.ts +128 -0
  48. package/types/bricks/Rive.ts +220 -0
  49. package/types/bricks/Slideshow.ts +201 -0
  50. package/types/bricks/Svg.ts +99 -0
  51. package/types/bricks/Text.ts +148 -0
  52. package/types/bricks/TextInput.ts +242 -0
  53. package/types/bricks/Video.ts +175 -0
  54. package/types/bricks/VideoStreaming.ts +112 -0
  55. package/types/bricks/WebRtcStream.ts +65 -0
  56. package/types/bricks/WebView.ts +168 -0
  57. package/types/bricks/index.ts +21 -0
  58. package/types/canvas.ts +82 -0
  59. package/types/common.ts +144 -0
  60. package/types/data-calc-command.ts +7005 -0
  61. package/types/data-calc-script.ts +21 -0
  62. package/types/data-calc.ts +11 -0
  63. package/types/data.ts +95 -0
  64. package/types/generators/AlarmClock.ts +110 -0
  65. package/types/generators/Assistant.ts +621 -0
  66. package/types/generators/BleCentral.ts +247 -0
  67. package/types/generators/BlePeripheral.ts +208 -0
  68. package/types/generators/CanvasMap.ts +74 -0
  69. package/types/generators/CastlesPay.ts +87 -0
  70. package/types/generators/DataBank.ts +160 -0
  71. package/types/generators/File.ts +432 -0
  72. package/types/generators/GraphQl.ts +132 -0
  73. package/types/generators/Http.ts +222 -0
  74. package/types/generators/HttpServer.ts +176 -0
  75. package/types/generators/Information.ts +103 -0
  76. package/types/generators/Intent.ts +168 -0
  77. package/types/generators/Iterator.ts +108 -0
  78. package/types/generators/Keyboard.ts +105 -0
  79. package/types/generators/LlmAnthropicCompat.ts +212 -0
  80. package/types/generators/LlmAppleBuiltin.ts +159 -0
  81. package/types/generators/LlmGgml.ts +861 -0
  82. package/types/generators/LlmMediaTekNeuroPilot.ts +235 -0
  83. package/types/generators/LlmMlx.ts +227 -0
  84. package/types/generators/LlmOnnx.ts +213 -0
  85. package/types/generators/LlmOpenAiCompat.ts +244 -0
  86. package/types/generators/LlmQualcommAiEngine.ts +247 -0
  87. package/types/generators/Mcp.ts +637 -0
  88. package/types/generators/McpServer.ts +289 -0
  89. package/types/generators/MediaFlow.ts +170 -0
  90. package/types/generators/MqttBroker.ts +141 -0
  91. package/types/generators/MqttClient.ts +141 -0
  92. package/types/generators/Question.ts +408 -0
  93. package/types/generators/RealtimeTranscription.ts +279 -0
  94. package/types/generators/RerankerGgml.ts +191 -0
  95. package/types/generators/SerialPort.ts +151 -0
  96. package/types/generators/SoundPlayer.ts +94 -0
  97. package/types/generators/SoundRecorder.ts +130 -0
  98. package/types/generators/SpeechToTextGgml.ts +415 -0
  99. package/types/generators/SpeechToTextOnnx.ts +236 -0
  100. package/types/generators/SpeechToTextPlatform.ts +85 -0
  101. package/types/generators/SqLite.ts +159 -0
  102. package/types/generators/Step.ts +107 -0
  103. package/types/generators/SttAppleBuiltin.ts +130 -0
  104. package/types/generators/Tcp.ts +126 -0
  105. package/types/generators/TcpServer.ts +147 -0
  106. package/types/generators/TextToSpeechAppleBuiltin.ts +127 -0
  107. package/types/generators/TextToSpeechGgml.ts +221 -0
  108. package/types/generators/TextToSpeechOnnx.ts +178 -0
  109. package/types/generators/TextToSpeechOpenAiLike.ts +121 -0
  110. package/types/generators/ThermalPrinter.ts +191 -0
  111. package/types/generators/Tick.ts +83 -0
  112. package/types/generators/Udp.ts +120 -0
  113. package/types/generators/VadGgml.ts +250 -0
  114. package/types/generators/VadOnnx.ts +231 -0
  115. package/types/generators/VadTraditional.ts +138 -0
  116. package/types/generators/VectorStore.ts +257 -0
  117. package/types/generators/Watchdog.ts +107 -0
  118. package/types/generators/WebCrawler.ts +103 -0
  119. package/types/generators/WebRtc.ts +181 -0
  120. package/types/generators/WebSocket.ts +148 -0
  121. package/types/generators/index.ts +57 -0
  122. package/types/index.ts +13 -0
  123. package/types/subspace.ts +59 -0
  124. package/types/switch.ts +51 -0
  125. package/types/system.ts +707 -0
  126. package/utils/calc.ts +126 -0
  127. package/utils/data.ts +497 -0
  128. package/utils/event-props.ts +836 -0
  129. package/utils/id.ts +80 -0
@@ -0,0 +1,244 @@
1
+ /* Auto generated by build script
2
+ *
3
+ * LLM inference using OpenAI-compatible API endpoints
4
+ *
5
+ * ## Features
6
+ * - Compatible with OpenAI API format
7
+ * - Supports function calling
8
+ * - Streaming responses
9
+ * - Custom API endpoints, like
10
+ * - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
11
+ * - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
12
+ * - Gemini API: https://ai.google.dev/gemini-api/docs/openai
13
+ * - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server
14
+ */
15
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
16
+ import type { Data, DataLink } from '../data'
17
+ import type {
18
+ Brick,
19
+ Generator,
20
+ EventAction,
21
+ ActionWithDataParams,
22
+ ActionWithParams,
23
+ Action,
24
+ EventProperty,
25
+ } from '../common'
26
+ import type { TemplateEventPropsMap } from '../../utils/event-props'
27
+
28
/**
 * Run text completion.
 *
 * Dispatched on the `__actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'`
 * discriminant. Each `params` entry names one completion input and
 * supplies an optional `value` (literal, DataLink, or EventProperty)
 * plus an optional `mapping` string — presumably a transform applied to
 * the value before use; TODO(review): confirm `mapping` semantics.
 */
export type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
  __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
  params?: Array<
    | {
        /* Chat messages */
        input: 'messages'
        value?: Array<any> | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Maximum tokens to generate */
        input: 'maxTokens'
        value?: number | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Sampling temperature */
        input: 'temperature'
        value?: number | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Top-P (nucleus) sampling */
        input: 'topP'
        value?: number | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Frequency penalty */
        input: 'frequencyPenalty'
        value?: number | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Presence penalty */
        input: 'presencePenalty'
        value?: number | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Stop sequences */
        input: 'stop'
        value?: Array<any> | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Tools (OpenAI function-calling format) */
        input: 'tools'
        value?: {} | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Tool choice */
        input: 'toolChoice'
        value?: string | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Enable parallel tool calls */
        input: 'parallelToolCalls'
        value?: boolean | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Response format */
        input: 'responseFormat'
        value?: {} | DataLink | EventProperty
        mapping?: string
      }
  >
}
89
+
90
/**
 * Stop text completion.
 *
 * Parameterless action, dispatched on the
 * `'GENERATOR_OPENAI_LLM_STOP_COMPLETION'` discriminant.
 */
export type GeneratorOpenAILLMActionStopCompletion = Action & {
  __actionName: 'GENERATOR_OPENAI_LLM_STOP_COMPLETION'
}
94
+
95
+ interface GeneratorOpenAILLMDef {
96
+ /*
97
+ Default property:
98
+ {
99
+ "apiEndpoint": "https://api.openai.com/v1",
100
+ "model": "gpt-4o",
101
+ "completionMessages": [
102
+ {
103
+ "role": "system",
104
+ "content": "You are a helpful assistant."
105
+ }
106
+ ],
107
+ "completionMaxTokens": 1024,
108
+ "completionTemperature": 1,
109
+ "completionTopP": 1,
110
+ "completionStop": []
111
+ }
112
+ */
113
+ property?: {
114
+ /* API endpoint URL */
115
+ apiEndpoint?: string | DataLink
116
+ /* API key */
117
+ apiKey?: string | DataLink
118
+ /* Model name (Default: gpt-4o-mini) */
119
+ model?: string | DataLink
120
+ /* Chat messages */
121
+ completionMessages?:
122
+ | Array<
123
+ | DataLink
124
+ | {
125
+ role?: string | DataLink
126
+ content?:
127
+ | string
128
+ | DataLink
129
+ | DataLink
130
+ | {
131
+ type?: string | DataLink
132
+ text?: string | DataLink
133
+ image_url?: string | DataLink
134
+ }
135
+ | DataLink
136
+ }
137
+ >
138
+ | DataLink
139
+ /* Tools for chat mode following OpenAI function calling format
140
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
141
+ See: https://platform.openai.com/docs/guides/function-calling */
142
+ completionTools?: Array<{} | DataLink> | DataLink
143
+ /* Enable parallel tool calls */
144
+ completionParallelToolCalls?: boolean | DataLink
145
+ /* Tool choice for chat mode */
146
+ completionToolChoice?: 'none' | 'auto' | 'required' | DataLink
147
+ /* Response format */
148
+ completionResponseFormat?:
149
+ | DataLink
150
+ | {
151
+ type?: 'text' | 'json_schema' | 'json_object' | DataLink
152
+ json_schema?:
153
+ | DataLink
154
+ | {
155
+ strict?: boolean | DataLink
156
+ schema?: {} | DataLink
157
+ }
158
+ }
159
+ /* Maximum tokens to generate */
160
+ completionMaxTokens?: number | DataLink
161
+ /* Temperature */
162
+ completionTemperature?: number | DataLink
163
+ /* Top P sampling */
164
+ completionTopP?: number | DataLink
165
+ /* Frequency penalty */
166
+ completionFrequencyPenalty?: number | DataLink
167
+ /* Presence penalty */
168
+ completionPresencePenalty?: number | DataLink
169
+ /* Stop sequences */
170
+ completionStop?: Array<string | DataLink> | DataLink
171
+ }
172
+ events?: {
173
+ /* Error event */
174
+ onError?: Array<EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onError']>>
175
+ /* Completion event */
176
+ onCompletion?: Array<
177
+ EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onCompletion']>
178
+ >
179
+ /* Completion finished event */
180
+ onCompletionFinished?: Array<
181
+ EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onCompletionFinished']>
182
+ >
183
+ /* Completion function call event */
184
+ onCompletionFunctionCall?: Array<
185
+ EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onCompletionFunctionCall']>
186
+ >
187
+ }
188
+ outlets?: {
189
+ /* Evaluating outlet */
190
+ isEvaluating?: () => Data<boolean>
191
+ /* Completion result outlet */
192
+ completionResult?: () => Data<string>
193
+ /* Completion details outlet */
194
+ completionDetails?: () => Data<{
195
+ text?: string
196
+ content?: string
197
+ finish_reason?: string
198
+ usage?: { [key: string]: any }
199
+ tool_calls?: Array<{
200
+ id?: string
201
+ type?: string
202
+ function?: {
203
+ name?: string
204
+ arguments?: string
205
+ [key: string]: any
206
+ }
207
+ [key: string]: any
208
+ }>
209
+ [key: string]: any
210
+ }>
211
+ }
212
+ }
213
+
214
/**
 * LLM inference using OpenAI-compatible API endpoints
 *
 * ## Features
 * - Compatible with OpenAI API format
 * - Supports function calling
 * - Streaming responses
 * - Custom API endpoints, like
 *   - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
 *   - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
 *   - Gemini API: https://ai.google.dev/gemini-api/docs/openai
 *   - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server
 */
export type GeneratorOpenAILLM = Generator &
  GeneratorOpenAILLMDef & {
    templateKey: 'GENERATOR_OPENAI_LLM'
    /* Switch variants: each re-declares the generator's property set
       (via GeneratorOpenAILLMDef), presumably applied when its `conds`
       match — TODO(review): confirm switch activation semantics. */
    switches?: Array<
      SwitchDef &
        GeneratorOpenAILLMDef & {
          conds?: Array<{
            /* Comparison operator for this condition */
            method: '==' | '!=' | '>' | '<' | '>=' | '<='
            cond:
              | SwitchCondInnerStateCurrentCanvas
              | SwitchCondData
              | {
                  /* Condition on one of this generator's own outlets */
                  __typename: 'SwitchCondInnerStateOutlet'
                  outlet: 'isEvaluating' | 'completionResult' | 'completionDetails'
                  value: any
                }
          }>
        }
    >
  }
@@ -0,0 +1,247 @@
1
+ /* Auto generated by build script
2
+ *
3
+ * Local LLM inference using Qualcomm AI Engine
4
+ */
5
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
6
+ import type { Data, DataLink } from '../data'
7
+ import type {
8
+ Brick,
9
+ Generator,
10
+ EventAction,
11
+ ActionWithDataParams,
12
+ ActionWithParams,
13
+ Action,
14
+ EventProperty,
15
+ } from '../common'
16
+ import type { TemplateEventPropsMap } from '../../utils/event-props'
17
+
18
/**
 * Load the model.
 *
 * Parameterless action, dispatched on the
 * `'GENERATOR_QNN_LLM_LOAD_MODEL'` discriminant.
 */
export type GeneratorQnnLlmActionLoadModel = Action & {
  __actionName: 'GENERATOR_QNN_LLM_LOAD_MODEL'
}
22
+
23
/**
 * Abort model download.
 *
 * Parameterless action, dispatched on the
 * `'GENERATOR_QNN_LLM_ABORT_MODEL_DOWNLOAD'` discriminant.
 */
export type GeneratorQnnLlmActionAbortModelDownload = Action & {
  __actionName: 'GENERATOR_QNN_LLM_ABORT_MODEL_DOWNLOAD'
}
27
+
28
/**
 * Pre-process the prompt, to prepare KV cache (no text is generated).
 *
 * Accepts either a raw `prompt` string or structured `messages`, plus
 * optional `tools`; `mapping` presumably transforms the value before
 * use — TODO(review): confirm `mapping` semantics.
 */
export type GeneratorQnnLlmActionProcess = ActionWithParams & {
  __actionName: 'GENERATOR_QNN_LLM_PROCESS'
  params?: Array<
    | {
        /* Raw prompt text */
        input: 'prompt'
        value?: string | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Chat messages */
        input: 'messages'
        value?: Array<any> | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Tools (OpenAI-compatible function-calling format) */
        input: 'tools'
        value?: Array<any> | DataLink | EventProperty
        mapping?: string
      }
  >
}
49
+
50
/**
 * Generate text.
 *
 * Same inputs as the PROCESS action: a raw `prompt` or structured
 * `messages`, plus optional `tools`; `mapping` presumably transforms
 * the value before use — TODO(review): confirm `mapping` semantics.
 */
export type GeneratorQnnLlmActionGenerate = ActionWithParams & {
  __actionName: 'GENERATOR_QNN_LLM_GENERATE'
  params?: Array<
    | {
        /* Raw prompt text */
        input: 'prompt'
        value?: string | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Chat messages */
        input: 'messages'
        value?: Array<any> | DataLink | EventProperty
        mapping?: string
      }
    | {
        /* Tools (OpenAI-compatible function-calling format) */
        input: 'tools'
        value?: Array<any> | DataLink | EventProperty
        mapping?: string
      }
  >
}
71
+
72
/**
 * Abort generation.
 *
 * Parameterless action, dispatched on the
 * `'GENERATOR_QNN_LLM_ABORT_GENERATION'` discriminant.
 */
export type GeneratorQnnLlmActionAbortGeneration = Action & {
  __actionName: 'GENERATOR_QNN_LLM_ABORT_GENERATION'
}
76
+
77
/**
 * Release context.
 *
 * Parameterless action, dispatched on the
 * `'GENERATOR_QNN_LLM_RELEASE_CONTEXT'` discriminant.
 */
export type GeneratorQnnLlmActionReleaseContext = Action & {
  __actionName: 'GENERATOR_QNN_LLM_RELEASE_CONTEXT'
}
81
+
82
/**
 * Shared shape for the Qualcomm AI Engine (QNN) LLM generator:
 * `property` (model + sampling configuration), `events` (lifecycle
 * callbacks) and `outlets` (readable state/results). Reused both at
 * the generator's top level and inside each switch variant.
 */
interface GeneratorQnnLlmDef {
  /*
  Default property:
  {
    "modelType": "Llama 3.2 3B Chat",
    "chatFormat": "Llama 3.x",
    "toolsInUserMessage": true,
    "toolCallParser": "llama3_json",
    "toolChoice": "auto",
    "parallelToolCalls": false,
    "temperature": 0.8,
    "seed": 42,
    "topK": 40,
    "topP": 0.95,
    "greedy": false
  }
  */
  property?: {
    /* Load model context when generator is initialized */
    init?: boolean | DataLink
    /* Model type ('Custom' uses customModelUrl/customModelMd5 below) */
    modelType?:
      | 'Llama 3 8B Chat'
      | 'Llama 3.1 8B Chat'
      | 'Llama 3.2 3B Chat'
      | 'Mistral 7B Instruct v0.3'
      | 'Qwen 2 7B Chat'
      | 'Phi 3.5 Mini'
      | 'Granite v3.1 8B Instruct'
      | 'Custom'
      | DataLink
    /* SOC model */
    socModel?: 'X Elite' | 'X Plus' | '8 Elite' | '8 Gen 3' | 'QCS8550' | DataLink
    /* Custom model base URL
       The model should be bundled, for details see https://github.com/mybigday/node-qnn-llm?tab=readme-ov-file#bundled-file */
    customModelUrl?: string | DataLink
    /* Custom model MD5 */
    customModelMd5?: string | DataLink
    /* Chat format ('Custom' uses customChatFormat below) */
    chatFormat?:
      | 'Llama 2'
      | 'Llama 3'
      | 'Llama 3.x'
      | 'Mistral v0.3'
      | 'Qwen 2'
      | 'Custom'
      | DataLink
    /* Custom chat format template */
    customChatFormat?: string | DataLink
    /* Put tools in user message */
    toolsInUserMessage?: boolean | DataLink
    /* Prompt to generate */
    prompt?: string | DataLink
    /* Chat messages (each entry a DataLink or a {role, content} object) */
    messages?:
      | Array<
          | DataLink
          | {
              role?: string | DataLink
              content?: string | DataLink
            }
        >
      | DataLink
    /* Stop words */
    stopWords?: Array<string | DataLink> | DataLink
    /* Tool call parser */
    toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
    /* Tools for chat mode using OpenAI-compatible function calling format
       Format: Array of objects with {type, function: {name, description, parameters}} structure
       See: https://platform.openai.com/docs/guides/function-calling */
    tools?: Array<{} | DataLink> | DataLink
    /* Tool choice for chat mode */
    toolChoice?: 'none' | 'auto' | 'required' | DataLink
    /* Enable parallel tool calls */
    parallelToolCalls?: boolean | DataLink
    /* Number of threads, -1 to use n-threads from model config */
    nThreads?: number | DataLink
    /* Temperature, -1 to use temperature from model config */
    temperature?: number | DataLink
    /* Seed, -1 to use seed from model config */
    seed?: number | DataLink
    /* Top K, -1 to use top-k from model config */
    topK?: number | DataLink
    /* Top P, -1 to use top-p from model config */
    topP?: number | DataLink
    /* Greedy, use greedy sampling */
    greedy?: boolean | DataLink
  }
  events?: {
    /* Event triggered when context state changes */
    onContextStateChange?: Array<
      EventAction<string & keyof TemplateEventPropsMap['QnnLlm']['onContextStateChange']>
    >
    /* Event triggered when generate is done */
    onGenerate?: Array<EventAction<string & keyof TemplateEventPropsMap['QnnLlm']['onGenerate']>>
    /* Event triggered on get function call request */
    onFunctionCall?: Array<
      EventAction<string & keyof TemplateEventPropsMap['QnnLlm']['onFunctionCall']>
    >
    /* Event triggered when error occurs */
    onError?: Array<EventAction<string & keyof TemplateEventPropsMap['QnnLlm']['onError']>>
  }
  outlets?: {
    /* Context state */
    contextState?: () => Data<string>
    /* Generation result */
    result?: () => Data<string>
    /* Full context (Prompt + Generation Result) */
    fullContext?: () => Data<string>
    /* Last function call details */
    lastFunctionCall?: () => Data<{
      id?: string
      type?: string
      function?: {
        name?: string
        arguments?: string
        [key: string]: any
      }
      [key: string]: any
    }>
    /* Completion details (text, reasoning content, tool calls) */
    completionDetails?: () => Data<{
      text?: string
      content?: string
      reasoning_content?: string
      tool_calls?: Array<{
        id?: string
        type?: string
        function?: {
          name?: string
          arguments?: string
          [key: string]: any
        }
        [key: string]: any
      }>
      [key: string]: any
    }>
  }
}
221
+
222
/**
 * Local LLM inference using Qualcomm AI Engine
 */
export type GeneratorQnnLlm = Generator &
  GeneratorQnnLlmDef & {
    templateKey: 'GENERATOR_QNN_LLM'
    /* Switch variants: each re-declares the generator's property set
       (via GeneratorQnnLlmDef), presumably applied when its `conds`
       match — TODO(review): confirm switch activation semantics. */
    switches?: Array<
      SwitchDef &
        GeneratorQnnLlmDef & {
          conds?: Array<{
            /* Comparison operator for this condition */
            method: '==' | '!=' | '>' | '<' | '>=' | '<='
            cond:
              | SwitchCondInnerStateCurrentCanvas
              | SwitchCondData
              | {
                  /* Condition on one of this generator's own outlets */
                  __typename: 'SwitchCondInnerStateOutlet'
                  outlet:
                    | 'contextState'
                    | 'result'
                    | 'fullContext'
                    | 'lastFunctionCall'
                    | 'completionDetails'
                  value: any
                }
          }>
        }
    >
  }