@fugood/bricks-ctor 2.24.0-beta.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. package/compile/action-name-map.ts +988 -0
  2. package/compile/index.ts +1245 -0
  3. package/compile/util.ts +358 -0
  4. package/index.ts +6 -0
  5. package/package.json +28 -0
  6. package/skills/bricks-design/LICENSE.txt +180 -0
  7. package/skills/bricks-design/SKILL.md +66 -0
  8. package/skills/bricks-project/SKILL.md +32 -0
  9. package/skills/bricks-project/rules/animation.md +159 -0
  10. package/skills/bricks-project/rules/architecture-patterns.md +69 -0
  11. package/skills/bricks-project/rules/automations.md +221 -0
  12. package/skills/bricks-project/rules/buttress.md +156 -0
  13. package/skills/bricks-project/rules/data-calculation.md +208 -0
  14. package/skills/bricks-project/rules/local-sync.md +129 -0
  15. package/skills/bricks-project/rules/media-flow.md +158 -0
  16. package/skills/bricks-project/rules/remote-data-bank.md +196 -0
  17. package/skills/bricks-project/rules/standby-transition.md +124 -0
  18. package/skills/rive-marketplace/SKILL.md +99 -0
  19. package/tools/deploy.ts +151 -0
  20. package/tools/icons/.gitattributes +1 -0
  21. package/tools/icons/fa6pro-glyphmap.json +4686 -0
  22. package/tools/icons/fa6pro-meta.json +3671 -0
  23. package/tools/mcp-server.ts +28 -0
  24. package/tools/mcp-tools/compile.ts +91 -0
  25. package/tools/mcp-tools/huggingface.ts +762 -0
  26. package/tools/mcp-tools/icons.ts +70 -0
  27. package/tools/mcp-tools/lottie.ts +102 -0
  28. package/tools/mcp-tools/media.ts +110 -0
  29. package/tools/postinstall.ts +229 -0
  30. package/tools/preview-main.mjs +293 -0
  31. package/tools/preview.ts +143 -0
  32. package/tools/pull.ts +116 -0
  33. package/tsconfig.json +16 -0
  34. package/types/animation.ts +100 -0
  35. package/types/automation.ts +235 -0
  36. package/types/brick-base.ts +80 -0
  37. package/types/bricks/Camera.ts +246 -0
  38. package/types/bricks/Chart.ts +372 -0
  39. package/types/bricks/GenerativeMedia.ts +276 -0
  40. package/types/bricks/Icon.ts +98 -0
  41. package/types/bricks/Image.ts +114 -0
  42. package/types/bricks/Items.ts +476 -0
  43. package/types/bricks/Lottie.ts +168 -0
  44. package/types/bricks/Maps.ts +262 -0
  45. package/types/bricks/QrCode.ts +117 -0
  46. package/types/bricks/Rect.ts +150 -0
  47. package/types/bricks/RichText.ts +128 -0
  48. package/types/bricks/Rive.ts +220 -0
  49. package/types/bricks/Slideshow.ts +201 -0
  50. package/types/bricks/Svg.ts +99 -0
  51. package/types/bricks/Text.ts +148 -0
  52. package/types/bricks/TextInput.ts +242 -0
  53. package/types/bricks/Video.ts +175 -0
  54. package/types/bricks/VideoStreaming.ts +112 -0
  55. package/types/bricks/WebRtcStream.ts +65 -0
  56. package/types/bricks/WebView.ts +168 -0
  57. package/types/bricks/index.ts +21 -0
  58. package/types/canvas.ts +82 -0
  59. package/types/common.ts +144 -0
  60. package/types/data-calc-command.ts +7005 -0
  61. package/types/data-calc-script.ts +21 -0
  62. package/types/data-calc.ts +11 -0
  63. package/types/data.ts +95 -0
  64. package/types/generators/AlarmClock.ts +110 -0
  65. package/types/generators/Assistant.ts +621 -0
  66. package/types/generators/BleCentral.ts +247 -0
  67. package/types/generators/BlePeripheral.ts +208 -0
  68. package/types/generators/CanvasMap.ts +74 -0
  69. package/types/generators/CastlesPay.ts +87 -0
  70. package/types/generators/DataBank.ts +160 -0
  71. package/types/generators/File.ts +432 -0
  72. package/types/generators/GraphQl.ts +132 -0
  73. package/types/generators/Http.ts +222 -0
  74. package/types/generators/HttpServer.ts +176 -0
  75. package/types/generators/Information.ts +103 -0
  76. package/types/generators/Intent.ts +168 -0
  77. package/types/generators/Iterator.ts +108 -0
  78. package/types/generators/Keyboard.ts +105 -0
  79. package/types/generators/LlmAnthropicCompat.ts +212 -0
  80. package/types/generators/LlmAppleBuiltin.ts +159 -0
  81. package/types/generators/LlmGgml.ts +861 -0
  82. package/types/generators/LlmMediaTekNeuroPilot.ts +235 -0
  83. package/types/generators/LlmMlx.ts +227 -0
  84. package/types/generators/LlmOnnx.ts +213 -0
  85. package/types/generators/LlmOpenAiCompat.ts +244 -0
  86. package/types/generators/LlmQualcommAiEngine.ts +247 -0
  87. package/types/generators/Mcp.ts +637 -0
  88. package/types/generators/McpServer.ts +289 -0
  89. package/types/generators/MediaFlow.ts +170 -0
  90. package/types/generators/MqttBroker.ts +141 -0
  91. package/types/generators/MqttClient.ts +141 -0
  92. package/types/generators/Question.ts +408 -0
  93. package/types/generators/RealtimeTranscription.ts +279 -0
  94. package/types/generators/RerankerGgml.ts +191 -0
  95. package/types/generators/SerialPort.ts +151 -0
  96. package/types/generators/SoundPlayer.ts +94 -0
  97. package/types/generators/SoundRecorder.ts +130 -0
  98. package/types/generators/SpeechToTextGgml.ts +415 -0
  99. package/types/generators/SpeechToTextOnnx.ts +236 -0
  100. package/types/generators/SpeechToTextPlatform.ts +85 -0
  101. package/types/generators/SqLite.ts +159 -0
  102. package/types/generators/Step.ts +107 -0
  103. package/types/generators/SttAppleBuiltin.ts +130 -0
  104. package/types/generators/Tcp.ts +126 -0
  105. package/types/generators/TcpServer.ts +147 -0
  106. package/types/generators/TextToSpeechAppleBuiltin.ts +127 -0
  107. package/types/generators/TextToSpeechGgml.ts +221 -0
  108. package/types/generators/TextToSpeechOnnx.ts +178 -0
  109. package/types/generators/TextToSpeechOpenAiLike.ts +121 -0
  110. package/types/generators/ThermalPrinter.ts +191 -0
  111. package/types/generators/Tick.ts +83 -0
  112. package/types/generators/Udp.ts +120 -0
  113. package/types/generators/VadGgml.ts +250 -0
  114. package/types/generators/VadOnnx.ts +231 -0
  115. package/types/generators/VadTraditional.ts +138 -0
  116. package/types/generators/VectorStore.ts +257 -0
  117. package/types/generators/Watchdog.ts +107 -0
  118. package/types/generators/WebCrawler.ts +103 -0
  119. package/types/generators/WebRtc.ts +181 -0
  120. package/types/generators/WebSocket.ts +148 -0
  121. package/types/generators/index.ts +57 -0
  122. package/types/index.ts +13 -0
  123. package/types/subspace.ts +59 -0
  124. package/types/switch.ts +51 -0
  125. package/types/system.ts +707 -0
  126. package/utils/calc.ts +126 -0
  127. package/utils/data.ts +497 -0
  128. package/utils/event-props.ts +836 -0
  129. package/utils/id.ts +80 -0
@@ -0,0 +1,235 @@
1
+ /* Auto generated by build script
2
+ *
3
+ * On-device LLM inference using MediaTek NeuroPilot native SDK integration on Android
4
+ */
5
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
6
+ import type { Data, DataLink } from '../data'
7
+ import type {
8
+ Brick,
9
+ Generator,
10
+ EventAction,
11
+ ActionWithDataParams,
12
+ ActionWithParams,
13
+ Action,
14
+ EventProperty,
15
+ } from '../common'
16
+ import type { TemplateEventPropsMap } from '../../utils/event-props'
17
+
18
+ /* Load or validate the NeuroPilot model context */
19
+ export type GeneratorNeuropilotLlmActionLoadModel = ActionWithParams & {
20
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_LOAD_MODEL'
21
+ params?: Array<
22
+ | {
23
+ input: 'runnerPath'
24
+ value?: string | DataLink | EventProperty
25
+ mapping?: string
26
+ }
27
+ | {
28
+ input: 'configPath'
29
+ value?: string | DataLink | EventProperty
30
+ mapping?: string
31
+ }
32
+ | {
33
+ input: 'workingDirectory'
34
+ value?: string | DataLink | EventProperty
35
+ mapping?: string
36
+ }
37
+ | {
38
+ input: 'libraryPaths'
39
+ value?: Array<any> | DataLink | EventProperty
40
+ mapping?: string
41
+ }
42
+ | {
43
+ input: 'daemonHost'
44
+ value?: string | DataLink | EventProperty
45
+ mapping?: string
46
+ }
47
+ | {
48
+ input: 'daemonPort'
49
+ value?: number | DataLink | EventProperty
50
+ mapping?: string
51
+ }
52
+ | {
53
+ input: 'daemonSocketName'
54
+ value?: string | DataLink | EventProperty
55
+ mapping?: string
56
+ }
57
+ | {
58
+ input: 'daemonSocketNamespace'
59
+ value?: string | DataLink | EventProperty
60
+ mapping?: string
61
+ }
62
+ >
63
+ }
64
+
65
+ /* Run text generation with the current NeuroPilot context */
66
+ export type GeneratorNeuropilotLlmActionGenerate = ActionWithParams & {
67
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_GENERATE'
68
+ params?: Array<
69
+ | {
70
+ input: 'prompt'
71
+ value?: string | DataLink | EventProperty
72
+ mapping?: string
73
+ }
74
+ | {
75
+ input: 'messages'
76
+ value?: Array<any> | DataLink | EventProperty
77
+ mapping?: string
78
+ }
79
+ | {
80
+ input: 'maxNewTokens'
81
+ value?: number | DataLink | EventProperty
82
+ mapping?: string
83
+ }
84
+ | {
85
+ input: 'preformatter'
86
+ value?: string | DataLink | EventProperty
87
+ mapping?: string
88
+ }
89
+ >
90
+ }
91
+
92
+ /* Abort an in-flight NeuroPilot generation request */
93
+ export type GeneratorNeuropilotLlmActionAbortGeneration = Action & {
94
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_ABORT_GENERATION'
95
+ }
96
+
97
+ /* Release the current NeuroPilot context */
98
+ export type GeneratorNeuropilotLlmActionReleaseContext = Action & {
99
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_RELEASE_CONTEXT'
100
+ }
101
+
102
+ interface GeneratorNeuropilotLlmDef {
103
+ /*
104
+ Default property:
105
+ {
106
+ "runnerPath": "/data/local/tmp/llm_sdk/main",
107
+ "configPath": "/data/local/tmp/llm_sdk/config_gemma2_2b_instruct.yaml",
108
+ "workingDirectory": "/data/local/tmp/llm_sdk",
109
+ "libraryPaths": [
110
+ "/vendor/lib64",
111
+ "/system_ext/lib64",
112
+ "/vendor/lib",
113
+ "/system_ext/lib"
114
+ ],
115
+ "runtimeMode": "auto",
116
+ "preformatter": "GemmaNoInput",
117
+ "maxNewTokens": 128
118
+ }
119
+ */
120
+ property?: {
121
+ /* Preloadable NeuroPilot model bundle preset */
122
+ modelBundle?:
123
+ | 'Gemma 2 2B Instruct'
124
+ | 'Gemma 2 2B Instruct (Tailpatched)'
125
+ | 'Gemma 2 2B Instruct (Tailpatched MDLA53)'
126
+ | 'Qwen 2.5 0.5B Instruct'
127
+ | 'Qwen 2.5 1.5B Instruct'
128
+ | 'Llama 3 8B Instruct'
129
+ | DataLink
130
+ /* Override base URL for NeuroPilot model bundle downloads */
131
+ modelBaseUrl?: string | DataLink
132
+ /* Validate runner/config paths on generator initialization */
133
+ init?: boolean | DataLink
134
+ /* Runner binary path on device */
135
+ runnerPath?: string | DataLink
136
+ /* YAML config path on device */
137
+ configPath?: string | DataLink
138
+ /* Working directory for the runner process */
139
+ workingDirectory?: string | DataLink
140
+ /* Extra library search paths for the runner process */
141
+ libraryPaths?: Array<string | DataLink> | DataLink
142
+ /* Daemon host for a preloaded NeuroPilot service */
143
+ daemonHost?: string | DataLink
144
+ /* Daemon TCP port for a preloaded NeuroPilot service */
145
+ daemonPort?: number | DataLink
146
+ /* Android local socket name for a privileged NeuroPilot service */
147
+ daemonSocketName?: string | DataLink
148
+ /* Android local socket namespace used by the privileged NeuroPilot service */
149
+ daemonSocketNamespace?: 'abstract' | 'reserved' | 'filesystem' | DataLink
150
+ /* Runtime selection strategy for MediaTek execution */
151
+ runtimeMode?: 'auto' | 'daemon' | 'root-runner' | 'native' | DataLink
152
+ /* Prompt preformatter used by the vendor runner */
153
+ preformatter?:
154
+ | 'AlpacaNoInput'
155
+ | 'OneShotConversation'
156
+ | 'VicunaNoInput'
157
+ | 'QwenNoInput'
158
+ | 'Qwen3NoInput'
159
+ | 'Qwen3NoInputNoThink'
160
+ | 'Llama3NoInput'
161
+ | 'Phi3NoInput'
162
+ | 'MinicpmNoInput'
163
+ | 'MinicpmNoInputZh'
164
+ | 'InternLM2'
165
+ | 'GemmaNoInput'
166
+ | DataLink
167
+ /* Prompt to run */
168
+ prompt?: string | DataLink
169
+ /* Chat messages to flatten into a prompt */
170
+ messages?: Array<DataLink | {}> | DataLink
171
+ /* Maximum tokens requested from the runner */
172
+ maxNewTokens?: number | DataLink
173
+ }
174
+ events?: {
175
+ /* Event triggered when the NeuroPilot context state changes */
176
+ onContextStateChange?: Array<
177
+ EventAction<string & keyof TemplateEventPropsMap['NeuropilotLlm']['onContextStateChange']>
178
+ >
179
+ /* Event triggered when a completion token or partial result is emitted */
180
+ onCompletion?: Array<
181
+ EventAction<string & keyof TemplateEventPropsMap['NeuropilotLlm']['onCompletion']>
182
+ >
183
+ /* Event triggered when generation finishes */
184
+ onCompletionFinished?: Array<
185
+ EventAction<string & keyof TemplateEventPropsMap['NeuropilotLlm']['onCompletionFinished']>
186
+ >
187
+ /* Event triggered when a NeuroPilot error occurs */
188
+ onError?: Array<EventAction<string & keyof TemplateEventPropsMap['NeuropilotLlm']['onError']>>
189
+ }
190
+ outlets?: {
191
+ /* Current NeuroPilot context state */
192
+ contextState?: () => Data<string>
193
+ /* Final generated result text */
194
+ result?: () => Data<string>
195
+ /* Full context returned by the runner */
196
+ fullContext?: () => Data<string>
197
+ /* Last emitted token or chunk */
198
+ lastToken?: () => Data<string>
199
+ /* Raw output captured from the NeuroPilot runner */
200
+ rawOutput?: () => Data<string>
201
+ /* Prompt-phase performance in tokens per second */
202
+ promptTokensPerSec?: () => Data<string>
203
+ /* Generation-phase performance in tokens per second */
204
+ generationTokensPerSec?: () => Data<string>
205
+ }
206
+ }
207
+
208
+ /* On-device LLM inference using MediaTek NeuroPilot native SDK integration on Android */
209
+ export type GeneratorNeuropilotLlm = Generator &
210
+ GeneratorNeuropilotLlmDef & {
211
+ templateKey: 'GENERATOR_NEUROPILOT_LLM'
212
+ switches?: Array<
213
+ SwitchDef &
214
+ GeneratorNeuropilotLlmDef & {
215
+ conds?: Array<{
216
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
217
+ cond:
218
+ | SwitchCondInnerStateCurrentCanvas
219
+ | SwitchCondData
220
+ | {
221
+ __typename: 'SwitchCondInnerStateOutlet'
222
+ outlet:
223
+ | 'contextState'
224
+ | 'result'
225
+ | 'fullContext'
226
+ | 'lastToken'
227
+ | 'rawOutput'
228
+ | 'promptTokensPerSec'
229
+ | 'generationTokensPerSec'
230
+ value: any
231
+ }
232
+ }>
233
+ }
234
+ >
235
+ }
@@ -0,0 +1,227 @@
1
+ /* Auto generated by build script
2
+ *
3
+ * On-device LLM inference using Apple MLX framework on iOS/tvOS
4
+ *
5
+ * ## Features
6
+ * - Powered by MLX (Apple's ML framework optimized for Apple Silicon)
7
+ * - Download models directly from HuggingFace Hub
8
+ * - Streaming token generation
9
+ * - Supports LLM and VLM (Vision Language Models)
10
+ * - Requires iOS 17+ or tvOS 17+
11
+ */
12
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
13
+ import type { Data, DataLink } from '../data'
14
+ import type {
15
+ Brick,
16
+ Generator,
17
+ EventAction,
18
+ ActionWithDataParams,
19
+ ActionWithParams,
20
+ Action,
21
+ EventProperty,
22
+ } from '../common'
23
+ import type { TemplateEventPropsMap } from '../../utils/event-props'
24
+
25
+ /* Load model */
26
+ export type GeneratorMlxLLMActionLoadModel = ActionWithParams & {
27
+ __actionName: 'GENERATOR_MLX_LLM_LOAD_MODEL'
28
+ params?: Array<
29
+ | {
30
+ input: 'modelId'
31
+ value?: string | DataLink | EventProperty
32
+ mapping?: string
33
+ }
34
+ | {
35
+ input: 'vlm'
36
+ value?: boolean | DataLink | EventProperty
37
+ mapping?: string
38
+ }
39
+ >
40
+ }
41
+
42
+ /* Run text completion */
43
+ export type GeneratorMlxLLMActionCompletion = ActionWithParams & {
44
+ __actionName: 'GENERATOR_MLX_LLM_COMPLETION'
45
+ params?: Array<
46
+ | {
47
+ input: 'messages'
48
+ value?: Array<any> | DataLink | EventProperty
49
+ mapping?: string
50
+ }
51
+ | {
52
+ input: 'tools'
53
+ value?: Array<any> | DataLink | EventProperty
54
+ mapping?: string
55
+ }
56
+ | {
57
+ input: 'maxTokens'
58
+ value?: number | DataLink | EventProperty
59
+ mapping?: string
60
+ }
61
+ | {
62
+ input: 'temperature'
63
+ value?: number | DataLink | EventProperty
64
+ mapping?: string
65
+ }
66
+ | {
67
+ input: 'topP'
68
+ value?: number | DataLink | EventProperty
69
+ mapping?: string
70
+ }
71
+ | {
72
+ input: 'repetitionPenalty'
73
+ value?: number | DataLink | EventProperty
74
+ mapping?: string
75
+ }
76
+ >
77
+ }
78
+
79
+ /* Stop text completion */
80
+ export type GeneratorMlxLLMActionStopCompletion = Action & {
81
+ __actionName: 'GENERATOR_MLX_LLM_STOP_COMPLETION'
82
+ }
83
+
84
+ /* Release model context */
85
+ export type GeneratorMlxLLMActionReleaseContext = Action & {
86
+ __actionName: 'GENERATOR_MLX_LLM_RELEASE_CONTEXT'
87
+ }
88
+
89
+ interface GeneratorMlxLLMDef {
90
+ /*
91
+ Default property:
92
+ {
93
+ "init": false,
94
+ "modelId": "mlx-community/Qwen3-4B-4bit",
95
+ "vlm": false,
96
+ "completionMessages": [
97
+ {
98
+ "role": "system",
99
+ "content": "You are a helpful assistant."
100
+ },
101
+ {
102
+ "role": "user",
103
+ "content": "Hello"
104
+ }
105
+ ],
106
+ "completionMaxTokens": 1024,
107
+ "completionTemperature": 0.6,
108
+ "completionTopP": 1,
109
+ "completionRepetitionContextSize": 20
110
+ }
111
+ */
112
+ property?: {
113
+ /* Initialize model on generator init */
114
+ init?: boolean | DataLink
115
+ /* HuggingFace model ID or local path to model directory
116
+ e.g. "mlx-community/Qwen3-4B-4bit" */
117
+ modelId?: string | DataLink
118
+ /* Enable Vision Language Model (VLM) mode */
119
+ vlm?: boolean | DataLink
120
+ /* Chat messages (if first message has role 'system', it will be used as system prompt) */
121
+ completionMessages?:
122
+ | Array<
123
+ | DataLink
124
+ | {
125
+ role?: string | DataLink
126
+ content?: string | DataLink
127
+ }
128
+ >
129
+ | DataLink
130
+ /* Maximum tokens to generate */
131
+ completionMaxTokens?: number | DataLink
132
+ /* Temperature (0.0 to 2.0) */
133
+ completionTemperature?: number | DataLink
134
+ /* Top P sampling */
135
+ completionTopP?: number | DataLink
136
+ /* Repetition penalty factor */
137
+ completionRepetitionPenalty?: number | DataLink
138
+ /* Number of tokens to consider for repetition penalty */
139
+ completionRepetitionContextSize?: number | DataLink
140
+ /* Maximum KV cache size (context window). Uses rotating cache to limit memory.
141
+ Smaller values (e.g. 512) use less memory but lower quality.
142
+ Larger values (e.g. 4096) use more memory but better quality.
143
+ Leave empty for unlimited (model default). */
144
+ contextSize?: number | DataLink
145
+ /* Enable thinking mode (model-dependent, e.g. Qwen3).
146
+ When enabled, passes enable_thinking=true to the chat template.
147
+ The model may output reasoning in &lt;think&gt; tags which will be extracted as reasoning_content. */
148
+ completionEnableThinking?: boolean | DataLink
149
+ /* Additional keyword arguments for chat template (object) */
150
+ completionChatTemplateKwargs?: {} | DataLink
151
+ /* Tools for chat mode using OpenAI-compatible function calling format
152
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
153
+ See: https://platform.openai.com/docs/guides/function-calling */
154
+ completionTools?: Array<{} | DataLink> | DataLink
155
+ /* Buttress connection settings for remote inference */
156
+ buttressConnectionSettings?:
157
+ | DataLink
158
+ | {
159
+ enabled?: boolean | DataLink
160
+ url?: string | DataLink
161
+ fallbackType?: 'use-local' | 'no-op' | DataLink
162
+ strategy?: 'prefer-local' | 'prefer-buttress' | 'prefer-best' | DataLink
163
+ }
164
+ }
165
+ events?: {
166
+ /* Event triggered when context state changes */
167
+ onContextStateChange?: Array<
168
+ EventAction<string & keyof TemplateEventPropsMap['MlxLlm']['onContextStateChange']>
169
+ >
170
+ /* Error event */
171
+ onError?: Array<EventAction<string & keyof TemplateEventPropsMap['MlxLlm']['onError']>>
172
+ /* Completion streaming event (emitted for each token) */
173
+ onCompletion?: Array<
174
+ EventAction<string & keyof TemplateEventPropsMap['MlxLlm']['onCompletion']>
175
+ >
176
+ /* Completion finished event */
177
+ onCompletionFinished?: Array<
178
+ EventAction<string & keyof TemplateEventPropsMap['MlxLlm']['onCompletionFinished']>
179
+ >
180
+ }
181
+ outlets?: {
182
+ /* Context state */
183
+ contextState?: () => Data<string>
184
+ /* Model load progress (0-1) */
185
+ loadProgress?: () => Data<number>
186
+ /* Whether the model is evaluating */
187
+ isEvaluating?: () => Data<boolean>
188
+ /* Completion result */
189
+ completionResult?: () => Data<string>
190
+ /* Last token */
191
+ completionLastToken?: () => Data<string>
192
+ }
193
+ }
194
+
195
+ /* On-device LLM inference using Apple MLX framework on iOS/tvOS
196
+
197
+ ## Features
198
+ - Powered by MLX (Apple's ML framework optimized for Apple Silicon)
199
+ - Download models directly from HuggingFace Hub
200
+ - Streaming token generation
201
+ - Supports LLM and VLM (Vision Language Models)
202
+ - Requires iOS 17+ or tvOS 17+ */
203
+ export type GeneratorMlxLLM = Generator &
204
+ GeneratorMlxLLMDef & {
205
+ templateKey: 'GENERATOR_MLX_LLM'
206
+ switches?: Array<
207
+ SwitchDef &
208
+ GeneratorMlxLLMDef & {
209
+ conds?: Array<{
210
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
211
+ cond:
212
+ | SwitchCondInnerStateCurrentCanvas
213
+ | SwitchCondData
214
+ | {
215
+ __typename: 'SwitchCondInnerStateOutlet'
216
+ outlet:
217
+ | 'contextState'
218
+ | 'loadProgress'
219
+ | 'isEvaluating'
220
+ | 'completionResult'
221
+ | 'completionLastToken'
222
+ value: any
223
+ }
224
+ }>
225
+ }
226
+ >
227
+ }
@@ -0,0 +1,213 @@
1
+ /* Auto generated by build script
2
+ *
3
+ * Local LLM inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
4
+ * You can use any converted model on HuggingFace.
5
+ */
6
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
7
+ import type { Data, DataLink } from '../data'
8
+ import type {
9
+ Brick,
10
+ Generator,
11
+ EventAction,
12
+ ActionWithDataParams,
13
+ ActionWithParams,
14
+ Action,
15
+ EventProperty,
16
+ } from '../common'
17
+ import type { TemplateEventPropsMap } from '../../utils/event-props'
18
+
19
+ /* Load the model */
20
+ export type GeneratorOnnxLLMActionLoadModel = Action & {
21
+ __actionName: 'GENERATOR_ONNX_LLM_LOAD_MODEL'
22
+ }
23
+
24
+ /* Inference */
25
+ export type GeneratorOnnxLLMActionInfer = ActionWithParams & {
26
+ __actionName: 'GENERATOR_ONNX_LLM_INFER'
27
+ params?: Array<
28
+ | {
29
+ input: 'prompt'
30
+ value?: string | DataLink | EventProperty
31
+ mapping?: string
32
+ }
33
+ | {
34
+ input: 'chat'
35
+ value?: Array<any> | DataLink | EventProperty
36
+ mapping?: string
37
+ }
38
+ | {
39
+ input: 'images'
40
+ value?: Array<any> | DataLink | EventProperty
41
+ mapping?: string
42
+ }
43
+ | {
44
+ input: 'audios'
45
+ value?: Array<any> | DataLink | EventProperty
46
+ mapping?: string
47
+ }
48
+ | {
49
+ input: 'tools'
50
+ value?: Array<any> | DataLink | EventProperty
51
+ mapping?: string
52
+ }
53
+ | {
54
+ input: 'toolChoice'
55
+ value?: string | DataLink | EventProperty
56
+ mapping?: string
57
+ }
58
+ >
59
+ }
60
+
61
+ /* Clean cache */
62
+ export type GeneratorOnnxLLMActionCleanCache = Action & {
63
+ __actionName: 'GENERATOR_ONNX_LLM_CLEAN_CACHE'
64
+ }
65
+
66
+ /* Release context */
67
+ export type GeneratorOnnxLLMActionReleaseContext = Action & {
68
+ __actionName: 'GENERATOR_ONNX_LLM_RELEASE_CONTEXT'
69
+ }
70
+
71
+ interface GeneratorOnnxLLMDef {
72
+ /*
73
+ Default property:
74
+ {
75
+ "modelType": "auto",
76
+ "toolCallParser": "llama3_json",
77
+ "toolChoice": "auto",
78
+ "maxNewTokens": 256,
79
+ "temperature": 0.7,
80
+ "topK": 50,
81
+ "topP": 0.9,
82
+ "repetitionPenalty": 1,
83
+ "noRepeatNgramSize": 0,
84
+ "numBeams": 1,
85
+ "doSample": true,
86
+ "executionMode": "sequential"
87
+ }
88
+ */
89
+ property?: {
90
+ /* Initialize the TTS context on generator initialization */
91
+ init?: boolean | DataLink
92
+ /* LLM model */
93
+ model?: string | DataLink
94
+ /* Model type */
95
+ modelType?: string | DataLink
96
+ /* Quantize type */
97
+ quantizeType?:
98
+ | 'auto'
99
+ | 'none'
100
+ | 'fp16'
101
+ | 'q8'
102
+ | 'int8'
103
+ | 'uint8'
104
+ | 'q4'
105
+ | 'bnb4'
106
+ | 'q4f16'
107
+ | DataLink
108
+ /* Prompt to inference */
109
+ prompt?: string | DataLink
110
+ /* Messages to inference */
111
+ messages?: Array<DataLink | {}> | DataLink
112
+ /* Images with message to inference */
113
+ images?: Array<string | DataLink> | DataLink
114
+ /* Audios with message to inference */
115
+ audios?: Array<string | DataLink> | DataLink
116
+ /* Tool call parser */
117
+ toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
118
+ /* Tools for chat mode using OpenAI-compatible function calling format
119
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
120
+ See: https://platform.openai.com/docs/guides/function-calling */
121
+ tools?: Array<{} | DataLink> | DataLink
122
+ /* Tool choice for chat mode */
123
+ toolChoice?: 'none' | 'auto' | DataLink
124
+ /* Max new tokens to generate */
125
+ maxNewTokens?: number | DataLink
126
+ /* Temperature */
127
+ temperature?: number | DataLink
128
+ /* Top k */
129
+ topK?: number | DataLink
130
+ /* Top p */
131
+ topP?: number | DataLink
132
+ /* Repetition penalty */
133
+ repetitionPenalty?: number | DataLink
134
+ /* No repeat ngram size */
135
+ noRepeatNgramSize?: number | DataLink
136
+ /* Number of beams */
137
+ numBeams?: number | DataLink
138
+ /* Do sampling */
139
+ doSample?: boolean | DataLink
140
+ /* Executor candidates, descending order of priority
141
+ Default will be xnnpack, wasm, cpu */
142
+ executors?:
143
+ | Array<'qnn' | 'dml' | 'nnapi' | 'xnnpack' | 'coreml' | 'cpu' | 'wasm' | 'webgpu' | DataLink>
144
+ | DataLink
145
+ /* Execution mode
146
+ Usually when the model has many branches, setting this option to `parallel` will give you better performance. */
147
+ executionMode?: 'sequential' | 'parallel' | DataLink
148
+ /* QNN backend */
149
+ qnnBackend?: 'HTP' | 'HTA' | 'DSP' | 'GPU' | 'CPU' | DataLink
150
+ /* Enable FP16 for QNN HTP */
151
+ qnnHtpEnableFp16?: boolean | DataLink
152
+ /* Enable QNN debug */
153
+ qnnEnableDebug?: boolean | DataLink
154
+ }
155
+ events?: {
156
+ /* Event triggered when state change */
157
+ onContextStateChange?: Array<
158
+ EventAction<string & keyof TemplateEventPropsMap['OnnxLlm']['onContextStateChange']>
159
+ >
160
+ /* Event triggered on get function call request */
161
+ onFunctionCall?: Array<
162
+ EventAction<string & keyof TemplateEventPropsMap['OnnxLlm']['onFunctionCall']>
163
+ >
164
+ /* Event triggered on completion finished */
165
+ onCompletionFinished?: Array<
166
+ EventAction<string & keyof TemplateEventPropsMap['OnnxLlm']['onCompletionFinished']>
167
+ >
168
+ /* Event triggered when error occurs */
169
+ onError?: Array<EventAction<string & keyof TemplateEventPropsMap['OnnxLlm']['onError']>>
170
+ }
171
+ outlets?: {
172
+ /* Context state */
173
+ contextState?: () => Data<string>
174
+ /* Generated output */
175
+ generated?: () => Data<string>
176
+ /* Full result of generation */
177
+ fullResult?: () => Data<string>
178
+ /* Last function call */
179
+ lastFunctionCall?: () => Data<{
180
+ id?: string
181
+ type?: string
182
+ function?: {
183
+ name?: string
184
+ arguments?: string
185
+ [key: string]: any
186
+ }
187
+ [key: string]: any
188
+ }>
189
+ }
190
+ }
191
+
192
+ /* Local LLM inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
193
+ You can use any converted model on HuggingFace. */
194
+ export type GeneratorOnnxLLM = Generator &
195
+ GeneratorOnnxLLMDef & {
196
+ templateKey: 'GENERATOR_ONNX_LLM'
197
+ switches?: Array<
198
+ SwitchDef &
199
+ GeneratorOnnxLLMDef & {
200
+ conds?: Array<{
201
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
202
+ cond:
203
+ | SwitchCondInnerStateCurrentCanvas
204
+ | SwitchCondData
205
+ | {
206
+ __typename: 'SwitchCondInnerStateOutlet'
207
+ outlet: 'contextState' | 'generated' | 'fullResult' | 'lastFunctionCall'
208
+ value: any
209
+ }
210
+ }>
211
+ }
212
+ >
213
+ }