@fugood/bricks-project 2.24.0-beta.3 → 2.24.0-beta.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/compile/action-name-map.ts +36 -0
  2. package/compile/index.ts +400 -146
  3. package/compile/util.ts +2 -0
  4. package/package.json +8 -3
  5. package/skills/bricks-project/rules/architecture-patterns.md +7 -0
  6. package/skills/bricks-project/rules/automations.md +74 -28
  7. package/skills/bricks-project/rules/buttress.md +9 -6
  8. package/tools/deploy.ts +66 -12
  9. package/tools/mcp-server.ts +10 -877
  10. package/tools/mcp-tools/compile.ts +91 -0
  11. package/tools/mcp-tools/huggingface.ts +762 -0
  12. package/tools/mcp-tools/icons.ts +60 -0
  13. package/tools/mcp-tools/lottie.ts +102 -0
  14. package/tools/mcp-tools/media.ts +110 -0
  15. package/tools/postinstall.ts +121 -33
  16. package/tools/preview-main.mjs +12 -8
  17. package/tools/pull.ts +37 -19
  18. package/tsconfig.json +16 -0
  19. package/types/animation.ts +4 -0
  20. package/types/automation.ts +3 -0
  21. package/types/brick-base.ts +1 -1
  22. package/types/bricks/Camera.ts +34 -7
  23. package/types/bricks/Chart.ts +1 -1
  24. package/types/bricks/GenerativeMedia.ts +6 -6
  25. package/types/bricks/Icon.ts +3 -3
  26. package/types/bricks/Image.ts +4 -4
  27. package/types/bricks/Items.ts +7 -7
  28. package/types/bricks/Lottie.ts +4 -4
  29. package/types/bricks/Maps.ts +4 -4
  30. package/types/bricks/QrCode.ts +4 -4
  31. package/types/bricks/Rect.ts +4 -4
  32. package/types/bricks/RichText.ts +3 -3
  33. package/types/bricks/Rive.ts +1 -1
  34. package/types/bricks/Slideshow.ts +4 -4
  35. package/types/bricks/Svg.ts +3 -3
  36. package/types/bricks/Text.ts +4 -4
  37. package/types/bricks/TextInput.ts +11 -7
  38. package/types/bricks/Video.ts +4 -4
  39. package/types/bricks/VideoStreaming.ts +3 -3
  40. package/types/bricks/WebRtcStream.ts +1 -1
  41. package/types/bricks/WebView.ts +4 -4
  42. package/types/canvas.ts +4 -2
  43. package/types/common.ts +9 -4
  44. package/types/data-calc-command.ts +2 -0
  45. package/types/data-calc.ts +1 -0
  46. package/types/data.ts +2 -0
  47. package/types/generators/AlarmClock.ts +5 -5
  48. package/types/generators/Assistant.ts +57 -12
  49. package/types/generators/BleCentral.ts +12 -4
  50. package/types/generators/BlePeripheral.ts +5 -5
  51. package/types/generators/CanvasMap.ts +4 -4
  52. package/types/generators/CastlesPay.ts +3 -3
  53. package/types/generators/DataBank.ts +31 -4
  54. package/types/generators/File.ts +63 -14
  55. package/types/generators/GraphQl.ts +3 -3
  56. package/types/generators/Http.ts +27 -8
  57. package/types/generators/HttpServer.ts +9 -9
  58. package/types/generators/Information.ts +2 -2
  59. package/types/generators/Intent.ts +8 -2
  60. package/types/generators/Iterator.ts +6 -6
  61. package/types/generators/Keyboard.ts +18 -8
  62. package/types/generators/LlmAnthropicCompat.ts +12 -6
  63. package/types/generators/LlmAppleBuiltin.ts +6 -6
  64. package/types/generators/LlmGgml.ts +101 -25
  65. package/types/generators/LlmMediaTekNeuroPilot.ts +225 -0
  66. package/types/generators/LlmMlx.ts +210 -0
  67. package/types/generators/LlmOnnx.ts +18 -9
  68. package/types/generators/LlmOpenAiCompat.ts +22 -6
  69. package/types/generators/LlmQualcommAiEngine.ts +32 -8
  70. package/types/generators/Mcp.ts +332 -17
  71. package/types/generators/McpServer.ts +38 -11
  72. package/types/generators/MediaFlow.ts +26 -8
  73. package/types/generators/MqttBroker.ts +10 -4
  74. package/types/generators/MqttClient.ts +11 -5
  75. package/types/generators/Question.ts +6 -6
  76. package/types/generators/RealtimeTranscription.ts +82 -11
  77. package/types/generators/RerankerGgml.ts +23 -9
  78. package/types/generators/SerialPort.ts +6 -6
  79. package/types/generators/SoundPlayer.ts +2 -2
  80. package/types/generators/SoundRecorder.ts +17 -6
  81. package/types/generators/SpeechToTextGgml.ts +34 -14
  82. package/types/generators/SpeechToTextOnnx.ts +8 -8
  83. package/types/generators/SpeechToTextPlatform.ts +4 -4
  84. package/types/generators/SqLite.ts +10 -6
  85. package/types/generators/Step.ts +3 -3
  86. package/types/generators/SttAppleBuiltin.ts +6 -6
  87. package/types/generators/Tcp.ts +5 -5
  88. package/types/generators/TcpServer.ts +7 -7
  89. package/types/generators/TextToSpeechApple.ts +1 -1
  90. package/types/generators/TextToSpeechAppleBuiltin.ts +5 -5
  91. package/types/generators/TextToSpeechGgml.ts +8 -8
  92. package/types/generators/TextToSpeechOnnx.ts +9 -9
  93. package/types/generators/TextToSpeechOpenAiLike.ts +5 -5
  94. package/types/generators/ThermalPrinter.ts +6 -6
  95. package/types/generators/Tick.ts +3 -3
  96. package/types/generators/Udp.ts +9 -4
  97. package/types/generators/VadGgml.ts +39 -10
  98. package/types/generators/VadOnnx.ts +31 -8
  99. package/types/generators/VadTraditional.ts +15 -9
  100. package/types/generators/VectorStore.ts +26 -9
  101. package/types/generators/Watchdog.ts +11 -6
  102. package/types/generators/WebCrawler.ts +5 -5
  103. package/types/generators/WebRtc.ts +17 -11
  104. package/types/generators/WebSocket.ts +5 -5
  105. package/types/generators/index.ts +2 -0
  106. package/types/subspace.ts +3 -0
  107. package/types/system.ts +1 -1
  108. package/utils/calc.ts +12 -8
  109. package/utils/event-props.ts +124 -87
  110. package/utils/id.ts +4 -0
  111. package/api/index.ts +0 -1
  112. package/api/instance.ts +0 -213
@@ -119,7 +119,7 @@ Default property:
119
119
  }
120
120
  >
121
121
  | DataLink
122
- /* Tools for function calling following Anthropic format
122
+ /* Tools for function calling following Anthropic format
123
123
  Format: Array of objects with {name, description, input_schema} structure
124
124
  See: https://docs.anthropic.com/en/docs/tool-use */
125
125
  completionTools?: Array<{} | DataLink> | DataLink
@@ -153,15 +153,21 @@ Default property:
153
153
  }
154
154
  outlets?: {
155
155
  /* Evaluating outlet */
156
- isEvaluating?: () => Data
156
+ isEvaluating?: () => Data<boolean>
157
157
  /* Completion result outlet */
158
- completionResult?: () => Data
158
+ completionResult?: () => Data<string>
159
159
  /* Completion details outlet */
160
- completionDetails?: () => Data
160
+ completionDetails?: () => Data<{
161
+ model?: string
162
+ stop_reason?: string
163
+ usage?: { [key: string]: any }
164
+ content?: Array<any>
165
+ [key: string]: any
166
+ }>
161
167
  }
162
168
  }
163
169
 
164
- /* LLM inference using Anthropic-compatible API endpoints
170
+ /* LLM inference using Anthropic-compatible API endpoints
165
171
 
166
172
  ## Features
167
173
  - Compatible with Anthropic API format
@@ -171,7 +177,7 @@ Default property:
171
177
  export type GeneratorAnthropicLLM = Generator &
172
178
  GeneratorAnthropicLLMDef & {
173
179
  templateKey: 'GENERATOR_ANTHROPIC_LLM'
174
- switches: Array<
180
+ switches?: Array<
175
181
  SwitchDef &
176
182
  GeneratorAnthropicLLMDef & {
177
183
  conds?: Array<{
@@ -100,17 +100,17 @@ Default property:
100
100
  }
101
101
  outlets?: {
102
102
  /* Context state outlet */
103
- contextState?: () => Data
103
+ contextState?: () => Data<string>
104
104
  /* Evaluating outlet */
105
- isEvaluating?: () => Data
105
+ isEvaluating?: () => Data<boolean>
106
106
  /* Completion result outlet */
107
- completionResult?: () => Data
107
+ completionResult?: () => Data<string>
108
108
  /* Last token outlet */
109
- completionLastToken?: () => Data
109
+ completionLastToken?: () => Data<string>
110
110
  }
111
111
  }
112
112
 
113
- /* LLM inference using Apple Intelligence on iOS/tvOS 26+
113
+ /* LLM inference using Apple Intelligence on iOS/tvOS 26+
114
114
 
115
115
  ## Features
116
116
  - Native Apple Intelligence integration
@@ -121,7 +121,7 @@ Default property:
121
121
  export type GeneratorAppleLLM = Generator &
122
122
  GeneratorAppleLLMDef & {
123
123
  templateKey: 'GENERATOR_APPLE_LLM'
124
- switches: Array<
124
+ switches?: Array<
125
125
  SwitchDef &
126
126
  GeneratorAppleLLMDef & {
127
127
  conds?: Array<{
@@ -97,6 +97,16 @@ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
97
97
  value?: boolean | DataLink | EventProperty
98
98
  mapping?: string
99
99
  }
100
+ | {
101
+ input: 'thinkingBudgetTokens'
102
+ value?: number | DataLink | EventProperty
103
+ mapping?: string
104
+ }
105
+ | {
106
+ input: 'thinkingBudgetMessage'
107
+ value?: string | DataLink | EventProperty
108
+ mapping?: string
109
+ }
100
110
  | {
101
111
  input: 'prompt'
102
112
  value?: string | DataLink | EventProperty
@@ -179,6 +189,16 @@ export type GeneratorLLMActionCompletion = ActionWithParams & {
179
189
  value?: boolean | DataLink | EventProperty
180
190
  mapping?: string
181
191
  }
192
+ | {
193
+ input: 'thinkingBudgetTokens'
194
+ value?: number | DataLink | EventProperty
195
+ mapping?: string
196
+ }
197
+ | {
198
+ input: 'thinkingBudgetMessage'
199
+ value?: string | DataLink | EventProperty
200
+ mapping?: string
201
+ }
182
202
  | {
183
203
  input: 'useReasoningFormat'
184
204
  value?: string | DataLink | EventProperty
@@ -459,10 +479,10 @@ Default property:
459
479
  }
460
480
  */
461
481
  property?: {
462
- /* Initialize the Llama context on generator initialization
482
+ /* Initialize the Llama context on generator initialization
463
483
  Please note that it will take some RAM depending on the model size */
464
484
  init?: boolean | DataLink
465
- /* The URL or path of model
485
+ /* The URL or path of model
466
486
  We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
467
487
  modelUrl?: string | DataLink
468
488
  /* Hash type of model */
@@ -477,10 +497,10 @@ Default property:
477
497
  mmprojHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
478
498
  /* Hash of mmproj file (PREVIEW FEATURE) */
479
499
  mmprojHash?: string | DataLink
480
- /* Minimum tokens for image encoding in multimodal (PREVIEW FEATURE)
500
+ /* Minimum tokens for image encoding in multimodal (PREVIEW FEATURE)
481
501
  Useful for dynamic resolution models (e.g. Qwen-VL). Default: -1 (auto) */
482
502
  imageMinTokens?: number | DataLink
483
- /* Maximum tokens for image encoding in multimodal (PREVIEW FEATURE)
503
+ /* Maximum tokens for image encoding in multimodal (PREVIEW FEATURE)
484
504
  Limit tokens for dynamic resolution models to balance speed vs. detail. Default: -1 (auto) */
485
505
  imageMaxTokens?: number | DataLink
486
506
  /* Chat Template (Jinja format) to override the default template from model */
@@ -493,13 +513,13 @@ Default property:
493
513
  uBatchSize?: number | DataLink
494
514
  /* Number of threads */
495
515
  maxThreads?: number | DataLink
496
- /* Accelerator variant (Only for desktop)
516
+ /* Accelerator variant (Only for desktop)
497
517
  `default` - CPU / Metal (macOS)
498
518
  `vulkan` - Use Vulkan
499
519
  `cuda` - Use CUDA
500
520
  `snapdragon` - Use OpenCL/Hexagon of Qualcomm Snapdragon */
501
521
  accelVariant?: 'default' | 'vulkan' | 'cuda' | 'snapdragon' | DataLink
502
- /* Devices. For example:
522
+ /* Devices. For example:
503
523
 
504
524
  Metal or CPU for iOS/tvOS/MacOS
505
525
  OpenCL or CPU for Android
@@ -514,6 +534,8 @@ Default property:
514
534
  useMlock?: boolean | DataLink
515
535
  /* Use mmap */
516
536
  useMmap?: boolean | DataLink
537
+ /* Disable extra buffer types for weight repacking. Reduces memory usage at the cost of slower prompt processing. */
538
+ noExtraBuffs?: boolean | DataLink
517
539
  /* Use Flash Attention for inference (Recommended with GPU enabled) */
518
540
  useFlashAttn?: 'auto' | 'on' | 'off' | DataLink
519
541
  /* KV cache data type for the K (Default: f16) */
@@ -534,7 +556,7 @@ Default property:
534
556
  transformScriptCode?: string | DataLink
535
557
  /* Variables used in Transform Script (object) */
536
558
  transformScriptVariables?: {} | DataLink
537
- /* Session save mode
559
+ /* Session save mode
538
560
  `none` - No session saving
539
561
  `prompt` - Save session when prompt processed
540
562
  `completion` - Save session when completion finished
@@ -546,7 +568,7 @@ Default property:
546
568
  sessionRemain?: number | DataLink
547
569
  /* TODO:loran_gqarms_norm_epsrope_freq_baserope_freq_scale */
548
570
  completionMode?: 'auto' | 'chat' | 'text' | DataLink
549
- /* Tools for chat mode using OpenAI-compatible function calling format
571
+ /* Tools for chat mode using OpenAI-compatible function calling format
550
572
  Format: Array of objects with {type, function: {name, description, parameters}} structure
551
573
  See: https://platform.openai.com/docs/guides/function-calling */
552
574
  completionTools?: Array<{} | DataLink> | DataLink
@@ -566,7 +588,7 @@ Default property:
566
588
  | DataLink
567
589
  /* Prompt (text mode) */
568
590
  completionPrompt?: string | DataLink
569
- /* Media paths to be used in the prompt template (PREVIEW FEATURE)
591
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
570
592
  In prompt, use `<__media__>` for position of media content */
571
593
  completionPromptMediaPaths?: Array<string | DataLink> | DataLink
572
594
  /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
@@ -588,13 +610,17 @@ Default property:
588
610
  }
589
611
  /* Enable thinking */
590
612
  completionEnableThinking?: boolean | DataLink
613
+ /* Maximum tokens allowed inside the model's thinking block before forcing it closed. Only applies when chat formatting exposes thinking tags. */
614
+ completionThinkingBudgetTokens?: number | DataLink
615
+ /* Message injected before the thinking end tag when the thinking budget is exhausted. */
616
+ completionThinkingBudgetMessage?: string | DataLink
591
617
  /* Add generation prompt */
592
618
  completionAddGenerationPrompt?: boolean | DataLink
593
619
  /* Now (for fill current date in chat template if supported) */
594
620
  completionNow?: string | DataLink
595
621
  /* Additional keyword arguments for chat template (object) */
596
622
  completionChatTemplateKwargs?: {} | DataLink
597
- /* Use reasoning format for enhanced response structure
623
+ /* Use reasoning format for enhanced response structure
598
624
  `auto` - Auto-determine the reasoning format of the model
599
625
  `none` - Disable reasoning format */
600
626
  completionUseReasoningFormat?: 'auto' | 'none' | DataLink
@@ -678,35 +704,85 @@ Default property:
678
704
  }
679
705
  outlets?: {
680
706
  /* Context state */
681
- contextState?: () => Data
707
+ contextState?: () => Data<string>
682
708
  /* Context load progress (0-100) */
683
- contextLoadProgress?: () => Data
709
+ contextLoadProgress?: () => Data<number>
684
710
  /* Context details */
685
- contextDetails?: () => Data
711
+ contextDetails?: () => Data<{
712
+ state?: string
713
+ contextId?: string
714
+ gpu?: string
715
+ reasonNoGPU?: string
716
+ model?: { [key: string]: any }
717
+ isMultimodalEnabled?: boolean
718
+ [key: string]: any
719
+ }>
686
720
  /* Session details */
687
- sessions?: () => Data
721
+ sessions?: () => Data<{
722
+ last_session_id?: string
723
+ sessions?: Array<{
724
+ id?: string
725
+ type?: string
726
+ prompt?: string
727
+ sessionKey?: string
728
+ model_instance_id?: string
729
+ tokens_evaluated?: number
730
+ t?: number
731
+ [key: string]: any
732
+ }>
733
+ last_custom_session_id?: string
734
+ custom_sessions?: Array<{
735
+ id?: string
736
+ type?: string
737
+ prompt?: string
738
+ sessionKey?: string
739
+ model_instance_id?: string
740
+ tokens_evaluated?: number
741
+ t?: number
742
+ [key: string]: any
743
+ }>
744
+ [key: string]: any
745
+ }>
688
746
  /* Is evaluating */
689
- isEvaluating?: () => Data
747
+ isEvaluating?: () => Data<boolean>
690
748
  /* Tokenize result */
691
- tokenizeResult?: () => Data
749
+ tokenizeResult?: () => Data<Array<number>>
692
750
  /* Detokenize result */
693
- detokenizeResult?: () => Data
751
+ detokenizeResult?: () => Data<string>
694
752
  /* Last formatted prompt (messages or prompt) */
695
- completionLastFormattedPrompt?: () => Data
753
+ completionLastFormattedPrompt?: () => Data<string>
696
754
  /* Last completion token */
697
- completionLastToken?: () => Data
755
+ completionLastToken?: () => Data<string>
698
756
  /* Completion result */
699
- completionResult?: () => Data
757
+ completionResult?: () => Data<string>
700
758
  /* Reasoning content from model responses */
701
- completionReasoningContent?: () => Data
759
+ completionReasoningContent?: () => Data<string>
702
760
  /* Full context (Prompt + Completion) */
703
- completionFullContext?: () => Data
761
+ completionFullContext?: () => Data<string>
704
762
  /* Inference result details */
705
- completionResultDetails?: () => Data
763
+ completionResultDetails?: () => Data<{
764
+ prompt?: string
765
+ full_context?: string
766
+ text?: string
767
+ content?: string
768
+ reasoning_content?: string
769
+ token?: string
770
+ tool_calls?: Array<{
771
+ id?: string
772
+ type?: string
773
+ function?: {
774
+ name?: string
775
+ arguments?: string
776
+ [key: string]: any
777
+ }
778
+ [key: string]: any
779
+ }>
780
+ [key: string]: any
781
+ }>
706
782
  }
707
783
  }
708
784
 
709
- /* Local Large Language Model (LLM) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
785
+ /* Local Large Language Model (LLM) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
710
786
 
711
787
  ## Notice
712
788
  - The device RAM must be larger than 8GB
@@ -720,7 +796,7 @@ Default property:
720
796
  export type GeneratorLLM = Generator &
721
797
  GeneratorLLMDef & {
722
798
  templateKey: 'GENERATOR_LLM'
723
- switches: Array<
799
+ switches?: Array<
724
800
  SwitchDef &
725
801
  GeneratorLLMDef & {
726
802
  conds?: Array<{
@@ -0,0 +1,225 @@
1
+ /* Auto generated by build script */
2
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
3
+ import type { Data, DataLink } from '../data'
4
+ import type {
5
+ Brick,
6
+ Generator,
7
+ EventAction,
8
+ ActionWithDataParams,
9
+ ActionWithParams,
10
+ Action,
11
+ EventProperty,
12
+ } from '../common'
13
+
14
+ /* Load or validate the NeuroPilot model context */
15
+ export type GeneratorNeuropilotLlmActionLoadModel = ActionWithParams & {
16
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_LOAD_MODEL'
17
+ params?: Array<
18
+ | {
19
+ input: 'runnerPath'
20
+ value?: string | DataLink | EventProperty
21
+ mapping?: string
22
+ }
23
+ | {
24
+ input: 'configPath'
25
+ value?: string | DataLink | EventProperty
26
+ mapping?: string
27
+ }
28
+ | {
29
+ input: 'workingDirectory'
30
+ value?: string | DataLink | EventProperty
31
+ mapping?: string
32
+ }
33
+ | {
34
+ input: 'libraryPaths'
35
+ value?: Array<any> | DataLink | EventProperty
36
+ mapping?: string
37
+ }
38
+ | {
39
+ input: 'daemonHost'
40
+ value?: string | DataLink | EventProperty
41
+ mapping?: string
42
+ }
43
+ | {
44
+ input: 'daemonPort'
45
+ value?: number | DataLink | EventProperty
46
+ mapping?: string
47
+ }
48
+ | {
49
+ input: 'daemonSocketName'
50
+ value?: string | DataLink | EventProperty
51
+ mapping?: string
52
+ }
53
+ | {
54
+ input: 'daemonSocketNamespace'
55
+ value?: string | DataLink | EventProperty
56
+ mapping?: string
57
+ }
58
+ >
59
+ }
60
+
61
+ /* Run text generation with the current NeuroPilot context */
62
+ export type GeneratorNeuropilotLlmActionGenerate = ActionWithParams & {
63
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_GENERATE'
64
+ params?: Array<
65
+ | {
66
+ input: 'prompt'
67
+ value?: string | DataLink | EventProperty
68
+ mapping?: string
69
+ }
70
+ | {
71
+ input: 'messages'
72
+ value?: Array<any> | DataLink | EventProperty
73
+ mapping?: string
74
+ }
75
+ | {
76
+ input: 'maxNewTokens'
77
+ value?: number | DataLink | EventProperty
78
+ mapping?: string
79
+ }
80
+ | {
81
+ input: 'preformatter'
82
+ value?: string | DataLink | EventProperty
83
+ mapping?: string
84
+ }
85
+ >
86
+ }
87
+
88
+ /* Abort an in-flight NeuroPilot generation request */
89
+ export type GeneratorNeuropilotLlmActionAbortGeneration = Action & {
90
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_ABORT_GENERATION'
91
+ }
92
+
93
+ /* Release the current NeuroPilot context */
94
+ export type GeneratorNeuropilotLlmActionReleaseContext = Action & {
95
+ __actionName: 'GENERATOR_NEUROPILOT_LLM_RELEASE_CONTEXT'
96
+ }
97
+
98
+ interface GeneratorNeuropilotLlmDef {
99
+ /*
100
+ Default property:
101
+ {
102
+ "runnerPath": "/data/local/tmp/llm_sdk/main",
103
+ "configPath": "/data/local/tmp/llm_sdk/config_gemma2_2b_instruct.yaml",
104
+ "workingDirectory": "/data/local/tmp/llm_sdk",
105
+ "libraryPaths": [
106
+ "/vendor/lib64",
107
+ "/system_ext/lib64",
108
+ "/vendor/lib",
109
+ "/system_ext/lib"
110
+ ],
111
+ "runtimeMode": "auto",
112
+ "preformatter": "GemmaNoInput",
113
+ "maxNewTokens": 128
114
+ }
115
+ */
116
+ property?: {
117
+ /* Preloadable NeuroPilot model bundle preset */
118
+ modelBundle?:
119
+ | 'Gemma 2 2B Instruct'
120
+ | 'Gemma 2 2B Instruct (Tailpatched)'
121
+ | 'Gemma 2 2B Instruct (Tailpatched MDLA53)'
122
+ | 'Qwen 2.5 0.5B Instruct'
123
+ | 'Qwen 2.5 1.5B Instruct'
124
+ | 'Llama 3 8B Instruct'
125
+ | DataLink
126
+ /* Override base URL for NeuroPilot model bundle downloads */
127
+ modelBaseUrl?: string | DataLink
128
+ /* Validate runner/config paths on generator initialization */
129
+ init?: boolean | DataLink
130
+ /* Runner binary path on device */
131
+ runnerPath?: string | DataLink
132
+ /* YAML config path on device */
133
+ configPath?: string | DataLink
134
+ /* Working directory for the runner process */
135
+ workingDirectory?: string | DataLink
136
+ /* Extra library search paths for the runner process */
137
+ libraryPaths?: Array<string | DataLink> | DataLink
138
+ /* Daemon host for a preloaded NeuroPilot service */
139
+ daemonHost?: string | DataLink
140
+ /* Daemon TCP port for a preloaded NeuroPilot service */
141
+ daemonPort?: number | DataLink
142
+ /* Android local socket name for a privileged NeuroPilot service */
143
+ daemonSocketName?: string | DataLink
144
+ /* Android local socket namespace used by the privileged NeuroPilot service */
145
+ daemonSocketNamespace?: 'abstract' | 'reserved' | 'filesystem' | DataLink
146
+ /* Runtime selection strategy for MediaTek execution */
147
+ runtimeMode?: 'auto' | 'daemon' | 'root-runner' | 'native' | DataLink
148
+ /* Prompt preformatter used by the vendor runner */
149
+ preformatter?:
150
+ | 'AlpacaNoInput'
151
+ | 'OneShotConversation'
152
+ | 'VicunaNoInput'
153
+ | 'QwenNoInput'
154
+ | 'Qwen3NoInput'
155
+ | 'Qwen3NoInputNoThink'
156
+ | 'Llama3NoInput'
157
+ | 'Phi3NoInput'
158
+ | 'MinicpmNoInput'
159
+ | 'MinicpmNoInputZh'
160
+ | 'InternLM2'
161
+ | 'GemmaNoInput'
162
+ | DataLink
163
+ /* Prompt to run */
164
+ prompt?: string | DataLink
165
+ /* Chat messages to flatten into a prompt */
166
+ messages?: Array<DataLink | {}> | DataLink
167
+ /* Maximum tokens requested from the runner */
168
+ maxNewTokens?: number | DataLink
169
+ }
170
+ events?: {
171
+ /* Event triggered when the NeuroPilot context state changes */
172
+ onContextStateChange?: Array<EventAction>
173
+ /* Event triggered when a completion token or partial result is emitted */
174
+ onCompletion?: Array<EventAction>
175
+ /* Event triggered when generation finishes */
176
+ onCompletionFinished?: Array<EventAction>
177
+ /* Event triggered when a NeuroPilot error occurs */
178
+ onError?: Array<EventAction>
179
+ }
180
+ outlets?: {
181
+ /* Current NeuroPilot context state */
182
+ contextState?: () => Data<string>
183
+ /* Final generated result text */
184
+ result?: () => Data<string>
185
+ /* Full context returned by the runner */
186
+ fullContext?: () => Data<string>
187
+ /* Last emitted token or chunk */
188
+ lastToken?: () => Data<string>
189
+ /* Raw output captured from the NeuroPilot runner */
190
+ rawOutput?: () => Data<string>
191
+ /* Prompt-phase performance in tokens per second */
192
+ promptTokensPerSec?: () => Data<string>
193
+ /* Generation-phase performance in tokens per second */
194
+ generationTokensPerSec?: () => Data<string>
195
+ }
196
+ }
197
+
198
+ /* On-device LLM inference using MediaTek NeuroPilot native SDK integration on Android */
199
+ export type GeneratorNeuropilotLlm = Generator &
200
+ GeneratorNeuropilotLlmDef & {
201
+ templateKey: 'GENERATOR_NEUROPILOT_LLM'
202
+ switches?: Array<
203
+ SwitchDef &
204
+ GeneratorNeuropilotLlmDef & {
205
+ conds?: Array<{
206
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
207
+ cond:
208
+ | SwitchCondInnerStateCurrentCanvas
209
+ | SwitchCondData
210
+ | {
211
+ __typename: 'SwitchCondInnerStateOutlet'
212
+ outlet:
213
+ | 'contextState'
214
+ | 'result'
215
+ | 'fullContext'
216
+ | 'lastToken'
217
+ | 'rawOutput'
218
+ | 'promptTokensPerSec'
219
+ | 'generationTokensPerSec'
220
+ value: any
221
+ }
222
+ }>
223
+ }
224
+ >
225
+ }