@fugood/bricks-ctor 2.24.0-beta.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129) hide show
  1. package/compile/action-name-map.ts +988 -0
  2. package/compile/index.ts +1245 -0
  3. package/compile/util.ts +358 -0
  4. package/index.ts +6 -0
  5. package/package.json +28 -0
  6. package/skills/bricks-design/LICENSE.txt +180 -0
  7. package/skills/bricks-design/SKILL.md +66 -0
  8. package/skills/bricks-project/SKILL.md +32 -0
  9. package/skills/bricks-project/rules/animation.md +159 -0
  10. package/skills/bricks-project/rules/architecture-patterns.md +69 -0
  11. package/skills/bricks-project/rules/automations.md +221 -0
  12. package/skills/bricks-project/rules/buttress.md +156 -0
  13. package/skills/bricks-project/rules/data-calculation.md +208 -0
  14. package/skills/bricks-project/rules/local-sync.md +129 -0
  15. package/skills/bricks-project/rules/media-flow.md +158 -0
  16. package/skills/bricks-project/rules/remote-data-bank.md +196 -0
  17. package/skills/bricks-project/rules/standby-transition.md +124 -0
  18. package/skills/rive-marketplace/SKILL.md +99 -0
  19. package/tools/deploy.ts +151 -0
  20. package/tools/icons/.gitattributes +1 -0
  21. package/tools/icons/fa6pro-glyphmap.json +4686 -0
  22. package/tools/icons/fa6pro-meta.json +3671 -0
  23. package/tools/mcp-server.ts +28 -0
  24. package/tools/mcp-tools/compile.ts +91 -0
  25. package/tools/mcp-tools/huggingface.ts +762 -0
  26. package/tools/mcp-tools/icons.ts +70 -0
  27. package/tools/mcp-tools/lottie.ts +102 -0
  28. package/tools/mcp-tools/media.ts +110 -0
  29. package/tools/postinstall.ts +229 -0
  30. package/tools/preview-main.mjs +293 -0
  31. package/tools/preview.ts +143 -0
  32. package/tools/pull.ts +116 -0
  33. package/tsconfig.json +16 -0
  34. package/types/animation.ts +100 -0
  35. package/types/automation.ts +235 -0
  36. package/types/brick-base.ts +80 -0
  37. package/types/bricks/Camera.ts +246 -0
  38. package/types/bricks/Chart.ts +372 -0
  39. package/types/bricks/GenerativeMedia.ts +276 -0
  40. package/types/bricks/Icon.ts +98 -0
  41. package/types/bricks/Image.ts +114 -0
  42. package/types/bricks/Items.ts +476 -0
  43. package/types/bricks/Lottie.ts +168 -0
  44. package/types/bricks/Maps.ts +262 -0
  45. package/types/bricks/QrCode.ts +117 -0
  46. package/types/bricks/Rect.ts +150 -0
  47. package/types/bricks/RichText.ts +128 -0
  48. package/types/bricks/Rive.ts +220 -0
  49. package/types/bricks/Slideshow.ts +201 -0
  50. package/types/bricks/Svg.ts +99 -0
  51. package/types/bricks/Text.ts +148 -0
  52. package/types/bricks/TextInput.ts +242 -0
  53. package/types/bricks/Video.ts +175 -0
  54. package/types/bricks/VideoStreaming.ts +112 -0
  55. package/types/bricks/WebRtcStream.ts +65 -0
  56. package/types/bricks/WebView.ts +168 -0
  57. package/types/bricks/index.ts +21 -0
  58. package/types/canvas.ts +82 -0
  59. package/types/common.ts +144 -0
  60. package/types/data-calc-command.ts +7005 -0
  61. package/types/data-calc-script.ts +21 -0
  62. package/types/data-calc.ts +11 -0
  63. package/types/data.ts +95 -0
  64. package/types/generators/AlarmClock.ts +110 -0
  65. package/types/generators/Assistant.ts +621 -0
  66. package/types/generators/BleCentral.ts +247 -0
  67. package/types/generators/BlePeripheral.ts +208 -0
  68. package/types/generators/CanvasMap.ts +74 -0
  69. package/types/generators/CastlesPay.ts +87 -0
  70. package/types/generators/DataBank.ts +160 -0
  71. package/types/generators/File.ts +432 -0
  72. package/types/generators/GraphQl.ts +132 -0
  73. package/types/generators/Http.ts +222 -0
  74. package/types/generators/HttpServer.ts +176 -0
  75. package/types/generators/Information.ts +103 -0
  76. package/types/generators/Intent.ts +168 -0
  77. package/types/generators/Iterator.ts +108 -0
  78. package/types/generators/Keyboard.ts +105 -0
  79. package/types/generators/LlmAnthropicCompat.ts +212 -0
  80. package/types/generators/LlmAppleBuiltin.ts +159 -0
  81. package/types/generators/LlmGgml.ts +861 -0
  82. package/types/generators/LlmMediaTekNeuroPilot.ts +235 -0
  83. package/types/generators/LlmMlx.ts +227 -0
  84. package/types/generators/LlmOnnx.ts +213 -0
  85. package/types/generators/LlmOpenAiCompat.ts +244 -0
  86. package/types/generators/LlmQualcommAiEngine.ts +247 -0
  87. package/types/generators/Mcp.ts +637 -0
  88. package/types/generators/McpServer.ts +289 -0
  89. package/types/generators/MediaFlow.ts +170 -0
  90. package/types/generators/MqttBroker.ts +141 -0
  91. package/types/generators/MqttClient.ts +141 -0
  92. package/types/generators/Question.ts +408 -0
  93. package/types/generators/RealtimeTranscription.ts +279 -0
  94. package/types/generators/RerankerGgml.ts +191 -0
  95. package/types/generators/SerialPort.ts +151 -0
  96. package/types/generators/SoundPlayer.ts +94 -0
  97. package/types/generators/SoundRecorder.ts +130 -0
  98. package/types/generators/SpeechToTextGgml.ts +415 -0
  99. package/types/generators/SpeechToTextOnnx.ts +236 -0
  100. package/types/generators/SpeechToTextPlatform.ts +85 -0
  101. package/types/generators/SqLite.ts +159 -0
  102. package/types/generators/Step.ts +107 -0
  103. package/types/generators/SttAppleBuiltin.ts +130 -0
  104. package/types/generators/Tcp.ts +126 -0
  105. package/types/generators/TcpServer.ts +147 -0
  106. package/types/generators/TextToSpeechAppleBuiltin.ts +127 -0
  107. package/types/generators/TextToSpeechGgml.ts +221 -0
  108. package/types/generators/TextToSpeechOnnx.ts +178 -0
  109. package/types/generators/TextToSpeechOpenAiLike.ts +121 -0
  110. package/types/generators/ThermalPrinter.ts +191 -0
  111. package/types/generators/Tick.ts +83 -0
  112. package/types/generators/Udp.ts +120 -0
  113. package/types/generators/VadGgml.ts +250 -0
  114. package/types/generators/VadOnnx.ts +231 -0
  115. package/types/generators/VadTraditional.ts +138 -0
  116. package/types/generators/VectorStore.ts +257 -0
  117. package/types/generators/Watchdog.ts +107 -0
  118. package/types/generators/WebCrawler.ts +103 -0
  119. package/types/generators/WebRtc.ts +181 -0
  120. package/types/generators/WebSocket.ts +148 -0
  121. package/types/generators/index.ts +57 -0
  122. package/types/index.ts +13 -0
  123. package/types/subspace.ts +59 -0
  124. package/types/switch.ts +51 -0
  125. package/types/system.ts +707 -0
  126. package/utils/calc.ts +126 -0
  127. package/utils/data.ts +497 -0
  128. package/utils/event-props.ts +836 -0
  129. package/utils/id.ts +80 -0
@@ -0,0 +1,861 @@
1
+ /* Auto generated by build script
2
+ *
3
+ * Local Large Language Model (LLM) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
4
+ *
5
+ * ## Notice
6
+ * - The device RAM must be larger than 8GB
7
+ * - iOS: Recommended use M1+ / A17+ chip device. Supported GPU acceleration by Metal.
8
+ * - macOS: Recommended use M1+ chip device. Supported GPU acceleration by Metal.
9
+ * - Android: Recommended use Android 13+ system.
10
+ * - Supported GPU acceleration by OpenCL, currently only for Qualcomm Adreno 700+ GPUs, other GPUs are not supported.
11
+ * - Supported Hexagon NPU for Qualcomm Snapdragon 8 Gen 1+ GPUs.
12
+ * - Linux / Windows - Supported GPU acceleration, you can choose `vulkan` or `cuda` backend in Accel Variant property
13
+ * - Supported Hexagon NPU for Qualcomm Dragonwing IQ9 series+ (Linux)
14
+ */
15
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
16
+ import type { Data, DataLink } from '../data'
17
+ import type {
18
+ Brick,
19
+ Generator,
20
+ EventAction,
21
+ ActionWithDataParams,
22
+ ActionWithParams,
23
+ Action,
24
+ EventProperty,
25
+ } from '../common'
26
+ import type { TemplateEventPropsMap } from '../../utils/event-props'
27
+
28
+ /* Load the model */
29
+ export type GeneratorLLMActionLoadModel = Action & {
30
+ __actionName: 'GENERATOR_LLM_LOAD_MODEL'
31
+ }
32
+
33
+ /* Load multimodal (vision) model (PREVIEW FEATURE) */
34
+ export type GeneratorLLMActionLoadMultimodalModel = Action & {
35
+ __actionName: 'GENERATOR_LLM_LOAD_MULTIMODAL_MODEL'
36
+ }
37
+
38
+ /* Tokenize the prompt */
39
+ export type GeneratorLLMActionTokenize = ActionWithParams & {
40
+ __actionName: 'GENERATOR_LLM_TOKENIZE'
41
+ params?: Array<
42
+ | {
43
+ input: 'mode'
44
+ value?: string | DataLink | EventProperty
45
+ mapping?: string
46
+ }
47
+ | {
48
+ input: 'prompt'
49
+ value?: string | DataLink | EventProperty
50
+ mapping?: string
51
+ }
52
+ | {
53
+ input: 'promptMediaPaths'
54
+ value?: Array<any> | DataLink | EventProperty
55
+ mapping?: string
56
+ }
57
+ | {
58
+ input: 'messages'
59
+ value?: Array<any> | DataLink | EventProperty
60
+ mapping?: string
61
+ }
62
+ >
63
+ }
64
+
65
+ /* Detokenize the tokens to text */
66
+ export type GeneratorLLMActionDetokenize = ActionWithParams & {
67
+ __actionName: 'GENERATOR_LLM_DETOKENIZE'
68
+ params?: Array<{
69
+ input: 'tokens'
70
+ value?: Array<any> | DataLink | EventProperty
71
+ mapping?: string
72
+ }>
73
+ }
74
+
75
+ /* Pre-process the prompt, this can speed up the completion action */
76
+ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
77
+ __actionName: 'GENERATOR_LLM_PROCESS_PROMPT'
78
+ params?: Array<
79
+ | {
80
+ input: 'sessionKey'
81
+ value?: string | DataLink | EventProperty
82
+ mapping?: string
83
+ }
84
+ | {
85
+ input: 'mode'
86
+ value?: string | DataLink | EventProperty
87
+ mapping?: string
88
+ }
89
+ | {
90
+ input: 'messages'
91
+ value?: Array<any> | DataLink | EventProperty
92
+ mapping?: string
93
+ }
94
+ | {
95
+ input: 'tools'
96
+ value?: Array<any> | DataLink | EventProperty
97
+ mapping?: string
98
+ }
99
+ | {
100
+ input: 'parallelToolCalls'
101
+ value?: boolean | DataLink | EventProperty
102
+ mapping?: string
103
+ }
104
+ | {
105
+ input: 'toolChoice'
106
+ value?: string | DataLink | EventProperty
107
+ mapping?: string
108
+ }
109
+ | {
110
+ input: 'enableThinking'
111
+ value?: boolean | DataLink | EventProperty
112
+ mapping?: string
113
+ }
114
+ | {
115
+ input: 'thinkingBudgetTokens'
116
+ value?: number | DataLink | EventProperty
117
+ mapping?: string
118
+ }
119
+ | {
120
+ input: 'thinkingBudgetMessage'
121
+ value?: string | DataLink | EventProperty
122
+ mapping?: string
123
+ }
124
+ | {
125
+ input: 'prompt'
126
+ value?: string | DataLink | EventProperty
127
+ mapping?: string
128
+ }
129
+ | {
130
+ input: 'promptMediaPaths'
131
+ value?: Array<any> | DataLink | EventProperty
132
+ mapping?: string
133
+ }
134
+ | {
135
+ input: 'promptTemplateData'
136
+ value?: {} | DataLink | EventProperty
137
+ mapping?: string
138
+ }
139
+ | {
140
+ input: 'promptTemplateType'
141
+ value?: string | DataLink | EventProperty
142
+ mapping?: string
143
+ }
144
+ | {
145
+ input: 'responseFormat'
146
+ value?: {} | DataLink | EventProperty
147
+ mapping?: string
148
+ }
149
+ | {
150
+ input: 'chatTemplateKwargs'
151
+ value?: {} | DataLink | EventProperty
152
+ mapping?: string
153
+ }
154
+ | {
155
+ input: 'addGenerationPrompt'
156
+ value?: boolean | DataLink | EventProperty
157
+ mapping?: string
158
+ }
159
+ | {
160
+ input: 'now'
161
+ value?: string | DataLink | EventProperty
162
+ mapping?: string
163
+ }
164
+ | {
165
+ input: 'forcePureContent'
166
+ value?: boolean | DataLink | EventProperty
167
+ mapping?: string
168
+ }
169
+ >
170
+ }
171
+
172
+ /* Run text completion */
173
+ export type GeneratorLLMActionCompletion = ActionWithParams & {
174
+ __actionName: 'GENERATOR_LLM_COMPLETION'
175
+ params?: Array<
176
+ | {
177
+ input: 'sessionKey'
178
+ value?: string | DataLink | EventProperty
179
+ mapping?: string
180
+ }
181
+ | {
182
+ input: 'mode'
183
+ value?: string | DataLink | EventProperty
184
+ mapping?: string
185
+ }
186
+ | {
187
+ input: 'messages'
188
+ value?: Array<any> | DataLink | EventProperty
189
+ mapping?: string
190
+ }
191
+ | {
192
+ input: 'tools'
193
+ value?: Array<any> | DataLink | EventProperty
194
+ mapping?: string
195
+ }
196
+ | {
197
+ input: 'parallelToolCalls'
198
+ value?: boolean | DataLink | EventProperty
199
+ mapping?: string
200
+ }
201
+ | {
202
+ input: 'toolChoice'
203
+ value?: string | DataLink | EventProperty
204
+ mapping?: string
205
+ }
206
+ | {
207
+ input: 'enableThinking'
208
+ value?: boolean | DataLink | EventProperty
209
+ mapping?: string
210
+ }
211
+ | {
212
+ input: 'thinkingBudgetTokens'
213
+ value?: number | DataLink | EventProperty
214
+ mapping?: string
215
+ }
216
+ | {
217
+ input: 'thinkingBudgetMessage'
218
+ value?: string | DataLink | EventProperty
219
+ mapping?: string
220
+ }
221
+ | {
222
+ input: 'useReasoningFormat'
223
+ value?: string | DataLink | EventProperty
224
+ mapping?: string
225
+ }
226
+ | {
227
+ input: 'prompt'
228
+ value?: string | DataLink | EventProperty
229
+ mapping?: string
230
+ }
231
+ | {
232
+ input: 'promptMediaPaths'
233
+ value?: Array<any> | DataLink | EventProperty
234
+ mapping?: string
235
+ }
236
+ | {
237
+ input: 'promptTemplateData'
238
+ value?: {} | DataLink | EventProperty
239
+ mapping?: string
240
+ }
241
+ | {
242
+ input: 'promptTemplateType'
243
+ value?: string | DataLink | EventProperty
244
+ mapping?: string
245
+ }
246
+ | {
247
+ input: 'responseFormat'
248
+ value?: {} | DataLink | EventProperty
249
+ mapping?: string
250
+ }
251
+ | {
252
+ input: 'chatTemplateKwargs'
253
+ value?: {} | DataLink | EventProperty
254
+ mapping?: string
255
+ }
256
+ | {
257
+ input: 'addGenerationPrompt'
258
+ value?: boolean | DataLink | EventProperty
259
+ mapping?: string
260
+ }
261
+ | {
262
+ input: 'now'
263
+ value?: string | DataLink | EventProperty
264
+ mapping?: string
265
+ }
266
+ | {
267
+ input: 'forcePureContent'
268
+ value?: boolean | DataLink | EventProperty
269
+ mapping?: string
270
+ }
271
+ | {
272
+ input: 'grammar'
273
+ value?: string | DataLink | EventProperty
274
+ mapping?: string
275
+ }
276
+ | {
277
+ input: 'stopWords'
278
+ value?: Array<any> | DataLink | EventProperty
279
+ mapping?: string
280
+ }
281
+ | {
282
+ input: 'predict'
283
+ value?: number | DataLink | EventProperty
284
+ mapping?: string
285
+ }
286
+ | {
287
+ input: 'temperature'
288
+ value?: number | DataLink | EventProperty
289
+ mapping?: string
290
+ }
291
+ | {
292
+ input: 'probs'
293
+ value?: number | DataLink | EventProperty
294
+ mapping?: string
295
+ }
296
+ | {
297
+ input: 'topK'
298
+ value?: number | DataLink | EventProperty
299
+ mapping?: string
300
+ }
301
+ | {
302
+ input: 'topP'
303
+ value?: number | DataLink | EventProperty
304
+ mapping?: string
305
+ }
306
+ | {
307
+ input: 'xtcThreshold'
308
+ value?: number | DataLink | EventProperty
309
+ mapping?: string
310
+ }
311
+ | {
312
+ input: 'xtcProbability'
313
+ value?: number | DataLink | EventProperty
314
+ mapping?: string
315
+ }
316
+ | {
317
+ input: 'dryMultiplier'
318
+ value?: number | DataLink | EventProperty
319
+ mapping?: string
320
+ }
321
+ | {
322
+ input: 'dryBase'
323
+ value?: number | DataLink | EventProperty
324
+ mapping?: string
325
+ }
326
+ | {
327
+ input: 'dryAllowedLength'
328
+ value?: number | DataLink | EventProperty
329
+ mapping?: string
330
+ }
331
+ | {
332
+ input: 'dryPenaltyLastN'
333
+ value?: number | DataLink | EventProperty
334
+ mapping?: string
335
+ }
336
+ | {
337
+ input: 'drySequenceBreakers'
338
+ value?: Array<any> | DataLink | EventProperty
339
+ mapping?: string
340
+ }
341
+ | {
342
+ input: 'mirostat'
343
+ value?: number | DataLink | EventProperty
344
+ mapping?: string
345
+ }
346
+ | {
347
+ input: 'mirostatTau'
348
+ value?: number | DataLink | EventProperty
349
+ mapping?: string
350
+ }
351
+ | {
352
+ input: 'mirostatEta'
353
+ value?: number | DataLink | EventProperty
354
+ mapping?: string
355
+ }
356
+ | {
357
+ input: 'penaltyLastN'
358
+ value?: number | DataLink | EventProperty
359
+ mapping?: string
360
+ }
361
+ | {
362
+ input: 'penaltyRepeat'
363
+ value?: number | DataLink | EventProperty
364
+ mapping?: string
365
+ }
366
+ | {
367
+ input: 'penaltyFrequency'
368
+ value?: number | DataLink | EventProperty
369
+ mapping?: string
370
+ }
371
+ | {
372
+ input: 'penaltyPresent'
373
+ value?: number | DataLink | EventProperty
374
+ mapping?: string
375
+ }
376
+ | {
377
+ input: 'penalizeNewline'
378
+ value?: boolean | DataLink | EventProperty
379
+ mapping?: string
380
+ }
381
+ | {
382
+ input: 'seed'
383
+ value?: number | DataLink | EventProperty
384
+ mapping?: string
385
+ }
386
+ | {
387
+ input: 'typicalP'
388
+ value?: number | DataLink | EventProperty
389
+ mapping?: string
390
+ }
391
+ | {
392
+ input: 'ignoreEos'
393
+ value?: boolean | DataLink | EventProperty
394
+ mapping?: string
395
+ }
396
+ | {
397
+ input: 'functionCallEnabled'
398
+ value?: boolean | DataLink | EventProperty
399
+ mapping?: string
400
+ }
401
+ | {
402
+ input: 'functionCallSchema'
403
+ value?: Array<any> | DataLink | EventProperty
404
+ mapping?: string
405
+ }
406
+ >
407
+ }
408
+
409
+ /* Clear session with session key or session ID */
410
+ export type GeneratorLLMActionClearSession = ActionWithParams & {
411
+ __actionName: 'GENERATOR_LLM_CLEAR_SESSION'
412
+ params?: Array<
413
+ | {
414
+ input: 'sessionId'
415
+ value?: string | DataLink | EventProperty
416
+ mapping?: string
417
+ }
418
+ | {
419
+ input: 'sessionCustomKey'
420
+ value?: string | DataLink | EventProperty
421
+ mapping?: string
422
+ }
423
+ >
424
+ }
425
+
426
+ /* Stop text completion */
427
+ export type GeneratorLLMActionStopCompletion = Action & {
428
+ __actionName: 'GENERATOR_LLM_STOP_COMPLETION'
429
+ }
430
+
431
+ /* Clear KV cache */
432
+ export type GeneratorLLMActionClearCache = Action & {
433
+ __actionName: 'GENERATOR_LLM_CLEAR_CACHE'
434
+ }
435
+
436
+ /* Clear downloaded models & current jobs */
437
+ export type GeneratorLLMActionClearDownload = Action & {
438
+ __actionName: 'GENERATOR_LLM_CLEAR_DOWNLOAD'
439
+ }
440
+
441
+ /* Release multimodal (vision) context (PREVIEW FEATURE) */
442
+ export type GeneratorLLMActionReleaseMultimodalContext = Action & {
443
+ __actionName: 'GENERATOR_LLM_RELEASE_MULTIMODAL_CONTEXT'
444
+ }
445
+
446
+ /* Release context */
447
+ export type GeneratorLLMActionReleaseContext = Action & {
448
+ __actionName: 'GENERATOR_LLM_RELEASE_CONTEXT'
449
+ }
450
+
451
+ interface GeneratorLLMDef {
452
+ /*
453
+ Default property:
454
+ {
455
+ "init": false,
456
+ "contextSize": 512,
457
+ "batchSize": 512,
458
+ "uBatchSize": 512,
459
+ "accelVariant": "default",
460
+ "mainGpu": 0,
461
+ "gpuLayers": 0,
462
+ "useMlock": true,
463
+ "useMmap": true,
464
+ "cacheKType": "f16",
465
+ "cacheVType": "f16",
466
+ "ctxShift": true,
467
+ "cpuMoeLayers": 0,
468
+ "transformScriptEnabled": false,
469
+ "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables }, members = { llmUtils } \*\/\nreturn inputs.prompt",
470
+ "transformScriptVariables": {},
471
+ "sessionMinSaveSize": 50,
472
+ "sessionRemain": 10,
473
+ "completionMode": "auto",
474
+ "completionPrompt": "",
475
+ "completionPromptTemplateType": "${}",
476
+ "completionEnableThinking": true,
477
+ "completionAddGenerationPrompt": true,
478
+ "completionChatTemplateKwargs": {},
479
+ "completionForcePureContent": false,
480
+ "completionUseReasoningFormat": "auto",
481
+ "completionStopWords": [],
482
+ "completionPredict": 400,
483
+ "completionTopK": 40,
484
+ "completionTopP": 0.95,
485
+ "completionMinP": 0.05,
486
+ "completionDryMultiplier": 0,
487
+ "completionDryBase": 1.75,
488
+ "completionDryAllowedLength": 2,
489
+ "completionDrySequenceBreakers": [
490
+ "\n",
491
+ ":",
492
+ "\"",
493
+ "*"
494
+ ],
495
+ "completionMirostat": 0,
496
+ "completionMirostatTau": 5,
497
+ "completionMirostatEta": 0.1,
498
+ "completionPenaltyLastN": 64,
499
+ "completionPenaltyRepeat": 1,
500
+ "completionPenaltyFrequency": 0,
501
+ "completionPenaltyPresent": 0,
502
+ "completionPenalizeNewline": false,
503
+ "completionTypicalP": 1
504
+ }
505
+ */
506
+ property?: {
507
+ /* Initialize the Llama context on generator initialization
508
+ Please note that it will take some RAM depending on the model size */
509
+ init?: boolean | DataLink
510
+ /* The URL or path of model
511
+ We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
512
+ modelUrl?: string | DataLink
513
+ /* Hash type of model */
514
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
515
+ /* Hash of model */
516
+ modelHash?: string | DataLink
517
+ /* Load multimodal (vision) context after model loaded (PREVIEW FEATURE) */
518
+ initMultimodal?: boolean | DataLink
519
+ /* The URL or path of mmproj file for multimodal vision support (PREVIEW FEATURE) */
520
+ mmprojUrl?: string | DataLink
521
+ /* Hash type of mmproj file (PREVIEW FEATURE) */
522
+ mmprojHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
523
+ /* Hash of mmproj file (PREVIEW FEATURE) */
524
+ mmprojHash?: string | DataLink
525
+ /* Minimum tokens for image encoding in multimodal (PREVIEW FEATURE)
526
+ Useful for dynamic resolution models (e.g. Qwen-VL). Default: -1 (auto) */
527
+ imageMinTokens?: number | DataLink
528
+ /* Maximum tokens for image encoding in multimodal (PREVIEW FEATURE)
529
+ Limit tokens for dynamic resolution models to balance speed vs. detail. Default: -1 (auto) */
530
+ imageMaxTokens?: number | DataLink
531
+ /* Chat Template (Jinja format) to override the default template from model */
532
+ chatTemplate?: string | DataLink
533
+ /* Context size (0 ~ 4096) (Default to 512) */
534
+ contextSize?: number | DataLink
535
+ /* Logical batch size for prompt processing */
536
+ batchSize?: number | DataLink
537
+ /* Physical batch size for prompt processing */
538
+ uBatchSize?: number | DataLink
539
+ /* Number of threads */
540
+ maxThreads?: number | DataLink
541
+ /* Accelerator variant (Only for desktop)
542
+ `default` - CPU / Metal (macOS)
543
+ `vulkan` - Use Vulkan
544
+ `cuda` - Use CUDA
545
+ `snapdragon` - Use OpenCL/Hexagon of Qualcomm Snapdragon */
546
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | 'snapdragon' | DataLink
547
+ /* Devices. For example:
548
+
549
+ Metal or CPU for iOS/tvOS/MacOS
550
+ OpenCL or CPU for Android
551
+ - Add `HTP0`, `HTP1`, `...` for OpenCL/Hexagon devices (Use HTP* for all HTP devices)
552
+ For Desktop, you may want to include/exclude GPUs */
553
+ devices?: Array<string | DataLink> | DataLink
554
+ /* Main GPU index */
555
+ mainGpu?: number | DataLink
556
+ /* Number of GPU layers */
557
+ gpuLayers?: number | DataLink
558
+ /* Use memory lock */
559
+ useMlock?: boolean | DataLink
560
+ /* Use mmap */
561
+ useMmap?: boolean | DataLink
562
+ /* Disable extra buffer types for weight repacking. Reduces memory usage at the cost of slower prompt processing. */
563
+ noExtraBuffs?: boolean | DataLink
564
+ /* Use Flash Attention for inference (Recommended with GPU enabled) */
565
+ useFlashAttn?: 'auto' | 'on' | 'off' | DataLink
566
+ /* KV cache data type for the K (Default: f16) */
567
+ cacheKType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
568
+ /* KV cache data type for the V (Default: f16) */
569
+ cacheVType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
570
+ /* Use a unified buffer across the input sequences when computing the attention */
571
+ useKVUnified?: boolean | DataLink
572
+ /* Use full-size SWA cache. May improve performance for multiple sequences but uses more memory. */
573
+ useSwaFull?: boolean | DataLink
574
+ /* Enable context shift */
575
+ ctxShift?: boolean | DataLink
576
+ /* Number of layers to keep MoE weights on CPU */
577
+ cpuMoeLayers?: number | DataLink
578
+ /* Enable Transform Script for processing the prompt */
579
+ transformScriptEnabled?: boolean | DataLink
580
+ /* Code of Transform Script */
581
+ transformScriptCode?: string | DataLink
582
+ /* Variables used in Transform Script (object) */
583
+ transformScriptVariables?: {} | DataLink
584
+ /* Session save mode
585
+ `none` - No session saving
586
+ `prompt` - Save session when prompt processed
587
+ `completion` - Save session when completion finished
588
+ `all` - Save session when prompt processed and completion finished */
589
+ sessionSaveMode?: 'none' | 'prompt' | 'completion' | 'all' | DataLink
590
+ /* Minimum processed/generated size to determine whether to save session (Unit: token) */
591
+ sessionMinSaveSize?: number | DataLink
592
+ /* Session file remain count (Default to 10) */
593
+ sessionRemain?: number | DataLink
594
+ /* TODO:loran_gqarms_norm_epsrope_freq_baserope_freq_scale */
595
+ completionMode?: 'auto' | 'chat' | 'text' | DataLink
596
+ /* Tools for chat mode using OpenAI-compatible function calling format
597
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
598
+ See: https://platform.openai.com/docs/guides/function-calling */
599
+ completionTools?: Array<{} | DataLink> | DataLink
600
+ /* Enable parallel tool calls */
601
+ completionParallelToolCalls?: boolean | DataLink
602
+ /* Tool choice for chat mode */
603
+ completionToolChoice?: 'none' | 'auto' | 'required' | DataLink
604
+ /* Messages (chat mode) */
605
+ completionMessages?:
606
+ | Array<
607
+ | DataLink
608
+ | {
609
+ role?: string | DataLink
610
+ content?: string | DataLink
611
+ }
612
+ >
613
+ | DataLink
614
+ /* Prompt (text mode) */
615
+ completionPrompt?: string | DataLink
616
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
617
+ In prompt, use `<__media__>` for position of media content */
618
+ completionPromptMediaPaths?: Array<string | DataLink> | DataLink
619
+ /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
620
+ completionPromptTemplateData?: {} | DataLink
621
+ /* The prompt template type */
622
+ completionPromptTemplateType?: '${}' | '{{}}' | DataLink
623
+ /* Response format */
624
+ completionResponseFormat?:
625
+ | DataLink
626
+ | {
627
+ type?: 'text' | 'json_schema' | 'json_object' | DataLink
628
+ json_schema?:
629
+ | DataLink
630
+ | {
631
+ strict?: boolean | DataLink
632
+ schema?: {} | DataLink
633
+ }
634
+ schema?: {} | DataLink
635
+ }
636
+ /* Enable thinking */
637
+ completionEnableThinking?: boolean | DataLink
638
+ /* Maximum tokens allowed inside the model's thinking block before forcing it closed. Only applies when chat formatting exposes thinking tags. */
639
+ completionThinkingBudgetTokens?: number | DataLink
640
+ /* Message injected before the thinking end tag when the thinking budget is exhausted. */
641
+ completionThinkingBudgetMessage?: string | DataLink
642
+ /* Add generation prompt */
643
+ completionAddGenerationPrompt?: boolean | DataLink
644
+ /* Now (for fill current date in chat template if supported) */
645
+ completionNow?: string | DataLink
646
+ /* Additional keyword arguments for chat template (object) */
647
+ completionChatTemplateKwargs?: {} | DataLink
648
+ /* Force pure content (accept any model that has a chat_template without requiring template validation) */
649
+ completionForcePureContent?: boolean | DataLink
650
+ /* Use reasoning format for enhanced response structure
651
+ `auto` - Auto-determine the reasoning format of the model
652
+ `none` - Disable reasoning format */
653
+ completionUseReasoningFormat?: 'auto' | 'none' | DataLink
654
+ /* Stop words */
655
+ completionStopWords?: Array<string | DataLink> | DataLink
656
+ /* Number of tokens to predict */
657
+ completionPredict?: number | DataLink
658
+ /* Throttle time for completion result (in milliseconds) */
659
+ completionResultThrottle?: number | DataLink
660
+ /* Grammar (GBNF: Please refer to https://github.com/ggerganov/llama.cpp/tree/master/grammars) */
661
+ completionGrammar?: string | DataLink
662
+ /* Temperature */
663
+ completionTemperature?: number | DataLink
664
+ /* Number of probablites to show for each token in the completion details */
665
+ completionProbs?: number | DataLink
666
+ /* Top K sampling */
667
+ completionTopK?: number | DataLink
668
+ /* Top P sampling */
669
+ completionTopP?: number | DataLink
670
+ /* Min P sampling */
671
+ completionMinP?: number | DataLink
672
+ /* Sets a minimum probability threshold for tokens to be removed */
673
+ completionXtcThreshold?: number | DataLink
674
+ /* Sets the chance for token removal (checked once on sampler start) */
675
+ completionXtcProbability?: number | DataLink
676
+ /* Set the DRY (Don't Repeat Yourself) repetition penalty multiplier. Default: `0.0`, which is disabled. */
677
+ completionDryMultiplier?: number | DataLink
678
+ /* Set the DRY repetition penalty base value. Default: `1.75` */
679
+ completionDryBase?: number | DataLink
680
+ /* Tokens that extend repetition beyond this receive exponentially increasing penalty: multiplier * base ^ (length of repeating sequence before token - allowed length). Default: `2` */
681
+ completionDryAllowedLength?: number | DataLink
682
+ /* How many tokens to scan for repetitions. Default: `-1`, where `0` is disabled and `-1` is context size. */
683
+ completionDryPenaltyLastN?: number | DataLink
684
+ /* Specify an array of sequence breakers for DRY sampling. Only a JSON array of strings is accepted. Default: `['\n', ':', '"', '*']` */
685
+ completionDrySequenceBreakers?: Array<string | DataLink> | DataLink
686
+ /* Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641. Default: `-1.0` (Disabled) */
687
+ completionTopNSigma?: number | DataLink
688
+ /* Use Mirostat sampling. Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used. */
689
+ completionMirostat?: number | DataLink
690
+ /* Mirostat target entropy, parameter tau */
691
+ completionMirostatTau?: number | DataLink
692
+ /* Mirostat learning rate, parameter eta */
693
+ completionMirostatEta?: number | DataLink
694
+ /* Last n tokens to consider for penalize */
695
+ completionPenaltyLastN?: number | DataLink
696
+ /* Penalize repeat sequence of tokens (default: 0.1, 1.0 = disabled) */
697
+ completionPenaltyRepeat?: number | DataLink
698
+ /* Repeat alpha frequency penalty (default: 0.1, 0.0 = disabled) */
699
+ completionPenaltyFrequency?: number | DataLink
700
+ /* Repeat alpha presence penalty (default: 0.1, 0.0 = disabled) */
701
+ completionPenaltyPresent?: number | DataLink
702
+ /* Penalize newline tokens when applying the repeat penalty (default: true) */
703
+ completionPenalizeNewline?: boolean | DataLink
704
+ /* Set the random number generator (RNG) seed (default: -1, -1 = random seed) */
705
+ completionSeed?: number | DataLink
706
+ /* locally typical sampling, parameter p (default: 0.1, 1.0 = disabled) */
707
+ completionTypicalP?: number | DataLink
708
+ /* Repeat alpha frequency penalty (default: 0.1, 0.0 = disabled) */
709
+ completionIgnoreEOS?: boolean | DataLink
710
+ /* Buttress connection settings for remote inference */
711
+ buttressConnectionSettings?:
712
+ | DataLink
713
+ | {
714
+ enabled?: boolean | DataLink
715
+ url?: string | DataLink
716
+ fallbackType?: 'use-local' | 'no-op' | DataLink
717
+ strategy?: 'prefer-local' | 'prefer-buttress' | 'prefer-best' | DataLink
718
+ }
719
+ }
720
+ events?: {
721
+ /* Event triggered when context state changes */
722
+ onContextStateChange?: Array<
723
+ EventAction<string & keyof TemplateEventPropsMap['Llm']['onContextStateChange']>
724
+ >
725
+ /* Event triggered when error occurs */
726
+ onError?: Array<EventAction<string & keyof TemplateEventPropsMap['Llm']['onError']>>
727
+ /* Event triggered when completion */
728
+ onCompletion?: Array<EventAction<string & keyof TemplateEventPropsMap['Llm']['onCompletion']>>
729
+ /* Event triggered when completion finished */
730
+ onCompletionFinished?: Array<
731
+ EventAction<string & keyof TemplateEventPropsMap['Llm']['onCompletionFinished']>
732
+ >
733
+ /* Event triggered on get function call request */
734
+ onCompletionFunctionCall?: Array<
735
+ EventAction<string & keyof TemplateEventPropsMap['Llm']['onCompletionFunctionCall']>
736
+ >
737
+ }
738
+ outlets?: {
739
+ /* Context state */
740
+ contextState?: () => Data<string>
741
+ /* Context load progress (0-100) */
742
+ contextLoadProgress?: () => Data<number>
743
+ /* Context details */
744
+ contextDetails?: () => Data<{
745
+ state?: string
746
+ contextId?: string
747
+ gpu?: string
748
+ reasonNoGPU?: string
749
+ model?: { [key: string]: any }
750
+ isMultimodalEnabled?: boolean
751
+ [key: string]: any
752
+ }>
753
+ /* Session details */
754
+ sessions?: () => Data<{
755
+ last_session_id?: string
756
+ sessions?: Array<{
757
+ id?: string
758
+ type?: string
759
+ prompt?: string
760
+ sessionKey?: string
761
+ model_instance_id?: string
762
+ tokens_evaluated?: number
763
+ t?: number
764
+ [key: string]: any
765
+ }>
766
+ last_custom_session_id?: string
767
+ custom_sessions?: Array<{
768
+ id?: string
769
+ type?: string
770
+ prompt?: string
771
+ sessionKey?: string
772
+ model_instance_id?: string
773
+ tokens_evaluated?: number
774
+ t?: number
775
+ [key: string]: any
776
+ }>
777
+ [key: string]: any
778
+ }>
779
+ /* Is evaluating */
780
+ isEvaluating?: () => Data<boolean>
781
+ /* Tokenize result */
782
+ tokenizeResult?: () => Data<Array<number>>
783
+ /* Detokenize result */
784
+ detokenizeResult?: () => Data<string>
785
+ /* Last formatted prompt (messages or prompt) */
786
+ completionLastFormattedPrompt?: () => Data<string>
787
+ /* Last completion token */
788
+ completionLastToken?: () => Data<string>
789
+ /* Completion result */
790
+ completionResult?: () => Data<string>
791
+ /* Reasoning content from model responses */
792
+ completionReasoningContent?: () => Data<string>
793
+ /* Full context (Prompt + Completion) */
794
+ completionFullContext?: () => Data<string>
795
+ /* Inference result details */
796
+ completionResultDetails?: () => Data<{
797
+ prompt?: string
798
+ full_context?: string
799
+ text?: string
800
+ content?: string
801
+ reasoning_content?: string
802
+ token?: string
803
+ tool_calls?: Array<{
804
+ id?: string
805
+ type?: string
806
+ function?: {
807
+ name?: string
808
+ arguments?: string
809
+ [key: string]: any
810
+ }
811
+ [key: string]: any
812
+ }>
813
+ [key: string]: any
814
+ }>
815
+ }
816
+ }
817
+
818
+ /* Local Large Language Model (LLM) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
819
+
820
+ ## Notice
821
+ - The device RAM must be larger than 8GB
822
+ - iOS: Recommended to use a device with an M1+ / A17+ chip. GPU acceleration is supported via Metal.
823
+ - macOS: Recommended to use a device with an M1+ chip. GPU acceleration is supported via Metal.
824
+ - Android: Recommended to use an Android 13+ system.
825
+ - GPU acceleration is supported via OpenCL, currently only for Qualcomm Adreno 700+ GPUs; other GPUs are not supported.
826
+ - Hexagon NPU is supported on Qualcomm Snapdragon 8 Gen 1+ SoCs.
827
+ - Linux / Windows: GPU acceleration is supported; you can choose the `vulkan` or `cuda` backend in the Accel Variant property.
828
+ - Hexagon NPU is supported on Qualcomm Dragonwing IQ9 series+ (Linux) */
829
+ export type GeneratorLLM = Generator &
830
+ GeneratorLLMDef & {
831
+ templateKey: 'GENERATOR_LLM'
832
+ /* Optional conditional variants: each switch may override any GeneratorLLMDef property when its conds match */
+ switches?: Array<
833
+ SwitchDef &
834
+ GeneratorLLMDef & {
835
+ /* Conditions that activate this switch; method is the comparison operator */
+ conds?: Array<{
836
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
837
+ cond:
838
+ | SwitchCondInnerStateCurrentCanvas
839
+ | SwitchCondData
840
+ /* Compare against one of this generator's outlets — names mirror the keys of GeneratorLLMDef's outlets block */
+ | {
841
+ __typename: 'SwitchCondInnerStateOutlet'
842
+ outlet:
843
+ | 'contextState'
844
+ | 'contextLoadProgress'
845
+ | 'contextDetails'
846
+ | 'sessions'
847
+ | 'isEvaluating'
848
+ | 'tokenizeResult'
849
+ | 'detokenizeResult'
850
+ | 'completionLastFormattedPrompt'
851
+ | 'completionLastToken'
852
+ | 'completionResult'
853
+ | 'completionReasoningContent'
854
+ | 'completionFullContext'
855
+ | 'completionResultDetails'
856
+ value: any
857
+ }
858
+ }>
859
+ }
860
+ >
861
+ }