@fugood/bricks-project 2.22.0-beta.8 → 2.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86) hide show
  1. package/compile/action-name-map.ts +112 -1
  2. package/compile/index.ts +10 -1
  3. package/package.json +3 -3
  4. package/tools/postinstall.ts +16 -9
  5. package/types/animation.ts +2 -1
  6. package/types/brick-base.ts +79 -0
  7. package/types/bricks/3DViewer.ts +200 -0
  8. package/types/bricks/Camera.ts +195 -0
  9. package/types/bricks/Chart.ts +362 -0
  10. package/types/bricks/GenerativeMedia.ts +240 -0
  11. package/types/bricks/Icon.ts +93 -0
  12. package/types/bricks/Image.ts +104 -0
  13. package/types/bricks/Items.ts +461 -0
  14. package/types/bricks/Lottie.ts +159 -0
  15. package/types/bricks/QrCode.ts +112 -0
  16. package/types/bricks/Rect.ts +110 -0
  17. package/types/bricks/RichText.ts +123 -0
  18. package/types/bricks/Rive.ts +209 -0
  19. package/types/bricks/Slideshow.ts +155 -0
  20. package/types/bricks/Svg.ts +94 -0
  21. package/types/bricks/Text.ts +143 -0
  22. package/types/bricks/TextInput.ts +231 -0
  23. package/types/bricks/Video.ts +170 -0
  24. package/types/bricks/VideoStreaming.ts +107 -0
  25. package/types/bricks/WebRtcStream.ts +60 -0
  26. package/types/bricks/WebView.ts +157 -0
  27. package/types/bricks/index.ts +20 -0
  28. package/types/common.ts +8 -3
  29. package/types/data.ts +6 -0
  30. package/types/generators/AlarmClock.ts +102 -0
  31. package/types/generators/Assistant.ts +546 -0
  32. package/types/generators/BleCentral.ts +225 -0
  33. package/types/generators/BlePeripheral.ts +202 -0
  34. package/types/generators/CanvasMap.ts +57 -0
  35. package/types/generators/CastlesPay.ts +77 -0
  36. package/types/generators/DataBank.ts +123 -0
  37. package/types/generators/File.ts +351 -0
  38. package/types/generators/GraphQl.ts +124 -0
  39. package/types/generators/Http.ts +117 -0
  40. package/types/generators/HttpServer.ts +164 -0
  41. package/types/generators/Information.ts +97 -0
  42. package/types/generators/Intent.ts +107 -0
  43. package/types/generators/Iterator.ts +95 -0
  44. package/types/generators/Keyboard.ts +85 -0
  45. package/types/generators/LlmAnthropicCompat.ts +188 -0
  46. package/types/generators/LlmGgml.ts +719 -0
  47. package/types/generators/LlmOnnx.ts +184 -0
  48. package/types/generators/LlmOpenAiCompat.ts +206 -0
  49. package/types/generators/LlmQualcommAiEngine.ts +213 -0
  50. package/types/generators/Mcp.ts +294 -0
  51. package/types/generators/McpServer.ts +248 -0
  52. package/types/generators/MediaFlow.ts +142 -0
  53. package/types/generators/MqttBroker.ts +121 -0
  54. package/types/generators/MqttClient.ts +129 -0
  55. package/types/generators/Question.ts +395 -0
  56. package/types/generators/RealtimeTranscription.ts +180 -0
  57. package/types/generators/RerankerGgml.ts +153 -0
  58. package/types/generators/SerialPort.ts +141 -0
  59. package/types/generators/SoundPlayer.ts +86 -0
  60. package/types/generators/SoundRecorder.ts +113 -0
  61. package/types/generators/SpeechToTextGgml.ts +462 -0
  62. package/types/generators/SpeechToTextOnnx.ts +227 -0
  63. package/types/generators/SpeechToTextPlatform.ts +75 -0
  64. package/types/generators/SqLite.ts +118 -0
  65. package/types/generators/Step.ts +101 -0
  66. package/types/generators/TapToPayOnIPhone.ts +175 -0
  67. package/types/generators/Tcp.ts +120 -0
  68. package/types/generators/TcpServer.ts +137 -0
  69. package/types/generators/TextToSpeechGgml.ts +182 -0
  70. package/types/generators/TextToSpeechOnnx.ts +169 -0
  71. package/types/generators/TextToSpeechOpenAiLike.ts +113 -0
  72. package/types/generators/ThermalPrinter.ts +185 -0
  73. package/types/generators/Tick.ts +75 -0
  74. package/types/generators/Udp.ts +109 -0
  75. package/types/generators/VadGgml.ts +211 -0
  76. package/types/generators/VectorStore.ts +223 -0
  77. package/types/generators/Watchdog.ts +96 -0
  78. package/types/generators/WebCrawler.ts +97 -0
  79. package/types/generators/WebRtc.ts +165 -0
  80. package/types/generators/WebSocket.ts +142 -0
  81. package/types/generators/index.ts +51 -0
  82. package/types/system.ts +64 -0
  83. package/utils/data.ts +45 -0
  84. package/utils/event-props.ts +89 -0
  85. package/types/bricks.ts +0 -3168
  86. package/types/generators.ts +0 -7580
@@ -0,0 +1,719 @@
1
+ import type { SwitchCondInnerStateCurrentCanvas, SwitchCondData, SwitchDef } from '../switch'
2
+ import type { Data, DataLink } from '../data'
3
+ import type {
4
+ Generator,
5
+ EventAction,
6
+ ActionWithDataParams,
7
+ ActionWithParams,
8
+ Action,
9
+ EventProperty,
10
+ } from '../common'
11
+
12
+ /* Load the model */
13
+ export type GeneratorLLMActionLoadModel = Action & {
14
+ __actionName: 'GENERATOR_LLM_LOAD_MODEL'
15
+ }
16
+
17
+ /* Load multimodal (vision) model (PREVIEW FEATURE) */
18
+ export type GeneratorLLMActionLoadMultimodalModel = Action & {
19
+ __actionName: 'GENERATOR_LLM_LOAD_MULTIMODAL_MODEL'
20
+ }
21
+
22
+ /* Tokenize the prompt */
23
+ export type GeneratorLLMActionTokenize = ActionWithParams & {
24
+ __actionName: 'GENERATOR_LLM_TOKENIZE'
25
+ params?: Array<
26
+ | {
27
+ input: 'mode'
28
+ value?: string | DataLink | EventProperty
29
+ mapping?: string
30
+ }
31
+ | {
32
+ input: 'prompt'
33
+ value?: string | DataLink | EventProperty
34
+ mapping?: string
35
+ }
36
+ | {
37
+ input: 'promptMediaPaths'
38
+ value?: Array<any> | DataLink | EventProperty
39
+ mapping?: string
40
+ }
41
+ | {
42
+ input: 'messages'
43
+ value?: Array<any> | DataLink | EventProperty
44
+ mapping?: string
45
+ }
46
+ >
47
+ }
48
+
49
+ /* Detokenize the tokens to text */
50
+ export type GeneratorLLMActionDetokenize = ActionWithParams & {
51
+ __actionName: 'GENERATOR_LLM_DETOKENIZE'
52
+ params?: Array<{
53
+ input: 'tokens'
54
+ value?: Array<any> | DataLink | EventProperty
55
+ mapping?: string
56
+ }>
57
+ }
58
+
59
+ /* Pre-process the prompt, this can speed up the completion action */
60
+ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
61
+ __actionName: 'GENERATOR_LLM_PROCESS_PROMPT'
62
+ params?: Array<
63
+ | {
64
+ input: 'sessionKey'
65
+ value?: string | DataLink | EventProperty
66
+ mapping?: string
67
+ }
68
+ | {
69
+ input: 'mode'
70
+ value?: string | DataLink | EventProperty
71
+ mapping?: string
72
+ }
73
+ | {
74
+ input: 'messages'
75
+ value?: Array<any> | DataLink | EventProperty
76
+ mapping?: string
77
+ }
78
+ | {
79
+ input: 'tools'
80
+ value?: Array<any> | DataLink | EventProperty
81
+ mapping?: string
82
+ }
83
+ | {
84
+ input: 'parallelToolCalls'
85
+ value?: boolean | DataLink | EventProperty
86
+ mapping?: string
87
+ }
88
+ | {
89
+ input: 'toolChoice'
90
+ value?: string | DataLink | EventProperty
91
+ mapping?: string
92
+ }
93
+ | {
94
+ input: 'enableThinking'
95
+ value?: boolean | DataLink | EventProperty
96
+ mapping?: string
97
+ }
98
+ | {
99
+ input: 'prompt'
100
+ value?: string | DataLink | EventProperty
101
+ mapping?: string
102
+ }
103
+ | {
104
+ input: 'promptMediaPaths'
105
+ value?: Array<any> | DataLink | EventProperty
106
+ mapping?: string
107
+ }
108
+ | {
109
+ input: 'promptTemplateData'
110
+ value?: {} | DataLink | EventProperty
111
+ mapping?: string
112
+ }
113
+ | {
114
+ input: 'promptTemplateType'
115
+ value?: string | DataLink | EventProperty
116
+ mapping?: string
117
+ }
118
+ | {
119
+ input: 'responseFormat'
120
+ value?: {} | DataLink | EventProperty
121
+ mapping?: string
122
+ }
123
+ | {
124
+ input: 'chatTemplateKwargs'
125
+ value?: {} | DataLink | EventProperty
126
+ mapping?: string
127
+ }
128
+ | {
129
+ input: 'addGenerationPrompt'
130
+ value?: boolean | DataLink | EventProperty
131
+ mapping?: string
132
+ }
133
+ | {
134
+ input: 'now'
135
+ value?: string | DataLink | EventProperty
136
+ mapping?: string
137
+ }
138
+ >
139
+ }
140
+
141
+ /* Run text completion */
142
+ export type GeneratorLLMActionCompletion = ActionWithParams & {
143
+ __actionName: 'GENERATOR_LLM_COMPLETION'
144
+ params?: Array<
145
+ | {
146
+ input: 'sessionKey'
147
+ value?: string | DataLink | EventProperty
148
+ mapping?: string
149
+ }
150
+ | {
151
+ input: 'mode'
152
+ value?: string | DataLink | EventProperty
153
+ mapping?: string
154
+ }
155
+ | {
156
+ input: 'messages'
157
+ value?: Array<any> | DataLink | EventProperty
158
+ mapping?: string
159
+ }
160
+ | {
161
+ input: 'tools'
162
+ value?: Array<any> | DataLink | EventProperty
163
+ mapping?: string
164
+ }
165
+ | {
166
+ input: 'parallelToolCalls'
167
+ value?: boolean | DataLink | EventProperty
168
+ mapping?: string
169
+ }
170
+ | {
171
+ input: 'toolChoice'
172
+ value?: string | DataLink | EventProperty
173
+ mapping?: string
174
+ }
175
+ | {
176
+ input: 'enableThinking'
177
+ value?: boolean | DataLink | EventProperty
178
+ mapping?: string
179
+ }
180
+ | {
181
+ input: 'useReasoningFormat'
182
+ value?: string | DataLink | EventProperty
183
+ mapping?: string
184
+ }
185
+ | {
186
+ input: 'prompt'
187
+ value?: string | DataLink | EventProperty
188
+ mapping?: string
189
+ }
190
+ | {
191
+ input: 'promptMediaPaths'
192
+ value?: Array<any> | DataLink | EventProperty
193
+ mapping?: string
194
+ }
195
+ | {
196
+ input: 'promptTemplateData'
197
+ value?: {} | DataLink | EventProperty
198
+ mapping?: string
199
+ }
200
+ | {
201
+ input: 'promptTemplateType'
202
+ value?: string | DataLink | EventProperty
203
+ mapping?: string
204
+ }
205
+ | {
206
+ input: 'responseFormat'
207
+ value?: {} | DataLink | EventProperty
208
+ mapping?: string
209
+ }
210
+ | {
211
+ input: 'chatTemplateKwargs'
212
+ value?: {} | DataLink | EventProperty
213
+ mapping?: string
214
+ }
215
+ | {
216
+ input: 'addGenerationPrompt'
217
+ value?: boolean | DataLink | EventProperty
218
+ mapping?: string
219
+ }
220
+ | {
221
+ input: 'now'
222
+ value?: string | DataLink | EventProperty
223
+ mapping?: string
224
+ }
225
+ | {
226
+ input: 'grammar'
227
+ value?: string | DataLink | EventProperty
228
+ mapping?: string
229
+ }
230
+ | {
231
+ input: 'stopWords'
232
+ value?: Array<any> | DataLink | EventProperty
233
+ mapping?: string
234
+ }
235
+ | {
236
+ input: 'predict'
237
+ value?: number | DataLink | EventProperty
238
+ mapping?: string
239
+ }
240
+ | {
241
+ input: 'temperature'
242
+ value?: number | DataLink | EventProperty
243
+ mapping?: string
244
+ }
245
+ | {
246
+ input: 'probs'
247
+ value?: number | DataLink | EventProperty
248
+ mapping?: string
249
+ }
250
+ | {
251
+ input: 'topK'
252
+ value?: number | DataLink | EventProperty
253
+ mapping?: string
254
+ }
255
+ | {
256
+ input: 'topP'
257
+ value?: number | DataLink | EventProperty
258
+ mapping?: string
259
+ }
260
+ | {
261
+ input: 'xtcThreshold'
262
+ value?: number | DataLink | EventProperty
263
+ mapping?: string
264
+ }
265
+ | {
266
+ input: 'xtcProbability'
267
+ value?: number | DataLink | EventProperty
268
+ mapping?: string
269
+ }
270
+ | {
271
+ input: 'dryMultiplier'
272
+ value?: number | DataLink | EventProperty
273
+ mapping?: string
274
+ }
275
+ | {
276
+ input: 'dryBase'
277
+ value?: number | DataLink | EventProperty
278
+ mapping?: string
279
+ }
280
+ | {
281
+ input: 'dryAllowedLength'
282
+ value?: number | DataLink | EventProperty
283
+ mapping?: string
284
+ }
285
+ | {
286
+ input: 'dryPenaltyLastN'
287
+ value?: number | DataLink | EventProperty
288
+ mapping?: string
289
+ }
290
+ | {
291
+ input: 'drySequenceBreakers'
292
+ value?: Array<any> | DataLink | EventProperty
293
+ mapping?: string
294
+ }
295
+ | {
296
+ input: 'mirostat'
297
+ value?: number | DataLink | EventProperty
298
+ mapping?: string
299
+ }
300
+ | {
301
+ input: 'mirostatTau'
302
+ value?: number | DataLink | EventProperty
303
+ mapping?: string
304
+ }
305
+ | {
306
+ input: 'mirostatEta'
307
+ value?: number | DataLink | EventProperty
308
+ mapping?: string
309
+ }
310
+ | {
311
+ input: 'penaltyLastN'
312
+ value?: number | DataLink | EventProperty
313
+ mapping?: string
314
+ }
315
+ | {
316
+ input: 'penaltyRepeat'
317
+ value?: number | DataLink | EventProperty
318
+ mapping?: string
319
+ }
320
+ | {
321
+ input: 'penaltyFrequency'
322
+ value?: number | DataLink | EventProperty
323
+ mapping?: string
324
+ }
325
+ | {
326
+ input: 'penaltyPresent'
327
+ value?: number | DataLink | EventProperty
328
+ mapping?: string
329
+ }
330
+ | {
331
+ input: 'penalizeNewline'
332
+ value?: boolean | DataLink | EventProperty
333
+ mapping?: string
334
+ }
335
+ | {
336
+ input: 'seed'
337
+ value?: number | DataLink | EventProperty
338
+ mapping?: string
339
+ }
340
+ | {
341
+ input: 'typicalP'
342
+ value?: number | DataLink | EventProperty
343
+ mapping?: string
344
+ }
345
+ | {
346
+ input: 'ignoreEos'
347
+ value?: boolean | DataLink | EventProperty
348
+ mapping?: string
349
+ }
350
+ | {
351
+ input: 'functionCallEnabled'
352
+ value?: boolean | DataLink | EventProperty
353
+ mapping?: string
354
+ }
355
+ | {
356
+ input: 'functionCallSchema'
357
+ value?: Array<any> | DataLink | EventProperty
358
+ mapping?: string
359
+ }
360
+ >
361
+ }
362
+
363
+ /* Clear session with session key or session ID */
364
+ export type GeneratorLLMActionClearSession = ActionWithParams & {
365
+ __actionName: 'GENERATOR_LLM_CLEAR_SESSION'
366
+ params?: Array<
367
+ | {
368
+ input: 'sessionId'
369
+ value?: string | DataLink | EventProperty
370
+ mapping?: string
371
+ }
372
+ | {
373
+ input: 'sessionCustomKey'
374
+ value?: string | DataLink | EventProperty
375
+ mapping?: string
376
+ }
377
+ >
378
+ }
379
+
380
+ /* Stop text completion */
381
+ export type GeneratorLLMActionStopCompletion = Action & {
382
+ __actionName: 'GENERATOR_LLM_STOP_COMPLETION'
383
+ }
384
+
385
+ /* Clear downloaded models & current jobs */
386
+ export type GeneratorLLMActionClearDownload = Action & {
387
+ __actionName: 'GENERATOR_LLM_CLEAR_DOWNLOAD'
388
+ }
389
+
390
+ /* Release multimodal (vision) context (PREVIEW FEATURE) */
391
+ export type GeneratorLLMActionReleaseMultimodalContext = Action & {
392
+ __actionName: 'GENERATOR_LLM_RELEASE_MULTIMODAL_CONTEXT'
393
+ }
394
+
395
+ /* Release context */
396
+ export type GeneratorLLMActionReleaseContext = Action & {
397
+ __actionName: 'GENERATOR_LLM_RELEASE_CONTEXT'
398
+ }
399
+
400
+ interface GeneratorLLMDef {
401
+ /*
402
+ Default property:
403
+ {
404
+ "init": false,
405
+ "contextSize": 512,
406
+ "batchSize": 512,
407
+ "uBatchSize": 512,
408
+ "accelVariant": "default",
409
+ "mainGpu": 0,
410
+ "gpuLayers": 0,
411
+ "useMlock": true,
412
+ "useMmap": true,
413
+ "cacheKType": "f16",
414
+ "cacheVType": "f16",
415
+ "ctxShift": true,
416
+ "cpuMoeLayers": 0,
417
+ "transformScriptEnabled": false,
418
+ "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables }, members = { llmUtils } \*\/\nreturn inputs.prompt",
419
+ "transformScriptVariables": {},
420
+ "sessionMinSaveSize": 50,
421
+ "sessionRemain": 10,
422
+ "completionMode": "auto",
423
+ "completionPrompt": "",
424
+ "completionPromptTemplateType": "${}",
425
+ "completionEnableThinking": true,
426
+ "completionAddGenerationPrompt": true,
427
+ "completionChatTemplateKwargs": {},
428
+ "completionUseReasoningFormat": "auto",
429
+ "completionStopWords": [],
430
+ "completionPredict": 400,
431
+ "completionTopK": 40,
432
+ "completionTopP": 0.95,
433
+ "completionMinP": 0.05,
434
+ "completionDryMultiplier": 0,
435
+ "completionDryBase": 1.75,
436
+ "completionDryAllowedLength": 2,
437
+ "completionDrySequenceBreakers": [
438
+ "\n",
439
+ ":",
440
+ "\"",
441
+ "*"
442
+ ],
443
+ "completionMirostat": 0,
444
+ "completionMirostatTau": 5,
445
+ "completionMirostatEta": 0.1,
446
+ "completionPenaltyLastN": 64,
447
+ "completionPenaltyRepeat": 1,
448
+ "completionPenaltyFrequency": 0,
449
+ "completionPenaltyPresent": 0,
450
+ "completionPenalizeNewline": false,
451
+ "completionTypicalP": 1
452
+ }
453
+ */
454
+ property?: {
455
+ /* Initialize the Llama context on generator initialization
456
+ Please note that it will take some RAM depending on the model size */
457
+ init?: boolean | DataLink
458
+ /* The URL or path of model
459
+ We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
460
+ modelUrl?: string | DataLink
461
+ /* Hash type of model */
462
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
463
+ /* Hash of model */
464
+ modelHash?: string | DataLink
465
+ /* Load multimodal (vision) context after model loaded (PREVIEW FEATURE) */
466
+ initMultimodal?: boolean | DataLink
467
+ /* The URL or path of mmproj file for multimodal vision support (PREVIEW FEATURE) */
468
+ mmprojUrl?: string | DataLink
469
+ /* Hash type of mmproj file (PREVIEW FEATURE) */
470
+ mmprojHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
471
+ /* Hash of mmproj file (PREVIEW FEATURE) */
472
+ mmprojHash?: string | DataLink
473
+ /* Chat Template (Jinja format) to override the default template from model */
474
+ chatTemplate?: string | DataLink
475
+ /* Context size (0 ~ 4096) (Default to 512) */
476
+ contextSize?: number | DataLink
477
+ /* Logical batch size for prompt processing */
478
+ batchSize?: number | DataLink
479
+ /* Physical batch size for prompt processing */
480
+ uBatchSize?: number | DataLink
481
+ /* Number of threads */
482
+ maxThreads?: number | DataLink
483
+ /* Accelerator variant (Only for desktop)
484
+ `default` - CPU / Metal (macOS)
485
+ `vulkan` - Use Vulkan
486
+ `cuda` - Use CUDA */
487
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
488
+ /* Main GPU index */
489
+ mainGpu?: number | DataLink
490
+ /* Number of GPU layers (NOTE: Currently not supported for Android) */
491
+ gpuLayers?: number | DataLink
492
+ /* Use memory lock */
493
+ useMlock?: boolean | DataLink
494
+ /* Use mmap */
495
+ useMmap?: boolean | DataLink
496
+ /* Use Flash Attention for inference (Recommended with GPU enabled) */
497
+ useFlashAttn?: 'auto' | 'on' | 'off' | DataLink
498
+ /* KV cache data type for the K (Default: f16) */
499
+ cacheKType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
500
+ /* KV cache data type for the V (Default: f16) */
501
+ cacheVType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
502
+ /* Use a unified buffer across the input sequences when computing the attention */
503
+ useKVUnified?: boolean | DataLink
504
+ /* Use full-size SWA cache. May improve performance for multiple sequences but uses more memory. */
505
+ useSwaFull?: boolean | DataLink
506
+ /* Enable context shift */
507
+ ctxShift?: boolean | DataLink
508
+ /* Number of layers to keep MoE weights on CPU */
509
+ cpuMoeLayers?: number | DataLink
510
+ /* Enable Transform Script for processing the prompt */
511
+ transformScriptEnabled?: boolean | DataLink
512
+ /* Code of Transform Script */
513
+ transformScriptCode?: string | DataLink
514
+ /* Variables used in Transform Script (object) */
515
+ transformScriptVariables?: {} | DataLink
516
+ /* Session save mode
517
+ `none` - No session saving
518
+ `prompt` - Save session when prompt processed
519
+ `completion` - Save session when completion finished
520
+ `all` - Save session when prompt processed and completion finished */
521
+ sessionSaveMode?: 'none' | 'prompt' | 'completion' | 'all' | DataLink
522
+ /* Minimum processed/generated size to determine whether to save session (Unit: token) */
523
+ sessionMinSaveSize?: number | DataLink
524
+ /* Session file remain count (Default to 10) */
525
+ sessionRemain?: number | DataLink
526
+ /* Completion mode (TODO: document lora, n_gqa, rms_norm_eps, rope_freq_base, rope_freq_scale) */
527
+ completionMode?: 'auto' | 'chat' | 'text' | DataLink
528
+ /* Tools for chat mode using OpenAI-compatible function calling format
529
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
530
+ See: https://platform.openai.com/docs/guides/function-calling */
531
+ completionTools?: Array<{} | DataLink> | DataLink
532
+ /* Enable parallel tool calls */
533
+ completionParallelToolCalls?: boolean | DataLink
534
+ /* Tool choice for chat mode */
535
+ completionToolChoice?: 'none' | 'auto' | 'required' | DataLink
536
+ /* Messages (chat mode) */
537
+ completionMessages?:
538
+ | Array<
539
+ | DataLink
540
+ | {
541
+ role?: string | DataLink
542
+ content?: string | DataLink
543
+ }
544
+ >
545
+ | DataLink
546
+ /* Prompt (text mode) */
547
+ completionPrompt?: string | DataLink
548
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
549
+ In prompt, use `<__media__>` for position of media content */
550
+ completionPromptMediaPaths?: Array<string | DataLink> | DataLink
551
+ /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
552
+ completionPromptTemplateData?: {} | DataLink
553
+ /* The prompt template type */
554
+ completionPromptTemplateType?: '${}' | '{{}}' | DataLink
555
+ /* Response format */
556
+ completionResponseFormat?:
557
+ | DataLink
558
+ | {
559
+ type?: 'text' | 'json_schema' | 'json_object' | DataLink
560
+ json_schema?:
561
+ | DataLink
562
+ | {
563
+ strict?: boolean | DataLink
564
+ schema?: {} | DataLink
565
+ }
566
+ schema?: {} | DataLink
567
+ }
568
+ /* Enable thinking */
569
+ completionEnableThinking?: boolean | DataLink
570
+ /* Add generation prompt */
571
+ completionAddGenerationPrompt?: boolean | DataLink
572
+ /* Now (for fill current date in chat template if supported) */
573
+ completionNow?: string | DataLink
574
+ /* Additional keyword arguments for chat template (object) */
575
+ completionChatTemplateKwargs?: {} | DataLink
576
+ /* Use reasoning format for enhanced response structure
577
+ `auto` - Auto-determine the reasoning format of the model
578
+ `none` - Disable reasoning format */
579
+ completionUseReasoningFormat?: 'auto' | 'none' | DataLink
580
+ /* Stop words */
581
+ completionStopWords?: Array<string | DataLink> | DataLink
582
+ /* Number of tokens to predict */
583
+ completionPredict?: number | DataLink
584
+ /* Throttle time for completion result (in milliseconds) */
585
+ completionResultThrottle?: number | DataLink
586
+ /* Grammar (GBNF: Please refer to https://github.com/ggerganov/llama.cpp/tree/master/grammars) */
587
+ completionGrammar?: string | DataLink
588
+ /* Temperature */
589
+ completionTemperature?: number | DataLink
590
+ /* Number of probabilities to show for each token in the completion details */
591
+ completionProbs?: number | DataLink
592
+ /* Top K sampling */
593
+ completionTopK?: number | DataLink
594
+ /* Top P sampling */
595
+ completionTopP?: number | DataLink
596
+ /* Min P sampling */
597
+ completionMinP?: number | DataLink
598
+ /* Sets a minimum probability threshold for tokens to be removed */
599
+ completionXtcThreshold?: number | DataLink
600
+ /* Sets the chance for token removal (checked once on sampler start) */
601
+ completionXtcProbability?: number | DataLink
602
+ /* Set the DRY (Don't Repeat Yourself) repetition penalty multiplier. Default: `0.0`, which is disabled. */
603
+ completionDryMultiplier?: number | DataLink
604
+ /* Set the DRY repetition penalty base value. Default: `1.75` */
605
+ completionDryBase?: number | DataLink
606
+ /* Tokens that extend repetition beyond this receive exponentially increasing penalty: multiplier * base ^ (length of repeating sequence before token - allowed length). Default: `2` */
607
+ completionDryAllowedLength?: number | DataLink
608
+ /* How many tokens to scan for repetitions. Default: `-1`, where `0` is disabled and `-1` is context size. */
609
+ completionDryPenaltyLastN?: number | DataLink
610
+ /* Specify an array of sequence breakers for DRY sampling. Only a JSON array of strings is accepted. Default: `['\n', ':', '"', '*']` */
611
+ completionDrySequenceBreakers?: Array<string | DataLink> | DataLink
612
+ /* Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641. Default: `-1.0` (Disabled) */
613
+ completionTopNSigma?: number | DataLink
614
+ /* Use Mirostat sampling. Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used. */
615
+ completionMirostat?: number | DataLink
616
+ /* Mirostat target entropy, parameter tau */
617
+ completionMirostatTau?: number | DataLink
618
+ /* Mirostat learning rate, parameter eta */
619
+ completionMirostatEta?: number | DataLink
620
+ /* Last n tokens to consider for penalize */
621
+ completionPenaltyLastN?: number | DataLink
622
+ /* Penalize repeat sequence of tokens (default: 1.0, 1.0 = disabled) */
623
+ completionPenaltyRepeat?: number | DataLink
624
+ /* Repeat alpha frequency penalty (default: 0.0, 0.0 = disabled) */
625
+ completionPenaltyFrequency?: number | DataLink
626
+ /* Repeat alpha presence penalty (default: 0.0, 0.0 = disabled) */
627
+ completionPenaltyPresent?: number | DataLink
628
+ /* Penalize newline tokens when applying the repeat penalty (default: false) */
629
+ completionPenalizeNewline?: boolean | DataLink
630
+ /* Set the random number generator (RNG) seed (default: -1, -1 = random seed) */
631
+ completionSeed?: number | DataLink
632
+ /* Locally typical sampling, parameter p (default: 1.0, 1.0 = disabled) */
633
+ completionTypicalP?: number | DataLink
634
+ /* Ignore the end-of-stream (EOS) token and continue generating (default: false) */
635
+ completionIgnoreEOS?: boolean | DataLink
636
+ }
637
+ events?: {
638
+ /* Event triggered when context state changes */
639
+ onContextStateChange?: Array<EventAction>
640
+ /* Event triggered when error occurs */
641
+ onError?: Array<EventAction>
642
+ /* Event triggered when completion */
643
+ onCompletion?: Array<EventAction>
644
+ /* Event triggered when completion finished */
645
+ onCompletionFinished?: Array<EventAction>
646
+ /* Event triggered on get function call request */
647
+ onCompletionFunctionCall?: Array<EventAction>
648
+ }
649
+ outlets?: {
650
+ /* Context state */
651
+ contextState?: () => Data
652
+ /* Context load progress (0-100) */
653
+ contextLoadProgress?: () => Data
654
+ /* Context details */
655
+ contextDetails?: () => Data
656
+ /* Session details */
657
+ sessions?: () => Data
658
+ /* Is evaluating */
659
+ isEvaluating?: () => Data
660
+ /* Tokenize result */
661
+ tokenizeResult?: () => Data
662
+ /* Detokenize result */
663
+ detokenizeResult?: () => Data
664
+ /* Last formatted prompt (messages or prompt) */
665
+ completionLastFormattedPrompt?: () => Data
666
+ /* Last completion token */
667
+ completionLastToken?: () => Data
668
+ /* Completion result */
669
+ completionResult?: () => Data
670
+ /* Reasoning content from model responses */
671
+ completionReasoningContent?: () => Data
672
+ /* Full context (Prompt + Completion) */
673
+ completionFullContext?: () => Data
674
+ /* Inference result details */
675
+ completionResultDetails?: () => Data
676
+ }
677
+ }
678
+
679
+ /* Local Large Language Model (LLM) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
680
+
681
+ ## Notice
682
+ - The device RAM must be larger than 8GB
683
+ - iOS: GPU acceleration is supported; an M1+ / A17+ chip device is recommended
684
+ - macOS: GPU acceleration is supported; an M1+ chip device is recommended
685
+ - Android: GPU acceleration is not yet supported (coming soon); Android 13+ is recommended
686
+ - Linux / Windows: GPU acceleration is supported; you can choose the `vulkan` or `cuda` backend via the Accel Variant property */
687
+ export type GeneratorLLM = Generator &
688
+ GeneratorLLMDef & {
689
+ templateKey: 'GENERATOR_LLM'
690
+ switches: Array<
691
+ SwitchDef &
692
+ GeneratorLLMDef & {
693
+ conds?: Array<{
694
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
695
+ cond:
696
+ | SwitchCondInnerStateCurrentCanvas
697
+ | SwitchCondData
698
+ | {
699
+ __typename: 'SwitchCondInnerStateOutlet'
700
+ outlet:
701
+ | 'contextState'
702
+ | 'contextLoadProgress'
703
+ | 'contextDetails'
704
+ | 'sessions'
705
+ | 'isEvaluating'
706
+ | 'tokenizeResult'
707
+ | 'detokenizeResult'
708
+ | 'completionLastFormattedPrompt'
709
+ | 'completionLastToken'
710
+ | 'completionResult'
711
+ | 'completionReasoningContent'
712
+ | 'completionFullContext'
713
+ | 'completionResultDetails'
714
+ value: any
715
+ }
716
+ }>
717
+ }
718
+ >
719
+ }