@fugood/bricks-project 2.22.0-beta.2 → 2.22.0-beta.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -47,7 +47,7 @@ Default property:
47
47
  completed?: Array<EventAction>
48
48
  }
49
49
  outlets?: {
50
- /* Result of each countdown change */
50
+ /* Countdown step value */
51
51
  countdown?: () => Data
52
52
  }
53
53
  }
@@ -182,8 +182,8 @@ export type GeneratorFileActionReadContent = ActionWithParams & {
182
182
  }
183
183
 
184
184
  /* Delete */
185
- export type GeneratorFileActionGeneratorDeleteFile = Action & {
186
- __actionName: 'GENERATOR_DELETE_FILE'
185
+ export type GeneratorFileActionDelete = Action & {
186
+ __actionName: 'GENERATOR_FILE_DELETE'
187
187
  }
188
188
 
189
189
  /* Append (Currently only support text file) */
@@ -691,7 +691,7 @@ Default property:
691
691
  }
692
692
  */
693
693
  property?: {
694
- /* Start query on generator initialized */
694
+ /* Start generator initialization execution immediately */
695
695
  init?: boolean | DataLink
696
696
  /* Data Bank Space ID */
697
697
  spacename?: string | DataLink
@@ -730,7 +730,7 @@ Default property:
730
730
  }
731
731
  }
732
732
 
733
- /* Fetch data & subscribe data change from Data Bank */
733
+ /* Get Data or subscribe to Data changes from Data Bank */
734
734
  export type GeneratorDataBank = Generator &
735
735
  GeneratorDataBankDef & {
736
736
  templateKey: 'GENERATOR_DATA_BANK'
@@ -752,7 +752,7 @@ export type GeneratorDataBank = Generator &
752
752
  >
753
753
  }
754
754
 
755
- /* Run GraphQL query with defined properties */
755
+ /* Execute GraphQL request with defined properties */
756
756
  export type GeneratorGraphQLActionRunQuery = ActionWithParams & {
757
757
  __actionName: 'GENERATOR_GRAPHQL_RUN_QUERY'
758
758
  params?: Array<
@@ -797,7 +797,7 @@ Default property:
797
797
  }
798
798
  */
799
799
  property?: {
800
- /* Start GraphQL query on generator initialized */
800
+ /* Start GraphQL request immediately after generator initialization */
801
801
  init?: boolean | DataLink
802
802
  /* GraphQL request type */
803
803
  type?: 'query' | 'mutation' | 'subscription' | DataLink
@@ -805,9 +805,9 @@ Default property:
805
805
  headers?: {} | DataLink
806
806
  /* HTTP request URL endpoint */
807
807
  endpoint?: string | DataLink
808
- /* Subscrpition endpoint */
808
+ /* Subscription endpoint */
809
809
  endpointForSubscription?: string | DataLink
810
- /* Subscrpition connection params */
810
+ /* Subscription connection params */
811
811
  connectionParams?: {} | DataLink
812
812
  /* Query content */
813
813
  query?: string | DataLink
@@ -831,9 +831,9 @@ Default property:
831
831
  firebaseMessagingSenderId?: string | DataLink
832
832
  }
833
833
  events?: {
834
- /* Event of subscription on connection */
834
+ /* Event triggered when subscription connection is successful */
835
835
  subscriptionOnConnection?: Array<EventAction>
836
- /* Server connections error of GraphQL subscription */
836
+ /* Event triggered when subscription connection error occurs */
837
837
  subscriptionOnConnectionError?: Array<EventAction>
838
838
  }
839
839
  outlets?: {
@@ -973,22 +973,22 @@ export type GeneratorHTTP = Generator &
973
973
  >
974
974
  }
975
975
 
976
- /* Start play sound */
976
+ /* Start playing sound */
977
977
  export type GeneratorSoundPlayerActionPlay = Action & {
978
978
  __actionName: 'GENERATOR_SOUND_PLAYER_PLAY'
979
979
  }
980
980
 
981
- /* Pause play sound */
981
+ /* Pause playing sound */
982
982
  export type GeneratorSoundPlayerActionPause = Action & {
983
983
  __actionName: 'GENERATOR_SOUND_PLAYER_PAUSE'
984
984
  }
985
985
 
986
- /* Resume play sound from pause */
986
+ /* Resume playing sound from pause */
987
987
  export type GeneratorSoundPlayerActionResume = Action & {
988
988
  __actionName: 'GENERATOR_SOUND_PLAYER_RESUME'
989
989
  }
990
990
 
991
- /* Stop play sound */
991
+ /* Stop playing sound */
992
992
  export type GeneratorSoundPlayerActionRelease = Action & {
993
993
  __actionName: 'GENERATOR_SOUND_PLAYER_RELEASE'
994
994
  }
@@ -1002,32 +1002,32 @@ Default property:
1002
1002
  }
1003
1003
  */
1004
1004
  property?: {
1005
- /* The sound file path */
1005
+ /* Sound file path */
1006
1006
  filePath?: string | DataLink
1007
- /* The checksum of file */
1007
+ /* MD5 */
1008
1008
  md5?: string | DataLink
1009
- /* Repeat the sound */
1009
+ /* Repeat playback */
1010
1010
  loop?: boolean | DataLink
1011
- /* The volume of sound (0 - 100) */
1011
+ /* Sound volume (0 - 100) */
1012
1012
  volume?: number | DataLink
1013
1013
  }
1014
1014
  events?: {
1015
- /* Event on sound file loaded */
1015
+ /* Sound file loaded successfully */
1016
1016
  onLoad?: Array<EventAction>
1017
- /* Event on load error */
1017
+ /* Sound file load error */
1018
1018
  onLoadError?: Array<EventAction>
1019
- /* Event on sound play */
1019
+ /* Sound playback complete */
1020
1020
  onPlay?: Array<EventAction>
1021
- /* Event on sound end */
1021
+ /* Sound file playback end */
1022
1022
  onEnd?: Array<EventAction>
1023
1023
  }
1024
1024
  outlets?: {
1025
- /* Is sound playing */
1025
+ /* Whether the sound is playing */
1026
1026
  isPlaying?: () => Data
1027
1027
  }
1028
1028
  }
1029
1029
 
1030
- /* Play sound file from file system, support sound format refer this https://developer.android.com/guide/topics/media/media-formats */
1030
+ /* Play sound, see supported formats at https://developer.android.com/guide/topics/media/media-formats */
1031
1031
  export type GeneratorSoundPlayer = Generator &
1032
1032
  GeneratorSoundPlayerDef & {
1033
1033
  templateKey: 'GENERATOR_SOUND_PLAYER'
@@ -1062,43 +1062,42 @@ Default property:
1062
1062
  }
1063
1063
  */
1064
1064
  property?: {
1065
- /* Enable listening */
1065
+ /* Enable listening for input */
1066
1066
  enabled?: boolean | DataLink
1067
- /* Key map to convert key or key code to specify content (e.g. { 37: 'left' }) */
1067
+ /* Key map to transform key or key code to the designated content (e.g. { 37: 'left' }) */
1068
1068
  keyMap?: {} | DataLink
1069
- /* Key outlet prefer use key code or key.
1070
- Please note that the key code is not supported on iOS / tvOS, so it will use `key` if value is `auto`. */
1069
+ /* Key outlet preference use key code or key. */
1071
1070
  keyOutletPrefer?: 'auto' | 'key-code' | 'key' | DataLink
1072
- /* Stop key or code to finish batch */
1071
+ /* Key or code to finish continuous input */
1073
1072
  batchStopKeys?: Array<string | DataLink | number | DataLink | DataLink> | DataLink
1074
- /* Debounce time to finish batch (ms) */
1073
+ /* Debounce time (ms) to finish continuous input */
1075
1074
  batchDebounce?: number | DataLink
1076
- /* Max wait time to finish batch (ms) (Default: No limit) */
1075
+ /* Maximum wait time (ms) to finish continuous input (default: unlimited) */
1077
1076
  batchDebounceMaxWait?: number | DataLink
1078
1077
  }
1079
1078
  events?: {
1080
- /* Event of key down */
1079
+ /* Event on key press */
1081
1080
  onDown?: Array<EventAction>
1082
- /* Event of key up */
1081
+ /* Event on key up */
1083
1082
  onUp?: Array<EventAction>
1084
- /* Event of batch input finished */
1083
+ /* Event on continuous input complete */
1085
1084
  onBatch?: Array<EventAction>
1086
1085
  }
1087
1086
  outlets?: {
1088
- /* Last key down code */
1087
+ /* Last key code pressed */
1089
1088
  lastKeyDown?: () => Data
1090
- /* Last key down flags */
1089
+ /* Modifier key information on last key press */
1091
1090
  lastKeyDownFlags?: () => Data
1092
- /* Last key up code */
1091
+ /* Last key code released */
1093
1092
  lastKeyUp?: () => Data
1094
- /* Last key up flags */
1093
+ /* Modifier key information on last key release */
1095
1094
  lastKeyUpFlags?: () => Data
1096
- /* Last batch events */
1095
+ /* Last continuous event */
1097
1096
  lastBatchEvents?: () => Data
1098
1097
  }
1099
1098
  }
1100
1099
 
1101
- /* Listening keyboard (controller) event */
1100
+ /* Access keyboard (remote control) events */
1102
1101
  export type GeneratorKeyboard = Generator &
1103
1102
  GeneratorKeyboardDef & {
1104
1103
  templateKey: 'GENERATOR_KEYBOARD'
@@ -1395,22 +1394,22 @@ export type GeneratorStep = Generator &
1395
1394
  >
1396
1395
  }
1397
1396
 
1398
- /* Start the next iterate */
1397
+ /* Proceed to next iteration */
1399
1398
  export type GeneratorIteratorActionNext = Action & {
1400
1399
  __actionName: 'GENERATOR_ITERATOR_NEXT'
1401
1400
  }
1402
1401
 
1403
- /* Back to the previous iterate */
1402
+ /* Go back to previous iteration */
1404
1403
  export type GeneratorIteratorActionPrevious = Action & {
1405
1404
  __actionName: 'GENERATOR_ITERATOR_PREVIOUS'
1406
1405
  }
1407
1406
 
1408
- /* Skip to last iterate element (Ignore loop) */
1407
+ /* Jump to the last iteration element (ignoring the loop setting) */
1409
1408
  export type GeneratorIteratorActionLast = Action & {
1410
1409
  __actionName: 'GENERATOR_ITERATOR_LAST'
1411
1410
  }
1412
1411
 
1413
- /* Reset the iterator state */
1412
+ /* Reset iteration state */
1414
1413
  export type GeneratorIteratorActionReset = Action & {
1415
1414
  __actionName: 'GENERATOR_ITERATOR_RESET'
1416
1415
  }
@@ -1425,40 +1424,40 @@ Default property:
1425
1424
  }
1426
1425
  */
1427
1426
  property?: {
1428
- /* The data source of the value. If it is an array, the value element is used. If it is an object, Object.values() is used as the data. If it is a string, the source character of the value is taken. If it is a number, it represents a count of 1N. */
1427
+ /* Data source for iteration. If it's an Array, it will iterate through elements. If it's an Object, it will use Object.values() as data source. If it's a String, it will iterate through characters. If it's a Number, it represents count from 1 to N. */
1429
1428
  data?: any
1430
- /* Starting element index */
1429
+ /* Starting element position */
1431
1430
  start?: number | DataLink
1432
- /* Iterate step */
1431
+ /* Step size for each iteration */
1433
1432
  step?: number | DataLink
1434
- /* The maximum number of iterations (Set -1 for unlimited) */
1433
+ /* Maximum number of iterations (can be set to -1 for unlimited) */
1435
1434
  maxQuantity?: number | DataLink
1436
- /* Loop iterate */
1435
+ /* Whether to loop the iteration */
1437
1436
  loop?: boolean | DataLink
1438
1437
  }
1439
1438
  events?: {
1440
- /* Event on iterate */
1439
+ /* Event triggered on each iteration */
1441
1440
  iterate?: Array<EventAction>
1442
- /* Event on iterate round start */
1441
+ /* Event triggered on the first iteration of a round */
1443
1442
  first?: Array<EventAction>
1444
- /* Event on iterate round end */
1443
+ /* Event triggered on the last iteration of a round */
1445
1444
  end?: Array<EventAction>
1446
1445
  }
1447
1446
  outlets?: {
1448
- /* Elements that have been iterated (Included current iterated) */
1447
+ /* Elements that have been iterated (including current one) */
1449
1448
  iteratedArray?: () => Data
1450
- /* Elements not yet iterated */
1449
+ /* Elements that will be iterated but have not been iterated yet */
1451
1450
  upcomingArray?: () => Data
1452
- /* Current iterated element */
1451
+ /* Current iteration element */
1453
1452
  value?: () => Data
1454
- /* Current Key of iterated element (number: same as value; array, string: index; object: string) */
1453
+ /* Key of the current iteration element (for number: same as value, for array/string: index, for object: string key) */
1455
1454
  key?: () => Data
1456
- /* The current number of iterations (if data is 6 and step is 2, this value will be returned in order: 1, 2, 3) */
1455
+ /* Current iteration count (if data is 6 and step is 2, this will return: 1,2,3 in sequence) */
1457
1456
  index?: () => Data
1458
1457
  }
1459
1458
  }
1460
1459
 
1461
- /* Iterate values (Array, Object, Number, String) */
1460
+ /* Iterate through values (Array, Object, Number, String) */
1462
1461
  export type GeneratorIterator = Generator &
1463
1462
  GeneratorIteratorDef & {
1464
1463
  templateKey: 'GENERATOR_ITERATOR'
@@ -2065,14 +2064,11 @@ Default property:
2065
2064
  }
2066
2065
  */
2067
2066
  property?: {
2068
- /* Try attach on generator initialized
2069
- On web require user activation to attach device */
2067
+ /* Try attach on generator initialized On web require user activation to attach device */
2070
2068
  attachOnInit?: boolean | DataLink
2071
2069
  /* The serial device driver */
2072
2070
  driver?: 'fd' | 'usb' | DataLink
2073
- /* The serial device path
2074
- e.g. /dev/ttyS0 or /dev/bus/usb/001/001
2075
- For desktop and web is device index number */
2071
+ /* The serial device path e.g. /dev/ttyS0 or /dev/bus/usb/001/001 For desktop and web is device index number */
2076
2072
  path?: string | DataLink
2077
2073
  /* The serial USB vendor id (autoconnect first) */
2078
2074
  vendorId?: number | DataLink
@@ -4307,6 +4303,521 @@ export type GeneratorSqlite = Generator &
4307
4303
  >
4308
4304
  }
4309
4305
 
4306
+ /* Refresh tools and resources, used for case if tools or resources are changed. Note that the current connections will be closed. */
4307
+ export type GeneratorMCPServerActionRefreshResources = Action & {
4308
+ __actionName: 'GENERATOR_MCP_SERVER_REFRESH_RESOURCES'
4309
+ }
4310
+
4311
+ interface GeneratorMCPServerDef {
4312
+ /*
4313
+ Default property:
4314
+ {
4315
+ "enabled": true,
4316
+ "listening": true,
4317
+ "authType": "none",
4318
+ "name": "bricks-foundation-mcp-server-default",
4319
+ "version": "1.0.0",
4320
+ "resources": [],
4321
+ "tools": [],
4322
+ "prompts": []
4323
+ }
4324
+ */
4325
+ property?: {
4326
+ /* Enable MCP server. If enabled and Listening is false, the generator can still provide application-scoped resources. */
4327
+ enabled?: boolean | DataLink
4328
+ /* Application-scoped generator key, key cannot be the same with other application-scoped generators */
4329
+ globalGeneratorKey?: string | DataLink
4330
+ /* Start MCP server */
4331
+ listening?: boolean | DataLink
4332
+ /* HTTP server port */
4333
+ port?: number | DataLink
4334
+ /* Authorization type of HTTP request */
4335
+ authType?: 'none' | 'bearer' | DataLink
4336
+ /* Token of bearer auth */
4337
+ bearerToken?: string | DataLink
4338
+ /* Name of the MCP server */
4339
+ name?: string | DataLink
4340
+ /* Version of the MCP server */
4341
+ version?: string | DataLink
4342
+ /* Resources
4343
+ Type:
4344
+ `static`: Return static data
4345
+ `detect-data-change`: Watch data target change to return data,
4346
+ please update data with ({ id: string, content: string | object }),
4347
+ and ensure the id is same with request id
4348
+ `script`: Run a JavaScript code to return data
4349
+ - Script can define members to call generator functions
4350
+ - Script support async/await */
4351
+ resources?:
4352
+ | Array<
4353
+ | DataLink
4354
+ | {
4355
+ enabled?: boolean | DataLink
4356
+ name?: string | DataLink
4357
+ description?: string | DataLink
4358
+ uriOrTemplate?: string | DataLink
4359
+ type?: 'static' | 'detect-data-change' | 'script' | DataLink
4360
+ staticData?: any
4361
+ dataChangeConfig?:
4362
+ | DataLink
4363
+ | {
4364
+ target?: string | DataLink
4365
+ timeout?: number | DataLink
4366
+ additionalParams?: {} | DataLink
4367
+ }
4368
+ scriptConfig?:
4369
+ | DataLink
4370
+ | {
4371
+ code?: string | DataLink
4372
+ timeout?: number | DataLink
4373
+ members?:
4374
+ | Array<
4375
+ | DataLink
4376
+ | {
4377
+ handler?: string | DataLink
4378
+ varName?: string | DataLink
4379
+ }
4380
+ >
4381
+ | DataLink
4382
+ additionalParams?: {} | DataLink
4383
+ }
4384
+ }
4385
+ >
4386
+ | DataLink
4387
+ /* Tools
4388
+ Type:
4389
+ `detect-data-change`: Watch data target change to return data,
4390
+ please update data with ({ id: string, content: string | object }),
4391
+ and ensure the id is same with request id.
4392
+ `script`: Run a JavaScript code to return data
4393
+ - Script can define members to call generator functions
4394
+ - Script support async/await */
4395
+ tools?:
4396
+ | Array<
4397
+ | DataLink
4398
+ | {
4399
+ enabled?: boolean | DataLink
4400
+ name?: string | DataLink
4401
+ description?: string | DataLink
4402
+ params?: {} | DataLink
4403
+ type?: 'detect-data-change' | 'script' | DataLink
4404
+ dataChangeConfig?:
4405
+ | DataLink
4406
+ | {
4407
+ target?: string | DataLink
4408
+ timeout?: number | DataLink
4409
+ additionalParams?: {} | DataLink
4410
+ }
4411
+ scriptConfig?:
4412
+ | DataLink
4413
+ | {
4414
+ code?: string | DataLink
4415
+ timeout?: number | DataLink
4416
+ members?:
4417
+ | Array<
4418
+ | DataLink
4419
+ | {
4420
+ handler?: string | DataLink
4421
+ varName?: string | DataLink
4422
+ }
4423
+ >
4424
+ | DataLink
4425
+ additionalParams?: {} | DataLink
4426
+ }
4427
+ }
4428
+ >
4429
+ | DataLink
4430
+ /* Prompts
4431
+ Type:
4432
+ `static`: Return static data
4433
+ `detect-data-change`: Watch data target change to return data,
4434
+ please update data with ({ id: string, content: string | object }),
4435
+ and ensure the id is same with request id
4436
+ `script`: Run a JavaScript code to return data
4437
+ - Script can define members to call generator functions
4438
+ - Script support async/await */
4439
+ prompts?:
4440
+ | Array<
4441
+ | DataLink
4442
+ | {
4443
+ enabled?: boolean | DataLink
4444
+ name?: string | DataLink
4445
+ description?: string | DataLink
4446
+ arguments?: {} | DataLink
4447
+ type?: 'static' | 'detect-data-change' | 'script' | DataLink
4448
+ staticData?: any
4449
+ dataChangeConfig?:
4450
+ | DataLink
4451
+ | {
4452
+ target?: string | DataLink
4453
+ timeout?: number | DataLink
4454
+ additionalParams?: {} | DataLink
4455
+ }
4456
+ scriptConfig?:
4457
+ | DataLink
4458
+ | {
4459
+ code?: string | DataLink
4460
+ timeout?: number | DataLink
4461
+ members?:
4462
+ | Array<
4463
+ | DataLink
4464
+ | {
4465
+ handler?: string | DataLink
4466
+ varName?: string | DataLink
4467
+ }
4468
+ >
4469
+ | DataLink
4470
+ additionalParams?: {} | DataLink
4471
+ }
4472
+ }
4473
+ >
4474
+ | DataLink
4475
+ }
4476
+ events?: {
4477
+ /* Listening of HTTP server */
4478
+ onListening?: Array<EventAction>
4479
+ /* Error of HTTP server */
4480
+ onError?: Array<EventAction>
4481
+ /* Client error of HTTP server */
4482
+ onClientError?: Array<EventAction>
4483
+ /* Client close of HTTP server */
4484
+ onClientClose?: Array<EventAction>
4485
+ /* On request resource (Request: { name: string, uri: string, params: object }) */
4486
+ onRequestResource?: Array<EventAction>
4487
+ /* On call tool (Request: { name: string, params: object }) */
4488
+ onCallTool?: Array<EventAction>
4489
+ /* On get prompt (Request: { name: string, arguments: object }) */
4490
+ onGetPrompt?: Array<EventAction>
4491
+ }
4492
+ outlets?: {
4493
+ /* Whether the HTTP server is listening */
4494
+ isListening?: () => Data
4495
+ /* Last error of HTTP server */
4496
+ lastError?: () => Data
4497
+ /* MCP server endpoint URL */
4498
+ endpoint?: () => Data
4499
+ /* Connected remotes (Session ID) */
4500
+ connectedRemotes?: () => Data
4501
+ /* Last resource request ({ name: string, uri: string, params: object }) */
4502
+ lastResourceRequest?: () => Data
4503
+ /* Last tool call ({ name: string, params: object }) */
4504
+ lastToolCall?: () => Data
4505
+ /* Last prompt get ({ name: string, arguments: object }) */
4506
+ lastPromptGet?: () => Data
4507
+ }
4508
+ }
4509
+
4510
+ /* Model Context Protocol (MCP) Server (https://docs.anthropic.com/en/docs/agents-and-tools/mcp) */
4511
+ export type GeneratorMCPServer = Generator &
4512
+ GeneratorMCPServerDef & {
4513
+ templateKey: 'GENERATOR_MCP_SERVER'
4514
+ switches: Array<
4515
+ SwitchDef &
4516
+ GeneratorMCPServerDef & {
4517
+ conds?: Array<{
4518
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
4519
+ cond:
4520
+ | SwitchCondInnerStateCurrentCanvas
4521
+ | SwitchCondData
4522
+ | {
4523
+ __typename: 'SwitchCondInnerStateOutlet'
4524
+ outlet:
4525
+ | 'isListening'
4526
+ | 'lastError'
4527
+ | 'endpoint'
4528
+ | 'connectedRemotes'
4529
+ | 'lastResourceRequest'
4530
+ | 'lastToolCall'
4531
+ | 'lastPromptGet'
4532
+ value: any
4533
+ }
4534
+ }>
4535
+ }
4536
+ >
4537
+ }
4538
+
4539
+ /* Connect to MCP server */
4540
+ export type GeneratorMCPActionConnect = Action & {
4541
+ __actionName: 'GENERATOR_MCP_CONNECT'
4542
+ }
4543
+
4544
+ /* Disconnect from MCP server */
4545
+ export type GeneratorMCPActionDisconnect = Action & {
4546
+ __actionName: 'GENERATOR_MCP_DISCONNECT'
4547
+ }
4548
+
4549
+ /* List resources */
4550
+ export type GeneratorMCPActionListResources = ActionWithParams & {
4551
+ __actionName: 'GENERATOR_MCP_LIST_RESOURCES'
4552
+ params?: Array<{
4553
+ input: 'requestId'
4554
+ value?: string | DataLink | EventProperty
4555
+ mapping?: string
4556
+ }>
4557
+ }
4558
+
4559
+ /* List resource templates */
4560
+ export type GeneratorMCPActionListResourceTemplates = ActionWithParams & {
4561
+ __actionName: 'GENERATOR_MCP_LIST_RESOURCE_TEMPLATES'
4562
+ params?: Array<{
4563
+ input: 'requestId'
4564
+ value?: string | DataLink | EventProperty
4565
+ mapping?: string
4566
+ }>
4567
+ }
4568
+
4569
+ /* Read resource */
4570
+ export type GeneratorMCPActionReadResource = ActionWithParams & {
4571
+ __actionName: 'GENERATOR_MCP_READ_RESOURCE'
4572
+ params?: Array<
4573
+ | {
4574
+ input: 'requestId'
4575
+ value?: string | DataLink | EventProperty
4576
+ mapping?: string
4577
+ }
4578
+ | {
4579
+ input: 'uri'
4580
+ value?: string | DataLink | EventProperty
4581
+ mapping?: string
4582
+ }
4583
+ | {
4584
+ input: 'variables'
4585
+ value?: {} | DataLink | EventProperty
4586
+ mapping?: string
4587
+ }
4588
+ >
4589
+ }
4590
+
4591
+ /* List tools */
4592
+ export type GeneratorMCPActionListTools = ActionWithParams & {
4593
+ __actionName: 'GENERATOR_MCP_LIST_TOOLS'
4594
+ params?: Array<{
4595
+ input: 'requestId'
4596
+ value?: string | DataLink | EventProperty
4597
+ mapping?: string
4598
+ }>
4599
+ }
4600
+
4601
+ /* Call tool */
4602
+ export type GeneratorMCPActionCallTool = ActionWithParams & {
4603
+ __actionName: 'GENERATOR_MCP_CALL_TOOL'
4604
+ params?: Array<
4605
+ | {
4606
+ input: 'requestId'
4607
+ value?: string | DataLink | EventProperty
4608
+ mapping?: string
4609
+ }
4610
+ | {
4611
+ input: 'name'
4612
+ value?: string | DataLink | EventProperty
4613
+ mapping?: string
4614
+ }
4615
+ | {
4616
+ input: 'variables'
4617
+ value?: {} | DataLink | EventProperty
4618
+ mapping?: string
4619
+ }
4620
+ >
4621
+ }
4622
+
4623
+ /* List prompts */
4624
+ export type GeneratorMCPActionListPrompts = ActionWithParams & {
4625
+ __actionName: 'GENERATOR_MCP_LIST_PROMPTS'
4626
+ params?: Array<{
4627
+ input: 'requestId'
4628
+ value?: string | DataLink | EventProperty
4629
+ mapping?: string
4630
+ }>
4631
+ }
4632
+
4633
+ /* Request prompt */
4634
+ export type GeneratorMCPActionGetPrompt = ActionWithParams & {
4635
+ __actionName: 'GENERATOR_MCP_GET_PROMPT'
4636
+ params?: Array<
4637
+ | {
4638
+ input: 'requestId'
4639
+ value?: string | DataLink | EventProperty
4640
+ mapping?: string
4641
+ }
4642
+ | {
4643
+ input: 'name'
4644
+ value?: string | DataLink | EventProperty
4645
+ mapping?: string
4646
+ }
4647
+ | {
4648
+ input: 'variables'
4649
+ value?: {} | DataLink | EventProperty
4650
+ mapping?: string
4651
+ }
4652
+ >
4653
+ }
4654
+
4655
+ interface GeneratorMCPDef {
4656
+ /*
4657
+ Default property:
4658
+ {
4659
+ "init": false,
4660
+ "type": "streamable-http",
4661
+ "url": "",
4662
+ "autoReconnect": true,
4663
+ "maxReconnectAttempts": 10,
4664
+ "reconnectInterval": 1000,
4665
+ "generatorId": "",
4666
+ "generatorKey": "",
4667
+ "name": "bricks-foundation-mcp-client-default",
4668
+ "version": "1.0.0",
4669
+ "ignoreResourceInList": [],
4670
+ "ignoreToolInList": [],
4671
+ "ignorePromptInList": [],
4672
+ "requestTimeout": 60000
4673
+ }
4674
+ */
4675
+ property?: {
4676
+ /* Initialize the MCP client on start */
4677
+ init?: boolean | DataLink
4678
+ /* Type of the MCP connection, e.g. sse or direct-link (generator) */
4679
+ type?: 'streamable-http' | 'sse' | 'direct-link' | DataLink
4680
+ /* URL of the MCP server, e.g. http://localhost:19853/sse */
4681
+ url?: string | DataLink
4682
+ /* Whether to automatically reconnect to the MCP server */
4683
+ autoReconnect?: boolean | DataLink
4684
+ /* Maximum number of reconnection attempts */
4685
+ maxReconnectAttempts?: number | DataLink
4686
+ /* Reconnection interval in milliseconds */
4687
+ reconnectInterval?: number | DataLink
4688
+ /* SSE connection headers */
4689
+ sseHeaders?: {} | DataLink
4690
+ /* Send request headers */
4691
+ sendHeaders?: {} | DataLink
4692
+ /* Bearer token for authentication */
4693
+ bearerToken?: string | DataLink
4694
+ /* Generator MCP Server ID for direct link */
4695
+ generatorId?: string | DataLink
4696
+ /* Application-scoped key of Generator MCP Server for direct link (If ID is not provided) */
4697
+ generatorKey?: string | DataLink
4698
+ /* Name of the MCP client */
4699
+ name?: string | DataLink
4700
+ /* Version of the MCP client */
4701
+ version?: string | DataLink
4702
+ /* Ignore resources in list response */
4703
+ ignoreResourceInList?: Array<string | DataLink> | DataLink
4704
+ /* Ignore tools in list response */
4705
+ ignoreToolInList?: Array<string | DataLink> | DataLink
4706
+ /* Ignore prompts in list response */
4707
+ ignorePromptInList?: Array<string | DataLink> | DataLink
4708
+ /* Request timeout in milliseconds */
4709
+ requestTimeout?: number | DataLink
4710
+ }
4711
+ events?: {
4712
+ /* On connected */
4713
+ onConnected?: Array<EventAction>
4714
+ /* On connection error */
4715
+ onConnectionError?: Array<EventAction>
4716
+ /* On disconnected */
4717
+ onDisconnected?: Array<EventAction>
4718
+ /* On list resources */
4719
+ onListResources?: Array<EventAction>
4720
+ /* On list resources error */
4721
+ onListResourcesError?: Array<EventAction>
4722
+ /* On list resource templates */
4723
+ onListResourceTemplates?: Array<EventAction>
4724
+ /* On list resource templates error */
4725
+ onListResourceTemplatesError?: Array<EventAction>
4726
+ /* On read resource */
4727
+ onReadResource?: Array<EventAction>
4728
+ /* On read resource error */
4729
+ onReadResourceError?: Array<EventAction>
4730
+ /* On list tools */
4731
+ onListTools?: Array<EventAction>
4732
+ /* On list tools error */
4733
+ onListToolsError?: Array<EventAction>
4734
+ /* On call tool */
4735
+ onCallTool?: Array<EventAction>
4736
+ /* On call tool error */
4737
+ onCallToolError?: Array<EventAction>
4738
+ /* On list prompts */
4739
+ onListPrompts?: Array<EventAction>
4740
+ /* On list prompts error */
4741
+ onListPromptsError?: Array<EventAction>
4742
+ /* On get prompt */
4743
+ onGetPrompt?: Array<EventAction>
4744
+ /* On get prompt error */
4745
+ onGetPromptError?: Array<EventAction>
4746
+ }
4747
+ outlets?: {
4748
+ /* Connection state */
4749
+ connectionState?: () => Data
4750
+ /* List resources response */
4751
+ listResourcesResponse?: () => Data
4752
+ /* List resources error */
4753
+ listResourcesError?: () => Data
4754
+ /* List resource templates response */
4755
+ listResourceTemplatesResponse?: () => Data
4756
+ /* List resource templates error */
4757
+ listResourceTemplatesError?: () => Data
4758
+ /* Read resource response */
4759
+ readResourceResponse?: () => Data
4760
+ /* Read resource error */
4761
+ readResourceError?: () => Data
4762
+ /* List tools response */
4763
+ listToolsResponse?: () => Data
4764
+ /* List tools error */
4765
+ listToolsError?: () => Data
4766
+ /* Call tool response */
4767
+ callToolResponse?: () => Data
4768
+ /* Call tool error */
4769
+ callToolError?: () => Data
4770
+ /* List prompts response */
4771
+ listPromptsResponse?: () => Data
4772
+ /* List prompts error */
4773
+ listPromptsError?: () => Data
4774
+ /* Get prompt response */
4775
+ getPromptResponse?: () => Data
4776
+ /* Request prompt error */
4777
+ getPromptError?: () => Data
4778
+ /* Last error */
4779
+ lastError?: () => Data
4780
+ }
4781
+ }
4782
+
4783
+ /* Model Context Protocol (MCP) Client, support SSE and Generator MCPServer direct link */
4784
+ export type GeneratorMCP = Generator &
4785
+ GeneratorMCPDef & {
4786
+ templateKey: 'GENERATOR_MCP'
4787
+ switches: Array<
4788
+ SwitchDef &
4789
+ GeneratorMCPDef & {
4790
+ conds?: Array<{
4791
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
4792
+ cond:
4793
+ | SwitchCondInnerStateCurrentCanvas
4794
+ | SwitchCondData
4795
+ | {
4796
+ __typename: 'SwitchCondInnerStateOutlet'
4797
+ outlet:
4798
+ | 'connectionState'
4799
+ | 'listResourcesResponse'
4800
+ | 'listResourcesError'
4801
+ | 'listResourceTemplatesResponse'
4802
+ | 'listResourceTemplatesError'
4803
+ | 'readResourceResponse'
4804
+ | 'readResourceError'
4805
+ | 'listToolsResponse'
4806
+ | 'listToolsError'
4807
+ | 'callToolResponse'
4808
+ | 'callToolError'
4809
+ | 'listPromptsResponse'
4810
+ | 'listPromptsError'
4811
+ | 'getPromptResponse'
4812
+ | 'getPromptError'
4813
+ | 'lastError'
4814
+ value: any
4815
+ }
4816
+ }>
4817
+ }
4818
+ >
4819
+ }
4820
+
4310
4821
  /* Load the model */
4311
4822
  export type GeneratorTTSActionLoadModel = Action & {
4312
4823
  __actionName: 'GENERATOR_TTS_LOAD_MODEL'
@@ -4339,8 +4850,13 @@ Default property:
4339
4850
  "model": "BricksDisplay/vits-eng",
4340
4851
  "modelType": "auto",
4341
4852
  "vocoderModel": "speecht5_hifigan",
4853
+ "maxLength": 4096,
4854
+ "temperature": 0.1,
4855
+ "repetitionPenalty": 1.1,
4856
+ "doSample": true,
4342
4857
  "outputType": "play",
4343
4858
  "cacheGenerated": true,
4859
+ "speed": 1,
4344
4860
  "autoInferEnable": false,
4345
4861
  "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
4346
4862
  "hardBreakTime": 500,
@@ -4352,29 +4868,9 @@ Default property:
4352
4868
  init?: boolean | DataLink
4353
4869
  /* TTS model
4354
4870
  The mms-tts models are licensed under CC-BY-NC-4.0 */
4355
- model?:
4356
- | 'Custom'
4357
- | 'BricksDisplay/vits-eng'
4358
- | 'BricksDisplay/vits-cmn'
4359
- | 'BricksDisplay/ellie-Bert-VITS2'
4360
- | 'mms-tts-ara (NC)'
4361
- | 'mms-tts-deu (NC)'
4362
- | 'mms-tts-eng (NC)'
4363
- | 'mms-tts-fra (NC)'
4364
- | 'mms-tts-hin (NC)'
4365
- | 'mms-tts-kor (NC)'
4366
- | 'mms-tts-por (NC)'
4367
- | 'mms-tts-ron (NC)'
4368
- | 'mms-tts-rus (NC)'
4369
- | 'mms-tts-spa (NC)'
4370
- | 'mms-tts-vie (NC)'
4371
- | 'mms-tts-yor (NC)'
4372
- | 'speecht5_tts'
4373
- | DataLink
4871
+ model?: string | DataLink
4374
4872
  /* Model type */
4375
- modelType?: 'auto' | 'vits' | 'bert_vits2' | 'speecht5' | DataLink
4376
- /* Load quantized model (deprecated, use `quantizeType` instead) */
4377
- quantized?: boolean | DataLink
4873
+ modelType?: string | DataLink
4378
4874
  /* Quantize type */
4379
4875
  quantizeType?:
4380
4876
  | 'auto'
@@ -4387,22 +4883,33 @@ Default property:
4387
4883
  | 'bnb4'
4388
4884
  | 'q4f16'
4389
4885
  | DataLink
4390
- /* Custom model name
4391
- Choose model from https://huggingface.co/models?pipeline_tag=text-to-audio&library=transformers.js */
4392
- customModel?: string | DataLink
4393
4886
  /* Vocoder model for SpeechT5 */
4394
4887
  vocoderModel?: 'Custom' | 'speecht5_hifigan' | DataLink
4395
4888
  /* Custom vocoder model
4396
4889
  Choose model from https://huggingface.co/models?library=transformers.js&other=hifigan */
4397
4890
  customVocoderModel?: string | DataLink
4398
- /* XVector speaker embedding for HiFi-GAN */
4891
+ /* Speaker embedding, for SpeechT5 or StyleTTS (Kokoro) */
4399
4892
  speakerEmbedUrl?: string | DataLink
4400
- /* MD5 checksum of `speakerEmbedUrl` */
4401
- speakerEmbedMd5?: string | DataLink
4893
+ /* Hash of `speakerEmbedUrl` */
4894
+ speakerEmbedHash?: string | DataLink
4895
+ /* Hash type of `speakerEmbedUrl` */
4896
+ speakerEmbedHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
4897
+ /* Speaker config, for OuteTTS model */
4898
+ speakerConfig?: {} | DataLink
4899
+ /* Audio token generation max length */
4900
+ maxLength?: number | DataLink
4901
+ /* Audio token generation temperature */
4902
+ temperature?: number | DataLink
4903
+ /* Audio token generation repetition penalty */
4904
+ repetitionPenalty?: number | DataLink
4905
+ /* Use greedy sampling for audio token generation */
4906
+ doSample?: boolean | DataLink
4402
4907
  /* Output mode */
4403
4908
  outputType?: 'play' | 'file' | DataLink
4404
4909
  /* Enable cache for generated audio */
4405
4910
  cacheGenerated?: boolean | DataLink
4911
+ /* Speed of the generated audio, for StyleTTS (Kokoro) */
4912
+ speed?: number | DataLink
4406
4913
  /* Text to generate */
4407
4914
  prompt?: string | DataLink
4408
4915
  /* Auto inference when prompt changes */
@@ -4484,6 +4991,21 @@ export type GeneratorOnnxLLMActionInfer = ActionWithParams & {
4484
4991
  value?: Array<any> | DataLink | EventProperty
4485
4992
  mapping?: string
4486
4993
  }
4994
+ | {
4995
+ input: 'images'
4996
+ value?: Array<any> | DataLink | EventProperty
4997
+ mapping?: string
4998
+ }
4999
+ | {
5000
+ input: 'tools'
5001
+ value?: Array<any> | DataLink | EventProperty
5002
+ mapping?: string
5003
+ }
5004
+ | {
5005
+ input: 'toolChoice'
5006
+ value?: string | DataLink | EventProperty
5007
+ mapping?: string
5008
+ }
4487
5009
  >
4488
5010
  }
4489
5011
 
@@ -4501,8 +5023,9 @@ interface GeneratorOnnxLLMDef {
4501
5023
  /*
4502
5024
  Default property:
4503
5025
  {
4504
- "model": "BricksDisplay/phi-1_5-q4",
4505
5026
  "modelType": "auto",
5027
+ "toolCallParser": "llama3_json",
5028
+ "toolChoice": "auto",
4506
5029
  "maxNewTokens": 256,
4507
5030
  "temperature": 0.7,
4508
5031
  "topK": 50,
@@ -4518,70 +5041,9 @@ Default property:
4518
5041
  /* Initialize the TTS context on generator initialization */
4519
5042
  init?: boolean | DataLink
4520
5043
  /* LLM model */
4521
- model?:
4522
- | 'Custom'
4523
- | 'BricksDisplay/phi-1_5'
4524
- | 'BricksDisplay/phi-1_5-q4'
4525
- | 'Qwen1.5-0.5B'
4526
- | 'Qwen1.5-1.8B'
4527
- | 'Qwen1.5-0.5B-Chat'
4528
- | 'Qwen1.5-1.8B-Chat'
4529
- | 'stablelm-2-1_6b'
4530
- | 'BricksDisplay/stablelm-2-1_6b-q4'
4531
- | 'stablelm-2-zephyr-1_6b'
4532
- | 'BricksDisplay/stablelm-2-zephyr-1_6b-q4'
4533
- | 'BricksDisplay/Llama-2-7b-chat-q4'
4534
- | 'TinyLLama-v0'
4535
- | 'TinyLlama-1.1B-Chat-v1.0'
4536
- | 'BricksDisplay/TinyLlama-1.1B-Chat-v1.0-q4'
4537
- | 'llama-160m'
4538
- | 'llama-68m'
4539
- | 'BricksDisplay/Yi-6B-q4'
4540
- | 'BricksDisplay/Yi-6B-Chat-q4'
4541
- | 'BricksDisplay/Mistral-7B-v0.1-q4'
4542
- | 'BricksDisplay/Mistral-7B-Instruct-v0.2-q4'
4543
- | 'BricksDisplay/Breeze-7B-Base-v1_0-q4'
4544
- | 'BricksDisplay/Breeze-7B-Instruct-v1_0-q4'
4545
- | 'gpt2'
4546
- | 'distilgpt2'
4547
- | 'gpt-neo-125M'
4548
- | 'opt-125m'
4549
- | 'opt-350m'
4550
- | 'bloom-560m'
4551
- | 'bloomz-560m'
4552
- | 't5-small'
4553
- | 't5-base'
4554
- | 'flan-t5-small'
4555
- | 'flan-t5-base'
4556
- | 'mt5-small'
4557
- | 'mt5-base'
4558
- | 'long-t5-lobal-base'
4559
- | 'long-t5-tglobal-base'
4560
- | DataLink
5044
+ model?: string | DataLink
4561
5045
  /* Model type */
4562
- modelType?:
4563
- | 'auto'
4564
- | 'gpt2'
4565
- | 'gptj'
4566
- | 'gpt_bigcode'
4567
- | 'gpt_neo'
4568
- | 'gpt_neox'
4569
- | 'bloom'
4570
- | 'mpt'
4571
- | 'opt'
4572
- | 'llama'
4573
- | 'falcon'
4574
- | 'mistral'
4575
- | 't5'
4576
- | 'mt5'
4577
- | 'longt5'
4578
- | 'phi'
4579
- | 'qwen2'
4580
- | 'stablelm'
4581
- | 'gemma'
4582
- | DataLink
4583
- /* Load quantized model (deprecated, use `quantizeType` instead) */
4584
- quantized?: boolean | DataLink
5046
+ modelType?: string | DataLink
4585
5047
  /* Quantize type */
4586
5048
  quantizeType?:
4587
5049
  | 'auto'
@@ -4594,10 +5056,20 @@ Default property:
4594
5056
  | 'bnb4'
4595
5057
  | 'q4f16'
4596
5058
  | DataLink
4597
- /* Custom model name
4598
- Choose model from https://huggingface.co/models?pipeline_tag=text2text-generation&library=transformers.js
4599
- or https://huggingface.co/models?pipeline_tag=text-generation&library=transformers.js&sort=trending */
4600
- customModel?: string | DataLink
5059
+ /* Prompt to inference */
5060
+ prompt?: string | DataLink
5061
+ /* Messages to inference */
5062
+ messages?: Array<DataLink | {}> | DataLink
5063
+ /* Images with message to inference */
5064
+ images?: Array<string | DataLink> | DataLink
5065
+ /* Tool call parser */
5066
+ toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
5067
+ /* Tools for chat mode using OpenAI-compatible function calling format
5068
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
5069
+ See: https://platform.openai.com/docs/guides/function-calling */
5070
+ tools?: Array<{} | DataLink> | DataLink
5071
+ /* Tool choice for chat mode */
5072
+ toolChoice?: 'none' | 'auto' | DataLink
4601
5073
  /* Max new tokens to generate */
4602
5074
  maxNewTokens?: number | DataLink
4603
5075
  /* Temperature */
@@ -4632,6 +5104,10 @@ Default property:
4632
5104
  events?: {
4633
5105
  /* Event triggered when state change */
4634
5106
  onContextStateChange?: Array<EventAction>
5107
+ /* Event triggered on get function call request */
5108
+ onFunctionCall?: Array<EventAction>
5109
+ /* Event triggered on completion finished */
5110
+ onCompletionFinished?: Array<EventAction>
4635
5111
  /* Event triggered when error occurs */
4636
5112
  onError?: Array<EventAction>
4637
5113
  }
@@ -4642,6 +5118,8 @@ Default property:
4642
5118
  generated?: () => Data
4643
5119
  /* Full result of generation */
4644
5120
  fullResult?: () => Data
5121
+ /* Last function call */
5122
+ lastFunctionCall?: () => Data
4645
5123
  }
4646
5124
  }
4647
5125
 
@@ -4660,7 +5138,7 @@ export type GeneratorOnnxLLM = Generator &
4660
5138
  | SwitchCondData
4661
5139
  | {
4662
5140
  __typename: 'SwitchCondInnerStateOutlet'
4663
- outlet: 'contextState' | 'generated' | 'fullResult'
5141
+ outlet: 'contextState' | 'generated' | 'fullResult' | 'lastFunctionCall'
4664
5142
  value: any
4665
5143
  }
4666
5144
  }>
@@ -4707,27 +5185,9 @@ Default property:
4707
5185
  /* Initialize the TTS context on generator initialization */
4708
5186
  init?: boolean | DataLink
4709
5187
  /* STT model */
4710
- model?:
4711
- | 'Custom'
4712
- | 'whisper-tiny'
4713
- | 'whisper-tiny.en'
4714
- | 'whisper-small'
4715
- | 'whisper-small.en'
4716
- | 'whisper-base'
4717
- | 'whisper-base.en'
4718
- | 'whisper-medium'
4719
- | 'whisper-medium.en'
4720
- | 'whisper-large'
4721
- | 'whisper-large-v2'
4722
- | 'whisper-large-v3'
4723
- | 'mms-1b-all'
4724
- | 'mms-1b-fl102'
4725
- | 'mms-1b-l1107'
4726
- | DataLink
5188
+ model?: string | DataLink
4727
5189
  /* Model type */
4728
- modelType?: 'auto' | 'whisper' | 'hubert' | 'wav2vec2' | 'wav2vec2-bert' | DataLink
4729
- /* Load quantized model (deprecated, use `quantizeType` instead) */
4730
- quantized?: boolean | DataLink
5190
+ modelType?: string | DataLink
4731
5191
  /* Quantize type */
4732
5192
  quantizeType?:
4733
5193
  | 'auto'
@@ -4740,9 +5200,6 @@ Default property:
4740
5200
  | 'bnb4'
4741
5201
  | 'q4f16'
4742
5202
  | DataLink
4743
- /* Custom model name
4744
- Choose model from https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&library=transformers.js */
4745
- customModel?: string | DataLink
4746
5203
  /* Return timestamps */
4747
5204
  returnTimestamps?: 'none' | 'enable' | 'word' | DataLink
4748
5205
  /* Transcription language
@@ -4975,7 +5432,7 @@ export type GeneratorSpeechInferenceActionTranscribeData = ActionWithParams & {
4975
5432
  >
4976
5433
  }
4977
5434
 
4978
- /* Transcribe microphone audio source */
5435
+ /* [Deprecated] Transcribe microphone audio source */
4979
5436
  export type GeneratorSpeechInferenceActionTranscribeRealtime = ActionWithParams & {
4980
5437
  __actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_REALTIME'
4981
5438
  params?: Array<
@@ -5042,7 +5499,7 @@ export type GeneratorSpeechInferenceActionTranscribeRealtime = ActionWithParams
5042
5499
  >
5043
5500
  }
5044
5501
 
5045
- /* Stop transcribing microphone audio source */
5502
+ /* [Deprecated] Stop transcribing microphone audio source */
5046
5503
  export type GeneratorSpeechInferenceActionTranscribeRealtimeStop = Action & {
5047
5504
  __actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_REALTIME_STOP'
5048
5505
  }
@@ -5062,6 +5519,7 @@ interface GeneratorSpeechInferenceDef {
5062
5519
  Default property:
5063
5520
  {
5064
5521
  "init": false,
5522
+ "accelVariant": "default",
5065
5523
  "modelName": "base-q8_0",
5066
5524
  "modelUseCoreML": false,
5067
5525
  "modelUseGPU": true,
@@ -5080,6 +5538,11 @@ Default property:
5080
5538
  /* Initialize the Whisper context on generator initialization
5081
5539
  Please note that it will take some RAM depending on the model size */
5082
5540
  init?: boolean | DataLink
5541
+ /* Accelerator variant (Only for desktop)
5542
+ `default` - CPU / Metal (macOS)
5543
+ `vulkan` - Use Vulkan
5544
+ `cuda` - Use CUDA */
5545
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
5083
5546
  /* Use model name, the model download progress will be done in preload stage or the generator initialization stage.
5084
5547
  We used `ggml` format model, please refer to https://huggingface.co/BricksDisplay/whisper-ggml
5085
5548
  You can also choose `custom` option and set `Model URL` and `Model MD5` to use your own model */
@@ -5290,7 +5753,7 @@ Default property:
5290
5753
  inferRealtimeVadFreqThold?: number | DataLink
5291
5754
  }
5292
5755
  events?: {
5293
- /* Event triggered when load is done */
5756
+ /* Event triggered when context state changes */
5294
5757
  onContextStateChange?: Array<EventAction>
5295
5758
  /* Event triggered when error occurs */
5296
5759
  onError?: Array<EventAction>
@@ -5317,7 +5780,13 @@ Default property:
5317
5780
  }
5318
5781
  }
5319
5782
 
5320
- /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) */
5783
+ /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp)
5784
+
5785
+ ## Notice
5786
+ - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
5787
+ - macOS: Supported GPU acceleration, recommended use M1+ chip device
5788
+ - Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
5789
+ - Linux / Windows: Supported GPU acceleration, you can choose `vulkan` or `cuda` backend in Accel Variant property */
5321
5790
  export type GeneratorSpeechInference = Generator &
5322
5791
  GeneratorSpeechInferenceDef & {
5323
5792
  templateKey: 'GENERATOR_SPEECH_INFERENCE'
@@ -5346,11 +5815,416 @@ export type GeneratorSpeechInference = Generator &
5346
5815
  >
5347
5816
  }
5348
5817
 
5818
+ /* Load the model */
5819
+ export type GeneratorVadInferenceActionLoadModel = Action & {
5820
+ __actionName: 'GENERATOR_VAD_INFERENCE_LOAD_MODEL'
5821
+ }
5822
+
5823
+ /* Detect speech in audio file. You can provide `File URL` property, if not provided, it will use the default `File URL` */
5824
+ export type GeneratorVadInferenceActionDetectFile = ActionWithParams & {
5825
+ __actionName: 'GENERATOR_VAD_INFERENCE_DETECT_FILE'
5826
+ params?: Array<
5827
+ | {
5828
+ input: 'fileUrl'
5829
+ value?: string | DataLink | EventProperty
5830
+ mapping?: string
5831
+ }
5832
+ | {
5833
+ input: 'threshold'
5834
+ value?: number | DataLink | EventProperty
5835
+ mapping?: string
5836
+ }
5837
+ | {
5838
+ input: 'minSpeechDurationMs'
5839
+ value?: number | DataLink | EventProperty
5840
+ mapping?: string
5841
+ }
5842
+ | {
5843
+ input: 'minSilenceDurationMs'
5844
+ value?: number | DataLink | EventProperty
5845
+ mapping?: string
5846
+ }
5847
+ | {
5848
+ input: 'maxSpeechDurationS'
5849
+ value?: number | DataLink | EventProperty
5850
+ mapping?: string
5851
+ }
5852
+ | {
5853
+ input: 'speechPadMs'
5854
+ value?: number | DataLink | EventProperty
5855
+ mapping?: string
5856
+ }
5857
+ | {
5858
+ input: 'samplesOverlap'
5859
+ value?: number | DataLink | EventProperty
5860
+ mapping?: string
5861
+ }
5862
+ >
5863
+ }
5864
+
5865
+ /* Detect speech in audio data. Currently only support base64 encoded audio data (16-bit PCM, mono, 16kHz) */
5866
+ export type GeneratorVadInferenceActionDetectData = ActionWithParams & {
5867
+ __actionName: 'GENERATOR_VAD_INFERENCE_DETECT_DATA'
5868
+ params?: Array<
5869
+ | {
5870
+ input: 'data'
5871
+ value?: any | EventProperty
5872
+ mapping?: string
5873
+ }
5874
+ | {
5875
+ input: 'threshold'
5876
+ value?: number | DataLink | EventProperty
5877
+ mapping?: string
5878
+ }
5879
+ | {
5880
+ input: 'minSpeechDurationMs'
5881
+ value?: number | DataLink | EventProperty
5882
+ mapping?: string
5883
+ }
5884
+ | {
5885
+ input: 'minSilenceDurationMs'
5886
+ value?: number | DataLink | EventProperty
5887
+ mapping?: string
5888
+ }
5889
+ | {
5890
+ input: 'maxSpeechDurationS'
5891
+ value?: number | DataLink | EventProperty
5892
+ mapping?: string
5893
+ }
5894
+ | {
5895
+ input: 'speechPadMs'
5896
+ value?: number | DataLink | EventProperty
5897
+ mapping?: string
5898
+ }
5899
+ | {
5900
+ input: 'samplesOverlap'
5901
+ value?: number | DataLink | EventProperty
5902
+ mapping?: string
5903
+ }
5904
+ >
5905
+ }
5906
+
5907
+ /* Clear downloaded files (model, audio) & current jobs */
5908
+ export type GeneratorVadInferenceActionClearDownload = Action & {
5909
+ __actionName: 'GENERATOR_VAD_INFERENCE_CLEAR_DOWNLOAD'
5910
+ }
5911
+
5912
+ /* Release context */
5913
+ export type GeneratorVadInferenceActionReleaseContext = Action & {
5914
+ __actionName: 'GENERATOR_VAD_INFERENCE_RELEASE_CONTEXT'
5915
+ }
5916
+
5917
+ interface GeneratorVadInferenceDef {
5918
+ /*
5919
+ Default property:
5920
+ {
5921
+ "init": false,
5922
+ "modelName": "silero-v5.1.2",
5923
+ "modelUseGPU": true,
5924
+ "modelThreads": 4,
5925
+ "detectThreshold": 0.5,
5926
+ "detectMinSpeechDurationMs": 250,
5927
+ "detectMinSilenceDurationMs": 100,
5928
+ "detectMaxSpeechDurationS": 30,
5929
+ "detectSpeechPadMs": 30,
5930
+ "detectSamplesOverlap": 0.1
5931
+ }
5932
+ */
5933
+ property?: {
5934
+ /* Initialize the VAD context on generator initialization
5935
+ Please note that it will take some RAM depending on the model size */
5936
+ init?: boolean | DataLink
5937
+ /* Use model name, currently only supports the Silero VAD model.
5938
+ The model download progress will be done in preload stage or the generator initialization stage.
5939
+ You can also choose `custom` option and set `Model URL` and `Model SHA1` to use your own model */
5940
+ modelName?: 'custom' | 'silero-v5.1.2' | DataLink
5941
+ /* The URL or path of model
5942
+ We used `ggml` format model, please refer to https://huggingface.co/ggml-org/whisper-vad */
5943
+ modelUrl?: string | DataLink
5944
+ /* Hash type of model */
5945
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
5946
+ /* Hash of model */
5947
+ modelHash?: string | DataLink
5948
+ /* Use GPU Acceleration for inference. Currently iOS only. */
5949
+ modelUseGPU?: boolean | DataLink
5950
+ /* Number of threads to use for processing */
5951
+ modelThreads?: number | DataLink
5952
+ /* Speech probability threshold (0.0-1.0) */
5953
+ detectThreshold?: number | DataLink
5954
+ /* Minimum speech duration in milliseconds */
5955
+ detectMinSpeechDurationMs?: number | DataLink
5956
+ /* Minimum silence duration in milliseconds */
5957
+ detectMinSilenceDurationMs?: number | DataLink
5958
+ /* Maximum speech duration in seconds */
5959
+ detectMaxSpeechDurationS?: number | DataLink
5960
+ /* Padding around speech segments in milliseconds */
5961
+ detectSpeechPadMs?: number | DataLink
5962
+ /* Overlap between analysis windows (0.0-1.0) */
5963
+ detectSamplesOverlap?: number | DataLink
5964
+ /* The file URL or path to be analyzed.
5965
+ It only supported `wav` format with 16kHz sample rate & single (mono) channel */
5966
+ detectFileUrl?: string | DataLink
5967
+ /* MD5 of file to be analyzed */
5968
+ detectFileMd5?: string | DataLink
5969
+ }
5970
+ events?: {
5971
+ /* Event triggered when context state changes */
5972
+ onContextStateChange?: Array<EventAction>
5973
+ /* Event triggered when error occurs */
5974
+ onError?: Array<EventAction>
5975
+ /* Event triggered when got detection result */
5976
+ onDetected?: Array<EventAction>
5977
+ }
5978
+ outlets?: {
5979
+ /* Context state */
5980
+ contextState?: () => Data
5981
+ /* Context details */
5982
+ contextDetails?: () => Data
5983
+ /* Is detecting */
5984
+ isDetecting?: () => Data
5985
+ /* Detection segments result */
5986
+ detectionSegments?: () => Data
5987
+ /* Detection details */
5988
+ detectionDetails?: () => Data
5989
+ }
5990
+ }
5991
+
5992
+ /* Local Voice Activity Detection (VAD) inference based on GGML and [whisper.rn](https://github.com/mybigday/whisper.rn) */
5993
+ export type GeneratorVadInference = Generator &
5994
+ GeneratorVadInferenceDef & {
5995
+ templateKey: 'GENERATOR_VAD_INFERENCE'
5996
+ switches: Array<
5997
+ SwitchDef &
5998
+ GeneratorVadInferenceDef & {
5999
+ conds?: Array<{
6000
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
6001
+ cond:
6002
+ | SwitchCondInnerStateCurrentCanvas
6003
+ | SwitchCondData
6004
+ | {
6005
+ __typename: 'SwitchCondInnerStateOutlet'
6006
+ outlet:
6007
+ | 'contextState'
6008
+ | 'contextDetails'
6009
+ | 'isDetecting'
6010
+ | 'detectionSegments'
6011
+ | 'detectionDetails'
6012
+ value: any
6013
+ }
6014
+ }>
6015
+ }
6016
+ >
6017
+ }
6018
+
6019
+ /* Start realtime transcription */
6020
+ export type GeneratorRealtimeTranscriptionActionStart = Action & {
6021
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_START'
6022
+ }
6023
+
6024
+ /* Stop realtime transcription */
6025
+ export type GeneratorRealtimeTranscriptionActionStop = Action & {
6026
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_STOP'
6027
+ }
6028
+
6029
+ /* Force move to next slice */
6030
+ export type GeneratorRealtimeTranscriptionActionNextSlice = Action & {
6031
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_NEXT_SLICE'
6032
+ }
6033
+
6034
+ /* Reset transcriber state */
6035
+ export type GeneratorRealtimeTranscriptionActionReset = Action & {
6036
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_RESET'
6037
+ }
6038
+
6039
+ interface GeneratorRealtimeTranscriptionDef {
6040
+ /*
6041
+ Default property:
6042
+ {
6043
+ "sttLivePolicy": "only-in-use",
6044
+ "vadInferenceLivePolicy": "only-in-use",
6045
+ "vadEnabled": true,
6046
+ "audioSliceSec": 30,
6047
+ "audioMinSec": 1,
6048
+ "maxSlicesInMemory": 5,
6049
+ "vadStrategy": "use-preset",
6050
+ "vadPreset": "default",
6051
+ "autoSliceOnSpeechEnd": true,
6052
+ "autoSliceThreshold": 2,
6053
+ "initialPrompt": "",
6054
+ "promptPreviousSlices": false,
6055
+ "saveAudio": true,
6056
+ "testMode": false,
6057
+ "testPlaybackSpeed": 1,
6058
+ "testChunkDurationMs": 100,
6059
+ "testLoop": false
6060
+ }
6061
+ */
6062
+ property?: {
6063
+ /* STT Generator for Whisper context */
6064
+ sttGeneratorId?: string | DataLink
6065
+ /* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when not in use. */
6066
+ sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
6067
+ /* VAD Inference Generator for voice activity detection */
6068
+ vadInferenceGeneratorId?: string | DataLink
6069
+ /* VAD Inference Live Policy. If the policy is `only-in-use`, the VAD Inference context will be released when not in use. */
6070
+ vadInferenceLivePolicy?: 'only-in-use' | 'manual' | DataLink
6071
+ /* Enable VAD (Voice Activity Detection) */
6072
+ vadEnabled?: boolean | DataLink
6073
+ /* Audio slice duration in seconds */
6074
+ audioSliceSec?: number | DataLink
6075
+ /* Minimum audio duration to start transcription in seconds */
6076
+ audioMinSec?: number | DataLink
6077
+ /* Maximum number of slices to keep in memory */
6078
+ maxSlicesInMemory?: number | DataLink
6079
+ /* VAD Strategy */
6080
+ vadStrategy?: 'use-preset' | 'use-generator-options' | DataLink
6081
+ /* VAD preset configuration */
6082
+ vadPreset?:
6083
+ | 'default'
6084
+ | 'sensitive'
6085
+ | 'very-sensitive'
6086
+ | 'conservative'
6087
+ | 'very-conservative'
6088
+ | 'continuous-speech'
6089
+ | 'meeting'
6090
+ | 'noisy-environment'
6091
+ | DataLink
6092
+ /* Auto slice on speech end */
6093
+ autoSliceOnSpeechEnd?: boolean | DataLink
6094
+ /* Auto slice threshold in seconds */
6095
+ autoSliceThreshold?: number | DataLink
6096
+ /* Initial prompt for transcription */
6097
+ initialPrompt?: string | DataLink
6098
+ /* Include previous slices in prompt */
6099
+ promptPreviousSlices?: boolean | DataLink
6100
+ /* Enable audio output saving (auto-generates file path) */
6101
+ saveAudio?: boolean | DataLink
6102
+ /* Use test mode with file simulation */
6103
+ testMode?: boolean | DataLink
6104
+ /* Test audio file path for simulation */
6105
+ testFilePath?: string | DataLink
6106
+ /* Test audio file hash */
6107
+ testFileHash?: string | DataLink
6108
+ /* Test audio file hash type */
6109
+ testFileHashType?: string | DataLink
6110
+ /* Test playback speed */
6111
+ testPlaybackSpeed?: number | DataLink
6112
+ /* Test chunk duration in milliseconds */
6113
+ testChunkDurationMs?: number | DataLink
6114
+ /* Loop test audio file */
6115
+ testLoop?: boolean | DataLink
6116
+ }
6117
+ events?: {
6118
+ /* Event triggered when transcription starts, processes, or ends */
6119
+ onTranscribe?: Array<EventAction>
6120
+ /* Event triggered on VAD (Voice Activity Detection) events */
6121
+ onVad?: Array<EventAction>
6122
+ /* Event triggered when error occurs */
6123
+ onError?: Array<EventAction>
6124
+ /* Event triggered when status changes */
6125
+ onStatusChange?: Array<EventAction>
6126
+ /* Event triggered when statistics update */
6127
+ onStatsUpdate?: Array<EventAction>
6128
+ /* Event triggered when transcription ends */
6129
+ onEnd?: Array<EventAction>
6130
+ }
6131
+ outlets?: {
6132
+ /* Is realtime transcription currently active */
6133
+ isActive?: () => Data
6134
+ /* Is currently transcribing audio */
6135
+ isTranscribing?: () => Data
6136
+ /* Current transcription results */
6137
+ results?: () => Data
6138
+ /* Current transcription result text */
6139
+ resultText?: () => Data
6140
+ /* Current statistics */
6141
+ statistics?: () => Data
6142
+ /* Latest transcribe event */
6143
+ lastTranscribeEvent?: () => Data
6144
+ /* Latest VAD event */
6145
+ lastVadEvent?: () => Data
6146
+ /* Audio output file path (auto-generated when saving audio) */
6147
+ audioOutputPath?: () => Data
6148
+ }
6149
+ }
6150
+
6151
+ /* Realtime speech-to-text transcription using Whisper and VAD with live audio streaming */
6152
+ export type GeneratorRealtimeTranscription = Generator &
6153
+ GeneratorRealtimeTranscriptionDef & {
6154
+ templateKey: 'GENERATOR_REALTIME_TRANSCRIPTION'
6155
+ switches: Array<
6156
+ SwitchDef &
6157
+ GeneratorRealtimeTranscriptionDef & {
6158
+ conds?: Array<{
6159
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
6160
+ cond:
6161
+ | SwitchCondInnerStateCurrentCanvas
6162
+ | SwitchCondData
6163
+ | {
6164
+ __typename: 'SwitchCondInnerStateOutlet'
6165
+ outlet:
6166
+ | 'isActive'
6167
+ | 'isTranscribing'
6168
+ | 'results'
6169
+ | 'resultText'
6170
+ | 'statistics'
6171
+ | 'lastTranscribeEvent'
6172
+ | 'lastVadEvent'
6173
+ | 'audioOutputPath'
6174
+ value: any
6175
+ }
6176
+ }>
6177
+ }
6178
+ >
6179
+ }
6180
+
5349
6181
  /* Load the model */
5350
6182
  export type GeneratorLLMActionLoadModel = Action & {
5351
6183
  __actionName: 'GENERATOR_LLM_LOAD_MODEL'
5352
6184
  }
5353
6185
 
6186
+ /* Load multimodal (vision) model (PREVIEW FEATURE) */
6187
+ export type GeneratorLLMActionLoadMultimodalModel = Action & {
6188
+ __actionName: 'GENERATOR_LLM_LOAD_MULTIMODAL_MODEL'
6189
+ }
6190
+
6191
+ /* Tokenize the prompt */
6192
+ export type GeneratorLLMActionTokenize = ActionWithParams & {
6193
+ __actionName: 'GENERATOR_LLM_TOKENIZE'
6194
+ params?: Array<
6195
+ | {
6196
+ input: 'mode'
6197
+ value?: string | DataLink | EventProperty
6198
+ mapping?: string
6199
+ }
6200
+ | {
6201
+ input: 'prompt'
6202
+ value?: string | DataLink | EventProperty
6203
+ mapping?: string
6204
+ }
6205
+ | {
6206
+ input: 'promptMediaPaths'
6207
+ value?: Array<any> | DataLink | EventProperty
6208
+ mapping?: string
6209
+ }
6210
+ | {
6211
+ input: 'messages'
6212
+ value?: Array<any> | DataLink | EventProperty
6213
+ mapping?: string
6214
+ }
6215
+ >
6216
+ }
6217
+
6218
+ /* Detokenize the tokens to text */
6219
+ export type GeneratorLLMActionDetokenize = ActionWithParams & {
6220
+ __actionName: 'GENERATOR_LLM_DETOKENIZE'
6221
+ params?: Array<{
6222
+ input: 'tokens'
6223
+ value?: Array<any> | DataLink | EventProperty
6224
+ mapping?: string
6225
+ }>
6226
+ }
6227
+
5354
6228
  /* Pre-process the prompt, this can speed up the completion action */
5355
6229
  export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
5356
6230
  __actionName: 'GENERATOR_LLM_PROCESS_PROMPT'
@@ -5385,11 +6259,21 @@ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
5385
6259
  value?: string | DataLink | EventProperty
5386
6260
  mapping?: string
5387
6261
  }
6262
+ | {
6263
+ input: 'enableThinking'
6264
+ value?: boolean | DataLink | EventProperty
6265
+ mapping?: string
6266
+ }
5388
6267
  | {
5389
6268
  input: 'prompt'
5390
6269
  value?: string | DataLink | EventProperty
5391
6270
  mapping?: string
5392
6271
  }
6272
+ | {
6273
+ input: 'promptMediaPaths'
6274
+ value?: Array<any> | DataLink | EventProperty
6275
+ mapping?: string
6276
+ }
5393
6277
  | {
5394
6278
  input: 'promptTemplateData'
5395
6279
  value?: {} | DataLink | EventProperty
@@ -5442,11 +6326,21 @@ export type GeneratorLLMActionCompletion = ActionWithParams & {
5442
6326
  value?: string | DataLink | EventProperty
5443
6327
  mapping?: string
5444
6328
  }
6329
+ | {
6330
+ input: 'enableThinking'
6331
+ value?: boolean | DataLink | EventProperty
6332
+ mapping?: string
6333
+ }
5445
6334
  | {
5446
6335
  input: 'prompt'
5447
6336
  value?: string | DataLink | EventProperty
5448
6337
  mapping?: string
5449
6338
  }
6339
+ | {
6340
+ input: 'promptMediaPaths'
6341
+ value?: Array<any> | DataLink | EventProperty
6342
+ mapping?: string
6343
+ }
5450
6344
  | {
5451
6345
  input: 'promptTemplateData'
5452
6346
  value?: {} | DataLink | EventProperty
@@ -5627,6 +6521,11 @@ export type GeneratorLLMActionClearDownload = Action & {
5627
6521
  __actionName: 'GENERATOR_LLM_CLEAR_DOWNLOAD'
5628
6522
  }
5629
6523
 
6524
+ /* Release multimodal (vision) context (PREVIEW FEATURE) */
6525
+ export type GeneratorLLMActionReleaseMultimodalContext = Action & {
6526
+ __actionName: 'GENERATOR_LLM_RELEASE_MULTIMODAL_CONTEXT'
6527
+ }
6528
+
5630
6529
  /* Release context */
5631
6530
  export type GeneratorLLMActionReleaseContext = Action & {
5632
6531
  __actionName: 'GENERATOR_LLM_RELEASE_CONTEXT'
@@ -5647,14 +6546,16 @@ Default property:
5647
6546
  "useMmap": true,
5648
6547
  "cacheKType": "f16",
5649
6548
  "cacheVType": "f16",
6549
+ "ctxShift": true,
5650
6550
  "transformScriptEnabled": false,
5651
- "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables } \*\/\nreturn inputs.prompt",
6551
+ "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables }, members = { llmUtils } \*\/\nreturn inputs.prompt",
5652
6552
  "transformScriptVariables": {},
5653
6553
  "sessionMinSaveSize": 50,
5654
6554
  "sessionRemain": 10,
5655
6555
  "completionMode": "auto",
5656
6556
  "completionPrompt": "",
5657
6557
  "completionPromptTemplateType": "${}",
6558
+ "completionEnableThinking": true,
5658
6559
  "completionStopWords": [
5659
6560
  "</s>",
5660
6561
  "<|end|>",
@@ -5703,6 +6604,14 @@ Default property:
5703
6604
  modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
5704
6605
  /* Hash of model */
5705
6606
  modelHash?: string | DataLink
6607
+ /* Load multimodal (vision) context after model loaded (PREVIEW FEATURE) */
6608
+ initMultimodal?: boolean | DataLink
6609
+ /* The URL or path of mmproj file for multimodal vision support (PREVIEW FEATURE) */
6610
+ mmprojUrl?: string | DataLink
6611
+ /* Hash type of mmproj file (PREVIEW FEATURE) */
6612
+ mmprojHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6613
+ /* Hash of mmproj file (PREVIEW FEATURE) */
6614
+ mmprojHash?: string | DataLink
5706
6615
  /* Chat Template (Jinja format) to override the default template from model */
5707
6616
  chatTemplate?: string | DataLink
5708
6617
  /* Context size (0 ~ 4096) (Default to 512) */
@@ -5728,10 +6637,14 @@ Default property:
5728
6637
  useMmap?: boolean | DataLink
5729
6638
  /* Use Flash Attention for inference (Recommended with GPU enabled) */
5730
6639
  useFlashAttn?: boolean | DataLink
6640
+ /* Use full-size SWA cache. May improve performance for multiple sequences but uses more memory. */
6641
+ useSwaFull?: boolean | DataLink
5731
6642
  /* KV cache data type for the K (Default: f16) */
5732
6643
  cacheKType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
5733
6644
  /* KV cache data type for the V (Default: f16) */
5734
6645
  cacheVType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
6646
+ /* Enable context shift */
6647
+ ctxShift?: boolean | DataLink
5735
6648
  /* Enable Transform Script for processing the prompt */
5736
6649
  transformScriptEnabled?: boolean | DataLink
5737
6650
  /* Code of Transform Script */
@@ -5750,8 +6663,10 @@ Default property:
5750
6663
  sessionRemain?: number | DataLink
5751
6664
  /* TODO:loran_gqarms_norm_epsrope_freq_baserope_freq_scale */
5752
6665
  completionMode?: 'auto' | 'chat' | 'text' | DataLink
5753
- /* Tools for chat mode */
5754
- completionTools?: {} | DataLink
6666
+ /* Tools for chat mode using OpenAI-compatible function calling format
6667
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
6668
+ See: https://platform.openai.com/docs/guides/function-calling */
6669
+ completionTools?: Array<{} | DataLink> | DataLink
5755
6670
  /* Enable parallel tool calls */
5756
6671
  completionParallelToolCalls?: boolean | DataLink
5757
6672
  /* Tool choice for chat mode */
@@ -5768,6 +6683,9 @@ Default property:
5768
6683
  | DataLink
5769
6684
  /* Prompt (text mode) */
5770
6685
  completionPrompt?: string | DataLink
6686
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
6687
+ In prompt, use `<__media__>` for position of media content */
6688
+ completionPromptMediaPaths?: Array<string | DataLink> | DataLink
5771
6689
  /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
5772
6690
  completionPromptTemplateData?: {} | DataLink
5773
6691
  /* The prompt template type */
@@ -5785,6 +6703,8 @@ Default property:
5785
6703
  }
5786
6704
  schema?: {} | DataLink
5787
6705
  }
6706
+ /* Enable thinking */
6707
+ completionEnableThinking?: boolean | DataLink
5788
6708
  /* Stop words */
5789
6709
  completionStopWords?: Array<string | DataLink> | DataLink
5790
6710
  /* Number of tokens to predict */
@@ -5843,7 +6763,7 @@ Default property:
5843
6763
  completionIgnoreEOS?: boolean | DataLink
5844
6764
  }
5845
6765
  events?: {
5846
- /* Event triggered when load is done */
6766
+ /* Event triggered when context state changes */
5847
6767
  onContextStateChange?: Array<EventAction>
5848
6768
  /* Event triggered when error occurs */
5849
6769
  onError?: Array<EventAction>
@@ -5865,6 +6785,10 @@ Default property:
5865
6785
  sessions?: () => Data
5866
6786
  /* Is evaluating */
5867
6787
  isEvaluating?: () => Data
6788
+ /* Tokenize result */
6789
+ tokenizeResult?: () => Data
6790
+ /* Detokenize result */
6791
+ detokenizeResult?: () => Data
5868
6792
  /* Last formatted prompt (messages or prompt) */
5869
6793
  completionLastFormattedPrompt?: () => Data
5870
6794
  /* Last completion token */
@@ -5885,7 +6809,7 @@ Default property:
5885
6809
  - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
5886
6810
  - macOS: Supported GPU acceleration, recommended use M1+ chip device
5887
6811
  - Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
5888
- - Linux / Windows: Supported GPU acceleration, currently only Vulkan backend available */
6812
+ - Linux / Windows: Supported GPU acceleration, you can choose `vulkan` or `cuda` backend in Accel Variant property */
5889
6813
  export type GeneratorLLM = Generator &
5890
6814
  GeneratorLLMDef & {
5891
6815
  templateKey: 'GENERATOR_LLM'
@@ -5905,6 +6829,8 @@ export type GeneratorLLM = Generator &
5905
6829
  | 'contextDetails'
5906
6830
  | 'sessions'
5907
6831
  | 'isEvaluating'
6832
+ | 'tokenizeResult'
6833
+ | 'detokenizeResult'
5908
6834
  | 'completionLastFormattedPrompt'
5909
6835
  | 'completionLastToken'
5910
6836
  | 'completionResult'
@@ -5917,31 +6843,518 @@ export type GeneratorLLM = Generator &
5917
6843
  >
5918
6844
  }
5919
6845
 
5920
- /* Run text completion */
5921
- export type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
5922
- __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
5923
- params?: Array<
5924
- | {
5925
- input: 'messages'
5926
- value?: Array<any> | DataLink | EventProperty
5927
- mapping?: string
5928
- }
5929
- | {
5930
- input: 'maxTokens'
5931
- value?: number | DataLink | EventProperty
5932
- mapping?: string
5933
- }
5934
- | {
5935
- input: 'temperature'
5936
- value?: number | DataLink | EventProperty
5937
- mapping?: string
5938
- }
5939
- | {
5940
- input: 'topP'
5941
- value?: number | DataLink | EventProperty
5942
- mapping?: string
5943
- }
5944
- | {
6846
+ /* Load the model */
6847
+ export type GeneratorGGMLTTSActionLoadModel = Action & {
6848
+ __actionName: 'GENERATOR_GGML_TTS_LOAD_MODEL'
6849
+ }
6850
+
6851
+ /* Generate audio */
6852
+ export type GeneratorGGMLTTSActionGenerate = ActionWithParams & {
6853
+ __actionName: 'GENERATOR_GGML_TTS_GENERATE'
6854
+ params?: Array<{
6855
+ input: 'text'
6856
+ value?: string | DataLink | EventProperty
6857
+ mapping?: string
6858
+ }>
6859
+ }
6860
+
6861
+ /* Clean cache */
6862
+ export type GeneratorGGMLTTSActionCleanCache = Action & {
6863
+ __actionName: 'GENERATOR_GGML_TTS_CLEAN_CACHE'
6864
+ }
6865
+
6866
+ /* Release context */
6867
+ export type GeneratorGGMLTTSActionReleaseContext = Action & {
6868
+ __actionName: 'GENERATOR_GGML_TTS_RELEASE_CONTEXT'
6869
+ }
6870
+
6871
+ interface GeneratorGGMLTTSDef {
6872
+ /*
6873
+ Default property:
6874
+ {
6875
+ "vocoderUrl": "https://huggingface.co/ggml-org/WavTokenizer/resolve/main/WavTokenizer-Large-75-F16.gguf",
6876
+ "vocoderHashType": "sha256",
6877
+ "vocoderHash": "2356baa8631cc2995ea3465196a017a2733600d849a91180c0f97fa7fb375bbe",
6878
+ "vocoderBatchSize": 4096,
6879
+ "outputType": "play",
6880
+ "cacheGenerated": true,
6881
+ "autoInferEnable": false,
6882
+ "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
6883
+ "hardBreakTime": 500,
6884
+ "completionTemperature": 0.1,
6885
+ "completionRepetitionPenalty": 1.1,
6886
+ "completionTopK": 40,
6887
+ "completionTopP": 0.9,
6888
+ "completionMinP": 0.05,
6889
+ "useGuideToken": false,
6890
+ "contextSize": 8192,
6891
+ "batchSize": 8192,
6892
+ "microBatchSize": 512,
6893
+ "maxThreads": 2,
6894
+ "accelVariant": "default",
6895
+ "mainGpu": 0,
6896
+ "gpuLayers": 0,
6897
+ "useMlock": true,
6898
+ "useMmap": true,
6899
+ "useFlashAttn": false
6900
+ }
6901
+ */
6902
+ property?: {
6903
+ /* Initialize the TTS context on generator initialization */
6904
+ init?: boolean | DataLink
6905
+ /* The URL or path of model
6906
+ We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
6907
+ modelUrl?: string | DataLink
6908
+ /* Hash type of model */
6909
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6910
+ /* Hash of model */
6911
+ modelHash?: string | DataLink
6912
+ /* The URL or path of vocoder model */
6913
+ vocoderUrl?: string | DataLink
6914
+ /* Hash type of vocoder model */
6915
+ vocoderHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6916
+ /* Hash of vocoder model */
6917
+ vocoderHash?: string | DataLink
6918
+ /* Batch size of vocoder model */
6919
+ vocoderBatchSize?: number | DataLink
6920
+ /* Output mode */
6921
+ outputType?: 'play' | 'file' | DataLink
6922
+ /* Enable cache for generated audio */
6923
+ cacheGenerated?: boolean | DataLink
6924
+ /* Text to generate */
6925
+ prompt?: string | DataLink
6926
+ /* Speaker JSON */
6927
+ speaker?: {} | DataLink
6928
+ /* Auto inference when prompt changes */
6929
+ autoInferEnable?: boolean | DataLink
6930
+ /* Segmentation rule for auto inference */
6931
+ softBreakRegex?: string | DataLink
6932
+ /* Time to force inference when softBreakRegex is not satisfied */
6933
+ hardBreakTime?: number | DataLink
6934
+ /* Temperature */
6935
+ completionTemperature?: number | DataLink
6936
+ /* Repetition Penalty */
6937
+ completionRepetitionPenalty?: number | DataLink
6938
+ /* Top K sampling */
6939
+ completionTopK?: number | DataLink
6940
+ /* Top P sampling */
6941
+ completionTopP?: number | DataLink
6942
+ /* Min P sampling */
6943
+ completionMinP?: number | DataLink
6944
+ /* Set the random number generator (RNG) seed (default: -1, -1 = random seed) */
6945
+ completionSeed?: number | DataLink
6946
+ /* Number of tokens to predict */
6947
+ completionPredict?: number | DataLink
6948
+ /* Enable guide token to help prevent hallucinations by forcing the TTS to use the correct words. */
6949
+ useGuideToken?: boolean | DataLink
6950
+ /* Context size, for OutTTS recommended 4096 ~ 8192 (Default to 4096) */
6951
+ contextSize?: number | DataLink
6952
+ /* Logical batch size for prompt processing */
6953
+ batchSize?: number | DataLink
6954
+ /* Physical batch size for prompt processing */
6955
+ microBatchSize?: number | DataLink
6956
+ /* Number of threads */
6957
+ maxThreads?: number | DataLink
6958
+ /* Accelerator variant (Only for desktop)
6959
+ `default` - CPU / Metal (macOS)
6960
+ `vulkan` - Use Vulkan
6961
+ `cuda` - Use CUDA */
6962
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
6963
+ /* Main GPU index */
6964
+ mainGpu?: number | DataLink
6965
+ /* Number of GPU layers (NOTE: Currently not supported for Android) */
6966
+ gpuLayers?: number | DataLink
6967
+ /* Use memory lock */
6968
+ useMlock?: boolean | DataLink
6969
+ /* Use mmap */
6970
+ useMmap?: boolean | DataLink
6971
+ /* Use Flash Attention for inference (Recommended with GPU enabled) */
6972
+ useFlashAttn?: boolean | DataLink
6973
+ }
6974
+ events?: {
6975
+ /* Event triggered when state change */
6976
+ onContextStateChange?: Array<EventAction>
6977
+ /* Event triggered when error occurs */
6978
+ onError?: Array<EventAction>
6979
+ }
6980
+ outlets?: {
6981
+ /* Context state */
6982
+ contextState?: () => Data
6983
+ /* Generated audio file */
6984
+ generatedAudio?: () => Data
6985
+ /* Generated audio file is playing */
6986
+ generatedAudioPlaying?: () => Data
6987
+ }
6988
+ }
6989
+
6990
+ /* Local Text-to-Speech (TTS) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
6991
+ You can use any converted model on HuggingFace. */
6992
+ export type GeneratorGGMLTTS = Generator &
6993
+ GeneratorGGMLTTSDef & {
6994
+ templateKey: 'GENERATOR_GGML_TTS'
6995
+ switches: Array<
6996
+ SwitchDef &
6997
+ GeneratorGGMLTTSDef & {
6998
+ conds?: Array<{
6999
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
7000
+ cond:
7001
+ | SwitchCondInnerStateCurrentCanvas
7002
+ | SwitchCondData
7003
+ | {
7004
+ __typename: 'SwitchCondInnerStateOutlet'
7005
+ outlet: 'contextState' | 'generatedAudio' | 'generatedAudioPlaying'
7006
+ value: any
7007
+ }
7008
+ }>
7009
+ }
7010
+ >
7011
+ }
7012
+
7013
+ /* Load the model */
7014
+ export type GeneratorRerankerActionLoadModel = Action & {
7015
+ __actionName: 'GENERATOR_RERANKER_LOAD_MODEL'
7016
+ }
7017
+
7018
+ /* Rerank documents based on query relevance */
7019
+ export type GeneratorRerankerActionRerank = ActionWithParams & {
7020
+ __actionName: 'GENERATOR_RERANKER_RERANK'
7021
+ params?: Array<
7022
+ | {
7023
+ input: 'query'
7024
+ value?: string | DataLink | EventProperty
7025
+ mapping?: string
7026
+ }
7027
+ | {
7028
+ input: 'documents'
7029
+ value?: Array<any> | DataLink | EventProperty
7030
+ mapping?: string
7031
+ }
7032
+ >
7033
+ }
7034
+
7035
+ /* Release context */
7036
+ export type GeneratorRerankerActionReleaseContext = Action & {
7037
+ __actionName: 'GENERATOR_RERANKER_RELEASE_CONTEXT'
7038
+ }
7039
+
7040
+ interface GeneratorRerankerDef {
7041
+ /*
7042
+ Default property:
7043
+ {
7044
+ "init": false,
7045
+ "contextSize": 512,
7046
+ "batchSize": 512,
7047
+ "uBatchSize": 512,
7048
+ "accelVariant": "default",
7049
+ "mainGpu": 0,
7050
+ "gpuLayers": 0,
7051
+ "useMlock": true,
7052
+ "useMmap": true,
7053
+ "normalize": 1
7054
+ }
7055
+ */
7056
+ property?: {
7057
+ /* Initialize the Reranker context on generator initialization */
7058
+ init?: boolean | DataLink
7059
+ /* The URL or path of reranker model (GGUF format) */
7060
+ modelUrl?: string | DataLink
7061
+ /* Hash of reranker model */
7062
+ modelHash?: string | DataLink
7063
+ /* Hash type of reranker model */
7064
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
7065
+ /* Context size (0 ~ 4096) (Default to 512) */
7066
+ contextSize?: number | DataLink
7067
+ /* Logical batch size for processing (default: 512) */
7068
+ batchSize?: number | DataLink
7069
+ /* Physical maximum batch size (default: 512) */
7070
+ uBatchSize?: number | DataLink
7071
+ /* Accelerator variant (default: default) */
7072
+ accelVariant?:
7073
+ | 'default'
7074
+ | 'avx'
7075
+ | 'avx2'
7076
+ | 'avx512'
7077
+ | 'metal'
7078
+ | 'opencl'
7079
+ | 'vulkan'
7080
+ | 'cuda'
7081
+ | 'rocm'
7082
+ | DataLink
7083
+ /* Main GPU index (default: 0) */
7084
+ mainGpu?: number | DataLink
7085
+ /* Number of layers to store in VRAM (default: 0) */
7086
+ gpuLayers?: number | DataLink
7087
+ /* Maximum number of threads to use (default: auto) */
7088
+ maxThreads?: number | DataLink
7089
+ /* Use mlock to keep model in memory (default: true) */
7090
+ useMlock?: boolean | DataLink
7091
+ /* Use mmap for model loading (default: true) */
7092
+ useMmap?: boolean | DataLink
7093
+ /* Query text for reranking */
7094
+ query?: string | DataLink
7095
+ /* Array of documents to rerank */
7096
+ documents?: Array<string | DataLink> | DataLink
7097
+ /* Normalize reranking scores (default: from model config) */
7098
+ normalize?: number | DataLink | boolean | DataLink | DataLink
7099
+ /* Maximum number of documents to return (default: unlimited) */
7100
+ topK?: number | DataLink
7101
+ }
7102
+ events?: {
7103
+ /* Event triggered when the reranker context state changes (loading, ready, error, released) */
7104
+ onContextStateChange?: Array<EventAction>
7105
+ /* Event triggered when an error occurs during reranker operations */
7106
+ onError?: Array<EventAction>
7107
+ }
7108
+ outlets?: {
7109
+ /* Current state of the reranker context (loading, ready, error, released) */
7110
+ contextState?: () => Data
7111
+ /* Loading progress of the reranker model (0-100) */
7112
+ contextLoadProgress?: () => Data
7113
+ /* Detailed information about the reranker context including instance ID and processing status */
7114
+ contextDetails?: () => Data
7115
+ /* Result of the reranking operation containing scored and ranked documents */
7116
+ rerankResult?: () => Data
7117
+ /* Boolean indicating whether the reranker is currently processing a request */
7118
+ isProcessing?: () => Data
7119
+ }
7120
+ }
7121
+
7122
+ /* Local rerank based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
7123
+
7124
+ ## Notice
7125
+ - The device RAM must be larger than 8GB
7126
+ - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
7127
+ - macOS: Supported GPU acceleration, recommended use M1+ chip device
7128
+ - Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
7129
+ - Linux / Windows: Supported GPU acceleration, currently only Vulkan backend available */
7130
+ export type GeneratorReranker = Generator &
7131
+ GeneratorRerankerDef & {
7132
+ templateKey: 'GENERATOR_RERANKER'
7133
+ switches: Array<
7134
+ SwitchDef &
7135
+ GeneratorRerankerDef & {
7136
+ conds?: Array<{
7137
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
7138
+ cond:
7139
+ | SwitchCondInnerStateCurrentCanvas
7140
+ | SwitchCondData
7141
+ | {
7142
+ __typename: 'SwitchCondInnerStateOutlet'
7143
+ outlet:
7144
+ | 'contextState'
7145
+ | 'contextLoadProgress'
7146
+ | 'contextDetails'
7147
+ | 'rerankResult'
7148
+ | 'isProcessing'
7149
+ value: any
7150
+ }
7151
+ }>
7152
+ }
7153
+ >
7154
+ }
7155
+
7156
+ /* Load the model */
7157
+ export type GeneratorQnnLlmActionLoadModel = Action & {
7158
+ __actionName: 'GENERATOR_QNN_LLM_LOAD_MODEL'
7159
+ }
7160
+
7161
+ /* Abort model download */
7162
+ export type GeneratorQnnLlmActionAbortModelDownload = Action & {
7163
+ __actionName: 'GENERATOR_QNN_LLM_ABORT_MODEL_DOWNLOAD'
7164
+ }
7165
+
7166
+ /* Generate text */
7167
+ export type GeneratorQnnLlmActionGenerate = ActionWithParams & {
7168
+ __actionName: 'GENERATOR_QNN_LLM_GENERATE'
7169
+ params?: Array<
7170
+ | {
7171
+ input: 'prompt'
7172
+ value?: string | DataLink | EventProperty
7173
+ mapping?: string
7174
+ }
7175
+ | {
7176
+ input: 'messages'
7177
+ value?: Array<any> | DataLink | EventProperty
7178
+ mapping?: string
7179
+ }
7180
+ | {
7181
+ input: 'tools'
7182
+ value?: Array<any> | DataLink | EventProperty
7183
+ mapping?: string
7184
+ }
7185
+ >
7186
+ }
7187
+
7188
+ /* Abort generation */
7189
+ export type GeneratorQnnLlmActionAbortGeneration = Action & {
7190
+ __actionName: 'GENERATOR_QNN_LLM_ABORT_GENERATION'
7191
+ }
7192
+
7193
+ /* Release context */
7194
+ export type GeneratorQnnLlmActionReleaseContext = Action & {
7195
+ __actionName: 'GENERATOR_QNN_LLM_RELEASE_CONTEXT'
7196
+ }
7197
+
7198
+ interface GeneratorQnnLlmDef {
7199
+ /*
7200
+ Default property:
7201
+ {
7202
+ "modelType": "Llama 3.2 3B Chat",
7203
+ "chatFormat": "Llama 3.x",
7204
+ "toolsInUserMessage": true,
7205
+ "toolCallParser": "llama3_json",
7206
+ "toolChoice": "auto",
7207
+ "parallelToolCalls": false,
7208
+ "greedy": false
7209
+ }
7210
+ */
7211
+ property?: {
7212
+ /* Load model context when generator is initialized */
7213
+ init?: boolean | DataLink
7214
+ /* Model type */
7215
+ modelType?:
7216
+ | 'Llama 3 8B Chat'
7217
+ | 'Llama 3.1 8B Chat'
7218
+ | 'Llama 3.2 3B Chat'
7219
+ | 'Mistral 7B Instruct v0.3'
7220
+ | 'Qwen 2 7B Chat'
7221
+ | 'Phi 3.5 Mini'
7222
+ | 'Granite v3.1 8B Instruct'
7223
+ | 'Custom'
7224
+ | DataLink
7225
+ /* SOC model */
7226
+ socModel?: 'X Elite' | 'X Plus' | '8 Elite' | '8 Gen 3' | 'QCS8550' | DataLink
7227
+ /* Custom model base URL
7228
+ The URL directory should contain `config.json` (model config) file, `model_part_*_of_*.bin` (model split files) files and `tokenizer.json` (tokenizer config) file. */
7229
+ customModelUrl?: string | DataLink
7230
+ /* Custom model split parts */
7231
+ customModelSplitParts?: number | DataLink
7232
+ /* Chat format */
7233
+ chatFormat?:
7234
+ | 'Llama 2'
7235
+ | 'Llama 3'
7236
+ | 'Llama 3.x'
7237
+ | 'Mistral v0.3'
7238
+ | 'Qwen 2'
7239
+ | 'Custom'
7240
+ | DataLink
7241
+ /* Custom chat format template */
7242
+ customChatFormat?: string | DataLink
7243
+ /* Put tools in user message */
7244
+ toolsInUserMessage?: boolean | DataLink
7245
+ /* Prompt to generate */
7246
+ prompt?: string | DataLink
7247
+ /* Chat messages */
7248
+ messages?:
7249
+ | Array<
7250
+ | DataLink
7251
+ | {
7252
+ role?: string | DataLink
7253
+ content?: string | DataLink
7254
+ }
7255
+ >
7256
+ | DataLink
7257
+ /* Stop words */
7258
+ stopWords?: Array<string | DataLink> | DataLink
7259
+ /* Tool call parser */
7260
+ toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
7261
+ /* Tools for chat mode using OpenAI-compatible function calling format
7262
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
7263
+ See: https://platform.openai.com/docs/guides/function-calling */
7264
+ tools?: Array<{} | DataLink> | DataLink
7265
+ /* Tool choice for chat mode */
7266
+ toolChoice?: 'none' | 'auto' | 'required' | DataLink
7267
+ /* Enable parallel tool calls */
7268
+ parallelToolCalls?: boolean | DataLink
7269
+ /* Number of threads, -1 to use n-threads from model config */
7270
+ nThreads?: number | DataLink
7271
+ /* Temperature, -1 to use temperature from model config */
7272
+ temperature?: number | DataLink
7273
+ /* Seed, -1 to use seed from model config */
7274
+ seed?: number | DataLink
7275
+ /* Top K, -1 to use top-k from model config */
7276
+ topK?: number | DataLink
7277
+ /* Top P, -1 to use top-p from model config */
7278
+ topP?: number | DataLink
7279
+ /* Greedy, use greedy sampling */
7280
+ greedy?: boolean | DataLink
7281
+ }
7282
+ events?: {
7283
+ /* Event triggered when context state changes */
7284
+ onContextStateChange?: Array<EventAction>
7285
+ /* Event triggered when generate is done */
7286
+ onGenerate?: Array<EventAction>
7287
+ /* Event triggered on get function call request */
7288
+ onFunctionCall?: Array<EventAction>
7289
+ /* Event triggered when error occurs */
7290
+ onError?: Array<EventAction>
7291
+ }
7292
+ outlets?: {
7293
+ /* Context state */
7294
+ contextState?: () => Data
7295
+ /* Generation result */
7296
+ result?: () => Data
7297
+ /* Full context (Prompt + Generation Result) */
7298
+ fullContext?: () => Data
7299
+ /* Last function call details */
7300
+ lastFunctionCall?: () => Data
7301
+ /* Completion details */
7302
+ completionDetails?: () => Data
7303
+ }
7304
+ }
7305
+
7306
+ /* Local LLM inference using Qualcomm AI Engine */
7307
+ export type GeneratorQnnLlm = Generator &
7308
+ GeneratorQnnLlmDef & {
7309
+ templateKey: 'GENERATOR_QNN_LLM'
7310
+ switches: Array<
7311
+ SwitchDef &
7312
+ GeneratorQnnLlmDef & {
7313
+ conds?: Array<{
7314
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
7315
+ cond:
7316
+ | SwitchCondInnerStateCurrentCanvas
7317
+ | SwitchCondData
7318
+ | {
7319
+ __typename: 'SwitchCondInnerStateOutlet'
7320
+ outlet:
7321
+ | 'contextState'
7322
+ | 'result'
7323
+ | 'fullContext'
7324
+ | 'lastFunctionCall'
7325
+ | 'completionDetails'
7326
+ value: any
7327
+ }
7328
+ }>
7329
+ }
7330
+ >
7331
+ }
7332
+
7333
+ /* Run text completion */
7334
+ export type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
7335
+ __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
7336
+ params?: Array<
7337
+ | {
7338
+ input: 'messages'
7339
+ value?: Array<any> | DataLink | EventProperty
7340
+ mapping?: string
7341
+ }
7342
+ | {
7343
+ input: 'maxTokens'
7344
+ value?: number | DataLink | EventProperty
7345
+ mapping?: string
7346
+ }
7347
+ | {
7348
+ input: 'temperature'
7349
+ value?: number | DataLink | EventProperty
7350
+ mapping?: string
7351
+ }
7352
+ | {
7353
+ input: 'topP'
7354
+ value?: number | DataLink | EventProperty
7355
+ mapping?: string
7356
+ }
7357
+ | {
5945
7358
  input: 'frequencyPenalty'
5946
7359
  value?: number | DataLink | EventProperty
5947
7360
  mapping?: string
@@ -5989,15 +7402,16 @@ interface GeneratorOpenAILLMDef {
5989
7402
  Default property:
5990
7403
  {
5991
7404
  "apiEndpoint": "https://api.openai.com/v1",
5992
- "model": "gpt-4o-mini",
7405
+ "model": "gpt-4o",
5993
7406
  "completionMessages": [
5994
- null
7407
+ {
7408
+ "role": "system",
7409
+ "content": "You are a helpful assistant."
7410
+ }
5995
7411
  ],
5996
7412
  "completionMaxTokens": 1024,
5997
7413
  "completionTemperature": 1,
5998
7414
  "completionTopP": 1,
5999
- "completionFrequencyPenalty": 0,
6000
- "completionPresencePenalty": 0,
6001
7415
  "completionStop": []
6002
7416
  }
6003
7417
  */
@@ -6027,8 +7441,10 @@ Default property:
6027
7441
  }
6028
7442
  >
6029
7443
  | DataLink
6030
- /* Tools for chat mode */
6031
- completionTools?: {} | DataLink
7444
+ /* Tools for chat mode following OpenAI function calling format
7445
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
7446
+ See: https://platform.openai.com/docs/guides/function-calling */
7447
+ completionTools?: Array<{} | DataLink> | DataLink
6032
7448
  /* Enable parallel tool calls */
6033
7449
  completionParallelToolCalls?: boolean | DataLink
6034
7450
  /* Tool choice for chat mode */
@@ -6084,7 +7500,11 @@ Default property:
6084
7500
  - Compatible with OpenAI API format
6085
7501
  - Supports function calling
6086
7502
  - Streaming responses
6087
- - Custom API endpoints */
7503
+ - Custom API endpoints, like
7504
+ - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
7505
+ - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
7506
+ - Gemini API: https://ai.google.dev/gemini-api/docs/openai
7507
+ - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server */
6088
7508
  export type GeneratorOpenAILLM = Generator &
6089
7509
  GeneratorOpenAILLMDef & {
6090
7510
  templateKey: 'GENERATOR_OPENAI_LLM'
@@ -6106,6 +7526,104 @@ export type GeneratorOpenAILLM = Generator &
6106
7526
  >
6107
7527
  }
6108
7528
 
7529
+ /* Generate audio */
7530
+ export type GeneratorOpenAiTTSActionGenerate = ActionWithParams & {
7531
+ __actionName: 'GENERATOR_OPENAI_TTS_GENERATE'
7532
+ params?: Array<{
7533
+ input: 'text'
7534
+ value?: string | DataLink | EventProperty
7535
+ mapping?: string
7536
+ }>
7537
+ }
7538
+
7539
+ /* Clean cache */
7540
+ export type GeneratorOpenAiTTSActionCleanCache = Action & {
7541
+ __actionName: 'GENERATOR_OPENAI_TTS_CLEAN_CACHE'
7542
+ }
7543
+
7544
+ interface GeneratorOpenAiTTSDef {
7545
+ /*
7546
+ Default property:
7547
+ {
7548
+ "apiEndpoint": "https://api.openai.com/v1",
7549
+ "model": "tts-1",
7550
+ "voice": "alloy",
7551
+ "speed": 1,
7552
+ "outputType": "play",
7553
+ "playbackVolume": 100,
7554
+ "cacheGenerated": true,
7555
+ "autoInferEnable": false,
7556
+ "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
7557
+ "hardBreakTime": 500
7558
+ }
7559
+ */
7560
+ property?: {
7561
+ /* API endpoint URL */
7562
+ apiEndpoint?: string | DataLink
7563
+ /* OpenAI API Key */
7564
+ apiKey?: string | DataLink
7565
+ /* OpenAI TTS model */
7566
+ model?: string | DataLink
7567
+ /* Voice to use
7568
+ Select voice from https://openai.fm , default alloy */
7569
+ voice?: string | DataLink
7570
+ /* Additional instructions for the speech generation */
7571
+ instructions?: string | DataLink
7572
+ /* Speed of the generated audio */
7573
+ speed?: number | DataLink
7574
+ /* Output mode */
7575
+ outputType?: 'play' | 'file' | DataLink
7576
+ /* Playback volume (0 - 100) */
7577
+ playbackVolume?: number | DataLink
7578
+ /* Enable cache for generated audio */
7579
+ cacheGenerated?: boolean | DataLink
7580
+ /* Text to generate */
7581
+ prompt?: string | DataLink
7582
+ /* Auto inference when prompt changes */
7583
+ autoInferEnable?: boolean | DataLink
7584
+ /* Segmentation rule for auto inference */
7585
+ softBreakRegex?: string | DataLink
7586
+ /* Time to force inference when softBreakRegex is not satisfied */
7587
+ hardBreakTime?: number | DataLink
7588
+ }
7589
+ events?: {
7590
+ /* Event triggered when state change */
7591
+ onContextStateChange?: Array<EventAction>
7592
+ /* Event triggered when error occurs */
7593
+ onError?: Array<EventAction>
7594
+ }
7595
+ outlets?: {
7596
+ /* Context state */
7597
+ contextState?: () => Data
7598
+ /* Generated audio file */
7599
+ generatedAudio?: () => Data
7600
+ /* Generated audio file is playing */
7601
+ generatedAudioPlaying?: () => Data
7602
+ }
7603
+ }
7604
+
7605
+ /* Generate speech from text using OpenAI's Text-to-Speech API */
7606
+ export type GeneratorOpenAiTTS = Generator &
7607
+ GeneratorOpenAiTTSDef & {
7608
+ templateKey: 'GENERATOR_OPENAI_TTS'
7609
+ switches: Array<
7610
+ SwitchDef &
7611
+ GeneratorOpenAiTTSDef & {
7612
+ conds?: Array<{
7613
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
7614
+ cond:
7615
+ | SwitchCondInnerStateCurrentCanvas
7616
+ | SwitchCondData
7617
+ | {
7618
+ __typename: 'SwitchCondInnerStateOutlet'
7619
+ outlet: 'contextState' | 'generatedAudio' | 'generatedAudioPlaying'
7620
+ value: any
7621
+ }
7622
+ }>
7623
+ }
7624
+ >
7625
+ }
7626
+
6109
7627
  /* Add a message to the assistant */
6110
7628
  export type GeneratorAssistantActionAddMessage = ActionWithParams & {
6111
7629
  __actionName: 'GENERATOR_ASSISTANT_ADD_MESSAGE'
@@ -6120,6 +7638,11 @@ export type GeneratorAssistantActionAddMessage = ActionWithParams & {
6120
7638
  value?: string | DataLink | EventProperty
6121
7639
  mapping?: string
6122
7640
  }
7641
+ | {
7642
+ input: 'image'
7643
+ value?: string | DataLink | EventProperty
7644
+ mapping?: string
7645
+ }
6123
7646
  | {
6124
7647
  input: 'payload'
6125
7648
  value?: {} | DataLink | EventProperty
@@ -6163,6 +7686,55 @@ export type GeneratorAssistantActionAddMessage = ActionWithParams & {
6163
7686
  >
6164
7687
  }
6165
7688
 
7689
+ /* Initialize messages from MCP prompt */
7690
+ export type GeneratorAssistantActionInitMcpPrompt = ActionWithParams & {
7691
+ __actionName: 'GENERATOR_ASSISTANT_INIT_MCP_PROMPT'
7692
+ params?: Array<
7693
+ | {
7694
+ input: 'mcpClientName'
7695
+ value?: string | DataLink | EventProperty
7696
+ mapping?: string
7697
+ }
7698
+ | {
7699
+ input: 'mcpPromptName'
7700
+ value?: string | DataLink | EventProperty
7701
+ mapping?: string
7702
+ }
7703
+ | {
7704
+ input: 'mcpArguments'
7705
+ value?: {} | DataLink | EventProperty
7706
+ mapping?: string
7707
+ }
7708
+ | {
7709
+ input: 'firstMessageAsSystem'
7710
+ value?: boolean | DataLink | EventProperty
7711
+ mapping?: string
7712
+ }
7713
+ >
7714
+ }
7715
+
7716
+ /* Add messages from MCP prompt */
7717
+ export type GeneratorAssistantActionAddMcpPromptMessage = ActionWithParams & {
7718
+ __actionName: 'GENERATOR_ASSISTANT_ADD_MCP_PROMPT_MESSAGE'
7719
+ params?: Array<
7720
+ | {
7721
+ input: 'mcpClientName'
7722
+ value?: string | DataLink | EventProperty
7723
+ mapping?: string
7724
+ }
7725
+ | {
7726
+ input: 'mcpPromptName'
7727
+ value?: string | DataLink | EventProperty
7728
+ mapping?: string
7729
+ }
7730
+ | {
7731
+ input: 'mcpArguments'
7732
+ value?: {} | DataLink | EventProperty
7733
+ mapping?: string
7734
+ }
7735
+ >
7736
+ }
7737
+
6166
7738
  /* Update a message at a specific index */
6167
7739
  export type GeneratorAssistantActionUpdateMessageAtIndex = ActionWithParams & {
6168
7740
  __actionName: 'GENERATOR_ASSISTANT_UPDATE_MESSAGE_AT_INDEX'
@@ -6177,6 +7749,11 @@ export type GeneratorAssistantActionUpdateMessageAtIndex = ActionWithParams & {
6177
7749
  value?: string | DataLink | EventProperty
6178
7750
  mapping?: string
6179
7751
  }
7752
+ | {
7753
+ input: 'image'
7754
+ value?: string | DataLink | EventProperty
7755
+ mapping?: string
7756
+ }
6180
7757
  | {
6181
7758
  input: 'payload'
6182
7759
  value?: {} | DataLink | EventProperty
@@ -6204,6 +7781,11 @@ export type GeneratorAssistantActionAddAudioMessage = ActionWithParams & {
6204
7781
  value?: string | DataLink | EventProperty
6205
7782
  mapping?: string
6206
7783
  }
7784
+ | {
7785
+ input: 'image'
7786
+ value?: string | DataLink | EventProperty
7787
+ mapping?: string
7788
+ }
6207
7789
  | {
6208
7790
  input: 'useFileSearch'
6209
7791
  value?: boolean | DataLink | EventProperty
@@ -6303,6 +7885,11 @@ export type GeneratorAssistantActionUpdateAudioMessageAtIndex = ActionWithParams
6303
7885
  value?: string | DataLink | EventProperty
6304
7886
  mapping?: string
6305
7887
  }
7888
+ | {
7889
+ input: 'image'
7890
+ value?: string | DataLink | EventProperty
7891
+ mapping?: string
7892
+ }
6306
7893
  | {
6307
7894
  input: 'payload'
6308
7895
  value?: {} | DataLink | EventProperty
@@ -6332,8 +7919,25 @@ export type GeneratorAssistantActionReset = Action & {
6332
7919
  }
6333
7920
 
6334
7921
  /* Submit the assistant */
6335
- export type GeneratorAssistantActionSubmit = Action & {
7922
+ export type GeneratorAssistantActionSubmit = ActionWithParams & {
6336
7923
  __actionName: 'GENERATOR_ASSISTANT_SUBMIT'
7924
+ params?: Array<
7925
+ | {
7926
+ input: 'continueOnToolCallConfirm'
7927
+ value?: boolean | DataLink | EventProperty
7928
+ mapping?: string
7929
+ }
7930
+ | {
7931
+ input: 'continueOnToolCallStrategy'
7932
+ value?: 'never' | 'success' | 'always' | DataLink | EventProperty
7933
+ mapping?: string
7934
+ }
7935
+ | {
7936
+ input: 'continueOnToolCallLimit'
7937
+ value?: number | DataLink | EventProperty
7938
+ mapping?: string
7939
+ }
7940
+ >
6337
7941
  }
6338
7942
 
6339
7943
  /* Cancel the assistant responding */
@@ -6341,16 +7945,82 @@ export type GeneratorAssistantActionCancel = Action & {
6341
7945
  __actionName: 'GENERATOR_ASSISTANT_CANCEL'
6342
7946
  }
6343
7947
 
7948
+ /* Check the enabled MCP clients connection status and available tools */
7949
+ export type GeneratorAssistantActionCheckMcpServers = Action & {
7950
+ __actionName: 'GENERATOR_ASSISTANT_CHECK_MCP_SERVERS'
7951
+ }
7952
+
7953
+ /* Insert an MCP resource as a new assistant message */
7954
+ export type GeneratorAssistantActionInsertMcpResource = ActionWithParams & {
7955
+ __actionName: 'GENERATOR_ASSISTANT_INSERT_MCP_RESOURCE'
7956
+ params?: Array<
7957
+ | {
7958
+ input: 'mcpClientName'
7959
+ value?: string | DataLink | EventProperty
7960
+ mapping?: string
7961
+ }
7962
+ | {
7963
+ input: 'mcpResourceUri'
7964
+ value?: string | DataLink | EventProperty
7965
+ mapping?: string
7966
+ }
7967
+ | {
7968
+ input: 'mcpVariables'
7969
+ value?: {} | DataLink | EventProperty
7970
+ mapping?: string
7971
+ }
7972
+ | {
7973
+ input: 'role'
7974
+ value?: string | DataLink | EventProperty
7975
+ mapping?: string
7976
+ }
7977
+ >
7978
+ }
7979
+
7980
+ /* Summarize messages based on the conversation
7981
+
7982
+ Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
7983
+ export type GeneratorAssistantActionSummaryMessages = ActionWithParams & {
7984
+ __actionName: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES'
7985
+ params?: Array<
7986
+ | {
7987
+ input: 'summaryMessages'
7988
+ value?: Array<any> | DataLink | EventProperty
7989
+ mapping?: string
7990
+ }
7991
+ | {
7992
+ input: 'summarySessionKey'
7993
+ value?: string | DataLink | EventProperty
7994
+ mapping?: string
7995
+ }
7996
+ >
7997
+ }
7998
+
6344
7999
  interface GeneratorAssistantDef {
6345
8000
  /*
6346
8001
  Default property:
6347
8002
  {
6348
8003
  "initialMessages": [
6349
- null
8004
+ {
8005
+ "role": "system",
8006
+ "content": "You are a helpful assistant."
8007
+ }
6350
8008
  ],
6351
8009
  "cacheMessages": false,
6352
8010
  "llmLivePolicy": "only-in-use",
6353
8011
  "llmSessionKey": "default-assistant",
8012
+ "llmAutoSummaryMessages": false,
8013
+ "llmSummaryMessages": [
8014
+ {
8015
+ "role": "system",
8016
+ "content": "You are a helpful assistant specialized in summarizing conversations. Create a concise summary of the conversation that captures the key points while maintaining important context. The summary should be clear, accurate, and briefer than the original conversation."
8017
+ },
8018
+ {
8019
+ "role": "user",
8020
+ "content": "Please summarize the following conversation into a concise system message that can replace the previous conversation context while maintaining all important information. Here is the conversation to summarize:\n\n"
8021
+ }
8022
+ ],
8023
+ "llmSummarySessionKey": "assistant-default-summary",
6354
8024
  "fileSearchEnabled": false,
6355
8025
  "fileSearchLivePolicy": "only-in-use",
6356
8026
  "sttEnabled": true,
@@ -6374,12 +8044,30 @@ Default property:
6374
8044
  | DataLink
6375
8045
  /* Whether to cache messages */
6376
8046
  cacheMessages?: boolean | DataLink
6377
- /* LLM Generator (Currently only support `LLM (GGML)` generator) */
8047
+ /* LLM Generator (Supports `LLM (GGML)` and `OpenAI LLM` generators) */
6378
8048
  llmGeneratorId?: string | DataLink
6379
- /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use. */
8049
+ /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use.
8050
+
8051
+ Note: LLM (Qualcomm AI Engine) recommend use `manual` and loaded constantly. */
6380
8052
  llmLivePolicy?: 'only-in-use' | 'manual' | DataLink
6381
8053
  /* LLM main session key */
6382
8054
  llmSessionKey?: string | DataLink
8055
+ /* Auto Summary Messages (Automatically summarize messages when the LLM context is full or content gets truncated, currently only supported with LLM (GGML) generators)
8056
+
8057
+ Note: Summary uses the same LLM context size, so it is recommended to use it only when the system prompt (in Initial Messages) is long; otherwise it may still fail when the context is full (Ctx Shift is NO). */
8058
+ llmAutoSummaryMessages?: boolean | DataLink
8059
+ /* Summary Messages (Messages used for the summarization prompt; the conversation will be appended to the last message) */
8060
+ llmSummaryMessages?:
8061
+ | Array<
8062
+ | DataLink
8063
+ | {
8064
+ role?: string | DataLink
8065
+ content?: string | DataLink
8066
+ }
8067
+ >
8068
+ | DataLink
8069
+ /* Summary Session Key (Custom session key for summarization) */
8070
+ llmSummarySessionKey?: string | DataLink
6383
8071
  /* File Search (Vector Store) Enabled */
6384
8072
  fileSearchEnabled?: boolean | DataLink
6385
8073
  /* File Search (Vector Store) Generator */
@@ -6392,18 +8080,29 @@ Default property:
6392
8080
  fileSearchThreshold?: number | DataLink
6393
8081
  /* File Search Ignore Threshold. (Default: false) */
6394
8082
  fileSearchIgnoreThreshold?: boolean | DataLink
6395
- /* STT Generator use for transcribing audio message (Currently only support `STT (GGML)` generator) */
8083
+ /* STT Generator used for transcribing audio messages (Supports `STT (GGML)` generators) */
6396
8084
  sttGeneratorId?: string | DataLink
6397
8085
  /* STT Enabled */
6398
8086
  sttEnabled?: boolean | DataLink
6399
8087
  /* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when the assistant is not in use. */
6400
8088
  sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
6401
- /* TTS Generator use for generating LLM response audio message (Currently only support `TTS (ONNX)` generator) */
8089
+ /* TTS Generator used for generating LLM response audio messages (Supports `TTS (ONNX)` and `OpenAI TTS` generators) */
6402
8090
  ttsGeneratorId?: string | DataLink
6403
8091
  /* TTS Enabled */
6404
8092
  ttsEnabled?: boolean | DataLink
6405
8093
  /* TTS Live Policy. If the policy is `only-in-use`, the TTS context will be released when the assistant is not in use. */
6406
8094
  ttsLivePolicy?: 'only-in-use' | 'manual' | DataLink
8095
+ /* MCP Generators (Add a unique name if generator name properties are duplicated) */
8096
+ mcpGenerators?:
8097
+ | Array<
8098
+ | DataLink
8099
+ | {
8100
+ generatorId?: string | DataLink
8101
+ name?: string | DataLink
8102
+ enabled?: boolean | DataLink
8103
+ }
8104
+ >
8105
+ | DataLink
6407
8106
  }
6408
8107
  events?: {
6409
8108
  /* Error event */
@@ -6426,6 +8125,8 @@ Default property:
6426
8125
  files?: () => Data
6427
8126
  /* Messages of the assistant */
6428
8127
  messages?: () => Data
8128
+ /* MCP servers status and available tools */
8129
+ mcpServers?: () => Data
6429
8130
  }
6430
8131
  }
6431
8132
 
@@ -6450,6 +8151,7 @@ export type GeneratorAssistant = Generator &
6450
8151
  | 'isBusy'
6451
8152
  | 'files'
6452
8153
  | 'messages'
8154
+ | 'mcpServers'
6453
8155
  value: any
6454
8156
  }
6455
8157
  }>