@fugood/bricks-project 2.22.0-beta.2 → 2.22.0-beta.20

This diff shows the changes between publicly available package versions as they appear in the supported public registries. It is provided for informational purposes only.
@@ -47,7 +47,7 @@ Default property:
47
47
  completed?: Array<EventAction>
48
48
  }
49
49
  outlets?: {
50
- /* Result of each countdown change */
50
+ /* Countdown step value */
51
51
  countdown?: () => Data
52
52
  }
53
53
  }
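
For orientation, a minimal sketch of how the countdown generator's events/outlets block shown in this hunk might be filled in. The EventAction shape and the action name are illustrative assumptions, not taken from the package.

// Illustrative only: mirrors the `events` / `outlets` shape shown above.
// 'SOME_ACTION' stands in for a real action name defined elsewhere in this file.
const countdownSection = {
  events: {
    // Actions to run when the countdown completes (shape assumed from `completed?: Array<EventAction>`).
    completed: [{ __actionName: 'SOME_ACTION' }],
  },
  outlets: {
    // Exposes the current countdown step value as Data (placeholder value here).
    countdown: () => 3 as unknown,
  },
}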
@@ -182,8 +182,8 @@ export type GeneratorFileActionReadContent = ActionWithParams & {
182
182
  }
183
183
 
184
184
  /* Delete */
185
- export type GeneratorFileActionGeneratorDeleteFile = Action & {
186
- __actionName: 'GENERATOR_DELETE_FILE'
185
+ export type GeneratorFileActionDelete = Action & {
186
+ __actionName: 'GENERATOR_FILE_DELETE'
187
187
  }
188
188
 
189
189
  /* Append (Currently only support text file) */
@@ -691,7 +691,7 @@ Default property:
691
691
  }
692
692
  */
693
693
  property?: {
694
- /* Start query on generator initialized */
694
+ /* Start generator initialization execution immediately */
695
695
  init?: boolean | DataLink
696
696
  /* Data Bank Space ID */
697
697
  spacename?: string | DataLink
@@ -730,7 +730,7 @@ Default property:
730
730
  }
731
731
  }
732
732
 
733
- /* Fetch data & subscribe data change from Data Bank */
733
+ /* Get Data or subscribe to Data changes from Data Bank */
734
734
  export type GeneratorDataBank = Generator &
735
735
  GeneratorDataBankDef & {
736
736
  templateKey: 'GENERATOR_DATA_BANK'
@@ -752,7 +752,7 @@ export type GeneratorDataBank = Generator &
752
752
  >
753
753
  }
754
754
 
755
- /* Run GraphQL query with defined properties */
755
+ /* Execute GraphQL request with defined properties */
756
756
  export type GeneratorGraphQLActionRunQuery = ActionWithParams & {
757
757
  __actionName: 'GENERATOR_GRAPHQL_RUN_QUERY'
758
758
  params?: Array<
@@ -797,7 +797,7 @@ Default property:
797
797
  }
798
798
  */
799
799
  property?: {
800
- /* Start GraphQL query on generator initialized */
800
+ /* Start GraphQL request immediately after generator initialization */
801
801
  init?: boolean | DataLink
802
802
  /* GraphQL request type */
803
803
  type?: 'query' | 'mutation' | 'subscription' | DataLink
@@ -805,9 +805,9 @@ Default property:
805
805
  headers?: {} | DataLink
806
806
  /* HTTP request URL endpoint */
807
807
  endpoint?: string | DataLink
808
- /* Subscrpition endpoint */
808
+ /* Subscription endpoint */
809
809
  endpointForSubscription?: string | DataLink
810
- /* Subscrpition connection params */
810
+ /* Subscription connection params */
811
811
  connectionParams?: {} | DataLink
812
812
  /* Query content */
813
813
  query?: string | DataLink
@@ -831,9 +831,9 @@ Default property:
831
831
  firebaseMessagingSenderId?: string | DataLink
832
832
  }
833
833
  events?: {
834
- /* Event of subscription on connection */
834
+ /* Event triggered when subscription connection is successful */
835
835
  subscriptionOnConnection?: Array<EventAction>
836
- /* Server connections error of GraphQL subscription */
836
+ /* Event triggered when subscription connection error occurs */
837
837
  subscriptionOnConnectionError?: Array<EventAction>
838
838
  }
839
839
  outlets?: {
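
As a reading aid, a minimal sketch of a GeneratorGraphQL `property` block for a subscription, using only fields named in the hunks above; the endpoints, connection params, and query text are invented placeholders.

// Illustrative property block for a GraphQL subscription (all values are placeholders).
const graphqlProperty = {
  init: true,                                            // start the request right after initialization
  type: 'subscription' as const,                         // 'query' | 'mutation' | 'subscription'
  endpoint: 'https://example.com/graphql',               // HTTP request URL endpoint
  endpointForSubscription: 'wss://example.com/graphql',  // subscription endpoint
  connectionParams: { authToken: '<token>' },            // subscription connection params
  query: 'subscription { onMessage { id text } }',       // query content
}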
@@ -973,22 +973,22 @@ export type GeneratorHTTP = Generator &
973
973
  >
974
974
  }
975
975
 
976
- /* Start play sound */
976
+ /* Start playing sound */
977
977
  export type GeneratorSoundPlayerActionPlay = Action & {
978
978
  __actionName: 'GENERATOR_SOUND_PLAYER_PLAY'
979
979
  }
980
980
 
981
- /* Pause play sound */
981
+ /* Pause playing sound */
982
982
  export type GeneratorSoundPlayerActionPause = Action & {
983
983
  __actionName: 'GENERATOR_SOUND_PLAYER_PAUSE'
984
984
  }
985
985
 
986
- /* Resume play sound from pause */
986
+ /* Resume playing sound from pause */
987
987
  export type GeneratorSoundPlayerActionResume = Action & {
988
988
  __actionName: 'GENERATOR_SOUND_PLAYER_RESUME'
989
989
  }
990
990
 
991
- /* Stop play sound */
991
+ /* Stop playing sound */
992
992
  export type GeneratorSoundPlayerActionRelease = Action & {
993
993
  __actionName: 'GENERATOR_SOUND_PLAYER_RELEASE'
994
994
  }
@@ -1002,32 +1002,32 @@ Default property:
1002
1002
  }
1003
1003
  */
1004
1004
  property?: {
1005
- /* The sound file path */
1005
+ /* Sound file path */
1006
1006
  filePath?: string | DataLink
1007
- /* The checksum of file */
1007
+ /* MD5 */
1008
1008
  md5?: string | DataLink
1009
- /* Repeat the sound */
1009
+ /* Repeat playback */
1010
1010
  loop?: boolean | DataLink
1011
- /* The volume of sound (0 - 100) */
1011
+ /* Sound volume (0 - 100) */
1012
1012
  volume?: number | DataLink
1013
1013
  }
1014
1014
  events?: {
1015
- /* Event on sound file loaded */
1015
+ /* Sound file loaded successfully */
1016
1016
  onLoad?: Array<EventAction>
1017
- /* Event on load error */
1017
+ /* Sound file load error */
1018
1018
  onLoadError?: Array<EventAction>
1019
- /* Event on sound play */
1019
+ /* Sound playback complete */
1020
1020
  onPlay?: Array<EventAction>
1021
- /* Event on sound end */
1021
+ /* Sound file playback end */
1022
1022
  onEnd?: Array<EventAction>
1023
1023
  }
1024
1024
  outlets?: {
1025
- /* Is sound playing */
1025
+ /* Whether the sound is playing */
1026
1026
  isPlaying?: () => Data
1027
1027
  }
1028
1028
  }
1029
1029
 
1030
- /* Play sound file from file system, support sound format refer this https://developer.android.com/guide/topics/media/media-formats */
1030
+ /* Play sound, see supported formats at https://developer.android.com/guide/topics/media/media-formats */
1031
1031
  export type GeneratorSoundPlayer = Generator &
1032
1032
  GeneratorSoundPlayerDef & {
1033
1033
  templateKey: 'GENERATOR_SOUND_PLAYER'
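
A minimal sketch of a GeneratorSoundPlayer `property` block plus the play action from the hunks above; the file path and checksum are placeholders.

// Illustrative GeneratorSoundPlayer property block (placeholder values).
const soundPlayerProperty = {
  filePath: '/sdcard/sounds/beep.mp3', // sound file path (placeholder)
  md5: '<md5-of-file>',                // optional integrity check
  loop: false,                         // repeat playback
  volume: 80,                          // 0 - 100
}

// The play action shown above carries no params beyond its action name.
const playAction = { __actionName: 'GENERATOR_SOUND_PLAYER_PLAY' as const }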
@@ -1062,43 +1062,42 @@ Default property:
1062
1062
  }
1063
1063
  */
1064
1064
  property?: {
1065
- /* Enable listening */
1065
+ /* Enable listening for input */
1066
1066
  enabled?: boolean | DataLink
1067
- /* Key map to convert key or key code to specify content (e.g. { 37: 'left' }) */
1067
+ /* Key map to transform key or key code to the designated content (e.g. { 37: 'left' }) */
1068
1068
  keyMap?: {} | DataLink
1069
- /* Key outlet prefer use key code or key.
1070
- Please note that the key code is not supported on iOS / tvOS, so it will use `key` if value is `auto`. */
1069
+ /* Key outlet preference use key code or key. */
1071
1070
  keyOutletPrefer?: 'auto' | 'key-code' | 'key' | DataLink
1072
- /* Stop key or code to finish batch */
1071
+ /* Key or code to finish continuous input */
1073
1072
  batchStopKeys?: Array<string | DataLink | number | DataLink | DataLink> | DataLink
1074
- /* Debounce time to finish batch (ms) */
1073
+ /* Debounce time (ms) to finish continuous input */
1075
1074
  batchDebounce?: number | DataLink
1076
- /* Max wait time to finish batch (ms) (Default: No limit) */
1075
+ /* Maximum wait time (ms) to finish continuous input (default: unlimited) */
1077
1076
  batchDebounceMaxWait?: number | DataLink
1078
1077
  }
1079
1078
  events?: {
1080
- /* Event of key down */
1079
+ /* Event on key press */
1081
1080
  onDown?: Array<EventAction>
1082
- /* Event of key up */
1081
+ /* Event on key up */
1083
1082
  onUp?: Array<EventAction>
1084
- /* Event of batch input finished */
1083
+ /* Event on continuous input complete */
1085
1084
  onBatch?: Array<EventAction>
1086
1085
  }
1087
1086
  outlets?: {
1088
- /* Last key down code */
1087
+ /* Last key code pressed */
1089
1088
  lastKeyDown?: () => Data
1090
- /* Last key down flags */
1089
+ /* Modifier key information on last key press */
1091
1090
  lastKeyDownFlags?: () => Data
1092
- /* Last key up code */
1091
+ /* Last key code released */
1093
1092
  lastKeyUp?: () => Data
1094
- /* Last key up flags */
1093
+ /* Modifier key information on last key release */
1095
1094
  lastKeyUpFlags?: () => Data
1096
- /* Last batch events */
1095
+ /* Last continuous event */
1097
1096
  lastBatchEvents?: () => Data
1098
1097
  }
1099
1098
  }
1100
1099
 
1101
- /* Listening keyboard (controller) event */
1100
+ /* Access keyboard (remote control) events */
1102
1101
  export type GeneratorKeyboard = Generator &
1103
1102
  GeneratorKeyboardDef & {
1104
1103
  templateKey: 'GENERATOR_KEYBOARD'
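
A minimal sketch of a GeneratorKeyboard `property` block. The keyMap follows the documented form ({ 37: 'left' }); the remaining values are assumptions chosen for illustration.

// Illustrative GeneratorKeyboard property block.
const keyboardProperty = {
  enabled: true,
  keyMap: { 37: 'left', 38: 'up', 39: 'right', 40: 'down', 13: 'enter' },
  keyOutletPrefer: 'auto' as const, // 'auto' | 'key-code' | 'key'
  batchStopKeys: ['enter'],         // key or code that finishes continuous input (assumed value)
  batchDebounce: 300,               // ms of inactivity that finishes continuous input (assumed value)
  batchDebounceMaxWait: 2000,       // upper bound in ms (assumed value)
}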
@@ -1395,22 +1394,22 @@ export type GeneratorStep = Generator &
1395
1394
  >
1396
1395
  }
1397
1396
 
1398
- /* Start the next iterate */
1397
+ /* Proceed to next iteration */
1399
1398
  export type GeneratorIteratorActionNext = Action & {
1400
1399
  __actionName: 'GENERATOR_ITERATOR_NEXT'
1401
1400
  }
1402
1401
 
1403
- /* Back to the previous iterate */
1402
+ /* Go back to previous iteration */
1404
1403
  export type GeneratorIteratorActionPrevious = Action & {
1405
1404
  __actionName: 'GENERATOR_ITERATOR_PREVIOUS'
1406
1405
  }
1407
1406
 
1408
- /* Skip to last iterate element (Ignore loop) */
1407
+ /* Jump to the last iteration element (ignoring the loop setting) */
1409
1408
  export type GeneratorIteratorActionLast = Action & {
1410
1409
  __actionName: 'GENERATOR_ITERATOR_LAST'
1411
1410
  }
1412
1411
 
1413
- /* Reset the iterator state */
1412
+ /* Reset iteration state */
1414
1413
  export type GeneratorIteratorActionReset = Action & {
1415
1414
  __actionName: 'GENERATOR_ITERATOR_RESET'
1416
1415
  }
@@ -1425,40 +1424,40 @@ Default property:
1425
1424
  }
1426
1425
  */
1427
1426
  property?: {
1428
- /* The data source of the value. If it is an array, the value element is used. If it is an object, Object.values() is used as the data. If it is a string, the source character of the value is taken. If it is a number, it represents a count of 1N. */
1427
+ /* Data source for iteration. If it's an Array, it will iterate through elements. If it's an Object, it will use Object.values() as data source. If it's a String, it will iterate through characters. If it's a Number, it represents count from 1 to N. */
1429
1428
  data?: any
1430
- /* Starting element index */
1429
+ /* Starting element position */
1431
1430
  start?: number | DataLink
1432
- /* Iterate step */
1431
+ /* Step size for each iteration */
1433
1432
  step?: number | DataLink
1434
- /* The maximum number of iterations (Set -1 for unlimited) */
1433
+ /* Maximum number of iterations (can be set to -1 for unlimited) */
1435
1434
  maxQuantity?: number | DataLink
1436
- /* Loop iterate */
1435
+ /* Whether to loop the iteration */
1437
1436
  loop?: boolean | DataLink
1438
1437
  }
1439
1438
  events?: {
1440
- /* Event on iterate */
1439
+ /* Event triggered on each iteration */
1441
1440
  iterate?: Array<EventAction>
1442
- /* Event on iterate round start */
1441
+ /* Event triggered on the first iteration of a round */
1443
1442
  first?: Array<EventAction>
1444
- /* Event on iterate round end */
1443
+ /* Event triggered on the last iteration of a round */
1445
1444
  end?: Array<EventAction>
1446
1445
  }
1447
1446
  outlets?: {
1448
- /* Elements that have been iterated (Included current iterated) */
1447
+ /* Elements that have been iterated (including current one) */
1449
1448
  iteratedArray?: () => Data
1450
- /* Elements not yet iterated */
1449
+ /* Elements that will be iterated but have not been iterated yet */
1451
1450
  upcomingArray?: () => Data
1452
- /* Current iterated element */
1451
+ /* Current iteration element */
1453
1452
  value?: () => Data
1454
- /* Current Key of iterated element (number: same as value; array, string: index; object: string) */
1453
+ /* Key of the current iteration element (for number: same as value, for array/string: index, for object: string key) */
1455
1454
  key?: () => Data
1456
- /* The current number of iterations (if data is 6 and step is 2, this value will be returned in order: 1, 2, 3) */
1455
+ /* Current iteration count (if data is 6 and step is 2, this will return: 1,2,3 in sequence) */
1457
1456
  index?: () => Data
1458
1457
  }
1459
1458
  }
1460
1459
 
1461
- /* Iterate values (Array, Object, Number, String) */
1460
+ /* Iterate through values (Array, Object, Number, String) */
1462
1461
  export type GeneratorIterator = Generator &
1463
1462
  GeneratorIteratorDef & {
1464
1463
  templateKey: 'GENERATOR_ITERATOR'
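
A minimal sketch of a GeneratorIterator `property` block built around the worked example in the comments above (data = 6, step = 2); values other than `data` and `step` are illustrative assumptions.

// Illustrative GeneratorIterator property block. Per the comment above,
// with data = 6 and step = 2 the `index` outlet yields 1, 2, 3 in sequence.
const iteratorProperty = {
  data: 6,         // a Number means counting from 1 to N
  start: 0,        // starting element position (assumed value)
  step: 2,         // step size per iteration
  maxQuantity: -1, // -1 means unlimited (assumed value)
  loop: false,     // do not restart after the last element (assumed value)
}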
@@ -2065,14 +2064,11 @@ Default property:
2065
2064
  }
2066
2065
  */
2067
2066
  property?: {
2068
- /* Try attach on generator initialized
2069
- On web require user activation to attach device */
2067
+ /* Try attach on generator initialized On web require user activation to attach device */
2070
2068
  attachOnInit?: boolean | DataLink
2071
2069
  /* The serial device driver */
2072
2070
  driver?: 'fd' | 'usb' | DataLink
2073
- /* The serial device path
2074
- e.g. /dev/ttyS0 or /dev/bus/usb/001/001
2075
- For desktop and web is device index number */
2071
+ /* The serial device path e.g. /dev/ttyS0 or /dev/bus/usb/001/001 For desktop and web is device index number */
2076
2072
  path?: string | DataLink
2077
2073
  /* The serial USB vendor id (autoconnect first) */
2078
2074
  vendorId?: number | DataLink
@@ -4307,6 +4303,521 @@ export type GeneratorSqlite = Generator &
4307
4303
  >
4308
4304
  }
4309
4305
 
4306
+ /* Refresh tools and resources, used for case if tools or resources are changed. Note that the current connections will be closed. */
4307
+ export type GeneratorMCPServerActionRefreshResources = Action & {
4308
+ __actionName: 'GENERATOR_MCP_SERVER_REFRESH_RESOURCES'
4309
+ }
4310
+
4311
+ interface GeneratorMCPServerDef {
4312
+ /*
4313
+ Default property:
4314
+ {
4315
+ "enabled": true,
4316
+ "listening": true,
4317
+ "authType": "none",
4318
+ "name": "bricks-foundation-mcp-server-default",
4319
+ "version": "1.0.0",
4320
+ "resources": [],
4321
+ "tools": [],
4322
+ "prompts": []
4323
+ }
4324
+ */
4325
+ property?: {
4326
+ /* Enable MCP server. If enabled and Listening is false, the generator can still provide application-scoped resources. */
4327
+ enabled?: boolean | DataLink
4328
+ /* Application-scoped generator key, key cannot be the same with other application-scoped generators */
4329
+ globalGeneratorKey?: string | DataLink
4330
+ /* Start MCP server */
4331
+ listening?: boolean | DataLink
4332
+ /* HTTP server port */
4333
+ port?: number | DataLink
4334
+ /* Authorization type of HTTP request */
4335
+ authType?: 'none' | 'bearer' | DataLink
4336
+ /* Token of bearer auth */
4337
+ bearerToken?: string | DataLink
4338
+ /* Name of the MCP server */
4339
+ name?: string | DataLink
4340
+ /* Version of the MCP server */
4341
+ version?: string | DataLink
4342
+ /* Resources
4343
+ Type:
4344
+ `static`: Return static data
4345
+ `detect-data-change`: Watch data target change to return data,
4346
+ please update data with ({ id: string, content: string | object }),
4347
+ and ensure the id is same with request id
4348
+ `script`: Run a JavaScript code to return data
4349
+ - Script can define members to call generator functions
4350
+ - Script support async/await */
4351
+ resources?:
4352
+ | Array<
4353
+ | DataLink
4354
+ | {
4355
+ enabled?: boolean | DataLink
4356
+ name?: string | DataLink
4357
+ description?: string | DataLink
4358
+ uriOrTemplate?: string | DataLink
4359
+ type?: 'static' | 'detect-data-change' | 'script' | DataLink
4360
+ staticData?: any
4361
+ dataChangeConfig?:
4362
+ | DataLink
4363
+ | {
4364
+ target?: string | DataLink
4365
+ timeout?: number | DataLink
4366
+ additionalParams?: {} | DataLink
4367
+ }
4368
+ scriptConfig?:
4369
+ | DataLink
4370
+ | {
4371
+ code?: string | DataLink
4372
+ timeout?: number | DataLink
4373
+ members?:
4374
+ | Array<
4375
+ | DataLink
4376
+ | {
4377
+ handler?: string | DataLink
4378
+ varName?: string | DataLink
4379
+ }
4380
+ >
4381
+ | DataLink
4382
+ additionalParams?: {} | DataLink
4383
+ }
4384
+ }
4385
+ >
4386
+ | DataLink
4387
+ /* Tools
4388
+ Type:
4389
+ `detect-data-change`: Watch data target change to return data,
4390
+ please update data with ({ id: string, content: string | object }),
4391
+ and ensure the id is same with request id.
4392
+ `script`: Run a JavaScript code to return data
4393
+ - Script can define members to call generator functions
4394
+ - Script support async/await */
4395
+ tools?:
4396
+ | Array<
4397
+ | DataLink
4398
+ | {
4399
+ enabled?: boolean | DataLink
4400
+ name?: string | DataLink
4401
+ description?: string | DataLink
4402
+ params?: {} | DataLink
4403
+ type?: 'detect-data-change' | 'script' | DataLink
4404
+ dataChangeConfig?:
4405
+ | DataLink
4406
+ | {
4407
+ target?: string | DataLink
4408
+ timeout?: number | DataLink
4409
+ additionalParams?: {} | DataLink
4410
+ }
4411
+ scriptConfig?:
4412
+ | DataLink
4413
+ | {
4414
+ code?: string | DataLink
4415
+ timeout?: number | DataLink
4416
+ members?:
4417
+ | Array<
4418
+ | DataLink
4419
+ | {
4420
+ handler?: string | DataLink
4421
+ varName?: string | DataLink
4422
+ }
4423
+ >
4424
+ | DataLink
4425
+ additionalParams?: {} | DataLink
4426
+ }
4427
+ }
4428
+ >
4429
+ | DataLink
4430
+ /* Prompts
4431
+ Type:
4432
+ `static`: Return static data
4433
+ `detect-data-change`: Watch data target change to return data,
4434
+ please update data with ({ id: string, content: string | object }),
4435
+ and ensure the id is same with request id
4436
+ `script`: Run a JavaScript code to return data
4437
+ - Script can define members to call generator functions
4438
+ - Script support async/await */
4439
+ prompts?:
4440
+ | Array<
4441
+ | DataLink
4442
+ | {
4443
+ enabled?: boolean | DataLink
4444
+ name?: string | DataLink
4445
+ description?: string | DataLink
4446
+ arguments?: {} | DataLink
4447
+ type?: 'static' | 'detect-data-change' | 'script' | DataLink
4448
+ staticData?: any
4449
+ dataChangeConfig?:
4450
+ | DataLink
4451
+ | {
4452
+ target?: string | DataLink
4453
+ timeout?: number | DataLink
4454
+ additionalParams?: {} | DataLink
4455
+ }
4456
+ scriptConfig?:
4457
+ | DataLink
4458
+ | {
4459
+ code?: string | DataLink
4460
+ timeout?: number | DataLink
4461
+ members?:
4462
+ | Array<
4463
+ | DataLink
4464
+ | {
4465
+ handler?: string | DataLink
4466
+ varName?: string | DataLink
4467
+ }
4468
+ >
4469
+ | DataLink
4470
+ additionalParams?: {} | DataLink
4471
+ }
4472
+ }
4473
+ >
4474
+ | DataLink
4475
+ }
4476
+ events?: {
4477
+ /* Listening of HTTP server */
4478
+ onListening?: Array<EventAction>
4479
+ /* Error of HTTP server */
4480
+ onError?: Array<EventAction>
4481
+ /* Client error of HTTP server */
4482
+ onClientError?: Array<EventAction>
4483
+ /* Client close of HTTP server */
4484
+ onClientClose?: Array<EventAction>
4485
+ /* On request resource (Request: { name: string, uri: string, params: object }) */
4486
+ onRequestResource?: Array<EventAction>
4487
+ /* On call tool (Request: { name: string, params: object }) */
4488
+ onCallTool?: Array<EventAction>
4489
+ /* On get prompt (Request: { name: string, arguments: object }) */
4490
+ onGetPrompt?: Array<EventAction>
4491
+ }
4492
+ outlets?: {
4493
+ /* Whether the HTTP server is listening */
4494
+ isListening?: () => Data
4495
+ /* Last error of HTTP server */
4496
+ lastError?: () => Data
4497
+ /* MCP server endpoint URL */
4498
+ endpoint?: () => Data
4499
+ /* Connected remotes (Session ID) */
4500
+ connectedRemotes?: () => Data
4501
+ /* Last resource request ({ name: string, uri: string, params: object }) */
4502
+ lastResourceRequest?: () => Data
4503
+ /* Last tool call ({ name: string, params: object }) */
4504
+ lastToolCall?: () => Data
4505
+ /* Last prompt get ({ name: string, arguments: object }) */
4506
+ lastPromptGet?: () => Data
4507
+ }
4508
+ }
4509
+
4510
+ /* Model Context Protocol (MCP) Server (https://docs.anthropic.com/en/docs/agents-and-tools/mcp) */
4511
+ export type GeneratorMCPServer = Generator &
4512
+ GeneratorMCPServerDef & {
4513
+ templateKey: 'GENERATOR_MCP_SERVER'
4514
+ switches: Array<
4515
+ SwitchDef &
4516
+ GeneratorMCPServerDef & {
4517
+ conds?: Array<{
4518
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
4519
+ cond:
4520
+ | SwitchCondInnerStateCurrentCanvas
4521
+ | SwitchCondData
4522
+ | {
4523
+ __typename: 'SwitchCondInnerStateOutlet'
4524
+ outlet:
4525
+ | 'isListening'
4526
+ | 'lastError'
4527
+ | 'endpoint'
4528
+ | 'connectedRemotes'
4529
+ | 'lastResourceRequest'
4530
+ | 'lastToolCall'
4531
+ | 'lastPromptGet'
4532
+ value: any
4533
+ }
4534
+ }>
4535
+ }
4536
+ >
4537
+ }
4538
+
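
A minimal sketch of a GeneratorMCPServer `property` block exposing one script tool, using only fields from the interface above. The port, token, parameter schema, and script body are placeholders; the availability of a `params` variable inside the script is an assumption.

// Illustrative GeneratorMCPServer property block (subset of the interface above).
const mcpServerProperty = {
  enabled: true,
  listening: true,             // start the HTTP server
  port: 19853,                 // placeholder port
  authType: 'bearer' as const, // 'none' | 'bearer'
  bearerToken: '<token>',
  name: 'bricks-foundation-mcp-server-default',
  version: '1.0.0',
  resources: [],
  prompts: [],
  tools: [
    {
      enabled: true,
      name: 'echo',
      description: 'Echo the given text back to the caller',
      params: { text: { type: 'string' } },    // parameter schema shape is assumed
      type: 'script' as const,                 // run JavaScript to produce the result
      scriptConfig: {
        code: 'return { text: params.text }',  // `params` availability inside the script is assumed
        timeout: 5000,
      },
    },
  ],
}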
4539
+ /* Connect to MCP server */
4540
+ export type GeneratorMCPActionConnect = Action & {
4541
+ __actionName: 'GENERATOR_MCP_CONNECT'
4542
+ }
4543
+
4544
+ /* Disconnect from MCP server */
4545
+ export type GeneratorMCPActionDisconnect = Action & {
4546
+ __actionName: 'GENERATOR_MCP_DISCONNECT'
4547
+ }
4548
+
4549
+ /* List resources */
4550
+ export type GeneratorMCPActionListResources = ActionWithParams & {
4551
+ __actionName: 'GENERATOR_MCP_LIST_RESOURCES'
4552
+ params?: Array<{
4553
+ input: 'requestId'
4554
+ value?: string | DataLink | EventProperty
4555
+ mapping?: string
4556
+ }>
4557
+ }
4558
+
4559
+ /* List resource templates */
4560
+ export type GeneratorMCPActionListResourceTemplates = ActionWithParams & {
4561
+ __actionName: 'GENERATOR_MCP_LIST_RESOURCE_TEMPLATES'
4562
+ params?: Array<{
4563
+ input: 'requestId'
4564
+ value?: string | DataLink | EventProperty
4565
+ mapping?: string
4566
+ }>
4567
+ }
4568
+
4569
+ /* Read resource */
4570
+ export type GeneratorMCPActionReadResource = ActionWithParams & {
4571
+ __actionName: 'GENERATOR_MCP_READ_RESOURCE'
4572
+ params?: Array<
4573
+ | {
4574
+ input: 'requestId'
4575
+ value?: string | DataLink | EventProperty
4576
+ mapping?: string
4577
+ }
4578
+ | {
4579
+ input: 'uri'
4580
+ value?: string | DataLink | EventProperty
4581
+ mapping?: string
4582
+ }
4583
+ | {
4584
+ input: 'variables'
4585
+ value?: {} | DataLink | EventProperty
4586
+ mapping?: string
4587
+ }
4588
+ >
4589
+ }
4590
+
4591
+ /* List tools */
4592
+ export type GeneratorMCPActionListTools = ActionWithParams & {
4593
+ __actionName: 'GENERATOR_MCP_LIST_TOOLS'
4594
+ params?: Array<{
4595
+ input: 'requestId'
4596
+ value?: string | DataLink | EventProperty
4597
+ mapping?: string
4598
+ }>
4599
+ }
4600
+
4601
+ /* Call tool */
4602
+ export type GeneratorMCPActionCallTool = ActionWithParams & {
4603
+ __actionName: 'GENERATOR_MCP_CALL_TOOL'
4604
+ params?: Array<
4605
+ | {
4606
+ input: 'requestId'
4607
+ value?: string | DataLink | EventProperty
4608
+ mapping?: string
4609
+ }
4610
+ | {
4611
+ input: 'name'
4612
+ value?: string | DataLink | EventProperty
4613
+ mapping?: string
4614
+ }
4615
+ | {
4616
+ input: 'variables'
4617
+ value?: {} | DataLink | EventProperty
4618
+ mapping?: string
4619
+ }
4620
+ >
4621
+ }
4622
+
4623
+ /* List prompts */
4624
+ export type GeneratorMCPActionListPrompts = ActionWithParams & {
4625
+ __actionName: 'GENERATOR_MCP_LIST_PROMPTS'
4626
+ params?: Array<{
4627
+ input: 'requestId'
4628
+ value?: string | DataLink | EventProperty
4629
+ mapping?: string
4630
+ }>
4631
+ }
4632
+
4633
+ /* Request prompt */
4634
+ export type GeneratorMCPActionGetPrompt = ActionWithParams & {
4635
+ __actionName: 'GENERATOR_MCP_GET_PROMPT'
4636
+ params?: Array<
4637
+ | {
4638
+ input: 'requestId'
4639
+ value?: string | DataLink | EventProperty
4640
+ mapping?: string
4641
+ }
4642
+ | {
4643
+ input: 'name'
4644
+ value?: string | DataLink | EventProperty
4645
+ mapping?: string
4646
+ }
4647
+ | {
4648
+ input: 'variables'
4649
+ value?: {} | DataLink | EventProperty
4650
+ mapping?: string
4651
+ }
4652
+ >
4653
+ }
4654
+
4655
+ interface GeneratorMCPDef {
4656
+ /*
4657
+ Default property:
4658
+ {
4659
+ "init": false,
4660
+ "type": "streamable-http",
4661
+ "url": "",
4662
+ "autoReconnect": true,
4663
+ "maxReconnectAttempts": 10,
4664
+ "reconnectInterval": 1000,
4665
+ "generatorId": "",
4666
+ "generatorKey": "",
4667
+ "name": "bricks-foundation-mcp-client-default",
4668
+ "version": "1.0.0",
4669
+ "ignoreResourceInList": [],
4670
+ "ignoreToolInList": [],
4671
+ "ignorePromptInList": [],
4672
+ "requestTimeout": 60000
4673
+ }
4674
+ */
4675
+ property?: {
4676
+ /* Initialize the MCP client on start */
4677
+ init?: boolean | DataLink
4678
+ /* Type of the MCP connection, e.g. sse or direct-link (generator) */
4679
+ type?: 'streamable-http' | 'sse' | 'direct-link' | DataLink
4680
+ /* URL of the MCP server, e.g. http://localhost:19853/sse */
4681
+ url?: string | DataLink
4682
+ /* Whether to automatically reconnect to the MCP server */
4683
+ autoReconnect?: boolean | DataLink
4684
+ /* Maximum number of reconnection attempts */
4685
+ maxReconnectAttempts?: number | DataLink
4686
+ /* Reconnection interval in milliseconds */
4687
+ reconnectInterval?: number | DataLink
4688
+ /* SSE connection headers */
4689
+ sseHeaders?: {} | DataLink
4690
+ /* Send request headers */
4691
+ sendHeaders?: {} | DataLink
4692
+ /* Bearer token for authentication */
4693
+ bearerToken?: string | DataLink
4694
+ /* Generator MCP Server ID for direct link */
4695
+ generatorId?: string | DataLink
4696
+ /* Application-scoped key of Generator MCP Server for direct link (If ID is not provided) */
4697
+ generatorKey?: string | DataLink
4698
+ /* Name of the MCP client */
4699
+ name?: string | DataLink
4700
+ /* Version of the MCP client */
4701
+ version?: string | DataLink
4702
+ /* Ignore resources in list response */
4703
+ ignoreResourceInList?: Array<string | DataLink> | DataLink
4704
+ /* Ignore tools in list response */
4705
+ ignoreToolInList?: Array<string | DataLink> | DataLink
4706
+ /* Ignore prompts in list response */
4707
+ ignorePromptInList?: Array<string | DataLink> | DataLink
4708
+ /* Request timeout in milliseconds */
4709
+ requestTimeout?: number | DataLink
4710
+ }
4711
+ events?: {
4712
+ /* On connected */
4713
+ onConnected?: Array<EventAction>
4714
+ /* On connection error */
4715
+ onConnectionError?: Array<EventAction>
4716
+ /* On disconnected */
4717
+ onDisconnected?: Array<EventAction>
4718
+ /* On list resources */
4719
+ onListResources?: Array<EventAction>
4720
+ /* On list resources error */
4721
+ onListResourcesError?: Array<EventAction>
4722
+ /* On list resource templates */
4723
+ onListResourceTemplates?: Array<EventAction>
4724
+ /* On list resource templates error */
4725
+ onListResourceTemplatesError?: Array<EventAction>
4726
+ /* On read resource */
4727
+ onReadResource?: Array<EventAction>
4728
+ /* On read resource error */
4729
+ onReadResourceError?: Array<EventAction>
4730
+ /* On list tools */
4731
+ onListTools?: Array<EventAction>
4732
+ /* On list tools error */
4733
+ onListToolsError?: Array<EventAction>
4734
+ /* On call tool */
4735
+ onCallTool?: Array<EventAction>
4736
+ /* On call tool error */
4737
+ onCallToolError?: Array<EventAction>
4738
+ /* On list prompts */
4739
+ onListPrompts?: Array<EventAction>
4740
+ /* On list prompts error */
4741
+ onListPromptsError?: Array<EventAction>
4742
+ /* On get prompt */
4743
+ onGetPrompt?: Array<EventAction>
4744
+ /* On get prompt error */
4745
+ onGetPromptError?: Array<EventAction>
4746
+ }
4747
+ outlets?: {
4748
+ /* Connection state */
4749
+ connectionState?: () => Data
4750
+ /* List resources response */
4751
+ listResourcesResponse?: () => Data
4752
+ /* List resources error */
4753
+ listResourcesError?: () => Data
4754
+ /* List resource templates response */
4755
+ listResourceTemplatesResponse?: () => Data
4756
+ /* List resource templates error */
4757
+ listResourceTemplatesError?: () => Data
4758
+ /* Read resource response */
4759
+ readResourceResponse?: () => Data
4760
+ /* Read resource error */
4761
+ readResourceError?: () => Data
4762
+ /* List tools response */
4763
+ listToolsResponse?: () => Data
4764
+ /* List tools error */
4765
+ listToolsError?: () => Data
4766
+ /* Call tool response */
4767
+ callToolResponse?: () => Data
4768
+ /* Call tool error */
4769
+ callToolError?: () => Data
4770
+ /* List prompts response */
4771
+ listPromptsResponse?: () => Data
4772
+ /* List prompts error */
4773
+ listPromptsError?: () => Data
4774
+ /* Get prompt response */
4775
+ getPromptResponse?: () => Data
4776
+ /* Request prompt error */
4777
+ getPromptError?: () => Data
4778
+ /* Last error */
4779
+ lastError?: () => Data
4780
+ }
4781
+ }
4782
+
4783
+ /* Model Context Protocol (MCP) Client, support SSE and Generator MCPServer direct link */
4784
+ export type GeneratorMCP = Generator &
4785
+ GeneratorMCPDef & {
4786
+ templateKey: 'GENERATOR_MCP'
4787
+ switches: Array<
4788
+ SwitchDef &
4789
+ GeneratorMCPDef & {
4790
+ conds?: Array<{
4791
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
4792
+ cond:
4793
+ | SwitchCondInnerStateCurrentCanvas
4794
+ | SwitchCondData
4795
+ | {
4796
+ __typename: 'SwitchCondInnerStateOutlet'
4797
+ outlet:
4798
+ | 'connectionState'
4799
+ | 'listResourcesResponse'
4800
+ | 'listResourcesError'
4801
+ | 'listResourceTemplatesResponse'
4802
+ | 'listResourceTemplatesError'
4803
+ | 'readResourceResponse'
4804
+ | 'readResourceError'
4805
+ | 'listToolsResponse'
4806
+ | 'listToolsError'
4807
+ | 'callToolResponse'
4808
+ | 'callToolError'
4809
+ | 'listPromptsResponse'
4810
+ | 'listPromptsError'
4811
+ | 'getPromptResponse'
4812
+ | 'getPromptError'
4813
+ | 'lastError'
4814
+ value: any
4815
+ }
4816
+ }>
4817
+ }
4818
+ >
4819
+ }
4820
+
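
A minimal sketch of a GeneratorMCP (client) `property` block pointing at a server like the one sketched above, followed by a tool-call action using the param shapes defined in this block. The URL path and token are placeholders.

// Illustrative GeneratorMCP client property block.
const mcpClientProperty = {
  init: true,                        // connect on start
  type: 'streamable-http' as const,  // 'streamable-http' | 'sse' | 'direct-link'
  url: 'http://localhost:19853/mcp', // server endpoint (path is an assumption)
  bearerToken: '<token>',
  autoReconnect: true,
  maxReconnectAttempts: 10,
  reconnectInterval: 1000,
  requestTimeout: 60000,
}

// Calling a tool then goes through the action types above, e.g.:
const callEcho = {
  __actionName: 'GENERATOR_MCP_CALL_TOOL' as const,
  params: [
    { input: 'requestId' as const, value: 'req-1' },
    { input: 'name' as const, value: 'echo' },
    { input: 'variables' as const, value: { text: 'hello' } },
  ],
}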
4310
4821
  /* Load the model */
4311
4822
  export type GeneratorTTSActionLoadModel = Action & {
4312
4823
  __actionName: 'GENERATOR_TTS_LOAD_MODEL'
@@ -4339,8 +4850,13 @@ Default property:
4339
4850
  "model": "BricksDisplay/vits-eng",
4340
4851
  "modelType": "auto",
4341
4852
  "vocoderModel": "speecht5_hifigan",
4853
+ "maxLength": 4096,
4854
+ "temperature": 0.1,
4855
+ "repetitionPenalty": 1.1,
4856
+ "doSample": true,
4342
4857
  "outputType": "play",
4343
4858
  "cacheGenerated": true,
4859
+ "speed": 1,
4344
4860
  "autoInferEnable": false,
4345
4861
  "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
4346
4862
  "hardBreakTime": 500,
@@ -4352,29 +4868,9 @@ Default property:
4352
4868
  init?: boolean | DataLink
4353
4869
  /* TTS model
4354
4870
  The mms-tts models are licensed under CC-BY-NC-4.0 */
4355
- model?:
4356
- | 'Custom'
4357
- | 'BricksDisplay/vits-eng'
4358
- | 'BricksDisplay/vits-cmn'
4359
- | 'BricksDisplay/ellie-Bert-VITS2'
4360
- | 'mms-tts-ara (NC)'
4361
- | 'mms-tts-deu (NC)'
4362
- | 'mms-tts-eng (NC)'
4363
- | 'mms-tts-fra (NC)'
4364
- | 'mms-tts-hin (NC)'
4365
- | 'mms-tts-kor (NC)'
4366
- | 'mms-tts-por (NC)'
4367
- | 'mms-tts-ron (NC)'
4368
- | 'mms-tts-rus (NC)'
4369
- | 'mms-tts-spa (NC)'
4370
- | 'mms-tts-vie (NC)'
4371
- | 'mms-tts-yor (NC)'
4372
- | 'speecht5_tts'
4373
- | DataLink
4871
+ model?: string | DataLink
4374
4872
  /* Model type */
4375
- modelType?: 'auto' | 'vits' | 'bert_vits2' | 'speecht5' | DataLink
4376
- /* Load quantized model (deprecated, use `quantizeType` instead) */
4377
- quantized?: boolean | DataLink
4873
+ modelType?: string | DataLink
4378
4874
  /* Quantize type */
4379
4875
  quantizeType?:
4380
4876
  | 'auto'
@@ -4387,22 +4883,33 @@ Default property:
4387
4883
  | 'bnb4'
4388
4884
  | 'q4f16'
4389
4885
  | DataLink
4390
- /* Custom model name
4391
- Choose model from https://huggingface.co/models?pipeline_tag=text-to-audio&library=transformers.js */
4392
- customModel?: string | DataLink
4393
4886
  /* Vocoder model for SpeechT5 */
4394
4887
  vocoderModel?: 'Custom' | 'speecht5_hifigan' | DataLink
4395
4888
  /* Custom vocoder model
4396
4889
  Choose model from https://huggingface.co/models?library=transformers.js&other=hifigan */
4397
4890
  customVocoderModel?: string | DataLink
4398
- /* XVector speaker embedding for HiFi-GAN */
4891
+ /* Speaker embedding, for SpeechT5 or StyleTTS (Kokoro) */
4399
4892
  speakerEmbedUrl?: string | DataLink
4400
- /* MD5 checksum of `speakerEmbedUrl` */
4401
- speakerEmbedMd5?: string | DataLink
4893
+ /* Hash of `speakerEmbedUrl` */
4894
+ speakerEmbedHash?: string | DataLink
4895
+ /* Hash type of `speakerEmbedUrl` */
4896
+ speakerEmbedHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
4897
+ /* Speaker config, for OuteTTS model */
4898
+ speakerConfig?: {} | DataLink
4899
+ /* Audio token generation max length */
4900
+ maxLength?: number | DataLink
4901
+ /* Audio token generation temperature */
4902
+ temperature?: number | DataLink
4903
+ /* Audio token generation repetition penalty */
4904
+ repetitionPenalty?: number | DataLink
4905
+ /* Use greedy sampling for audio token generation */
4906
+ doSample?: boolean | DataLink
4402
4907
  /* Output mode */
4403
4908
  outputType?: 'play' | 'file' | DataLink
4404
4909
  /* Enable cache for generated audio */
4405
4910
  cacheGenerated?: boolean | DataLink
4911
+ /* Speed of the generated audio, for StyleTTS (Kokoro) */
4912
+ speed?: number | DataLink
4406
4913
  /* Text to generate */
4407
4914
  prompt?: string | DataLink
4408
4915
  /* Auto inference when prompt changes */
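
A minimal sketch of a GeneratorTTS `property` block using the fields touched in this hunk. Documented defaults (maxLength 4096, temperature 0.1, etc.) are reused where possible; the speaker-embedding URL, hash, and prompt are placeholders.

// Illustrative GeneratorTTS property block.
const ttsProperty = {
  model: 'BricksDisplay/vits-eng',                     // documented default model
  modelType: 'auto',
  speakerEmbedUrl: 'https://example.com/speaker.bin',  // placeholder
  speakerEmbedHash: '<sha256-of-speaker-embedding>',   // placeholder
  speakerEmbedHashType: 'sha256' as const,
  maxLength: 4096,
  temperature: 0.1,
  repetitionPenalty: 1.1,
  doSample: true,
  outputType: 'play' as const,
  cacheGenerated: true,
  speed: 1,                                            // StyleTTS (Kokoro) playback speed
  prompt: 'Hello from the TTS generator',              // placeholder text
}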
@@ -4484,6 +4991,21 @@ export type GeneratorOnnxLLMActionInfer = ActionWithParams & {
4484
4991
  value?: Array<any> | DataLink | EventProperty
4485
4992
  mapping?: string
4486
4993
  }
4994
+ | {
4995
+ input: 'images'
4996
+ value?: Array<any> | DataLink | EventProperty
4997
+ mapping?: string
4998
+ }
4999
+ | {
5000
+ input: 'tools'
5001
+ value?: Array<any> | DataLink | EventProperty
5002
+ mapping?: string
5003
+ }
5004
+ | {
5005
+ input: 'toolChoice'
5006
+ value?: string | DataLink | EventProperty
5007
+ mapping?: string
5008
+ }
4487
5009
  >
4488
5010
  }
4489
5011
 
@@ -4501,8 +5023,9 @@ interface GeneratorOnnxLLMDef {
4501
5023
  /*
4502
5024
  Default property:
4503
5025
  {
4504
- "model": "BricksDisplay/phi-1_5-q4",
4505
5026
  "modelType": "auto",
5027
+ "toolCallParser": "llama3_json",
5028
+ "toolChoice": "auto",
4506
5029
  "maxNewTokens": 256,
4507
5030
  "temperature": 0.7,
4508
5031
  "topK": 50,
@@ -4518,70 +5041,9 @@ Default property:
4518
5041
  /* Initialize the TTS context on generator initialization */
4519
5042
  init?: boolean | DataLink
4520
5043
  /* LLM model */
4521
- model?:
4522
- | 'Custom'
4523
- | 'BricksDisplay/phi-1_5'
4524
- | 'BricksDisplay/phi-1_5-q4'
4525
- | 'Qwen1.5-0.5B'
4526
- | 'Qwen1.5-1.8B'
4527
- | 'Qwen1.5-0.5B-Chat'
4528
- | 'Qwen1.5-1.8B-Chat'
4529
- | 'stablelm-2-1_6b'
4530
- | 'BricksDisplay/stablelm-2-1_6b-q4'
4531
- | 'stablelm-2-zephyr-1_6b'
4532
- | 'BricksDisplay/stablelm-2-zephyr-1_6b-q4'
4533
- | 'BricksDisplay/Llama-2-7b-chat-q4'
4534
- | 'TinyLLama-v0'
4535
- | 'TinyLlama-1.1B-Chat-v1.0'
4536
- | 'BricksDisplay/TinyLlama-1.1B-Chat-v1.0-q4'
4537
- | 'llama-160m'
4538
- | 'llama-68m'
4539
- | 'BricksDisplay/Yi-6B-q4'
4540
- | 'BricksDisplay/Yi-6B-Chat-q4'
4541
- | 'BricksDisplay/Mistral-7B-v0.1-q4'
4542
- | 'BricksDisplay/Mistral-7B-Instruct-v0.2-q4'
4543
- | 'BricksDisplay/Breeze-7B-Base-v1_0-q4'
4544
- | 'BricksDisplay/Breeze-7B-Instruct-v1_0-q4'
4545
- | 'gpt2'
4546
- | 'distilgpt2'
4547
- | 'gpt-neo-125M'
4548
- | 'opt-125m'
4549
- | 'opt-350m'
4550
- | 'bloom-560m'
4551
- | 'bloomz-560m'
4552
- | 't5-small'
4553
- | 't5-base'
4554
- | 'flan-t5-small'
4555
- | 'flan-t5-base'
4556
- | 'mt5-small'
4557
- | 'mt5-base'
4558
- | 'long-t5-lobal-base'
4559
- | 'long-t5-tglobal-base'
4560
- | DataLink
5044
+ model?: string | DataLink
4561
5045
  /* Model type */
4562
- modelType?:
4563
- | 'auto'
4564
- | 'gpt2'
4565
- | 'gptj'
4566
- | 'gpt_bigcode'
4567
- | 'gpt_neo'
4568
- | 'gpt_neox'
4569
- | 'bloom'
4570
- | 'mpt'
4571
- | 'opt'
4572
- | 'llama'
4573
- | 'falcon'
4574
- | 'mistral'
4575
- | 't5'
4576
- | 'mt5'
4577
- | 'longt5'
4578
- | 'phi'
4579
- | 'qwen2'
4580
- | 'stablelm'
4581
- | 'gemma'
4582
- | DataLink
4583
- /* Load quantized model (deprecated, use `quantizeType` instead) */
4584
- quantized?: boolean | DataLink
5046
+ modelType?: string | DataLink
4585
5047
  /* Quantize type */
4586
5048
  quantizeType?:
4587
5049
  | 'auto'
@@ -4594,10 +5056,20 @@ Default property:
4594
5056
  | 'bnb4'
4595
5057
  | 'q4f16'
4596
5058
  | DataLink
4597
- /* Custom model name
4598
- Choose model from https://huggingface.co/models?pipeline_tag=text2text-generation&library=transformers.js
4599
- or https://huggingface.co/models?pipeline_tag=text-generation&library=transformers.js&sort=trending */
4600
- customModel?: string | DataLink
5059
+ /* Prompt to inference */
5060
+ prompt?: string | DataLink
5061
+ /* Messages to inference */
5062
+ messages?: Array<DataLink | {}> | DataLink
5063
+ /* Images with message to inference */
5064
+ images?: Array<string | DataLink> | DataLink
5065
+ /* Tool call parser */
5066
+ toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
5067
+ /* Tools for chat mode using OpenAI-compatible function calling format
5068
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
5069
+ See: https://platform.openai.com/docs/guides/function-calling */
5070
+ tools?: Array<{} | DataLink> | DataLink
5071
+ /* Tool choice for chat mode */
5072
+ toolChoice?: 'none' | 'auto' | DataLink
4601
5073
  /* Max new tokens to generate */
4602
5074
  maxNewTokens?: number | DataLink
4603
5075
  /* Temperature */
@@ -4632,6 +5104,10 @@ Default property:
4632
5104
  events?: {
4633
5105
  /* Event triggered when state change */
4634
5106
  onContextStateChange?: Array<EventAction>
5107
+ /* Event triggered on get function call request */
5108
+ onFunctionCall?: Array<EventAction>
5109
+ /* Event triggered on completion finished */
5110
+ onCompletionFinished?: Array<EventAction>
4635
5111
  /* Event triggered when error occurs */
4636
5112
  onError?: Array<EventAction>
4637
5113
  }
@@ -4642,6 +5118,8 @@ Default property:
4642
5118
  generated?: () => Data
4643
5119
  /* Full result of generation */
4644
5120
  fullResult?: () => Data
5121
+ /* Last function call */
5122
+ lastFunctionCall?: () => Data
4645
5123
  }
4646
5124
  }
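
A minimal sketch of the new GeneratorOnnxLLM tool-calling setup. The `tools` entries follow the OpenAI-compatible {type, function: {name, description, parameters}} structure referenced in the property comment above; the weather tool and user message are invented examples.

// Illustrative GeneratorOnnxLLM tool-calling property block.
const onnxLlmToolProperty = {
  toolCallParser: 'llama3_json' as const,
  toolChoice: 'auto' as const,
  tools: [
    {
      type: 'function',
      function: {
        name: 'get_weather',                                 // invented example tool
        description: 'Get the current weather for a city',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
      },
    },
  ],
  messages: [{ role: 'user', content: 'What is the weather in Taipei?' }],
}
// A matching `onFunctionCall` event handler would then read the request from the
// `lastFunctionCall` outlet added in this release.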
4647
5125
 
@@ -4660,7 +5138,7 @@ export type GeneratorOnnxLLM = Generator &
4660
5138
  | SwitchCondData
4661
5139
  | {
4662
5140
  __typename: 'SwitchCondInnerStateOutlet'
4663
- outlet: 'contextState' | 'generated' | 'fullResult'
5141
+ outlet: 'contextState' | 'generated' | 'fullResult' | 'lastFunctionCall'
4664
5142
  value: any
4665
5143
  }
4666
5144
  }>
@@ -4707,27 +5185,9 @@ Default property:
4707
5185
  /* Initialize the TTS context on generator initialization */
4708
5186
  init?: boolean | DataLink
4709
5187
  /* STT model */
4710
- model?:
4711
- | 'Custom'
4712
- | 'whisper-tiny'
4713
- | 'whisper-tiny.en'
4714
- | 'whisper-small'
4715
- | 'whisper-small.en'
4716
- | 'whisper-base'
4717
- | 'whisper-base.en'
4718
- | 'whisper-medium'
4719
- | 'whisper-medium.en'
4720
- | 'whisper-large'
4721
- | 'whisper-large-v2'
4722
- | 'whisper-large-v3'
4723
- | 'mms-1b-all'
4724
- | 'mms-1b-fl102'
4725
- | 'mms-1b-l1107'
4726
- | DataLink
5188
+ model?: string | DataLink
4727
5189
  /* Model type */
4728
- modelType?: 'auto' | 'whisper' | 'hubert' | 'wav2vec2' | 'wav2vec2-bert' | DataLink
4729
- /* Load quantized model (deprecated, use `quantizeType` instead) */
4730
- quantized?: boolean | DataLink
5190
+ modelType?: string | DataLink
4731
5191
  /* Quantize type */
4732
5192
  quantizeType?:
4733
5193
  | 'auto'
@@ -4740,9 +5200,6 @@ Default property:
4740
5200
  | 'bnb4'
4741
5201
  | 'q4f16'
4742
5202
  | DataLink
4743
- /* Custom model name
4744
- Choose model from https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&library=transformers.js */
4745
- customModel?: string | DataLink
4746
5203
  /* Return timestamps */
4747
5204
  returnTimestamps?: 'none' | 'enable' | 'word' | DataLink
4748
5205
  /* Transcription language
@@ -4975,7 +5432,7 @@ export type GeneratorSpeechInferenceActionTranscribeData = ActionWithParams & {
4975
5432
  >
4976
5433
  }
4977
5434
 
4978
- /* Transcribe microphone audio source */
5435
+ /* [Deprecated] Transcribe microphone audio source */
4979
5436
  export type GeneratorSpeechInferenceActionTranscribeRealtime = ActionWithParams & {
4980
5437
  __actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_REALTIME'
4981
5438
  params?: Array<
@@ -5042,7 +5499,7 @@ export type GeneratorSpeechInferenceActionTranscribeRealtime = ActionWithParams
5042
5499
  >
5043
5500
  }
5044
5501
 
5045
- /* Stop transcribing microphone audio source */
5502
+ /* [Deprecated] Stop transcribing microphone audio source */
5046
5503
  export type GeneratorSpeechInferenceActionTranscribeRealtimeStop = Action & {
5047
5504
  __actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_REALTIME_STOP'
5048
5505
  }
@@ -5062,6 +5519,7 @@ interface GeneratorSpeechInferenceDef {
5062
5519
  Default property:
5063
5520
  {
5064
5521
  "init": false,
5522
+ "accelVariant": "default",
5065
5523
  "modelName": "base-q8_0",
5066
5524
  "modelUseCoreML": false,
5067
5525
  "modelUseGPU": true,
@@ -5080,6 +5538,11 @@ Default property:
5080
5538
  /* Initialize the Whisper context on generator initialization
5081
5539
  Please note that it will take some RAM depending on the model size */
5082
5540
  init?: boolean | DataLink
5541
+ /* Accelerator variant (Only for desktop)
5542
+ `default` - CPU / Metal (macOS)
5543
+ `vulkan` - Use Vulkan
5544
+ `cuda` - Use CUDA */
5545
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
5083
5546
  /* Use model name, the model download progress will be done in preload stage or the generator initialization stage.
5084
5547
  We used `ggml` format model, please refer to https://huggingface.co/BricksDisplay/whisper-ggml
5085
5548
  You can also choose `custom` option and set `Model URL` and `Model MD5` to use your own model */
@@ -5290,7 +5753,7 @@ Default property:
5290
5753
  inferRealtimeVadFreqThold?: number | DataLink
5291
5754
  }
5292
5755
  events?: {
5293
- /* Event triggered when load is done */
5756
+ /* Event triggered when context state changes */
5294
5757
  onContextStateChange?: Array<EventAction>
5295
5758
  /* Event triggered when error occurs */
5296
5759
  onError?: Array<EventAction>
@@ -5317,7 +5780,13 @@ Default property:
5317
5780
  }
5318
5781
  }
5319
5782
 
5320
- /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) */
5783
+ /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp)
5784
+
5785
+ ## Notice
5786
+ - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
5787
+ - macOS: Supported GPU acceleration, recommended use M1+ chip device
5788
+ - Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
5789
+ - Linux / Windows: Supported GPU acceleration, you can choose `vulkan` or `cuda` backend in Accel Variant property */
5321
5790
  export type GeneratorSpeechInference = Generator &
5322
5791
  GeneratorSpeechInferenceDef & {
5323
5792
  templateKey: 'GENERATOR_SPEECH_INFERENCE'
@@ -5346,11 +5815,416 @@ export type GeneratorSpeechInference = Generator &
5346
5815
  >
5347
5816
  }
5348
5817
 
5818
+ /* Load the model */
5819
+ export type GeneratorVadInferenceActionLoadModel = Action & {
5820
+ __actionName: 'GENERATOR_VAD_INFERENCE_LOAD_MODEL'
5821
+ }
5822
+
5823
+ /* Detect speech in audio file. You can provide `File URL` property, if not provided, it will use the default `File URL` */
5824
+ export type GeneratorVadInferenceActionDetectFile = ActionWithParams & {
5825
+ __actionName: 'GENERATOR_VAD_INFERENCE_DETECT_FILE'
5826
+ params?: Array<
5827
+ | {
5828
+ input: 'fileUrl'
5829
+ value?: string | DataLink | EventProperty
5830
+ mapping?: string
5831
+ }
5832
+ | {
5833
+ input: 'threshold'
5834
+ value?: number | DataLink | EventProperty
5835
+ mapping?: string
5836
+ }
5837
+ | {
5838
+ input: 'minSpeechDurationMs'
5839
+ value?: number | DataLink | EventProperty
5840
+ mapping?: string
5841
+ }
5842
+ | {
5843
+ input: 'minSilenceDurationMs'
5844
+ value?: number | DataLink | EventProperty
5845
+ mapping?: string
5846
+ }
5847
+ | {
5848
+ input: 'maxSpeechDurationS'
5849
+ value?: number | DataLink | EventProperty
5850
+ mapping?: string
5851
+ }
5852
+ | {
5853
+ input: 'speechPadMs'
5854
+ value?: number | DataLink | EventProperty
5855
+ mapping?: string
5856
+ }
5857
+ | {
5858
+ input: 'samplesOverlap'
5859
+ value?: number | DataLink | EventProperty
5860
+ mapping?: string
5861
+ }
5862
+ >
5863
+ }
5864
+
5865
+ /* Detect speech in audio data. Currently only support base64 encoded audio data (16-bit PCM, mono, 16kHz) */
5866
+ export type GeneratorVadInferenceActionDetectData = ActionWithParams & {
5867
+ __actionName: 'GENERATOR_VAD_INFERENCE_DETECT_DATA'
5868
+ params?: Array<
5869
+ | {
5870
+ input: 'data'
5871
+ value?: any | EventProperty
5872
+ mapping?: string
5873
+ }
5874
+ | {
5875
+ input: 'threshold'
5876
+ value?: number | DataLink | EventProperty
5877
+ mapping?: string
5878
+ }
5879
+ | {
5880
+ input: 'minSpeechDurationMs'
5881
+ value?: number | DataLink | EventProperty
5882
+ mapping?: string
5883
+ }
5884
+ | {
5885
+ input: 'minSilenceDurationMs'
5886
+ value?: number | DataLink | EventProperty
5887
+ mapping?: string
5888
+ }
5889
+ | {
5890
+ input: 'maxSpeechDurationS'
5891
+ value?: number | DataLink | EventProperty
5892
+ mapping?: string
5893
+ }
5894
+ | {
5895
+ input: 'speechPadMs'
5896
+ value?: number | DataLink | EventProperty
5897
+ mapping?: string
5898
+ }
5899
+ | {
5900
+ input: 'samplesOverlap'
5901
+ value?: number | DataLink | EventProperty
5902
+ mapping?: string
5903
+ }
5904
+ >
5905
+ }
5906
+
5907
+ /* Clear downloaded files (model, audio) & current jobs */
5908
+ export type GeneratorVadInferenceActionClearDownload = Action & {
5909
+ __actionName: 'GENERATOR_VAD_INFERENCE_CLEAR_DOWNLOAD'
5910
+ }
5911
+
5912
+ /* Release context */
5913
+ export type GeneratorVadInferenceActionReleaseContext = Action & {
5914
+ __actionName: 'GENERATOR_VAD_INFERENCE_RELEASE_CONTEXT'
5915
+ }
5916
+
5917
+ interface GeneratorVadInferenceDef {
5918
+ /*
5919
+ Default property:
5920
+ {
5921
+ "init": false,
5922
+ "modelName": "silero-v5.1.2",
5923
+ "modelUseGPU": true,
5924
+ "modelThreads": 4,
5925
+ "detectThreshold": 0.5,
5926
+ "detectMinSpeechDurationMs": 250,
5927
+ "detectMinSilenceDurationMs": 100,
5928
+ "detectMaxSpeechDurationS": 30,
5929
+ "detectSpeechPadMs": 30,
5930
+ "detectSamplesOverlap": 0.1
5931
+ }
5932
+ */
5933
+ property?: {
5934
+ /* Initialize the VAD context on generator initialization
5935
+ Please note that it will take some RAM depending on the model size */
5936
+ init?: boolean | DataLink
5937
+ /* Use model name, currently only supports the Silero VAD model.
5938
+ The model download progress will be done in preload stage or the generator initialization stage.
5939
+ You can also choose `custom` option and set `Model URL` and `Model SHA1` to use your own model */
5940
+ modelName?: 'custom' | 'silero-v5.1.2' | DataLink
5941
+ /* The URL or path of model
5942
+ We used `ggml` format model, please refer to https://huggingface.co/ggml-org/whisper-vad */
5943
+ modelUrl?: string | DataLink
5944
+ /* Hash type of model */
5945
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
5946
+ /* Hash of model */
5947
+ modelHash?: string | DataLink
5948
+ /* Use GPU Acceleration for inference. Currently iOS only. */
5949
+ modelUseGPU?: boolean | DataLink
5950
+ /* Number of threads to use for processing */
5951
+ modelThreads?: number | DataLink
5952
+ /* Speech probability threshold (0.0-1.0) */
5953
+ detectThreshold?: number | DataLink
5954
+ /* Minimum speech duration in milliseconds */
5955
+ detectMinSpeechDurationMs?: number | DataLink
5956
+ /* Minimum silence duration in milliseconds */
5957
+ detectMinSilenceDurationMs?: number | DataLink
5958
+ /* Maximum speech duration in seconds */
5959
+ detectMaxSpeechDurationS?: number | DataLink
5960
+ /* Padding around speech segments in milliseconds */
5961
+ detectSpeechPadMs?: number | DataLink
5962
+ /* Overlap between analysis windows (0.0-1.0) */
5963
+ detectSamplesOverlap?: number | DataLink
5964
+ /* The file URL or path to be analyzed.
5965
+ It only supported `wav` format with 16kHz sample rate & single (mono) channel */
5966
+ detectFileUrl?: string | DataLink
5967
+ /* MD5 of file to be analyzed */
5968
+ detectFileMd5?: string | DataLink
5969
+ }
5970
+ events?: {
5971
+ /* Event triggered when context state changes */
5972
+ onContextStateChange?: Array<EventAction>
5973
+ /* Event triggered when error occurs */
5974
+ onError?: Array<EventAction>
5975
+ /* Event triggered when got detection result */
5976
+ onDetected?: Array<EventAction>
5977
+ }
5978
+ outlets?: {
5979
+ /* Context state */
5980
+ contextState?: () => Data
5981
+ /* Context details */
5982
+ contextDetails?: () => Data
5983
+ /* Is detecting */
5984
+ isDetecting?: () => Data
5985
+ /* Detection segments result */
5986
+ detectionSegments?: () => Data
5987
+ /* Detection details */
5988
+ detectionDetails?: () => Data
5989
+ }
5990
+ }
5991
+
5992
+ /* Local Voice Activity Detection (VAD) inference based on GGML and [whisper.rn](https://github.com/mybigday/whisper.rn) */
5993
+ export type GeneratorVadInference = Generator &
5994
+ GeneratorVadInferenceDef & {
5995
+ templateKey: 'GENERATOR_VAD_INFERENCE'
5996
+ switches: Array<
5997
+ SwitchDef &
5998
+ GeneratorVadInferenceDef & {
5999
+ conds?: Array<{
6000
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
6001
+ cond:
6002
+ | SwitchCondInnerStateCurrentCanvas
6003
+ | SwitchCondData
6004
+ | {
6005
+ __typename: 'SwitchCondInnerStateOutlet'
6006
+ outlet:
6007
+ | 'contextState'
6008
+ | 'contextDetails'
6009
+ | 'isDetecting'
6010
+ | 'detectionSegments'
6011
+ | 'detectionDetails'
6012
+ value: any
6013
+ }
6014
+ }>
6015
+ }
6016
+ >
6017
+ }
6018
+
6019
+ /* Start realtime transcription */
6020
+ export type GeneratorRealtimeTranscriptionActionStart = Action & {
6021
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_START'
6022
+ }
6023
+
6024
+ /* Stop realtime transcription */
6025
+ export type GeneratorRealtimeTranscriptionActionStop = Action & {
6026
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_STOP'
6027
+ }
6028
+
6029
+ /* Force move to next slice */
6030
+ export type GeneratorRealtimeTranscriptionActionNextSlice = Action & {
6031
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_NEXT_SLICE'
6032
+ }
6033
+
6034
+ /* Reset transcriber state */
6035
+ export type GeneratorRealtimeTranscriptionActionReset = Action & {
6036
+ __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_RESET'
6037
+ }
6038
+
6039
+ interface GeneratorRealtimeTranscriptionDef {
6040
+ /*
6041
+ Default property:
6042
+ {
6043
+ "sttLivePolicy": "only-in-use",
6044
+ "vadInferenceLivePolicy": "only-in-use",
6045
+ "vadEnabled": true,
6046
+ "audioSliceSec": 30,
6047
+ "audioMinSec": 1,
6048
+ "maxSlicesInMemory": 5,
6049
+ "vadStrategy": "use-preset",
6050
+ "vadPreset": "default",
6051
+ "autoSliceOnSpeechEnd": true,
6052
+ "autoSliceThreshold": 2,
6053
+ "initialPrompt": "",
6054
+ "promptPreviousSlices": false,
6055
+ "saveAudio": true,
6056
+ "testMode": false,
6057
+ "testPlaybackSpeed": 1,
6058
+ "testChunkDurationMs": 100,
6059
+ "testLoop": false
6060
+ }
6061
+ */
6062
+ property?: {
6063
+ /* STT Generator for Whisper context */
6064
+ sttGeneratorId?: string | DataLink
6065
+ /* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when not in use. */
6066
+ sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
6067
+ /* VAD Inference Generator for voice activity detection */
6068
+ vadInferenceGeneratorId?: string | DataLink
6069
+ /* VAD Inference Live Policy. If the policy is `only-in-use`, the VAD Inference context will be released when not in use. */
6070
+ vadInferenceLivePolicy?: 'only-in-use' | 'manual' | DataLink
6071
+ /* Enable VAD (Voice Activity Detection) */
6072
+ vadEnabled?: boolean | DataLink
6073
+ /* Audio slice duration in seconds */
6074
+ audioSliceSec?: number | DataLink
6075
+ /* Minimum audio duration to start transcription in seconds */
6076
+ audioMinSec?: number | DataLink
6077
+ /* Maximum number of slices to keep in memory */
6078
+ maxSlicesInMemory?: number | DataLink
6079
+ /* VAD Strategy */
6080
+ vadStrategy?: 'use-preset' | 'use-generator-options' | DataLink
6081
+ /* VAD preset configuration */
6082
+ vadPreset?:
6083
+ | 'default'
6084
+ | 'sensitive'
6085
+ | 'very-sensitive'
6086
+ | 'conservative'
6087
+ | 'very-conservative'
6088
+ | 'continuous-speech'
6089
+ | 'meeting'
6090
+ | 'noisy-environment'
6091
+ | DataLink
6092
+ /* Auto slice on speech end */
6093
+ autoSliceOnSpeechEnd?: boolean | DataLink
6094
+ /* Auto slice threshold in seconds */
6095
+ autoSliceThreshold?: number | DataLink
6096
+ /* Initial prompt for transcription */
6097
+ initialPrompt?: string | DataLink
6098
+ /* Include previous slices in prompt */
6099
+ promptPreviousSlices?: boolean | DataLink
6100
+ /* Enable audio output saving (auto-generates file path) */
6101
+ saveAudio?: boolean | DataLink
6102
+ /* Use test mode with file simulation */
6103
+ testMode?: boolean | DataLink
6104
+ /* Test audio file path for simulation */
6105
+ testFilePath?: string | DataLink
6106
+ /* Test audio file hash */
6107
+ testFileHash?: string | DataLink
6108
+ /* Test audio file hash type */
6109
+ testFileHashType?: string | DataLink
6110
+ /* Test playback speed */
6111
+ testPlaybackSpeed?: number | DataLink
6112
+ /* Test chunk duration in milliseconds */
6113
+ testChunkDurationMs?: number | DataLink
6114
+ /* Loop test audio file */
6115
+ testLoop?: boolean | DataLink
6116
+ }
6117
+ events?: {
6118
+ /* Event triggered when transcription starts, processes, or ends */
6119
+ onTranscribe?: Array<EventAction>
6120
+ /* Event triggered on VAD (Voice Activity Detection) events */
6121
+ onVad?: Array<EventAction>
6122
+ /* Event triggered when error occurs */
6123
+ onError?: Array<EventAction>
6124
+ /* Event triggered when status changes */
6125
+ onStatusChange?: Array<EventAction>
6126
+ /* Event triggered when statistics update */
6127
+ onStatsUpdate?: Array<EventAction>
6128
+ /* Event triggered when transcription ends */
6129
+ onEnd?: Array<EventAction>
6130
+ }
6131
+ outlets?: {
6132
+ /* Is realtime transcription currently active */
6133
+ isActive?: () => Data
6134
+ /* Is currently transcribing audio */
6135
+ isTranscribing?: () => Data
6136
+ /* Current transcription results */
6137
+ results?: () => Data
6138
+ /* Current transcription result text */
6139
+ resultText?: () => Data
6140
+ /* Current statistics */
6141
+ statistics?: () => Data
6142
+ /* Latest transcribe event */
6143
+ lastTranscribeEvent?: () => Data
6144
+ /* Latest VAD event */
6145
+ lastVadEvent?: () => Data
6146
+ /* Audio output file path (auto-generated when saving audio) */
6147
+ audioOutputPath?: () => Data
6148
+ }
6149
+ }
6150
+
6151
+ /* Realtime speech-to-text transcription using Whisper and VAD with live audio streaming */
6152
+ export type GeneratorRealtimeTranscription = Generator &
6153
+ GeneratorRealtimeTranscriptionDef & {
6154
+ templateKey: 'GENERATOR_REALTIME_TRANSCRIPTION'
6155
+ switches: Array<
6156
+ SwitchDef &
6157
+ GeneratorRealtimeTranscriptionDef & {
6158
+ conds?: Array<{
6159
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
6160
+ cond:
6161
+ | SwitchCondInnerStateCurrentCanvas
6162
+ | SwitchCondData
6163
+ | {
6164
+ __typename: 'SwitchCondInnerStateOutlet'
6165
+ outlet:
6166
+ | 'isActive'
6167
+ | 'isTranscribing'
6168
+ | 'results'
6169
+ | 'resultText'
6170
+ | 'statistics'
6171
+ | 'lastTranscribeEvent'
6172
+ | 'lastVadEvent'
6173
+ | 'audioOutputPath'
6174
+ value: any
6175
+ }
6176
+ }>
6177
+ }
6178
+ >
6179
+ }
6180
+
5349
6181
  /* Load the model */
5350
6182
  export type GeneratorLLMActionLoadModel = Action & {
5351
6183
  __actionName: 'GENERATOR_LLM_LOAD_MODEL'
5352
6184
  }
5353
6185
 
6186
+ /* Load multimodal (vision) model (PREVIEW FEATURE) */
6187
+ export type GeneratorLLMActionLoadMultimodalModel = Action & {
6188
+ __actionName: 'GENERATOR_LLM_LOAD_MULTIMODAL_MODEL'
6189
+ }
6190
+
6191
+ /* Tokenize the prompt */
6192
+ export type GeneratorLLMActionTokenize = ActionWithParams & {
6193
+ __actionName: 'GENERATOR_LLM_TOKENIZE'
6194
+ params?: Array<
6195
+ | {
6196
+ input: 'mode'
6197
+ value?: string | DataLink | EventProperty
6198
+ mapping?: string
6199
+ }
6200
+ | {
6201
+ input: 'prompt'
6202
+ value?: string | DataLink | EventProperty
6203
+ mapping?: string
6204
+ }
6205
+ | {
6206
+ input: 'promptMediaPaths'
6207
+ value?: Array<any> | DataLink | EventProperty
6208
+ mapping?: string
6209
+ }
6210
+ | {
6211
+ input: 'messages'
6212
+ value?: Array<any> | DataLink | EventProperty
6213
+ mapping?: string
6214
+ }
6215
+ >
6216
+ }
6217
+
6218
+ /* Detokenize the tokens to text */
6219
+ export type GeneratorLLMActionDetokenize = ActionWithParams & {
6220
+ __actionName: 'GENERATOR_LLM_DETOKENIZE'
6221
+ params?: Array<{
6222
+ input: 'tokens'
6223
+ value?: Array<any> | DataLink | EventProperty
6224
+ mapping?: string
6225
+ }>
6226
+ }
6227
+
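
   A hedged sketch of action payloads matching the tokenize/detokenize shapes above; the prompt text
   and token ids are purely illustrative, and Partial is used because the base ActionWithParams
   fields are not shown in this diff:

   const tokenizeSketch: Partial<GeneratorLLMActionTokenize> = {
     __actionName: 'GENERATOR_LLM_TOKENIZE',
     params: [{ input: 'prompt', value: 'Hello world' }],
   }

   const detokenizeSketch: Partial<GeneratorLLMActionDetokenize> = {
     __actionName: 'GENERATOR_LLM_DETOKENIZE',
     params: [{ input: 'tokens', value: [1, 2, 3] }],  // illustrative token ids
   }
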
5354
6228
  /* Pre-process the prompt, this can speed up the completion action */
5355
6229
  export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
5356
6230
  __actionName: 'GENERATOR_LLM_PROCESS_PROMPT'
@@ -5385,11 +6259,21 @@ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
5385
6259
  value?: string | DataLink | EventProperty
5386
6260
  mapping?: string
5387
6261
  }
6262
+ | {
6263
+ input: 'enableThinking'
6264
+ value?: boolean | DataLink | EventProperty
6265
+ mapping?: string
6266
+ }
5388
6267
  | {
5389
6268
  input: 'prompt'
5390
6269
  value?: string | DataLink | EventProperty
5391
6270
  mapping?: string
5392
6271
  }
6272
+ | {
6273
+ input: 'promptMediaPaths'
6274
+ value?: Array<any> | DataLink | EventProperty
6275
+ mapping?: string
6276
+ }
5393
6277
  | {
5394
6278
  input: 'promptTemplateData'
5395
6279
  value?: {} | DataLink | EventProperty
@@ -5442,11 +6326,21 @@ export type GeneratorLLMActionCompletion = ActionWithParams & {
5442
6326
  value?: string | DataLink | EventProperty
5443
6327
  mapping?: string
5444
6328
  }
6329
+ | {
6330
+ input: 'enableThinking'
6331
+ value?: boolean | DataLink | EventProperty
6332
+ mapping?: string
6333
+ }
5445
6334
  | {
5446
6335
  input: 'prompt'
5447
6336
  value?: string | DataLink | EventProperty
5448
6337
  mapping?: string
5449
6338
  }
6339
+ | {
6340
+ input: 'promptMediaPaths'
6341
+ value?: Array<any> | DataLink | EventProperty
6342
+ mapping?: string
6343
+ }
5450
6344
  | {
5451
6345
  input: 'promptTemplateData'
5452
6346
  value?: {} | DataLink | EventProperty
@@ -5627,6 +6521,11 @@ export type GeneratorLLMActionClearDownload = Action & {
5627
6521
  __actionName: 'GENERATOR_LLM_CLEAR_DOWNLOAD'
5628
6522
  }
5629
6523
 
6524
+ /* Release multimodal (vision) context (PREVIEW FEATURE) */
6525
+ export type GeneratorLLMActionReleaseMultimodalContext = Action & {
6526
+ __actionName: 'GENERATOR_LLM_RELEASE_MULTIMODAL_CONTEXT'
6527
+ }
6528
+
5630
6529
  /* Release context */
5631
6530
  export type GeneratorLLMActionReleaseContext = Action & {
5632
6531
  __actionName: 'GENERATOR_LLM_RELEASE_CONTEXT'
@@ -5647,14 +6546,16 @@ Default property:
5647
6546
  "useMmap": true,
5648
6547
  "cacheKType": "f16",
5649
6548
  "cacheVType": "f16",
6549
+ "ctxShift": true,
5650
6550
  "transformScriptEnabled": false,
5651
- "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables } \*\/\nreturn inputs.prompt",
6551
+ "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables }, members = { llmUtils } \*\/\nreturn inputs.prompt",
5652
6552
  "transformScriptVariables": {},
5653
6553
  "sessionMinSaveSize": 50,
5654
6554
  "sessionRemain": 10,
5655
6555
  "completionMode": "auto",
5656
6556
  "completionPrompt": "",
5657
6557
  "completionPromptTemplateType": "${}",
6558
+ "completionEnableThinking": true,
5658
6559
  "completionStopWords": [
5659
6560
  "</s>",
5660
6561
  "<|end|>",
@@ -5703,6 +6604,14 @@ Default property:
5703
6604
  modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
5704
6605
  /* Hash of model */
5705
6606
  modelHash?: string | DataLink
6607
+ /* Load multimodal (vision) context after the model is loaded (PREVIEW FEATURE) */
6608
+ initMultimodal?: boolean | DataLink
6609
+ /* The URL or path of mmproj file for multimodal vision support (PREVIEW FEATURE) */
6610
+ mmprojUrl?: string | DataLink
6611
+ /* Hash type of mmproj file (PREVIEW FEATURE) */
6612
+ mmprojHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6613
+ /* Hash of mmproj file (PREVIEW FEATURE) */
6614
+ mmprojHash?: string | DataLink
5706
6615
  /* Chat Template (Jinja format) to override the default template from model */
5707
6616
  chatTemplate?: string | DataLink
5708
6617
  /* Context size (0 ~ 4096) (Default to 512) */
@@ -5732,6 +6641,8 @@ Default property:
5732
6641
  cacheKType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
5733
6642
  /* KV cache data type for the V (Default: f16) */
5734
6643
  cacheVType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
6644
+ /* Enable context shift */
6645
+ ctxShift?: boolean | DataLink
5735
6646
  /* Enable Transform Script for processing the prompt */
5736
6647
  transformScriptEnabled?: boolean | DataLink
5737
6648
  /* Code of Transform Script */
@@ -5750,8 +6661,10 @@ Default property:
5750
6661
  sessionRemain?: number | DataLink
5751
6662
  /* TODO: lora, n_gqa, rms_norm_eps, rope_freq_base, rope_freq_scale */
5752
6663
  completionMode?: 'auto' | 'chat' | 'text' | DataLink
5753
- /* Tools for chat mode */
5754
- completionTools?: {} | DataLink
6664
+ /* Tools for chat mode using OpenAI-compatible function calling format
6665
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
6666
+ See: https://platform.openai.com/docs/guides/function-calling */
6667
+ completionTools?: Array<{} | DataLink> | DataLink
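 /* Illustrative sketch (not from the package) of a completionTools value in the shape described
    above; the function name and parameters are hypothetical:
    completionTools: [{
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Return the current weather for a city',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
      },
    }]
 */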
5755
6668
  /* Enable parallel tool calls */
5756
6669
  completionParallelToolCalls?: boolean | DataLink
5757
6670
  /* Tool choice for chat mode */
@@ -5768,6 +6681,9 @@ Default property:
5768
6681
  | DataLink
5769
6682
  /* Prompt (text mode) */
5770
6683
  completionPrompt?: string | DataLink
6684
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
6685
+ In the prompt, use `<__media__>` to mark where the media content should be placed */
6686
+ completionPromptMediaPaths?: Array<string | DataLink> | DataLink
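 /* Illustrative sketch: pairing a text-mode prompt with media paths; the image path is hypothetical:
    completionPrompt: 'Describe this image: <__media__>',
    completionPromptMediaPaths: ['/path/to/photo.jpg']
 */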
5771
6687
  /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
5772
6688
  completionPromptTemplateData?: {} | DataLink
5773
6689
  /* The prompt template type */
@@ -5785,6 +6701,8 @@ Default property:
5785
6701
  }
5786
6702
  schema?: {} | DataLink
5787
6703
  }
6704
+ /* Enable thinking */
6705
+ completionEnableThinking?: boolean | DataLink
5788
6706
  /* Stop words */
5789
6707
  completionStopWords?: Array<string | DataLink> | DataLink
5790
6708
  /* Number of tokens to predict */
@@ -5843,7 +6761,7 @@ Default property:
5843
6761
  completionIgnoreEOS?: boolean | DataLink
5844
6762
  }
5845
6763
  events?: {
5846
- /* Event triggered when load is done */
6764
+ /* Event triggered when context state changes */
5847
6765
  onContextStateChange?: Array<EventAction>
5848
6766
  /* Event triggered when error occurs */
5849
6767
  onError?: Array<EventAction>
@@ -5865,6 +6783,10 @@ Default property:
5865
6783
  sessions?: () => Data
5866
6784
  /* Is evaluating */
5867
6785
  isEvaluating?: () => Data
6786
+ /* Tokenize result */
6787
+ tokenizeResult?: () => Data
6788
+ /* Detokenize result */
6789
+ detokenizeResult?: () => Data
5868
6790
  /* Last formatted prompt (messages or prompt) */
5869
6791
  completionLastFormattedPrompt?: () => Data
5870
6792
  /* Last completion token */
@@ -5885,7 +6807,7 @@ Default property:
5885
6807
  - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
5886
6808
  - macOS: Supported GPU acceleration, recommended use M1+ chip device
5887
6809
  - Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
5888
- - Linux / Windows: Supported GPU acceleration, currently only Vulkan backend available */
6810
+ - Linux / Windows: Supported GPU acceleration, you can choose the `vulkan` or `cuda` backend via the Accel Variant property */
5889
6811
  export type GeneratorLLM = Generator &
5890
6812
  GeneratorLLMDef & {
5891
6813
  templateKey: 'GENERATOR_LLM'
@@ -5905,6 +6827,8 @@ export type GeneratorLLM = Generator &
5905
6827
  | 'contextDetails'
5906
6828
  | 'sessions'
5907
6829
  | 'isEvaluating'
6830
+ | 'tokenizeResult'
6831
+ | 'detokenizeResult'
5908
6832
  | 'completionLastFormattedPrompt'
5909
6833
  | 'completionLastToken'
5910
6834
  | 'completionResult'
@@ -5917,31 +6841,518 @@ export type GeneratorLLM = Generator &
5917
6841
  >
5918
6842
  }
5919
6843
 
5920
- /* Run text completion */
5921
- export type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
5922
- __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
5923
- params?: Array<
5924
- | {
5925
- input: 'messages'
5926
- value?: Array<any> | DataLink | EventProperty
5927
- mapping?: string
5928
- }
5929
- | {
5930
- input: 'maxTokens'
5931
- value?: number | DataLink | EventProperty
5932
- mapping?: string
5933
- }
5934
- | {
5935
- input: 'temperature'
5936
- value?: number | DataLink | EventProperty
5937
- mapping?: string
5938
- }
5939
- | {
5940
- input: 'topP'
5941
- value?: number | DataLink | EventProperty
5942
- mapping?: string
5943
- }
5944
- | {
6844
+ /* Load the model */
6845
+ export type GeneratorGGMLTTSActionLoadModel = Action & {
6846
+ __actionName: 'GENERATOR_GGML_TTS_LOAD_MODEL'
6847
+ }
6848
+
6849
+ /* Generate audio */
6850
+ export type GeneratorGGMLTTSActionGenerate = ActionWithParams & {
6851
+ __actionName: 'GENERATOR_GGML_TTS_GENERATE'
6852
+ params?: Array<{
6853
+ input: 'text'
6854
+ value?: string | DataLink | EventProperty
6855
+ mapping?: string
6856
+ }>
6857
+ }
6858
+
6859
+ /* Clean cache */
6860
+ export type GeneratorGGMLTTSActionCleanCache = Action & {
6861
+ __actionName: 'GENERATOR_GGML_TTS_CLEAN_CACHE'
6862
+ }
6863
+
6864
+ /* Release context */
6865
+ export type GeneratorGGMLTTSActionReleaseContext = Action & {
6866
+ __actionName: 'GENERATOR_GGML_TTS_RELEASE_CONTEXT'
6867
+ }
6868
+
6869
+ interface GeneratorGGMLTTSDef {
6870
+ /*
6871
+ Default property:
6872
+ {
6873
+ "vocoderUrl": "https://huggingface.co/ggml-org/WavTokenizer/resolve/main/WavTokenizer-Large-75-F16.gguf",
6874
+ "vocoderHashType": "sha256",
6875
+ "vocoderHash": "2356baa8631cc2995ea3465196a017a2733600d849a91180c0f97fa7fb375bbe",
6876
+ "vocoderBatchSize": 4096,
6877
+ "outputType": "play",
6878
+ "cacheGenerated": true,
6879
+ "autoInferEnable": false,
6880
+ "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
6881
+ "hardBreakTime": 500,
6882
+ "completionTemperature": 0.1,
6883
+ "completionRepetitionPenalty": 1.1,
6884
+ "completionTopK": 40,
6885
+ "completionTopP": 0.9,
6886
+ "completionMinP": 0.05,
6887
+ "useGuideToken": false,
6888
+ "contextSize": 8192,
6889
+ "batchSize": 8192,
6890
+ "microBatchSize": 512,
6891
+ "maxThreads": 2,
6892
+ "accelVariant": "default",
6893
+ "mainGpu": 0,
6894
+ "gpuLayers": 0,
6895
+ "useMlock": true,
6896
+ "useMmap": true,
6897
+ "useFlashAttn": false
6898
+ }
6899
+ */
6900
+ property?: {
6901
+ /* Initialize the TTS context on generator initialization */
6902
+ init?: boolean | DataLink
6903
+ /* The URL or path of model
6904
+ GGUF format models are used; please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
6905
+ modelUrl?: string | DataLink
6906
+ /* Hash type of model */
6907
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6908
+ /* Hash of model */
6909
+ modelHash?: string | DataLink
6910
+ /* The URL or path of vocoder model */
6911
+ vocoderUrl?: string | DataLink
6912
+ /* Hash type of vocoder model */
6913
+ vocoderHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6914
+ /* Hash of vocoder model */
6915
+ vocoderHash?: string | DataLink
6916
+ /* Batch size of vocoder model */
6917
+ vocoderBatchSize?: number | DataLink
6918
+ /* Output mode */
6919
+ outputType?: 'play' | 'file' | DataLink
6920
+ /* Enable cache for generated audio */
6921
+ cacheGenerated?: boolean | DataLink
6922
+ /* Text to generate */
6923
+ prompt?: string | DataLink
6924
+ /* Speaker JSON */
6925
+ speaker?: {} | DataLink
6926
+ /* Auto inference when prompt changes */
6927
+ autoInferEnable?: boolean | DataLink
6928
+ /* Segmentation rule for auto inference */
6929
+ softBreakRegex?: string | DataLink
6930
+ /* Time to force inference when softBreakRegex is not satisfied */
6931
+ hardBreakTime?: number | DataLink
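 /* Illustrative sketch of the auto-inference settings above (values mirror the documented defaults):
    with autoInferEnable on, a segment is synthesized whenever softBreakRegex matches the streamed
    prompt, or after hardBreakTime elapses without a match.
    autoInferEnable: true,
    softBreakRegex: "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
    hardBreakTime: 500
 */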
6932
+ /* Temperature */
6933
+ completionTemperature?: number | DataLink
6934
+ /* Repetition Penalty */
6935
+ completionRepetitionPenalty?: number | DataLink
6936
+ /* Top K sampling */
6937
+ completionTopK?: number | DataLink
6938
+ /* Top P sampling */
6939
+ completionTopP?: number | DataLink
6940
+ /* Min P sampling */
6941
+ completionMinP?: number | DataLink
6942
+ /* Set the random number generator (RNG) seed (default: -1, -1 = random seed) */
6943
+ completionSeed?: number | DataLink
6944
+ /* Number of tokens to predict */
6945
+ completionPredict?: number | DataLink
6946
+ /* Enable guide token to help prevent hallucinations by forcing the TTS to use the correct words. */
6947
+ useGuideToken?: boolean | DataLink
6948
+ /* Context size, for OutTTS recommended 4096 ~ 8192 (Default to 4096) */
6949
+ contextSize?: number | DataLink
6950
+ /* Logical batch size for prompt processing */
6951
+ batchSize?: number | DataLink
6952
+ /* Physical batch size for prompt processing */
6953
+ microBatchSize?: number | DataLink
6954
+ /* Number of threads */
6955
+ maxThreads?: number | DataLink
6956
+ /* Accelerator variant (Only for desktop)
6957
+ `default` - CPU / Metal (macOS)
6958
+ `vulkan` - Use Vulkan
6959
+ `cuda` - Use CUDA */
6960
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
6961
+ /* Main GPU index */
6962
+ mainGpu?: number | DataLink
6963
+ /* Number of GPU layers (NOTE: Currently not supported for Android) */
6964
+ gpuLayers?: number | DataLink
6965
+ /* Use memory lock */
6966
+ useMlock?: boolean | DataLink
6967
+ /* Use mmap */
6968
+ useMmap?: boolean | DataLink
6969
+ /* Use Flash Attention for inference (Recommended with GPU enabled) */
6970
+ useFlashAttn?: boolean | DataLink
6971
+ }
6972
+ events?: {
6973
+ /* Event triggered when state change */
6974
+ onContextStateChange?: Array<EventAction>
6975
+ /* Event triggered when error occurs */
6976
+ onError?: Array<EventAction>
6977
+ }
6978
+ outlets?: {
6979
+ /* Context state */
6980
+ contextState?: () => Data
6981
+ /* Generated audio file */
6982
+ generatedAudio?: () => Data
6983
+ /* Generated audio file is playing */
6984
+ generatedAudioPlaying?: () => Data
6985
+ }
6986
+ }
6987
+
6988
+ /* Local Text-to-Speech (TTS) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
6989
+ You can use any converted model on HuggingFace. */
6990
+ export type GeneratorGGMLTTS = Generator &
6991
+ GeneratorGGMLTTSDef & {
6992
+ templateKey: 'GENERATOR_GGML_TTS'
6993
+ switches: Array<
6994
+ SwitchDef &
6995
+ GeneratorGGMLTTSDef & {
6996
+ conds?: Array<{
6997
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
6998
+ cond:
6999
+ | SwitchCondInnerStateCurrentCanvas
7000
+ | SwitchCondData
7001
+ | {
7002
+ __typename: 'SwitchCondInnerStateOutlet'
7003
+ outlet: 'contextState' | 'generatedAudio' | 'generatedAudioPlaying'
7004
+ value: any
7005
+ }
7006
+ }>
7007
+ }
7008
+ >
7009
+ }
7010
+
7011
+ /* Load the model */
7012
+ export type GeneratorRerankerActionLoadModel = Action & {
7013
+ __actionName: 'GENERATOR_RERANKER_LOAD_MODEL'
7014
+ }
7015
+
7016
+ /* Rerank documents based on query relevance */
7017
+ export type GeneratorRerankerActionRerank = ActionWithParams & {
7018
+ __actionName: 'GENERATOR_RERANKER_RERANK'
7019
+ params?: Array<
7020
+ | {
7021
+ input: 'query'
7022
+ value?: string | DataLink | EventProperty
7023
+ mapping?: string
7024
+ }
7025
+ | {
7026
+ input: 'documents'
7027
+ value?: Array<any> | DataLink | EventProperty
7028
+ mapping?: string
7029
+ }
7030
+ >
7031
+ }
7032
+
7033
+ /* Release context */
7034
+ export type GeneratorRerankerActionReleaseContext = Action & {
7035
+ __actionName: 'GENERATOR_RERANKER_RELEASE_CONTEXT'
7036
+ }
7037
+
7038
+ interface GeneratorRerankerDef {
7039
+ /*
7040
+ Default property:
7041
+ {
7042
+ "init": false,
7043
+ "contextSize": 512,
7044
+ "batchSize": 512,
7045
+ "uBatchSize": 512,
7046
+ "accelVariant": "default",
7047
+ "mainGpu": 0,
7048
+ "gpuLayers": 0,
7049
+ "useMlock": true,
7050
+ "useMmap": true,
7051
+ "normalize": 1
7052
+ }
7053
+ */
7054
+ property?: {
7055
+ /* Initialize the Reranker context on generator initialization */
7056
+ init?: boolean | DataLink
7057
+ /* The URL or path of reranker model (GGUF format) */
7058
+ modelUrl?: string | DataLink
7059
+ /* Hash of reranker model */
7060
+ modelHash?: string | DataLink
7061
+ /* Hash type of reranker model */
7062
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
7063
+ /* Context size (0 ~ 4096) (Default to 512) */
7064
+ contextSize?: number | DataLink
7065
+ /* Logical batch size for processing (default: 512) */
7066
+ batchSize?: number | DataLink
7067
+ /* Physical maximum batch size (default: 512) */
7068
+ uBatchSize?: number | DataLink
7069
+ /* Accelerator variant (default: default) */
7070
+ accelVariant?:
7071
+ | 'default'
7072
+ | 'avx'
7073
+ | 'avx2'
7074
+ | 'avx512'
7075
+ | 'metal'
7076
+ | 'opencl'
7077
+ | 'vulkan'
7078
+ | 'cuda'
7079
+ | 'rocm'
7080
+ | DataLink
7081
+ /* Main GPU index (default: 0) */
7082
+ mainGpu?: number | DataLink
7083
+ /* Number of layers to store in VRAM (default: 0) */
7084
+ gpuLayers?: number | DataLink
7085
+ /* Maximum number of threads to use (default: auto) */
7086
+ maxThreads?: number | DataLink
7087
+ /* Use mlock to keep model in memory (default: true) */
7088
+ useMlock?: boolean | DataLink
7089
+ /* Use mmap for model loading (default: true) */
7090
+ useMmap?: boolean | DataLink
7091
+ /* Query text for reranking */
7092
+ query?: string | DataLink
7093
+ /* Array of documents to rerank */
7094
+ documents?: Array<string | DataLink> | DataLink
7095
+ /* Normalize reranking scores (default: from model config) */
7096
+ normalize?: number | boolean | DataLink
7097
+ /* Maximum number of documents to return (default: unlimited) */
7098
+ topK?: number | DataLink
7099
+ }
7100
+ events?: {
7101
+ /* Event triggered when the reranker context state changes (loading, ready, error, released) */
7102
+ onContextStateChange?: Array<EventAction>
7103
+ /* Event triggered when an error occurs during reranker operations */
7104
+ onError?: Array<EventAction>
7105
+ }
7106
+ outlets?: {
7107
+ /* Current state of the reranker context (loading, ready, error, released) */
7108
+ contextState?: () => Data
7109
+ /* Loading progress of the reranker model (0-100) */
7110
+ contextLoadProgress?: () => Data
7111
+ /* Detailed information about the reranker context including instance ID and processing status */
7112
+ contextDetails?: () => Data
7113
+ /* Result of the reranking operation containing scored and ranked documents */
7114
+ rerankResult?: () => Data
7115
+ /* Boolean indicating whether the reranker is currently processing a request */
7116
+ isProcessing?: () => Data
7117
+ }
7118
+ }
7119
+
7120
+ /* Local rerank based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
7121
+
7122
+ ## Notice
7123
+ - The device RAM must be larger than 8GB
7124
+ - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
7125
+ - macOS: Supported GPU acceleration, recommended use M1+ chip device
7126
+ - Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
7127
+ - Linux / Windows: Supported GPU acceleration, currently only Vulkan backend available */
7128
+ export type GeneratorReranker = Generator &
7129
+ GeneratorRerankerDef & {
7130
+ templateKey: 'GENERATOR_RERANKER'
7131
+ switches: Array<
7132
+ SwitchDef &
7133
+ GeneratorRerankerDef & {
7134
+ conds?: Array<{
7135
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
7136
+ cond:
7137
+ | SwitchCondInnerStateCurrentCanvas
7138
+ | SwitchCondData
7139
+ | {
7140
+ __typename: 'SwitchCondInnerStateOutlet'
7141
+ outlet:
7142
+ | 'contextState'
7143
+ | 'contextLoadProgress'
7144
+ | 'contextDetails'
7145
+ | 'rerankResult'
7146
+ | 'isProcessing'
7147
+ value: any
7148
+ }
7149
+ }>
7150
+ }
7151
+ >
7152
+ }
7153
+
7154
+ /* Load the model */
7155
+ export type GeneratorQnnLlmActionLoadModel = Action & {
7156
+ __actionName: 'GENERATOR_QNN_LLM_LOAD_MODEL'
7157
+ }
7158
+
7159
+ /* Abort model download */
7160
+ export type GeneratorQnnLlmActionAbortModelDownload = Action & {
7161
+ __actionName: 'GENERATOR_QNN_LLM_ABORT_MODEL_DOWNLOAD'
7162
+ }
7163
+
7164
+ /* Generate text */
7165
+ export type GeneratorQnnLlmActionGenerate = ActionWithParams & {
7166
+ __actionName: 'GENERATOR_QNN_LLM_GENERATE'
7167
+ params?: Array<
7168
+ | {
7169
+ input: 'prompt'
7170
+ value?: string | DataLink | EventProperty
7171
+ mapping?: string
7172
+ }
7173
+ | {
7174
+ input: 'messages'
7175
+ value?: Array<any> | DataLink | EventProperty
7176
+ mapping?: string
7177
+ }
7178
+ | {
7179
+ input: 'tools'
7180
+ value?: Array<any> | DataLink | EventProperty
7181
+ mapping?: string
7182
+ }
7183
+ >
7184
+ }
7185
+
7186
+ /* Abort generation */
7187
+ export type GeneratorQnnLlmActionAbortGeneration = Action & {
7188
+ __actionName: 'GENERATOR_QNN_LLM_ABORT_GENERATION'
7189
+ }
7190
+
7191
+ /* Release context */
7192
+ export type GeneratorQnnLlmActionReleaseContext = Action & {
7193
+ __actionName: 'GENERATOR_QNN_LLM_RELEASE_CONTEXT'
7194
+ }
7195
+
7196
+ interface GeneratorQnnLlmDef {
7197
+ /*
7198
+ Default property:
7199
+ {
7200
+ "modelType": "Llama 3.2 3B Chat",
7201
+ "chatFormat": "Llama 3.x",
7202
+ "toolsInUserMessage": true,
7203
+ "toolCallParser": "llama3_json",
7204
+ "toolChoice": "auto",
7205
+ "parallelToolCalls": false,
7206
+ "greedy": false
7207
+ }
7208
+ */
7209
+ property?: {
7210
+ /* Load model context when generator is initialized */
7211
+ init?: boolean | DataLink
7212
+ /* Model type */
7213
+ modelType?:
7214
+ | 'Llama 3 8B Chat'
7215
+ | 'Llama 3.1 8B Chat'
7216
+ | 'Llama 3.2 3B Chat'
7217
+ | 'Mistral 7B Instruct v0.3'
7218
+ | 'Qwen 2 7B Chat'
7219
+ | 'Phi 3.5 Mini'
7220
+ | 'Granite v3.1 8B Instruct'
7221
+ | 'Custom'
7222
+ | DataLink
7223
+ /* SOC model */
7224
+ socModel?: 'X Elite' | 'X Plus' | '8 Elite' | '8 Gen 3' | 'QCS8550' | DataLink
7225
+ /* Custom model base URL
7226
+ The URL directory should contain a `config.json` (model config) file, `model_part_*_of_*.bin` files (model split files), and a `tokenizer.json` (tokenizer config) file. */
7227
+ customModelUrl?: string | DataLink
7228
+ /* Custom model split parts */
7229
+ customModelSplitParts?: number | DataLink
7230
+ /* Chat format */
7231
+ chatFormat?:
7232
+ | 'Llama 2'
7233
+ | 'Llama 3'
7234
+ | 'Llama 3.x'
7235
+ | 'Mistral v0.3'
7236
+ | 'Qwen 2'
7237
+ | 'Custom'
7238
+ | DataLink
7239
+ /* Custom chat format template */
7240
+ customChatFormat?: string | DataLink
7241
+ /* Put tools in user message */
7242
+ toolsInUserMessage?: boolean | DataLink
7243
+ /* Prompt to generate */
7244
+ prompt?: string | DataLink
7245
+ /* Chat messages */
7246
+ messages?:
7247
+ | Array<
7248
+ | DataLink
7249
+ | {
7250
+ role?: string | DataLink
7251
+ content?: string | DataLink
7252
+ }
7253
+ >
7254
+ | DataLink
7255
+ /* Stop words */
7256
+ stopWords?: Array<string | DataLink> | DataLink
7257
+ /* Tool call parser */
7258
+ toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
7259
+ /* Tools for chat mode using OpenAI-compatible function calling format
7260
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
7261
+ See: https://platform.openai.com/docs/guides/function-calling */
7262
+ tools?: Array<{} | DataLink> | DataLink
7263
+ /* Tool choice for chat mode */
7264
+ toolChoice?: 'none' | 'auto' | 'required' | DataLink
7265
+ /* Enable parallel tool calls */
7266
+ parallelToolCalls?: boolean | DataLink
7267
+ /* Number of threads, -1 to use n-threads from model config */
7268
+ nThreads?: number | DataLink
7269
+ /* Temperature, -1 to use temperature from model config */
7270
+ temperature?: number | DataLink
7271
+ /* Seed, -1 to use seed from model config */
7272
+ seed?: number | DataLink
7273
+ /* Top K, -1 to use top-k from model config */
7274
+ topK?: number | DataLink
7275
+ /* Top P, -1 to use top-p from model config */
7276
+ topP?: number | DataLink
7277
+ /* Greedy, use greedy sampling */
7278
+ greedy?: boolean | DataLink
7279
+ }
7280
+ events?: {
7281
+ /* Event triggered when context state changes */
7282
+ onContextStateChange?: Array<EventAction>
7283
+ /* Event triggered when generate is done */
7284
+ onGenerate?: Array<EventAction>
7285
+ /* Event triggered on get function call request */
7286
+ onFunctionCall?: Array<EventAction>
7287
+ /* Event triggered when error occurs */
7288
+ onError?: Array<EventAction>
7289
+ }
7290
+ outlets?: {
7291
+ /* Context state */
7292
+ contextState?: () => Data
7293
+ /* Generation result */
7294
+ result?: () => Data
7295
+ /* Full context (Prompt + Generation Result) */
7296
+ fullContext?: () => Data
7297
+ /* Last function call details */
7298
+ lastFunctionCall?: () => Data
7299
+ /* Completion details */
7300
+ completionDetails?: () => Data
7301
+ }
7302
+ }
7303
+
7304
+ /* Local LLM inference using Qualcomm AI Engine */
7305
+ export type GeneratorQnnLlm = Generator &
7306
+ GeneratorQnnLlmDef & {
7307
+ templateKey: 'GENERATOR_QNN_LLM'
7308
+ switches: Array<
7309
+ SwitchDef &
7310
+ GeneratorQnnLlmDef & {
7311
+ conds?: Array<{
7312
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
7313
+ cond:
7314
+ | SwitchCondInnerStateCurrentCanvas
7315
+ | SwitchCondData
7316
+ | {
7317
+ __typename: 'SwitchCondInnerStateOutlet'
7318
+ outlet:
7319
+ | 'contextState'
7320
+ | 'result'
7321
+ | 'fullContext'
7322
+ | 'lastFunctionCall'
7323
+ | 'completionDetails'
7324
+ value: any
7325
+ }
7326
+ }>
7327
+ }
7328
+ >
7329
+ }
7330
+
7331
+ /* Run text completion */
7332
+ export type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
7333
+ __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
7334
+ params?: Array<
7335
+ | {
7336
+ input: 'messages'
7337
+ value?: Array<any> | DataLink | EventProperty
7338
+ mapping?: string
7339
+ }
7340
+ | {
7341
+ input: 'maxTokens'
7342
+ value?: number | DataLink | EventProperty
7343
+ mapping?: string
7344
+ }
7345
+ | {
7346
+ input: 'temperature'
7347
+ value?: number | DataLink | EventProperty
7348
+ mapping?: string
7349
+ }
7350
+ | {
7351
+ input: 'topP'
7352
+ value?: number | DataLink | EventProperty
7353
+ mapping?: string
7354
+ }
7355
+ | {
5945
7356
  input: 'frequencyPenalty'
5946
7357
  value?: number | DataLink | EventProperty
5947
7358
  mapping?: string
@@ -5989,15 +7400,16 @@ interface GeneratorOpenAILLMDef {
5989
7400
  Default property:
5990
7401
  {
5991
7402
  "apiEndpoint": "https://api.openai.com/v1",
5992
- "model": "gpt-4o-mini",
7403
+ "model": "gpt-4o",
5993
7404
  "completionMessages": [
5994
- null
7405
+ {
7406
+ "role": "system",
7407
+ "content": "You are a helpful assistant."
7408
+ }
5995
7409
  ],
5996
7410
  "completionMaxTokens": 1024,
5997
7411
  "completionTemperature": 1,
5998
7412
  "completionTopP": 1,
5999
- "completionFrequencyPenalty": 0,
6000
- "completionPresencePenalty": 0,
6001
7413
  "completionStop": []
6002
7414
  }
6003
7415
  */
@@ -6027,8 +7439,10 @@ Default property:
6027
7439
  }
6028
7440
  >
6029
7441
  | DataLink
6030
- /* Tools for chat mode */
6031
- completionTools?: {} | DataLink
7442
+ /* Tools for chat mode following OpenAI function calling format
7443
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
7444
+ See: https://platform.openai.com/docs/guides/function-calling */
7445
+ completionTools?: Array<{} | DataLink> | DataLink
6032
7446
  /* Enable parallel tool calls */
6033
7447
  completionParallelToolCalls?: boolean | DataLink
6034
7448
  /* Tool choice for chat mode */
@@ -6084,7 +7498,11 @@ Default property:
6084
7498
  - Compatible with OpenAI API format
6085
7499
  - Supports function calling
6086
7500
  - Streaming responses
6087
- - Custom API endpoints */
7501
+ - Custom API endpoints, like
7502
+ - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
7503
+ - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
7504
+ - Gemini API: https://ai.google.dev/gemini-api/docs/openai
7505
+ - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server */
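 /* Illustrative sketch (assumptions, not package defaults): pointing this generator at a local
    OpenAI-compatible server. The URL assumes a llama.cpp `llama-server` listening on port 8080,
    and the model name is whatever that server exposes:
    property: {
      apiEndpoint: 'http://localhost:8080/v1',
      model: 'local-model',
    }
 */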
6088
7506
  export type GeneratorOpenAILLM = Generator &
6089
7507
  GeneratorOpenAILLMDef & {
6090
7508
  templateKey: 'GENERATOR_OPENAI_LLM'
@@ -6106,6 +7524,104 @@ export type GeneratorOpenAILLM = Generator &
6106
7524
  >
6107
7525
  }
6108
7526
 
7527
+ /* Generate audio */
7528
+ export type GeneratorOpenAiTTSActionGenerate = ActionWithParams & {
7529
+ __actionName: 'GENERATOR_OPENAI_TTS_GENERATE'
7530
+ params?: Array<{
7531
+ input: 'text'
7532
+ value?: string | DataLink | EventProperty
7533
+ mapping?: string
7534
+ }>
7535
+ }
7536
+
7537
+ /* Clean cache */
7538
+ export type GeneratorOpenAiTTSActionCleanCache = Action & {
7539
+ __actionName: 'GENERATOR_OPENAI_TTS_CLEAN_CACHE'
7540
+ }
7541
+
7542
+ interface GeneratorOpenAiTTSDef {
7543
+ /*
7544
+ Default property:
7545
+ {
7546
+ "apiEndpoint": "https://api.openai.com/v1",
7547
+ "model": "tts-1",
7548
+ "voice": "alloy",
7549
+ "speed": 1,
7550
+ "outputType": "play",
7551
+ "playbackVolume": 100,
7552
+ "cacheGenerated": true,
7553
+ "autoInferEnable": false,
7554
+ "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
7555
+ "hardBreakTime": 500
7556
+ }
7557
+ */
7558
+ property?: {
7559
+ /* API endpoint URL */
7560
+ apiEndpoint?: string | DataLink
7561
+ /* OpenAI API Key */
7562
+ apiKey?: string | DataLink
7563
+ /* OpenAI TTS model */
7564
+ model?: string | DataLink
7565
+ /* Voice to use
7566
+ Select a voice from https://openai.fm (default: alloy) */
7567
+ voice?: string | DataLink
7568
+ /* Additional instructions for the speech generation */
7569
+ instructions?: string | DataLink
7570
+ /* Speed of the generated audio */
7571
+ speed?: number | DataLink
7572
+ /* Output mode */
7573
+ outputType?: 'play' | 'file' | DataLink
7574
+ /* Playback volume (0 - 100) */
7575
+ playbackVolume?: number | DataLink
7576
+ /* Enable cache for generated audio */
7577
+ cacheGenerated?: boolean | DataLink
7578
+ /* Text to generate */
7579
+ prompt?: string | DataLink
7580
+ /* Auto inference when prompt changes */
7581
+ autoInferEnable?: boolean | DataLink
7582
+ /* Segmentation rule for auto inference */
7583
+ softBreakRegex?: string | DataLink
7584
+ /* Time to force inference when softBreakRegex is not satisfied */
7585
+ hardBreakTime?: number | DataLink
7586
+ }
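 /* Illustrative sketch of the properties above (values are assumptions, not verified defaults):
    property: {
      apiEndpoint: 'https://api.openai.com/v1',
      model: 'tts-1',
      voice: 'alloy',
      speed: 1,
      outputType: 'file',  // write the generated audio to a file instead of playing it
    }
 */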
7587
+ events?: {
7588
+ /* Event triggered when context state changes */
7589
+ onContextStateChange?: Array<EventAction>
7590
+ /* Event triggered when error occurs */
7591
+ onError?: Array<EventAction>
7592
+ }
7593
+ outlets?: {
7594
+ /* Context state */
7595
+ contextState?: () => Data
7596
+ /* Generated audio file */
7597
+ generatedAudio?: () => Data
7598
+ /* Generated audio file is playing */
7599
+ generatedAudioPlaying?: () => Data
7600
+ }
7601
+ }
7602
+
7603
+ /* Generate speech from text using OpenAI's Text-to-Speech API */
7604
+ export type GeneratorOpenAiTTS = Generator &
7605
+ GeneratorOpenAiTTSDef & {
7606
+ templateKey: 'GENERATOR_OPENAI_TTS'
7607
+ switches: Array<
7608
+ SwitchDef &
7609
+ GeneratorOpenAiTTSDef & {
7610
+ conds?: Array<{
7611
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
7612
+ cond:
7613
+ | SwitchCondInnerStateCurrentCanvas
7614
+ | SwitchCondData
7615
+ | {
7616
+ __typename: 'SwitchCondInnerStateOutlet'
7617
+ outlet: 'contextState' | 'generatedAudio' | 'generatedAudioPlaying'
7618
+ value: any
7619
+ }
7620
+ }>
7621
+ }
7622
+ >
7623
+ }
7624
+
6109
7625
  /* Add a message to the assistant */
6110
7626
  export type GeneratorAssistantActionAddMessage = ActionWithParams & {
6111
7627
  __actionName: 'GENERATOR_ASSISTANT_ADD_MESSAGE'
@@ -6120,6 +7636,11 @@ export type GeneratorAssistantActionAddMessage = ActionWithParams & {
6120
7636
  value?: string | DataLink | EventProperty
6121
7637
  mapping?: string
6122
7638
  }
7639
+ | {
7640
+ input: 'image'
7641
+ value?: string | DataLink | EventProperty
7642
+ mapping?: string
7643
+ }
6123
7644
  | {
6124
7645
  input: 'payload'
6125
7646
  value?: {} | DataLink | EventProperty
@@ -6163,6 +7684,55 @@ export type GeneratorAssistantActionAddMessage = ActionWithParams & {
6163
7684
  >
6164
7685
  }
6165
7686
 
7687
+ /* Initialize messages from MCP prompt */
7688
+ export type GeneratorAssistantActionInitMcpPrompt = ActionWithParams & {
7689
+ __actionName: 'GENERATOR_ASSISTANT_INIT_MCP_PROMPT'
7690
+ params?: Array<
7691
+ | {
7692
+ input: 'mcpClientName'
7693
+ value?: string | DataLink | EventProperty
7694
+ mapping?: string
7695
+ }
7696
+ | {
7697
+ input: 'mcpPromptName'
7698
+ value?: string | DataLink | EventProperty
7699
+ mapping?: string
7700
+ }
7701
+ | {
7702
+ input: 'mcpArguments'
7703
+ value?: {} | DataLink | EventProperty
7704
+ mapping?: string
7705
+ }
7706
+ | {
7707
+ input: 'firstMessageAsSystem'
7708
+ value?: boolean | DataLink | EventProperty
7709
+ mapping?: string
7710
+ }
7711
+ >
7712
+ }
7713
+
7714
+ /* Add messages from MCP prompt */
7715
+ export type GeneratorAssistantActionAddMcpPromptMessage = ActionWithParams & {
7716
+ __actionName: 'GENERATOR_ASSISTANT_ADD_MCP_PROMPT_MESSAGE'
7717
+ params?: Array<
7718
+ | {
7719
+ input: 'mcpClientName'
7720
+ value?: string | DataLink | EventProperty
7721
+ mapping?: string
7722
+ }
7723
+ | {
7724
+ input: 'mcpPromptName'
7725
+ value?: string | DataLink | EventProperty
7726
+ mapping?: string
7727
+ }
7728
+ | {
7729
+ input: 'mcpArguments'
7730
+ value?: {} | DataLink | EventProperty
7731
+ mapping?: string
7732
+ }
7733
+ >
7734
+ }
7735
+
6166
7736
  /* Update a message at a specific index */
6167
7737
  export type GeneratorAssistantActionUpdateMessageAtIndex = ActionWithParams & {
6168
7738
  __actionName: 'GENERATOR_ASSISTANT_UPDATE_MESSAGE_AT_INDEX'
@@ -6177,6 +7747,11 @@ export type GeneratorAssistantActionUpdateMessageAtIndex = ActionWithParams & {
6177
7747
  value?: string | DataLink | EventProperty
6178
7748
  mapping?: string
6179
7749
  }
7750
+ | {
7751
+ input: 'image'
7752
+ value?: string | DataLink | EventProperty
7753
+ mapping?: string
7754
+ }
6180
7755
  | {
6181
7756
  input: 'payload'
6182
7757
  value?: {} | DataLink | EventProperty
@@ -6204,6 +7779,11 @@ export type GeneratorAssistantActionAddAudioMessage = ActionWithParams & {
6204
7779
  value?: string | DataLink | EventProperty
6205
7780
  mapping?: string
6206
7781
  }
7782
+ | {
7783
+ input: 'image'
7784
+ value?: string | DataLink | EventProperty
7785
+ mapping?: string
7786
+ }
6207
7787
  | {
6208
7788
  input: 'useFileSearch'
6209
7789
  value?: boolean | DataLink | EventProperty
@@ -6303,6 +7883,11 @@ export type GeneratorAssistantActionUpdateAudioMessageAtIndex = ActionWithParams
6303
7883
  value?: string | DataLink | EventProperty
6304
7884
  mapping?: string
6305
7885
  }
7886
+ | {
7887
+ input: 'image'
7888
+ value?: string | DataLink | EventProperty
7889
+ mapping?: string
7890
+ }
6306
7891
  | {
6307
7892
  input: 'payload'
6308
7893
  value?: {} | DataLink | EventProperty
@@ -6332,8 +7917,25 @@ export type GeneratorAssistantActionReset = Action & {
6332
7917
  }
6333
7918
 
6334
7919
  /* Submit the assistant */
6335
- export type GeneratorAssistantActionSubmit = Action & {
7920
+ export type GeneratorAssistantActionSubmit = ActionWithParams & {
6336
7921
  __actionName: 'GENERATOR_ASSISTANT_SUBMIT'
7922
+ params?: Array<
7923
+ | {
7924
+ input: 'continueOnToolCallConfirm'
7925
+ value?: boolean | DataLink | EventProperty
7926
+ mapping?: string
7927
+ }
7928
+ | {
7929
+ input: 'continueOnToolCallStrategy'
7930
+ value?: 'never' | 'success' | 'always' | DataLink | EventProperty
7931
+ mapping?: string
7932
+ }
7933
+ | {
7934
+ input: 'continueOnToolCallLimit'
7935
+ value?: number | DataLink | EventProperty
7936
+ mapping?: string
7937
+ }
7938
+ >
6337
7939
  }
6338
7940
 
6339
7941
  /* Cancel the assistant responding */
@@ -6341,16 +7943,82 @@ export type GeneratorAssistantActionCancel = Action & {
6341
7943
  __actionName: 'GENERATOR_ASSISTANT_CANCEL'
6342
7944
  }
6343
7945
 
7946
+ /* Check the enabled MCP clients' connection status and available tools */
7947
+ export type GeneratorAssistantActionCheckMcpServers = Action & {
7948
+ __actionName: 'GENERATOR_ASSISTANT_CHECK_MCP_SERVERS'
7949
+ }
7950
+
7951
+ /* Insert an MCP resource as a new assistant message */
7952
+ export type GeneratorAssistantActionInsertMcpResource = ActionWithParams & {
7953
+ __actionName: 'GENERATOR_ASSISTANT_INSERT_MCP_RESOURCE'
7954
+ params?: Array<
7955
+ | {
7956
+ input: 'mcpClientName'
7957
+ value?: string | DataLink | EventProperty
7958
+ mapping?: string
7959
+ }
7960
+ | {
7961
+ input: 'mcpResourceUri'
7962
+ value?: string | DataLink | EventProperty
7963
+ mapping?: string
7964
+ }
7965
+ | {
7966
+ input: 'mcpVariables'
7967
+ value?: {} | DataLink | EventProperty
7968
+ mapping?: string
7969
+ }
7970
+ | {
7971
+ input: 'role'
7972
+ value?: string | DataLink | EventProperty
7973
+ mapping?: string
7974
+ }
7975
+ >
7976
+ }
7977
+
7978
+ /* Summarize messages based on the conversation
7979
+
7980
+ Note: Summary uses the same LLM context size, so it is recommended only when the system prompt (in Initial Messages) is long; otherwise it may still fail when the context is full (Ctx Shift set to NO). */
7981
+ export type GeneratorAssistantActionSummaryMessages = ActionWithParams & {
7982
+ __actionName: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES'
7983
+ params?: Array<
7984
+ | {
7985
+ input: 'summaryMessages'
7986
+ value?: Array<any> | DataLink | EventProperty
7987
+ mapping?: string
7988
+ }
7989
+ | {
7990
+ input: 'summarySessionKey'
7991
+ value?: string | DataLink | EventProperty
7992
+ mapping?: string
7993
+ }
7994
+ >
7995
+ }
7996
+
6344
7997
  interface GeneratorAssistantDef {
6345
7998
  /*
6346
7999
  Default property:
6347
8000
  {
6348
8001
  "initialMessages": [
6349
- null
8002
+ {
8003
+ "role": "system",
8004
+ "content": "You are a helpful assistant."
8005
+ }
6350
8006
  ],
6351
8007
  "cacheMessages": false,
6352
8008
  "llmLivePolicy": "only-in-use",
6353
8009
  "llmSessionKey": "default-assistant",
8010
+ "llmAutoSummaryMessages": false,
8011
+ "llmSummaryMessages": [
8012
+ {
8013
+ "role": "system",
8014
+ "content": "You are a helpful assistant specialized in summarizing conversations. Create a concise summary of the conversation that captures the key points while maintaining important context. The summary should be clear, accurate, and briefer than the original conversation."
8015
+ },
8016
+ {
8017
+ "role": "user",
8018
+ "content": "Please summarize the following conversation into a concise system message that can replace the previous conversation context while maintaining all important information. Here is the conversation to summarize:\n\n"
8019
+ }
8020
+ ],
8021
+ "llmSummarySessionKey": "assistant-default-summary",
6354
8022
  "fileSearchEnabled": false,
6355
8023
  "fileSearchLivePolicy": "only-in-use",
6356
8024
  "sttEnabled": true,
@@ -6374,12 +8042,30 @@ Default property:
6374
8042
  | DataLink
6375
8043
  /* Whether to cache messages */
6376
8044
  cacheMessages?: boolean | DataLink
6377
- /* LLM Generator (Currently only support `LLM (GGML)` generator) */
8045
+ /* LLM Generator (Supports `LLM (GGML)` and `OpenAI LLM` generators) */
6378
8046
  llmGeneratorId?: string | DataLink
6379
- /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use. */
8047
+ /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use.
8048
+
8049
+ Note: For LLM (Qualcomm AI Engine), it is recommended to use `manual` and keep the context loaded constantly. */
6380
8050
  llmLivePolicy?: 'only-in-use' | 'manual' | DataLink
6381
8051
  /* LLM main session key */
6382
8052
  llmSessionKey?: string | DataLink
8053
+ /* Auto Summary Messages (Automatically summarize messages when the LLM context is full or content gets truncated, currently only supported with LLM (GGML) generators)
8054
+
8055
+ Note: Summary uses the same LLM context size, so it is recommended only when the system prompt (in Initial Messages) is long; otherwise it may still fail when the context is full (Ctx Shift set to NO). */
8056
+ llmAutoSummaryMessages?: boolean | DataLink
8057
+ /* Summary Messages (Messages used for summarization prompt, conversation will be appended to the last message) */
8058
+ llmSummaryMessages?:
8059
+ | Array<
8060
+ | DataLink
8061
+ | {
8062
+ role?: string | DataLink
8063
+ content?: string | DataLink
8064
+ }
8065
+ >
8066
+ | DataLink
8067
+ /* Summary Session Key (Custom session key for summarization) */
8068
+ llmSummarySessionKey?: string | DataLink
6383
8069
  /* File Search (Vector Store) Enabled */
6384
8070
  fileSearchEnabled?: boolean | DataLink
6385
8071
  /* File Search (Vector Store) Generator */
@@ -6392,18 +8078,29 @@ Default property:
6392
8078
  fileSearchThreshold?: number | DataLink
6393
8079
  /* File Search Ignore Threshold. (Default: false) */
6394
8080
  fileSearchIgnoreThreshold?: boolean | DataLink
6395
- /* STT Generator use for transcribing audio message (Currently only support `STT (GGML)` generator) */
8081
+ /* STT Generator used for transcribing audio messages (Supports `STT (GGML)` generators) */
6396
8082
  sttGeneratorId?: string | DataLink
6397
8083
  /* STT Enabled */
6398
8084
  sttEnabled?: boolean | DataLink
6399
8085
  /* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when the assistant is not in use. */
6400
8086
  sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
6401
- /* TTS Generator use for generating LLM response audio message (Currently only support `TTS (ONNX)` generator) */
8087
+ /* TTS Generator used for generating LLM response audio messages (Supports `TTS (ONNX)` and `OpenAI TTS` generators) */
6402
8088
  ttsGeneratorId?: string | DataLink
6403
8089
  /* TTS Enabled */
6404
8090
  ttsEnabled?: boolean | DataLink
6405
8091
  /* TTS Live Policy. If the policy is `only-in-use`, the TTS context will be released when the assistant is not in use. */
6406
8092
  ttsLivePolicy?: 'only-in-use' | 'manual' | DataLink
8093
+ /* MCP Generators (Add a unique name if generator name properties are duplicated) */
8094
+ mcpGenerators?:
8095
+ | Array<
8096
+ | DataLink
8097
+ | {
8098
+ generatorId?: string | DataLink
8099
+ name?: string | DataLink
8100
+ enabled?: boolean | DataLink
8101
+ }
8102
+ >
8103
+ | DataLink
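 /* Illustrative sketch: the generatorId and name below are hypothetical.
    mcpGenerators: [{ generatorId: 'mcp-generator-1', name: 'files', enabled: true }]
 */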
6407
8104
  }
6408
8105
  events?: {
6409
8106
  /* Error event */
@@ -6426,6 +8123,8 @@ Default property:
6426
8123
  files?: () => Data
6427
8124
  /* Messages of the assistant */
6428
8125
  messages?: () => Data
8126
+ /* MCP servers status and available tools */
8127
+ mcpServers?: () => Data
6429
8128
  }
6430
8129
  }
6431
8130
 
@@ -6450,6 +8149,7 @@ export type GeneratorAssistant = Generator &
6450
8149
  | 'isBusy'
6451
8150
  | 'files'
6452
8151
  | 'messages'
8152
+ | 'mcpServers'
6453
8153
  value: any
6454
8154
  }
6455
8155
  }>