@fugood/bricks-project 2.21.6 → 2.21.8

This diff compares the publicly available contents of the two package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the registry.
@@ -257,6 +257,10 @@ export const templateActionNameMap = {
  BRICK_WEBVIEW_INJECT_JAVASCRIPT: {
  javascriptCode: 'BRICK_WEBVIEW_JAVASCRIPT_CODE',
  },
+ BRICK_WEBVIEW_QUERY_SELECTOR: {
+ querySelector: 'BRICK_WEBVIEW_QUERY_SELECTOR',
+ expression: 'BRICK_WEBVIEW_EXPRESSION',
+ },
  },
  BRICK_CAMERA: {
  BRICK_CAMERA_TAKE_PICTURE: {
@@ -552,6 +556,8 @@ export const templateActionNameMap = {
  prompt: 'GENERATOR_ONNX_LLM_PROMPT',
  chat: 'GENERATOR_ONNX_LLM_CHAT',
  images: 'GENERATOR_ONNX_LLM_IMAGES',
+ tools: 'GENERATOR_ONNX_LLM_TOOLS',
+ toolChoice: 'GENERATOR_ONNX_LLM_TOOL_CHOICE',
  },
  },
  GENERATOR_ONNX_STT: {
@@ -590,6 +596,15 @@ export const templateActionNameMap = {
  },
  },
  GENERATOR_LLM: {
+ GENERATOR_LLM_TOKENIZE: {
+ mode: 'GENERATOR_LLM_MODE',
+ prompt: 'GENERATOR_LLM_PROMPT',
+ promptMediaPaths: 'GENERATOR_LLM_PROMPT_MEDIA_PATHS',
+ messages: 'GENERATOR_LLM_MESSAGES',
+ },
+ GENERATOR_LLM_DETOKENIZE: {
+ tokens: 'GENERATOR_LLM_TOKENS',
+ },
  GENERATOR_LLM_PROCESS_PROMPT: {
  sessionKey: 'GENERATOR_LLM_SESSION_KEY',
  mode: 'GENERATOR_LLM_MODE',
@@ -598,6 +613,7 @@ export const templateActionNameMap = {
  parallelToolCalls: 'GENERATOR_LLM_PARALLEL_TOOL_CALLS',
  toolChoice: 'GENERATOR_LLM_TOOL_CHOICE',
  prompt: 'GENERATOR_LLM_PROMPT',
+ promptMediaPaths: 'GENERATOR_LLM_PROMPT_MEDIA_PATHS',
  promptTemplateData: 'GENERATOR_LLM_PROMPT_TEMPLATE_DATA',
  promptTemplateType: 'GENERATOR_LLM_PROMPT_TEMPLATE_TYPE',
  responseFormat: 'GENERATOR_LLM_RESPONSE_FORMAT',
@@ -610,6 +626,7 @@ export const templateActionNameMap = {
  parallelToolCalls: 'GENERATOR_LLM_PARALLEL_TOOL_CALLS',
  toolChoice: 'GENERATOR_LLM_TOOL_CHOICE',
  prompt: 'GENERATOR_LLM_PROMPT',
+ promptMediaPaths: 'GENERATOR_LLM_PROMPT_MEDIA_PATHS',
  promptTemplateData: 'GENERATOR_LLM_PROMPT_TEMPLATE_DATA',
  promptTemplateType: 'GENERATOR_LLM_PROMPT_TEMPLATE_TYPE',
  responseFormat: 'GENERATOR_LLM_RESPONSE_FORMAT',
@@ -650,6 +667,7 @@ export const templateActionNameMap = {
  GENERATOR_QNN_LLM_GENERATE: {
  prompt: 'GENERATOR_QNN_LLM_PROMPT',
  messages: 'GENERATOR_QNN_LLM_MESSAGES',
+ tools: 'GENERATOR_QNN_LLM_TOOLS',
  },
  },
  GENERATOR_OPENAI_LLM: {
@@ -676,6 +694,7 @@ export const templateActionNameMap = {
  GENERATOR_ASSISTANT_ADD_MESSAGE: {
  role: 'GENERATOR_ASSISTANT_ROLE',
  content: 'GENERATOR_ASSISTANT_CONTENT',
+ image: 'GENERATOR_ASSISTANT_IMAGE',
  payload: 'GENERATOR_ASSISTANT_PAYLOAD',
  useFileSearch: 'GENERATOR_ASSISTANT_USE_FILE_SEARCH',
  filePath: 'GENERATOR_ASSISTANT_FILE_PATH',
@@ -699,12 +718,14 @@ export const templateActionNameMap = {
  GENERATOR_ASSISTANT_UPDATE_MESSAGE_AT_INDEX: {
  index: 'GENERATOR_ASSISTANT_INDEX',
  content: 'GENERATOR_ASSISTANT_CONTENT',
+ image: 'GENERATOR_ASSISTANT_IMAGE',
  payload: 'GENERATOR_ASSISTANT_PAYLOAD',
  },
  GENERATOR_ASSISTANT_ADD_AUDIO_MESSAGE: {
  role: 'GENERATOR_ASSISTANT_ROLE',
  contentFile: 'GENERATOR_ASSISTANT_CONTENT_FILE',
  contentBase64: 'GENERATOR_ASSISTANT_CONTENT_BASE64',
+ image: 'GENERATOR_ASSISTANT_IMAGE',
  useFileSearch: 'GENERATOR_ASSISTANT_USE_FILE_SEARCH',
  payload: 'GENERATOR_ASSISTANT_PAYLOAD',
  filePath: 'GENERATOR_ASSISTANT_FILE_PATH',
@@ -727,6 +748,7 @@ export const templateActionNameMap = {
  index: 'GENERATOR_ASSISTANT_INDEX',
  contentFile: 'GENERATOR_ASSISTANT_CONTENT_FILE',
  contentBase64: 'GENERATOR_ASSISTANT_CONTENT_BASE64',
+ image: 'GENERATOR_ASSISTANT_IMAGE',
  payload: 'GENERATOR_ASSISTANT_PAYLOAD',
  },
  GENERATOR_ASSISTANT_REMOVE_MESSAGE_AT_INDEX: {
package/package.json CHANGED
@@ -1,12 +1,12 @@
  {
  "name": "@fugood/bricks-project",
- "version": "2.21.6",
+ "version": "2.21.8",
  "main": "index.ts",
  "scripts": {
  "build": "node scripts/build.js"
  },
  "dependencies": {
- "@modelcontextprotocol/sdk": "^1.7.0",
+ "@modelcontextprotocol/sdk": "^1.11.1",
  "@types/escodegen": "^0.0.10",
  "@types/lodash": "^4.17.12",
  "acorn": "^8.13.0",
package/types/bricks.ts CHANGED
@@ -2420,6 +2420,23 @@ export type BrickWebViewActionInjectJavascript = ActionWithParams & {
  }>
  }

+ /* Query selector on the WebView */
+ export type BrickWebViewActionQuerySelector = ActionWithParams & {
+ __actionName: 'BRICK_WEBVIEW_QUERY_SELECTOR'
+ params?: Array<
+ | {
+ input: 'querySelector'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
+ | {
+ input: 'expression'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
+ >
+ }
+
  /* Do go forward on the WebView */
  export type BrickWebViewActionGoForward = Action & {
  __actionName: 'BRICK_WEBVIEW_GO_FORWARD'
@@ -2502,6 +2519,12 @@
  /* Event of the webview on message by `window.ReactNativeWebView.postMessage` on you're injected javascript code */
  onMessage?: Array<EventAction>
  }
+ outlets?: {
+ /* The result of the query selector action */
+ queryResult?: () => Data
+ /* The error of the query selector action */
+ queryError?: () => Data
+ }
  animation?: AnimationBasicEvents & {
  onLoad?: Animation
  onError?: Animation
@@ -2523,7 +2546,7 @@ export type BrickWebView = Brick &
  | SwitchCondData
  | {
  __typename: 'SwitchCondInnerStateOutlet'
- outlet: ''
+ outlet: 'queryResult' | 'queryError'
  value: any
  }
  }>
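The new query-selector action pairs with the `queryResult`/`queryError` outlets added above. A sketch of such an action object, with hypothetical selector and expression values and the base `ActionWithParams` fields omitted for brevity (the exact expression contract is not spelled out in this diff):

```ts
// Sketch only: a query-selector action that presumably evaluates `expression`
// against the element matched by `querySelector` inside the WebView, with the
// result surfacing in the `queryResult` outlet (errors in `queryError`).
const queryTitleAction = {
  __actionName: 'BRICK_WEBVIEW_QUERY_SELECTOR',
  params: [
    { input: 'querySelector', value: 'h1.article-title' }, // hypothetical selector
    { input: 'expression', value: 'el.textContent' },      // hypothetical expression
  ],
}
```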
package/types/data.ts CHANGED
@@ -56,6 +56,11 @@ export type Data<T = any> = DataDef & {
  | 'rich-text-content'
  | 'sandbox-script'
  | 'llm-prompt'
+ | 'llm-messages'
+ | 'llm-tools'
+ | 'mcp-server-resources'
+ | 'mcp-server-tools'
+ | 'mcp-server-prompts'
  }
  value: T
  }
@@ -67,6 +72,7 @@ export type DataAssetKind = {
  | 'media-resource-audio'
  | 'media-resource-file'
  | 'lottie-file-uri'
+ | 'rive-file-uri'
  | 'ggml-model-asset'
  | 'gguf-model-asset'
  | 'binary-asset'
@@ -4365,7 +4365,7 @@
  | {
  target?: string | DataLink
  timeout?: number | DataLink
- payload?: any
+ additionalParams?: any
  }
  scriptConfig?:
  | DataLink
@@ -4381,7 +4381,7 @@
  }
  >
  | DataLink
- payload?: any
+ additionalParams?: any
  }
  }
  >
@@ -4406,7 +4406,7 @@
  | {
  target?: string | DataLink
  timeout?: number | DataLink
- payload?: any
+ additionalParams?: any
  }
  scriptConfig?:
  | DataLink
@@ -4422,7 +4422,7 @@
  }
  >
  | DataLink
- payload?: any
+ additionalParams?: any
  }
  }
  >
@@ -4449,7 +4449,7 @@
  | {
  target?: string | DataLink
  timeout?: number | DataLink
- payload?: any
+ additionalParams?: any
  }
  scriptConfig?:
  | DataLink
@@ -4465,7 +4465,7 @@
  }
  >
  | DataLink
- payload?: any
+ additionalParams?: any
  }
  }
  >
@@ -4655,12 +4655,13 @@ interface GeneratorMCPDef {
  Default property:
  {
  "init": false,
- "type": "sse",
+ "type": "streamable-http",
  "url": "",
  "autoReconnect": true,
  "maxReconnectAttempts": 10,
  "reconnectInterval": 1000,
  "generatorId": "",
+ "generatorKey": "",
  "name": "bricks-foundation-mcp-client-default",
  "version": "1.0.0",
  "ignoreResourceInList": [],
@@ -4673,7 +4674,7 @@
  /* Initialize the MCP client on start */
  init?: boolean | DataLink
  /* Type of the MCP connection, e.g. sse or direct-link (generator) */
- type?: 'sse' | 'direct-link' | DataLink
+ type?: 'streamable-http' | 'sse' | 'direct-link' | DataLink
  /* URL of the MCP server, e.g. http://localhost:19853/sse */
  url?: string | DataLink
  /* Whether to automatically reconnect to the MCP server */
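Combined with the new default `"type": "streamable-http"`, a generator property block targeting a Streamable HTTP server could look roughly like the sketch below; the URL is a placeholder and the remaining fields mirror the default property block shown above.

```ts
// Hypothetical MCP generator configuration using the new 'streamable-http'
// transport; 'sse' and 'direct-link' remain valid types.
const mcpGeneratorProps = {
  init: true,                         // connect on start
  type: 'streamable-http',
  url: 'http://localhost:19853/mcp',  // placeholder endpoint (the doc example uses /sse for SSE)
  autoReconnect: true,
  maxReconnectAttempts: 10,
  reconnectInterval: 1000,
  name: 'bricks-foundation-mcp-client-default',
  version: '1.0.0',
}
```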
@@ -4997,6 +4998,16 @@ export type GeneratorOnnxLLMActionInfer = ActionWithParams & {
  value?: Array<any> | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'tools'
+ value?: Array<any> | DataLink | EventProperty
+ mapping?: string
+ }
+ | {
+ input: 'toolChoice'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
  >
  }

@@ -5016,6 +5027,8 @@
  {
  "model": "BricksDisplay/phi-1_5-q4",
  "modelType": "auto",
+ "toolCallParser": "llama3_json",
+ "toolChoice": "auto",
  "maxNewTokens": 256,
  "temperature": 0.7,
  "topK": 50,
@@ -5079,24 +5092,19 @@
  /* Model type */
  modelType?:
  | 'auto'
- | 'gpt2'
- | 'gptj'
- | 'gpt_bigcode'
- | 'gpt_neo'
- | 'gpt_neox'
- | 'bloom'
- | 'mpt'
- | 'opt'
- | 'llama'
- | 'falcon'
- | 'mistral'
+ | 'text-generation'
+ | 'qwen2-vl'
+ | 'paligemma'
+ | 'llava'
+ | 'llava_onevision'
+ | 'moondream1'
+ | 'florence2'
+ | 'idefics3'
+ | 'smolvlm'
+ | 'phi3_v'
  | 't5'
  | 'mt5'
  | 'longt5'
- | 'phi'
- | 'qwen2'
- | 'stablelm'
- | 'gemma'
  | DataLink
  /* Load quantized model (deprecated, use `quantizeType` instead) */
  quantized?: boolean | DataLink
@@ -5122,6 +5130,14 @@
  messages?: Array<DataLink | {}> | DataLink
  /* Images with message to inference */
  images?: Array<string | DataLink> | DataLink
+ /* Tool call parser */
+ toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
+ /* Tools for chat mode using OpenAI-compatible function calling format
+ Format: Array of objects with {type, function: {name, description, parameters}} structure
+ See: https://platform.openai.com/docs/guides/function-calling */
+ tools?: Array<{} | DataLink> | DataLink
+ /* Tool choice for chat mode */
+ toolChoice?: 'none' | 'auto' | DataLink
  /* Max new tokens to generate */
  maxNewTokens?: number | DataLink
  /* Temperature */
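The comment above refers to the OpenAI function-calling format; a minimal `tools` value in that shape, with a hypothetical function, might look like the sketch below. The `toolCallParser`/`toolChoice` values shown are just two of the documented options.

```ts
// Minimal OpenAI-compatible tools array (hypothetical get_weather function)
// matching the {type, function: {name, description, parameters}} structure
// described in the comment above.
const tools = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get the current weather for a city',
      parameters: {
        type: 'object',
        properties: {
          city: { type: 'string', description: 'City name' },
        },
        required: ['city'],
      },
    },
  },
]
const toolCallParser = 'hermes' // one of: llama3_json, mistral, hermes, internlm, phi4
const toolChoice = 'auto'       // or 'none'
```

Parsed calls then surface through the new `onFunctionCall` event and the `lastFunctionCall` outlet added in the next hunks.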
@@ -5156,6 +5172,10 @@
  events?: {
  /* Event triggered when state change */
  onContextStateChange?: Array<EventAction>
+ /* Event triggered on get function call request */
+ onFunctionCall?: Array<EventAction>
+ /* Event triggered on completion finished */
+ onCompletionFinished?: Array<EventAction>
  /* Event triggered when error occurs */
  onError?: Array<EventAction>
  }
@@ -5166,6 +5186,8 @@
  generated?: () => Data
  /* Full result of generation */
  fullResult?: () => Data
+ /* Last function call */
+ lastFunctionCall?: () => Data
  }
  }

@@ -5184,7 +5206,7 @@ export type GeneratorOnnxLLM = Generator &
  | SwitchCondData
  | {
  __typename: 'SwitchCondInnerStateOutlet'
- outlet: 'contextState' | 'generated' | 'fullResult'
+ outlet: 'contextState' | 'generated' | 'fullResult' | 'lastFunctionCall'
  value: any
  }
  }>
@@ -5875,6 +5897,48 @@ export type GeneratorLLMActionLoadModel = Action & {
  __actionName: 'GENERATOR_LLM_LOAD_MODEL'
  }

+ /* Load multimodal (vision) model (PREVIEW FEATURE) */
+ export type GeneratorLLMActionLoadMultimodalModel = Action & {
+ __actionName: 'GENERATOR_LLM_LOAD_MULTIMODAL_MODEL'
+ }
+
+ /* Tokenize the prompt */
+ export type GeneratorLLMActionTokenize = ActionWithParams & {
+ __actionName: 'GENERATOR_LLM_TOKENIZE'
+ params?: Array<
+ | {
+ input: 'mode'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
+ | {
+ input: 'prompt'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
+ | {
+ input: 'promptMediaPaths'
+ value?: Array<any> | DataLink | EventProperty
+ mapping?: string
+ }
+ | {
+ input: 'messages'
+ value?: Array<any> | DataLink | EventProperty
+ mapping?: string
+ }
+ >
+ }
+
+ /* Detokenize the tokens to text */
+ export type GeneratorLLMActionDetokenize = ActionWithParams & {
+ __actionName: 'GENERATOR_LLM_DETOKENIZE'
+ params?: Array<{
+ input: 'tokens'
+ value?: Array<any> | DataLink | EventProperty
+ mapping?: string
+ }>
+ }
+
  /* Pre-process the prompt, this can speed up the completion action */
  export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
  __actionName: 'GENERATOR_LLM_PROCESS_PROMPT'
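As a rough illustration, a tokenize/detokenize round trip could be wired up as follows; base `ActionWithParams` fields are omitted and all values (including the `mode` string) are placeholders rather than values confirmed by this diff.

```ts
// Hypothetical GENERATOR_LLM_TOKENIZE / GENERATOR_LLM_DETOKENIZE actions.
const tokenizeAction = {
  __actionName: 'GENERATOR_LLM_TOKENIZE',
  params: [
    { input: 'mode', value: 'text' },           // assumed mode value
    { input: 'prompt', value: 'Hello world' },  // placeholder prompt
  ],
}

const detokenizeAction = {
  __actionName: 'GENERATOR_LLM_DETOKENIZE',
  params: [
    { input: 'tokens', value: [15496, 995] },   // placeholder token ids
  ],
}
// Results are exposed through the new tokenizeResult / detokenizeResult outlets.
```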
@@ -5914,6 +5978,11 @@ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'promptMediaPaths'
+ value?: Array<any> | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'promptTemplateData'
  value?: {} | DataLink | EventProperty
@@ -5971,6 +6040,11 @@ export type GeneratorLLMActionCompletion = ActionWithParams & {
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'promptMediaPaths'
+ value?: Array<any> | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'promptTemplateData'
  value?: {} | DataLink | EventProperty
@@ -6151,6 +6225,11 @@
  __actionName: 'GENERATOR_LLM_CLEAR_DOWNLOAD'
  }

+ /* Release multimodal (vision) context (PREVIEW FEATURE) */
+ export type GeneratorLLMActionReleaseMultimodalContext = Action & {
+ __actionName: 'GENERATOR_LLM_RELEASE_MULTIMODAL_CONTEXT'
+ }
+
  /* Release context */
  export type GeneratorLLMActionReleaseContext = Action & {
  __actionName: 'GENERATOR_LLM_RELEASE_CONTEXT'
@@ -6173,7 +6252,7 @@
  "cacheVType": "f16",
  "ctxShift": true,
  "transformScriptEnabled": false,
- "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables } \*\/\nreturn inputs.prompt",
+ "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables }, members = { llmUtils } \*\/\nreturn inputs.prompt",
  "transformScriptVariables": {},
  "sessionMinSaveSize": 50,
  "sessionRemain": 10,
@@ -6228,6 +6307,14 @@
  modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
  /* Hash of model */
  modelHash?: string | DataLink
+ /* Load multimodal (vision) context after model loaded (PREVIEW FEATURE) */
+ initMultimodal?: boolean | DataLink
+ /* The URL or path of mmproj file for multimodal vision support (PREVIEW FEATURE) */
+ mmprojUrl?: string | DataLink
+ /* Hash type of mmproj file (PREVIEW FEATURE) */
+ mmprojHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
+ /* Hash of mmproj file (PREVIEW FEATURE) */
+ mmprojHash?: string | DataLink
  /* Chat Template (Jinja format) to override the default template from model */
  chatTemplate?: string | DataLink
  /* Context size (0 ~ 4096) (Default to 512) */
@@ -6297,6 +6384,9 @@
  | DataLink
  /* Prompt (text mode) */
  completionPrompt?: string | DataLink
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
+ In prompt, use `<__image__>` for position of media content */
+ completionPromptMediaPaths?: Array<string | DataLink> | DataLink
  /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
  completionPromptTemplateData?: {} | DataLink
  /* The prompt template type */
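Putting the preview fields together, a GGML LLM generator that loads a vision projector and references an image from the prompt might be configured like this sketch; the URL, hash, and media path are placeholders.

```ts
// Hypothetical multimodal (PREVIEW FEATURE) configuration. Field names come
// from the diff above; every value here is a placeholder.
const llmMultimodalProps = {
  initMultimodal: true,
  mmprojUrl: 'https://example.com/models/mmproj-f16.gguf',
  mmprojHashType: 'sha256',
  mmprojHash: '<expected-sha256>',
  completionPrompt: 'Describe this picture: <__image__>',
  completionPromptMediaPaths: ['/path/to/picture.jpg'],
}
```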
@@ -6394,6 +6484,10 @@
  sessions?: () => Data
  /* Is evaluating */
  isEvaluating?: () => Data
+ /* Tokenize result */
+ tokenizeResult?: () => Data
+ /* Detokenize result */
+ detokenizeResult?: () => Data
  /* Last formatted prompt (messages or prompt) */
  completionLastFormattedPrompt?: () => Data
  /* Last completion token */
@@ -6434,6 +6528,8 @@ export type GeneratorLLM = Generator &
  | 'contextDetails'
  | 'sessions'
  | 'isEvaluating'
+ | 'tokenizeResult'
+ | 'detokenizeResult'
  | 'completionLastFormattedPrompt'
  | 'completionLastToken'
  | 'completionResult'
@@ -6470,6 +6566,11 @@ export type GeneratorQnnLlmActionGenerate = ActionWithParams & {
  value?: Array<any> | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'tools'
+ value?: Array<any> | DataLink | EventProperty
+ mapping?: string
+ }
  >
  }

@@ -6489,6 +6590,7 @@
  {
  "modelType": "Llama 3.2 3B Chat",
  "chatFormat": "Llama 3.x",
+ "toolsInUserMessage": true,
  "toolCallParser": "llama3_json",
  "toolChoice": "auto",
  "parallelToolCalls": false,
@@ -6517,9 +6619,18 @@
  /* Custom model split parts */
  customModelSplitParts?: number | DataLink
  /* Chat format */
- chatFormat?: 'Llama 2' | 'Llama 3' | 'Llama 3.x' | 'Mistral v0.3' | 'Qwen 2' | DataLink
+ chatFormat?:
+ | 'Llama 2'
+ | 'Llama 3'
+ | 'Llama 3.x'
+ | 'Mistral v0.3'
+ | 'Qwen 2'
+ | 'Custom'
+ | DataLink
  /* Custom chat format template */
  customChatFormat?: string | DataLink
+ /* Put tools in user message */
+ toolsInUserMessage?: boolean | DataLink
  /* Prompt to generate */
  prompt?: string | DataLink
  /* Chat messages */
@@ -6535,7 +6646,7 @@
  /* Stop words */
  stopWords?: Array<string | DataLink> | DataLink
  /* Tool call parser */
- toolCallParser?: 'llama3_json' | 'mistral' | DataLink
+ toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
  /* Tools for chat mode using OpenAI-compatible function calling format
  Format: Array of objects with {type, function: {name, description, parameters}} structure
  See: https://platform.openai.com/docs/guides/function-calling */
@@ -6680,7 +6791,7 @@ interface GeneratorOpenAILLMDef {
  Default property:
  {
  "apiEndpoint": "https://api.openai.com/v1",
- "model": "gpt-4o-mini",
+ "model": "gpt-4o",
  "completionMessages": [
  {
  "role": "system",
@@ -6690,8 +6801,6 @@
  "completionMaxTokens": 1024,
  "completionTemperature": 1,
  "completionTopP": 1,
- "completionFrequencyPenalty": 0,
- "completionPresencePenalty": 0,
  "completionStop": []
  }
  */
@@ -6780,7 +6889,11 @@
  - Compatible with OpenAI API format
  - Supports function calling
  - Streaming responses
- - Custom API endpoints */
+ - Custom API endpoints, like
+ - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
+ - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
+ - Gemini API: https://ai.google.dev/gemini-api/docs/openai
+ - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server */
  export type GeneratorOpenAILLM = Generator &
  GeneratorOpenAILLMDef & {
  templateKey: 'GENERATOR_OPENAI_LLM'
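For example, the same generator could target a local llama.cpp server's OpenAI-compatible API simply by changing `apiEndpoint`; the sketch below uses an assumed local port and a placeholder system message.

```ts
// Illustrative GeneratorOpenAILLM properties pointing at a llama.cpp server
// instead of api.openai.com. Port and message content are assumptions.
const openAICompatProps = {
  apiEndpoint: 'http://localhost:8080/v1',
  model: 'gpt-4o', // many OpenAI-compatible servers ignore or remap the model name
  completionMessages: [
    { role: 'system', content: 'You are a helpful assistant.' }, // placeholder
  ],
  completionMaxTokens: 1024,
  completionTemperature: 1,
  completionTopP: 1,
}
```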
@@ -6914,6 +7027,11 @@ export type GeneratorAssistantActionAddMessage = ActionWithParams & {
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'image'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'payload'
  value?: {} | DataLink | EventProperty
@@ -7020,6 +7138,11 @@ export type GeneratorAssistantActionUpdateMessageAtIndex = ActionWithParams & {
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'image'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'payload'
  value?: {} | DataLink | EventProperty
@@ -7047,6 +7170,11 @@ export type GeneratorAssistantActionAddAudioMessage = ActionWithParams & {
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'image'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'useFileSearch'
  value?: boolean | DataLink | EventProperty
@@ -7146,6 +7274,11 @@ export type GeneratorAssistantActionUpdateAudioMessageAtIndex = ActionWithParams
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'image'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'payload'
  value?: {} | DataLink | EventProperty
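A user message carrying an image through the new `image` param could be assembled as in this sketch; the path and role value are placeholders and base `ActionWithParams` fields are omitted.

```ts
// Hypothetical GENERATOR_ASSISTANT_ADD_MESSAGE action with the new 'image'
// param alongside role/content. The image path is a placeholder.
const addImageMessageAction = {
  __actionName: 'GENERATOR_ASSISTANT_ADD_MESSAGE',
  params: [
    { input: 'role', value: 'user' },
    { input: 'content', value: 'What is in this photo?' },
    { input: 'image', value: '/path/to/photo.jpg' },
  ],
}
```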
@@ -7302,7 +7435,9 @@
  cacheMessages?: boolean | DataLink
  /* LLM Generator (Supports `LLM (GGML)` and `OpenAI LLM` generators) */
  llmGeneratorId?: string | DataLink
- /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use. */
+ /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use.
+
+ Note: LLM (Qualcomm AI Engine) recommend use `manual` and loaded constantly. */
  llmLivePolicy?: 'only-in-use' | 'manual' | DataLink
  /* LLM main session key */
  llmSessionKey?: string | DataLink
@@ -739,6 +739,16 @@ export const templateEventPropsMap = {
  onContextStateChange: [
  'GENERATOR_ONNX_LLM_CONTEXT_STATE', // type: string
  ],
+ onFunctionCall: [
+ 'GENERATOR_ONNX_LLM_FUNCTION_CALL_NAME', // type: string
+ 'GENERATOR_ONNX_LLM_FUNCTION_CALL_ARGUMENTS', // type: object
+ 'GENERATOR_ONNX_LLM_FUNCTION_CALL_DETAILS', // type: object
+ ],
+ onCompletionFinished: [
+ 'GENERATOR_ONNX_LLM_COMPLETION_RESULT', // type: string
+ 'GENERATOR_ONNX_LLM_COMPLETION_TOOL_CALLS', // type: array
+ 'GENERATOR_ONNX_LLM_COMPLETION_FULL_CONTEXT', // type: string
+ ],
  onError: [
  'GENERATOR_ONNX_LLM_ERROR', // type: string
  ],
@@ -803,6 +813,7 @@ export const templateEventPropsMap = {
  onGenerate: [
  'GENERATOR_QNN_LLM_RESULT', // type: string
  'GENERATOR_QNN_LLM_FULL_CONTEXT', // type: string
+ 'GENERATOR_QNN_LLM_TOOL_CALLS', // type: array
  ],
  onFunctionCall: [
  'GENERATOR_QNN_LLM_FUNCTION_CALL_NAME', // type: string