@fugood/bricks-project 2.22.0-beta.16 → 2.22.0-beta.18

@@ -129,6 +129,12 @@ export const templateActionNameMap = {
       behavior: 'BEHAVIOR',
       exceptionMessage: 'EXCEPTION_MESSAGE',
     },
+    TRIGGER_APPLICATION_FILTER: {
+      name: 'NAME',
+      variables: 'VARIABLES',
+      result: 'RESULT',
+      error: 'ERROR',
+    },
   },

   BRICK_TEXT_INPUT: {
@@ -630,6 +636,7 @@ export const templateActionNameMap = {
       samplesOverlap: 'GENERATOR_VAD_INFERENCE_SAMPLES_OVERLAP',
     },
   },
+
   GENERATOR_LLM: {
     GENERATOR_LLM_TOKENIZE: {
       mode: 'GENERATOR_LLM_MODE',
package/package.json CHANGED
@@ -1,12 +1,12 @@
 {
   "name": "@fugood/bricks-project",
-  "version": "2.22.0-beta.16",
+  "version": "2.22.0-beta.18",
   "main": "index.ts",
   "scripts": {
     "build": "node scripts/build.js"
   },
   "dependencies": {
-    "@modelcontextprotocol/sdk": "^1.11.1",
+    "@modelcontextprotocol/sdk": "^1.15.0",
     "@types/escodegen": "^0.0.10",
     "@types/lodash": "^4.17.12",
     "acorn": "^8.13.0",
@@ -14,5 +14,5 @@
     "lodash": "^4.17.4",
     "uuid": "^8.3.1"
   },
-  "gitHead": "d8231fa6787b8d95bc2bec94a86ceba596cf57f0"
+  "gitHead": "3d0d19e21dcdb4a989e24f9ab8c69578c4c550fa"
 }
package/types/common.ts CHANGED
@@ -39,7 +39,7 @@ export type SubpsaceAction = string
 export type Action = {
   __actionName: string
   parent: 'Brick' | 'Generator' | 'Subspace' | 'System'
-  name: string
+  name?: string
 }

 // Find correct key in bricks-project/utils/event-props for EventAction
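For illustration (not part of the published diff): with `name` now optional on `Action`, a bare action literal no longer has to carry it. A minimal sketch, reusing the existing `DISMISS_KEYBOARD` action from `package/types/system.ts`:

// Sketch: after this change, an Action literal may omit `name`.
const dismissKeyboard = {
  __actionName: 'DISMISS_KEYBOARD',
  parent: 'System',
} as const // previously a `name: string` was required to satisfy Action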
@@ -4868,39 +4868,9 @@ Default property:
   init?: boolean | DataLink
   /* TTS model
   The mms-tts models are licensed under CC-BY-NC-4.0 */
-  model?:
-    | 'Custom'
-    | 'BricksDisplay/vits-eng'
-    | 'BricksDisplay/vits-cmn'
-    | 'BricksDisplay/ellie-Bert-VITS2'
-    | 'onnx-community/Kokoro-82M-v1.0-ONNX'
-    | 'onnx-community/Kokoro-82M-v1.1-zh-ONNX'
-    | 'onnx-community/OuteTTS-1.0-0.6B-ONNX'
-    | 'mms-tts-ara (NC)'
-    | 'mms-tts-deu (NC)'
-    | 'mms-tts-eng (NC)'
-    | 'mms-tts-fra (NC)'
-    | 'mms-tts-hin (NC)'
-    | 'mms-tts-kor (NC)'
-    | 'mms-tts-por (NC)'
-    | 'mms-tts-ron (NC)'
-    | 'mms-tts-rus (NC)'
-    | 'mms-tts-spa (NC)'
-    | 'mms-tts-vie (NC)'
-    | 'mms-tts-yor (NC)'
-    | 'speecht5_tts'
-    | DataLink
+  model?: string | DataLink
   /* Model type */
-  modelType?:
-    | 'auto'
-    | 'vits'
-    | 'bert_vits2'
-    | 'style_text_to_speech_2'
-    | 'speecht5'
-    | 'outetts-1.0'
-    | DataLink
-  /* Load quantized model (deprecated, use `quantizeType` instead) */
-  quantized?: boolean | DataLink
+  modelType?: string | DataLink
   /* Quantize type */
   quantizeType?:
     | 'auto'
@@ -4913,9 +4883,6 @@ Default property:
     | 'bnb4'
     | 'q4f16'
     | DataLink
-  /* Custom model name
-  Choose model from https://huggingface.co/models?pipeline_tag=text-to-audio&library=transformers.js */
-  customModel?: string | DataLink
   /* Vocoder model for SpeechT5 */
   vocoderModel?: 'Custom' | 'speecht5_hifigan' | DataLink
   /* Custom vocoder model
@@ -4923,8 +4890,10 @@ Default property:
   customVocoderModel?: string | DataLink
   /* Speaker embedding, for SpeechT5 or StyleTTS (Kokoro) */
   speakerEmbedUrl?: string | DataLink
-  /* MD5 checksum of `speakerEmbedUrl` */
-  speakerEmbedMd5?: string | DataLink
+  /* Hash of `speakerEmbedUrl` */
+  speakerEmbedHash?: string | DataLink
+  /* Hash type of `speakerEmbedUrl` */
+  speakerEmbedHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
   /* Speaker config, for OuteTTS model */
   speakerConfig?: {} | DataLink
   /* Audio token generation max length */
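For illustration (not part of the published diff): `speakerEmbedMd5` is replaced by a hash value plus an explicit hash type. A hedged migration sketch, with a placeholder URL and digest:

// Sketch: speakerEmbedMd5 (beta.16) becomes speakerEmbedHash + speakerEmbedHashType (beta.18).
const ttsSpeakerProps = {
  speakerEmbedUrl: 'https://example.com/speaker-embed.bin', // placeholder URL
  // before: speakerEmbedMd5: '<md5 digest>'
  speakerEmbedHash: 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855', // placeholder digest
  speakerEmbedHashType: 'sha256',
} as const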
@@ -5054,7 +5023,6 @@ interface GeneratorOnnxLLMDef {
 /*
 Default property:
 {
-  "model": "BricksDisplay/phi-1_5-q4",
   "modelType": "auto",
   "toolCallParser": "llama3_json",
   "toolChoice": "auto",
@@ -5073,70 +5041,9 @@ Default property:
   /* Initialize the TTS context on generator initialization */
   init?: boolean | DataLink
   /* LLM model */
-  model?:
-    | 'Custom'
-    | 'onnx-community/gemma-3-1b-it-ONNX'
-    | 'BricksDisplay/phi-1_5'
-    | 'BricksDisplay/phi-1_5-q4'
-    | 'onnx-community/Phi-3.5-vision-instruct'
-    | 'onnx-community/Phi-3-vision-128k-instruct'
-    | 'onnx-community/Phi-4-mini-instruct-ONNX-MHA'
-    | 'onnx-community/Qwen2.5-0.5B'
-    | 'onnx-community/Qwen2.5-0.5B-Instruct'
-    | 'onnx-community/Qwen2.5-1.5B'
-    | 'onnx-community/Qwen2.5-1.5B-Instruct'
-    | 'onnx-community/Qwen2-VL-2B-Instruct'
-    | 'stablelm-2-1_6b'
-    | 'BricksDisplay/stablelm-2-1_6b-q4'
-    | 'stablelm-2-zephyr-1_6b'
-    | 'BricksDisplay/stablelm-2-zephyr-1_6b-q4'
-    | 'BricksDisplay/Llama-2-7b-chat-q4'
-    | 'TinyLLama-v0'
-    | 'TinyLlama-1.1B-Chat-v1.0'
-    | 'BricksDisplay/TinyLlama-1.1B-Chat-v1.0-q4'
-    | 'llama-160m'
-    | 'llama-68m'
-    | 'BricksDisplay/Yi-6B-q4'
-    | 'BricksDisplay/Yi-6B-Chat-q4'
-    | 'BricksDisplay/Mistral-7B-v0.1-q4'
-    | 'BricksDisplay/Mistral-7B-Instruct-v0.2-q4'
-    | 'BricksDisplay/Breeze-7B-Base-v1_0-q4'
-    | 'BricksDisplay/Breeze-7B-Instruct-v1_0-q4'
-    | 'gpt2'
-    | 'distilgpt2'
-    | 'gpt-neo-125M'
-    | 'opt-125m'
-    | 'opt-350m'
-    | 'bloom-560m'
-    | 'bloomz-560m'
-    | 't5-small'
-    | 't5-base'
-    | 'flan-t5-small'
-    | 'flan-t5-base'
-    | 'mt5-small'
-    | 'mt5-base'
-    | 'long-t5-lobal-base'
-    | 'long-t5-tglobal-base'
-    | DataLink
+  model?: string | DataLink
   /* Model type */
-  modelType?:
-    | 'auto'
-    | 'text-generation'
-    | 'qwen2-vl'
-    | 'paligemma'
-    | 'llava'
-    | 'llava_onevision'
-    | 'moondream1'
-    | 'florence2'
-    | 'idefics3'
-    | 'smolvlm'
-    | 'phi3_v'
-    | 't5'
-    | 'mt5'
-    | 'longt5'
-    | DataLink
-  /* Load quantized model (deprecated, use `quantizeType` instead) */
-  quantized?: boolean | DataLink
+  modelType?: string | DataLink
   /* Quantize type */
   quantizeType?:
     | 'auto'
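For illustration (not part of the published diff): with `model` and `modelType` widened from fixed unions to plain strings here and in the ONNX TTS/STT definitions, any transformers.js-compatible model id can be passed directly, and the `'Custom'` + `customModel` indirection removed in the surrounding hunks is no longer needed. A hedged sketch:

// Sketch: any Hugging Face model id is now accepted as a plain string.
// The id below is one entry from the removed union, used only as an example.
const onnxLlmProps = {
  model: 'onnx-community/Qwen2.5-0.5B-Instruct',
  modelType: 'auto',
  quantizeType: 'auto',
} as const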
@@ -5149,10 +5056,6 @@ Default property:
     | 'bnb4'
     | 'q4f16'
     | DataLink
-  /* Custom model name
-  Choose model from https://huggingface.co/models?pipeline_tag=text2text-generation&library=transformers.js
-  or https://huggingface.co/models?pipeline_tag=text-generation&library=transformers.js&sort=trending */
-  customModel?: string | DataLink
   /* Prompt to inference */
   prompt?: string | DataLink
   /* Messages to inference */
@@ -5282,27 +5185,9 @@ Default property:
   /* Initialize the TTS context on generator initialization */
   init?: boolean | DataLink
   /* STT model */
-  model?:
-    | 'Custom'
-    | 'whisper-tiny'
-    | 'whisper-tiny.en'
-    | 'whisper-small'
-    | 'whisper-small.en'
-    | 'whisper-base'
-    | 'whisper-base.en'
-    | 'whisper-medium'
-    | 'whisper-medium.en'
-    | 'whisper-large'
-    | 'whisper-large-v2'
-    | 'whisper-large-v3'
-    | 'mms-1b-all'
-    | 'mms-1b-fl102'
-    | 'mms-1b-l1107'
-    | DataLink
+  model?: string | DataLink
   /* Model type */
-  modelType?: 'auto' | 'whisper' | 'hubert' | 'wav2vec2' | 'wav2vec2-bert' | DataLink
-  /* Load quantized model (deprecated, use `quantizeType` instead) */
-  quantized?: boolean | DataLink
+  modelType?: string | DataLink
   /* Quantize type */
   quantizeType?:
     | 'auto'
@@ -5315,9 +5200,6 @@ Default property:
     | 'bnb4'
     | 'q4f16'
     | DataLink
-  /* Custom model name
-  Choose model from https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&library=transformers.js */
-  customModel?: string | DataLink
   /* Return timestamps */
   returnTimestamps?: 'none' | 'enable' | 'word' | DataLink
   /* Transcription language
@@ -5550,7 +5432,7 @@ export type GeneratorSpeechInferenceActionTranscribeData = ActionWithParams & {
   >
 }

-/* Transcribe microphone audio source */
+/* [Deprecated] Transcribe microphone audio source */
 export type GeneratorSpeechInferenceActionTranscribeRealtime = ActionWithParams & {
   __actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_REALTIME'
   params?: Array<
@@ -5617,7 +5499,7 @@ export type GeneratorSpeechInferenceActionTranscribeRealtime = ActionWithParams
   >
 }

-/* Stop transcribing microphone audio source */
+/* [Deprecated] Stop transcribing microphone audio source */
 export type GeneratorSpeechInferenceActionTranscribeRealtimeStop = Action & {
   __actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_REALTIME_STOP'
 }
@@ -5637,6 +5519,7 @@ interface GeneratorSpeechInferenceDef {
 Default property:
 {
   "init": false,
+  "accelVariant": "default",
   "modelName": "base-q8_0",
   "modelUseCoreML": false,
   "modelUseGPU": true,
@@ -5655,6 +5538,11 @@ Default property:
   /* Initialize the Whisper context on generator initialization
   Please note that it will take some RAM depending on the model size */
   init?: boolean | DataLink
+  /* Accelerator variant (Only for desktop)
+  `default` - CPU / Metal (macOS)
+  `vulkan` - Use Vulkan
+  `cuda` - Use CUDA */
+  accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
   /* Use model name, the model download progress will be done in preload stage or the generator initialization stage.
   We used `ggml` format model, please refer to https://huggingface.co/BricksDisplay/whisper-ggml
   You can also choose `custom` option and set `Model URL` and `Model MD5` to use your own model */
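For illustration (not part of the published diff): a hedged sketch of the new `accelVariant` property, with values taken from the comment above (desktop only):

// Sketch: choosing the desktop accelerator backend for GENERATOR_SPEECH_INFERENCE.
const speechInferenceProps = {
  init: true,
  accelVariant: 'vulkan', // 'default' = CPU / Metal (macOS), 'cuda' for NVIDIA GPUs
  modelName: 'base-q8_0',
} as const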
@@ -5865,7 +5753,7 @@ Default property:
   inferRealtimeVadFreqThold?: number | DataLink
 }
 events?: {
-  /* Event triggered when load is done */
+  /* Event triggered when context state changes */
   onContextStateChange?: Array<EventAction>
   /* Event triggered when error occurs */
   onError?: Array<EventAction>
@@ -5892,7 +5780,13 @@ Default property:
   }
 }

-/* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) */
+/* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp)
+
+## Notice
+- iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
+- macOS: Supported GPU acceleration, recommended use M1+ chip device
+- Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
+- Linux / Windows: Supported GPU acceleration, you can choose `vulkan` or `cuda` backend in Accel Variant property */
 export type GeneratorSpeechInference = Generator &
   GeneratorSpeechInferenceDef & {
     templateKey: 'GENERATOR_SPEECH_INFERENCE'
@@ -6074,7 +5968,7 @@ Default property:
   detectFileMd5?: string | DataLink
 }
 events?: {
-  /* Event triggered when load is done */
+  /* Event triggered when context state changes */
   onContextStateChange?: Array<EventAction>
   /* Event triggered when error occurs */
   onError?: Array<EventAction>
@@ -6122,6 +6016,168 @@ export type GeneratorVadInference = Generator &
   >
 }

+/* Start realtime transcription */
+export type GeneratorRealtimeTranscriptionActionStart = Action & {
+  __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_START'
+}
+
+/* Stop realtime transcription */
+export type GeneratorRealtimeTranscriptionActionStop = Action & {
+  __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_STOP'
+}
+
+/* Force move to next slice */
+export type GeneratorRealtimeTranscriptionActionNextSlice = Action & {
+  __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_NEXT_SLICE'
+}
+
+/* Reset transcriber state */
+export type GeneratorRealtimeTranscriptionActionReset = Action & {
+  __actionName: 'GENERATOR_REALTIME_TRANSCRIPTION_RESET'
+}
+
+interface GeneratorRealtimeTranscriptionDef {
+  /*
+  Default property:
+  {
+    "sttLivePolicy": "only-in-use",
+    "vadInferenceLivePolicy": "only-in-use",
+    "vadEnabled": true,
+    "audioSliceSec": 30,
+    "audioMinSec": 1,
+    "maxSlicesInMemory": 5,
+    "vadStrategy": "use-preset",
+    "vadPreset": "default",
+    "autoSliceOnSpeechEnd": true,
+    "autoSliceThreshold": 2,
+    "initialPrompt": "",
+    "promptPreviousSlices": false,
+    "saveAudio": true,
+    "testMode": false,
+    "testPlaybackSpeed": 1,
+    "testChunkDurationMs": 100,
+    "testLoop": false
+  }
+  */
+  property?: {
+    /* STT Generator for Whisper context */
+    sttGeneratorId?: string | DataLink
+    /* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when not in use. */
+    sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
+    /* VAD Inference Generator for voice activity detection */
+    vadInferenceGeneratorId?: string | DataLink
+    /* VAD Inference Live Policy. If the policy is `only-in-use`, the VAD Inference context will be released when not in use. */
+    vadInferenceLivePolicy?: 'only-in-use' | 'manual' | DataLink
+    /* Enable VAD (Voice Activity Detection) */
+    vadEnabled?: boolean | DataLink
+    /* Audio slice duration in seconds */
+    audioSliceSec?: number | DataLink
+    /* Minimum audio duration to start transcription in seconds */
+    audioMinSec?: number | DataLink
+    /* Maximum number of slices to keep in memory */
+    maxSlicesInMemory?: number | DataLink
+    /* VAD Strategy */
+    vadStrategy?: 'use-preset' | 'use-generator-options' | DataLink
+    /* VAD preset configuration */
+    vadPreset?:
+      | 'default'
+      | 'sensitive'
+      | 'very-sensitive'
+      | 'conservative'
+      | 'very-conservative'
+      | 'continuous-speech'
+      | 'meeting'
+      | 'noisy-environment'
+      | DataLink
+    /* Auto slice on speech end */
+    autoSliceOnSpeechEnd?: boolean | DataLink
+    /* Auto slice threshold in seconds */
+    autoSliceThreshold?: number | DataLink
+    /* Initial prompt for transcription */
+    initialPrompt?: string | DataLink
+    /* Include previous slices in prompt */
+    promptPreviousSlices?: boolean | DataLink
+    /* Enable audio output saving (auto-generates file path) */
+    saveAudio?: boolean | DataLink
+    /* Use test mode with file simulation */
+    testMode?: boolean | DataLink
+    /* Test audio file path for simulation */
+    testFilePath?: string | DataLink
+    /* Test audio file hash */
+    testFileHash?: string | DataLink
+    /* Test audio file hash type */
+    testFileHashType?: string | DataLink
+    /* Test playback speed */
+    testPlaybackSpeed?: number | DataLink
+    /* Test chunk duration in milliseconds */
+    testChunkDurationMs?: number | DataLink
+    /* Loop test audio file */
+    testLoop?: boolean | DataLink
+  }
+  events?: {
+    /* Event triggered when transcription starts, processes, or ends */
+    onTranscribe?: Array<EventAction>
+    /* Event triggered on VAD (Voice Activity Detection) events */
+    onVad?: Array<EventAction>
+    /* Event triggered when error occurs */
+    onError?: Array<EventAction>
+    /* Event triggered when status changes */
+    onStatusChange?: Array<EventAction>
+    /* Event triggered when statistics update */
+    onStatsUpdate?: Array<EventAction>
+    /* Event triggered when transcription ends */
+    onEnd?: Array<EventAction>
+  }
+  outlets?: {
+    /* Is realtime transcription currently active */
+    isActive?: () => Data
+    /* Is currently transcribing audio */
+    isTranscribing?: () => Data
+    /* Current transcription results */
+    results?: () => Data
+    /* Current transcription result text */
+    resultText?: () => Data
+    /* Current statistics */
+    statistics?: () => Data
+    /* Latest transcribe event */
+    lastTranscribeEvent?: () => Data
+    /* Latest VAD event */
+    lastVadEvent?: () => Data
+    /* Audio output file path (auto-generated when saving audio) */
+    audioOutputPath?: () => Data
+  }
+}
+
+/* Realtime speech-to-text transcription using Whisper and VAD with live audio streaming */
+export type GeneratorRealtimeTranscription = Generator &
+  GeneratorRealtimeTranscriptionDef & {
+    templateKey: 'GENERATOR_REALTIME_TRANSCRIPTION'
+    switches: Array<
+      SwitchDef &
+        GeneratorRealtimeTranscriptionDef & {
+          conds?: Array<{
+            method: '==' | '!=' | '>' | '<' | '>=' | '<='
+            cond:
+              | SwitchCondInnerStateCurrentCanvas
+              | SwitchCondData
+              | {
+                  __typename: 'SwitchCondInnerStateOutlet'
+                  outlet:
+                    | 'isActive'
+                    | 'isTranscribing'
+                    | 'results'
+                    | 'resultText'
+                    | 'statistics'
+                    | 'lastTranscribeEvent'
+                    | 'lastVadEvent'
+                    | 'audioOutputPath'
+                  value: any
+                }
+          }>
+        }
+    >
+  }
+
 /* Load the model */
 export type GeneratorLLMActionLoadModel = Action & {
   __actionName: 'GENERATOR_LLM_LOAD_MODEL'
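For illustration (not part of the published diff): a hedged configuration sketch for the new generator. The generator ids are hypothetical; the property values follow the `Default property` block above.

// Sketch: a GeneratorRealtimeTranscription wiring an STT generator and a
// VAD generator together for live transcription.
const realtimeTranscription = {
  templateKey: 'GENERATOR_REALTIME_TRANSCRIPTION',
  property: {
    sttGeneratorId: 'my-stt-generator',          // hypothetical id
    vadInferenceGeneratorId: 'my-vad-generator', // hypothetical id
    vadEnabled: true,
    audioSliceSec: 30,         // slice length, per the defaults above
    autoSliceOnSpeechEnd: true,
    saveAudio: true,           // the audioOutputPath outlet is then auto-generated
  },
} as const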
@@ -6705,7 +6761,7 @@ Default property:
   completionIgnoreEOS?: boolean | DataLink
 }
 events?: {
-  /* Event triggered when load is done */
+  /* Event triggered when context state changes */
   onContextStateChange?: Array<EventAction>
   /* Event triggered when error occurs */
   onError?: Array<EventAction>
@@ -6751,7 +6807,7 @@ Default property:
 - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
 - macOS: Supported GPU acceleration, recommended use M1+ chip device
 - Android: Currently not supported GPU acceleration (Coming soon), recommended use Android 13+ system
-- Linux / Windows: Supported GPU acceleration, currently only Vulkan backend available */
+- Linux / Windows: Supported GPU acceleration, you can choose `vulkan` or `cuda` backend in Accel Variant property */
 export type GeneratorLLM = Generator &
   GeneratorLLMDef & {
     templateKey: 'GENERATOR_LLM'
@@ -7214,7 +7270,7 @@ Default property:
   greedy?: boolean | DataLink
 }
 events?: {
-  /* Event triggered when load is done */
+  /* Event triggered when context state changes */
   onContextStateChange?: Array<EventAction>
   /* Event triggered when generate is done */
   onGenerate?: Array<EventAction>
package/types/system.ts CHANGED
@@ -609,3 +609,35 @@ export type SystemActionThrowException = ActionWithParams & {
 export type SystemActionDismissKeyboard = Action & {
   __actionName: 'DISMISS_KEYBOARD'
 }
+
+/* Trigger application AI filter */
+export type SystemActionTriggerApplicationFilter = ActionWithParams & {
+  __actionName: 'TRIGGER_APPLICATION_FILTER'
+  params?: Array<
+    | {
+        input: 'name'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'variables'
+        value?: any | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'result'
+        value?: string | DataLink | (() => Data) | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'error'
+        value?: string | DataLink | (() => Data) | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Stop running application AI filter */
+export type SystemActionStopApplicationFilter = Action & {
+  __actionName: 'STOP_APPLICATION_FILTER'
+}
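For illustration (not part of the published diff): a hedged sketch of dispatching the new action. The filter name, variables, and mappings are hypothetical; the shape follows the type above.

// Sketch: a SystemActionTriggerApplicationFilter literal.
const triggerFilter = {
  __actionName: 'TRIGGER_APPLICATION_FILTER',
  parent: 'System',
  params: [
    { input: 'name', value: 'summarize-text' },       // which AI filter to run
    { input: 'variables', value: { text: 'Hello' } }, // inputs passed to the filter
    { input: 'result', mapping: 'filterResult' },     // where to map the output
    { input: 'error', mapping: 'filterError' },
  ],
} as const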
package/utils/data.ts CHANGED
@@ -75,6 +75,7 @@ type SystemDataName =
   | 'env'
   | 'isViewDebugModeEnabled'
   | 'language'
+  | 'aiFilters'

 type SystemDataInfo = {
   name: SystemDataName
@@ -432,6 +433,14 @@ export const systemDataList: Array<SystemDataInfo> = [
     type: 'string',
     value: '',
   },
+  {
+    name: 'aiFilters',
+    id: 'PROPERTY_BANK_DATA_NODE_3ffc1e98-8f65-4cc8-a949-3da8d2092ccb',
+    title: 'SYSTEM: AI Filters',
+    description: 'AI filters configuration from the application',
+    type: 'array',
+    value: [],
+  },
 ]

 export const useSystemData = (name: SystemDataName): Data => {
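For illustration (not part of the published diff): the new system data node is read like any other, via the `useSystemData` helper shown above:

// Sketch: reading the application's AI filter configuration (an array by default).
const aiFilters = useSystemData('aiFilters')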
@@ -774,6 +774,33 @@ export const templateEventPropsMap = {
       'GENERATOR_VAD_INFERENCE_DETECTION_TIME', // type: number
     ],
   },
+  GENERATOR_REALTIME_TRANSCRIPTION: {
+    onTranscribe: [
+      'GENERATOR_REALTIME_TRANSCRIPTION_TRANSCRIBE_EVENT', // type: object
+    ],
+    onVad: [
+      'GENERATOR_REALTIME_TRANSCRIPTION_VAD_EVENT', // type: object
+      'GENERATOR_REALTIME_TRANSCRIPTION_VAD_EVENT_TYPE', // type: string
+      'GENERATOR_REALTIME_TRANSCRIPTION_VAD_EVENT_CONFIDENCE', // type: number
+      'GENERATOR_REALTIME_TRANSCRIPTION_VAD_EVENT_DURATION', // type: number
+      'GENERATOR_REALTIME_TRANSCRIPTION_VAD_EVENT_SLICE_INDEX', // type: number
+    ],
+    onError: [
+      'GENERATOR_REALTIME_TRANSCRIPTION_ERROR', // type: string
+    ],
+    onStatusChange: [
+      'GENERATOR_REALTIME_TRANSCRIPTION_IS_ACTIVE', // type: bool
+    ],
+    onStatsUpdate: [
+      'GENERATOR_REALTIME_TRANSCRIPTION_STATS_TYPE', // type: string
+      'GENERATOR_REALTIME_TRANSCRIPTION_STATS_TIMESTAMP', // type: number
+      'GENERATOR_REALTIME_TRANSCRIPTION_STATS', // type: object
+    ],
+    onEnd: [
+      'GENERATOR_REALTIME_TRANSCRIPTION_END_RESULTS', // type: array
+      'GENERATOR_REALTIME_TRANSCRIPTION_END_AUDIO_OUTPUT_PATH', // type: string
+    ],
+  },
   GENERATOR_LLM: {
     onContextStateChange: [
       'GENERATOR_LLM_CONTEXT_STATE', // type: string
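For illustration (not part of the published diff): a hedged sketch of looking up the event props exposed to EventActions on the new generator, straight from the map above:

// Sketch: keys available to an EventAction attached to onTranscribe.
const transcribeProps = templateEventPropsMap.GENERATOR_REALTIME_TRANSCRIPTION.onTranscribe
// -> ['GENERATOR_REALTIME_TRANSCRIPTION_TRANSCRIBE_EVENT'] (type: object)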