@fugood/bricks-project 2.22.0-beta.15 → 2.22.0-beta.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -610,6 +610,26 @@ export const templateActionNameMap = {
       realtimeVadFreqThold: 'GENERATOR_SPEECH_INFERENCE_REALTIME_VAD_FREQ_THOLD',
     },
   },
+  GENERATOR_VAD_INFERENCE: {
+    GENERATOR_VAD_INFERENCE_DETECT_FILE: {
+      fileUrl: 'GENERATOR_VAD_INFERENCE_FILE_URL',
+      threshold: 'GENERATOR_VAD_INFERENCE_THRESHOLD',
+      minSpeechDurationMs: 'GENERATOR_VAD_INFERENCE_MIN_SPEECH_DURATION_MS',
+      minSilenceDurationMs: 'GENERATOR_VAD_INFERENCE_MIN_SILENCE_DURATION_MS',
+      maxSpeechDurationS: 'GENERATOR_VAD_INFERENCE_MAX_SPEECH_DURATION_S',
+      speechPadMs: 'GENERATOR_VAD_INFERENCE_SPEECH_PAD_MS',
+      samplesOverlap: 'GENERATOR_VAD_INFERENCE_SAMPLES_OVERLAP',
+    },
+    GENERATOR_VAD_INFERENCE_DETECT_DATA: {
+      data: 'GENERATOR_VAD_INFERENCE_DATA',
+      threshold: 'GENERATOR_VAD_INFERENCE_THRESHOLD',
+      minSpeechDurationMs: 'GENERATOR_VAD_INFERENCE_MIN_SPEECH_DURATION_MS',
+      minSilenceDurationMs: 'GENERATOR_VAD_INFERENCE_MIN_SILENCE_DURATION_MS',
+      maxSpeechDurationS: 'GENERATOR_VAD_INFERENCE_MAX_SPEECH_DURATION_S',
+      speechPadMs: 'GENERATOR_VAD_INFERENCE_SPEECH_PAD_MS',
+      samplesOverlap: 'GENERATOR_VAD_INFERENCE_SAMPLES_OVERLAP',
+    },
+  },
   GENERATOR_LLM: {
     GENERATOR_LLM_TOKENIZE: {
       mode: 'GENERATOR_LLM_MODE',
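Note: the map's nesting (template key → action name → param key → event-property constant) can be read directly; a minimal lookup sketch, assuming `templateActionNameMap` is importable from the package root (an assumption, not shown in this diff):

    // Hypothetical lookup: resolve a param key of the new detect-file action
    // to its event-property constant (values come from the map above).
    import { templateActionNameMap } from '@fugood/bricks-project'

    const detectFileParams =
      templateActionNameMap.GENERATOR_VAD_INFERENCE.GENERATOR_VAD_INFERENCE_DETECT_FILE
    console.log(detectFileParams.threshold) // 'GENERATOR_VAD_INFERENCE_THRESHOLD'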
@@ -685,6 +705,12 @@ export const templateActionNameMap = {
       text: 'GENERATOR_GGML_TTS_TEXT',
     },
   },
+  GENERATOR_RERANKER: {
+    GENERATOR_RERANKER_RERANK: {
+      query: 'GENERATOR_RERANKER_QUERY',
+      documents: 'GENERATOR_RERANKER_DOCUMENTS',
+    },
+  },
   GENERATOR_QNN_LLM: {
     GENERATOR_QNN_LLM_GENERATE: {
       prompt: 'GENERATOR_QNN_LLM_PROMPT',
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fugood/bricks-project",
-  "version": "2.22.0-beta.15",
+  "version": "2.22.0-beta.16",
   "main": "index.ts",
   "scripts": {
     "build": "node scripts/build.js"
@@ -14,5 +14,5 @@
     "lodash": "^4.17.4",
     "uuid": "^8.3.1"
   },
-  "gitHead": "4995b78120bd6e75df65a302398a9ba3070da3e7"
+  "gitHead": "d8231fa6787b8d95bc2bec94a86ceba596cf57f0"
 }
@@ -1,15 +1,7 @@
 import { $ } from 'bun'
 import { stat, readFile, writeFile } from 'fs/promises'
-const cwd = process.cwd()
-
-const libFiles = ['types', 'utils', 'index.ts']
 
-await $`mkdir -p ${cwd}/project`
-for (const file of libFiles) {
-  await $`cp -r ${__dirname}/../${file} ${cwd}/project`
-}
-
-console.log('Copied files to project/')
+const cwd = process.cwd()
 
 async function exists(f: string) {
   try {
@@ -20,6 +12,21 @@ async function exists(f: string) {
   }
 }
 
+// handle flag --skip-copy-project
+const skipCopyProject = process.argv.includes('--skip-copy-project')
+if (skipCopyProject) {
+  console.log('Skipping copy of files to project/')
+} else {
+
+  const libFiles = ['types', 'utils', 'index.ts']
+
+  await $`mkdir -p ${cwd}/project`
+  for (const file of libFiles) {
+    await $`cp -r ${__dirname}/../${file} ${cwd}/project`
+  }
+  console.log('Copied files to project/')
+}
+
 const projectMcpServer = {
   command: 'bun',
   args: [`${cwd}/node_modules/@fugood/bricks-project/tools/mcp-server.ts`],
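Note: with this change the copy step still runs by default and is skipped only when the new flag is passed; a hypothetical invocation (assuming the script is run with Bun, as its `import { $ } from 'bun'` requires):

    bun scripts/build.js --skip-copy-project   # skip copying types/, utils/, index.ts into project/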
package/types/common.ts CHANGED
@@ -65,7 +65,7 @@ export type ItemBrickID = string
 
 export type EventAction = {
   handler: 'system' | (() => Brick | Generator) | SubspaceID | ItemBrickID
-  action: Action
+  action: ActionWithParams | ActionWithDataParams
   waitAsync?: boolean
 }
 
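Note: widening `action` from `Action` to `ActionWithParams | ActionWithDataParams` lets an event handler carry per-invocation params; a minimal sketch using the `GENERATOR_VAD_INFERENCE_DETECT_FILE` action shape added later in this diff (the `handler` value and param semantics are assumptions):

    // Hypothetical EventAction overriding the detection threshold for one call.
    const onAudioReady: EventAction = {
      handler: 'system',
      action: {
        __actionName: 'GENERATOR_VAD_INFERENCE_DETECT_FILE',
        params: [{ input: 'threshold', value: 0.6 }],
      },
      waitAsync: true,
    }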
@@ -4856,6 +4856,7 @@ Default property:
   "doSample": true,
   "outputType": "play",
   "cacheGenerated": true,
+  "speed": 1,
   "autoInferEnable": false,
   "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
   "hardBreakTime": 500,
@@ -4872,6 +4873,8 @@ Default property:
   | 'BricksDisplay/vits-eng'
   | 'BricksDisplay/vits-cmn'
   | 'BricksDisplay/ellie-Bert-VITS2'
+  | 'onnx-community/Kokoro-82M-v1.0-ONNX'
+  | 'onnx-community/Kokoro-82M-v1.1-zh-ONNX'
   | 'onnx-community/OuteTTS-1.0-0.6B-ONNX'
   | 'mms-tts-ara (NC)'
   | 'mms-tts-deu (NC)'
@@ -4888,7 +4891,14 @@ Default property:
   | 'speecht5_tts'
   | DataLink
   /* Model type */
-  modelType?: 'auto' | 'vits' | 'bert_vits2' | 'speecht5' | 'outetts-1.0' | DataLink
+  modelType?:
+    | 'auto'
+    | 'vits'
+    | 'bert_vits2'
+    | 'style_text_to_speech_2'
+    | 'speecht5'
+    | 'outetts-1.0'
+    | DataLink
   /* Load quantized model (deprecated, use `quantizeType` instead) */
   quantized?: boolean | DataLink
   /* Quantize type */
@@ -4911,7 +4921,7 @@ Default property:
   /* Custom vocoder model
      Choose model from https://huggingface.co/models?library=transformers.js&other=hifigan */
   customVocoderModel?: string | DataLink
-  /* XVector speaker embedding for HiFi-GAN */
+  /* Speaker embedding, for SpeechT5 or StyleTTS (Kokoro) */
   speakerEmbedUrl?: string | DataLink
   /* MD5 checksum of `speakerEmbedUrl` */
   speakerEmbedMd5?: string | DataLink
@@ -4929,6 +4939,8 @@ Default property:
   outputType?: 'play' | 'file' | DataLink
   /* Enable cache for generated audio */
   cacheGenerated?: boolean | DataLink
+  /* Speed of the generated audio, for StyleTTS (Kokoro) */
+  speed?: number | DataLink
   /* Text to generate */
   prompt?: string | DataLink
   /* Auto inference when prompt changes */
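Note: together, these TTS changes add Kokoro (StyleTTS) support: two `onnx-community` model entries, the `style_text_to_speech_2` model type, a repurposed speaker-embedding comment, and the new `speed` property. A hedged property sketch (field names from this diff; all values, including the embedding URL, are hypothetical):

    // Hypothetical TTS generator property block for a Kokoro model.
    const ttsProperty = {
      model: 'onnx-community/Kokoro-82M-v1.0-ONNX',
      modelType: 'style_text_to_speech_2',
      speed: 1.2, // StyleTTS (Kokoro) playback speed, per the new property
      speakerEmbedUrl: 'https://example.com/voices/voice.bin', // hypothetical voice embedding
      outputType: 'play',
    }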
@@ -5909,6 +5921,207 @@ export type GeneratorSpeechInference = Generator &
   >
 }
 
+/* Load the model */
+export type GeneratorVadInferenceActionLoadModel = Action & {
+  __actionName: 'GENERATOR_VAD_INFERENCE_LOAD_MODEL'
+}
+
+/* Detect speech in an audio file. You can provide the `File URL` param; if not provided, the default `File URL` property is used */
+export type GeneratorVadInferenceActionDetectFile = ActionWithParams & {
+  __actionName: 'GENERATOR_VAD_INFERENCE_DETECT_FILE'
+  params?: Array<
+    | {
+        input: 'fileUrl'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'threshold'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'minSpeechDurationMs'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'minSilenceDurationMs'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'maxSpeechDurationS'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'speechPadMs'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'samplesOverlap'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Detect speech in audio data. Currently only base64-encoded audio data (16-bit PCM, mono, 16kHz) is supported */
+export type GeneratorVadInferenceActionDetectData = ActionWithParams & {
+  __actionName: 'GENERATOR_VAD_INFERENCE_DETECT_DATA'
+  params?: Array<
+    | {
+        input: 'data'
+        value?: any | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'threshold'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'minSpeechDurationMs'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'minSilenceDurationMs'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'maxSpeechDurationS'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'speechPadMs'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'samplesOverlap'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Clear downloaded files (model, audio) & current jobs */
+export type GeneratorVadInferenceActionClearDownload = Action & {
+  __actionName: 'GENERATOR_VAD_INFERENCE_CLEAR_DOWNLOAD'
+}
+
+/* Release context */
+export type GeneratorVadInferenceActionReleaseContext = Action & {
+  __actionName: 'GENERATOR_VAD_INFERENCE_RELEASE_CONTEXT'
+}
+
+interface GeneratorVadInferenceDef {
+  /*
+  Default property:
+  {
+    "init": false,
+    "modelName": "silero-v5.1.2",
+    "modelUseGPU": true,
+    "modelThreads": 4,
+    "detectThreshold": 0.5,
+    "detectMinSpeechDurationMs": 250,
+    "detectMinSilenceDurationMs": 100,
+    "detectMaxSpeechDurationS": 30,
+    "detectSpeechPadMs": 30,
+    "detectSamplesOverlap": 0.1
+  }
+  */
+  property?: {
+    /* Initialize the VAD context on generator initialization.
+       Please note that it will take some RAM depending on the model size */
+    init?: boolean | DataLink
+    /* Model name; currently only the Silero VAD model is supported.
+       The model download happens in the preload stage or the generator initialization stage.
+       You can also choose the `custom` option and set `Model URL` and `Model SHA1` to use your own model */
+    modelName?: 'custom' | 'silero-v5.1.2' | DataLink
+    /* The URL or path of the model.
+       A `ggml`-format model is used; please refer to https://huggingface.co/ggml-org/whisper-vad */
+    modelUrl?: string | DataLink
+    /* Hash type of model */
+    modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
+    /* Hash of model */
+    modelHash?: string | DataLink
+    /* Use GPU acceleration for inference. Currently iOS only */
+    modelUseGPU?: boolean | DataLink
+    /* Number of threads to use for processing */
+    modelThreads?: number | DataLink
+    /* Speech probability threshold (0.0-1.0) */
+    detectThreshold?: number | DataLink
+    /* Minimum speech duration in milliseconds */
+    detectMinSpeechDurationMs?: number | DataLink
+    /* Minimum silence duration in milliseconds */
+    detectMinSilenceDurationMs?: number | DataLink
+    /* Maximum speech duration in seconds */
+    detectMaxSpeechDurationS?: number | DataLink
+    /* Padding around speech segments in milliseconds */
+    detectSpeechPadMs?: number | DataLink
+    /* Overlap between analysis windows (0.0-1.0) */
+    detectSamplesOverlap?: number | DataLink
+    /* The file URL or path to be analyzed.
+       Only `wav` format with a 16kHz sample rate and a single (mono) channel is supported */
+    detectFileUrl?: string | DataLink
+    /* MD5 of the file to be analyzed */
+    detectFileMd5?: string | DataLink
+  }
+  events?: {
+    /* Event triggered when load is done */
+    onContextStateChange?: Array<EventAction>
+    /* Event triggered when an error occurs */
+    onError?: Array<EventAction>
+    /* Event triggered when a detection result is available */
+    onDetected?: Array<EventAction>
+  }
+  outlets?: {
+    /* Context state */
+    contextState?: () => Data
+    /* Context details */
+    contextDetails?: () => Data
+    /* Is detecting */
+    isDetecting?: () => Data
+    /* Detection segments result */
+    detectionSegments?: () => Data
+    /* Detection details */
+    detectionDetails?: () => Data
+  }
+}
+
+/* Local Voice Activity Detection (VAD) inference based on GGML and [whisper.rn](https://github.com/mybigday/whisper.rn) */
+export type GeneratorVadInference = Generator &
+  GeneratorVadInferenceDef & {
+    templateKey: 'GENERATOR_VAD_INFERENCE'
+    switches: Array<
+      SwitchDef &
+        GeneratorVadInferenceDef & {
+          conds?: Array<{
+            method: '==' | '!=' | '>' | '<' | '>=' | '<='
+            cond:
+              | SwitchCondInnerStateCurrentCanvas
+              | SwitchCondData
+              | {
+                  __typename: 'SwitchCondInnerStateOutlet'
+                  outlet:
+                    | 'contextState'
+                    | 'contextDetails'
+                    | 'isDetecting'
+                    | 'detectionSegments'
+                    | 'detectionDetails'
+                  value: any
+                }
+          }>
+        }
+    >
+  }
+
 /* Load the model */
 export type GeneratorLLMActionLoadModel = Action & {
   __actionName: 'GENERATOR_LLM_LOAD_MODEL'
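Note: a minimal configuration sketch for the new VAD generator, overriding two detection knobs and relying on the documented defaults for the rest (file URL hypothetical; the surrounding `Generator` fields are elided):

    // Hypothetical GeneratorVadInference property block.
    const vadProperty: GeneratorVadInference['property'] = {
      init: true,                      // load the Silero model at generator init
      modelName: 'silero-v5.1.2',
      detectThreshold: 0.6,            // stricter speech-probability cutoff (default 0.5)
      detectMinSilenceDurationMs: 200, // default 100
      detectFileUrl: 'https://example.com/audio/sample-16k-mono.wav', // 16kHz mono wav
    }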
@@ -6731,6 +6944,149 @@ export type GeneratorGGMLTTS = Generator &
   >
 }
 
+/* Load the model */
+export type GeneratorRerankerActionLoadModel = Action & {
+  __actionName: 'GENERATOR_RERANKER_LOAD_MODEL'
+}
+
+/* Rerank documents based on query relevance */
+export type GeneratorRerankerActionRerank = ActionWithParams & {
+  __actionName: 'GENERATOR_RERANKER_RERANK'
+  params?: Array<
+    | {
+        input: 'query'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'documents'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Release context */
+export type GeneratorRerankerActionReleaseContext = Action & {
+  __actionName: 'GENERATOR_RERANKER_RELEASE_CONTEXT'
+}
+
+interface GeneratorRerankerDef {
+  /*
+  Default property:
+  {
+    "init": false,
+    "contextSize": 512,
+    "batchSize": 512,
+    "uBatchSize": 512,
+    "accelVariant": "default",
+    "mainGpu": 0,
+    "gpuLayers": 0,
+    "useMlock": true,
+    "useMmap": true,
+    "normalize": 1
+  }
+  */
+  property?: {
+    /* Initialize the Reranker context on generator initialization */
+    init?: boolean | DataLink
+    /* The URL or path of the reranker model (GGUF format) */
+    modelUrl?: string | DataLink
+    /* Hash of reranker model */
+    modelHash?: string | DataLink
+    /* Hash type of reranker model */
+    modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
+    /* Context size (0 ~ 4096) (default: 512) */
+    contextSize?: number | DataLink
+    /* Logical batch size for processing (default: 512) */
+    batchSize?: number | DataLink
+    /* Physical maximum batch size (default: 512) */
+    uBatchSize?: number | DataLink
+    /* Accelerator variant (default: default) */
+    accelVariant?:
+      | 'default'
+      | 'avx'
+      | 'avx2'
+      | 'avx512'
+      | 'metal'
+      | 'opencl'
+      | 'vulkan'
+      | 'cuda'
+      | 'rocm'
+      | DataLink
+    /* Main GPU index (default: 0) */
+    mainGpu?: number | DataLink
+    /* Number of layers to store in VRAM (default: 0) */
+    gpuLayers?: number | DataLink
+    /* Maximum number of threads to use (default: auto) */
+    maxThreads?: number | DataLink
+    /* Use mlock to keep the model in memory (default: true) */
+    useMlock?: boolean | DataLink
+    /* Use mmap for model loading (default: true) */
+    useMmap?: boolean | DataLink
+    /* Query text for reranking */
+    query?: string | DataLink
+    /* Array of documents to rerank */
+    documents?: Array<string | DataLink> | DataLink
+    /* Normalize reranking scores (default: from model config) */
+    normalize?: number | boolean | DataLink
+    /* Maximum number of documents to return (default: unlimited) */
+    topK?: number | DataLink
+  }
+  events?: {
+    /* Event triggered when the reranker context state changes (loading, ready, error, released) */
+    onContextStateChange?: Array<EventAction>
+    /* Event triggered when an error occurs during reranker operations */
+    onError?: Array<EventAction>
+  }
+  outlets?: {
+    /* Current state of the reranker context (loading, ready, error, released) */
+    contextState?: () => Data
+    /* Loading progress of the reranker model (0-100) */
+    contextLoadProgress?: () => Data
+    /* Detailed information about the reranker context, including instance ID and processing status */
+    contextDetails?: () => Data
+    /* Result of the reranking operation containing scored and ranked documents */
+    rerankResult?: () => Data
+    /* Boolean indicating whether the reranker is currently processing a request */
+    isProcessing?: () => Data
+  }
+}
+
+/* Local reranking based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
+
+   ## Notice
+   - The device RAM must be larger than 8GB
+   - iOS: GPU acceleration supported; an M1+ / A17+ chip device is recommended
+   - macOS: GPU acceleration supported; an M1+ chip device is recommended
+   - Android: GPU acceleration not yet supported (coming soon); Android 13+ is recommended
+   - Linux / Windows: GPU acceleration supported; currently only the Vulkan backend is available */
+export type GeneratorReranker = Generator &
+  GeneratorRerankerDef & {
+    templateKey: 'GENERATOR_RERANKER'
+    switches: Array<
+      SwitchDef &
+        GeneratorRerankerDef & {
+          conds?: Array<{
+            method: '==' | '!=' | '>' | '<' | '>=' | '<='
+            cond:
+              | SwitchCondInnerStateCurrentCanvas
+              | SwitchCondData
+              | {
+                  __typename: 'SwitchCondInnerStateOutlet'
+                  outlet:
+                    | 'contextState'
+                    | 'contextLoadProgress'
+                    | 'contextDetails'
+                    | 'rerankResult'
+                    | 'isProcessing'
+                  value: any
+                }
+          }>
+        }
+    >
+  }
+
 /* Load the model */
 export type GeneratorQnnLlmActionLoadModel = Action & {
   __actionName: 'GENERATOR_QNN_LLM_LOAD_MODEL'
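Note: a hedged sketch of driving the new reranker: a property block plus a rerank action (model URL and query are hypothetical; the action shape is reduced to the fields shown in this diff):

    // Hypothetical GeneratorReranker property block.
    const rerankerProperty: GeneratorReranker['property'] = {
      init: true,
      modelUrl: 'https://example.com/models/reranker.Q4_K_M.gguf', // hypothetical GGUF model
      contextSize: 512,
      topK: 5, // keep only the five highest-scoring documents
    }

    // Hypothetical rerank action carrying query and documents as params.
    const rerank: GeneratorRerankerActionRerank = {
      __actionName: 'GENERATOR_RERANKER_RERANK',
      params: [
        { input: 'query', value: 'how do I enable GPU layers?' },
        { input: 'documents', value: ['doc A', 'doc B', 'doc C'] },
      ],
    }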
@@ -761,6 +761,19 @@ export const templateEventPropsMap = {
       'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_TIME', // type: number
     ],
   },
+  GENERATOR_VAD_INFERENCE: {
+    onContextStateChange: [
+      'GENERATOR_VAD_INFERENCE_CONTEXT_STATE', // type: string
+      'GENERATOR_VAD_INFERENCE_CONTEXT_DETAILS', // type: object
+    ],
+    onError: [
+      'GENERATOR_VAD_INFERENCE_ERROR', // type: string
+    ],
+    onDetected: [
+      'GENERATOR_VAD_INFERENCE_DETECTION_SEGMENTS', // type: array
+      'GENERATOR_VAD_INFERENCE_DETECTION_TIME', // type: number
+    ],
+  },
   GENERATOR_LLM: {
     onContextStateChange: [
       'GENERATOR_LLM_CONTEXT_STATE', // type: string
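Note: the new entry wires the three VAD events to their payload properties; a quick lookup sketch (values exactly as added above):

    // Properties delivered to an onDetected handler, per templateEventPropsMap:
    const detectedProps = templateEventPropsMap.GENERATOR_VAD_INFERENCE.onDetected
    // ['GENERATOR_VAD_INFERENCE_DETECTION_SEGMENTS', 'GENERATOR_VAD_INFERENCE_DETECTION_TIME']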
@@ -799,6 +812,7 @@ export const templateEventPropsMap = {
       'GENERATOR_GGML_TTS_ERROR', // type: string
     ],
   },
+  GENERATOR_RERANKER: {},
   GENERATOR_QNN_LLM: {
     onContextStateChange: [
       'GENERATOR_QNN_LLM_CONTEXT_STATE', // type: string