@fugood/bricks-project 2.22.0-beta.17 → 2.22.0-beta.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -129,6 +129,12 @@ export const templateActionNameMap = {
129
129
  behavior: 'BEHAVIOR',
130
130
  exceptionMessage: 'EXCEPTION_MESSAGE',
131
131
  },
132
+ TRIGGER_APPLICATION_FILTER: {
133
+ name: 'NAME',
134
+ variables: 'VARIABLES',
135
+ result: 'RESULT',
136
+ error: 'ERROR',
137
+ },
132
138
  },
133
139
 
134
140
  BRICK_TEXT_INPUT: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@fugood/bricks-project",
3
- "version": "2.22.0-beta.17",
3
+ "version": "2.22.0-beta.19",
4
4
  "main": "index.ts",
5
5
  "scripts": {
6
6
  "build": "node scripts/build.js"
@@ -14,5 +14,5 @@
14
14
  "lodash": "^4.17.4",
15
15
  "uuid": "^8.3.1"
16
16
  },
17
- "gitHead": "5e8893af505fa5a7b1a902e11764c1f9ebb36a4f"
17
+ "gitHead": "584da836043d75818a8c629a61edb10cb6dc0d3c"
18
18
  }
@@ -5519,6 +5519,7 @@ interface GeneratorSpeechInferenceDef {
5519
5519
  Default property:
5520
5520
  {
5521
5521
  "init": false,
5522
+ "accelVariant": "default",
5522
5523
  "modelName": "base-q8_0",
5523
5524
  "modelUseCoreML": false,
5524
5525
  "modelUseGPU": true,
@@ -5537,6 +5538,11 @@ Default property:
5537
5538
  /* Initialize the Whisper context on generator initialization
5538
5539
  Please note that it will take some RAM depending on the model size */
5539
5540
  init?: boolean | DataLink
5541
+ /* Accelerator variant (Only for desktop)
5542
+ `default` - CPU / Metal (macOS)
5543
+ `vulkan` - Use Vulkan
5544
+ `cuda` - Use CUDA */
5545
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
5540
5546
  /* Use model name, the model download progress will be done in preload stage or the generator initialization stage.
5541
5547
  We use the `ggml` format model; please refer to https://huggingface.co/BricksDisplay/whisper-ggml
5542
5548
  You can also choose `custom` option and set `Model URL` and `Model MD5` to use your own model */
@@ -5774,7 +5780,13 @@ Default property:
5774
5780
  }
5775
5781
  }
5776
5782
 
5777
- /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) */
5783
+ /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp)
5784
+
5785
+ ## Notice
5786
+ - iOS: GPU acceleration supported; recommended to use an M1+ / A17+ chip device
5787
+ - macOS: GPU acceleration supported; recommended to use an M1+ chip device
5788
+ - Android: GPU acceleration not currently supported (coming soon); recommended to use an Android 13+ system
5789
+ - Linux / Windows: GPU acceleration supported; you can choose the `vulkan` or `cuda` backend in the Accel Variant property */
5778
5790
  export type GeneratorSpeechInference = Generator &
5779
5791
  GeneratorSpeechInferenceDef & {
5780
5792
  templateKey: 'GENERATOR_SPEECH_INFERENCE'
@@ -6795,7 +6807,7 @@ Default property:
6795
6807
  - iOS: GPU acceleration supported; recommended to use an M1+ / A17+ chip device
6796
6808
  - macOS: GPU acceleration supported; recommended to use an M1+ chip device
6797
6809
  - Android: GPU acceleration not currently supported (coming soon); recommended to use an Android 13+ system
6798
- - Linux / Windows: Supported GPU acceleration, currently only Vulkan backend available */
6810
+ - Linux / Windows: GPU acceleration supported; you can choose the `vulkan` or `cuda` backend in the Accel Variant property */
6799
6811
  export type GeneratorLLM = Generator &
6800
6812
  GeneratorLLMDef & {
6801
6813
  templateKey: 'GENERATOR_LLM'
@@ -6861,6 +6873,7 @@ Default property:
6861
6873
  "vocoderUrl": "https://huggingface.co/ggml-org/WavTokenizer/resolve/main/WavTokenizer-Large-75-F16.gguf",
6862
6874
  "vocoderHashType": "sha256",
6863
6875
  "vocoderHash": "2356baa8631cc2995ea3465196a017a2733600d849a91180c0f97fa7fb375bbe",
6876
+ "vocoderBatchSize": 4096,
6864
6877
  "outputType": "play",
6865
6878
  "cacheGenerated": true,
6866
6879
  "autoInferEnable": false,
@@ -6872,9 +6885,10 @@ Default property:
6872
6885
  "completionTopP": 0.9,
6873
6886
  "completionMinP": 0.05,
6874
6887
  "useGuideToken": false,
6875
- "contextSize": 4096,
6876
- "batchSize": 512,
6877
- "maxThreads": 1,
6888
+ "contextSize": 8192,
6889
+ "batchSize": 8192,
6890
+ "microBatchSize": 512,
6891
+ "maxThreads": 2,
6878
6892
  "accelVariant": "default",
6879
6893
  "mainGpu": 0,
6880
6894
  "gpuLayers": 0,
@@ -6899,6 +6913,8 @@ Default property:
6899
6913
  vocoderHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6900
6914
  /* Hash of vocoder model */
6901
6915
  vocoderHash?: string | DataLink
6916
+ /* Batch size of vocoder model */
6917
+ vocoderBatchSize?: number | DataLink
6902
6918
  /* Output mode */
6903
6919
  outputType?: 'play' | 'file' | DataLink
6904
6920
  /* Enable cache for generated audio */
@@ -6925,12 +6941,16 @@ Default property:
6925
6941
  completionMinP?: number | DataLink
6926
6942
  /* Set the random number generator (RNG) seed (default: -1, -1 = random seed) */
6927
6943
  completionSeed?: number | DataLink
6944
+ /* Number of tokens to predict */
6945
+ completionPredict?: number | DataLink
6928
6946
  /* Enable guide token to help prevent hallucinations by forcing the TTS to use the correct words. */
6929
6947
  useGuideToken?: boolean | DataLink
6930
6948
  /* Context size, for OutTTS recommended 4096 ~ 8192 (Default to 4096) */
6931
6949
  contextSize?: number | DataLink
6932
6950
  /* Logical batch size for prompt processing */
6933
6951
  batchSize?: number | DataLink
6952
+ /* Physical batch size for prompt processing */
6953
+ microBatchSize?: number | DataLink
6934
6954
  /* Number of threads */
6935
6955
  maxThreads?: number | DataLink
6936
6956
  /* Accelerator variant (Only for desktop)
package/types/system.ts CHANGED
@@ -609,3 +609,35 @@ export type SystemActionThrowException = ActionWithParams & {
609
609
  export type SystemActionDismissKeyboard = Action & {
610
610
  __actionName: 'DISMISS_KEYBOARD'
611
611
  }
612
+
613
+ /* Trigger application AI filter */
614
+ export type SystemActionTriggerApplicationFilter = ActionWithParams & {
615
+ __actionName: 'TRIGGER_APPLICATION_FILTER'
616
+ params?: Array<
617
+ | {
618
+ input: 'name'
619
+ value?: string | DataLink | EventProperty
620
+ mapping?: string
621
+ }
622
+ | {
623
+ input: 'variables'
624
+ value?: any | EventProperty
625
+ mapping?: string
626
+ }
627
+ | {
628
+ input: 'result'
629
+ value?: string | DataLink | (() => Data) | EventProperty
630
+ mapping?: string
631
+ }
632
+ | {
633
+ input: 'error'
634
+ value?: string | DataLink | (() => Data) | EventProperty
635
+ mapping?: string
636
+ }
637
+ >
638
+ }
639
+
640
+ /* Stop running application AI filter */
641
+ export type SystemActionStopApplicationFilter = Action & {
642
+ __actionName: 'STOP_APPLICATION_FILTER'
643
+ }
package/utils/data.ts CHANGED
@@ -75,6 +75,7 @@ type SystemDataName =
75
75
  | 'env'
76
76
  | 'isViewDebugModeEnabled'
77
77
  | 'language'
78
+ | 'aiFilters'
78
79
 
79
80
  type SystemDataInfo = {
80
81
  name: SystemDataName
@@ -432,6 +433,14 @@ export const systemDataList: Array<SystemDataInfo> = [
432
433
  type: 'string',
433
434
  value: '',
434
435
  },
436
+ {
437
+ name: 'aiFilters',
438
+ id: 'PROPERTY_BANK_DATA_NODE_3ffc1e98-8f65-4cc8-a949-3da8d2092ccb',
439
+ title: 'SYSTEM: AI Filters',
440
+ description: 'AI filters configuration from the application',
441
+ type: 'array',
442
+ value: [],
443
+ },
435
444
  ]
436
445
 
437
446
  export const useSystemData = (name: SystemDataName): Data => {