@fugood/bricks-project 2.21.8 → 2.21.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -142,6 +142,13 @@ export const templateActionNameMap = {
142
142
  },
143
143
  },
144
144
 
145
+ BRICK_VIDEO: {
146
+ BRICK_VIDEO_SEEK: {
147
+ seekTime: 'BRICK_VIDEO_SEEK_TIME',
148
+ play: 'BRICK_VIDEO_PLAY',
149
+ },
150
+ },
151
+
145
152
  BRICK_SLIDESHOW: {
146
153
  BRICK_SLIDESHOW_JUMP_TO_INDEX: {
147
154
  index: 'BRICK_SLIDESHOW_INDEX',
@@ -231,7 +238,12 @@ export const templateActionNameMap = {
231
238
  index: 'BRICK_ITEMS_INDEX',
232
239
  },
233
240
  },
234
-
241
+ BRICK_LOTTIE: {
242
+ BRICK_LOTTIE_PLAY: {
243
+ startFrame: 'BRICK_LOTTIE_START_FRAME',
244
+ endFrame: 'BRICK_LOTTIE_END_FRAME',
245
+ },
246
+ },
235
247
  BRICK_RIVE: {
236
248
  BRICK_RIVE_PLAY: {
237
249
  animationName: 'BRICK_RIVE_ANIMATION_NAME',
@@ -663,6 +675,11 @@ export const templateActionNameMap = {
663
675
  sessionCustomKey: 'GENERATOR_LLM_SESSION_CUSTOM_KEY',
664
676
  },
665
677
  },
678
+ GENERATOR_GGML_TTS: {
679
+ GENERATOR_GGML_TTS_GENERATE: {
680
+ text: 'GENERATOR_GGML_TTS_TEXT',
681
+ },
682
+ },
666
683
  GENERATOR_QNN_LLM: {
667
684
  GENERATOR_QNN_LLM_GENERATE: {
668
685
  prompt: 'GENERATOR_QNN_LLM_PROMPT',
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@fugood/bricks-project",
3
- "version": "2.21.8",
3
+ "version": "2.21.9",
4
4
  "main": "index.ts",
5
5
  "scripts": {
6
6
  "build": "node scripts/build.js"
package/types/bricks.ts CHANGED
@@ -865,6 +865,43 @@ export type BrickIcon = Brick &
865
865
  >
866
866
  }
867
867
 
868
+ /* Play the video */
869
+ export type BrickVideoActionPlay = Action & {
870
+ __actionName: 'BRICK_VIDEO_PLAY'
871
+ }
872
+
873
+ /* Seek the video */
874
+ export type BrickVideoActionSeek = ActionWithParams & {
875
+ __actionName: 'BRICK_VIDEO_SEEK'
876
+ params?: Array<
877
+ | {
878
+ input: 'seekTime'
879
+ value?: number | DataLink | EventProperty
880
+ mapping?: string
881
+ }
882
+ | {
883
+ input: 'play'
884
+ value?: boolean | DataLink | EventProperty
885
+ mapping?: string
886
+ }
887
+ >
888
+ }
889
+
890
+ /* Pause the video */
891
+ export type BrickVideoActionPause = Action & {
892
+ __actionName: 'BRICK_VIDEO_PAUSE'
893
+ }
894
+
895
+ /* Replay the video */
896
+ export type BrickVideoActionReplay = Action & {
897
+ __actionName: 'BRICK_VIDEO_REPLAY'
898
+ }
899
+
900
+ /* Stop the video */
901
+ export type BrickVideoActionStop = Action & {
902
+ __actionName: 'BRICK_VIDEO_STOP'
903
+ }
904
+
868
905
  interface BrickVideoDef {
869
906
  /*
870
907
  Default property:
@@ -2108,6 +2145,43 @@ export type BrickItems = Brick &
2108
2145
  >
2109
2146
  }
2110
2147
 
2148
+ /* Play animation */
2149
+ export type BrickLottieActionPlay = ActionWithParams & {
2150
+ __actionName: 'BRICK_LOTTIE_PLAY'
2151
+ params?: Array<
2152
+ | {
2153
+ input: 'startFrame'
2154
+ value?: number | DataLink | EventProperty
2155
+ mapping?: string
2156
+ }
2157
+ | {
2158
+ input: 'endFrame'
2159
+ value?: number | DataLink | EventProperty
2160
+ mapping?: string
2161
+ }
2162
+ >
2163
+ }
2164
+
2165
+ /* Pause animation */
2166
+ export type BrickLottieActionPause = Action & {
2167
+ __actionName: 'BRICK_LOTTIE_PAUSE'
2168
+ }
2169
+
2170
+ /* Resume animation */
2171
+ export type BrickLottieActionResume = Action & {
2172
+ __actionName: 'BRICK_LOTTIE_RESUME'
2173
+ }
2174
+
2175
+ /* Stop animation */
2176
+ export type BrickLottieActionStop = Action & {
2177
+ __actionName: 'BRICK_LOTTIE_STOP'
2178
+ }
2179
+
2180
+ /* Reset animation */
2181
+ export type BrickLottieActionReset = Action & {
2182
+ __actionName: 'BRICK_LOTTIE_RESET'
2183
+ }
2184
+
2111
2185
  interface BrickLottieDef {
2112
2186
  /*
2113
2187
  Default property:
@@ -2990,6 +3064,10 @@ Default property:
2990
3064
  type?: 'image' | 'video' | DataLink
2991
3065
  /* Default image to display when no generated image is available */
2992
3066
  defaultImage?: string | DataLink
3067
+ /* The hash of the default image */
3068
+ defaultImageHash?: string | DataLink
3069
+ /* The type of the default image hash */
3070
+ defaultImageHashType?: 'md5' | 'sha1' | 'sha256' | DataLink
2993
3071
  /* The Lottie animation to show while generating */
2994
3072
  loadingAnimation?: string | DataLink
2995
3073
  /* The Lottie animation to show when an error occurs */
@@ -4848,6 +4848,10 @@ Default property:
4848
4848
  "model": "BricksDisplay/vits-eng",
4849
4849
  "modelType": "auto",
4850
4850
  "vocoderModel": "speecht5_hifigan",
4851
+ "maxLength": 4096,
4852
+ "temperature": 0.1,
4853
+ "repetitionPenalty": 1.1,
4854
+ "doSample": true,
4851
4855
  "outputType": "play",
4852
4856
  "cacheGenerated": true,
4853
4857
  "autoInferEnable": false,
@@ -4866,6 +4870,7 @@ Default property:
4866
4870
  | 'BricksDisplay/vits-eng'
4867
4871
  | 'BricksDisplay/vits-cmn'
4868
4872
  | 'BricksDisplay/ellie-Bert-VITS2'
4873
+ | 'onnx-community/OuteTTS-1.0-0.6B-ONNX'
4869
4874
  | 'mms-tts-ara (NC)'
4870
4875
  | 'mms-tts-deu (NC)'
4871
4876
  | 'mms-tts-eng (NC)'
@@ -4881,7 +4886,7 @@ Default property:
4881
4886
  | 'speecht5_tts'
4882
4887
  | DataLink
4883
4888
  /* Model type */
4884
- modelType?: 'auto' | 'vits' | 'bert_vits2' | 'speecht5' | DataLink
4889
+ modelType?: 'auto' | 'vits' | 'bert_vits2' | 'speecht5' | 'outetts-1.0' | DataLink
4885
4890
  /* Load quantized model (deprecated, use `quantizeType` instead) */
4886
4891
  quantized?: boolean | DataLink
4887
4892
  /* Quantize type */
@@ -4908,6 +4913,16 @@ Default property:
4908
4913
  speakerEmbedUrl?: string | DataLink
4909
4914
  /* MD5 checksum of `speakerEmbedUrl` */
4910
4915
  speakerEmbedMd5?: string | DataLink
4916
+ /* Speaker config, for OuteTTS model */
4917
+ speakerConfig?: {} | DataLink
4918
+ /* Audio token generation max length */
4919
+ maxLength?: number | DataLink
4920
+ /* Audio token generation temperature */
4921
+ temperature?: number | DataLink
4922
+ /* Audio token generation repetition penalty */
4923
+ repetitionPenalty?: number | DataLink
4924
+ /* Enable sampling for audio token generation (greedy decoding when disabled) */
4925
+ doSample?: boolean | DataLink
4911
4926
  /* Output mode */
4912
4927
  outputType?: 'play' | 'file' | DataLink
4913
4928
  /* Enable cache for generated audio */
@@ -6385,7 +6400,7 @@ Default property:
6385
6400
  /* Prompt (text mode) */
6386
6401
  completionPrompt?: string | DataLink
6387
6402
  /* Media paths to be used in the prompt template (PREVIEW FEATURE)
6388
- In prompt, use `<__image__>` for position of media content */
6403
+ In prompt, use `<__media__>` for position of media content */
6389
6404
  completionPromptMediaPaths?: Array<string | DataLink> | DataLink
6390
6405
  /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
6391
6406
  completionPromptTemplateData?: {} | DataLink
@@ -6542,6 +6557,165 @@ export type GeneratorLLM = Generator &
6542
6557
  >
6543
6558
  }
6544
6559
 
6560
+ /* Load the model */
6561
+ export type GeneratorGGMLTTSActionLoadModel = Action & {
6562
+ __actionName: 'GENERATOR_GGML_TTS_LOAD_MODEL'
6563
+ }
6564
+
6565
+ /* Generate audio */
6566
+ export type GeneratorGGMLTTSActionGenerate = ActionWithParams & {
6567
+ __actionName: 'GENERATOR_GGML_TTS_GENERATE'
6568
+ params?: Array<{
6569
+ input: 'text'
6570
+ value?: string | DataLink | EventProperty
6571
+ mapping?: string
6572
+ }>
6573
+ }
6574
+
6575
+ /* Clean cache */
6576
+ export type GeneratorGGMLTTSActionCleanCache = Action & {
6577
+ __actionName: 'GENERATOR_GGML_TTS_CLEAN_CACHE'
6578
+ }
6579
+
6580
+ /* Release context */
6581
+ export type GeneratorGGMLTTSActionReleaseContext = Action & {
6582
+ __actionName: 'GENERATOR_GGML_TTS_RELEASE_CONTEXT'
6583
+ }
6584
+
6585
+ interface GeneratorGGMLTTSDef {
6586
+ /*
6587
+ Default property:
6588
+ {
6589
+ "vocoderUrl": "https://huggingface.co/ggml-org/WavTokenizer/resolve/main/WavTokenizer-Large-75-F16.gguf",
6590
+ "vocoderHashType": "sha256",
6591
+ "vocoderHash": "2356baa8631cc2995ea3465196a017a2733600d849a91180c0f97fa7fb375bbe",
6592
+ "outputType": "play",
6593
+ "cacheGenerated": true,
6594
+ "autoInferEnable": false,
6595
+ "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
6596
+ "hardBreakTime": 500,
6597
+ "completionTemperature": 0.1,
6598
+ "completionRepetitionPenalty": 1.1,
6599
+ "completionTopK": 40,
6600
+ "completionTopP": 0.9,
6601
+ "completionMinP": 0.05,
6602
+ "useGuideToken": false,
6603
+ "contextSize": 4096,
6604
+ "batchSize": 512,
6605
+ "maxThreads": 1,
6606
+ "accelVariant": "default",
6607
+ "mainGpu": 0,
6608
+ "gpuLayers": 0,
6609
+ "useMlock": true,
6610
+ "useMmap": true,
6611
+ "useFlashAttn": false
6612
+ }
6613
+ */
6614
+ property?: {
6615
+ /* Initialize the TTS context on generator initialization */
6616
+ init?: boolean | DataLink
6617
+ /* The URL or path of model
6618
+ We use the GGUF format model; please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
6619
+ modelUrl?: string | DataLink
6620
+ /* Hash type of model */
6621
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6622
+ /* Hash of model */
6623
+ modelHash?: string | DataLink
6624
+ /* The URL or path of vocoder model */
6625
+ vocoderUrl?: string | DataLink
6626
+ /* Hash type of vocoder model */
6627
+ vocoderHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
6628
+ /* Hash of vocoder model */
6629
+ vocoderHash?: string | DataLink
6630
+ /* Output mode */
6631
+ outputType?: 'play' | 'file' | DataLink
6632
+ /* Enable cache for generated audio */
6633
+ cacheGenerated?: boolean | DataLink
6634
+ /* Text to generate */
6635
+ prompt?: string | DataLink
6636
+ /* Speaker JSON */
6637
+ speaker?: {} | DataLink
6638
+ /* Auto inference when prompt changes */
6639
+ autoInferEnable?: boolean | DataLink
6640
+ /* Segmentation rule for auto inference */
6641
+ softBreakRegex?: string | DataLink
6642
+ /* Time to force inference when softBreakRegex is not satisfied */
6643
+ hardBreakTime?: number | DataLink
6644
+ /* Temperature */
6645
+ completionTemperature?: number | DataLink
6646
+ /* Repetition Penalty */
6647
+ completionRepetitionPenalty?: number | DataLink
6648
+ /* Top K sampling */
6649
+ completionTopK?: number | DataLink
6650
+ /* Top P sampling */
6651
+ completionTopP?: number | DataLink
6652
+ /* Min P sampling */
6653
+ completionMinP?: number | DataLink
6654
+ /* Set the random number generator (RNG) seed (default: -1, -1 = random seed) */
6655
+ completionSeed?: number | DataLink
6656
+ /* Enable guide token to help prevent hallucinations by forcing the TTS to use the correct words. */
6657
+ useGuideToken?: boolean | DataLink
6658
+ /* Context size; for OuteTTS 4096 ~ 8192 is recommended (defaults to 4096) */
6659
+ contextSize?: number | DataLink
6660
+ /* Logical batch size for prompt processing */
6661
+ batchSize?: number | DataLink
6662
+ /* Number of threads */
6663
+ maxThreads?: number | DataLink
6664
+ /* Accelerator variant (Only for desktop)
6665
+ `default` - CPU / Metal (macOS)
6666
+ `vulkan` - Use Vulkan
6667
+ `cuda` - Use CUDA */
6668
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
6669
+ /* Main GPU index */
6670
+ mainGpu?: number | DataLink
6671
+ /* Number of GPU layers (NOTE: Currently not supported for Android) */
6672
+ gpuLayers?: number | DataLink
6673
+ /* Use memory lock */
6674
+ useMlock?: boolean | DataLink
6675
+ /* Use mmap */
6676
+ useMmap?: boolean | DataLink
6677
+ /* Use Flash Attention for inference (Recommended with GPU enabled) */
6678
+ useFlashAttn?: boolean | DataLink
6679
+ }
6680
+ events?: {
6681
+ /* Event triggered when state change */
6682
+ onContextStateChange?: Array<EventAction>
6683
+ /* Event triggered when error occurs */
6684
+ onError?: Array<EventAction>
6685
+ }
6686
+ outlets?: {
6687
+ /* Context state */
6688
+ contextState?: () => Data
6689
+ /* Generated audio file */
6690
+ generatedAudio?: () => Data
6691
+ /* Generated audio file is playing */
6692
+ generatedAudioPlaying?: () => Data
6693
+ }
6694
+ }
6695
+
6696
+ /* Local Text-to-Speech (TTS) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
6697
+ You can use any converted model on HuggingFace. */
6698
+ export type GeneratorGGMLTTS = Generator &
6699
+ GeneratorGGMLTTSDef & {
6700
+ templateKey: 'GENERATOR_GGML_TTS'
6701
+ switches: Array<
6702
+ SwitchDef &
6703
+ GeneratorGGMLTTSDef & {
6704
+ conds?: Array<{
6705
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
6706
+ cond:
6707
+ | SwitchCondInnerStateCurrentCanvas
6708
+ | SwitchCondData
6709
+ | {
6710
+ __typename: 'SwitchCondInnerStateOutlet'
6711
+ outlet: 'contextState' | 'generatedAudio' | 'generatedAudioPlaying'
6712
+ value: any
6713
+ }
6714
+ }>
6715
+ }
6716
+ >
6717
+ }
6718
+
6545
6719
  /* Load the model */
6546
6720
  export type GeneratorQnnLlmActionLoadModel = Action & {
6547
6721
  __actionName: 'GENERATOR_QNN_LLM_LOAD_MODEL'
@@ -806,6 +806,14 @@ export const templateEventPropsMap = {
806
806
  'GENERATOR_LLM_COMPLETION_FUNCTION_DETAILS', // type: object
807
807
  ],
808
808
  },
809
+ GENERATOR_GGML_TTS: {
810
+ onContextStateChange: [
811
+ 'GENERATOR_GGML_TTS_CONTEXT_STATE', // type: string
812
+ ],
813
+ onError: [
814
+ 'GENERATOR_GGML_TTS_ERROR', // type: string
815
+ ],
816
+ },
809
817
  GENERATOR_QNN_LLM: {
810
818
  onContextStateChange: [
811
819
  'GENERATOR_QNN_LLM_CONTEXT_STATE', // type: string