@fugood/bricks-project 2.22.0-beta.13 → 2.22.0-beta.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -100,6 +100,9 @@ export const templateActionNameMap = {
  payloadType: 'CHANNEL_PUBLISH_PAYLOAD_TYPE',
  payload: 'CHANNEL_PUBLISH_PAYLOAD',
  },
+ DELAY: {
+ time: 'DELAY_TIME',
+ },
  USE_SHARE_APPLICATION: {
  applicationId: 'APPLICATION_ID',
  releaseVersion: 'RELEASE_VERSION',
@@ -142,6 +145,13 @@ export const templateActionNameMap = {
  },
  },
 
+ BRICK_VIDEO: {
+ BRICK_VIDEO_SEEK: {
+ seekTime: 'BRICK_VIDEO_SEEK_TIME',
+ play: 'BRICK_VIDEO_PLAY',
+ },
+ },
+
  BRICK_SLIDESHOW: {
  BRICK_SLIDESHOW_JUMP_TO_INDEX: {
  index: 'BRICK_SLIDESHOW_INDEX',
@@ -231,7 +241,12 @@ export const templateActionNameMap = {
  index: 'BRICK_ITEMS_INDEX',
  },
  },
-
+ BRICK_LOTTIE: {
+ BRICK_LOTTIE_PLAY: {
+ startFrame: 'BRICK_LOTTIE_START_FRAME',
+ endFrame: 'BRICK_LOTTIE_END_FRAME',
+ },
+ },
  BRICK_RIVE: {
  BRICK_RIVE_PLAY: {
  animationName: 'BRICK_RIVE_ANIMATION_NAME',
@@ -599,7 +614,7 @@ export const templateActionNameMap = {
  GENERATOR_LLM_TOKENIZE: {
  mode: 'GENERATOR_LLM_MODE',
  prompt: 'GENERATOR_LLM_PROMPT',
- promptImagePaths: 'GENERATOR_LLM_PROMPT_IMAGE_PATHS',
+ promptMediaPaths: 'GENERATOR_LLM_PROMPT_MEDIA_PATHS',
  messages: 'GENERATOR_LLM_MESSAGES',
  },
  GENERATOR_LLM_DETOKENIZE: {
@@ -612,8 +627,9 @@ export const templateActionNameMap = {
  tools: 'GENERATOR_LLM_TOOLS',
  parallelToolCalls: 'GENERATOR_LLM_PARALLEL_TOOL_CALLS',
  toolChoice: 'GENERATOR_LLM_TOOL_CHOICE',
+ enableThinking: 'GENERATOR_LLM_ENABLE_THINKING',
  prompt: 'GENERATOR_LLM_PROMPT',
- promptImagePaths: 'GENERATOR_LLM_PROMPT_IMAGE_PATHS',
+ promptMediaPaths: 'GENERATOR_LLM_PROMPT_MEDIA_PATHS',
  promptTemplateData: 'GENERATOR_LLM_PROMPT_TEMPLATE_DATA',
  promptTemplateType: 'GENERATOR_LLM_PROMPT_TEMPLATE_TYPE',
  responseFormat: 'GENERATOR_LLM_RESPONSE_FORMAT',
@@ -625,8 +641,9 @@ export const templateActionNameMap = {
  tools: 'GENERATOR_LLM_TOOLS',
  parallelToolCalls: 'GENERATOR_LLM_PARALLEL_TOOL_CALLS',
  toolChoice: 'GENERATOR_LLM_TOOL_CHOICE',
+ enableThinking: 'GENERATOR_LLM_ENABLE_THINKING',
  prompt: 'GENERATOR_LLM_PROMPT',
- promptImagePaths: 'GENERATOR_LLM_PROMPT_IMAGE_PATHS',
+ promptMediaPaths: 'GENERATOR_LLM_PROMPT_MEDIA_PATHS',
  promptTemplateData: 'GENERATOR_LLM_PROMPT_TEMPLATE_DATA',
  promptTemplateType: 'GENERATOR_LLM_PROMPT_TEMPLATE_TYPE',
  responseFormat: 'GENERATOR_LLM_RESPONSE_FORMAT',
@@ -663,6 +680,11 @@ export const templateActionNameMap = {
  sessionCustomKey: 'GENERATOR_LLM_SESSION_CUSTOM_KEY',
  },
  },
+ GENERATOR_GGML_TTS: {
+ GENERATOR_GGML_TTS_GENERATE: {
+ text: 'GENERATOR_GGML_TTS_TEXT',
+ },
+ },
  GENERATOR_QNN_LLM: {
  GENERATOR_QNN_LLM_GENERATE: {
  prompt: 'GENERATOR_QNN_LLM_PROMPT',
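
Note: templateActionNameMap resolves an action's input keys to the template parameter constants used elsewhere in the project. A minimal lookup sketch for the new GENERATOR_GGML_TTS entry, assuming it sits at the same template → action → input nesting as the neighbouring GENERATOR_* entries (the map's module path is not shown in this diff, so no import is given):

// 'GENERATOR_GGML_TTS_TEXT' is the template param behind the generate action's `text` input
const textParam =
  templateActionNameMap.GENERATOR_GGML_TTS.GENERATOR_GGML_TTS_GENERATE.text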
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@fugood/bricks-project",
- "version": "2.22.0-beta.13",
+ "version": "2.22.0-beta.15",
  "main": "index.ts",
  "scripts": {
  "build": "node scripts/build.js"
@@ -14,5 +14,5 @@
  "lodash": "^4.17.4",
  "uuid": "^8.3.1"
  },
- "gitHead": "0039a8fa08e89411caf8608a98ae80d5cca64cbe"
+ "gitHead": "4995b78120bd6e75df65a302398a9ba3070da3e7"
  }
package/types/bricks.ts CHANGED
@@ -865,6 +865,43 @@ export type BrickIcon = Brick &
  >
  }
 
+ /* Play the video */
+ export type BrickVideoActionPlay = Action & {
+ __actionName: 'BRICK_VIDEO_PLAY'
+ }
+
+ /* Seek the video */
+ export type BrickVideoActionSeek = ActionWithParams & {
+ __actionName: 'BRICK_VIDEO_SEEK'
+ params?: Array<
+ | {
+ input: 'seekTime'
+ value?: number | DataLink | EventProperty
+ mapping?: string
+ }
+ | {
+ input: 'play'
+ value?: boolean | DataLink | EventProperty
+ mapping?: string
+ }
+ >
+ }
+
+ /* Pause the video */
+ export type BrickVideoActionPause = Action & {
+ __actionName: 'BRICK_VIDEO_PAUSE'
+ }
+
+ /* Replay the video */
+ export type BrickVideoActionReplay = Action & {
+ __actionName: 'BRICK_VIDEO_REPLAY'
+ }
+
+ /* Stop the video */
+ export type BrickVideoActionStop = Action & {
+ __actionName: 'BRICK_VIDEO_STOP'
+ }
+
  interface BrickVideoDef {
  /*
  Default property:
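
Note: a minimal sketch of a seek action object following the new BrickVideoActionSeek shape; values are illustrative, and fields inherited from the ActionWithParams base (not shown in this diff) are omitted:

// seek to 12.5 (the time unit is not stated in this diff) and resume playback
const seek = {
  __actionName: 'BRICK_VIDEO_SEEK',
  params: [
    { input: 'seekTime', value: 12.5 },
    { input: 'play', value: true },
  ],
}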
@@ -2108,6 +2145,43 @@ export type BrickItems = Brick &
  >
  }
 
+ /* Play animation */
+ export type BrickLottieActionPlay = ActionWithParams & {
+ __actionName: 'BRICK_LOTTIE_PLAY'
+ params?: Array<
+ | {
+ input: 'startFrame'
+ value?: number | DataLink | EventProperty
+ mapping?: string
+ }
+ | {
+ input: 'endFrame'
+ value?: number | DataLink | EventProperty
+ mapping?: string
+ }
+ >
+ }
+
+ /* Pause animation */
+ export type BrickLottieActionPause = Action & {
+ __actionName: 'BRICK_LOTTIE_PAUSE'
+ }
+
+ /* Resume animation */
+ export type BrickLottieActionResume = Action & {
+ __actionName: 'BRICK_LOTTIE_RESUME'
+ }
+
+ /* Stop animation */
+ export type BrickLottieActionStop = Action & {
+ __actionName: 'BRICK_LOTTIE_STOP'
+ }
+
+ /* Reset animation */
+ export type BrickLottieActionReset = Action & {
+ __actionName: 'BRICK_LOTTIE_RESET'
+ }
+
  interface BrickLottieDef {
  /*
  Default property:
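
Note: the Lottie play action follows the same params pattern; a minimal sketch with illustrative frame values (base Action fields are again omitted because they are not part of this diff):

// play a segment of the animation between two frames
const playSegment = {
  __actionName: 'BRICK_LOTTIE_PLAY',
  params: [
    { input: 'startFrame', value: 0 },
    { input: 'endFrame', value: 60 },
  ],
}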
@@ -2990,6 +3064,10 @@ Default property:
  type?: 'image' | 'video' | DataLink
  /* Default image to display when no generated image is available */
  defaultImage?: string | DataLink
+ /* The hash of the default image */
+ defaultImageHash?: string | DataLink
+ /* The type of the default image hash */
+ defaultImageHashType?: 'md5' | 'sha1' | 'sha256' | DataLink
  /* The Lottie animation to show while generating */
  loadingAnimation?: string | DataLink
  /* The Lottie animation to show when an error occurs */
@@ -182,8 +182,8 @@ export type GeneratorFileActionReadContent = ActionWithParams & {
  }
 
  /* Delete */
- export type GeneratorFileActionGeneratorDeleteFile = Action & {
- __actionName: 'GENERATOR_DELETE_FILE'
+ export type GeneratorFileActionDelete = Action & {
+ __actionName: 'GENERATOR_FILE_DELETE'
  }
 
  /* Append (Currently only support text file) */
@@ -4850,6 +4850,10 @@ Default property:
  "model": "BricksDisplay/vits-eng",
  "modelType": "auto",
  "vocoderModel": "speecht5_hifigan",
+ "maxLength": 4096,
+ "temperature": 0.1,
+ "repetitionPenalty": 1.1,
+ "doSample": true,
  "outputType": "play",
  "cacheGenerated": true,
  "autoInferEnable": false,
@@ -4868,6 +4872,7 @@ Default property:
  | 'BricksDisplay/vits-eng'
  | 'BricksDisplay/vits-cmn'
  | 'BricksDisplay/ellie-Bert-VITS2'
+ | 'onnx-community/OuteTTS-1.0-0.6B-ONNX'
  | 'mms-tts-ara (NC)'
  | 'mms-tts-deu (NC)'
  | 'mms-tts-eng (NC)'
@@ -4883,7 +4888,7 @@ Default property:
  | 'speecht5_tts'
  | DataLink
  /* Model type */
- modelType?: 'auto' | 'vits' | 'bert_vits2' | 'speecht5' | DataLink
+ modelType?: 'auto' | 'vits' | 'bert_vits2' | 'speecht5' | 'outetts-1.0' | DataLink
  /* Load quantized model (deprecated, use `quantizeType` instead) */
  quantized?: boolean | DataLink
  /* Quantize type */
@@ -4910,6 +4915,16 @@ Default property:
  speakerEmbedUrl?: string | DataLink
  /* MD5 checksum of `speakerEmbedUrl` */
  speakerEmbedMd5?: string | DataLink
+ /* Speaker config, for OuteTTS model */
+ speakerConfig?: {} | DataLink
+ /* Audio token generation max length */
+ maxLength?: number | DataLink
+ /* Audio token generation temperature */
+ temperature?: number | DataLink
+ /* Audio token generation repetition penalty */
+ repetitionPenalty?: number | DataLink
+ /* Use greedy sampling for audio token generation */
+ doSample?: boolean | DataLink
  /* Output mode */
  outputType?: 'play' | 'file' | DataLink
  /* Enable cache for generated audio */
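
Note: the new fields pair with the OuteTTS model option added above; a hedged sketch of the relevant slice of a property block (numeric values mirror the new defaults in this diff, everything else is illustrative):

const outeTtsPropertySlice = {
  model: 'onnx-community/OuteTTS-1.0-0.6B-ONNX',
  modelType: 'outetts-1.0',
  speakerConfig: {}, // speaker config object for the OuteTTS model; its shape is not documented in this diff
  maxLength: 4096,
  temperature: 0.1,
  repetitionPenalty: 1.1,
  doSample: true,
  outputType: 'play',
}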
@@ -5094,24 +5109,19 @@ Default property:
  /* Model type */
  modelType?:
  | 'auto'
- | 'gpt2'
- | 'gptj'
- | 'gpt_bigcode'
- | 'gpt_neo'
- | 'gpt_neox'
- | 'bloom'
- | 'mpt'
- | 'opt'
- | 'llama'
- | 'falcon'
- | 'mistral'
+ | 'text-generation'
+ | 'qwen2-vl'
+ | 'paligemma'
+ | 'llava'
+ | 'llava_onevision'
+ | 'moondream1'
+ | 'florence2'
+ | 'idefics3'
+ | 'smolvlm'
+ | 'phi3_v'
  | 't5'
  | 'mt5'
  | 'longt5'
- | 'phi'
- | 'qwen2'
- | 'stablelm'
- | 'gemma'
  | DataLink
  /* Load quantized model (deprecated, use `quantizeType` instead) */
  quantized?: boolean | DataLink
@@ -5924,7 +5934,7 @@ export type GeneratorLLMActionTokenize = ActionWithParams & {
  mapping?: string
  }
  | {
- input: 'promptImagePaths'
+ input: 'promptMediaPaths'
  value?: Array<any> | DataLink | EventProperty
  mapping?: string
  }
@@ -5980,13 +5990,18 @@ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'enableThinking'
+ value?: boolean | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'prompt'
  value?: string | DataLink | EventProperty
  mapping?: string
  }
  | {
- input: 'promptImagePaths'
+ input: 'promptMediaPaths'
  value?: Array<any> | DataLink | EventProperty
  mapping?: string
  }
@@ -6042,13 +6057,18 @@ export type GeneratorLLMActionCompletion = ActionWithParams & {
  value?: string | DataLink | EventProperty
  mapping?: string
  }
+ | {
+ input: 'enableThinking'
+ value?: boolean | DataLink | EventProperty
+ mapping?: string
+ }
  | {
  input: 'prompt'
  value?: string | DataLink | EventProperty
  mapping?: string
  }
  | {
- input: 'promptImagePaths'
+ input: 'promptMediaPaths'
  value?: Array<any> | DataLink | EventProperty
  mapping?: string
  }
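
Note: the completion action now accepts an `enableThinking` input and the renamed `promptMediaPaths` input; a minimal params sketch using only shapes shown in this diff (the `<__media__>` placeholder follows the completionPrompt comment later in this diff, and the file path is illustrative):

// params for a GeneratorLLMActionCompletion action; the surrounding action fields are outside this hunk
const completionParams = [
  { input: 'enableThinking', value: false },
  { input: 'prompt', value: 'Describe this clip: <__media__>' },
  { input: 'promptMediaPaths', value: ['/path/to/clip.mp4'] },
]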
@@ -6266,6 +6286,7 @@ Default property:
  "completionMode": "auto",
  "completionPrompt": "",
  "completionPromptTemplateType": "${}",
+ "completionEnableThinking": true,
  "completionStopWords": [
  "</s>",
  "<|end|>",
@@ -6391,9 +6412,9 @@ Default property:
  | DataLink
  /* Prompt (text mode) */
  completionPrompt?: string | DataLink
- /* Image paths to be used in the prompt template (PREVIEW FEATURE)
- In prompt, use `<__image__>` for position of image content */
- completionPromptImagePaths?: Array<string | DataLink> | DataLink
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
+ In prompt, use `<__media__>` for position of media content */
+ completionPromptMediaPaths?: Array<string | DataLink> | DataLink
  /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
  completionPromptTemplateData?: {} | DataLink
  /* The prompt template type */
@@ -6411,6 +6432,8 @@ Default property:
  }
  schema?: {} | DataLink
  }
+ /* Enable thinking */
+ completionEnableThinking?: boolean | DataLink
  /* Stop words */
  completionStopWords?: Array<string | DataLink> | DataLink
  /* Number of tokens to predict */
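
Note: taken together, the renamed media-path property and the new thinking switch look roughly like this in a GeneratorLLM property block; a hedged sketch with illustrative paths (all other properties are unchanged and omitted):

const llmPropertySlice = {
  completionPrompt: 'What is shown here? <__media__>', // `<__media__>` marks where media content is placed
  completionPromptMediaPaths: ['/path/to/photo.jpg'],
  completionEnableThinking: true, // matches the new default shown above
}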
@@ -6549,6 +6572,165 @@ export type GeneratorLLM = Generator &
  >
  }
 
+ /* Load the model */
+ export type GeneratorGGMLTTSActionLoadModel = Action & {
+ __actionName: 'GENERATOR_GGML_TTS_LOAD_MODEL'
+ }
+
+ /* Generate audio */
+ export type GeneratorGGMLTTSActionGenerate = ActionWithParams & {
+ __actionName: 'GENERATOR_GGML_TTS_GENERATE'
+ params?: Array<{
+ input: 'text'
+ value?: string | DataLink | EventProperty
+ mapping?: string
+ }>
+ }
+
+ /* Clean cache */
+ export type GeneratorGGMLTTSActionCleanCache = Action & {
+ __actionName: 'GENERATOR_GGML_TTS_CLEAN_CACHE'
+ }
+
+ /* Release context */
+ export type GeneratorGGMLTTSActionReleaseContext = Action & {
+ __actionName: 'GENERATOR_GGML_TTS_RELEASE_CONTEXT'
+ }
+
+ interface GeneratorGGMLTTSDef {
+ /*
+ Default property:
+ {
+ "vocoderUrl": "https://huggingface.co/ggml-org/WavTokenizer/resolve/main/WavTokenizer-Large-75-F16.gguf",
+ "vocoderHashType": "sha256",
+ "vocoderHash": "2356baa8631cc2995ea3465196a017a2733600d849a91180c0f97fa7fb375bbe",
+ "outputType": "play",
+ "cacheGenerated": true,
+ "autoInferEnable": false,
+ "softBreakRegex": "^[^\\r\\n\\t\\f\\v]*([\\r\\n]+|[。!?!?.]\\B)",
+ "hardBreakTime": 500,
+ "completionTemperature": 0.1,
+ "completionRepetitionPenalty": 1.1,
+ "completionTopK": 40,
+ "completionTopP": 0.9,
+ "completionMinP": 0.05,
+ "useGuideToken": false,
+ "contextSize": 4096,
+ "batchSize": 512,
+ "maxThreads": 1,
+ "accelVariant": "default",
+ "mainGpu": 0,
+ "gpuLayers": 0,
+ "useMlock": true,
+ "useMmap": true,
+ "useFlashAttn": false
+ }
+ */
+ property?: {
+ /* Initialize the TTS context on generator initialization */
+ init?: boolean | DataLink
+ /* The URL or path of model
+ We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
+ modelUrl?: string | DataLink
+ /* Hash type of model */
+ modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
+ /* Hash of model */
+ modelHash?: string | DataLink
+ /* The URL or path of vocoder model */
+ vocoderUrl?: string | DataLink
+ /* Hash type of vocoder model */
+ vocoderHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
+ /* Hash of vocoder model */
+ vocoderHash?: string | DataLink
+ /* Output mode */
+ outputType?: 'play' | 'file' | DataLink
+ /* Enable cache for generated audio */
+ cacheGenerated?: boolean | DataLink
+ /* Text to generate */
+ prompt?: string | DataLink
+ /* Speaker JSON */
+ speaker?: {} | DataLink
+ /* Auto inference when prompt changes */
+ autoInferEnable?: boolean | DataLink
+ /* Segmentation rule for auto inference */
+ softBreakRegex?: string | DataLink
+ /* Time to force inference when softBreakRegex is not satisfied */
+ hardBreakTime?: number | DataLink
+ /* Temperature */
+ completionTemperature?: number | DataLink
+ /* Repetition Penalty */
+ completionRepetitionPenalty?: number | DataLink
+ /* Top K sampling */
+ completionTopK?: number | DataLink
+ /* Top P sampling */
+ completionTopP?: number | DataLink
+ /* Min P sampling */
+ completionMinP?: number | DataLink
+ /* Set the random number generator (RNG) seed (default: -1, -1 = random seed) */
+ completionSeed?: number | DataLink
+ /* Enable guide token to help prevent hallucinations by forcing the TTS to use the correct words. */
+ useGuideToken?: boolean | DataLink
+ /* Context size, for OutTTS recommended 4096 ~ 8192 (Default to 4096) */
+ contextSize?: number | DataLink
+ /* Logical batch size for prompt processing */
+ batchSize?: number | DataLink
+ /* Number of threads */
+ maxThreads?: number | DataLink
+ /* Accelerator variant (Only for desktop)
+ `default` - CPU / Metal (macOS)
+ `vulkan` - Use Vulkan
+ `cuda` - Use CUDA */
+ accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
+ /* Main GPU index */
+ mainGpu?: number | DataLink
+ /* Number of GPU layers (NOTE: Currently not supported for Android) */
+ gpuLayers?: number | DataLink
+ /* Use memory lock */
+ useMlock?: boolean | DataLink
+ /* Use mmap */
+ useMmap?: boolean | DataLink
+ /* Use Flash Attention for inference (Recommended with GPU enabled) */
+ useFlashAttn?: boolean | DataLink
+ }
+ events?: {
+ /* Event triggered when state change */
+ onContextStateChange?: Array<EventAction>
+ /* Event triggered when error occurs */
+ onError?: Array<EventAction>
+ }
+ outlets?: {
+ /* Context state */
+ contextState?: () => Data
+ /* Generated audio file */
+ generatedAudio?: () => Data
+ /* Generated audio file is playing */
+ generatedAudioPlaying?: () => Data
+ }
+ }
+
+ /* Local Text-to-Speech (TTS) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
+ You can use any converted model on HuggingFace. */
+ export type GeneratorGGMLTTS = Generator &
+ GeneratorGGMLTTSDef & {
+ templateKey: 'GENERATOR_GGML_TTS'
+ switches: Array<
+ SwitchDef &
+ GeneratorGGMLTTSDef & {
+ conds?: Array<{
+ method: '==' | '!=' | '>' | '<' | '>=' | '<='
+ cond:
+ | SwitchCondInnerStateCurrentCanvas
+ | SwitchCondData
+ | {
+ __typename: 'SwitchCondInnerStateOutlet'
+ outlet: 'contextState' | 'generatedAudio' | 'generatedAudioPlaying'
+ value: any
+ }
+ }>
+ }
+ >
+ }
+
  /* Load the model */
  export type GeneratorQnnLlmActionLoadModel = Action & {
  __actionName: 'GENERATOR_QNN_LLM_LOAD_MODEL'
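
Note: a hedged sketch of a minimal GENERATOR_GGML_TTS property block built only from the fields above; the model URL and hash are placeholders, while the vocoder values repeat the documented defaults:

// shape follows GeneratorGGMLTTSDef['property'] above
const ggmlTtsProperty = {
  init: true,
  modelUrl: 'https://example.com/path/to/tts-model.gguf', // placeholder GGUF model URL
  modelHashType: 'sha256',
  modelHash: '<sha256 of the model file>', // placeholder
  vocoderUrl: 'https://huggingface.co/ggml-org/WavTokenizer/resolve/main/WavTokenizer-Large-75-F16.gguf',
  vocoderHashType: 'sha256',
  vocoderHash: '2356baa8631cc2995ea3465196a017a2733600d849a91180c0f97fa7fb375bbe',
  outputType: 'play',
  contextSize: 4096,
}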
@@ -6798,7 +6980,7 @@ interface GeneratorOpenAILLMDef {
  Default property:
  {
  "apiEndpoint": "https://api.openai.com/v1",
- "model": "gpt-4o-mini",
+ "model": "gpt-4o",
  "completionMessages": [
  {
  "role": "system",
@@ -6808,8 +6990,6 @@ Default property:
  "completionMaxTokens": 1024,
  "completionTemperature": 1,
  "completionTopP": 1,
- "completionFrequencyPenalty": 0,
- "completionPresencePenalty": 0,
  "completionStop": []
  }
  */
@@ -6898,7 +7078,11 @@ Default property:
  - Compatible with OpenAI API format
  - Supports function calling
  - Streaming responses
- - Custom API endpoints */
+ - Custom API endpoints, like
+ - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
+ - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
+ - Gemini API: https://ai.google.dev/gemini-api/docs/openai
+ - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server */
  export type GeneratorOpenAILLM = Generator &
  GeneratorOpenAILLMDef & {
  templateKey: 'GENERATOR_OPENAI_LLM'
package/types/system.ts CHANGED
@@ -467,6 +467,16 @@ export type SystemActionChannelPublish = ActionWithParams & {
  >
  }
 
+ /* Delay for a certain time between actions */
+ export type SystemActionDelay = ActionWithParams & {
+ __actionName: 'DELAY'
+ params?: Array<{
+ input: 'time'
+ value?: number | DataLink | EventProperty
+ mapping?: string
+ }>
+ }
+
  /* [Internal] Use a shared application */
  export type SystemActionUseShareApplication = ActionWithParams & {
  __actionName: 'USE_SHARE_APPLICATION'
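
Note: a minimal sketch of the new DELAY action (base ActionWithParams fields omitted; the unit of `time` is not stated in this diff, milliseconds are assumed here):

const wait = {
  __actionName: 'DELAY',
  params: [
    { input: 'time', value: 500 }, // assumed milliseconds
  ],
}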
package/utils/data.ts CHANGED
@@ -64,6 +64,7 @@ type SystemDataName =
  | 'workspaceName'
  | 'applicationInfo'
  | 'applicationName'
+ | 'deviceID'
  | 'macAddress'
  | 'bindDeviceCode'
  | 'bindDeviceCodeExpire'
@@ -343,6 +344,14 @@ export const systemDataList: Array<SystemDataInfo> = [
  type: 'string',
  value: '',
  },
+ {
+ name: 'deviceID',
+ id: 'PROPERTY_BANK_DATA_NODE_8b5c2d9e-f0a1-4b2c-8d3e-4f5a6b7c8d9e',
+ title: 'SYSTEM: Device ID',
+ description: 'Device ID of current device',
+ type: 'string',
+ value: 'unknown',
+ },
  {
  name: 'macAddress',
  id: 'PROPERTY_BANK_DATA_NODE_f01fcc78-0723-11ed-ac00-877339de1030',
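
Note: the new entry can be read from systemDataList like any other system data item; a minimal lookup sketch, assuming systemDataList is imported from the package/utils/data.ts export shown above:

const deviceIdEntry = systemDataList.find((d) => d.name === 'deviceID')
// deviceIdEntry?.value is 'unknown' by default, per the entry above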
@@ -791,6 +791,14 @@ export const templateEventPropsMap = {
  'GENERATOR_LLM_COMPLETION_FUNCTION_DETAILS', // type: object
  ],
  },
+ GENERATOR_GGML_TTS: {
+ onContextStateChange: [
+ 'GENERATOR_GGML_TTS_CONTEXT_STATE', // type: string
+ ],
+ onError: [
+ 'GENERATOR_GGML_TTS_ERROR', // type: string
+ ],
+ },
  GENERATOR_QNN_LLM: {
  onContextStateChange: [
  'GENERATOR_QNN_LLM_CONTEXT_STATE', // type: string