@fugood/bricks-project 2.22.0-beta.22 → 2.22.0-beta.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -669,6 +669,7 @@ export const templateActionNameMap = {
       parallelToolCalls: 'GENERATOR_LLM_PARALLEL_TOOL_CALLS',
       toolChoice: 'GENERATOR_LLM_TOOL_CHOICE',
       enableThinking: 'GENERATOR_LLM_ENABLE_THINKING',
+      useReasoningFormat: 'GENERATOR_LLM_USE_REASONING_FORMAT',
       prompt: 'GENERATOR_LLM_PROMPT',
       promptMediaPaths: 'GENERATOR_LLM_PROMPT_MEDIA_PATHS',
       promptTemplateData: 'GENERATOR_LLM_PROMPT_TEMPLATE_DATA',
@@ -719,6 +720,11 @@ export const templateActionNameMap = {
     },
   },
   GENERATOR_QNN_LLM: {
+    GENERATOR_QNN_LLM_PROCESS: {
+      prompt: 'GENERATOR_QNN_LLM_PROMPT',
+      messages: 'GENERATOR_QNN_LLM_MESSAGES',
+      tools: 'GENERATOR_QNN_LLM_TOOLS',
+    },
     GENERATOR_QNN_LLM_GENERATE: {
       prompt: 'GENERATOR_QNN_LLM_PROMPT',
       messages: 'GENERATOR_QNN_LLM_MESSAGES',
@@ -745,6 +751,19 @@ export const templateActionNameMap = {
       text: 'GENERATOR_OPENAI_TTS_TEXT',
     },
   },
+  GENERATOR_ANTHROPIC_LLM: {
+    GENERATOR_ANTHROPIC_LLM_COMPLETION: {
+      systemMessage: 'GENERATOR_ANTHROPIC_LLM_SYSTEM_MESSAGE',
+      messages: 'GENERATOR_ANTHROPIC_LLM_MESSAGES',
+      maxTokens: 'GENERATOR_ANTHROPIC_LLM_MAX_TOKENS',
+      temperature: 'GENERATOR_ANTHROPIC_LLM_TEMPERATURE',
+      topP: 'GENERATOR_ANTHROPIC_LLM_TOP_P',
+      topK: 'GENERATOR_ANTHROPIC_LLM_TOP_K',
+      stopSequences: 'GENERATOR_ANTHROPIC_LLM_STOP_SEQUENCES',
+      tools: 'GENERATOR_ANTHROPIC_LLM_TOOLS',
+      toolChoice: 'GENERATOR_ANTHROPIC_LLM_TOOL_CHOICE',
+    },
+  },
   GENERATOR_ASSISTANT: {
     GENERATOR_ASSISTANT_ADD_MESSAGE: {
       role: 'GENERATOR_ASSISTANT_ROLE',
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fugood/bricks-project",
-  "version": "2.22.0-beta.22",
+  "version": "2.22.0-beta.24",
   "main": "index.ts",
   "scripts": {
     "build": "node scripts/build.js"
@@ -14,5 +14,5 @@
     "lodash": "^4.17.4",
     "uuid": "^8.3.1"
   },
-  "gitHead": "f1a67a715ee30aac5067df53b8b640511a81fc83"
+  "gitHead": "e95b5edabbaba53ed373b3efb5d587d421972348"
 }
@@ -1928,9 +1928,10 @@ interface GeneratorBleCentralDef {
   Default property:
   {
     "init": false,
-    "scanTime": 0,
-    "updateInterval": 100,
+    "scanTime": 10,
+    "updateInterval": 1000,
     "retainTime": 120,
+    "maximumDiscoveredDevice": 256,
     "allowDuplicates": false,
     "scanMode": "LowPower"
   }
@@ -4386,6 +4387,7 @@ Default property:
     | DataLink
   /* Tools
      Type:
+     `static`: Return static data
      `detect-data-change`: Watch data target change to return data,
      please update data with ({ id: string, content: string | object }),
      and ensure the id is same with request id.
@@ -4400,7 +4402,8 @@ Default property:
     name?: string | DataLink
     description?: string | DataLink
     params?: {} | DataLink
-    type?: 'detect-data-change' | 'script' | DataLink
+    type?: 'static' | 'detect-data-change' | 'script' | DataLink
+    staticData?: any
    dataChangeConfig?:
      | DataLink
      | {
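The hunk above adds a `static` tool type alongside the existing `detect-data-change` and `script` types, together with a `staticData` field holding the value to return. A minimal sketch of a tool entry using it, based only on the fields shown in this diff; the tool name, description, and payload are made-up illustration values:

// Sketch only: field names come from the diff above, concrete values are hypothetical.
const staticTool = {
  name: 'appVersion',                 // hypothetical tool name
  description: 'Returns the app version',
  params: {},                         // a static tool needs no inputs
  type: 'static' as const,            // new option; returns staticData instead of running a script or watching data
  staticData: { version: '2.22.0' },  // returned as-is as the tool result
}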
@@ -4673,6 +4676,8 @@ Default property:
   }
   */
   property?: {
+    /* Application-scoped generator key, key cannot be the same with other application-scoped generators */
+    globalGeneratorKey?: string | DataLink
     /* Initialize the MCP client on start */
     init?: boolean | DataLink
     /* Type of the MCP connection, e.g. sse or direct-link (generator) */
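The new `globalGeneratorKey` gives an MCP generator an application-scoped key; per the comment it cannot be the same as the key of any other application-scoped generator. A rough sketch of how it might sit next to the existing `init` flag; the key value is a placeholder:

// Sketch only: globalGeneratorKey and init are the fields shown in this diff,
// 'shared-mcp-client' is a placeholder key that must be unique among application-scoped generators.
const mcpProperty = {
  globalGeneratorKey: 'shared-mcp-client',
  init: true, // initialize the MCP client on start
}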
@@ -6331,6 +6336,11 @@ export type GeneratorLLMActionCompletion = ActionWithParams & {
         value?: boolean | DataLink | EventProperty
         mapping?: string
       }
+    | {
+        input: 'useReasoningFormat'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
     | {
         input: 'prompt'
         value?: string | DataLink | EventProperty
@@ -6556,6 +6566,7 @@ Default property:
     "completionPrompt": "",
     "completionPromptTemplateType": "${}",
     "completionEnableThinking": true,
+    "completionUseReasoningFormat": "auto",
     "completionStopWords": [
       "</s>",
       "<|end|>",
@@ -6637,12 +6648,14 @@ Default property:
     useMmap?: boolean | DataLink
     /* Use Flash Attention for inference (Recommended with GPU enabled) */
     useFlashAttn?: boolean | DataLink
-    /* Use full-size SWA cache. May improve performance for multiple sequences but uses more memory. */
-    useSwaFull?: boolean | DataLink
     /* KV cache data type for the K (Default: f16) */
     cacheKType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
     /* KV cache data type for the V (Default: f16) */
     cacheVType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
+    /* Use a unified buffer across the input sequences when computing the attention */
+    useKVUnified?: boolean | DataLink
+    /* Use full-size SWA cache. May improve performance for multiple sequences but uses more memory. */
+    useSwaFull?: boolean | DataLink
     /* Enable context shift */
     ctxShift?: boolean | DataLink
     /* Enable Transform Script for processing the prompt */
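The KV-cache block above now lists the options in this order, with `useKVUnified` as the new addition. A sketch of a property object touching these options; the values are illustrative choices, not package defaults (apart from f16 being the documented default cache type):

// Illustrative values only; option names and their meanings mirror the declarations above.
const llmKvCacheProperty = {
  useFlashAttn: true,   // recommended with GPU enabled
  cacheKType: 'f16',    // KV cache data type for K (documented default: f16)
  cacheVType: 'f16',    // KV cache data type for V (documented default: f16)
  useKVUnified: true,   // new: unified buffer across input sequences when computing attention
  useSwaFull: false,    // full-size SWA cache; may help with multiple sequences but uses more memory
  ctxShift: true,       // enable context shift
}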
@@ -6705,6 +6718,10 @@ Default property:
     }
     /* Enable thinking */
     completionEnableThinking?: boolean | DataLink
+    /* Use reasoning format for enhanced response structure
+       `auto` - Auto-determine the reasoning format of the model
+       `none` - Disable reasoning format */
+    completionUseReasoningFormat?: 'auto' | 'none' | DataLink
     /* Stop words */
     completionStopWords?: Array<string | DataLink> | DataLink
     /* Number of tokens to predict */
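The reasoning-format switch appears in three places in this release: the `completionUseReasoningFormat` property above (default "auto" per the defaults hunk), the `useReasoningFormat` input added to `GeneratorLLMActionCompletion`, and the parsed output surfaced later via the new `completionReasoningContent` outlet and `GENERATOR_LLM_COMPLETION_REASONING_CONTENT` event properties. A rough sketch tying the first two together; the exact runtime wiring is an assumption based only on the types shown here:

// Property side: let the runtime auto-detect the model's reasoning format ('auto' | 'none').
const llmCompletionProperty = {
  completionEnableThinking: true,
  completionUseReasoningFormat: 'auto' as const,
}

// Action side: the same knob passed per call as a completion param
// (params shape taken from the GeneratorLLMActionCompletion hunk earlier in this diff).
const completionParams = [
  { input: 'useReasoningFormat', value: 'none' }, // assumed to override the property for this call
]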
@@ -6795,6 +6812,8 @@ Default property:
     completionLastToken?: () => Data
     /* Completion result */
     completionResult?: () => Data
+    /* Reasoning content from model responses */
+    completionReasoningContent?: () => Data
     /* Full context (Prompt + Completion) */
     completionFullContext?: () => Data
     /* Inference result details */
@@ -6834,6 +6853,7 @@ export type GeneratorLLM = Generator &
             | 'completionLastFormattedPrompt'
             | 'completionLastToken'
             | 'completionResult'
+            | 'completionReasoningContent'
             | 'completionFullContext'
             | 'completionResultDetails'
           value: any
@@ -6886,7 +6906,7 @@ Default property:
     "completionTopK": 40,
     "completionTopP": 0.9,
     "completionMinP": 0.05,
-    "useGuideToken": false,
+    "useGuideToken": true,
     "contextSize": 8192,
     "batchSize": 8192,
     "microBatchSize": 512,
@@ -7163,6 +7183,28 @@ export type GeneratorQnnLlmActionAbortModelDownload = Action & {
   __actionName: 'GENERATOR_QNN_LLM_ABORT_MODEL_DOWNLOAD'
 }
 
+/* Pre-process the prompt, to prepare KV cache */
+export type GeneratorQnnLlmActionProcess = ActionWithParams & {
+  __actionName: 'GENERATOR_QNN_LLM_PROCESS'
+  params?: Array<
+    | {
+        input: 'prompt'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'messages'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'tools'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
 /* Generate text */
 export type GeneratorQnnLlmActionGenerate = ActionWithParams & {
   __actionName: 'GENERATOR_QNN_LLM_GENERATE'
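The new GENERATOR_QNN_LLM_PROCESS action takes the same inputs as GENERATOR_QNN_LLM_GENERATE (prompt, messages, tools) but, per its comment, only pre-processes the prompt to prepare the KV cache. A sketch of such an action object; the message content is a made-up example, and the idea of running it ahead of a later generate step is an inference drawn from that comment:

// Sketch: warm the KV cache with the system prompt before a later GENERATOR_QNN_LLM_GENERATE call.
const processAction = {
  __actionName: 'GENERATOR_QNN_LLM_PROCESS' as const,
  params: [
    {
      input: 'messages',
      value: [{ role: 'system', content: 'You are a helpful assistant.' }], // hypothetical message shape; the type only requires Array<any>
    },
    { input: 'tools', value: [] },
  ],
}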
@@ -7205,6 +7247,10 @@ Default property:
     "toolCallParser": "llama3_json",
     "toolChoice": "auto",
     "parallelToolCalls": false,
+    "temperature": 0.8,
+    "seed": 42,
+    "topK": 40,
+    "topP": 0.95,
     "greedy": false
   }
   */
@@ -7225,10 +7271,10 @@ Default property:
     /* SOC model */
     socModel?: 'X Elite' | 'X Plus' | '8 Elite' | '8 Gen 3' | 'QCS8550' | DataLink
     /* Custom model base URL
-       The URL directory should contain `config.json` (model config) file, `model_part_*_of_*.bin` (model split files) files and `tokenizer.json` (tokenizer config) file. */
+       The model should be bundled, for details see https://github.com/mybigday/node-qnn-llm?tab=readme-ov-file#bundled-file */
     customModelUrl?: string | DataLink
-    /* Custom model split parts */
-    customModelSplitParts?: number | DataLink
+    /* Custom model MD5 */
+    customModelMd5?: string | DataLink
     /* Chat format */
     chatFormat?:
       | 'Llama 2'
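Custom QNN models are now referenced as a single bundled file (see the linked node-qnn-llm README) together with `customModelMd5`, replacing the old `customModelSplitParts` scheme. A sketch with placeholder values; the URL and checksum are not real, and the assumption that the MD5 verifies the downloaded bundle comes from the field name only:

// Placeholder values; only the field names come from this diff.
const qnnCustomModelProperty = {
  customModelUrl: 'https://example.com/models/my-model.bundle', // hypothetical bundled model file
  customModelMd5: '0123456789abcdef0123456789abcdef',           // hypothetical checksum of that bundle
}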
@@ -7624,6 +7670,184 @@ export type GeneratorOpenAiTTS = Generator &
     >
   }
 
+/* Run text completion */
+export type GeneratorAnthropicLLMActionCompletion = ActionWithParams & {
+  __actionName: 'GENERATOR_ANTHROPIC_LLM_COMPLETION'
+  params?: Array<
+    | {
+        input: 'systemMessage'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'messages'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'maxTokens'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'temperature'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'topP'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'topK'
+        value?: number | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'stopSequences'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'tools'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'toolChoice'
+        value?: {} | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
+/* Stop text completion */
+export type GeneratorAnthropicLLMActionStopCompletion = Action & {
+  __actionName: 'GENERATOR_ANTHROPIC_LLM_STOP_COMPLETION'
+}
+
+interface GeneratorAnthropicLLMDef {
+  /*
+  Default property:
+  {
+    "apiEndpoint": "https://api.anthropic.com/v1/messages",
+    "model": "claude-3-5-sonnet-20241022",
+    "systemMessage": "You are a helpful assistant.",
+    "completionMessages": [],
+    "completionMaxTokens": 1024,
+    "completionTemperature": 1,
+    "completionTopP": 1,
+    "completionStopSequences": []
+  }
+  */
+  property?: {
+    /* API endpoint URL */
+    apiEndpoint?: string | DataLink
+    /* API key */
+    apiKey?: string | DataLink
+    /* Model name (Default: claude-3-5-sonnet-20241022) */
+    model?: string | DataLink
+    /* System message content */
+    systemMessage?: string | DataLink
+    /* Chat messages (user/assistant only) */
+    completionMessages?:
+      | Array<
+          | DataLink
+          | {
+              role?: 'user' | 'assistant' | DataLink
+              content?:
+                | string
+                | DataLink
+                | Array<
+                    | DataLink
+                    | {
+                        type?: 'text' | 'image' | DataLink
+                        text?: string | DataLink
+                        source?:
+                          | DataLink
+                          | {
+                              type?: string | DataLink
+                              media_type?: string | DataLink
+                              data?: string | DataLink
+                            }
+                      }
+                  >
+                | DataLink
+                | DataLink
+            }
+        >
+      | DataLink
+    /* Tools for function calling following Anthropic format
+       Format: Array of objects with {name, description, input_schema} structure
+       See: https://docs.anthropic.com/en/docs/tool-use */
+    completionTools?: Array<{} | DataLink> | DataLink
+    /* Tool choice for function calling */
+    completionToolChoice?:
+      | DataLink
+      | {
+          type?: 'auto' | 'any' | 'tool' | DataLink
+          name?: string | DataLink
+        }
+    /* Maximum tokens to generate */
+    completionMaxTokens?: number | DataLink
+    /* Temperature */
+    completionTemperature?: number | DataLink
+    /* Top P sampling */
+    completionTopP?: number | DataLink
+    /* Top K sampling */
+    completionTopK?: number | DataLink
+    /* Stop sequences */
+    completionStopSequences?: Array<string | DataLink> | DataLink
+  }
+  events?: {
+    /* Error event */
+    onError?: Array<EventAction>
+    /* Completion event */
+    onCompletion?: Array<EventAction>
+    /* Completion finished event */
+    onCompletionFinished?: Array<EventAction>
+    /* Tool use event */
+    onToolUse?: Array<EventAction>
+  }
+  outlets?: {
+    /* Evaluating outlet */
+    isEvaluating?: () => Data
+    /* Completion result outlet */
+    completionResult?: () => Data
+    /* Completion details outlet */
+    completionDetails?: () => Data
+  }
+}
+
+/* LLM inference using Anthropic-compatible API endpoints
+
+   ## Features
+   - Compatible with Anthropic API format
+   - Supports function calling (tools)
+   - Streaming responses
+   - Custom API endpoints (Default to https://api.anthropic.com/v1/messages) */
+export type GeneratorAnthropicLLM = Generator &
+  GeneratorAnthropicLLMDef & {
+    templateKey: 'GENERATOR_ANTHROPIC_LLM'
+    switches: Array<
+      SwitchDef &
+        GeneratorAnthropicLLMDef & {
+          conds?: Array<{
+            method: '==' | '!=' | '>' | '<' | '>=' | '<='
+            cond:
+              | SwitchCondInnerStateCurrentCanvas
+              | SwitchCondData
+              | {
+                  __typename: 'SwitchCondInnerStateOutlet'
+                  outlet: 'isEvaluating' | 'completionResult' | 'completionDetails'
+                  value: any
+                }
+          }>
+        }
+    >
+  }
+
 /* Add a message to the assistant */
 export type GeneratorAssistantActionAddMessage = ActionWithParams & {
   __actionName: 'GENERATOR_ASSISTANT_ADD_MESSAGE'
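Putting the new GENERATOR_ANTHROPIC_LLM pieces together, here is a minimal property sketch built only from fields and defaults shown in the hunk above; the API key and the user message are placeholders:

// Values marked as defaults are copied from the Default property block above; the rest are placeholders.
const anthropicProperty = {
  apiEndpoint: 'https://api.anthropic.com/v1/messages', // default
  apiKey: 'sk-ant-xxxxxxxx',                            // placeholder
  model: 'claude-3-5-sonnet-20241022',                  // default
  systemMessage: 'You are a helpful assistant.',        // default
  completionMessages: [
    { role: 'user' as const, content: 'Summarize the latest release notes.' }, // illustrative message
  ],
  completionMaxTokens: 1024,   // default
  completionTemperature: 1,    // default
  completionTopP: 1,           // default
  completionStopSequences: [], // default
}

Tool calls would go through `completionTools` / `completionToolChoice` and surface on the `onToolUse` event, whose event properties are registered in `templateEventPropsMap` at the end of this diff.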
@@ -8098,6 +8322,7 @@ Default property:
       | DataLink
       | {
           generatorId?: string | DataLink
+          generatorKey?: string | DataLink
           name?: string | DataLink
           enabled?: boolean | DataLink
         }
@@ -812,12 +812,14 @@ export const templateEventPropsMap = {
     onCompletion: [
       'GENERATOR_LLM_COMPLETION_SESSION_KEY', // type: string
       'GENERATOR_LLM_COMPLETION_RESULT', // type: string
+      'GENERATOR_LLM_COMPLETION_REASONING_CONTENT', // type: string
       'GENERATOR_LLM_COMPLETION_FULL_CONTEXT', // type: string
      'GENERATOR_LLM_COMPLETION_RESULT_DETAILS', // type: object
     ],
     onCompletionFinished: [
       'GENERATOR_LLM_COMPLETION_SESSION_KEY', // type: string
       'GENERATOR_LLM_COMPLETION_RESULT', // type: string
+      'GENERATOR_LLM_COMPLETION_REASONING_CONTENT', // type: string
       'GENERATOR_LLM_COMPLETION_TOOL_CALLS', // type: array
       'GENERATOR_LLM_COMPLETION_FULL_CONTEXT', // type: string
       'GENERATOR_LLM_COMPLETION_IS_CONTEXT_FULL', // type: bool
@@ -884,6 +886,23 @@ export const templateEventPropsMap = {
       'GENERATOR_OPENAI_TTS_ERROR', // type: string
     ],
   },
+  GENERATOR_ANTHROPIC_LLM: {
+    onError: [
+      'GENERATOR_ANTHROPIC_LLM_ERROR', // type: string
+    ],
+    onCompletion: [
+      'GENERATOR_ANTHROPIC_LLM_COMPLETION_RESULT', // type: string
+      'GENERATOR_ANTHROPIC_LLM_COMPLETION_DETAILS', // type: object
+    ],
+    onCompletionFinished: [
+      'GENERATOR_ANTHROPIC_LLM_COMPLETION_RESULT', // type: string
+      'GENERATOR_ANTHROPIC_LLM_TOOL_USES', // type: array
+    ],
+    onToolUse: [
+      'GENERATOR_ANTHROPIC_LLM_TOOL_USE_NAME', // type: string
+      'GENERATOR_ANTHROPIC_LLM_TOOL_USE_INPUT', // type: object
+    ],
+  },
   GENERATOR_ASSISTANT: {
     onError: [
       'GENERATOR_ASSISTANT_ERROR', // type: string