@fugood/bricks-project 2.24.0-beta.13 → 2.24.0-beta.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/package.json +6 -3
  2. package/tools/preview-main.mjs +1 -1
  3. package/tools/pull.ts +10 -10
  4. package/types/brick-base.ts +1 -1
  5. package/types/bricks/Slideshow.ts +2 -2
  6. package/types/generators/Assistant.ts +3 -3
  7. package/types/generators/BlePeripheral.ts +1 -1
  8. package/types/generators/DataBank.ts +1 -1
  9. package/types/generators/Http.ts +1 -1
  10. package/types/generators/HttpServer.ts +4 -4
  11. package/types/generators/Keyboard.ts +2 -2
  12. package/types/generators/LlmAnthropicCompat.ts +2 -2
  13. package/types/generators/LlmAppleBuiltin.ts +1 -1
  14. package/types/generators/LlmGgml.ts +11 -11
  15. package/types/generators/LlmOnnx.ts +4 -4
  16. package/types/generators/LlmOpenAiCompat.ts +2 -2
  17. package/types/generators/LlmQualcommAiEngine.ts +2 -2
  18. package/types/generators/McpServer.ts +3 -3
  19. package/types/generators/MediaFlow.ts +1 -1
  20. package/types/generators/Question.ts +1 -1
  21. package/types/generators/RerankerGgml.ts +3 -3
  22. package/types/generators/SpeechToTextGgml.ts +6 -6
  23. package/types/generators/SpeechToTextOnnx.ts +4 -4
  24. package/types/generators/SttAppleBuiltin.ts +1 -1
  25. package/types/generators/Tcp.ts +1 -1
  26. package/types/generators/TcpServer.ts +1 -1
  27. package/types/generators/TextToSpeechApple.ts +1 -1
  28. package/types/generators/TextToSpeechAppleBuiltin.ts +1 -1
  29. package/types/generators/TextToSpeechGgml.ts +4 -4
  30. package/types/generators/TextToSpeechOnnx.ts +5 -5
  31. package/types/generators/TextToSpeechOpenAiLike.ts +1 -1
  32. package/types/generators/ThermalPrinter.ts +1 -1
  33. package/types/generators/VadGgml.ts +4 -4
  34. package/types/generators/VadOnnx.ts +3 -3
  35. package/types/generators/VadTraditional.ts +1 -1
  36. package/types/generators/VectorStore.ts +3 -3
  37. package/types/generators/WebCrawler.ts +1 -1
  38. package/types/generators/WebRtc.ts +2 -2
  39. package/types/system.ts +1 -1
package/package.json CHANGED
@@ -1,13 +1,13 @@
1
1
  {
2
2
  "name": "@fugood/bricks-project",
3
- "version": "2.24.0-beta.13",
3
+ "version": "2.24.0-beta.15",
4
4
  "main": "index.ts",
5
5
  "scripts": {
6
6
  "typecheck": "tsc --noEmit",
7
7
  "build": "bun scripts/build.js"
8
8
  },
9
9
  "dependencies": {
10
- "@fugood/bricks-cli": "^2.24.0-beta.13",
10
+ "@fugood/bricks-cli": "^2.24.0-beta.15",
11
11
  "@huggingface/gguf": "^0.3.2",
12
12
  "@iarna/toml": "^3.0.0",
13
13
  "@modelcontextprotocol/sdk": "^1.15.0",
@@ -21,5 +21,8 @@
21
21
  "lodash": "^4.17.4",
22
22
  "uuid": "^8.3.1"
23
23
  },
24
- "gitHead": "6c7a2e18339ef4c9114560871c8d57fe81e42952"
24
+ "peerDependencies": {
25
+ "oxfmt": "^0.36.0"
26
+ },
27
+ "gitHead": "acd51daff8ae31ef803c59b974dfe55fa21af9ed"
25
28
  }
@@ -128,7 +128,7 @@ app.on('ready', () => {
128
128
 
129
129
  // Capture console messages from the preview
130
130
  if (testId) {
131
- mainWindow.webContents.on('console-message', (_, __, message) => {
131
+ mainWindow.webContents.on('console-message', (_, { message }) => {
132
132
  if (message.startsWith('[TEST_RESULT]')) {
133
133
  const data = JSON.parse(message.replace('[TEST_RESULT]', ''))
134
134
  console.log(`[TEST_RESULT_TOON]${TOON.encode(data.result)}`)
package/tools/pull.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  import { $ } from 'bun'
2
- import { format } from 'prettier'
2
+ import { format } from 'oxfmt'
3
3
 
4
4
  const cwd = process.cwd()
5
5
  const args = process.argv.slice(2)
@@ -74,7 +74,7 @@ if (isGitRepo && !force) {
74
74
  }
75
75
  }
76
76
 
77
- const prettierConfig = await Bun.file(`${cwd}/.prettierrc`)
77
+ const oxfmtConfig = await Bun.file(`${cwd}/.oxfmtrc.json`)
78
78
  .json()
79
79
  .catch(() => ({
80
80
  trailingComma: 'all',
@@ -85,14 +85,14 @@ const prettierConfig = await Bun.file(`${cwd}/.prettierrc`)
85
85
  }))
86
86
 
87
87
  await Promise.all(
88
- files.map(async (file: { name: string; input: string; formatable?: boolean }) =>
89
- Bun.write(
90
- `${cwd}/${file.name}`,
91
- file.formatable
92
- ? await format(file.input, { parser: 'typescript', ...prettierConfig })
93
- : file.input,
94
- ),
95
- ),
88
+ files.map(async (file: { name: string; input: string; formatable?: boolean }) => {
89
+ let content = file.input
90
+ if (file.formatable) {
91
+ const result = await format(file.name, file.input, oxfmtConfig)
92
+ content = result.code
93
+ }
94
+ return Bun.write(`${cwd}/${file.name}`, content)
95
+ }),
96
96
  )
97
97
 
98
98
  if (isGitRepo) {
@@ -59,7 +59,7 @@ export interface BrickBasicProperty {
59
59
  shadowOffsetWidth?: number | DataLink
60
60
  /* The brick shadow offset height */
61
61
  shadowOffsetHeight?: number | DataLink
62
- /* Brick pressable.
62
+ /* Brick pressable.
63
63
  Disabled: Disabled even if event or animation is set.
64
64
  Bypass: Disable and bypass the touch event on the brick. */
65
65
  pressable?: 'enabled' | 'disabled' | 'bypass' | DataLink
@@ -81,7 +81,7 @@ Default property:
81
81
  property?: BrickBasicProperty & {
82
82
  /* The time interval of show for each photo */
83
83
  countdown?: number | DataLink
84
- /* The slideshow media path list (File, URL)
84
+ /* The slideshow media path list (File, URL)
85
85
  Each path object can override global photo/video settings.
86
86
  Item-level properties take precedence over brick-level properties. */
87
87
  paths?:
@@ -102,7 +102,7 @@ Default property:
102
102
  }
103
103
  >
104
104
  | DataLink
105
- /* Multiple slideshow media path lists to combine (Array of path arrays).
105
+ /* Multiple slideshow media path lists to combine (Array of path arrays).
106
106
  All arrays are flattened and combined: [...paths, ...pathsList[0], ...pathsList[1], ...] */
107
107
  pathsList?: Array<Array<any> | DataLink | DataLink> | DataLink
108
108
  /* Loop the slideshow */
@@ -364,7 +364,7 @@ export type GeneratorAssistantActionInsertMcpResource = ActionWithParams & {
364
364
  >
365
365
  }
366
366
 
367
- /* Summarize messages based on the conversation
367
+ /* Summarize messages based on the conversation
368
368
 
369
369
  Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
370
370
  export type GeneratorAssistantActionSummaryMessages = ActionWithParams & {
@@ -436,13 +436,13 @@ Default property:
436
436
  cacheMessages?: boolean | DataLink
437
437
  /* LLM Generator (Supports `LLM (GGML)` and `OpenAI LLM` generators) */
438
438
  llmGeneratorId?: string | DataLink | (() => Generator)
439
- /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use.
439
+ /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use.
440
440
 
441
441
  Note: LLM (Qualcomm AI Engine) recommend use `manual` and loaded constantly. */
442
442
  llmLivePolicy?: 'only-in-use' | 'manual' | DataLink
443
443
  /* LLM main session key */
444
444
  llmSessionKey?: string | DataLink
445
- /* Auto Summary Messages (Automatically summarize messages when the LLM context is full or content gets truncated, currently only supported with LLM (GGML) generators)
445
+ /* Auto Summary Messages (Automatically summarize messages when the LLM context is full or content gets truncated, currently only supported with LLM (GGML) generators)
446
446
 
447
447
  Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
448
448
  llmAutoSummaryMessages?: boolean | DataLink
@@ -149,7 +149,7 @@ Default property:
149
149
  beaconsEnable?: boolean | DataLink
150
150
  /* Measured RSSI @ 1m, used for calculate distance */
151
151
  beaconsTxPower?: number | DataLink
152
- /* Beacon configs
152
+ /* Beacon configs
153
153
  presets has pre-defined layout and manufacturerId
154
154
  layout string is like AltBeacon's Beacon Layout, but replace "x" as extra data
155
155
  id and data could be 0x prefixed hex, hex string, decimal or UUID
@@ -71,7 +71,7 @@ Default property:
71
71
  spacekey?: string | DataLink
72
72
  /* Data need to be fetched */
73
73
  properties?: Array<string | DataLink> | DataLink
74
- /* Determine the cache behavior
74
+ /* Determine the cache behavior
75
75
 
76
76
  cache-first: Cache first if existing, then fetch from network
77
77
  network-and-cache: Fetch from network, then save cache
@@ -141,7 +141,7 @@ Default property:
141
141
  redirect?: 'manual' | 'follow' | 'error' | DataLink
142
142
  /* Referrer of HTTP request */
143
143
  referrer?: 'no-referrer' | 'client' | DataLink
144
- /* HTTP request body, it will transform depends on `headers.Content-Type` (`application/json`, `application/x-www-form-urlencoded` or `multipart/form-data`)
144
+ /* HTTP request body, it will transform depends on `headers.Content-Type` (`application/json`, `application/x-www-form-urlencoded` or `multipart/form-data`)
145
145
  The multipart schema like `{ file: { uri: File, type: File MIME, name: File Name }, field: "value" }` */
146
146
  body?: {} | DataLink
147
147
  /* HTTP response type */
@@ -70,7 +70,7 @@ Default property:
70
70
  idleTimeout?: number | DataLink
71
71
  /* HTTP request body limit, 0 is unlimited */
72
72
  bodyLimit?: number | DataLink
73
- /* CORS allowed origins
73
+ /* CORS allowed origins
74
74
  You can use wildcard like `*`, `*.example.com`, `http://*.example.com` */
75
75
  corsOrigins?: Array<string | DataLink> | DataLink
76
76
  /* Authorization type of HTTP request */
@@ -81,15 +81,15 @@ Default property:
81
81
  basicAuthPassword?: string | DataLink
82
82
  /* Token of bearer auth */
83
83
  bearerToken?: string | DataLink
84
- /* Asynchronous response mode
84
+ /* Asynchronous response mode
85
85
  Will block connection until Response Body update. */
86
86
  asyncMode?: boolean | DataLink
87
- /* Save request body as file
87
+ /* Save request body as file
88
88
  Only work on `application/octet-stream` or `multipart/form-data`. */
89
89
  saveBodyAsFile?: boolean | DataLink
90
90
  /* Response status code */
91
91
  resStatusCode?: number | DataLink
92
- /* Response Content-Type
92
+ /* Response Content-Type
93
93
  `text/*` will not convert body.
94
94
  `application/xml` convert object to XML, [example object struct](https://github.com/davidcalhoun/jstoxml#example-10-podcast-rss-feed)
95
95
  `application/octet-stream` body should be Base64 string or file path. */
@@ -27,13 +27,13 @@ Default property:
27
27
  property?: {
28
28
  /* Enable listening for input */
29
29
  enabled?: boolean | DataLink
30
- /* Key map to transform key or key code to the designated content
30
+ /* Key map to transform key or key code to the designated content
31
31
  Example: { 37: 'left', 38: 'up', 39: 'right', 40: 'down', 'Enter': 'confirm' }
32
32
  Supports both key codes (numbers) and key names (strings) as keys */
33
33
  keyMap?: {} | DataLink
34
34
  /* Key outlet preference use key code or key. */
35
35
  keyOutletPrefer?: 'auto' | 'key-code' | 'key' | DataLink
36
- /* Key or code to finish batch input
36
+ /* Key or code to finish batch input
37
37
  Common values: 13 (Enter), 27 (Escape), 'Enter', 'Escape' */
38
38
  batchStopKeys?: Array<string | DataLink | number | DataLink | DataLink> | DataLink
39
39
  /* Debounce time (ms) to finish batch input */
@@ -119,7 +119,7 @@ Default property:
119
119
  }
120
120
  >
121
121
  | DataLink
122
- /* Tools for function calling following Anthropic format
122
+ /* Tools for function calling following Anthropic format
123
123
  Format: Array of objects with {name, description, input_schema} structure
124
124
  See: https://docs.anthropic.com/en/docs/tool-use */
125
125
  completionTools?: Array<{} | DataLink> | DataLink
@@ -161,7 +161,7 @@ Default property:
161
161
  }
162
162
  }
163
163
 
164
- /* LLM inference using Anthropic-compatible API endpoints
164
+ /* LLM inference using Anthropic-compatible API endpoints
165
165
 
166
166
  ## Features
167
167
  - Compatible with Anthropic API format
@@ -110,7 +110,7 @@ Default property:
110
110
  }
111
111
  }
112
112
 
113
- /* LLM inference using Apple Intelligence on iOS/tvOS 26+
113
+ /* LLM inference using Apple Intelligence on iOS/tvOS 26+
114
114
 
115
115
  ## Features
116
116
  - Native Apple Intelligence integration
@@ -459,10 +459,10 @@ Default property:
459
459
  }
460
460
  */
461
461
  property?: {
462
- /* Initialize the Llama context on generator initialization
462
+ /* Initialize the Llama context on generator initialization
463
463
  Please note that it will take some RAM depending on the model size */
464
464
  init?: boolean | DataLink
465
- /* The URL or path of model
465
+ /* The URL or path of model
466
466
  We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
467
467
  modelUrl?: string | DataLink
468
468
  /* Hash type of model */
@@ -477,10 +477,10 @@ Default property:
477
477
  mmprojHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
478
478
  /* Hash of mmproj file (PREVIEW FEATURE) */
479
479
  mmprojHash?: string | DataLink
480
- /* Minimum tokens for image encoding in multimodal (PREVIEW FEATURE)
480
+ /* Minimum tokens for image encoding in multimodal (PREVIEW FEATURE)
481
481
  Useful for dynamic resolution models (e.g. Qwen-VL). Default: -1 (auto) */
482
482
  imageMinTokens?: number | DataLink
483
- /* Maximum tokens for image encoding in multimodal (PREVIEW FEATURE)
483
+ /* Maximum tokens for image encoding in multimodal (PREVIEW FEATURE)
484
484
  Limit tokens for dynamic resolution models to balance speed vs. detail. Default: -1 (auto) */
485
485
  imageMaxTokens?: number | DataLink
486
486
  /* Chat Template (Jinja format) to override the default template from model */
@@ -493,13 +493,13 @@ Default property:
493
493
  uBatchSize?: number | DataLink
494
494
  /* Number of threads */
495
495
  maxThreads?: number | DataLink
496
- /* Accelerator variant (Only for desktop)
496
+ /* Accelerator variant (Only for desktop)
497
497
  `default` - CPU / Metal (macOS)
498
498
  `vulkan` - Use Vulkan
499
499
  `cuda` - Use CUDA
500
500
  `snapdragon` - Use OpenCL/Hexagon of Qualcomm Snapdragon */
501
501
  accelVariant?: 'default' | 'vulkan' | 'cuda' | 'snapdragon' | DataLink
502
- /* Devices. For example:
502
+ /* Devices. For example:
503
503
 
504
504
  Metal or CPU for iOS/tvOS/MacOS
505
505
  OpenCL or CPU for Android
@@ -534,7 +534,7 @@ Default property:
534
534
  transformScriptCode?: string | DataLink
535
535
  /* Variables used in Transform Script (object) */
536
536
  transformScriptVariables?: {} | DataLink
537
- /* Session save mode
537
+ /* Session save mode
538
538
  `none` - No session saving
539
539
  `prompt` - Save session when prompt processed
540
540
  `completion` - Save session when completion finished
@@ -546,7 +546,7 @@ Default property:
546
546
  sessionRemain?: number | DataLink
547
547
  /* TODO:loran_gqarms_norm_epsrope_freq_baserope_freq_scale */
548
548
  completionMode?: 'auto' | 'chat' | 'text' | DataLink
549
- /* Tools for chat mode using OpenAI-compatible function calling format
549
+ /* Tools for chat mode using OpenAI-compatible function calling format
550
550
  Format: Array of objects with {type, function: {name, description, parameters}} structure
551
551
  See: https://platform.openai.com/docs/guides/function-calling */
552
552
  completionTools?: Array<{} | DataLink> | DataLink
@@ -566,7 +566,7 @@ Default property:
566
566
  | DataLink
567
567
  /* Prompt (text mode) */
568
568
  completionPrompt?: string | DataLink
569
- /* Media paths to be used in the prompt template (PREVIEW FEATURE)
569
+ /* Media paths to be used in the prompt template (PREVIEW FEATURE)
570
570
  In prompt, use `<__media__>` for position of media content */
571
571
  completionPromptMediaPaths?: Array<string | DataLink> | DataLink
572
572
  /* Data to be used in the prompt template (e.g. `Hello ${name}`). Supports nested data, such as `Hello ${user.name}`. */
@@ -594,7 +594,7 @@ Default property:
594
594
  completionNow?: string | DataLink
595
595
  /* Additional keyword arguments for chat template (object) */
596
596
  completionChatTemplateKwargs?: {} | DataLink
597
- /* Use reasoning format for enhanced response structure
597
+ /* Use reasoning format for enhanced response structure
598
598
  `auto` - Auto-determine the reasoning format of the model
599
599
  `none` - Disable reasoning format */
600
600
  completionUseReasoningFormat?: 'auto' | 'none' | DataLink
@@ -706,7 +706,7 @@ Default property:
706
706
  }
707
707
  }
708
708
 
709
- /* Local Large Language Model (LLM) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
709
+ /* Local Large Language Model (LLM) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
710
710
 
711
711
  ## Notice
712
712
  - The device RAM must be larger than 8GB
@@ -110,7 +110,7 @@ Default property:
110
110
  audios?: Array<string | DataLink> | DataLink
111
111
  /* Tool call parser */
112
112
  toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
113
- /* Tools for chat mode using OpenAI-compatible function calling format
113
+ /* Tools for chat mode using OpenAI-compatible function calling format
114
114
  Format: Array of objects with {type, function: {name, description, parameters}} structure
115
115
  See: https://platform.openai.com/docs/guides/function-calling */
116
116
  tools?: Array<{} | DataLink> | DataLink
@@ -132,12 +132,12 @@ Default property:
132
132
  numBeams?: number | DataLink
133
133
  /* Do sampling */
134
134
  doSample?: boolean | DataLink
135
- /* Executor candidates, descending order of priority
135
+ /* Executor candidates, descending order of priority
136
136
  Default will be xnnpack, wasm, cpu */
137
137
  executors?:
138
138
  | Array<'qnn' | 'dml' | 'nnapi' | 'xnnpack' | 'coreml' | 'cpu' | 'wasm' | 'webgpu' | DataLink>
139
139
  | DataLink
140
- /* Execution mode
140
+ /* Execution mode
141
141
  Usually when the model has many branches, setting this option to `parallel` will give you better performance. */
142
142
  executionMode?: 'sequential' | 'parallel' | DataLink
143
143
  /* QNN backend */
@@ -169,7 +169,7 @@ Default property:
169
169
  }
170
170
  }
171
171
 
172
- /* Local LLM inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
172
+ /* Local LLM inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
173
173
  You can use any converted model on HuggingFace. */
174
174
  export type GeneratorOnnxLLM = Generator &
175
175
  GeneratorOnnxLLMDef & {
@@ -122,7 +122,7 @@ Default property:
122
122
  }
123
123
  >
124
124
  | DataLink
125
- /* Tools for chat mode following OpenAI function calling format
125
+ /* Tools for chat mode following OpenAI function calling format
126
126
  Format: Array of objects with {type, function: {name, description, parameters}} structure
127
127
  See: https://platform.openai.com/docs/guides/function-calling */
128
128
  completionTools?: Array<{} | DataLink> | DataLink
@@ -175,7 +175,7 @@ Default property:
175
175
  }
176
176
  }
177
177
 
178
- /* LLM inference using OpenAI-compatible API endpoints
178
+ /* LLM inference using OpenAI-compatible API endpoints
179
179
 
180
180
  ## Features
181
181
  - Compatible with OpenAI API format
@@ -108,7 +108,7 @@ Default property:
108
108
  | DataLink
109
109
  /* SOC model */
110
110
  socModel?: 'X Elite' | 'X Plus' | '8 Elite' | '8 Gen 3' | 'QCS8550' | DataLink
111
- /* Custom model base URL
111
+ /* Custom model base URL
112
112
  The model should be bundled, for details see https://github.com/mybigday/node-qnn-llm?tab=readme-ov-file#bundled-file */
113
113
  customModelUrl?: string | DataLink
114
114
  /* Custom model MD5 */
@@ -142,7 +142,7 @@ Default property:
142
142
  stopWords?: Array<string | DataLink> | DataLink
143
143
  /* Tool call parser */
144
144
  toolCallParser?: 'llama3_json' | 'mistral' | 'hermes' | 'internlm' | 'phi4' | DataLink
145
- /* Tools for chat mode using OpenAI-compatible function calling format
145
+ /* Tools for chat mode using OpenAI-compatible function calling format
146
146
  Format: Array of objects with {type, function: {name, description, parameters}} structure
147
147
  See: https://platform.openai.com/docs/guides/function-calling */
148
148
  tools?: Array<{} | DataLink> | DataLink
@@ -47,7 +47,7 @@ Default property:
47
47
  name?: string | DataLink
48
48
  /* Version of the MCP server */
49
49
  version?: string | DataLink
50
- /* Resources
50
+ /* Resources
51
51
  Type:
52
52
  `static`: Return static data
53
53
  `detect-data-change`: Watch data target change to return data,
@@ -93,7 +93,7 @@ Default property:
93
93
  }
94
94
  >
95
95
  | DataLink
96
- /* Tools
96
+ /* Tools
97
97
  Type:
98
98
  `static`: Return static data
99
99
  `detect-data-change`: Watch data target change to return data,
@@ -139,7 +139,7 @@ Default property:
139
139
  }
140
140
  >
141
141
  | DataLink
142
- /* Prompts
142
+ /* Prompts
143
143
  Type:
144
144
  `static`: Return static data
145
145
  `detect-data-change`: Watch data target change to return data,
@@ -54,7 +54,7 @@ Default property:
54
54
  passcode?: string | DataLink
55
55
  /* Include File Types */
56
56
  includeTypes?: Array<'IMAGE' | 'VIDEO' | 'AUDIO' | 'FILE' | DataLink> | DataLink
57
- /* Determine the cache behavior
57
+ /* Determine the cache behavior
58
58
 
59
59
  cache-first: Cache first if existing, then fetch from network
60
60
  network-and-cache: Fetch from network, then save cache
@@ -60,7 +60,7 @@ Default property:
60
60
  property?: {
61
61
  /* Modal mode */
62
62
  modalMode?: 'root' | 'in-subspace' | DataLink
63
- /* Inquirer schema
63
+ /* Inquirer schema
64
64
  `key`: Field key (unique, required)
65
65
  `kind`: Field type (required)
66
66
  `title`: Field or message title
@@ -69,13 +69,13 @@ Default property:
69
69
  batchSize?: number | DataLink
70
70
  /* Physical maximum batch size (default: 512) */
71
71
  uBatchSize?: number | DataLink
72
- /* GGML accelerator variant (Only for desktop)
72
+ /* GGML accelerator variant (Only for desktop)
73
73
  `default` - CPU / Metal (macOS)
74
74
  `vulkan` - Use Vulkan
75
75
  `cuda` - Use CUDA
76
76
  `snapdragon` - Use OpenCL/Hexagon of Qualcomm Snapdragon */
77
77
  accelVariant?: 'default' | 'vulkan' | 'cuda' | 'snapdragon' | DataLink
78
- /* Devices. For example:
78
+ /* Devices. For example:
79
79
 
80
80
  Metal or CPU for iOS/tvOS/MacOS
81
81
  OpenCL or CPU for Android
@@ -121,7 +121,7 @@ Default property:
121
121
  }
122
122
  }
123
123
 
124
- /* Local rerank based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
124
+ /* Local rerank based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
125
125
 
126
126
  ## Notice
127
127
  - The device RAM must be larger than 8GB
@@ -109,15 +109,15 @@ Default property:
109
109
  }
110
110
  */
111
111
  property?: {
112
- /* Initialize the Whisper context on generator initialization
112
+ /* Initialize the Whisper context on generator initialization
113
113
  Please note that it will take some RAM depending on the model size */
114
114
  init?: boolean | DataLink
115
- /* Accelerator variant (Only for desktop)
115
+ /* Accelerator variant (Only for desktop)
116
116
  `default` - CPU / Metal (macOS)
117
117
  `vulkan` - Use Vulkan
118
118
  `cuda` - Use CUDA */
119
119
  accelVariant?: 'default' | 'vulkan' | 'cuda' | DataLink
120
- /* Use model name, the model download progress will be done in preload stage or the generator initialization stage.
120
+ /* Use model name, the model download progress will be done in preload stage or the generator initialization stage.
121
121
  We used `ggml` format model, please refer to https://huggingface.co/BricksDisplay/whisper-ggml
122
122
  You can also choose `custom` option and set `Model URL` and `Model MD5` to use your own model */
123
123
  modelName?:
@@ -168,7 +168,7 @@ Default property:
168
168
  | 'distil-large-v3-q5_0'
169
169
  | 'distil-large-v3-q8_0'
170
170
  | DataLink
171
- /* The URL or path of model
171
+ /* The URL or path of model
172
172
  We used `ggml` format model, please refer to https://github.com/ggerganov/whisper.cpp/tree/master/models */
173
173
  modelUrl?: string | DataLink
174
174
  /* Hash type of model */
@@ -302,7 +302,7 @@ Default property:
302
302
  inferOffset?: number | DataLink
303
303
  /* Audio duration of audio to process in milliseconds */
304
304
  inferDuration?: number | DataLink
305
- /* The file URL or path to be inferred.
305
+ /* The file URL or path to be inferred.
306
306
  It only supported `wav` format with 16kHz sample rate & single (mono) channel */
307
307
  inferFileUrl?: string | DataLink
308
308
  /* MD5 of file to be inferred */
@@ -345,7 +345,7 @@ Default property:
345
345
  }
346
346
  }
347
347
 
348
- /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp)
348
+ /* Local Speech-to-Text (STT) inference based on GGML and [whisper.cpp](https://github.com/ggerganov/whisper.cpp)
349
349
 
350
350
  ## Notice
351
351
  - iOS: Supported GPU acceleration, recommended use M1+ / A17+ chip device
@@ -67,7 +67,7 @@ Default property:
67
67
  | DataLink
68
68
  /* Return timestamps */
69
69
  returnTimestamps?: 'none' | 'enable' | 'word' | DataLink
70
- /* Transcription language
70
+ /* Transcription language
71
71
  Not specifying the language will auto detect the language. */
72
72
  language?:
73
73
  | 'English'
@@ -174,12 +174,12 @@ Default property:
174
174
  task?: 'transcribe' | 'translate' | DataLink
175
175
  /* Inferencing chunk length */
176
176
  chunkLength?: number | DataLink
177
- /* Executor candidates, descending order of priority
177
+ /* Executor candidates, descending order of priority
178
178
  Default will be xnnpack, wasm, cpu */
179
179
  executors?:
180
180
  | Array<'qnn' | 'dml' | 'nnapi' | 'xnnpack' | 'coreml' | 'cpu' | 'wasm' | 'webgpu' | DataLink>
181
181
  | DataLink
182
- /* Execution mode
182
+ /* Execution mode
183
183
  Usually when the model has many branches, setting this option to `parallel` will give you better performance. */
184
184
  executionMode?: 'sequential' | 'parallel' | DataLink
185
185
  /* QNN backend */
@@ -205,7 +205,7 @@ Default property:
205
205
  }
206
206
  }
207
207
 
208
- /* Local STT inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
208
+ /* Local STT inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
209
209
  You can use any converted model on HuggingFace. */
210
210
  export type GeneratorOnnxSTT = Generator &
211
211
  GeneratorOnnxSTTDef & {
@@ -87,7 +87,7 @@ Default property:
87
87
  }
88
88
  }
89
89
 
90
- /* Speech recognition on iOS 26+ (Not supported on tvOS)
90
+ /* Speech recognition on iOS 26+ (Not supported on tvOS)
91
91
 
92
92
  ## Features
93
93
  - Native Apple speech recognition
@@ -57,7 +57,7 @@ Default property:
57
57
  host?: string | DataLink
58
58
  /* Connection target port */
59
59
  port?: number | DataLink
60
- /* Data mode
60
+ /* Data mode
61
61
  CRLF: Receive until CRLF
62
62
  LF: Receive until LF
63
63
  raw: Raw packet */
@@ -72,7 +72,7 @@ Default property:
72
72
  init?: boolean | DataLink
73
73
  /* Bind port of TCP server */
74
74
  port?: number | DataLink
75
- /* Data mode
75
+ /* Data mode
76
76
  CRLF: Receive until CRLF
77
77
  LF: Receive until LF
78
78
  raw: Raw packet */
@@ -83,7 +83,7 @@ Default property:
83
83
  }
84
84
  }
85
85
 
86
- /* Text-to-speech synthesis using Apple's native speech synthesis
86
+ /* Text-to-speech synthesis using Apple's native speech synthesis
87
87
 
88
88
  ## Features
89
89
  - Native Apple speech synthesis
@@ -84,7 +84,7 @@ Default property:
84
84
  }
85
85
  }
86
86
 
87
- /* Text-to-speech synthesis using Apple's native speech synthesis
87
+ /* Text-to-speech synthesis using Apple's native speech synthesis
88
88
 
89
89
  ## Features
90
90
  - Native Apple speech synthesis
@@ -75,7 +75,7 @@ Default property:
75
75
  property?: {
76
76
  /* Initialize the TTS context on generator initialization */
77
77
  init?: boolean | DataLink
78
- /* The URL or path of model
78
+ /* The URL or path of model
79
79
  We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
80
80
  modelUrl?: string | DataLink
81
81
  /* Hash type of model */
@@ -128,13 +128,13 @@ Default property:
128
128
  microBatchSize?: number | DataLink
129
129
  /* Number of threads */
130
130
  maxThreads?: number | DataLink
131
- /* Accelerator variant (Only for desktop)
131
+ /* Accelerator variant (Only for desktop)
132
132
  `default` - CPU / Metal (macOS)
133
133
  `vulkan` - Use Vulkan
134
134
  `cuda` - Use CUDA
135
135
  `snapdragon` - Use OpenCL/Hexagon of Qualcomm Snapdragon */
136
136
  accelVariant?: 'default' | 'vulkan' | 'cuda' | 'snapdragon' | DataLink
137
- /* Devices. For example:
137
+ /* Devices. For example:
138
138
 
139
139
  Metal or CPU for iOS/tvOS/MacOS
140
140
  OpenCL or CPU for Android
@@ -168,7 +168,7 @@ Default property:
168
168
  }
169
169
  }
170
170
 
171
- /* Local Text-to-Speech (TTS) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
171
+ /* Local Text-to-Speech (TTS) inference based on GGML and [llama.cpp](https://github.com/ggerganov/llama.cpp)
172
172
  You can use any converted model on HuggingFace.
173
173
 
174
174
  ## Notice
@@ -64,7 +64,7 @@ Default property:
64
64
  property?: {
65
65
  /* Initialize the TTS context on generator initialization */
66
66
  init?: boolean | DataLink
67
- /* TTS model
67
+ /* TTS model
68
68
  The mms-tts models are licensed under CC-BY-NC-4.0 */
69
69
  model?: string | DataLink
70
70
  /* Model type */
@@ -83,7 +83,7 @@ Default property:
83
83
  | DataLink
84
84
  /* Vocoder model for SpeechT5 */
85
85
  vocoderModel?: 'Custom' | 'speecht5_hifigan' | DataLink
86
- /* Custom vocoder model
86
+ /* Custom vocoder model
87
87
  Choose model from https://huggingface.co/models?library=transformers.js&other=hifigan */
88
88
  customVocoderModel?: string | DataLink
89
89
  /* Speaker embedding, for SpeechT5 or StyleTTS (Kokoro) */
@@ -116,12 +116,12 @@ Default property:
116
116
  softBreakRegex?: string | DataLink
117
117
  /* Time to force inference when softBreakRegex is not satisfied */
118
118
  hardBreakTime?: number | DataLink
119
- /* Executor candidates, descending order of priority
119
+ /* Executor candidates, descending order of priority
120
120
  Default will be xnnpack, wasm, cpu */
121
121
  executors?:
122
122
  | Array<'qnn' | 'dml' | 'nnapi' | 'xnnpack' | 'coreml' | 'cpu' | 'wasm' | 'webgpu' | DataLink>
123
123
  | DataLink
124
- /* Execution mode
124
+ /* Execution mode
125
125
  Usually when the model has many branches, setting this option to `parallel` will give you better performance. */
126
126
  executionMode?: 'sequential' | 'parallel' | DataLink
127
127
  /* QNN backend */
@@ -147,7 +147,7 @@ Default property:
147
147
  }
148
148
  }
149
149
 
150
- /* Local Text-to-Speech (TTS) inference based on ONNX Runtime and [transformers.js](https://huggingface.co/docs/transformers.js)
150
+ /* Local Text-to-Speech (TTS) inference based on ONNX Runtime and [transformers.js](https://huggingface.co/docs/transformers.js)
151
151
  You can use any converted model on HuggingFace. */
152
152
  export type GeneratorTTS = Generator &
153
153
  GeneratorTTSDef & {
@@ -54,7 +54,7 @@ Default property:
54
54
  apiKey?: string | DataLink
55
55
  /* OpenAI TTS model */
56
56
  model?: string | DataLink
57
- /* Voice to use
57
+ /* Voice to use
58
58
  Select voice from https://openai.fm , default alloy */
59
59
  voice?: string | DataLink
60
60
  /* Additional instructions for the speech generation */
@@ -85,7 +85,7 @@ Default property:
85
85
  pageHeight?: number | DataLink
86
86
  /* Cut mode at the end of printing */
87
87
  cutMode?: 'NONE' | 'FULL' | 'PARTIAL' | DataLink
88
- /* Print payload
88
+ /* Print payload
89
89
  example:
90
90
  { type: 'text', content: 'Hello, World!' }
91
91
  { type: 'feed', feedLines: 1 }
@@ -127,14 +127,14 @@ Default property:
127
127
  }
128
128
  */
129
129
  property?: {
130
- /* Initialize the VAD context on generator initialization
130
+ /* Initialize the VAD context on generator initialization
131
131
  Please note that it will take some RAM depending on the model size */
132
132
  init?: boolean | DataLink
133
- /* Use model name, currently only supports the Silero VAD model.
133
+ /* Use model name, currently only supports the Silero VAD model.
134
134
  The model download progress will be done in preload stage or the generator initialization stage.
135
135
  You can also choose `custom` option and set `Model URL` and `Model SHA1` to use your own model */
136
136
  modelName?: 'custom' | 'silero-v6.2.0' | 'silero-v5.1.2' | DataLink
137
- /* The URL or path of model
137
+ /* The URL or path of model
138
138
  We used `ggml` format model, please refer to https://huggingface.co/ggml-org/whisper-vad */
139
139
  modelUrl?: string | DataLink
140
140
  /* Hash type of model */
@@ -157,7 +157,7 @@ Default property:
157
157
  detectSpeechPadMs?: number | DataLink
158
158
  /* Overlap between analysis windows (0.0-1.0) */
159
159
  detectSamplesOverlap?: number | DataLink
160
- /* The file URL or path to be analyzed.
160
+ /* The file URL or path to be analyzed.
161
161
  It only supported `wav` format with 16kHz sample rate & single (mono) channel */
162
162
  detectFileUrl?: string | DataLink
163
163
  /* MD5 of file to be analyzed */
@@ -142,12 +142,12 @@ Default property:
142
142
  detectMaxSpeechDurationS?: number | DataLink
143
143
  /* Padding around speech segments in milliseconds */
144
144
  detectSpeechPadMs?: number | DataLink
145
- /* Executor candidates, descending order of priority
145
+ /* Executor candidates, descending order of priority
146
146
  Default will be xnnpack, wasm, cpu */
147
147
  executors?:
148
148
  | Array<'qnn' | 'dml' | 'nnapi' | 'xnnpack' | 'coreml' | 'cpu' | 'wasm' | 'webgpu' | DataLink>
149
149
  | DataLink
150
- /* Execution mode
150
+ /* Execution mode
151
151
  Usually when the model has many branches, setting this option to `parallel` will give you better performance. */
152
152
  executionMode?: 'sequential' | 'parallel' | DataLink
153
153
  /* QNN backend */
@@ -177,7 +177,7 @@ Default property:
177
177
  }
178
178
  }
179
179
 
180
- /* Local Voice Activity Detection (VAD) inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
180
+ /* Local Voice Activity Detection (VAD) inference based on [transformers.js](https://huggingface.co/docs/transformers.js)
181
181
  You can use any compatible VAD model from HuggingFace (Silero VAD, smart-turn, etc.) */
182
182
  export type GeneratorVadInferenceOnnx = Generator &
183
183
  GeneratorVadInferenceOnnxDef & {
@@ -92,7 +92,7 @@ Default property:
92
92
  }
93
93
  }
94
94
 
95
- /* Traditional Voice Activity Detection (VAD) using pitch detection and RMS volume analysis
95
+ /* Traditional Voice Activity Detection (VAD) using pitch detection and RMS volume analysis
96
96
  No model download required - pure algorithmic approach */
97
97
  export type GeneratorVadInferenceTraditional = Generator &
98
98
  GeneratorVadInferenceTraditionalDef & {
@@ -31,7 +31,7 @@ export type GeneratorVectorStoreActionReset = ActionWithParams & {
31
31
  }>
32
32
  }
33
33
 
34
- /* Insert file content with path or url, support Office / OpenOffice / PDF
34
+ /* Insert file content with path or url, support Office / OpenOffice / PDF
35
35
 
36
36
  PDF is not currently supprted in iOS / Android */
37
37
  export type GeneratorVectorStoreActionInsertFile = ActionWithParams & {
@@ -161,13 +161,13 @@ Default property:
161
161
  ggmlContextSize?: number | DataLink
162
162
  /* Pooling type of ggml model */
163
163
  ggmlPoolingType?: 'none' | 'mean' | 'cls' | 'last' | 'rank' | DataLink
164
- /* GGML accelerator variant (Only for desktop)
164
+ /* GGML accelerator variant (Only for desktop)
165
165
  `default` - CPU / Metal (macOS)
166
166
  `vulkan` - Use Vulkan
167
167
  `cuda` - Use CUDA
168
168
  `snapdragon` - Use OpenCL/Hexagon of Qualcomm Snapdragon */
169
169
  ggmlAccelVariant?: 'default' | 'vulkan' | 'cuda' | 'snapdragon' | DataLink
170
- /* Devices. For example:
170
+ /* Devices. For example:
171
171
 
172
172
  Metal or CPU for iOS/tvOS/MacOS
173
173
  OpenCL or CPU for Android
@@ -31,7 +31,7 @@ Default property:
31
31
  init?: boolean | DataLink
32
32
  /* URL of crawler request */
33
33
  url?: string | DataLink
34
- /* Method of crawler request
34
+ /* Method of crawler request
35
35
 
36
36
  Platform not supported for `webview`: tvOS, Desktop, Web */
37
37
  method?: 'webview' | 'http' | DataLink
@@ -63,7 +63,7 @@ Default property:
63
63
  localVideoTarget?: string | DataLink | (() => Brick)
64
64
  /* Target remote WebRTC stream brick ID */
65
65
  remoteVideoTarget?: string | DataLink | (() => Brick)
66
- /* ICE Server list
66
+ /* ICE Server list
67
67
  Default use Google public STUN servers if not setted. */
68
68
  iceServers?:
69
69
  | Array<
@@ -89,7 +89,7 @@ Default property:
89
89
  videoWidth?: number | DataLink
90
90
  /* Label of data channel */
91
91
  dataChannelLabel?: string | DataLink
92
- /* Input signal for create peer connection
92
+ /* Input signal for create peer connection
93
93
  Signal Types:
94
94
  `initiate`: Initiate a WebRTC call
95
95
  `offer`: WebRTC SDP offer
package/types/system.ts CHANGED
@@ -276,7 +276,7 @@ export type SystemActionPopupReset = ActionWithParams & {
276
276
  >
277
277
  }
278
278
 
279
- /* Take screenshot for current subspace */
279
+ /* Take screenshot for current subspace or the full viewport (including portal subspaces) */
280
280
  export type SystemActionTakeScreenshot = ActionWithParams & {
281
281
  __actionName: 'TAKE_SCREENSHOT'
282
282
  params?: Array<