@fugood/bricks-project 2.22.0-beta.7 → 2.22.0-beta.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -743,6 +743,10 @@ export const templateActionNameMap = {
     mcpVariables: 'GENERATOR_ASSISTANT_MCP_VARIABLES',
     role: 'GENERATOR_ASSISTANT_ROLE',
   },
+  GENERATOR_ASSISTANT_SUMMARY_MESSAGES: {
+    summaryMessages: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES',
+    summarySessionKey: 'GENERATOR_ASSISTANT_SUMMARY_SESSION_KEY',
+  },
   },
   GENERATOR_VECTOR_STORE: {
     GENERATOR_VECTOR_STORE_RESET: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fugood/bricks-project",
-  "version": "2.22.0-beta.7",
+  "version": "2.22.0-beta.9",
   "main": "index.ts",
   "scripts": {
     "build": "node scripts/build.js"
@@ -14,5 +14,5 @@
     "lodash": "^4.17.4",
     "uuid": "^8.3.1"
   },
-  "gitHead": "b205bf371305e9d4929f05c0c310ff2362a43739"
+  "gitHead": "286abe17fdd111c728fd74e3d98822d7e11d718e"
 }
@@ -4497,6 +4497,8 @@ Default property:
   isListening?: () => Data
   /* Last error of HTTP server */
   lastError?: () => Data
+  /* MCP server endpoint URL */
+  endpoint?: () => Data
   /* Connected remotes (Session ID) */
   connectedRemotes?: () => Data
   /* Last resource request ({ name: string, uri: string, params: object }) */
@@ -4525,6 +4527,7 @@ export type GeneratorMCPServer = Generator &
   outlet:
     | 'isListening'
     | 'lastError'
+    | 'endpoint'
    | 'connectedRemotes'
    | 'lastResourceRequest'
    | 'lastToolCall'
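
The new `endpoint` outlet joins the existing server-state outlets. A minimal TypeScript sketch of consuming it; only the outlet names come from the `GeneratorMCPServer` type above, while `subscribe` is a hypothetical helper, not part of the package API:

```ts
// Sketch only: outlet names are from the GeneratorMCPServer type above;
// `subscribe` is a hypothetical helper, not part of the package API.
type MCPServerOutlet =
  | 'isListening'
  | 'lastError'
  | 'endpoint' // new in beta.9: MCP server endpoint URL
  | 'connectedRemotes'
  | 'lastResourceRequest'
  | 'lastToolCall'

declare function subscribe(
  outlet: MCPServerOutlet,
  handler: (data: unknown) => void,
): void

subscribe('endpoint', (url) => {
  console.log('MCP server reachable at', url) // e.g. to configure a client
})
```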
@@ -6174,6 +6177,7 @@ Default property:
   "useMmap": true,
   "cacheKType": "f16",
   "cacheVType": "f16",
+  "ctxShift": true,
   "transformScriptEnabled": false,
   "transformScriptCode": "\/\* Global variable: inputs = { prompt, messages, variables } \*\/\nreturn inputs.prompt",
   "transformScriptVariables": {},
@@ -6259,6 +6263,8 @@ Default property:
   cacheKType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
   /* KV cache data type for the V (Default: f16) */
   cacheVType?: 'f16' | 'f32' | 'q8_0' | 'q4_0' | 'q4_1' | 'iq4_nl' | 'q5_0' | 'q5_1' | DataLink
+  /* Enable context shift */
+  ctxShift?: boolean | DataLink
   /* Enable Transform Script for processing the prompt */
   transformScriptEnabled?: boolean | DataLink
   /* Code of Transform Script */
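
Context shift (as in llama.cpp-style runtimes) evicts the oldest tokens when the context window fills, instead of failing the completion. A sketch of a partial `LLM (GGML)` generator property object using the new flag; the values mirror the defaults shown above and the object is illustrative only:

```ts
// Partial property object for an LLM (GGML) generator; values mirror
// the defaults shown in the diff, and the object is illustrative only.
const llmProps = {
  useMmap: true,
  cacheKType: 'f16',
  cacheVType: 'f16',
  // New in beta.9: when the context fills, drop the oldest tokens and
  // keep generating rather than stopping with a context-full error.
  ctxShift: true,
}
```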
@@ -6277,8 +6283,10 @@ Default property:
   sessionRemain?: number | DataLink
   /* TODO: lora, n_gqa, rms_norm_eps, rope_freq_base, rope_freq_scale */
   completionMode?: 'auto' | 'chat' | 'text' | DataLink
-  /* Tools for chat mode */
-  completionTools?: {} | DataLink
+  /* Tools for chat mode using OpenAI-compatible function calling format
+     Format: Array of objects with {type, function: {name, description, parameters}} structure
+     See: https://platform.openai.com/docs/guides/function-calling */
+  completionTools?: Array<{} | DataLink> | DataLink
   /* Enable parallel tool calls */
   completionParallelToolCalls?: boolean | DataLink
   /* Tool choice for chat mode */
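
The comment above pins down the expected shape. A minimal sketch of a `completionTools` value in the OpenAI-compatible function calling format; the weather tool itself is purely illustrative:

```ts
// Illustrative tool array in the OpenAI function calling shape:
// { type, function: { name, description, parameters } }.
const completionTools = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get the current weather for a given city',
      parameters: {
        type: 'object',
        properties: {
          city: { type: 'string', description: 'City name' },
        },
        required: ['city'],
      },
    },
  },
]
```

The same shape applies to the GGML generator's `tools` field and the OpenAI LLM generator's `completionTools` in the hunks below.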
@@ -6534,8 +6542,10 @@ Default property:
   stopWords?: Array<string | DataLink> | DataLink
   /* Tool call parser */
   toolCallParser?: 'llama3_json' | 'mistral' | DataLink
-  /* Tools for chat mode */
-  tools?: {} | DataLink
+  /* Tools for chat mode using OpenAI-compatible function calling format
+     Format: Array of objects with {type, function: {name, description, parameters}} structure
+     See: https://platform.openai.com/docs/guides/function-calling */
+  tools?: Array<{} | DataLink> | DataLink
   /* Tool choice for chat mode */
   toolChoice?: 'none' | 'auto' | 'required' | DataLink
   /* Enable parallel tool calls */
@@ -6678,7 +6688,10 @@ Default property:
   "apiEndpoint": "https://api.openai.com/v1",
   "model": "gpt-4o-mini",
   "completionMessages": [
-    null
+    {
+      "role": "system",
+      "content": "You are a helpful assistant."
+    }
   ],
   "completionMaxTokens": 1024,
   "completionTemperature": 1,
@@ -6714,8 +6727,10 @@ Default property:
   }
   >
   | DataLink
-  /* Tools for chat mode */
-  completionTools?: {} | DataLink
+  /* Tools for chat mode following OpenAI function calling format
+     Format: Array of objects with {type, function: {name, description, parameters}} structure
+     See: https://platform.openai.com/docs/guides/function-calling */
+  completionTools?: Array<{} | DataLink> | DataLink
   /* Enable parallel tool calls */
   completionParallelToolCalls?: boolean | DataLink
   /* Tool choice for chat mode */
@@ -7224,16 +7239,50 @@ export type GeneratorAssistantActionInsertMcpResource = ActionWithParams & {
   >
 }
 
+/* Summarize messages based on the conversation
+
+   Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
+export type GeneratorAssistantActionSummaryMessages = ActionWithParams & {
+  __actionName: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES'
+  params?: Array<
+    | {
+        input: 'summaryMessages'
+        value?: Array<any> | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'summarySessionKey'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+  >
+}
+
 interface GeneratorAssistantDef {
 /*
 Default property:
 {
   "initialMessages": [
-    null
+    {
+      "role": "system",
+      "content": "You are a helpful assistant."
+    }
   ],
   "cacheMessages": false,
   "llmLivePolicy": "only-in-use",
   "llmSessionKey": "default-assistant",
+  "llmAutoSummaryMessages": false,
+  "llmSummaryMessages": [
+    {
+      "role": "system",
+      "content": "You are a helpful assistant specialized in summarizing conversations. Create a concise summary of the conversation that captures the key points while maintaining important context. The summary should be clear, accurate, and briefer than the original conversation."
+    },
+    {
+      "role": "user",
+      "content": "Please summarize the following conversation into a concise system message that can replace the previous conversation context while maintaining all important information. Here is the conversation to summarize:\n\n"
+    }
+  ],
+  "llmSummarySessionKey": "assistant-default-summary",
   "fileSearchEnabled": false,
   "fileSearchLivePolicy": "only-in-use",
   "sttEnabled": true,
@@ -7257,12 +7306,28 @@ Default property:
   | DataLink
   /* Whether to cache messages */
   cacheMessages?: boolean | DataLink
-  /* LLM Generator (Currently only support `LLM (GGML)` generator) */
+  /* LLM Generator (Supports `LLM (GGML)` and `OpenAI LLM` generators) */
   llmGeneratorId?: string | DataLink
   /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use. */
   llmLivePolicy?: 'only-in-use' | 'manual' | DataLink
   /* LLM main session key */
   llmSessionKey?: string | DataLink
+  /* Auto Summary Messages (Automatically summarize messages when the LLM context is full or content gets truncated, currently only supported with LLM (GGML) generators)
+
+     Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
+  llmAutoSummaryMessages?: boolean | DataLink
+  /* Summary Messages (Messages used for summarization prompt, conversation will be appended to the last message) */
+  llmSummaryMessages?:
+    | Array<
+        | DataLink
+        | {
+            role?: string | DataLink
+            content?: string | DataLink
+          }
+      >
+    | DataLink
+  /* Summary Session Key (Custom session key for summarization) */
+  llmSummarySessionKey?: string | DataLink
   /* File Search (Vector Store) Enabled */
   fileSearchEnabled?: boolean | DataLink
   /* File Search (Vector Store) Generator */
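
Putting the new assistant fields together, a sketch of enabling auto-summary with a custom prompt; the field names and default session key come from the definitions above, while the shortened message texts are illustrative:

```ts
// Partial GeneratorAssistantDef property object. llmSummaryMessages is
// the prompt the summarizer runs; the conversation is appended to the
// last message. Texts are shortened for brevity.
const assistantProps = {
  llmAutoSummaryMessages: true,
  llmSummaryMessages: [
    { role: 'system', content: 'You summarize conversations concisely.' },
    { role: 'user', content: 'Summarize the following conversation:\n\n' },
  ],
  llmSummarySessionKey: 'assistant-default-summary',
}
```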
@@ -7275,13 +7340,13 @@ Default property:
   fileSearchThreshold?: number | DataLink
   /* File Search Ignore Threshold. (Default: false) */
   fileSearchIgnoreThreshold?: boolean | DataLink
-  /* STT Generator use for transcribing audio message (Currently only support `STT (GGML)` generator) */
+  /* STT Generator use for transcribing audio message (Supports `STT (GGML)` generators) */
   sttGeneratorId?: string | DataLink
   /* STT Enabled */
   sttEnabled?: boolean | DataLink
   /* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when the assistant is not in use. */
   sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
-  /* TTS Generator use for generating LLM response audio message (Currently only support `TTS (ONNX)` generator) */
+  /* TTS Generator use for generating LLM response audio message (Supports `TTS (ONNX)` and `OpenAI TTS` generators) */
   ttsGeneratorId?: string | DataLink
   /* TTS Enabled */
   ttsEnabled?: boolean | DataLink
@@ -770,6 +770,8 @@ export const templateEventPropsMap = {
     'GENERATOR_LLM_COMPLETION_RESULT', // type: string
     'GENERATOR_LLM_COMPLETION_TOOL_CALLS', // type: array
     'GENERATOR_LLM_COMPLETION_FULL_CONTEXT', // type: string
+    'GENERATOR_LLM_COMPLETION_IS_CONTEXT_FULL', // type: bool
+    'GENERATOR_LLM_COMPLETION_IS_TRUNCATED', // type: bool
     'GENERATOR_LLM_COMPLETION_RESULT_DETAILS', // type: object
   ],
   onCompletionFunctionCall: [
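
The two new event properties make context pressure observable. A hypothetical sketch of reacting to them; the property names come from `templateEventPropsMap` above, but the handler wiring is an assumption:

```ts
// Hypothetical handler: only the property names are taken from the map
// above; how completion events are delivered is project-specific.
function onCompletionEnd(eventProps: Record<string, unknown>) {
  const isContextFull = eventProps.GENERATOR_LLM_COMPLETION_IS_CONTEXT_FULL === true
  const isTruncated = eventProps.GENERATOR_LLM_COMPLETION_IS_TRUNCATED === true
  if (isContextFull || isTruncated) {
    // A natural follow-up is triggering GENERATOR_ASSISTANT_SUMMARY_MESSAGES
    // (or enabling llmAutoSummaryMessages) to compact the conversation.
  }
}
```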