@fugood/bricks-project 2.21.5 → 2.21.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -743,6 +743,10 @@ export const templateActionNameMap = {
743
743
  mcpVariables: 'GENERATOR_ASSISTANT_MCP_VARIABLES',
744
744
  role: 'GENERATOR_ASSISTANT_ROLE',
745
745
  },
746
+ GENERATOR_ASSISTANT_SUMMARY_MESSAGES: {
747
+ summaryMessages: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES',
748
+ summarySessionKey: 'GENERATOR_ASSISTANT_SUMMARY_SESSION_KEY',
749
+ },
746
750
  },
747
751
  GENERATOR_VECTOR_STORE: {
748
752
  GENERATOR_VECTOR_STORE_RESET: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@fugood/bricks-project",
3
- "version": "2.21.5",
3
+ "version": "2.21.6",
4
4
  "main": "index.ts",
5
5
  "scripts": {
6
6
  "build": "node scripts/build.js"
@@ -4312,11 +4312,6 @@ export type GeneratorMCPServerActionRefreshResources = Action & {
4312
4312
  __actionName: 'GENERATOR_MCP_SERVER_REFRESH_RESOURCES'
4313
4313
  }
4314
4314
 
4315
- /* End stream */
4316
- export type GeneratorMCPServerActionEndStream = Action & {
4317
- __actionName: 'GENERATOR_MCP_SERVER_END_STREAM'
4318
- }
4319
-
4320
4315
  interface GeneratorMCPServerDef {
4321
4316
  /*
4322
4317
  Default property:
@@ -6285,7 +6280,7 @@ Default property:
6285
6280
  /* Tools for chat mode using OpenAI-compatible function calling format
6286
6281
  Format: Array of objects with {type, function: {name, description, parameters}} structure
6287
6282
  See: https://platform.openai.com/docs/guides/function-calling */
6288
- completionTools?: {} | DataLink
6283
+ completionTools?: Array<{} | DataLink> | DataLink
6289
6284
  /* Enable parallel tool calls */
6290
6285
  completionParallelToolCalls?: boolean | DataLink
6291
6286
  /* Tool choice for chat mode */
@@ -6544,7 +6539,7 @@ Default property:
6544
6539
  /* Tools for chat mode using OpenAI-compatible function calling format
6545
6540
  Format: Array of objects with {type, function: {name, description, parameters}} structure
6546
6541
  See: https://platform.openai.com/docs/guides/function-calling */
6547
- tools?: {} | DataLink
6542
+ tools?: Array<{} | DataLink> | DataLink
6548
6543
  /* Tool choice for chat mode */
6549
6544
  toolChoice?: 'none' | 'auto' | 'required' | DataLink
6550
6545
  /* Enable parallel tool calls */
@@ -6687,7 +6682,10 @@ Default property:
6687
6682
  "apiEndpoint": "https://api.openai.com/v1",
6688
6683
  "model": "gpt-4o-mini",
6689
6684
  "completionMessages": [
6690
- null
6685
+ {
6686
+ "role": "system",
6687
+ "content": "You are a helpful assistant."
6688
+ }
6691
6689
  ],
6692
6690
  "completionMaxTokens": 1024,
6693
6691
  "completionTemperature": 1,
@@ -6726,7 +6724,7 @@ Default property:
6726
6724
  /* Tools for chat mode following OpenAI function calling format
6727
6725
  Format: Array of objects with {type, function: {name, description, parameters}} structure
6728
6726
  See: https://platform.openai.com/docs/guides/function-calling */
6729
- completionTools?: {} | DataLink
6727
+ completionTools?: Array<{} | DataLink> | DataLink
6730
6728
  /* Enable parallel tool calls */
6731
6729
  completionParallelToolCalls?: boolean | DataLink
6732
6730
  /* Tool choice for chat mode */
@@ -7235,16 +7233,50 @@ export type GeneratorAssistantActionInsertMcpResource = ActionWithParams & {
7235
7233
  >
7236
7234
  }
7237
7235
 
7236
+ /* Summarize messages based on the conversation
7237
+
7238
+ Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
7239
+ export type GeneratorAssistantActionSummaryMessages = ActionWithParams & {
7240
+ __actionName: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES'
7241
+ params?: Array<
7242
+ | {
7243
+ input: 'summaryMessages'
7244
+ value?: Array<any> | DataLink | EventProperty
7245
+ mapping?: string
7246
+ }
7247
+ | {
7248
+ input: 'summarySessionKey'
7249
+ value?: string | DataLink | EventProperty
7250
+ mapping?: string
7251
+ }
7252
+ >
7253
+ }
7254
+
7238
7255
  interface GeneratorAssistantDef {
7239
7256
  /*
7240
7257
  Default property:
7241
7258
  {
7242
7259
  "initialMessages": [
7243
- null
7260
+ {
7261
+ "role": "system",
7262
+ "content": "You are a helpful assistant."
7263
+ }
7244
7264
  ],
7245
7265
  "cacheMessages": false,
7246
7266
  "llmLivePolicy": "only-in-use",
7247
7267
  "llmSessionKey": "default-assistant",
7268
+ "llmAutoSummaryMessages": false,
7269
+ "llmSummaryMessages": [
7270
+ {
7271
+ "role": "system",
7272
+ "content": "You are a helpful assistant specialized in summarizing conversations. Create a concise summary of the conversation that captures the key points while maintaining important context. The summary should be clear, accurate, and briefer than the original conversation."
7273
+ },
7274
+ {
7275
+ "role": "user",
7276
+ "content": "Please summarize the following conversation into a concise system message that can replace the previous conversation context while maintaining all important information. Here is the conversation to summarize:\n\n"
7277
+ }
7278
+ ],
7279
+ "llmSummarySessionKey": "assistant-default-summary",
7248
7280
  "fileSearchEnabled": false,
7249
7281
  "fileSearchLivePolicy": "only-in-use",
7250
7282
  "sttEnabled": true,
@@ -7268,12 +7300,28 @@ Default property:
7268
7300
  | DataLink
7269
7301
  /* Whether to cache messages */
7270
7302
  cacheMessages?: boolean | DataLink
7271
- /* LLM Generator (Currently only support `LLM (GGML)` generator) */
7303
+ /* LLM Generator (Supports `LLM (GGML)` and `OpenAI LLM` generators) */
7272
7304
  llmGeneratorId?: string | DataLink
7273
7305
  /* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use. */
7274
7306
  llmLivePolicy?: 'only-in-use' | 'manual' | DataLink
7275
7307
  /* LLM main session key */
7276
7308
  llmSessionKey?: string | DataLink
7309
+ /* Auto Summary Messages (Automatically summarize messages when the LLM context is full or content gets truncated, currently only supported with LLM (GGML) generators)
7310
+
7311
+ Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
7312
+ llmAutoSummaryMessages?: boolean | DataLink
7313
+ /* Summary Messages (Messages used for summarization prompt, conversation will be appended to the last message) */
7314
+ llmSummaryMessages?:
7315
+ | Array<
7316
+ | DataLink
7317
+ | {
7318
+ role?: string | DataLink
7319
+ content?: string | DataLink
7320
+ }
7321
+ >
7322
+ | DataLink
7323
+ /* Summary Session Key (Custom session key for summarization) */
7324
+ llmSummarySessionKey?: string | DataLink
7277
7325
  /* File Search (Vector Store) Enabled */
7278
7326
  fileSearchEnabled?: boolean | DataLink
7279
7327
  /* File Search (Vector Store) Generator */
@@ -7286,13 +7334,13 @@ Default property:
7286
7334
  fileSearchThreshold?: number | DataLink
7287
7335
  /* File Search Ignore Threshold. (Default: false) */
7288
7336
  fileSearchIgnoreThreshold?: boolean | DataLink
7289
- /* STT Generator use for transcribing audio message (Currently only support `STT (GGML)` generator) */
7337
  /* STT Generator used for transcribing audio message (Supports `STT (GGML)` generators) */
7290
7338
  sttGeneratorId?: string | DataLink
7291
7339
  /* STT Enabled */
7292
7340
  sttEnabled?: boolean | DataLink
7293
7341
  /* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when the assistant is not in use. */
7294
7342
  sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
7295
- /* TTS Generator use for generating LLM response audio message (Currently only support `TTS (ONNX)` generator) */
7343
+ /* TTS Generator used for generating LLM response audio message (Supports `TTS (ONNX)` and `OpenAI TTS` generators) */
7296
7344
  ttsGeneratorId?: string | DataLink
7297
7345
  /* TTS Enabled */
7298
7346
  ttsEnabled?: boolean | DataLink