@fugood/bricks-project 2.22.0-beta.8 → 2.22.0-beta.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/compile/action-name-map.ts +4 -0
- package/package.json +2 -2
- package/types/generators.ts +61 -8
|
@@ -743,6 +743,10 @@ export const templateActionNameMap = {
|
|
|
743
743
|
mcpVariables: 'GENERATOR_ASSISTANT_MCP_VARIABLES',
|
|
744
744
|
role: 'GENERATOR_ASSISTANT_ROLE',
|
|
745
745
|
},
|
|
746
|
+
GENERATOR_ASSISTANT_SUMMARY_MESSAGES: {
|
|
747
|
+
summaryMessages: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES',
|
|
748
|
+
summarySessionKey: 'GENERATOR_ASSISTANT_SUMMARY_SESSION_KEY',
|
|
749
|
+
},
|
|
746
750
|
},
|
|
747
751
|
GENERATOR_VECTOR_STORE: {
|
|
748
752
|
GENERATOR_VECTOR_STORE_RESET: {
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@fugood/bricks-project",
|
|
3
|
-
"version": "2.22.0-beta.8",
|
|
3
|
+
"version": "2.22.0-beta.9",
|
|
4
4
|
"main": "index.ts",
|
|
5
5
|
"scripts": {
|
|
6
6
|
"build": "node scripts/build.js"
|
|
@@ -14,5 +14,5 @@
|
|
|
14
14
|
"lodash": "^4.17.4",
|
|
15
15
|
"uuid": "^8.3.1"
|
|
16
16
|
},
|
|
17
|
-
"gitHead": "
|
|
17
|
+
"gitHead": "286abe17fdd111c728fd74e3d98822d7e11d718e"
|
|
18
18
|
}
|
package/types/generators.ts
CHANGED
|
@@ -6286,7 +6286,7 @@ Default property:
|
|
|
6286
6286
|
/* Tools for chat mode using OpenAI-compatible function calling format
|
|
6287
6287
|
Format: Array of objects with {type, function: {name, description, parameters}} structure
|
|
6288
6288
|
See: https://platform.openai.com/docs/guides/function-calling */
|
|
6289
|
-
completionTools?: {} | DataLink
|
|
6289
|
+
completionTools?: Array<{} | DataLink> | DataLink
|
|
6290
6290
|
/* Enable parallel tool calls */
|
|
6291
6291
|
completionParallelToolCalls?: boolean | DataLink
|
|
6292
6292
|
/* Tool choice for chat mode */
|
|
@@ -6545,7 +6545,7 @@ Default property:
|
|
|
6545
6545
|
/* Tools for chat mode using OpenAI-compatible function calling format
|
|
6546
6546
|
Format: Array of objects with {type, function: {name, description, parameters}} structure
|
|
6547
6547
|
See: https://platform.openai.com/docs/guides/function-calling */
|
|
6548
|
-
tools?: {} | DataLink
|
|
6548
|
+
tools?: Array<{} | DataLink> | DataLink
|
|
6549
6549
|
/* Tool choice for chat mode */
|
|
6550
6550
|
toolChoice?: 'none' | 'auto' | 'required' | DataLink
|
|
6551
6551
|
/* Enable parallel tool calls */
|
|
@@ -6688,7 +6688,10 @@ Default property:
|
|
|
6688
6688
|
"apiEndpoint": "https://api.openai.com/v1",
|
|
6689
6689
|
"model": "gpt-4o-mini",
|
|
6690
6690
|
"completionMessages": [
|
|
6691
|
-
|
|
6691
|
+
{
|
|
6692
|
+
"role": "system",
|
|
6693
|
+
"content": "You are a helpful assistant."
|
|
6694
|
+
}
|
|
6692
6695
|
],
|
|
6693
6696
|
"completionMaxTokens": 1024,
|
|
6694
6697
|
"completionTemperature": 1,
|
|
@@ -6727,7 +6730,7 @@ Default property:
|
|
|
6727
6730
|
/* Tools for chat mode following OpenAI function calling format
|
|
6728
6731
|
Format: Array of objects with {type, function: {name, description, parameters}} structure
|
|
6729
6732
|
See: https://platform.openai.com/docs/guides/function-calling */
|
|
6730
|
-
completionTools?: {} | DataLink
|
|
6733
|
+
completionTools?: Array<{} | DataLink> | DataLink
|
|
6731
6734
|
/* Enable parallel tool calls */
|
|
6732
6735
|
completionParallelToolCalls?: boolean | DataLink
|
|
6733
6736
|
/* Tool choice for chat mode */
|
|
@@ -7236,16 +7239,50 @@ export type GeneratorAssistantActionInsertMcpResource = ActionWithParams & {
|
|
|
7236
7239
|
>
|
|
7237
7240
|
}
|
|
7238
7241
|
|
|
7242
|
+
/* Summarize messages based on the conversation
|
|
7243
|
+
|
|
7244
|
+
Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
|
|
7245
|
+
export type GeneratorAssistantActionSummaryMessages = ActionWithParams & {
|
|
7246
|
+
__actionName: 'GENERATOR_ASSISTANT_SUMMARY_MESSAGES'
|
|
7247
|
+
params?: Array<
|
|
7248
|
+
| {
|
|
7249
|
+
input: 'summaryMessages'
|
|
7250
|
+
value?: Array<any> | DataLink | EventProperty
|
|
7251
|
+
mapping?: string
|
|
7252
|
+
}
|
|
7253
|
+
| {
|
|
7254
|
+
input: 'summarySessionKey'
|
|
7255
|
+
value?: string | DataLink | EventProperty
|
|
7256
|
+
mapping?: string
|
|
7257
|
+
}
|
|
7258
|
+
>
|
|
7259
|
+
}
|
|
7260
|
+
|
|
7239
7261
|
interface GeneratorAssistantDef {
|
|
7240
7262
|
/*
|
|
7241
7263
|
Default property:
|
|
7242
7264
|
{
|
|
7243
7265
|
"initialMessages": [
|
|
7244
|
-
|
|
7266
|
+
{
|
|
7267
|
+
"role": "system",
|
|
7268
|
+
"content": "You are a helpful assistant."
|
|
7269
|
+
}
|
|
7245
7270
|
],
|
|
7246
7271
|
"cacheMessages": false,
|
|
7247
7272
|
"llmLivePolicy": "only-in-use",
|
|
7248
7273
|
"llmSessionKey": "default-assistant",
|
|
7274
|
+
"llmAutoSummaryMessages": false,
|
|
7275
|
+
"llmSummaryMessages": [
|
|
7276
|
+
{
|
|
7277
|
+
"role": "system",
|
|
7278
|
+
"content": "You are a helpful assistant specialized in summarizing conversations. Create a concise summary of the conversation that captures the key points while maintaining important context. The summary should be clear, accurate, and briefer than the original conversation."
|
|
7279
|
+
},
|
|
7280
|
+
{
|
|
7281
|
+
"role": "user",
|
|
7282
|
+
"content": "Please summarize the following conversation into a concise system message that can replace the previous conversation context while maintaining all important information. Here is the conversation to summarize:\n\n"
|
|
7283
|
+
}
|
|
7284
|
+
],
|
|
7285
|
+
"llmSummarySessionKey": "assistant-default-summary",
|
|
7249
7286
|
"fileSearchEnabled": false,
|
|
7250
7287
|
"fileSearchLivePolicy": "only-in-use",
|
|
7251
7288
|
"sttEnabled": true,
|
|
@@ -7269,12 +7306,28 @@ Default property:
|
|
|
7269
7306
|
| DataLink
|
|
7270
7307
|
/* Whether to cache messages */
|
|
7271
7308
|
cacheMessages?: boolean | DataLink
|
|
7272
|
-
/* LLM Generator (
|
|
7309
|
+
/* LLM Generator (Supports `LLM (GGML)` and `OpenAI LLM` generators) */
|
|
7273
7310
|
llmGeneratorId?: string | DataLink
|
|
7274
7311
|
/* LLM Live Policy. If the policy is `only-in-use`, the LLM context will be released when the assistant is not in use. */
|
|
7275
7312
|
llmLivePolicy?: 'only-in-use' | 'manual' | DataLink
|
|
7276
7313
|
/* LLM main session key */
|
|
7277
7314
|
llmSessionKey?: string | DataLink
|
|
7315
|
+
/* Auto Summary Messages (Automatically summarize messages when the LLM context is full or content gets truncated, currently only supported with LLM (GGML) generators)
|
|
7316
|
+
|
|
7317
|
+
Note: Summary uses the same LLM context size, so it is recommended only to use it when the system prompt (in Initial Messages) is long, otherwise it may still fail when the context is full (Ctx Shift is NO). */
|
|
7318
|
+
llmAutoSummaryMessages?: boolean | DataLink
|
|
7319
|
+
/* Summary Messages (Messages used for summarization prompt, conversation will be appended to the last message) */
|
|
7320
|
+
llmSummaryMessages?:
|
|
7321
|
+
| Array<
|
|
7322
|
+
| DataLink
|
|
7323
|
+
| {
|
|
7324
|
+
role?: string | DataLink
|
|
7325
|
+
content?: string | DataLink
|
|
7326
|
+
}
|
|
7327
|
+
>
|
|
7328
|
+
| DataLink
|
|
7329
|
+
/* Summary Session Key (Custom session key for summarization) */
|
|
7330
|
+
llmSummarySessionKey?: string | DataLink
|
|
7278
7331
|
/* File Search (Vector Store) Enabled */
|
|
7279
7332
|
fileSearchEnabled?: boolean | DataLink
|
|
7280
7333
|
/* File Search (Vector Store) Generator */
|
|
@@ -7287,13 +7340,13 @@ Default property:
|
|
|
7287
7340
|
fileSearchThreshold?: number | DataLink
|
|
7288
7341
|
/* File Search Ignore Threshold. (Default: false) */
|
|
7289
7342
|
fileSearchIgnoreThreshold?: boolean | DataLink
|
|
7290
|
-
/* STT Generator use for transcribing audio message (
|
|
7343
|
+
/* STT Generator use for transcribing audio message (Supports `STT (GGML)` generators) */
|
|
7291
7344
|
sttGeneratorId?: string | DataLink
|
|
7292
7345
|
/* STT Enabled */
|
|
7293
7346
|
sttEnabled?: boolean | DataLink
|
|
7294
7347
|
/* STT Live Policy. If the policy is `only-in-use`, the STT context will be released when the assistant is not in use. */
|
|
7295
7348
|
sttLivePolicy?: 'only-in-use' | 'manual' | DataLink
|
|
7296
|
-
/* TTS Generator use for generating LLM response audio message (
|
|
7349
|
+
/* TTS Generator use for generating LLM response audio message (Supports `TTS (ONNX)` and `OpenAI TTS` generators) */
|
|
7297
7350
|
ttsGeneratorId?: string | DataLink
|
|
7298
7351
|
/* TTS Enabled */
|
|
7299
7352
|
ttsEnabled?: boolean | DataLink
|