@promptbook/legacy-documents 0.101.0-9 → 0.101.0
This diff compares the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- package/README.md +0 -4
- package/esm/index.es.js +57 -112
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +14 -0
- package/esm/typings/src/_packages/core.index.d.ts +12 -0
- package/esm/typings/src/_packages/types.index.d.ts +8 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/PromptResult.d.ts +2 -4
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
- package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +57 -112
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
- package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
- package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
- package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
- /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/umd/index.umd.js
CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1055,11 +1055,12 @@
         catch (error) {
             // Note: If we can't create cache directory, continue without it
             // This handles read-only filesystems, permission issues, and missing parent directories
-            if (error instanceof Error &&
-                error.message.includes('
-
-
-
+            if (error instanceof Error &&
+                (error.message.includes('EROFS') ||
+                    error.message.includes('read-only') ||
+                    error.message.includes('EACCES') ||
+                    error.message.includes('EPERM') ||
+                    error.message.includes('ENOENT'))) ;
             else {
                 // Re-throw other unexpected errors
                 throw error;
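The rewritten check above only swallows errors that indicate the cache directory cannot be created at all (read-only filesystem, missing permissions, missing parent directory) and re-throws everything else. Below is a minimal standalone sketch of the same pattern, assuming a Node.js environment; the `tryCreateCacheDirectory` name and `cacheDirname` parameter are illustrative and not part of the package API.

```js
const { mkdirSync } = require('fs');

/**
 * Attempts to create a cache directory, ignoring only the filesystem errors
 * that the hunk above treats as "no cache available" conditions.
 */
function tryCreateCacheDirectory(cacheDirname) {
    try {
        mkdirSync(cacheDirname, { recursive: true });
    } catch (error) {
        if (
            error instanceof Error &&
            (error.message.includes('EROFS') ||
                error.message.includes('read-only') ||
                error.message.includes('EACCES') ||
                error.message.includes('EPERM') ||
                error.message.includes('ENOENT'))
        ) {
            return; // Continue without a cache on read-only or restricted filesystems
        }
        throw error; // Re-throw other unexpected errors
    }
}
```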
@@ -3120,75 +3121,32 @@
  */
 
 /**
- *
- * These profiles represent each provider as a virtual persona in chat interfaces
+ * Takes an item or an array of items and returns an array of items
  *
- *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
  */
-
-
-
-
-
-
-    }
-
-
-
-        color: '#d97706', // Anthropic's orange/amber color
-    },
-    AZURE_OPENAI: {
-        name: 'AZURE_OPENAI',
-        fullname: 'Azure OpenAI',
-        color: '#0078d4', // Microsoft Azure blue
-    },
-    GOOGLE: {
-        name: 'GOOGLE',
-        fullname: 'Google Gemini',
-        color: '#4285f4', // Google blue
-    },
-    DEEPSEEK: {
-        name: 'DEEPSEEK',
-        fullname: 'DeepSeek',
-        color: '#7c3aed', // Purple color for DeepSeek
-    },
-    OLLAMA: {
-        name: 'OLLAMA',
-        fullname: 'Ollama',
-        color: '#059669', // Emerald green for local models
-    },
-    REMOTE: {
-        name: 'REMOTE',
-        fullname: 'Remote Server',
-        color: '#6b7280', // Gray for remote/proxy connections
-    },
-    MOCKED_ECHO: {
-        name: 'MOCKED_ECHO',
-        fullname: 'Echo (Test)',
-        color: '#8b5cf6', // Purple for test/mock tools
-    },
-    MOCKED_FAKE: {
-        name: 'MOCKED_FAKE',
-        fullname: 'Fake LLM (Test)',
-        color: '#ec4899', // Pink for fake/test tools
-    },
-    VERCEL: {
-        name: 'VERCEL',
-        fullname: 'Vercel AI',
-        color: '#000000', // Vercel's black
-    },
-    MULTIPLE: {
-        name: 'MULTIPLE',
-        fullname: 'Multiple Providers',
-        color: '#6366f1', // Indigo for combined/multiple providers
-    },
-};
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
- *
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [💞] Ignore a discrepancy between file name and entity name
+ * Profile for Multiple providers aggregation
  */
-
+const MULTIPLE_PROVIDER_PROFILE = {
+    name: 'MULTIPLE',
+    fullname: 'Multiple Providers',
+    color: '#6366f1',
+};
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
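Two things replace the removed provider-profile table in this region of the bundle: the `arrayableToArray` normalization helper (previously defined further down in the file, see the `@@ -3386` hunk below) and a single `MULTIPLE_PROVIDER_PROFILE` constant. The sketch below illustrates the helper's documented behavior; the function body is copied from the added lines, and the example values are arbitrary.

```js
// Copied from the added lines above; marked @private, so it is not importable from the package
function arrayableToArray(input) {
    if (input === undefined) {
        return [];
    }
    if (input instanceof Array) {
        return input;
    }
    return [input];
}

console.log(arrayableToArray(undefined));  // -> []           (2) undefined returns an empty array
console.log(arrayableToArray(null));       // -> [null]       (1) any single item, including null, is wrapped
console.log(arrayableToArray('item'));     // -> ['item']     (1) a single item is wrapped
console.log(arrayableToArray(['a', 'b'])); // -> ['a', 'b']   (3) an array is returned as-is
```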
@@ -3199,12 +3157,10 @@
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
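With this change the aggregate's `title` is no longer the hardcoded string `'Multiple LLM Providers'`; it is the first constructor argument and is stored on the instance, which lets it appear in the error messages changed in the later hunks. A rough migration sketch for code that constructs the class directly; `openAiTools` and `anthropicTools` stand in for whatever `LlmExecutionTools` instances the caller already has.

```js
// Before (0.101.0-9): the title was fixed by a getter
// const llmTools = new MultipleLlmExecutionTools(openAiTools, anthropicTools);

// After (0.101.0): the caller names the aggregate explicitly
const llmTools = new MultipleLlmExecutionTools(
    'My application LLM providers', // <- new required first argument
    openAiTools,
    anthropicTools,
);
```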
@@ -3226,7 +3182,7 @@
         `);
     }
     get profile() {
-        return 
+        return MULTIPLE_PROVIDER_PROFILE;
     }
     /**
      * Check the configuration of all execution tools
@@ -3290,7 +3246,7 @@
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [🤖]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
             }
         }
         catch (error) {
@@ -3311,7 +3267,7 @@
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim__default["default"]((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:
 
                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3320,11 +3276,11 @@
             `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -3354,7 +3310,7 @@
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim__default["default"](`
             You have not provided any \`LlmExecutionTools\`
@@ -3386,30 +3342,27 @@
         };
     */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
 /**
- *
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @
+ * @public exported from `@promptbook/core`
  */
-function 
-
-
-
-
-
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Prepares the persona for the pipeline
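The hunks that follow (`@@ -3428`, `@@ -4190`, `@@ -5335`, `@@ -5857`, `@@ -6576`) all delete the same two-line normalization pattern and replace it with a call to the new `getSingleLlmExecutionTools` helper, which also supplies a default title to `joinLlmExecutionTools`. A condensed sketch of the call-site migration; `tools`, `toolsA`, and `toolsB` are placeholders for values the surrounding code already has.

```js
// Before: each call site normalized `tools.llm` by hand
// const _llms = arrayableToArray(tools.llm);
// const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);

// After: one helper does the normalization (and names the joined aggregate)
const llmTools = getSingleLlmExecutionTools(tools.llm);

// Direct callers of `joinLlmExecutionTools` now pass the title first
const joined = joinLlmExecutionTools('My joined providers', toolsA, toolsB);
```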
@@ -3428,8 +3381,7 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const 
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -3473,6 +3425,7 @@
     };
 }
 /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -4190,9 +4143,7 @@
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
@@ -5335,9 +5286,7 @@
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5857,9 +5806,7 @@
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
@@ -6460,13 +6407,13 @@
             // Calculate and update tldr based on pipeline progress
             const cv = newOngoingResult;
             // Calculate progress based on parameters resolved vs total parameters
-            const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+            const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
             let resolvedParameters = 0;
             let currentTaskTitle = '';
             // Get the resolved parameters from output parameters
             if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
                 // Count how many output parameters have non-empty values
-                resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+                resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
             }
             // Try to determine current task from execution report
             if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6576,9 +6523,7 @@
         throw new MissingToolsError('LLM tools are required for scraping external files');
         // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
     }
-
-    const _llms = arrayableToArray(llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(llm);
     // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
     const collection = createCollectionFromJson(...PipelineCollection);
     const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({