@promptbook/legacy-documents 0.101.0-9 → 0.102.0-0
This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/esm/index.es.js +57 -112
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +30 -0
- package/esm/typings/src/_packages/core.index.d.ts +12 -0
- package/esm/typings/src/_packages/types.index.d.ts +12 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
- package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/Chat/utils/savePlugins.d.ts +55 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/PromptResult.d.ts +2 -4
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
- package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +57 -112
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
- package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
- package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
- package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
- /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js
CHANGED
@@ -26,7 +26,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
+const PROMPTBOOK_ENGINE_VERSION = '0.102.0-0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1056,11 +1056,12 @@ async function getScraperIntermediateSource(source, options) {
     catch (error) {
         // Note: If we can't create cache directory, continue without it
         // This handles read-only filesystems, permission issues, and missing parent directories
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) ;
         else {
             // Re-throw other unexpected errors
             throw error;
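The widened condition above treats common filesystem failures (read-only filesystem, missing permissions, missing parent directory) as non-fatal when the scraper cache directory cannot be created, while everything else is still re-thrown. A minimal sketch of the same pattern, assuming a plain `mkdir` from `node:fs/promises`; the actual helper surrounding this check is not shown in the diff:

```ts
import { mkdir } from 'node:fs/promises';

// Sketch only: try to create the cache directory, but keep going without a cache
// when the filesystem refuses (mirrors the message checks added in the hunk above).
async function tryCreateCacheDirectory(cacheDirname: string): Promise<void> {
    try {
        await mkdir(cacheDirname, { recursive: true });
    } catch (error) {
        if (
            error instanceof Error &&
            (error.message.includes('EROFS') ||
                error.message.includes('read-only') ||
                error.message.includes('EACCES') ||
                error.message.includes('EPERM') ||
                error.message.includes('ENOENT'))
        ) {
            return; // Caching is optional, so continue without it
        }
        throw error; // Re-throw other unexpected errors
    }
}
```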
@@ -3121,75 +3122,32 @@ function countUsage(llmTools) {
  */
 
 /**
- *
- * These profiles represent each provider as a virtual persona in chat interfaces
+ * Takes an item or an array of items and returns an array of items
  *
- *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
  */
-
-
-
-
-
-
-    }
-
-
-
-        color: '#d97706', // Anthropic's orange/amber color
-    },
-    AZURE_OPENAI: {
-        name: 'AZURE_OPENAI',
-        fullname: 'Azure OpenAI',
-        color: '#0078d4', // Microsoft Azure blue
-    },
-    GOOGLE: {
-        name: 'GOOGLE',
-        fullname: 'Google Gemini',
-        color: '#4285f4', // Google blue
-    },
-    DEEPSEEK: {
-        name: 'DEEPSEEK',
-        fullname: 'DeepSeek',
-        color: '#7c3aed', // Purple color for DeepSeek
-    },
-    OLLAMA: {
-        name: 'OLLAMA',
-        fullname: 'Ollama',
-        color: '#059669', // Emerald green for local models
-    },
-    REMOTE: {
-        name: 'REMOTE',
-        fullname: 'Remote Server',
-        color: '#6b7280', // Gray for remote/proxy connections
-    },
-    MOCKED_ECHO: {
-        name: 'MOCKED_ECHO',
-        fullname: 'Echo (Test)',
-        color: '#8b5cf6', // Purple for test/mock tools
-    },
-    MOCKED_FAKE: {
-        name: 'MOCKED_FAKE',
-        fullname: 'Fake LLM (Test)',
-        color: '#ec4899', // Pink for fake/test tools
-    },
-    VERCEL: {
-        name: 'VERCEL',
-        fullname: 'Vercel AI',
-        color: '#000000', // Vercel's black
-    },
-    MULTIPLE: {
-        name: 'MULTIPLE',
-        fullname: 'Multiple Providers',
-        color: '#6366f1', // Indigo for combined/multiple providers
-    },
-};
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
- *
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [💞] Ignore a discrepancy between file name and entity name
+ * Profile for Multiple providers aggregation
  */
-
+const MULTIPLE_PROVIDER_PROFILE = {
+    name: 'MULTIPLE',
+    fullname: 'Multiple Providers',
+    color: '#6366f1',
+};
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
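The per-provider profile table that used to live here is dropped from this bundle (the typings list above also removes `llmProviderProfiles.d.ts`); only the `MULTIPLE` profile is kept as a local constant, and the private `arrayableToArray` helper is moved up to this spot from its previous location next to `joinLlmExecutionTools` (see the later hunk). A typed re-statement of the helper's contract, for illustration only, since the helper is `@private` and not exported; the generic signature and the `Array.isArray` guard are additions of this sketch:

```ts
function arrayableToArray<TItem>(input: undefined | TItem | ReadonlyArray<TItem>): ReadonlyArray<TItem> {
    if (input === undefined) {
        return []; // 2) Undefined returns an empty array
    }
    if (Array.isArray(input)) {
        return input as ReadonlyArray<TItem>; // 3) An array is returned as-is
    }
    return [input as TItem]; // 1) Any other value (including null) is wrapped in a one-item array
}

arrayableToArray(undefined); // -> []
arrayableToArray('single item'); // -> ['single item']
arrayableToArray(['a', 'b']); // -> ['a', 'b']
```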
@@ -3200,12 +3158,10 @@ class MultipleLlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
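The aggregate's title is no longer a hard-coded getter; callers now pass it as the first constructor argument, and it surfaces in the error messages shown in the following hunks. A sketch of the new call shape; the import paths and the provider variables are assumptions, since the diff does not show how the class is exported:

```ts
import { MultipleLlmExecutionTools } from '@promptbook/core'; // <- assumed export path
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed export path

declare const openAiTools: LlmExecutionTools; // hypothetical, any configured provider tools
declare const claudeTools: LlmExecutionTools; // hypothetical, any configured provider tools

// 0.101.x: new MultipleLlmExecutionTools(openAiTools, claudeTools); the title was always 'Multiple LLM Providers'
// 0.102.0-0: the title is explicit
const llmTools = new MultipleLlmExecutionTools('Playground providers', openAiTools, claudeTools);
```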
@@ -3227,7 +3183,7 @@ class MultipleLlmExecutionTools {
         `);
     }
     get profile() {
-        return
+        return MULTIPLE_PROVIDER_PROFILE;
     }
     /**
      * Check the configuration of all execution tools
@@ -3291,7 +3247,7 @@ class MultipleLlmExecutionTools {
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [🤖]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
             }
         }
         catch (error) {
@@ -3312,7 +3268,7 @@ class MultipleLlmExecutionTools {
                 // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
                 // 3) ...
                 spaceTrim$1((block) => `
-                    All execution tools failed:
+                    All execution tools of ${this.title} failed:
 
                     ${block(errors
                 .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3321,11 +3277,11 @@
                 `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim$1((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -3355,7 +3311,7 @@ class MultipleLlmExecutionTools {
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim$1(`
             You have not provided any \`LlmExecutionTools\`
@@ -3387,30 +3343,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
     };
     */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
 /**
- *
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @
+ * @public exported from `@promptbook/core`
  */
-function
-
-
-
-
-
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Prepares the persona for the pipeline
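The inline `_llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms)` pattern that was repeated in `preparePersona`, `preparePipeline`, `executeAttempts`, `getKnowledgeForTask`, and `MarkdownScraper` (see the hunks below) is now centralized in `getSingleLlmExecutionTools`. A usage sketch; the import paths are assumptions based on the `@public exported from @promptbook/core` annotation above:

```ts
import { getSingleLlmExecutionTools } from '@promptbook/core'; // <- assumed export path
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed export path

declare const openAiTools: LlmExecutionTools; // hypothetical provider tools
declare const ollamaTools: LlmExecutionTools; // hypothetical provider tools

// A single set of tools is returned unchanged:
const single = getSingleLlmExecutionTools(openAiTools);

// Several sets are joined into one aggregate titled
// 'Multiple LLM Providers joined by `getSingleLlmExecutionTools`':
const joined = getSingleLlmExecutionTools([openAiTools, ollamaTools]);
```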
@@ -3429,8 +3382,7 @@ async function preparePersona(personaDescription, tools, options) {
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -3474,6 +3426,7 @@ async function preparePersona(personaDescription, tools, options) {
     };
 }
 /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -4191,9 +4144,7 @@ async function preparePipeline(pipeline, tools, options) {
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
@@ -5336,9 +5287,7 @@ async function executeAttempts(options) {
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5858,9 +5807,7 @@ async function getKnowledgeForTask(options) {
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
@@ -6461,13 +6408,13 @@ function createPipelineExecutor(options) {
                 // Calculate and update tldr based on pipeline progress
                 const cv = newOngoingResult;
                 // Calculate progress based on parameters resolved vs total parameters
-                const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+                const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
                 let resolvedParameters = 0;
                 let currentTaskTitle = '';
                 // Get the resolved parameters from output parameters
                 if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
                     // Count how many output parameters have non-empty values
-                    resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+                    resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
                 }
                 // Try to determine current task from execution report
                 if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6577,9 +6524,7 @@ class MarkdownScraper {
             throw new MissingToolsError('LLM tools are required for scraping external files');
             // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
         }
-
-        const _llms = arrayableToArray(llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(llm);
         // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
         const collection = createCollectionFromJson(...PipelineCollection);
         const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({