@promptbook/node 0.92.0-15 → 0.92.0-17

This diff shows the content of publicly available package versions as released to their public registry, and is provided for informational purposes only.
package/esm/index.es.js CHANGED
@@ -30,7 +30,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-15';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-17';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3224,10 +3224,12 @@ function templateParameters(template, parameters) {
  throw new PipelineExecutionError('Parameter is already opened or not closed');
  }
  if (parameters[parameterName] === undefined) {
+ console.log('!!! templateParameters 1', { parameterName, template, parameters });
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
  }
  let parameterValue = parameters[parameterName];
  if (parameterValue === undefined) {
+ console.log('!!! templateParameters 2', { parameterName, template, parameters });
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
  }
  parameterValue = valueToString(parameterValue);
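
(Not part of the diff.) The two added console.log calls fire just before templateParameters throws for a placeholder that has no value in the parameter map, dumping the offending parameterName together with the full template and parameters. A hypothetical call that would hit the first log, assuming the {parameter} placeholder syntax implied by the error message:

    // `{name}` has no entry in the parameters object:
    templateParameters('Hello {name}!', { greeting: 'Hi' });
    // -> logs '!!! templateParameters 1', { parameterName: 'name', template: 'Hello {name}!', parameters: { greeting: 'Hi' } }
    // -> then throws PipelineExecutionError: Parameter `{name}` is not defined
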
@@ -4144,6 +4146,23 @@ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
  return 1 - dotProduct / (magnitude1 * magnitude2);
  }

+ /**
+ *
+ * @param knowledgePieces
+ * @returns
+ *
+ * @private internal utility of `createPipelineExecutor`
+ */
+ function knowledgePiecesToString(knowledgePieces) {
+ return knowledgePieces
+ .map((knowledgePiece) => {
+ const { content } = knowledgePiece;
+ return `- ${content}`;
+ })
+ .join('\n');
+ // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
+ }
+
  /**
  * @@@
  *
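
(Not part of the diff.) The new private helper renders each knowledge piece as a dash-prefixed line joined by newlines. A hypothetical call, with the pieces reduced to bare { content } objects for illustration (real knowledge pieces carry more fields, such as the embedding index):

    knowledgePiecesToString([
        { content: 'Paris is the capital of France' },
        { content: 'The Seine flows through Paris' },
    ]);
    // -> '- Paris is the capital of France\n- The Seine flows through Paris'
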
@@ -4157,53 +4176,60 @@ async function getKnowledgeForTask(options) {
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
  if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
- return 'No knowledge pieces found';
+ return ''; // <- Note: Np knowledge present, return empty string
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
- const taskEmbeddingPrompt = {
- title: 'Knowledge Search',
- modelRequirements: {
- modelVariant: 'EMBEDDING',
- modelName: firstKnowlegeIndex.modelName,
- },
- content: task.content,
- parameters: {
- /* !!!! */
- },
- };
- const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
- const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
- const { index } = knowledgePiece;
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
- // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
- if (knowledgePieceIndex === undefined) {
+ try {
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+ const _llms = arrayableToArray(tools.llm);
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const taskEmbeddingPrompt = {
+ title: 'Knowledge Search',
+ modelRequirements: {
+ modelVariant: 'EMBEDDING',
+ modelName: firstKnowlegeIndex.modelName,
+ },
+ content: task.content,
+ parameters: {
+ /* !!!! */
+ },
+ };
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+ const { index } = knowledgePiece;
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+ if (knowledgePieceIndex === undefined) {
+ return {
+ content: knowledgePiece.content,
+ relevance: 0,
+ };
+ }
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
  return {
  content: knowledgePiece.content,
- relevance: 0,
+ relevance,
  };
- }
- const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
- return {
- content: knowledgePiece.content,
- relevance,
- };
- });
- const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
- const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
- console.log('!!! Embedding', {
- task,
- taskEmbeddingPrompt,
- taskEmbeddingResult,
- firstKnowlegePiece,
- firstKnowlegeIndex,
- knowledgePiecesWithRelevance,
- knowledgePiecesSorted,
- knowledgePiecesLimited,
- });
- return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
- // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
+ });
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+ console.log('!!! Embedding', {
+ task,
+ taskEmbeddingPrompt,
+ taskEmbeddingResult,
+ firstKnowlegePiece,
+ firstKnowlegeIndex,
+ knowledgePiecesWithRelevance,
+ knowledgePiecesSorted,
+ knowledgePiecesLimited,
+ });
+ return knowledgePiecesToString(knowledgePiecesLimited);
+ }
+ catch (error) {
+ assertsError(error);
+ console.error('Error in `getKnowledgeForTask`', error);
+ // Note: If the LLM fails, just return all knowledge pieces
+ return knowledgePiecesToString(preparedPipeline.knowledgePieces);
+ }
  }
  /**
  * TODO: !!!! Verify if this is working
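
(Not part of the diff.) The rewritten getKnowledgeForTask embeds the task via callEmbeddingModel, scores every knowledge piece against that embedding with computeCosineSimilarity, sorts, keeps five pieces, and renders them with knowledgePiecesToString; if anything in the LLM path throws, the new catch block returns all knowledge pieces instead. Because computeCosineSimilarity (see the context of its hunk above) returns 1 - cos(θ), a distance rather than a similarity, the ascending sort followed by slice(0, 5) keeps the closest matches. A minimal standalone sketch of that ranking step, using made-up two-dimensional embeddings and bare { content, position } objects rather than the package's real types:

    // Same convention as the package: smaller value = more similar (it is 1 minus the cosine).
    function computeCosineSimilarity(vectorA, vectorB) {
        const dotProduct = vectorA.reduce((sum, value, i) => sum + value * vectorB[i], 0);
        const magnitudeA = Math.sqrt(vectorA.reduce((sum, value) => sum + value * value, 0));
        const magnitudeB = Math.sqrt(vectorB.reduce((sum, value) => sum + value * value, 0));
        return 1 - dotProduct / (magnitudeA * magnitudeB);
    }

    // Hypothetical pieces with precomputed embeddings; `taskEmbedding` stands in for
    // the vector that llmTools.callEmbeddingModel(taskEmbeddingPrompt) would return:
    const knowledgePieces = [
        { content: 'Promptbook pipelines are defined in books', position: [0.9, 0.1] },
        { content: 'Bananas are yellow', position: [0.1, 0.9] },
    ];
    const taskEmbedding = [0.8, 0.2];

    const ranked = knowledgePieces
        .map(({ content, position }) => ({ content, relevance: computeCosineSimilarity(position, taskEmbedding) }))
        .sort((a, b) => a.relevance - b.relevance) // ascending: smallest distance first
        .slice(0, 5);

    console.log(ranked.map(({ content }) => `- ${content}`).join('\n'));
    // -> - Promptbook pipelines are defined in books
    //    - Bananas are yellow
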