@promptbook/pdf 0.92.0-14 → 0.92.0-17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -26,7 +26,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.92.0-14';
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-17';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4562,10 +4562,12 @@ function templateParameters(template, parameters) {
         throw new PipelineExecutionError('Parameter is already opened or not closed');
     }
     if (parameters[parameterName] === undefined) {
+        console.log('!!! templateParameters 1', { parameterName, template, parameters });
         throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
     }
     let parameterValue = parameters[parameterName];
     if (parameterValue === undefined) {
+        console.log('!!! templateParameters 2', { parameterName, template, parameters });
         throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
     }
     parameterValue = valueToString(parameterValue);
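
For orientation: `templateParameters` substitutes `{parameterName}` placeholders in a template string with values from the `parameters` object, and the two added lines only log the offending parameter name, template, and parameters before the existing `PipelineExecutionError` is thrown. A minimal usage sketch, assuming the function returns the substituted string as its body suggests (the template and values below are invented for illustration):

```js
// Hypothetical usage of templateParameters (example data, not part of the package):
const template = 'Hello {name}, welcome to {place}!';

// Every placeholder is defined, so substitution succeeds:
console.log(templateParameters(template, { name: 'Alice', place: 'Promptbook' }));
// -> 'Hello Alice, welcome to Promptbook!'

// `{place}` is missing, so the new console.log fires and the error is thrown:
try {
    templateParameters(template, { name: 'Alice' });
} catch (error) {
    console.error(error.message); // -> 'Parameter `{place}` is not defined'
}
```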
@@ -5221,6 +5223,23 @@ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
     return 1 - dotProduct / (magnitude1 * magnitude2);
 }
 
+/**
+ *
+ * @param knowledgePieces
+ * @returns
+ *
+ * @private internal utility of `createPipelineExecutor`
+ */
+function knowledgePiecesToString(knowledgePieces) {
+    return knowledgePieces
+        .map((knowledgePiece) => {
+            const { content } = knowledgePiece;
+            return `- ${content}`;
+        })
+        .join('\n');
+    // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
+}
+
 /**
  * @@@
  *
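
The new `knowledgePiecesToString` helper simply renders each knowledge piece's `content` as one bullet line, and both return paths of the reworked `getKnowledgeForTask` below rely on it. A small sketch of its input and output (the knowledge pieces are invented for illustration):

```js
// Illustrative call to knowledgePiecesToString (example data only):
const knowledgePieces = [
    { content: 'Promptbook pipelines are defined in book files.' },
    { content: 'Each task can declare its own model requirements.' },
];

console.log(knowledgePiecesToString(knowledgePieces));
// - Promptbook pipelines are defined in book files.
// - Each task can declare its own model requirements.
```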
@@ -5234,53 +5253,60 @@ async function getKnowledgeForTask(options) {
     const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
     // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
     if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
-        return 'No knowledge pieces found';
+        return ''; // <- Note: No knowledge present, return empty string
     }
-    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    const taskEmbeddingPrompt = {
-        title: 'Knowledge Search',
-        modelRequirements: {
-            modelVariant: 'EMBEDDING',
-            modelName: firstKnowlegeIndex.modelName,
-        },
-        content: task.content,
-        parameters: {
-            /* !!!! */
-        },
-    };
-    const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
-    const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
-        const { index } = knowledgePiece;
-        const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
-        // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
-        if (knowledgePieceIndex === undefined) {
+    try {
+        // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+        const _llms = arrayableToArray(tools.llm);
+        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const taskEmbeddingPrompt = {
+            title: 'Knowledge Search',
+            modelRequirements: {
+                modelVariant: 'EMBEDDING',
+                modelName: firstKnowlegeIndex.modelName,
+            },
+            content: task.content,
+            parameters: {
+                /* !!!! */
+            },
+        };
+        const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+        const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+            const { index } = knowledgePiece;
+            const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+            // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+            if (knowledgePieceIndex === undefined) {
+                return {
+                    content: knowledgePiece.content,
+                    relevance: 0,
+                };
+            }
+            const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
             return {
                 content: knowledgePiece.content,
-                relevance: 0,
+                relevance,
             };
-        }
-        const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
-        return {
-            content: knowledgePiece.content,
-            relevance,
-        };
-    });
-    const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
-    const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
-    console.log('!!! Embedding', {
-        task,
-        taskEmbeddingPrompt,
-        taskEmbeddingResult,
-        firstKnowlegePiece,
-        firstKnowlegeIndex,
-        knowledgePiecesWithRelevance,
-        knowledgePiecesSorted,
-        knowledgePiecesLimited,
-    });
-    return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
-    // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
+        });
+        const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+        const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+        console.log('!!! Embedding', {
+            task,
+            taskEmbeddingPrompt,
+            taskEmbeddingResult,
+            firstKnowlegePiece,
+            firstKnowlegeIndex,
+            knowledgePiecesWithRelevance,
+            knowledgePiecesSorted,
+            knowledgePiecesLimited,
+        });
+        return knowledgePiecesToString(knowledgePiecesLimited);
+    }
+    catch (error) {
+        assertsError(error);
+        console.error('Error in `getKnowledgeForTask`', error);
+        // Note: If the LLM fails, just return all knowledge pieces
+        return knowledgePiecesToString(preparedPipeline.knowledgePieces);
+    }
 }
 /**
  * TODO: !!!! Verify if this is working
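
Taken together, the reworked `getKnowledgeForTask` embeds the task content with an EMBEDDING-variant model, scores each knowledge piece against that embedding via `computeCosineSimilarity`, sorts by the resulting `relevance`, slices the first five pieces, and renders them with `knowledgePiecesToString`; if anything in that path throws, the new `catch` branch logs the error and falls back to returning all knowledge pieces. A simplified, self-contained sketch of the ranking step under those assumptions (the data and the plain cosine-similarity scorer below are stand-ins, not the package's implementation; the package's own `computeCosineSimilarity` returns `1 - dotProduct / (magnitude1 * magnitude2)` as shown in the earlier hunk):

```js
// Stand-in sketch of the relevance ranking, not the package's actual API
function cosineSimilarity(a, b) {
    const dot = a.reduce((sum, value, i) => sum + value * b[i], 0);
    const magnitudeA = Math.sqrt(a.reduce((sum, value) => sum + value * value, 0));
    const magnitudeB = Math.sqrt(b.reduce((sum, value) => sum + value * value, 0));
    return dot / (magnitudeA * magnitudeB);
}

function rankKnowledgePieces(taskEmbedding, knowledgePieces) {
    return knowledgePieces
        .map((piece) => ({
            content: piece.content,
            // Pieces without an embedding for the chosen model get relevance 0,
            // mirroring the `knowledgePieceIndex === undefined` branch in the diff
            relevance: piece.embedding ? cosineSimilarity(piece.embedding, taskEmbedding) : 0,
        }))
        // Same sort comparator and slice(0, 5) limit as in the diff
        .sort((a, b) => a.relevance - b.relevance)
        .slice(0, 5);
}

const ranked = rankKnowledgePieces(
    [1, 0],
    [
        { content: 'Closely related piece', embedding: [0.9, 0.1] },
        { content: 'Unrelated piece', embedding: [0, 1] },
        { content: 'Piece without an embedding index' },
    ],
);
console.log(ranked.map(({ content }) => `- ${content}`).join('\n'));
```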