@promptbook/legacy-documents 0.92.0-14 → 0.92.0-17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
28
28
  * @generated
29
29
  * @see https://github.com/webgptorg/promptbook
30
30
  */
31
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-14';
31
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-17';
32
32
  /**
33
33
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
34
34
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4713,10 +4713,12 @@ function templateParameters(template, parameters) {
4713
4713
  throw new PipelineExecutionError('Parameter is already opened or not closed');
4714
4714
  }
4715
4715
  if (parameters[parameterName] === undefined) {
4716
+ console.log('!!! templateParameters 1', { parameterName, template, parameters });
4716
4717
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4717
4718
  }
4718
4719
  let parameterValue = parameters[parameterName];
4719
4720
  if (parameterValue === undefined) {
4721
+ console.log('!!! templateParameters 2', { parameterName, template, parameters });
4720
4722
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4721
4723
  }
4722
4724
  parameterValue = valueToString(parameterValue);
@@ -5372,6 +5374,23 @@ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
5372
5374
  return 1 - dotProduct / (magnitude1 * magnitude2);
5373
5375
  }
5374
5376
 
5377
+ /**
5378
+ *
5379
+ * @param knowledgePieces
5380
+ * @returns
5381
+ *
5382
+ * @private internal utility of `createPipelineExecutor`
5383
+ */
5384
+ function knowledgePiecesToString(knowledgePieces) {
5385
+ return knowledgePieces
5386
+ .map((knowledgePiece) => {
5387
+ const { content } = knowledgePiece;
5388
+ return `- ${content}`;
5389
+ })
5390
+ .join('\n');
5391
+ // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
5392
+ }
5393
+
5375
5394
  /**
5376
5395
  * @@@
5377
5396
  *
@@ -5385,53 +5404,60 @@ async function getKnowledgeForTask(options) {
5385
5404
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
5386
5405
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
5387
5406
  if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
5388
- return 'No knowledge pieces found';
5407
+ return ''; // <- Note: No knowledge present, return empty string
5389
5408
  }
5390
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5391
- const _llms = arrayableToArray(tools.llm);
5392
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5393
- const taskEmbeddingPrompt = {
5394
- title: 'Knowledge Search',
5395
- modelRequirements: {
5396
- modelVariant: 'EMBEDDING',
5397
- modelName: firstKnowlegeIndex.modelName,
5398
- },
5399
- content: task.content,
5400
- parameters: {
5401
- /* !!!! */
5402
- },
5403
- };
5404
- const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5405
- const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5406
- const { index } = knowledgePiece;
5407
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5408
- // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5409
- if (knowledgePieceIndex === undefined) {
5409
+ try {
5410
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5411
+ const _llms = arrayableToArray(tools.llm);
5412
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5413
+ const taskEmbeddingPrompt = {
5414
+ title: 'Knowledge Search',
5415
+ modelRequirements: {
5416
+ modelVariant: 'EMBEDDING',
5417
+ modelName: firstKnowlegeIndex.modelName,
5418
+ },
5419
+ content: task.content,
5420
+ parameters: {
5421
+ /* !!!! */
5422
+ },
5423
+ };
5424
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5425
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5426
+ const { index } = knowledgePiece;
5427
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5428
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5429
+ if (knowledgePieceIndex === undefined) {
5430
+ return {
5431
+ content: knowledgePiece.content,
5432
+ relevance: 0,
5433
+ };
5434
+ }
5435
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
5410
5436
  return {
5411
5437
  content: knowledgePiece.content,
5412
- relevance: 0,
5438
+ relevance,
5413
5439
  };
5414
- }
5415
- const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
5416
- return {
5417
- content: knowledgePiece.content,
5418
- relevance,
5419
- };
5420
- });
5421
- const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5422
- const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5423
- console.log('!!! Embedding', {
5424
- task,
5425
- taskEmbeddingPrompt,
5426
- taskEmbeddingResult,
5427
- firstKnowlegePiece,
5428
- firstKnowlegeIndex,
5429
- knowledgePiecesWithRelevance,
5430
- knowledgePiecesSorted,
5431
- knowledgePiecesLimited,
5432
- });
5433
- return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
5434
- // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
5440
+ });
5441
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5442
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5443
+ console.log('!!! Embedding', {
5444
+ task,
5445
+ taskEmbeddingPrompt,
5446
+ taskEmbeddingResult,
5447
+ firstKnowlegePiece,
5448
+ firstKnowlegeIndex,
5449
+ knowledgePiecesWithRelevance,
5450
+ knowledgePiecesSorted,
5451
+ knowledgePiecesLimited,
5452
+ });
5453
+ return knowledgePiecesToString(knowledgePiecesLimited);
5454
+ }
5455
+ catch (error) {
5456
+ assertsError(error);
5457
+ console.error('Error in `getKnowledgeForTask`', error);
5458
+ // Note: If the LLM fails, just return all knowledge pieces
5459
+ return knowledgePiecesToString(preparedPipeline.knowledgePieces);
5460
+ }
5435
5461
  }
5436
5462
  /**
5437
5463
  * TODO: !!!! Verify if this is working