@promptbook/core 0.92.0-15 → 0.92.0-16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-15';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-16';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3453,10 +3453,12 @@ function templateParameters(template, parameters) {
3453
3453
  throw new PipelineExecutionError('Parameter is already opened or not closed');
3454
3454
  }
3455
3455
  if (parameters[parameterName] === undefined) {
3456
+ console.log('!!! templateParameters 1', { parameterName, template, parameters });
3456
3457
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
3457
3458
  }
3458
3459
  let parameterValue = parameters[parameterName];
3459
3460
  if (parameterValue === undefined) {
3461
+ console.log('!!! templateParameters 2', { parameterName, template, parameters });
3460
3462
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
3461
3463
  }
3462
3464
  parameterValue = valueToString(parameterValue);
@@ -4395,6 +4397,23 @@ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
4395
4397
  return 1 - dotProduct / (magnitude1 * magnitude2);
4396
4398
  }
4397
4399
 
4400
+ /**
4401
+ *
4402
+ * @param knowledgePieces
4403
+ * @returns
4404
+ *
4405
+ * @private internal utility of `createPipelineExecutor`
4406
+ */
4407
+ function knowledgePiecesToString(knowledgePieces) {
4408
+ return knowledgePieces
4409
+ .map((knowledgePiece) => {
4410
+ const { content } = knowledgePiece;
4411
+ return `- ${content}`;
4412
+ })
4413
+ .join('\n');
4414
+ // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
4415
+ }
4416
+
4398
4417
  /**
4399
4418
  * @@@
4400
4419
  *
@@ -4408,53 +4427,60 @@ async function getKnowledgeForTask(options) {
4408
4427
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
4409
4428
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
4410
4429
  if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
4411
- return 'No knowledge pieces found';
4430
+ return ''; // <- Note: No knowledge present, return empty string
4412
4431
  }
4413
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4414
- const _llms = arrayableToArray(tools.llm);
4415
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4416
- const taskEmbeddingPrompt = {
4417
- title: 'Knowledge Search',
4418
- modelRequirements: {
4419
- modelVariant: 'EMBEDDING',
4420
- modelName: firstKnowlegeIndex.modelName,
4421
- },
4422
- content: task.content,
4423
- parameters: {
4424
- /* !!!! */
4425
- },
4426
- };
4427
- const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
4428
- const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
4429
- const { index } = knowledgePiece;
4430
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
4431
- // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
4432
- if (knowledgePieceIndex === undefined) {
4432
+ try {
4433
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4434
+ const _llms = arrayableToArray(tools.llm);
4435
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4436
+ const taskEmbeddingPrompt = {
4437
+ title: 'Knowledge Search',
4438
+ modelRequirements: {
4439
+ modelVariant: 'EMBEDDING',
4440
+ modelName: firstKnowlegeIndex.modelName,
4441
+ },
4442
+ content: task.content,
4443
+ parameters: {
4444
+ /* !!!! */
4445
+ },
4446
+ };
4447
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
4448
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
4449
+ const { index } = knowledgePiece;
4450
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
4451
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
4452
+ if (knowledgePieceIndex === undefined) {
4453
+ return {
4454
+ content: knowledgePiece.content,
4455
+ relevance: 0,
4456
+ };
4457
+ }
4458
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
4433
4459
  return {
4434
4460
  content: knowledgePiece.content,
4435
- relevance: 0,
4461
+ relevance,
4436
4462
  };
4437
- }
4438
- const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
4439
- return {
4440
- content: knowledgePiece.content,
4441
- relevance,
4442
- };
4443
- });
4444
- const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
4445
- const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
4446
- console.log('!!! Embedding', {
4447
- task,
4448
- taskEmbeddingPrompt,
4449
- taskEmbeddingResult,
4450
- firstKnowlegePiece,
4451
- firstKnowlegeIndex,
4452
- knowledgePiecesWithRelevance,
4453
- knowledgePiecesSorted,
4454
- knowledgePiecesLimited,
4455
- });
4456
- return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
4457
- // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
4463
+ });
4464
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
4465
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
4466
+ console.log('!!! Embedding', {
4467
+ task,
4468
+ taskEmbeddingPrompt,
4469
+ taskEmbeddingResult,
4470
+ firstKnowlegePiece,
4471
+ firstKnowlegeIndex,
4472
+ knowledgePiecesWithRelevance,
4473
+ knowledgePiecesSorted,
4474
+ knowledgePiecesLimited,
4475
+ });
4476
+ return knowledgePiecesToString(knowledgePiecesLimited);
4477
+ }
4478
+ catch (error) {
4479
+ assertsError(error);
4480
+ console.error('Error in `getKnowledgeForTask`', error);
4481
+ // Note: If the LLM fails, just return all knowledge pieces
4482
+ return knowledgePiecesToString(preparedPipeline.knowledgePieces);
4483
+ }
4458
4484
  }
4459
4485
  /**
4460
4486
  * TODO: !!!! Verify if this is working