@promptbook/markdown-utils 0.92.0-14 → 0.92.0-16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -25,7 +25,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-14';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-16';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4532,10 +4532,12 @@ function templateParameters(template, parameters) {
4532
4532
  throw new PipelineExecutionError('Parameter is already opened or not closed');
4533
4533
  }
4534
4534
  if (parameters[parameterName] === undefined) {
4535
+ console.log('!!! templateParameters 1', { parameterName, template, parameters });
4535
4536
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4536
4537
  }
4537
4538
  let parameterValue = parameters[parameterName];
4538
4539
  if (parameterValue === undefined) {
4540
+ console.log('!!! templateParameters 2', { parameterName, template, parameters });
4539
4541
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4540
4542
  }
4541
4543
  parameterValue = valueToString(parameterValue);
@@ -5191,6 +5193,23 @@ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
5191
5193
  return 1 - dotProduct / (magnitude1 * magnitude2);
5192
5194
  }
5193
5195
 
5196
+ /**
5197
+ *
5198
+ * @param knowledgePieces
5199
+ * @returns
5200
+ *
5201
+ * @private internal utility of `createPipelineExecutor`
5202
+ */
5203
+ function knowledgePiecesToString(knowledgePieces) {
5204
+ return knowledgePieces
5205
+ .map((knowledgePiece) => {
5206
+ const { content } = knowledgePiece;
5207
+ return `- ${content}`;
5208
+ })
5209
+ .join('\n');
5210
+ // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
5211
+ }
5212
+
5194
5213
  /**
5195
5214
  * @@@
5196
5215
  *
@@ -5204,53 +5223,60 @@ async function getKnowledgeForTask(options) {
5204
5223
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
5205
5224
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
5206
5225
  if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
5207
- return 'No knowledge pieces found';
5226
+ return ''; // <- Note: No knowledge present, return empty string
5208
5227
  }
5209
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5210
- const _llms = arrayableToArray(tools.llm);
5211
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5212
- const taskEmbeddingPrompt = {
5213
- title: 'Knowledge Search',
5214
- modelRequirements: {
5215
- modelVariant: 'EMBEDDING',
5216
- modelName: firstKnowlegeIndex.modelName,
5217
- },
5218
- content: task.content,
5219
- parameters: {
5220
- /* !!!! */
5221
- },
5222
- };
5223
- const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5224
- const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5225
- const { index } = knowledgePiece;
5226
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5227
- // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5228
- if (knowledgePieceIndex === undefined) {
5228
+ try {
5229
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5230
+ const _llms = arrayableToArray(tools.llm);
5231
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5232
+ const taskEmbeddingPrompt = {
5233
+ title: 'Knowledge Search',
5234
+ modelRequirements: {
5235
+ modelVariant: 'EMBEDDING',
5236
+ modelName: firstKnowlegeIndex.modelName,
5237
+ },
5238
+ content: task.content,
5239
+ parameters: {
5240
+ /* !!!! */
5241
+ },
5242
+ };
5243
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5244
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5245
+ const { index } = knowledgePiece;
5246
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5247
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5248
+ if (knowledgePieceIndex === undefined) {
5249
+ return {
5250
+ content: knowledgePiece.content,
5251
+ relevance: 0,
5252
+ };
5253
+ }
5254
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
5229
5255
  return {
5230
5256
  content: knowledgePiece.content,
5231
- relevance: 0,
5257
+ relevance,
5232
5258
  };
5233
- }
5234
- const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
5235
- return {
5236
- content: knowledgePiece.content,
5237
- relevance,
5238
- };
5239
- });
5240
- const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5241
- const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5242
- console.log('!!! Embedding', {
5243
- task,
5244
- taskEmbeddingPrompt,
5245
- taskEmbeddingResult,
5246
- firstKnowlegePiece,
5247
- firstKnowlegeIndex,
5248
- knowledgePiecesWithRelevance,
5249
- knowledgePiecesSorted,
5250
- knowledgePiecesLimited,
5251
- });
5252
- return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
5253
- // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
5259
+ });
5260
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5261
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5262
+ console.log('!!! Embedding', {
5263
+ task,
5264
+ taskEmbeddingPrompt,
5265
+ taskEmbeddingResult,
5266
+ firstKnowlegePiece,
5267
+ firstKnowlegeIndex,
5268
+ knowledgePiecesWithRelevance,
5269
+ knowledgePiecesSorted,
5270
+ knowledgePiecesLimited,
5271
+ });
5272
+ return knowledgePiecesToString(knowledgePiecesLimited);
5273
+ }
5274
+ catch (error) {
5275
+ assertsError(error);
5276
+ console.error('Error in `getKnowledgeForTask`', error);
5277
+ // Note: If the LLM fails, just return all knowledge pieces
5278
+ return knowledgePiecesToString(preparedPipeline.knowledgePieces);
5279
+ }
5254
5280
  }
5255
5281
  /**
5256
5282
  * TODO: !!!! Verify if this is working