@promptbook/remote-server 0.92.0-15 → 0.92.0-17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,9 @@
1
+ import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
2
+ /**
3
+ *
4
+ * @param knowledgePieces
5
+ * @returns
6
+ *
7
+ * @private internal utility of `createPipelineExecutor`
8
+ */
9
+ export declare function knowledgePiecesToString(knowledgePieces: ReadonlyArray<Pick<KnowledgePiecePreparedJson, 'content'>>): string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.92.0-15",
3
+ "version": "0.92.0-17",
4
4
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -51,7 +51,7 @@
51
51
  "module": "./esm/index.es.js",
52
52
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
53
53
  "peerDependencies": {
54
- "@promptbook/core": "0.92.0-15"
54
+ "@promptbook/core": "0.92.0-17"
55
55
  },
56
56
  "dependencies": {
57
57
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
48
48
  * @generated
49
49
  * @see https://github.com/webgptorg/promptbook
50
50
  */
51
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-15';
51
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-17';
52
52
  /**
53
53
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
54
54
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4912,10 +4912,12 @@
4912
4912
  throw new PipelineExecutionError('Parameter is already opened or not closed');
4913
4913
  }
4914
4914
  if (parameters[parameterName] === undefined) {
4915
+ console.log('!!! templateParameters 1', { parameterName, template, parameters });
4915
4916
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4916
4917
  }
4917
4918
  let parameterValue = parameters[parameterName];
4918
4919
  if (parameterValue === undefined) {
4920
+ console.log('!!! templateParameters 2', { parameterName, template, parameters });
4919
4921
  throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4920
4922
  }
4921
4923
  parameterValue = valueToString(parameterValue);
@@ -5571,6 +5573,23 @@
5571
5573
  return 1 - dotProduct / (magnitude1 * magnitude2);
5572
5574
  }
5573
5575
 
5576
+ /**
5577
+ *
5578
+ * @param knowledgePieces
5579
+ * @returns
5580
+ *
5581
+ * @private internal utility of `createPipelineExecutor`
5582
+ */
5583
+ function knowledgePiecesToString(knowledgePieces) {
5584
+ return knowledgePieces
5585
+ .map((knowledgePiece) => {
5586
+ const { content } = knowledgePiece;
5587
+ return `- ${content}`;
5588
+ })
5589
+ .join('\n');
5590
+ // <- TODO: [🧠] Some smarter aggregation of knowledge pieces, single-line vs multi-line vs mixed
5591
+ }
5592
+
5574
5593
  /**
5575
5594
  * @@@
5576
5595
  *
@@ -5584,53 +5603,60 @@
5584
5603
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
5585
5604
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
5586
5605
  if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
5587
- return 'No knowledge pieces found';
5606
+ return ''; // <- Note: No knowledge present, return empty string
5588
5607
  }
5589
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5590
- const _llms = arrayableToArray(tools.llm);
5591
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5592
- const taskEmbeddingPrompt = {
5593
- title: 'Knowledge Search',
5594
- modelRequirements: {
5595
- modelVariant: 'EMBEDDING',
5596
- modelName: firstKnowlegeIndex.modelName,
5597
- },
5598
- content: task.content,
5599
- parameters: {
5600
- /* !!!! */
5601
- },
5602
- };
5603
- const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5604
- const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5605
- const { index } = knowledgePiece;
5606
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5607
- // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5608
- if (knowledgePieceIndex === undefined) {
5608
+ try {
5609
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5610
+ const _llms = arrayableToArray(tools.llm);
5611
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5612
+ const taskEmbeddingPrompt = {
5613
+ title: 'Knowledge Search',
5614
+ modelRequirements: {
5615
+ modelVariant: 'EMBEDDING',
5616
+ modelName: firstKnowlegeIndex.modelName,
5617
+ },
5618
+ content: task.content,
5619
+ parameters: {
5620
+ /* !!!! */
5621
+ },
5622
+ };
5623
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5624
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5625
+ const { index } = knowledgePiece;
5626
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5627
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5628
+ if (knowledgePieceIndex === undefined) {
5629
+ return {
5630
+ content: knowledgePiece.content,
5631
+ relevance: 0,
5632
+ };
5633
+ }
5634
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
5609
5635
  return {
5610
5636
  content: knowledgePiece.content,
5611
- relevance: 0,
5637
+ relevance,
5612
5638
  };
5613
- }
5614
- const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
5615
- return {
5616
- content: knowledgePiece.content,
5617
- relevance,
5618
- };
5619
- });
5620
- const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5621
- const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5622
- console.log('!!! Embedding', {
5623
- task,
5624
- taskEmbeddingPrompt,
5625
- taskEmbeddingResult,
5626
- firstKnowlegePiece,
5627
- firstKnowlegeIndex,
5628
- knowledgePiecesWithRelevance,
5629
- knowledgePiecesSorted,
5630
- knowledgePiecesLimited,
5631
- });
5632
- return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
5633
- // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
5639
+ });
5640
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5641
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5642
+ console.log('!!! Embedding', {
5643
+ task,
5644
+ taskEmbeddingPrompt,
5645
+ taskEmbeddingResult,
5646
+ firstKnowlegePiece,
5647
+ firstKnowlegeIndex,
5648
+ knowledgePiecesWithRelevance,
5649
+ knowledgePiecesSorted,
5650
+ knowledgePiecesLimited,
5651
+ });
5652
+ return knowledgePiecesToString(knowledgePiecesLimited);
5653
+ }
5654
+ catch (error) {
5655
+ assertsError(error);
5656
+ console.error('Error in `getKnowledgeForTask`', error);
5657
+ // Note: If the LLM fails, just return all knowledge pieces
5658
+ return knowledgePiecesToString(preparedPipeline.knowledgePieces);
5659
+ }
5634
5660
  }
5635
5661
  /**
5636
5662
  * TODO: !!!! Verify if this is working
@@ -7970,6 +7996,7 @@
7970
7996
  promptResult = await llm.callCompletionModel(prompt);
7971
7997
  break;
7972
7998
  case 'EMBEDDING':
7999
+ console.log('!!! llm');
7973
8000
  if (llm.callEmbeddingModel === undefined) {
7974
8001
  // Note: [0] This check should not be a thing
7975
8002
  throw new PipelineExecutionError(`Embedding model is not available`);