@promptbook/core 0.89.0 → 0.92.0-3

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as published.
@@ -87,6 +87,7 @@ import { FORMFACTOR_DEFINITIONS } from '../formfactors/index';
87
87
  import { MatcherFormfactorDefinition } from '../formfactors/matcher/MatcherFormfactorDefinition';
88
88
  import { SheetsFormfactorDefinition } from '../formfactors/sheets/SheetsFormfactorDefinition';
89
89
  import { TranslatorFormfactorDefinition } from '../formfactors/translator/TranslatorFormfactorDefinition';
90
+ import { filterModels } from '../llm-providers/_common/filterModels';
90
91
  import { $llmToolsMetadataRegister } from '../llm-providers/_common/register/$llmToolsMetadataRegister';
91
92
  import { $llmToolsRegister } from '../llm-providers/_common/register/$llmToolsRegister';
92
93
  import { createLlmToolsFromConfiguration } from '../llm-providers/_common/register/createLlmToolsFromConfiguration';
@@ -225,6 +226,7 @@ export { FORMFACTOR_DEFINITIONS };
225
226
  export { MatcherFormfactorDefinition };
226
227
  export { SheetsFormfactorDefinition };
227
228
  export { TranslatorFormfactorDefinition };
229
+ export { filterModels };
228
230
  export { $llmToolsMetadataRegister };
229
231
  export { $llmToolsRegister };
230
232
  export { createLlmToolsFromConfiguration };
@@ -0,0 +1,15 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
+ /**
4
+ * Creates a wrapper around LlmExecutionTools that only exposes models matching the filter function
5
+ *
6
+ * @param llmTools The original LLM execution tools to wrap
7
+ * @param modelFilter Function that determines whether a model should be included
8
+ * @returns A new LlmExecutionTools instance with filtered models
9
+ *
10
+ * @public exported from `@promptbook/core`
11
+ */
12
+ export declare function filterModels<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, modelFilter: (model: AvailableModel) => boolean): TLlmTools;
13
+ /**
14
+ * TODO: !!! [models] Test that this is working
15
+ */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/core",
3
- "version": "0.89.0",
3
+ "version": "0.92.0-3",
4
4
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -27,7 +27,7 @@
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-3';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -9904,6 +9904,81 @@
9904
9904
  },
9905
9905
  };
9906
9906
 
9907
+ /**
9908
+ * Creates a wrapper around LlmExecutionTools that only exposes models matching the filter function
9909
+ *
9910
+ * @param llmTools The original LLM execution tools to wrap
9911
+ * @param modelFilter Function that determines whether a model should be included
9912
+ * @returns A new LlmExecutionTools instance with filtered models
9913
+ *
9914
+ * @public exported from `@promptbook/core`
9915
+ */
9916
+ function filterModels(llmTools, modelFilter) {
9917
+ const filteredTools = {
9918
+ // Keep all properties from the original llmTools
9919
+ ...llmTools,
9920
+ get description() {
9921
+ return `${llmTools.description} (filtered)`;
9922
+ },
9923
+ // Override listModels to filter the models
9924
+ async listModels() {
9925
+ const originalModels = await llmTools.listModels();
9926
+ // Handle both synchronous and Promise return types
9927
+ if (originalModels instanceof Promise) {
9928
+ return originalModels.then((models) => models.filter(modelFilter));
9929
+ }
9930
+ else {
9931
+ return originalModels.filter(modelFilter);
9932
+ }
9933
+ },
9934
+ };
9935
+ // Helper function to validate if a model is allowed
9936
+ async function isModelAllowed(modelName) {
9937
+ const models = await filteredTools.listModels();
9938
+ return models.some((model) => model.modelName === modelName);
9939
+ }
9940
+ // Override callChatModel if it exists in the original tools
9941
+ if (llmTools.callChatModel) {
9942
+ filteredTools.callChatModel = async (prompt) => {
9943
+ var _a;
9944
+ const modelName = (_a = prompt.modelRequirements) === null || _a === void 0 ? void 0 : _a.modelName;
9945
+ // If a specific model is requested, check if it's allowed
9946
+ if (modelName && !(await isModelAllowed(modelName))) {
9947
+ throw new PipelineExecutionError(`Model ${modelName} is not allowed by the filter for chat calls`);
9948
+ }
9949
+ return llmTools.callChatModel(prompt);
9950
+ };
9951
+ }
9952
+ // Override callCompletionModel if it exists in the original tools
9953
+ if (llmTools.callCompletionModel) {
9954
+ filteredTools.callCompletionModel = async (prompt) => {
9955
+ var _a;
9956
+ const modelName = (_a = prompt.modelRequirements) === null || _a === void 0 ? void 0 : _a.modelName;
9957
+ // If a specific model is requested, check if it's allowed
9958
+ if (modelName && !(await isModelAllowed(modelName))) {
9959
+ throw new PipelineExecutionError(`Model ${modelName} is not allowed by the filter for completion calls`);
9960
+ }
9961
+ return llmTools.callCompletionModel(prompt);
9962
+ };
9963
+ }
9964
+ // Override callEmbeddingModel if it exists in the original tools
9965
+ if (llmTools.callEmbeddingModel) {
9966
+ filteredTools.callEmbeddingModel = async (prompt) => {
9967
+ var _a;
9968
+ const modelName = (_a = prompt.modelRequirements) === null || _a === void 0 ? void 0 : _a.modelName;
9969
+ // If a specific model is requested, check if it's allowed
9970
+ if (modelName && !(await isModelAllowed(modelName))) {
9971
+ throw new PipelineExecutionError(`Model ${modelName} is not allowed by the filter for embedding calls`);
9972
+ }
9973
+ return llmTools.callEmbeddingModel(prompt);
9974
+ };
9975
+ }
9976
+ return filteredTools;
9977
+ }
9978
+ /**
9979
+ * TODO: !!! [models] Test that this is working
9980
+ */
9981
+
9907
9982
  /**
9908
9983
  * @@@
9909
9984
  *
@@ -11167,6 +11242,7 @@
11167
11242
  exports.embeddingVectorToString = embeddingVectorToString;
11168
11243
  exports.executionReportJsonToString = executionReportJsonToString;
11169
11244
  exports.extractParameterNamesFromTask = extractParameterNamesFromTask;
11245
+ exports.filterModels = filterModels;
11170
11246
  exports.getPipelineInterface = getPipelineInterface;
11171
11247
  exports.identificationToPromptbookToken = identificationToPromptbookToken;
11172
11248
  exports.isPassingExpectations = isPassingExpectations;