@promptbook/core 0.94.0-0 → 0.94.0-12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/README.md +1 -8
  2. package/esm/index.es.js +71 -14
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/ollama.index.d.ts +14 -0
  7. package/esm/typings/src/_packages/openai.index.d.ts +2 -0
  8. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  9. package/esm/typings/src/_packages/wizzard.index.d.ts +4 -0
  10. package/esm/typings/src/execution/AvailableModel.d.ts +9 -1
  11. package/esm/typings/src/execution/ExecutionTask.d.ts +3 -1
  12. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +2 -2
  13. package/esm/typings/src/llm-providers/{openai/computeUsage.d.ts → _common/utils/pricing.d.ts} +2 -2
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +44 -0
  19. package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts +11 -0
  21. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +14 -0
  22. package/esm/typings/src/llm-providers/ollama/playground/playground.d.ts +6 -0
  23. package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +14 -0
  24. package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +15 -0
  25. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +91 -0
  27. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -53
  28. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -1
  29. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +2 -0
  30. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -7
  31. package/esm/typings/src/version.d.ts +1 -1
  32. package/package.json +24 -1
  33. package/umd/index.umd.js +71 -13
  34. package/umd/index.umd.js.map +1 -1
  35. /package/esm/typings/src/llm-providers/{openai/computeUsage.test.d.ts → _common/utils/pricing.test.d.ts} +0 -0
package/README.md CHANGED
@@ -191,16 +191,8 @@ Join our growing community of developers and users:
191
191
 
192
192
  _A concise, Markdown-based DSL for crafting AI workflows and automations._
193
193
 
194
- ---
195
194
 
196
- ### 📑 Table of Contents
197
195
 
198
- - [Introduction](#introduction)
199
- - [Example](#example)
200
- - [1. What: Workflows, Tasks & Parameters](#1-what-workflows-tasks--parameters)
201
- - [2. Who: Personas](#2-who-personas)
202
- - [3. How: Knowledge, Instruments & Actions](#3-how-knowledge-instruments-and-actions)
203
- - [General Principles](#general-principles)
204
196
 
205
197
  ### Introduction
206
198
 
@@ -313,6 +305,7 @@ Or you can install them separately:
313
305
  - **[@promptbook/vercel](https://www.npmjs.com/package/@promptbook/vercel)** - Adapter for Vercel functionalities
314
306
  - **[@promptbook/google](https://www.npmjs.com/package/@promptbook/google)** - Integration with Google's Gemini API
315
307
  - **[@promptbook/deepseek](https://www.npmjs.com/package/@promptbook/deepseek)** - Integration with [DeepSeek API](https://www.deepseek.com/)
308
+ - **[@promptbook/ollama](https://www.npmjs.com/package/@promptbook/ollama)** - Integration with [Ollama](https://ollama.com/) API
316
309
  - **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API
317
310
 
318
311
  - **[@promptbook/fake-llm](https://www.npmjs.com/package/@promptbook/fake-llm)** - Mocked execution tools for testing the library and saving the tokens
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-0';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-12';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4751,11 +4751,6 @@ async function executeTask(options) {
4751
4751
  const jokerParameterNames = currentTask.jokerParameterNames || [];
4752
4752
  const preparedContent = (currentTask.preparedContent || '{content}').split('{content}').join(currentTask.content);
4753
4753
  // <- TODO: [🍵] Use here `templateParameters` to replace {websiteContent} with option to ignore missing parameters
4754
- await onProgress({
4755
- outputParameters: {
4756
- [currentTask.resultingParameterName]: '',
4757
- },
4758
- });
4759
4754
  const resultString = await executeFormatSubvalues({
4760
4755
  jokerParameterNames,
4761
4756
  priority,
@@ -4854,6 +4849,13 @@ async function executePipeline(options) {
4854
4849
  * Note: This is a flag to prevent `onProgress` call after the pipeline execution is finished
4855
4850
  */
4856
4851
  let isReturned = false;
4852
+ // Note: Report all output parameters upfront as empty strings
4853
+ if (onProgress) {
4854
+ const emptyOutputParameters = Object.fromEntries(preparedPipeline.parameters.filter((param) => !param.isInput).map((param) => [param.name, '']));
4855
+ onProgress({
4856
+ outputParameters: emptyOutputParameters,
4857
+ });
4858
+ }
4857
4859
  // Note: Check that all input input parameters are defined
4858
4860
  for (const parameter of preparedPipeline.parameters.filter(({ isInput }) => isInput)) {
4859
4861
  if (inputParameters[parameter.name] === undefined) {
@@ -10298,7 +10300,10 @@ function usageToWorktime(usage) {
10298
10300
  function usageToHuman(usage) {
10299
10301
  const reportItems = [];
10300
10302
  const uncertainNumberToHuman = ({ value, isUncertain }) => `${isUncertain ? 'approximately ' : ''}${Math.round(value * 100) / 100}`;
10301
- if (usage.price.value > 0.01
10303
+ if (usage.price.value === 0) {
10304
+ reportItems.push(`Zero cost`);
10305
+ }
10306
+ else if (usage.price.value > 0.01
10302
10307
  // <- TODO: [🍓][🧞‍♂️][👩🏽‍🤝‍🧑🏻] Configure negligible value - default value to config + value to `UsageToHumanSettings`
10303
10308
  ) {
10304
10309
  reportItems.push(`Cost ${uncertainNumberToHuman(usage.price)} USD`);
@@ -10363,12 +10368,12 @@ const BoilerplateFormfactorDefinition = {
10363
10368
  * Creates a wrapper around LlmExecutionTools that only exposes models matching the filter function
10364
10369
  *
10365
10370
  * @param llmTools The original LLM execution tools to wrap
10366
- * @param modelFilter Function that determines whether a model should be included
10371
+ * @param predicate Function that determines whether a model should be included
10367
10372
  * @returns A new LlmExecutionTools instance with filtered models
10368
10373
  *
10369
10374
  * @public exported from `@promptbook/core`
10370
10375
  */
10371
- function filterModels(llmTools, modelFilter) {
10376
+ function filterModels(llmTools, predicate) {
10372
10377
  const filteredTools = {
10373
10378
  // Keep all properties from the original llmTools
10374
10379
  ...llmTools,
@@ -10385,10 +10390,10 @@ function filterModels(llmTools, modelFilter) {
10385
10390
  const originalModels = await llmTools.listModels();
10386
10391
  // Handle both synchronous and Promise return types
10387
10392
  if (originalModels instanceof Promise) {
10388
- return originalModels.then((models) => models.filter(modelFilter));
10393
+ return originalModels.then((models) => models.filter(predicate));
10389
10394
  }
10390
10395
  else {
10391
- return originalModels.filter(modelFilter);
10396
+ return originalModels.filter(predicate);
10392
10397
  }
10393
10398
  },
10394
10399
  };
@@ -10955,7 +10960,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10955
10960
  packageName: '@promptbook/azure-openai',
10956
10961
  className: 'AzureOpenAiExecutionTools',
10957
10962
  options: {
10958
- apiKey: 'sk-',
10963
+ apiKey: '',
10959
10964
  resourceName: 'my-resource-name',
10960
10965
  deploymentName: 'my-deployment-name',
10961
10966
  maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
@@ -11126,6 +11131,58 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
11126
11131
  * Note: [💞] Ignore a discrepancy between file name and entity name
11127
11132
  */
11128
11133
 
11134
+ /**
11135
+ * Default base URL for Ollama API
11136
+ *
11137
+ * @public exported from `@promptbook/ollama`
11138
+ */
11139
+ const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1';
11140
+
11141
+ /**
11142
+ * Registration of LLM provider metadata
11143
+ *
11144
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
11145
+ *
11146
+ * @public exported from `@promptbook/core`
11147
+ * @public exported from `@promptbook/wizzard`
11148
+ * @public exported from `@promptbook/cli`
11149
+ */
11150
+ const _OllamaMetadataRegistration = $llmToolsMetadataRegister.register({
11151
+ title: 'Ollama',
11152
+ packageName: '@promptbook/ollama',
11153
+ className: 'OllamaExecutionTools',
11154
+ envVariables: ['OLLAMA_BASE_URL', 'OLLAMA_MODEL'],
11155
+ trustLevel: 'CLOSED_LOCAL',
11156
+ order: MODEL_ORDERS.NORMAL,
11157
+ getBoilerplateConfiguration() {
11158
+ return {
11159
+ title: 'Ollama',
11160
+ packageName: '@promptbook/ollama',
11161
+ className: 'OllamaExecutionTools',
11162
+ options: {
11163
+ baseURL: DEFAULT_OLLAMA_BASE_URL,
11164
+ maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
11165
+ },
11166
+ };
11167
+ },
11168
+ createConfigurationFromEnv(env) {
11169
+ if (typeof env.OLLAMA_BASE_URL === 'string') {
11170
+ return {
11171
+ title: 'Ollama (from env)',
11172
+ packageName: '@promptbook/ollama',
11173
+ className: 'OllamaExecutionTools',
11174
+ options: {
11175
+ baseURL: env.OLLAMA_BASE_URL,
11176
+ },
11177
+ };
11178
+ }
11179
+ return null;
11180
+ },
11181
+ });
11182
+ /**
11183
+ * Note: [💞] Ignore a discrepancy between file name and entity name
11184
+ */
11185
+
11129
11186
  /**
11130
11187
  * Registration of LLM provider metadata
11131
11188
  *
@@ -11208,7 +11265,7 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
11208
11265
  options: {
11209
11266
  apiKey: env.OPENAI_API_KEY!,
11210
11267
  assistantId: env.OPENAI_XXX!
11211
- },
11268
+ } satisfies OpenAiAssistantExecutionToolsOptions,
11212
11269
  };
11213
11270
  }
11214
11271
 
@@ -11698,5 +11755,5 @@ class PrefixStorage {
11698
11755
  }
11699
11756
  }
11700
11757
 
11701
- export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, 
TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, addUsage, book, cacheLlmTools, collectionToJson, compilePipeline, computeCosineSimilarity, countUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, getPipelineInterface, identificationToPromptbookToken, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, parsePipeline, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline, validatePipelineString };
11758
+ export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, 
TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, addUsage, book, cacheLlmTools, collectionToJson, compilePipeline, computeCosineSimilarity, countUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, getPipelineInterface, identificationToPromptbookToken, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, parsePipeline, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline, validatePipelineString };
11702
11759
  //# sourceMappingURL=index.es.js.map