@promptbook/core 0.94.0-1 → 0.94.0-12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +6 -8
  2. package/esm/index.es.js +20 -13
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/ollama.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/openai.index.d.ts +2 -0
  6. package/esm/typings/src/execution/AvailableModel.d.ts +9 -1
  7. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +2 -2
  8. package/esm/typings/src/llm-providers/{openai/computeUsage.d.ts → _common/utils/pricing.d.ts} +2 -2
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +36 -11
  14. package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts +23 -12
  15. package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts +3 -3
  16. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +14 -0
  17. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +91 -0
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -53
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +2 -0
  22. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -7
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +24 -1
  25. package/umd/index.umd.js +20 -13
  26. package/umd/index.umd.js.map +1 -1
  27. /package/esm/typings/src/llm-providers/{openai/computeUsage.test.d.ts → _common/utils/pricing.test.d.ts} +0 -0
package/README.md CHANGED
@@ -64,6 +64,8 @@ Rest of the documentation is common for **entire promptbook ecosystem**:
64
64
 
65
65
  During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
66
66
 
67
+
68
+
67
69
  It's a revolution of writing software in **plain human language** that is understandable and executable by both humans and machines – and it's going to change everything!
68
70
 
69
71
  The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
@@ -189,16 +191,8 @@ Join our growing community of developers and users:
189
191
 
190
192
  _A concise, Markdown-based DSL for crafting AI workflows and automations._
191
193
 
192
- ---
193
194
 
194
- ### 📑 Table of Contents
195
195
 
196
- - [Introduction](#introduction)
197
- - [Example](#example)
198
- - [1. What: Workflows, Tasks & Parameters](#1-what-workflows-tasks--parameters)
199
- - [2. Who: Personas](#2-who-personas)
200
- - [3. How: Knowledge, Instruments & Actions](#3-how-knowledge-instruments-and-actions)
201
- - [General Principles](#general-principles)
202
196
 
203
197
  ### Introduction
204
198
 
@@ -249,6 +243,8 @@ Personas can have access to different knowledge, tools and actions. They can als
249
243
 
250
244
  - [PERSONA](https://github.com/webgptorg/promptbook/blob/main/documents/commands/PERSONA.md)
251
245
 
246
+
247
+
252
248
  ### **3. How:** Knowledge, Instruments and Actions
253
249
 
254
250
  The resources used by the personas are used to do the work.
@@ -348,6 +344,8 @@ The following glossary is used to clarify certain concepts:
348
344
 
349
345
  _Note: This section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
350
346
 
347
+
348
+
351
349
  ### 💯 Core concepts
352
350
 
353
351
  - [📚 Collection of pipelines](https://github.com/webgptorg/promptbook/discussions/65)
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-1';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-12';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -10300,7 +10300,10 @@ function usageToWorktime(usage) {
10300
10300
  function usageToHuman(usage) {
10301
10301
  const reportItems = [];
10302
10302
  const uncertainNumberToHuman = ({ value, isUncertain }) => `${isUncertain ? 'approximately ' : ''}${Math.round(value * 100) / 100}`;
10303
- if (usage.price.value > 0.01
10303
+ if (usage.price.value === 0) {
10304
+ reportItems.push(`Zero cost`);
10305
+ }
10306
+ else if (usage.price.value > 0.01
10304
10307
  // <- TODO: [🍓][🧞‍♂️][👩🏽‍🤝‍🧑🏻] Configure negligible value - default value to config + value to `UsageToHumanSettings`
10305
10308
  ) {
10306
10309
  reportItems.push(`Cost ${uncertainNumberToHuman(usage.price)} USD`);
@@ -10365,12 +10368,12 @@ const BoilerplateFormfactorDefinition = {
10365
10368
  * Creates a wrapper around LlmExecutionTools that only exposes models matching the filter function
10366
10369
  *
10367
10370
  * @param llmTools The original LLM execution tools to wrap
10368
- * @param modelFilter Function that determines whether a model should be included
10371
+ * @param predicate Function that determines whether a model should be included
10369
10372
  * @returns A new LlmExecutionTools instance with filtered models
10370
10373
  *
10371
10374
  * @public exported from `@promptbook/core`
10372
10375
  */
10373
- function filterModels(llmTools, modelFilter) {
10376
+ function filterModels(llmTools, predicate) {
10374
10377
  const filteredTools = {
10375
10378
  // Keep all properties from the original llmTools
10376
10379
  ...llmTools,
@@ -10387,10 +10390,10 @@ function filterModels(llmTools, modelFilter) {
10387
10390
  const originalModels = await llmTools.listModels();
10388
10391
  // Handle both synchronous and Promise return types
10389
10392
  if (originalModels instanceof Promise) {
10390
- return originalModels.then((models) => models.filter(modelFilter));
10393
+ return originalModels.then((models) => models.filter(predicate));
10391
10394
  }
10392
10395
  else {
10393
- return originalModels.filter(modelFilter);
10396
+ return originalModels.filter(predicate);
10394
10397
  }
10395
10398
  },
10396
10399
  };
@@ -10957,7 +10960,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
10957
10960
  packageName: '@promptbook/azure-openai',
10958
10961
  className: 'AzureOpenAiExecutionTools',
10959
10962
  options: {
10960
- apiKey: 'sk-',
10963
+ apiKey: '',
10961
10964
  resourceName: 'my-resource-name',
10962
10965
  deploymentName: 'my-deployment-name',
10963
10966
  maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
@@ -11128,6 +11131,13 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
11128
11131
  * Note: [💞] Ignore a discrepancy between file name and entity name
11129
11132
  */
11130
11133
 
11134
+ /**
11135
+ * Default base URL for Ollama API
11136
+ *
11137
+ * @public exported from `@promptbook/ollama`
11138
+ */
11139
+ const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1';
11140
+
11131
11141
  /**
11132
11142
  * Registration of LLM provider metadata
11133
11143
  *
@@ -11150,8 +11160,7 @@ const _OllamaMetadataRegistration = $llmToolsMetadataRegister.register({
11150
11160
  packageName: '@promptbook/ollama',
11151
11161
  className: 'OllamaExecutionTools',
11152
11162
  options: {
11153
- baseUrl: 'http://localhost:11434',
11154
- model: 'llama2',
11163
+ baseURL: DEFAULT_OLLAMA_BASE_URL,
11155
11164
  maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
11156
11165
  },
11157
11166
  };
@@ -11163,9 +11172,7 @@ const _OllamaMetadataRegistration = $llmToolsMetadataRegister.register({
11163
11172
  packageName: '@promptbook/ollama',
11164
11173
  className: 'OllamaExecutionTools',
11165
11174
  options: {
11166
- baseUrl: env.OLLAMA_BASE_URL,
11167
- model: env.OLLAMA_MODEL || 'llama2',
11168
- maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
11175
+ baseURL: env.OLLAMA_BASE_URL,
11169
11176
  },
11170
11177
  };
11171
11178
  }
@@ -11258,7 +11265,7 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
11258
11265
  options: {
11259
11266
  apiKey: env.OPENAI_API_KEY!,
11260
11267
  assistantId: env.OPENAI_XXX!
11261
- },
11268
+ } satisfies OpenAiAssistantExecutionToolsOptions,
11262
11269
  };
11263
11270
  }
11264
11271