@promptbook/node 0.94.0-1 → 0.94.0-3

This diff shows the published contents of the two package versions as they appear in their public registries and is provided for informational purposes only.
package/README.md CHANGED
@@ -64,6 +64,8 @@ Rest of the documentation is common for **entire promptbook ecosystem**:
 
 During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
 
+
+
 It's a revolution of writing software in **plain human language** that is understandable and executable by both humans and machines – and it's going to change everything!
 
 The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
@@ -189,16 +191,8 @@ Join our growing community of developers and users:
 
 _A concise, Markdown-based DSL for crafting AI workflows and automations._
 
----
 
-### 📑 Table of Contents
 
-- [Introduction](#introduction)
-- [Example](#example)
-- [1. What: Workflows, Tasks & Parameters](#1-what-workflows-tasks--parameters)
-- [2. Who: Personas](#2-who-personas)
-- [3. How: Knowledge, Instruments & Actions](#3-how-knowledge-instruments-and-actions)
-- [General Principles](#general-principles)
 
 ### Introduction
 
@@ -249,6 +243,8 @@ Personas can have access to different knowledge, tools and actions. They can als
 
 - [PERSONA](https://github.com/webgptorg/promptbook/blob/main/documents/commands/PERSONA.md)
 
+
+
 ### **3. How:** Knowledge, Instruments and Actions
 
 The resources used by the personas are used to do the work.
@@ -348,6 +344,8 @@ The following glossary is used to clarify certain concepts:
 
 _Note: This section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
 
+
+
 ### 💯 Core concepts
 
 - [📚 Collection of pipelines](https://github.com/webgptorg/promptbook/discussions/65)
package/esm/index.es.js CHANGED
@@ -30,7 +30,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.94.0-1';
+const PROMPTBOOK_ENGINE_VERSION = '0.94.0-3';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1,8 +1,10 @@
 import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
 import { createOllamaExecutionTools } from '../llm-providers/ollama/createOllamaExecutionTools';
-import { OllamaExecutionTools } from '../llm-providers/ollama/OllamaExecutionTools';
+import { DEFAULT_OLLAMA_BASE_URL } from '../llm-providers/ollama/OllamaExecutionToolsOptions';
+import type { OllamaExecutionToolsOptions } from '../llm-providers/ollama/OllamaExecutionToolsOptions';
 import { _OllamaRegistration } from '../llm-providers/ollama/register-constructor';
 export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
 export { createOllamaExecutionTools };
-export { OllamaExecutionTools };
+export { DEFAULT_OLLAMA_BASE_URL };
+export type { OllamaExecutionToolsOptions };
 export { _OllamaRegistration };
@@ -1,4 +1,5 @@
 import type { ModelVariant } from '../types/ModelVariant';
+import type { number_usd } from '../types/typeAliases';
 import type { string_model_description } from '../types/typeAliases';
 import type { string_model_name } from '../types/typeAliases';
 import type { string_title } from '../types/typeAliases';
@@ -32,7 +33,14 @@ export type AvailableModel = {
      * @example "Model with 1 billion parameters and advanced reasoning capabilities"
      */
     readonly modelDescription?: string_model_description;
+    /**
+     * Pricing information for the model
+     */
+    readonly pricing?: {
+        readonly prompt: number_usd;
+        readonly output: number_usd;
+    };
 };
 /**
- * TODO: (not only [🕘]) Put pricing information here
+ * TODO: [🧠] Maybe rename to something else - like `ModelInformation` or `ModelMetadata`
  */
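The hunk above moves optional pricing onto `AvailableModel` itself. Here is a minimal sketch of an entry using the new field; the model name, title, and per-token USD unit are hypothetical, and the `modelVariant`/`modelName`/`modelTitle` fields are inferred from the type's imports:

```ts
import type { AvailableModel } from '@promptbook/core';

// Hypothetical model entry; prices are invented for illustration
const exampleModel: AvailableModel = {
    modelVariant: 'CHAT',
    modelName: 'example-chat-model',
    modelTitle: 'Example Chat Model',
    pricing: {
        prompt: 0.000002, // assumed: USD per prompt token
        output: 0.000008, // assumed: USD per output token
    },
};
```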
@@ -4,12 +4,12 @@ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
  * Creates a wrapper around LlmExecutionTools that only exposes models matching the filter function
  *
  * @param llmTools The original LLM execution tools to wrap
- * @param modelFilter Function that determines whether a model should be included
+ * @param predicate Function that determines whether a model should be included
  * @returns A new LlmExecutionTools instance with filtered models
  *
  * @public exported from `@promptbook/core`
  */
-export declare function filterModels<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, modelFilter: (model: AvailableModel) => boolean): TLlmTools;
+export declare function filterModels<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, predicate: (model: AvailableModel) => boolean): TLlmTools;
 /**
  * TODO: !!! [models] Test that this is working
  */
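The `modelFilter` → `predicate` rename is purely cosmetic; call sites are unchanged. A sketch of typical usage, assuming the OpenAI options accept an `apiKey` (the options type extends OpenAI's `ClientOptions`):

```ts
import { filterModels } from '@promptbook/core';
import { createOpenAiExecutionTools } from '@promptbook/openai';

const openAiTools = createOpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY,
});

// The predicate receives each AvailableModel; here we keep only chat models
const chatOnlyTools = filterModels(openAiTools, (model) => model.modelVariant === 'CHAT');
```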
@@ -2,7 +2,7 @@ import type { ClientOptions } from '@anthropic-ai/sdk';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { RemoteClientOptions } from '../../remote-server/types/RemoteClientOptions';
 /**
- * Options for `AnthropicClaudeExecutionTools`
+ * Options for `createAnthropicClaudeExecutionTools` and `AnthropicClaudeExecutionTools`
  *
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
  * @public exported from `@promptbook/anthropic-claude`
@@ -3,7 +3,7 @@ import type { string_name } from '../../types/typeAliases';
 import type { string_token } from '../../types/typeAliases';
 import type { string_user_id } from '../../types/typeAliases';
 /**
- * Options for `AzureOpenAiExecutionTools`
+ * Options for `createAzureOpenAiExecutionTools` and `AzureOpenAiExecutionTools`
  *
  * @see https://oai.azure.com/portal/
  * @public exported from `@promptbook/azure-openai`
@@ -1,7 +1,7 @@
 import type { createDeepSeek } from '@ai-sdk/deepseek';
 import type { VercelExecutionToolsOptions } from '../vercel/VercelExecutionToolsOptions';
 /**
- * Options for `DeepseekExecutionTools`
+ * Options for `createDeepseekExecutionTools`
  *
  * This combines options for Promptbook, Deepseek and Vercel together
  * @public exported from `@promptbook/deepseek`
@@ -1,7 +1,7 @@
 import type { createGoogleGenerativeAI } from '@ai-sdk/google';
 import type { VercelExecutionToolsOptions } from '../vercel/VercelExecutionToolsOptions';
 /**
- * Options for `GoogleExecutionTools`
+ * Options for `createGoogleExecutionTools`
  *
  * This combines options for Promptbook, Google and Vercel together
  * @public exported from `@promptbook/google`
@@ -1,12 +1,23 @@
-export interface OllamaExecutionToolsOptions {
-    /** Base URL of Ollama API, e.g., http://localhost:11434 */
-    baseUrl: string;
-    /** Model name to use for requests */
-    model: string;
-    /** Optional rate limit: max requests per minute */
-    maxRequestsPerMinute?: number;
-    /** Verbose logging */
-    isVerbose?: boolean;
-    /** Optional user identifier */
-    userId?: string;
-}
+import type { OpenAiExecutionToolsOptions } from '../openai/OpenAiExecutionToolsOptions';
+/**
+ * Default base URL for Ollama API
+ *
+ * @public exported from `@promptbook/ollama`
+ */
+export declare const DEFAULT_OLLAMA_BASE_URL = "http://localhost:11434";
+/**
+ * Options for `createOllamaExecutionTools`
+ *
+ * This combines options for Promptbook, Google and Vercel together
+ * @public exported from `@promptbook/ollama`
+ */
+export type OllamaExecutionToolsOptions = {
+    /**
+     * Base URL of Ollama API
+     *
+     * Note: Naming this `baseURL` not `baseUrl` to be consistent with OpenAI API
+     *
+     * @default `DEFAULT_OLLAMA_BASE_URL`
+     */
+    baseURL?: string;
+} & Omit<OpenAiExecutionToolsOptions, 'baseURL' | 'userId'>;
@@ -1,11 +1,11 @@
-import { OllamaExecutionTools } from "./OllamaExecutionTools";
-import { OllamaExecutionToolsOptions } from "./OllamaExecutionToolsOptions";
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import type { OllamaExecutionToolsOptions } from './OllamaExecutionToolsOptions';
 /**
  * Execution Tools for calling Ollama API
  *
  * @public exported from `@promptbook/ollama`
  */
-export declare const createOllamaExecutionTools: ((options: OllamaExecutionToolsOptions) => OllamaExecutionTools) & {
+export declare const createOllamaExecutionTools: ((ollamaOptions: OllamaExecutionToolsOptions) => LlmExecutionTools) & {
     packageName: string;
     className: string;
 };
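Together with the previous hunk, this replaces the concrete `OllamaExecutionTools` class with a factory returning a generic `LlmExecutionTools`. A sketch of the new call shape; the remaining fields inherited from `OpenAiExecutionToolsOptions` are assumed optional and omitted here:

```ts
import { createOllamaExecutionTools, DEFAULT_OLLAMA_BASE_URL } from '@promptbook/ollama';

// `baseURL` can be left out entirely; it defaults to
// DEFAULT_OLLAMA_BASE_URL ('http://localhost:11434')
const ollamaTools = createOllamaExecutionTools({
    baseURL: DEFAULT_OLLAMA_BASE_URL,
});

// The result is typed as LlmExecutionTools rather than a concrete class;
// `title` comes from that interface
console.log(ollamaTools.title);
```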
@@ -2,7 +2,7 @@ import type { ClientOptions } from 'openai';
2
2
  import type { string_token } from '../../types/typeAliases';
3
3
  import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
4
4
  /**
5
- * Options for `OpenAiAssistantExecutionTools`
5
+ * Options for `createOpenAiAssistantExecutionTools` and `OpenAiAssistantExecutionTools`
6
6
  *
7
7
  * @public exported from `@promptbook/openai`
8
8
  */
@@ -40,7 +40,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
     /**
      * List all available OpenAI models that can be used
      */
-    listModels(): ReadonlyArray<AvailableModel>;
+    listModels(): Promise<ReadonlyArray<AvailableModel>>;
     /**
      * Calls OpenAI API to use a chat model.
      */
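Since `listModels()` now returns a `Promise`, call sites must be updated to await it. A sketch, assuming the `apiKey` option from OpenAI's `ClientOptions`:

```ts
import { createOpenAiExecutionTools } from '@promptbook/openai';

const tools = createOpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });

// Previously synchronous; now the model list must be awaited
const models = await tools.listModels();
console.log(models.map((model) => model.modelName));
```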
@@ -1,7 +1,7 @@
 import type { ClientOptions } from 'openai';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 /**
- * Options for `OpenAiExecutionTools`
+ * Options for `createOpenAiExecutionTools` and `OpenAiExecutionTools`
  *
  * This extends OpenAI's `ClientOptions` with are directly passed to the OpenAI client.
  * Rest is used by the `OpenAiExecutionTools`.
@@ -3,6 +3,8 @@ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions'
 /**
  * Execution Tools for calling OpenAI API
  *
+ * Note: This can be also used for other OpenAI compatible APIs, like Ollama
+ *
  * @public exported from `@promptbook/openai`
  */
 export declare const createOpenAiExecutionTools: ((options: OpenAiExecutionToolsOptions) => OpenAiExecutionTools) & {
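The new note explains how the Ollama rework above is possible: the OpenAI tools can target any OpenAI-compatible server. A sketch pointing them at a local Ollama instance; the `/v1` suffix is Ollama's OpenAI-compatibility endpoint, and the placeholder `apiKey` is an assumption (the OpenAI client expects a key even when the server ignores it):

```ts
import { createOpenAiExecutionTools } from '@promptbook/openai';

const localTools = createOpenAiExecutionTools({
    baseURL: 'http://localhost:11434/v1', // Ollama's OpenAI-compatible endpoint
    apiKey: 'ollama', // placeholder; Ollama does not validate it
});
```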
@@ -1,5 +1,4 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
-import type { number_usd } from '../../types/typeAliases';
 /**
  * List of available OpenAI models with pricing
  *
@@ -9,12 +8,7 @@ import type { number_usd } from '../../types/typeAliases';
  * @see https://openai.com/api/pricing/
  * @public exported from `@promptbook/openai`
  */
-export declare const OPENAI_MODELS: ReadonlyArray<AvailableModel & {
-    pricing?: {
-        readonly prompt: number_usd;
-        readonly output: number_usd;
-    };
-}>;
+export declare const OPENAI_MODELS: ReadonlyArray<AvailableModel>;
 /**
  * Note: [🤖] Add models of new variant
  * TODO: [🧠] Some mechanism to propagate unsureness
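With `pricing` folded into `AvailableModel`, the inline intersection type is gone and consumers read pricing straight off each entry. A minimal sketch; the `modelTitle` field name and the per-token unit are assumptions:

```ts
import { OPENAI_MODELS } from '@promptbook/openai';

// `pricing` is now part of AvailableModel, so no extra cast is needed
for (const model of OPENAI_MODELS) {
    if (model.pricing !== undefined) {
        console.log(`${model.modelTitle}: $${model.pricing.prompt} in / $${model.pricing.output} out`);
    }
}
```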
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.94.0-0`).
+ * It follows semantic versioning (e.g., `0.94.0-2`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.94.0-1",
+    "version": "0.94.0-3",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -63,7 +63,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.94.0-1"
+        "@promptbook/core": "0.94.0-3"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -46,7 +46,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.94.0-1';
+const PROMPTBOOK_ENGINE_VERSION = '0.94.0-3';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1,19 +0,0 @@
-import type { AvailableModel } from '../../execution/AvailableModel';
-import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { OllamaExecutionToolsOptions } from './OllamaExecutionToolsOptions';
-/**
- * Execution Tools for calling a local Ollama model via HTTP API
- *
- * @public exported from `@promptbook/ollama`
- */
-export declare class OllamaExecutionTools implements LlmExecutionTools {
-    protected readonly options: OllamaExecutionToolsOptions;
-    private limiter;
-    constructor(options: OllamaExecutionToolsOptions);
-    get title(): string;
-    get description(): string;
-    checkConfiguration(): Promise<void>;
-    listModels(): Promise<ReadonlyArray<AvailableModel>>;
-    callChatModel(prompt: Pick<import('../../types/Prompt').Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
-}