@promptbook/editable 0.94.0-1 → 0.94.0-12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +6 -8
  2. package/esm/index.es.js +1 -1
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/ollama.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/openai.index.d.ts +2 -0
  6. package/esm/typings/src/execution/AvailableModel.d.ts +9 -1
  7. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +2 -2
  8. package/esm/typings/src/llm-providers/{openai/computeUsage.d.ts → _common/utils/pricing.d.ts} +2 -2
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +36 -11
  14. package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts +23 -12
  15. package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts +3 -3
  16. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +14 -0
  17. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +91 -0
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -53
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +2 -0
  22. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -7
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +25 -2
  25. package/umd/index.umd.js +1 -1
  26. package/umd/index.umd.js.map +1 -1
  27. /package/esm/typings/src/llm-providers/{openai/computeUsage.test.d.ts → _common/utils/pricing.test.d.ts} +0 -0
package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts CHANGED
@@ -1,19 +1,44 @@
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult } from '../../execution/PromptResult';
+ import type { Usage } from '../../execution/Usage';
+ import type { string_markdown } from '../../types/typeAliases';
+ import type { string_markdown_text } from '../../types/typeAliases';
+ import type { string_title } from '../../types/typeAliases';
+ import { computeOpenAiUsage } from '../openai/computeOpenAiUsage';
+ import { OpenAiCompatibleExecutionTools } from '../openai/OpenAiCompatibleExecutionTools';
  import type { OllamaExecutionToolsOptions } from './OllamaExecutionToolsOptions';
  /**
-  * Execution Tools for calling a local Ollama model via HTTP API
+  * Execution Tools for calling Ollama API
   *
   * @public exported from `@promptbook/ollama`
   */
- export declare class OllamaExecutionTools implements LlmExecutionTools {
-     protected readonly options: OllamaExecutionToolsOptions;
-     private limiter;
-     constructor(options: OllamaExecutionToolsOptions);
-     get title(): string;
-     get description(): string;
-     checkConfiguration(): Promise<void>;
-     listModels(): Promise<ReadonlyArray<AvailableModel>>;
-     callChatModel(prompt: Pick<import('../../types/Prompt').Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
+ export declare class OllamaExecutionTools extends OpenAiCompatibleExecutionTools implements LlmExecutionTools {
+     constructor(ollamaOptions: OllamaExecutionToolsOptions);
+     get title(): string_title & string_markdown_text;
+     get description(): string_markdown;
+     /**
+      * List all available models (non-dynamically)
+      *
+      * Note: Purpose of this is to provide more information about models than standard listing from API
+      */
+     protected get HARDCODED_MODELS(): ReadonlyArray<AvailableModel>;
+     /**
+      * Computes the usage of the Ollama API based on the response from Ollama
+      */
+     protected computeUsage(...args: Parameters<typeof computeOpenAiUsage>): Usage;
+     /**
+      * Default model for chat variant.
+      */
+     protected getDefaultChatModel(): AvailableModel;
+     /**
+      * Default model for completion variant.
+      */
+     protected getDefaultCompletionModel(): AvailableModel;
+     /**
+      * Default model for embedding variant.
+      */
+     protected getDefaultEmbeddingModel(): AvailableModel;
  }
+ /**
+  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+  */
package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts CHANGED
@@ -1,12 +1,23 @@
- export interface OllamaExecutionToolsOptions {
-     /** Base URL of Ollama API, e.g., http://localhost:11434 */
-     baseUrl: string;
-     /** Model name to use for requests */
-     model: string;
-     /** Optional rate limit: max requests per minute */
-     maxRequestsPerMinute?: number;
-     /** Verbose logging */
-     isVerbose?: boolean;
-     /** Optional user identifier */
-     userId?: string;
- }
+ import type { OpenAiExecutionToolsOptions } from '../openai/OpenAiExecutionToolsOptions';
+ /**
+  * Default base URL for Ollama API
+  *
+  * @public exported from `@promptbook/ollama`
+  */
+ export declare const DEFAULT_OLLAMA_BASE_URL = "http://localhost:11434/v1";
+ /**
+  * Options for `createOllamaExecutionTools`
+  *
+  * This combines options for Promptbook together with options for the underlying OpenAI compatible client
+  * @public exported from `@promptbook/ollama`
+  */
+ export type OllamaExecutionToolsOptions = {
+     /**
+      * Base URL of Ollama API
+      *
+      * Note: Naming this `baseURL` not `baseUrl` to be consistent with OpenAI API
+      *
+      * @default `DEFAULT_OLLAMA_BASE_URL`
+      */
+     baseURL?: string;
+ } & Omit<OpenAiExecutionToolsOptions, 'baseURL' | 'userId'>;
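
A minimal sketch of the new options shape. It assumes `apiKey` is inherited from OpenAI's `ClientOptions` via `OpenAiExecutionToolsOptions` (only `baseURL` and `userId` are omitted above); Ollama itself ignores the key value:

```ts
import { DEFAULT_OLLAMA_BASE_URL } from '@promptbook/ollama';
import type { OllamaExecutionToolsOptions } from '@promptbook/ollama';

// `baseURL` is optional and falls back to DEFAULT_OLLAMA_BASE_URL
// ('http://localhost:11434/v1', Ollama's OpenAI compatible endpoint)
const options: OllamaExecutionToolsOptions = {
    baseURL: DEFAULT_OLLAMA_BASE_URL,
    apiKey: 'ollama', // Assumption: required by the underlying OpenAI client, ignored by Ollama
};
```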
package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts CHANGED
@@ -1,11 +1,11 @@
- import { OllamaExecutionTools } from "./OllamaExecutionTools";
- import { OllamaExecutionToolsOptions } from "./OllamaExecutionToolsOptions";
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+ import type { OllamaExecutionToolsOptions } from './OllamaExecutionToolsOptions';
  /**
   * Execution Tools for calling Ollama API
   *
   * @public exported from `@promptbook/ollama`
   */
- export declare const createOllamaExecutionTools: ((options: OllamaExecutionToolsOptions) => OllamaExecutionTools) & {
+ export declare const createOllamaExecutionTools: ((options: OllamaExecutionToolsOptions) => LlmExecutionTools) & {
      packageName: string;
      className: string;
  };
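
Since the factory now returns the generic `LlmExecutionTools` interface rather than the concrete class, callers only see the interface members. A hedged usage sketch:

```ts
import { createOllamaExecutionTools } from '@promptbook/ollama';

async function main() {
    const tools = createOllamaExecutionTools({
        baseURL: 'http://localhost:11434/v1',
    });

    // Fails early if the configured endpoint is not reachable
    await tools.checkConfiguration();

    // Only `LlmExecutionTools` members (listModels, callChatModel, ...) are visible here
    const models = await tools.listModels();
    console.info(models);
}

main();
```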
package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts ADDED
@@ -0,0 +1,14 @@
+ import type { AvailableModel } from '../../execution/AvailableModel';
+ /**
+  * List of available models in Ollama library
+  *
+  * Note: Done at 2025-05-19
+  *
+  * @see https://ollama.com/library
+  * @public exported from `@promptbook/ollama`
+  */
+ export declare const OLLAMA_MODELS: ReadonlyArray<AvailableModel>;
+ /**
+  * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
+  * Note: [💞] Ignore a discrepancy between file name and entity name
+  */
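
The static catalogue can be inspected directly. A small sketch, assuming `AvailableModel` exposes `modelVariant`/`modelName` fields (which the split into chat/completion/embedding defaults elsewhere in this diff suggests):

```ts
import { OLLAMA_MODELS } from '@promptbook/ollama';

// Assumption: `modelVariant` discriminates 'CHAT' | 'COMPLETION' | 'EMBEDDING'
const chatModels = OLLAMA_MODELS.filter((model) => model.modelVariant === 'CHAT');
console.info(`${chatModels.length} chat models in the snapshot from 2025-05-19`);
```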
package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts CHANGED
@@ -2,7 +2,7 @@ import type { ClientOptions } from 'openai';
  import type { string_token } from '../../types/typeAliases';
  import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
  /**
-  * Options for `OpenAiAssistantExecutionTools`
+  * Options for `createOpenAiAssistantExecutionTools` and `OpenAiAssistantExecutionTools`
   *
   * @public exported from `@promptbook/openai`
   */
package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts ADDED
@@ -0,0 +1,91 @@
+ import OpenAI from 'openai';
+ import type { AvailableModel } from '../../execution/AvailableModel';
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+ import type { ChatPromptResult } from '../../execution/PromptResult';
+ import type { CompletionPromptResult } from '../../execution/PromptResult';
+ import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+ import type { Usage } from '../../execution/Usage';
+ import type { Prompt } from '../../types/Prompt';
+ import type { string_markdown } from '../../types/typeAliases';
+ import type { string_markdown_text } from '../../types/typeAliases';
+ import type { string_model_name } from '../../types/typeAliases';
+ import type { string_title } from '../../types/typeAliases';
+ import { computeOpenAiUsage } from './computeOpenAiUsage';
+ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
+ /**
+  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
+  *
+  * @public exported from `@promptbook/openai`
+  */
+ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecutionTools {
+     protected readonly options: OpenAiExecutionToolsOptions;
+     /**
+      * OpenAI API client.
+      */
+     private client;
+     /**
+      * Rate limiter instance
+      */
+     private limiter;
+     /**
+      * Creates OpenAI compatible Execution Tools.
+      *
+      * @param options which are relevant are directly passed to the OpenAI compatible client
+      */
+     constructor(options: OpenAiExecutionToolsOptions);
+     abstract get title(): string_title & string_markdown_text;
+     abstract get description(): string_markdown;
+     getClient(): Promise<OpenAI>;
+     /**
+      * Check the `options` passed to `constructor`
+      */
+     checkConfiguration(): Promise<void>;
+     /**
+      * List all available OpenAI compatible models that can be used
+      */
+     listModels(): Promise<ReadonlyArray<AvailableModel>>;
+     /**
+      * Calls OpenAI compatible API to use a chat model.
+      */
+     callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
+     /**
+      * Calls OpenAI API to use a completion model.
+      */
+     callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
+     /**
+      * Calls OpenAI compatible API to use an embedding model
+      */
+     callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
+     /**
+      * Get the model that should be used as default
+      */
+     protected getDefaultModel(defaultModelName: string_model_name): AvailableModel;
+     /**
+      * List all available models (non-dynamically)
+      *
+      * Note: Purpose of this is to provide more information about models than standard listing from API
+      */
+     protected abstract get HARDCODED_MODELS(): ReadonlyArray<AvailableModel>;
+     /**
+      * Computes the usage of the OpenAI API based on the response from OpenAI Compatible API
+      */
+     protected abstract computeUsage(...args: Parameters<typeof computeOpenAiUsage>): Usage;
+     /**
+      * Default model for chat variant.
+      */
+     protected abstract getDefaultChatModel(): AvailableModel;
+     /**
+      * Default model for completion variant.
+      */
+     protected abstract getDefaultCompletionModel(): AvailableModel;
+     /**
+      * Default model for embedding variant.
+      */
+     protected abstract getDefaultEmbeddingModel(): AvailableModel;
+ }
+ /**
+  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+  * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
+  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+  */
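
To make the abstract surface concrete, here is a hypothetical subclass sketch. The provider name and model entries are invented, and the public import paths for `computeOpenAiUsage` and the `AvailableModel`/`Usage` types are assumptions; it only illustrates which members a provider must supply:

```ts
import type { AvailableModel, Usage } from '@promptbook/types';
import { computeOpenAiUsage, OpenAiCompatibleExecutionTools } from '@promptbook/openai';

// Hypothetical static catalogue; field names follow promptbook's AvailableModel shape (assumption)
const ACME_MODELS: ReadonlyArray<AvailableModel> = [
    { modelVariant: 'CHAT', modelTitle: 'Acme Chat', modelName: 'acme-chat-1' },
];

export class AcmeExecutionTools extends OpenAiCompatibleExecutionTools {
    public get title() {
        return 'Acme';
    }

    public get description() {
        return 'Imaginary OpenAI compatible provider used for illustration';
    }

    // Static list that enriches what the live API reports via listModels()
    protected get HARDCODED_MODELS(): ReadonlyArray<AvailableModel> {
        return ACME_MODELS;
    }

    // The wire format is OpenAI compatible, so the OpenAI usage computation is reused
    protected computeUsage(...args: Parameters<typeof computeOpenAiUsage>): Usage {
        return computeOpenAiUsage(...args);
    }

    protected getDefaultChatModel(): AvailableModel {
        return this.getDefaultModel('acme-chat-1');
    }

    protected getDefaultCompletionModel(): AvailableModel {
        return this.getDefaultModel('acme-chat-1');
    }

    protected getDefaultEmbeddingModel(): AvailableModel {
        return this.getDefaultModel('acme-embed-1');
    }
}
```

This mirrors what the `OllamaExecutionTools` hunk above does: subclasses contribute metadata, a model catalogue, and defaults, while connection handling, rate limiting, and the three `call*Model` methods live in the base class.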
package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts CHANGED
@@ -1,79 +1,38 @@
- import OpenAI from 'openai';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult } from '../../execution/PromptResult';
- import type { CompletionPromptResult } from '../../execution/PromptResult';
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
- import type { Prompt } from '../../types/Prompt';
  import type { string_markdown } from '../../types/typeAliases';
  import type { string_markdown_text } from '../../types/typeAliases';
  import type { string_title } from '../../types/typeAliases';
- import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
+ import { computeOpenAiUsage } from './computeOpenAiUsage';
+ import { OpenAiCompatibleExecutionTools } from './OpenAiCompatibleExecutionTools';
  /**
   * Execution Tools for calling OpenAI API
   *
   * @public exported from `@promptbook/openai`
   */
- export declare class OpenAiExecutionTools implements LlmExecutionTools {
-     protected readonly options: OpenAiExecutionToolsOptions;
-     /**
-      * OpenAI API client.
-      */
-     private client;
-     /**
-      * Rate limiter instance
-      */
-     private limiter;
-     /**
-      * Creates OpenAI Execution Tools.
-      *
-      * @param options which are relevant are directly passed to the OpenAI client
-      */
-     constructor(options: OpenAiExecutionToolsOptions);
+ export declare class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools implements LlmExecutionTools {
      get title(): string_title & string_markdown_text;
      get description(): string_markdown;
-     getClient(): Promise<OpenAI>;
-     /**
-      * Check the `options` passed to `constructor`
-      */
-     checkConfiguration(): Promise<void>;
      /**
-      * List all available OpenAI models that can be used
-      */
-     listModels(): ReadonlyArray<AvailableModel>;
-     /**
-      * Calls OpenAI API to use a chat model.
-      */
-     callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
-     /**
-      * Calls OpenAI API to use a complete model.
-      */
-     callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
-     /**
-      * Calls OpenAI API to use a embedding model
+      * List all available models (non-dynamically)
+      *
+      * Note: Purpose of this is to provide more information about models than standard listing from API
       */
-     callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
+     protected get HARDCODED_MODELS(): ReadonlyArray<AvailableModel>;
      /**
-      * Get the model that should be used as default
+      * Computes the usage of the OpenAI API based on the response from OpenAI
       */
-     private getDefaultModel;
+     protected computeUsage: typeof computeOpenAiUsage;
      /**
       * Default model for chat variant.
       */
-     private getDefaultChatModel;
+     protected getDefaultChatModel(): AvailableModel;
      /**
       * Default model for completion variant.
       */
-     private getDefaultCompletionModel;
+     protected getDefaultCompletionModel(): AvailableModel;
      /**
       * Default model for embedding variant.
       */
-     private getDefaultEmbeddingModel;
+     protected getDefaultEmbeddingModel(): AvailableModel;
  }
- /**
-  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
-  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
-  * TODO: Maybe make custom OpenAiError
-  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
-  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
-  */
package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ClientOptions } from 'openai';
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
  /**
-  * Options for `OpenAiExecutionTools`
+  * Options for `createOpenAiExecutionTools` and `OpenAiExecutionTools`
   *
   * This extends OpenAI's `ClientOptions` which are directly passed to the OpenAI client.
   * Rest is used by the `OpenAiExecutionTools`.
package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts CHANGED
@@ -3,6 +3,8 @@ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions'
  /**
   * Execution Tools for calling OpenAI API
   *
+  * Note: This can also be used for other OpenAI compatible APIs, like Ollama
+  *
   * @public exported from `@promptbook/openai`
   */
  export declare const createOpenAiExecutionTools: ((options: OpenAiExecutionToolsOptions) => OpenAiExecutionTools) & {
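
Per the new note, the same factory can target any OpenAI compatible endpoint. A hedged sketch pointing it at a local Ollama server (`baseURL` and `apiKey` come from OpenAI's `ClientOptions`, which these options extend; the endpoint value is illustrative):

```ts
import { createOpenAiExecutionTools } from '@promptbook/openai';

// Any OpenAI compatible endpoint works here, e.g. a local Ollama server
const tools = createOpenAiExecutionTools({
    baseURL: 'http://localhost:11434/v1',
    apiKey: 'ollama', // Assumption: the OpenAI client requires a key; Ollama ignores its value
});
```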
package/esm/typings/src/llm-providers/openai/openai-models.d.ts CHANGED
@@ -1,5 +1,4 @@
  import type { AvailableModel } from '../../execution/AvailableModel';
- import type { number_usd } from '../../types/typeAliases';
  /**
   * List of available OpenAI models with pricing
   *
@@ -9,12 +8,7 @@ import type { number_usd } from '../../types/typeAliases';
   * @see https://openai.com/api/pricing/
   * @public exported from `@promptbook/openai`
   */
- export declare const OPENAI_MODELS: ReadonlyArray<AvailableModel & {
-     pricing?: {
-         readonly prompt: number_usd;
-         readonly output: number_usd;
-     };
- }>;
+ export declare const OPENAI_MODELS: ReadonlyArray<AvailableModel>;
  /**
   * Note: [🤖] Add models of new variant
   * TODO: [🧠] Some mechanism to propagate unsureness
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
-  * It follows semantic versioning (e.g., `0.94.0-0`).
+  * It follows semantic versioning (e.g., `0.94.0-7`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@promptbook/editable",
-     "version": "0.94.0-1",
+     "version": "0.94.0-12",
      "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
      "private": false,
      "sideEffects": false,
@@ -36,6 +36,29 @@
          "o1-mini",
          "o1-preview",
          "anthropic",
+         "claude",
+         "claude-3",
+         "claude-3-opus",
+         "claude-3-sonnet",
+         "claude-3-haiku",
+         "gemini",
+         "gemini-pro",
+         "gemini-flash",
+         "mixtral",
+         "mistral",
+         "ollama",
+         "ai-orchestration",
+         "prompt-engineering",
+         "llmops",
+         "multimodal",
+         "reasoning",
+         "rag",
+         "embeddings",
+         "function-calling",
+         "large-language-models",
+         "ai-application-framework",
+         "text-generation",
+         "ai-agents",
          "LLMOps"
      ],
      "license": "BUSL-1.1",
@@ -63,7 +86,7 @@
      "module": "./esm/index.es.js",
      "typings": "./esm/typings/src/_packages/editable.index.d.ts",
      "peerDependencies": {
-         "@promptbook/core": "0.94.0-1"
+         "@promptbook/core": "0.94.0-12"
      },
      "dependencies": {
          "crypto-js": "4.2.0",
package/umd/index.umd.js CHANGED
@@ -23,7 +23,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-1';
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-12';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name