@promptbook/vercel 0.103.0-48 → 0.103.0-50
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +21 -3
- package/esm/index.es.js.map +1 -1
- package/esm/typings/servers.d.ts +1 -0
- package/esm/typings/src/_packages/components.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +2 -0
- package/esm/typings/src/_packages/utils.index.d.ts +2 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +12 -2
- package/esm/typings/src/book-components/PromptbookAgent/PromptbookAgent.d.ts +20 -0
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +14 -8
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabaseOptions.d.ts +10 -0
- package/esm/typings/src/commitments/MESSAGE/InitialMessageCommitmentDefinition.d.ts +28 -0
- package/esm/typings/src/commitments/index.d.ts +2 -1
- package/esm/typings/src/config.d.ts +1 -0
- package/esm/typings/src/errors/DatabaseError.d.ts +2 -2
- package/esm/typings/src/errors/WrappedError.d.ts +2 -2
- package/esm/typings/src/execution/ExecutionTask.d.ts +2 -2
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +6 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +2 -2
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +19 -3
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +13 -1
- package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +11 -2
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +6 -1
- package/esm/typings/src/remote-server/startAgentServer.d.ts +2 -2
- package/esm/typings/src/utils/color/Color.d.ts +7 -0
- package/esm/typings/src/utils/color/Color.test.d.ts +1 -0
- package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -2
- package/esm/typings/src/utils/misc/computeHash.d.ts +11 -0
- package/esm/typings/src/utils/misc/computeHash.test.d.ts +1 -0
- package/esm/typings/src/utils/organization/$sideEffect.d.ts +2 -2
- package/esm/typings/src/utils/organization/$side_effect.d.ts +2 -2
- package/esm/typings/src/utils/organization/TODO_USE.d.ts +2 -2
- package/esm/typings/src/utils/organization/keepUnused.d.ts +2 -2
- package/esm/typings/src/utils/organization/preserve.d.ts +3 -3
- package/esm/typings/src/utils/organization/really_any.d.ts +7 -0
- package/esm/typings/src/utils/serialization/asSerializable.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +21 -3
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/execution/LlmExecutionTools.d.ts
CHANGED

@@ -9,11 +9,12 @@ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult }
  * On its interface it exposes common methods for prompt execution.
  * Inside (in constructor) it calls OpenAI, Azure, GPU, proxy, cache, logging,...
  *
- *
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @see https://github.com/webgptorg/promptbook#llm-execution-tools
  */

@@ -52,6 +53,10 @@ export type LlmExecutionTools = {
     * Calls a chat model
     */
    callChatModel?(prompt: Prompt): Promise<ChatPromptResult>;
+    /**
+     * Calls a chat model with streaming
+     */
+    callChatModelStream?(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
    /**
     * Calls a completion model
     */
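As context for the hunk above, here is a minimal sketch of how a caller might prefer the new optional `callChatModelStream` and fall back to `callChatModel` when a provider does not implement streaming. The import path from `@promptbook/core` and the availability of a ready-made `prompt` are assumptions, not part of this diff.

```ts
import type { ChatPromptResult, LlmExecutionTools, Prompt } from '@promptbook/core'; // <- assumed entry point

// Prefer streaming when the provider implements it, otherwise use the plain call
async function chatPreferringStream(
    tools: LlmExecutionTools,
    prompt: Prompt,
    onChunk: (chunk: ChatPromptResult) => void,
): Promise<ChatPromptResult> {
    if (tools.callChatModelStream !== undefined) {
        // Intermediate chunks are reported via the callback; the promise resolves with the final result
        return tools.callChatModelStream(prompt, onChunk);
    }
    if (tools.callChatModel !== undefined) {
        return tools.callChatModel(prompt);
    }
    throw new Error('These execution tools do not support chat models');
}
```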
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts
CHANGED

@@ -1,7 +1,7 @@
 import { Promisable } from 'type-fest';
 import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
 import type { string_app_id, string_url } from '../../../types/typeAliases';
-import type {
+import type { chococake } from '../../../utils/organization/really_any';
 import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
 type ProvideLlmToolsForWizardOrCliOptions = {

@@ -38,7 +38,7 @@ type ProvideLlmToolsForWizardOrCliOptions = {
     *
     * Note: When login prompt fails, `process.exit(1)` is called
     */
-    loginPrompt(): Promisable<Identification<
+    loginPrompt(): Promisable<Identification<chococake>>;
 });
 /**
  * Returns LLM tools for CLI
package/esm/typings/src/llm-providers/agent/Agent.d.ts
CHANGED

@@ -2,17 +2,20 @@ import { BehaviorSubject } from 'rxjs';
 import type { AgentBasicInformation, BookParameter } from '../../book-2.0/agent-source/AgentBasicInformation';
 import type { string_book } from '../../book-2.0/agent-source/string_book';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { Prompt } from '../../types/Prompt';
+import type { string_agent_hash, string_agent_name, string_agent_url, string_url_image } from '../../types/typeAliases';
 import { AgentLlmExecutionTools } from './AgentLlmExecutionTools';
 import type { AgentOptions } from './AgentOptions';
 /**
  * Represents one AI Agent
  *
- *
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
  */

@@ -26,6 +29,14 @@ export declare class Agent extends AgentLlmExecutionTools implements LlmExecutio
     * Description of the agent
     */
    personaDescription: string | null;
+    /**
+     * The initial message shown to the user when the chat starts
+     */
+    initialMessage: string | null;
+    /**
+     * Links found in the agent source
+     */
+    links: Array<string_agent_url>;
    /**
     * Computed hash of the agent source for integrity verification
     */

@@ -46,8 +57,13 @@ export declare class Agent extends AgentLlmExecutionTools implements LlmExecutio
    get parameters(): BookParameter[];
    readonly agentSource: BehaviorSubject<string_book>;
    constructor(options: AgentOptions);
+    /**
+     * Calls the chat model with agent-specific system prompt and requirements with streaming
+     *
+     * Note: This method also implements the learning mechanism
+     */
+    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
 }
 /**
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
- * TODO: !!! Agent on remote server
  */
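A small sketch of how the new `initialMessage` and `links` fields might be read when seeding a chat UI. `Agent` is documented above as `@public exported from @promptbook/core`; everything else here (function name, fallback text) is illustrative.

```ts
import type { Agent } from '@promptbook/core';

// Build the opening message of a chat from the fields added in this version
function getOpeningMessage(agent: Agent): string {
    // `initialMessage` and `personaDescription` are both declared as `string | null` in the typings above
    const greeting = agent.initialMessage ?? agent.personaDescription ?? 'Hello!';
    const links = agent.links.length > 0 ? `\n\nSee also: ${agent.links.join(', ')}` : '';
    return greeting + links;
}
```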
package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts
CHANGED

@@ -1,4 +1,5 @@
 import type { Promisable } from 'type-fest';
+import type { string_book } from '../../book-2.0/agent-source/string_book';
 import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -10,11 +11,12 @@ import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecut
  * Execution Tools for calling LLM models with a predefined agent "soul"
  * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
  *
- *
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
  */

@@ -39,6 +41,12 @@ export declare class AgentLlmExecutionTools implements LlmExecutionTools {
     * @param agentSource The agent source string that defines the agent's behavior
     */
    constructor(options: CreateAgentLlmExecutionToolsOptions);
+    /**
+     * Updates the agent source and clears the cache
+     *
+     * @param agentSource The new agent source string
+     */
+    protected updateAgentSource(agentSource: string_book): void;
    /**
     * Get cached or parse agent information
     */

@@ -60,6 +68,10 @@ export declare class AgentLlmExecutionTools implements LlmExecutionTools {
     * Calls the chat model with agent-specific system prompt and requirements
     */
    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
+    /**
+     * Calls the chat model with agent-specific system prompt and requirements with streaming
+     */
+    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
 }
 /**
  * TODO: [🍚] Implement Destroyable pattern to free resources
package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts
CHANGED

@@ -1,16 +1,17 @@
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
+import type { string_agent_hash, string_agent_name } from '../../types/typeAliases';
 import { Agent } from './Agent';
 import type { RemoteAgentOptions } from './RemoteAgentOptions';
 /**
  * Represents one AI Agent
  *
- *
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
- * !!!!!! `RemoteAgent`
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
  */

@@ -20,11 +21,19 @@ export declare class RemoteAgent extends Agent {
     * The source of the agent
     */
    private agentUrl;
+    private _remoteAgentName;
+    private _remoteAgentHash;
    private constructor();
+    get agentName(): string_agent_name;
+    get agentHash(): string_agent_hash;
    /**
     * Calls the agent on agents remote server
     */
    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
+    /**
+     * Calls the agent on agents remote server with streaming
+     */
+    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
 }
 /**
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
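As a rough sketch, the new getters could be read like this once a `RemoteAgent` instance has been obtained (its constructor is private, so construction presumably goes through a factory that this excerpt does not show). The import path is an assumption based on the `@public exported from @promptbook/core` tag above.

```ts
import type { RemoteAgent } from '@promptbook/core';

// Both getters were added in this version
function describeRemoteAgent(agent: RemoteAgent): string {
    return `${agent.agentName} (source hash ${agent.agentHash})`;
}
```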
package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts
CHANGED

@@ -9,11 +9,12 @@ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
  *
  * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
  *
- *
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/openai`
  */

@@ -32,6 +33,10 @@ export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools
     * Calls OpenAI API to use a chat model.
     */
    callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
+    /**
+     * Calls OpenAI API to use a chat model with streaming.
+     */
+    callChatModelStream(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
    /**
     * Get an existing assistant tool wrapper
     */
package/esm/typings/src/remote-server/startAgentServer.d.ts
CHANGED

@@ -9,7 +9,7 @@ type AgentsServerOptions = {
    port: number_port;
 };
 /**
- *
+ * [🐱🚀]
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
  *
  * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.

@@ -17,7 +17,7 @@ type AgentsServerOptions = {
  *
  * @see https://github.com/webgptorg/promptbook#remote-server
  * @public exported from `@promptbook/remote-server`
- * <- TODO:
+ * <- TODO: [🐱🚀] Change to `@promptbook/agent-server`
  */
 export declare function startAgentServer(options: AgentsServerOptions): Promise<TODO_any>;
 export {};
package/esm/typings/src/utils/color/Color.d.ts
CHANGED

@@ -59,6 +59,13 @@ export declare class Color {
     * @returns Color object
     */
    private static fromHex3;
+    /**
+     * Creates a new Color instance from color in hex format with 4 digits (with alpha channel)
+     *
+     * @param color in hex for example `09df`
+     * @returns Color object
+     */
+    private static fromHex4;
    /**
     * Creates a new Color instance from color in hex format with 6 color digits (without alpha channel)
     *
package/esm/typings/src/utils/color/Color.test.d.ts
ADDED

@@ -0,0 +1 @@
+export {};
package/esm/typings/src/utils/environment/$getGlobalScope.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { chococake } from '../organization/really_any';
 /**
  * Safely retrieves the global scope object (window in browser, global in Node.js)
  * regardless of the JavaScript environment in which the code is running

@@ -7,4 +7,4 @@ import type { really_any } from '../organization/really_any';
  *
  * @private internal function of `$Register`
  */
-export declare function $getGlobalScope():
+export declare function $getGlobalScope(): chococake;
package/esm/typings/src/utils/misc/computeHash.d.ts
ADDED

@@ -0,0 +1,11 @@
+import { string_sha256 } from '../../types/typeAliases';
+import { really_unknown } from '../organization/really_unknown';
+/**
+ * Computes SHA-256 hash of the given object
+ *
+ * @public exported from `@promptbook/utils`
+ */
+export declare function computeHash(value: really_unknown): string_sha256;
+/**
+ * TODO: [🥬][🥬] Use this ACRY
+ */
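`computeHash` is declared `@public exported from @promptbook/utils`, so usage should look roughly like the sketch below. The argument is illustrative and the exact hash output is not shown here.

```ts
import { computeHash } from '@promptbook/utils';

// Hash any value; per the typings above the result is a SHA-256 string (`string_sha256`)
const agentSourceHash = computeHash({ agentName: 'Example agent', source: '...' });
console.log(agentSourceHash);
```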
package/esm/typings/src/utils/misc/computeHash.test.d.ts
ADDED

@@ -0,0 +1 @@
+export {};
package/esm/typings/src/utils/organization/$sideEffect.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { chococake } from './really_any';
 /**
  * Just says that the variable is not used directlys but should be kept because the existence of the variable is important
  *

@@ -6,4 +6,4 @@ import type { really_any } from './really_any';
  * @returns void
  * @private within the repository
  */
-export declare function $sideEffect(...sideEffectSubjects: ReadonlyArray<
+export declare function $sideEffect(...sideEffectSubjects: ReadonlyArray<chococake>): void;
package/esm/typings/src/utils/organization/$side_effect.d.ts
CHANGED

@@ -1,7 +1,7 @@
-import type {
+import type { chococake } from './really_any';
 /**
  * Organizational helper to mark a function that produces side effects
  *
  * @private within the repository
  */
-export type $side_effect = void |
+export type $side_effect = void | chococake;
package/esm/typings/src/utils/organization/TODO_USE.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { chococake } from './really_any';
 /**
  * Just marks a place of place where should be something implemented
  * No side effects.

@@ -9,4 +9,4 @@ import type { really_any } from './really_any';
  * @returns void
  * @private within the repository
  */
-export declare function TODO_USE(...value: ReadonlyArray<
+export declare function TODO_USE(...value: ReadonlyArray<chococake>): void;
package/esm/typings/src/utils/organization/keepUnused.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { chococake } from './really_any';
 /**
  * Just says that the variable is not used but should be kept
  * No side effects.

@@ -13,4 +13,4 @@ import type { really_any } from './really_any';
  * @returns void
  * @private within the repository
  */
-export declare function keepUnused<TTypeToKeep1 =
+export declare function keepUnused<TTypeToKeep1 = chococake, TTypeToKeep2 = chococake, TTypeToKeep3 = chococake>(...valuesToKeep: ReadonlyArray<chococake>): void;
package/esm/typings/src/utils/organization/preserve.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { chococake } from './really_any';
 /**
  * Does nothing, but preserves the function in the bundle
  * Compiler is tricked into thinking the function is used

@@ -7,7 +7,7 @@ import type { really_any } from './really_any';
  * @returns nothing
  * @private within the repository
  */
-export declare function $preserve(...value: Array<
+export declare function $preserve(...value: Array<chococake>): void;
 /**
  * DO NOT USE THIS FUNCTION
  * Only purpose of this function is to trick the compiler and javascript engine

@@ -15,7 +15,7 @@ export declare function $preserve(...value: Array<really_any>): void;
  *
  * @private internal for `preserve`
  */
-export declare function __DO_NOT_USE_getPreserved(): Array<
+export declare function __DO_NOT_USE_getPreserved(): Array<chococake>;
 /**
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
package/esm/typings/src/utils/serialization/asSerializable.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { chococake } from '../organization/really_any';
 /**
  * Function `asSerializable` will convert values which are not serializable to serializable values
  * It walks deeply through the object and converts all values

@@ -12,4 +12,4 @@ import type { really_any } from '../organization/really_any';
  *
  * @private Internal helper function
  */
-export declare function asSerializable(value:
+export declare function asSerializable(value: chococake): chococake;
package/esm/typings/src/version.d.ts
CHANGED

@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.103.0-
+ * It follows semantic versioning (e.g., `0.103.0-49`).
  *
  * @generated
  */
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/vercel",
-    "version": "0.103.0-
+    "version": "0.103.0-50",
     "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
     "private": false,
     "sideEffects": false,

@@ -94,7 +94,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/vercel.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.103.0-
+        "@promptbook/core": "0.103.0-50"
     },
     "dependencies": {
         "colors": "^1.4.0",
package/umd/index.umd.js
CHANGED

@@ -23,7 +23,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.103.0-
+const PROMPTBOOK_ENGINE_VERSION = '0.103.0-50';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -926,6 +926,9 @@
     if (hex.length === 3) {
         return Color.fromHex3(hex);
     }
+    if (hex.length === 4) {
+        return Color.fromHex4(hex);
+    }
     if (hex.length === 6) {
         return Color.fromHex6(hex);
     }
@@ -946,6 +949,19 @@
     const b = parseInt(hex.substr(2, 1), 16) * 16;
     return take(new Color(r, g, b));
 }
+/**
+ * Creates a new Color instance from color in hex format with 4 digits (with alpha channel)
+ *
+ * @param color in hex for example `09df`
+ * @returns Color object
+ */
+static fromHex4(hex) {
+    const r = parseInt(hex.substr(0, 1), 16) * 16;
+    const g = parseInt(hex.substr(1, 1), 16) * 16;
+    const b = parseInt(hex.substr(2, 1), 16) * 16;
+    const a = parseInt(hex.substr(3, 1), 16) * 16;
+    return take(new Color(r, g, b, a));
+}
 /**
  * Creates a new Color instance from color in hex format with 6 color digits (without alpha channel)
  *
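To make the 4-digit branch concrete, here is the same arithmetic as a standalone snippet (not package code), applied to the `09df` example from the docstring. Note that each nibble is scaled by 16 rather than expanded to the full `0x00`..`0xff` range, so `f` maps to 240 rather than 255, which matches the existing `fromHex3` behaviour shown just above.

```ts
// Mirrors the substr-based expansion in `fromHex4` above; the function name is illustrative
function expandHex4(hex: string): { r: number; g: number; b: number; a: number } {
    return {
        r: parseInt(hex.substr(0, 1), 16) * 16, // '0' -> 0
        g: parseInt(hex.substr(1, 1), 16) * 16, // '9' -> 144
        b: parseInt(hex.substr(2, 1), 16) * 16, // 'd' -> 208
        a: parseInt(hex.substr(3, 1), 16) * 16, // 'f' -> 240
    };
}

console.log(expandHex4('09df')); // { r: 0, g: 144, b: 208, a: 240 }
```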
@@ -1136,7 +1152,8 @@
  * @returns true if the value is a valid hex color string (e.g., `#009edd`, `#fff`, etc.)
  */
 static isHexColorString(value) {
-    return typeof value === 'string' &&
+    return (typeof value === 'string' &&
+        /^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$/.test(value));
 }
 /**
  * Creates new Color object
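The validation regex above now accepts 3-, 4-, 6- and 8-digit forms. Exercised standalone (the constant name is illustrative):

```ts
const HEX_COLOR = /^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$/;

console.log(HEX_COLOR.test('#fff'));      // true  (3 digits)
console.log(HEX_COLOR.test('#09df'));     // true  (4 digits, pairs with the new fromHex4)
console.log(HEX_COLOR.test('#009edd'));   // true  (6 digits)
console.log(HEX_COLOR.test('#009eddff')); // true  (8 digits)
console.log(HEX_COLOR.test('009edd'));    // false (missing leading '#')
```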
@@ -1443,6 +1460,7 @@
     ({
         TITLE: Color.fromHex('#244EA8'),
         LINE: Color.fromHex('#eeeeee'),
+        SEPARATOR: Color.fromHex('#cccccc'),
         COMMITMENT: Color.fromHex('#DA0F78'),
         PARAMETER: Color.fromHex('#8e44ad'),
     });
@@ -1801,7 +1819,7 @@
     TODO: [🧠] Is there a better implementation?
     > const propertyNames = Object.getOwnPropertyNames(objectValue);
     > for (const propertyName of propertyNames) {
-    >     const value = (objectValue as
+    >     const value = (objectValue as chococake)[propertyName];
     >     if (value && typeof value === 'object') {
     >         deepClone(value);
     >     }