@promptbook/google 0.103.0-48 → 0.103.0-50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/esm/index.es.js +21 -3
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/servers.d.ts +1 -0
  4. package/esm/typings/src/_packages/components.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +12 -2
  8. package/esm/typings/src/book-components/PromptbookAgent/PromptbookAgent.d.ts +20 -0
  9. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +14 -8
  10. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabaseOptions.d.ts +10 -0
  11. package/esm/typings/src/commitments/MESSAGE/InitialMessageCommitmentDefinition.d.ts +28 -0
  12. package/esm/typings/src/commitments/index.d.ts +2 -1
  13. package/esm/typings/src/config.d.ts +1 -0
  14. package/esm/typings/src/errors/DatabaseError.d.ts +2 -2
  15. package/esm/typings/src/errors/WrappedError.d.ts +2 -2
  16. package/esm/typings/src/execution/ExecutionTask.d.ts +2 -2
  17. package/esm/typings/src/execution/LlmExecutionTools.d.ts +6 -1
  18. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +2 -2
  19. package/esm/typings/src/llm-providers/agent/Agent.d.ts +19 -3
  20. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +13 -1
  21. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +11 -2
  22. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +6 -1
  23. package/esm/typings/src/remote-server/startAgentServer.d.ts +2 -2
  24. package/esm/typings/src/utils/color/Color.d.ts +7 -0
  25. package/esm/typings/src/utils/color/Color.test.d.ts +1 -0
  26. package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -2
  27. package/esm/typings/src/utils/misc/computeHash.d.ts +11 -0
  28. package/esm/typings/src/utils/misc/computeHash.test.d.ts +1 -0
  29. package/esm/typings/src/utils/organization/$sideEffect.d.ts +2 -2
  30. package/esm/typings/src/utils/organization/$side_effect.d.ts +2 -2
  31. package/esm/typings/src/utils/organization/TODO_USE.d.ts +2 -2
  32. package/esm/typings/src/utils/organization/keepUnused.d.ts +2 -2
  33. package/esm/typings/src/utils/organization/preserve.d.ts +3 -3
  34. package/esm/typings/src/utils/organization/really_any.d.ts +7 -0
  35. package/esm/typings/src/utils/serialization/asSerializable.d.ts +2 -2
  36. package/esm/typings/src/version.d.ts +1 -1
  37. package/package.json +2 -2
  38. package/umd/index.umd.js +21 -3
  39. package/umd/index.umd.js.map +1 -1
package/esm/typings/src/errors/WrappedError.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { really_any } from '../utils/organization/really_any';
+ import type { chococake } from '../utils/organization/really_any';
  /**
  * This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
  *
@@ -6,5 +6,5 @@ import type { really_any } from '../utils/organization/really_any';
  */
  export declare class WrappedError extends Error {
  readonly name = "WrappedError";
- constructor(whatWasThrown: really_any);
+ constructor(whatWasThrown: chococake);
  }
package/esm/typings/src/execution/ExecutionTask.d.ts CHANGED
@@ -3,7 +3,7 @@ import { PartialDeep } from 'type-fest';
  import type { LlmCall } from '../types/LlmCall';
  import type { number_percent, task_id } from '../types/typeAliases';
  import type { string_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
- import type { really_any } from '../utils/organization/really_any';
+ import type { chococake } from '../utils/organization/really_any';
  import type { string_promptbook_version } from '../version';
  import type { AbstractTaskResult } from './AbstractTaskResult';
  import type { PipelineExecutorResult } from './PipelineExecutorResult';
@@ -144,7 +144,7 @@ export type AbstractTask<TTaskResult extends AbstractTaskResult> = {
  /**
  * Optional nonce to correlate logs with version of the Promptbook engine
  */
- readonly ptbkNonce?: really_any;
+ readonly ptbkNonce?: chococake;
  };
  export type Task = ExecutionTask | PreparationTask;
  export {};
package/esm/typings/src/execution/LlmExecutionTools.d.ts CHANGED
@@ -9,11 +9,12 @@ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult }
  * On its interface it exposes common methods for prompt execution.
  * Inside (in constructor) it calls OpenAI, Azure, GPU, proxy, cache, logging,...
  *
- * !!! Note: [🦖] There are several different things in Promptbook:
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @see https://github.com/webgptorg/promptbook#llm-execution-tools
  */
@@ -52,6 +53,10 @@ export type LlmExecutionTools = {
  * Calls a chat model
  */
  callChatModel?(prompt: Prompt): Promise<ChatPromptResult>;
+ /**
+ * Calls a chat model with streaming
+ */
+ callChatModelStream?(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
  /**
  * Calls a completion model
  */
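
The new optional `callChatModelStream` method mirrors `callChatModel` but delivers partial results through an `onProgress` callback before resolving with the final result. A minimal consumer sketch, assuming the `LlmExecutionTools` and `Prompt` types are importable from `@promptbook/core` and that `ChatPromptResult` carries its text in a `content` field (both are assumptions, not confirmed by this diff):

import type { LlmExecutionTools, Prompt } from '@promptbook/core';

// Stream a chat completion when the provider supports it, otherwise fall back
// to the plain `callChatModel`. Both methods are optional on the interface.
async function chatWithProgress(tools: LlmExecutionTools, prompt: Prompt) {
    if (tools.callChatModelStream) {
        return tools.callChatModelStream(prompt, (chunk) => {
            // Each chunk is a (partial) ChatPromptResult; the `content` field is assumed here
            process.stdout.write(chunk.content ?? '');
        });
    }
    if (tools.callChatModel) {
        return tools.callChatModel(prompt);
    }
    throw new Error('This LlmExecutionTools implementation does not expose a chat model');
}
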
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { Promisable } from 'type-fest';
  import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
  import type { string_app_id, string_url } from '../../../types/typeAliases';
- import type { really_any } from '../../../utils/organization/really_any';
+ import type { chococake } from '../../../utils/organization/really_any';
  import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
  import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
  type ProvideLlmToolsForWizardOrCliOptions = {
@@ -38,7 +38,7 @@ type ProvideLlmToolsForWizardOrCliOptions = {
  *
  * Note: When login prompt fails, `process.exit(1)` is called
  */
- loginPrompt(): Promisable<Identification<really_any>>;
+ loginPrompt(): Promisable<Identification<chococake>>;
  });
  /**
  * Returns LLM tools for CLI
package/esm/typings/src/llm-providers/agent/Agent.d.ts CHANGED
@@ -2,17 +2,20 @@ import { BehaviorSubject } from 'rxjs';
  import type { AgentBasicInformation, BookParameter } from '../../book-2.0/agent-source/AgentBasicInformation';
  import type { string_book } from '../../book-2.0/agent-source/string_book';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { string_agent_hash, string_agent_name, string_url_image } from '../../types/typeAliases';
+ import type { ChatPromptResult } from '../../execution/PromptResult';
+ import type { Prompt } from '../../types/Prompt';
+ import type { string_agent_hash, string_agent_name, string_agent_url, string_url_image } from '../../types/typeAliases';
  import { AgentLlmExecutionTools } from './AgentLlmExecutionTools';
  import type { AgentOptions } from './AgentOptions';
  /**
  * Represents one AI Agent
  *
- * !!! Note: [🦖] There are several different things in Promptbook:
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
  */
@@ -26,6 +29,14 @@ export declare class Agent extends AgentLlmExecutionTools implements LlmExecutio
  * Description of the agent
  */
  personaDescription: string | null;
+ /**
+ * The initial message shown to the user when the chat starts
+ */
+ initialMessage: string | null;
+ /**
+ * Links found in the agent source
+ */
+ links: Array<string_agent_url>;
  /**
  * Computed hash of the agent source for integrity verification
  */
@@ -46,8 +57,13 @@ export declare class Agent extends AgentLlmExecutionTools implements LlmExecutio
  get parameters(): BookParameter[];
  readonly agentSource: BehaviorSubject<string_book>;
  constructor(options: AgentOptions);
+ /**
+ * Calls the chat model with agent-specific system prompt and requirements with streaming
+ *
+ * Note: This method also implements the learning mechanism
+ */
+ callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
  }
  /**
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
- * TODO: !!! Agent on remote server
  */
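
Besides streaming, the `Agent` class now exposes `initialMessage` and `links` parsed from the agent source. A sketch of how a chat UI might surface them, assuming an already-constructed `Agent` instance (the construction options are not part of this diff):

import type { Agent } from '@promptbook/core';

// Build a short greeting header from the new Agent metadata.
function describeAgent(agent: Agent): string {
    const greeting = agent.initialMessage ?? 'Hello!';
    const links = agent.links.length > 0 ? `\nLinks: ${agent.links.join(', ')}` : '';
    return `${greeting}${links}`;
}
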
package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts CHANGED
@@ -1,4 +1,5 @@
  import type { Promisable } from 'type-fest';
+ import type { string_book } from '../../book-2.0/agent-source/string_book';
  import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
@@ -10,11 +11,12 @@ import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecut
  * Execution Tools for calling LLM models with a predefined agent "soul"
  * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
  *
- * !!! Note: [🦖] There are several different things in Promptbook:
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
  */
@@ -39,6 +41,12 @@ export declare class AgentLlmExecutionTools implements LlmExecutionTools {
  * @param agentSource The agent source string that defines the agent's behavior
  */
  constructor(options: CreateAgentLlmExecutionToolsOptions);
+ /**
+ * Updates the agent source and clears the cache
+ *
+ * @param agentSource The new agent source string
+ */
+ protected updateAgentSource(agentSource: string_book): void;
  /**
  * Get cached or parse agent information
  */
@@ -60,6 +68,10 @@ export declare class AgentLlmExecutionTools implements LlmExecutionTools {
  * Calls the chat model with agent-specific system prompt and requirements
  */
  callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
+ /**
+ * Calls the chat model with agent-specific system prompt and requirements with streaming
+ */
+ callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
  }
  /**
  * TODO: [🍚] Implement Destroyable pattern to free resources
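
The new protected `updateAgentSource` hook lets a subclass swap the agent's book source at runtime while the base class drops its cached parse of the agent information. A hypothetical subclass sketch; the `string_book` import location and the way such a subclass would be constructed are assumptions, not shown in this diff:

import { AgentLlmExecutionTools } from '@promptbook/core';
import type { string_book } from '@promptbook/core';

// Hypothetical subclass that re-reads its agent definition on demand.
class HotReloadableAgentTools extends AgentLlmExecutionTools {
    public reload(newAgentSource: string_book): void {
        // Protected hook added in this release: replaces the source and clears the cache
        this.updateAgentSource(newAgentSource);
    }
}
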
package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts CHANGED
@@ -1,16 +1,17 @@
  import type { ChatPromptResult } from '../../execution/PromptResult';
  import type { Prompt } from '../../types/Prompt';
+ import type { string_agent_hash, string_agent_name } from '../../types/typeAliases';
  import { Agent } from './Agent';
  import type { RemoteAgentOptions } from './RemoteAgentOptions';
  /**
  * Represents one AI Agent
  *
- * !!!!!! Note: [🦖] There are several different things in Promptbook:
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
- * !!!!!! `RemoteAgent`
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
  */
@@ -20,11 +21,19 @@ export declare class RemoteAgent extends Agent {
  * The source of the agent
  */
  private agentUrl;
+ private _remoteAgentName;
+ private _remoteAgentHash;
  private constructor();
+ get agentName(): string_agent_name;
+ get agentHash(): string_agent_hash;
  /**
  * Calls the agent on agents remote server
  */
  callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
+ /**
+ * Calls the agent on agents remote server with streaming
+ */
+ callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
  }
  /**
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts CHANGED
@@ -9,11 +9,12 @@ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
  *
  * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
  *
- * !!! Note: [🦖] There are several different things in Promptbook:
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/openai`
  */
@@ -32,6 +33,10 @@ export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools
  * Calls OpenAI API to use a chat model.
  */
  callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
+ /**
+ * Calls OpenAI API to use a chat model with streaming.
+ */
+ callChatModelStream(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
  /**
  * Get an existing assistant tool wrapper
  */
package/esm/typings/src/remote-server/startAgentServer.d.ts CHANGED
@@ -9,7 +9,7 @@ type AgentsServerOptions = {
  port: number_port;
  };
  /**
- * !!!!!
+ * [🐱‍🚀]
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
  *
  * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
@@ -17,7 +17,7 @@ type AgentsServerOptions = {
  *
  * @see https://github.com/webgptorg/promptbook#remote-server
  * @public exported from `@promptbook/remote-server`
- * <- TODO: !!!! Change to `@promptbook/agent-server`
+ * <- TODO: [🐱‍🚀] Change to `@promptbook/agent-server`
  */
  export declare function startAgentServer(options: AgentsServerOptions): Promise<TODO_any>;
  export {};
package/esm/typings/src/utils/color/Color.d.ts CHANGED
@@ -59,6 +59,13 @@ export declare class Color {
  * @returns Color object
  */
  private static fromHex3;
+ /**
+ * Creates a new Color instance from color in hex format with 4 digits (with alpha channel)
+ *
+ * @param color in hex for example `09df`
+ * @returns Color object
+ */
+ private static fromHex4;
  /**
  * Creates a new Color instance from color in hex format with 6 color digits (without alpha channel)
  *
package/esm/typings/src/utils/color/Color.test.d.ts ADDED
@@ -0,0 +1 @@
+ export {};
package/esm/typings/src/utils/environment/$getGlobalScope.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { really_any } from '../organization/really_any';
+ import type { chococake } from '../organization/really_any';
  /**
  * Safely retrieves the global scope object (window in browser, global in Node.js)
  * regardless of the JavaScript environment in which the code is running
@@ -7,4 +7,4 @@ import type { really_any } from '../organization/really_any';
  *
  * @private internal function of `$Register`
  */
- export declare function $getGlobalScope(): really_any;
+ export declare function $getGlobalScope(): chococake;
package/esm/typings/src/utils/misc/computeHash.d.ts ADDED
@@ -0,0 +1,11 @@
+ import { string_sha256 } from '../../types/typeAliases';
+ import { really_unknown } from '../organization/really_unknown';
+ /**
+ * Computes SHA-256 hash of the given object
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ export declare function computeHash(value: really_unknown): string_sha256;
+ /**
+ * TODO: [🥬][🥬] Use this ACRY
+ */
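
`computeHash` is a new public utility that returns a SHA-256 hash for an arbitrary value. A minimal usage sketch, assuming it is importable from `@promptbook/utils` as the doc comment states (the exact string format of `string_sha256` is defined in the package's type aliases, not in this diff):

import { computeHash } from '@promptbook/utils';

// Hash an arbitrary serializable object; the result is a `string_sha256`.
const hash = computeHash({ agentName: 'Alice', version: '0.103.0-50' });
console.log(hash); // SHA-256 of the value; exact encoding per `string_sha256`
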
package/esm/typings/src/utils/misc/computeHash.test.d.ts ADDED
@@ -0,0 +1 @@
+ export {};
package/esm/typings/src/utils/organization/$sideEffect.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { really_any } from './really_any';
+ import type { chococake } from './really_any';
  /**
  * Just says that the variable is not used directlys but should be kept because the existence of the variable is important
  *
@@ -6,4 +6,4 @@ import type { really_any } from './really_any';
  * @returns void
  * @private within the repository
  */
- export declare function $sideEffect(...sideEffectSubjects: ReadonlyArray<really_any>): void;
+ export declare function $sideEffect(...sideEffectSubjects: ReadonlyArray<chococake>): void;
package/esm/typings/src/utils/organization/$side_effect.d.ts CHANGED
@@ -1,7 +1,7 @@
- import type { really_any } from './really_any';
+ import type { chococake } from './really_any';
  /**
  * Organizational helper to mark a function that produces side effects
  *
  * @private within the repository
  */
- export type $side_effect = void | really_any;
+ export type $side_effect = void | chococake;
package/esm/typings/src/utils/organization/TODO_USE.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { really_any } from './really_any';
+ import type { chococake } from './really_any';
  /**
  * Just marks a place of place where should be something implemented
  * No side effects.
@@ -9,4 +9,4 @@ import type { really_any } from './really_any';
  * @returns void
  * @private within the repository
  */
- export declare function TODO_USE(...value: ReadonlyArray<really_any>): void;
+ export declare function TODO_USE(...value: ReadonlyArray<chococake>): void;
package/esm/typings/src/utils/organization/keepUnused.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { really_any } from './really_any';
+ import type { chococake } from './really_any';
  /**
  * Just says that the variable is not used but should be kept
  * No side effects.
@@ -13,4 +13,4 @@ import type { really_any } from './really_any';
  * @returns void
  * @private within the repository
  */
- export declare function keepUnused<TTypeToKeep1 = really_any, TTypeToKeep2 = really_any, TTypeToKeep3 = really_any>(...valuesToKeep: ReadonlyArray<really_any>): void;
+ export declare function keepUnused<TTypeToKeep1 = chococake, TTypeToKeep2 = chococake, TTypeToKeep3 = chococake>(...valuesToKeep: ReadonlyArray<chococake>): void;
package/esm/typings/src/utils/organization/preserve.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { really_any } from './really_any';
+ import type { chococake } from './really_any';
  /**
  * Does nothing, but preserves the function in the bundle
  * Compiler is tricked into thinking the function is used
@@ -7,7 +7,7 @@ import type { really_any } from './really_any';
  * @returns nothing
  * @private within the repository
  */
- export declare function $preserve(...value: Array<really_any>): void;
+ export declare function $preserve(...value: Array<chococake>): void;
  /**
  * DO NOT USE THIS FUNCTION
  * Only purpose of this function is to trick the compiler and javascript engine
@@ -15,7 +15,7 @@ export declare function $preserve(...value: Array<really_any>): void;
  *
  * @private internal for `preserve`
  */
- export declare function __DO_NOT_USE_getPreserved(): Array<really_any>;
+ export declare function __DO_NOT_USE_getPreserved(): Array<chococake>;
  /**
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
package/esm/typings/src/utils/organization/really_any.d.ts CHANGED
@@ -1,3 +1,10 @@
+ /**
+ * Organizational helper to tell to use intentionally `any`
+ *
+ * @alias really_any
+ * @private within the repository
+ */
+ export type chococake = really_any;
  /**
  * Organizational helper to mark a place where to really use `any`
  *
package/esm/typings/src/utils/serialization/asSerializable.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { really_any } from '../organization/really_any';
+ import type { chococake } from '../organization/really_any';
  /**
  * Function `asSerializable` will convert values which are not serializable to serializable values
  * It walks deeply through the object and converts all values
@@ -12,4 +12,4 @@ import type { really_any } from '../organization/really_any';
  *
  * @private Internal helper function
  */
- export declare function asSerializable(value: really_any): really_any;
+ export declare function asSerializable(value: chococake): chococake;
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.103.0-47`).
+ * It follows semantic versioning (e.g., `0.103.0-49`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/google",
- "version": "0.103.0-48",
+ "version": "0.103.0-50",
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
  "private": false,
  "sideEffects": false,
@@ -95,7 +95,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/google.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.103.0-48"
+ "@promptbook/core": "0.103.0-50"
  },
  "dependencies": {
  "@ai-sdk/google": "1.0.17",
package/umd/index.umd.js CHANGED
@@ -23,7 +23,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-48';
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-50';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -944,6 +944,9 @@
  if (hex.length === 3) {
  return Color.fromHex3(hex);
  }
+ if (hex.length === 4) {
+ return Color.fromHex4(hex);
+ }
  if (hex.length === 6) {
  return Color.fromHex6(hex);
  }
@@ -964,6 +967,19 @@
  const b = parseInt(hex.substr(2, 1), 16) * 16;
  return take(new Color(r, g, b));
  }
+ /**
+ * Creates a new Color instance from color in hex format with 4 digits (with alpha channel)
+ *
+ * @param color in hex for example `09df`
+ * @returns Color object
+ */
+ static fromHex4(hex) {
+ const r = parseInt(hex.substr(0, 1), 16) * 16;
+ const g = parseInt(hex.substr(1, 1), 16) * 16;
+ const b = parseInt(hex.substr(2, 1), 16) * 16;
+ const a = parseInt(hex.substr(3, 1), 16) * 16;
+ return take(new Color(r, g, b, a));
+ }
  /**
  * Creates a new Color instance from color in hex format with 6 color digits (without alpha channel)
  *
@@ -1154,7 +1170,8 @@
  * @returns true if the value is a valid hex color string (e.g., `#009edd`, `#fff`, etc.)
  */
  static isHexColorString(value) {
- return typeof value === 'string' && /^#(?:[0-9a-fA-F]{3}){1,2}$/.test(value);
+ return (typeof value === 'string' &&
+ /^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$/.test(value));
  }
  /**
  * Creates new Color object
@@ -1461,6 +1478,7 @@
  ({
  TITLE: Color.fromHex('#244EA8'),
  LINE: Color.fromHex('#eeeeee'),
+ SEPARATOR: Color.fromHex('#cccccc'),
  COMMITMENT: Color.fromHex('#DA0F78'),
  PARAMETER: Color.fromHex('#8e44ad'),
  });
@@ -1819,7 +1837,7 @@
  TODO: [🧠] Is there a better implementation?
  > const propertyNames = Object.getOwnPropertyNames(objectValue);
  > for (const propertyName of propertyNames) {
- > const value = (objectValue as really_any)[propertyName];
+ > const value = (objectValue as chococake)[propertyName];
  > if (value && typeof value === 'object') {
  > deepClone(value);
  > }
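
For reference, a few values the widened `isHexColorString` pattern now accepts or rejects, derived directly from the regex shown above (illustrative only; `Color` refers to the class bundled in this file):

Color.isHexColorString('#fff');      // true  - 3 digits
Color.isHexColorString('#09df');     // true  - 4 digits with alpha, newly accepted
Color.isHexColorString('#009edd');   // true  - 6 digits
Color.isHexColorString('#009eddff'); // true  - 8 digits with alpha, newly accepted
Color.isHexColorString('#12345');    // false - 5 digits is not a recognized form
Color.isHexColorString('09df');      // false - the leading '#' is required by the pattern
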