@promptbook/browser 0.88.0 → 0.89.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +1 -1
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/types.index.d.ts +16 -4
  6. package/esm/typings/src/cli/cli-commands/login.d.ts +15 -0
  7. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +2 -2
  8. package/esm/typings/src/execution/PromptResult.d.ts +2 -2
  9. package/esm/typings/src/execution/{PromptResultUsage.d.ts → Usage.d.ts} +5 -5
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +2 -2
  11. package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +3 -3
  12. package/esm/typings/src/execution/utils/usage-constants.d.ts +77 -60
  13. package/esm/typings/src/execution/utils/usageToHuman.d.ts +5 -5
  14. package/esm/typings/src/execution/utils/usageToWorktime.d.ts +5 -5
  15. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +9 -2
  16. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
  17. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +2 -2
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +0 -9
  20. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts +2 -2
  21. package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts +2 -2
  22. package/esm/typings/src/playground/BrjappConnector.d.ts +67 -0
  23. package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
  24. package/esm/typings/src/playground/playground.d.ts +5 -0
  25. package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
  26. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
  27. package/esm/typings/src/types/typeAliases.d.ts +8 -2
  28. package/package.json +2 -2
  29. package/umd/index.umd.js +1 -1
  30. package/umd/index.umd.js.map +1 -1
package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts

@@ -1,6 +1,6 @@
 import type Anthropic from '@anthropic-ai/sdk';
 import type { PartialDeep } from 'type-fest';
-import type { PromptResultUsage } from '../../execution/PromptResultUsage';
+import type { Usage } from '../../execution/Usage';
 import type { Prompt } from '../../types/Prompt';
 /**
  * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
@@ -12,7 +12,7 @@ import type { Prompt } from '../../types/Prompt';
  * @private internal utility of `AnthropicClaudeExecutionTools`
  */
 export declare function computeAnthropicClaudeUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
-resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): PromptResultUsage;
+resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): Usage;
 /**
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts

@@ -8,8 +8,6 @@ import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
 import type { string_title } from '../../types/typeAliases';
-import type { string_token } from '../../types/typeAliases';
-import { OpenAiAssistantExecutionTools } from './OpenAiAssistantExecutionTools';
 import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
 /**
  * Execution Tools for calling OpenAI API
@@ -31,13 +29,6 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
     getClient(): Promise<OpenAI>;
-    /**
-     * Create (sub)tools for calling OpenAI API Assistants
-     *
-     * @param assistantId Which assistant to use
-     * @returns Tools for calling OpenAI API Assistants with same token
-     */
-    createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools;
     /**
      * Check the `options` passed to `constructor`
      */
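
The removal of `createAssistantSubtools` above is a breaking change for code that used `OpenAiExecutionTools` to spawn assistant sub-tools. Below is a minimal sketch of a call that type-checks against 0.88.0 but no longer exists in 0.89.0-2; the import path, the `tools` variable, and the assistant id are illustrative placeholders, not confirmed by this diff.

    // Hypothetical entry point; adjust to wherever the project actually imports OpenAiExecutionTools from.
    import type { OpenAiExecutionTools } from '@promptbook/openai';

    // `tools` stands in for however the consuming project already constructs its OpenAiExecutionTools instance.
    declare const tools: OpenAiExecutionTools;

    // Valid in 0.88.0; removed in 0.89.0-2 together with the OpenAiAssistantExecutionTools import above.
    const assistantTools = tools.createAssistantSubtools('asst_placeholder');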
package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts

@@ -1,6 +1,6 @@
 import type OpenAI from 'openai';
 import type { PartialDeep } from 'type-fest';
-import type { PromptResultUsage } from '../../execution/PromptResultUsage';
+import type { Usage } from '../../execution/Usage';
 import type { Prompt } from '../../types/Prompt';
 /**
  * Computes the usage of the OpenAI API based on the response from OpenAI
@@ -12,7 +12,7 @@ import type { Prompt } from '../../types/Prompt';
  * @private internal utility of `OpenAiExecutionTools`
  */
 export declare function computeOpenAiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
-resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): PromptResultUsage;
+resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): Usage;
 /**
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts

@@ -1,4 +1,4 @@
-import type { PromptResultUsage } from '../../execution/PromptResultUsage';
+import type { Usage } from '../../execution/Usage';
 import type { number_id } from '../../types/typeAliases';
 import type { string_promptbook_version } from '../../version';
 export type PreparationJson = {
@@ -13,7 +13,7 @@ export type PreparationJson = {
     /**
      * Usage of the prompt execution
      */
-    readonly usage: PromptResultUsage;
+    readonly usage: Usage;
 };
 /**
  * TODO: [🍙] Make some standard order of json properties
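
The hunks above, together with the renamed files in the list (`PromptResultUsage.d.ts → Usage.d.ts`, `countTotalUsage.d.ts → countUsage.d.ts`), show that the `PromptResultUsage` type is now simply `Usage`. A minimal migration sketch for downstream type-only imports follows; it assumes the type is re-exported from the `@promptbook/types` entry point (as the `types.index.d.ts` change suggests), so adjust the specifier to whichever entry point the project actually uses.

    // Before (0.88.0), hypothetical consumer code:
    // import type { PromptResultUsage } from '@promptbook/types';
    // let totalUsage: PromptResultUsage;

    // After (0.89.0-2):
    import type { Usage } from '@promptbook/types';
    let totalUsage: Usage;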
package/esm/typings/src/playground/BrjappConnector.d.ts

@@ -0,0 +1,67 @@
+/**
+ * TODO: !!!!!! Implement Promptbook remote server login and move to Promptbook.studio
+ */
+type BrjappOptions = {
+    /**
+     * Add user to these groups
+     */
+    readonly userGroups: Array<string>;
+    /**
+     * Add this amount of credits to new users
+     */
+    readonly initialCredits: number;
+};
+/**
+ * Note: Credit = 1 Word to generate or read
+ * Note: What we call here "user" is on BRJ.APP "customer"
+ *
+ * @private - this will be moved to Promptbook studio
+ */
+export declare class BrjappConnector {
+    private readonly apiKey;
+    private options;
+    private readonly client;
+    constructor(apiKey: string, options: BrjappOptions);
+    /**
+     * Login or register user
+     *
+     * TODO: [🧠] Probbably better name for this method
+     *
+     * @param options
+     * @returns user token or null if user needs to verify email
+     */
+    loginOrRegister(options: {
+        email: string;
+        password: string;
+        customerRealIp: string;
+    }): Promise<{
+        isSuccess: boolean;
+        message: string;
+        token: string | null;
+        isEmailVerificationRequired: boolean;
+    }>;
+    private addInitailCredits;
+    buyCredits(options: {
+        email: string;
+        customerRealIp: string;
+    }): Promise<{
+        isSuccess: boolean;
+        message: string;
+        payLink: string | null;
+    }>;
+    /**
+     *
+     * @returns true if credits were spent, false if not enough credits or another error
+     */
+    spendCredits(options: {
+        email: string;
+        token: string;
+        creditsAmount: number;
+        description: string;
+        customerRealIp: string;
+    }): Promise<{
+        isSuccess: boolean;
+        message: string;
+    }>;
+}
+export {};
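
`BrjappConnector` is new in this version and is marked `@private` (playground-only, to be moved to Promptbook.studio), so it is not part of the public API surface. Purely as an illustration of the declared signatures above, here is a sketch of how the class would be driven if it were importable; the import path, API key, and all argument values are placeholders.

    // Hypothetical import: the class lives under src/playground and is not exposed as a public entry point.
    import { BrjappConnector } from './playground/BrjappConnector';

    const connector = new BrjappConnector('brj-api-key-placeholder', {
        userGroups: ['promptbook-users'], // groups new users are added to
        initialCredits: 1000, // credits granted to new users (1 credit = 1 word, per the note above)
    });

    async function demo() {
        // Login or register; `token` is null when email verification is still required.
        const login = await connector.loginOrRegister({
            email: 'user@example.com',
            password: 'placeholder-password',
            customerRealIp: '203.0.113.1',
        });

        if (login.isSuccess && login.token !== null) {
            // Deduct credits for generated/read words; per the JSDoc above, failure
            // (for example not enough credits) is reported rather than thrown.
            const spend = await connector.spendCredits({
                email: 'user@example.com',
                token: login.token,
                creditsAmount: 42,
                description: 'Generated 42 words in playground',
                customerRealIp: '203.0.113.1',
            });
            console.info(spend.message);
        }
    }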