@promptbook/wizard 0.104.0-3 → 0.104.0-5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,11 +3,12 @@
3
3
  * Source of truth: `/apps/agents-server/src/database/schema.sql` *(do not edit table structure here manually)*
4
4
  *
5
5
  * [💽] Prompt:
6
- * Re-generate this sub-schema
6
+ * Re-generate this sub-schema from `/apps/agents-server/src/database/schema.ts` *(which was generated from `/apps/agents-server/src/database/migrations/*.sql`)*
7
+ * `AgentsDatabaseSchema` is strict subset of `AgentsServerDatabase`
7
8
  * Generate Supabase TypeScript schema which is a subset of `AgentsServerDatabase`
8
9
  * containing only tables `Agent` and `AgentHistory`
9
10
  *
10
- * NOTE: This file intentionally omits all other tables (EnvironmentVariable, ChatHistory, ChatFeedback)
11
+ * NOTE: This file intentionally omits all other tables (`Metadata`, `ChatHistory`, `ChatFeedback`, `User`, `LlmCache`, etc.)
11
12
  * and any extra schemas (e.g. `graphql_public`) to remain a strict subset.
12
13
  */
13
14
  export type Json = string | number | boolean | null | {
@@ -31,6 +32,7 @@ export type AgentsDatabaseSchema = {
31
32
  preparedModelRequirements: Json | null;
32
33
  preparedExternals: Json | null;
33
34
  deletedAt: string | null;
35
+ visibility: 'PUBLIC' | 'PRIVATE';
34
36
  };
35
37
  Insert: {
36
38
  id?: number;
@@ -46,6 +48,7 @@ export type AgentsDatabaseSchema = {
46
48
  preparedModelRequirements?: Json | null;
47
49
  preparedExternals?: Json | null;
48
50
  deletedAt?: string | null;
51
+ visibility?: 'PUBLIC' | 'PRIVATE';
49
52
  };
50
53
  Update: {
51
54
  id?: number;
@@ -61,6 +64,7 @@ export type AgentsDatabaseSchema = {
61
64
  preparedModelRequirements?: Json | null;
62
65
  preparedExternals?: Json | null;
63
66
  deletedAt?: string | null;
67
+ visibility?: 'PUBLIC' | 'PRIVATE';
64
68
  };
65
69
  Relationships: [];
66
70
  };
@@ -92,21 +96,20 @@ export type AgentsDatabaseSchema = {
92
96
  agentSource?: string;
93
97
  promptbookEngineVersion?: string;
94
98
  };
95
- Relationships: [];
99
+ Relationships: [
100
+ {
101
+ foreignKeyName: 'AgentHistory_agentName_fkey';
102
+ columns: ['agentName'];
103
+ referencedRelation: 'Agent';
104
+ referencedColumns: ['agentName'];
105
+ }
106
+ ];
96
107
  };
97
108
  };
98
- Views: {
99
- [_ in never]: never;
100
- };
101
- Functions: {
102
- [_ in never]: never;
103
- };
104
- Enums: {
105
- [_ in never]: never;
106
- };
107
- CompositeTypes: {
108
- [_ in never]: never;
109
- };
109
+ Views: Record<string, never>;
110
+ Functions: Record<string, never>;
111
+ Enums: Record<string, never>;
112
+ CompositeTypes: Record<string, never>;
110
113
  };
111
114
  };
112
115
  type PublicSchema = AgentsDatabaseSchema[Extract<keyof AgentsDatabaseSchema, 'public'>];
@@ -1,8 +1,8 @@
1
1
  import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
2
2
  import type { AvailableModel } from '../../execution/AvailableModel';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
5
- import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
5
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
6
6
  import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
7
7
  /**
8
8
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
43
43
  * Calls the best available embedding model
44
44
  */
45
45
  callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
46
+ /**
47
+ * Calls the best available image generation model
48
+ */
49
+ callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
46
50
  /**
47
51
  * Calls the best available model
48
52
  *
@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
46
46
  private callCommonModel;
47
47
  }
48
48
  /**
49
+ * TODO: !!!! Deprecate pipeline server and all of its components
49
50
  * TODO: Maybe use `$exportJson`
50
51
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
51
52
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.104.0-2`).
18
+ * It follows semantic versioning (e.g., `0.104.0-4`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/wizard",
3
- "version": "0.104.0-3",
3
+ "version": "0.104.0-5",
4
4
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -95,7 +95,7 @@
95
95
  "module": "./esm/index.es.js",
96
96
  "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
97
97
  "peerDependencies": {
98
- "@promptbook/core": "0.104.0-3"
98
+ "@promptbook/core": "0.104.0-5"
99
99
  },
100
100
  "dependencies": {
101
101
  "@ai-sdk/deepseek": "0.1.17",
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
48
48
  * @generated
49
49
  * @see https://github.com/webgptorg/promptbook
50
50
  */
51
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-3';
51
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-5';
52
52
  /**
53
53
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
54
54
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2538,6 +2538,7 @@
2538
2538
  }
2539
2539
  }
2540
2540
  /**
2541
+ * TODO: !!!! Deprecate pipeline server and all of its components
2541
2542
  * TODO: Maybe use `$exportJson`
2542
2543
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
2543
2544
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -9429,6 +9430,15 @@
9429
9430
  return promptResult;
9430
9431
  };
9431
9432
  }
9433
+ if (llmTools.callImageGenerationModel !== undefined) {
9434
+ proxyTools.callImageGenerationModel = async (prompt) => {
9435
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
9436
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
9437
+ totalUsage = addUsage(totalUsage, promptResult.usage);
9438
+ spending.next(promptResult.usage);
9439
+ return promptResult;
9440
+ };
9441
+ }
9432
9442
  // <- Note: [🤖]
9433
9443
  return proxyTools;
9434
9444
  }
@@ -9538,6 +9548,12 @@
9538
9548
  callEmbeddingModel(prompt) {
9539
9549
  return this.callCommonModel(prompt);
9540
9550
  }
9551
+ /**
9552
+ * Calls the best available image generation model
9553
+ */
9554
+ callImageGenerationModel(prompt) {
9555
+ return this.callCommonModel(prompt);
9556
+ }
9541
9557
  // <- Note: [🤖]
9542
9558
  /**
9543
9559
  * Calls the best available model
@@ -9564,6 +9580,11 @@
9564
9580
  continue llm;
9565
9581
  }
9566
9582
  return await llmExecutionTools.callEmbeddingModel(prompt);
9583
+ case 'IMAGE_GENERATION':
9584
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
9585
+ continue llm;
9586
+ }
9587
+ return await llmExecutionTools.callImageGenerationModel(prompt);
9567
9588
  // <- case [🤖]:
9568
9589
  default:
9569
9590
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -11267,8 +11288,9 @@
11267
11288
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
11268
11289
  break variant;
11269
11290
  case 'EMBEDDING':
11291
+ case 'IMAGE_GENERATION':
11270
11292
  throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
11271
- Embedding model can not be used in pipeline
11293
+ ${modelRequirements.modelVariant} model can not be used in pipeline
11272
11294
 
11273
11295
  This should be catched during parsing
11274
11296
 
@@ -19121,6 +19143,9 @@
19121
19143
  case 'EMBEDDING':
19122
19144
  promptResult = await llmTools.callEmbeddingModel(prompt);
19123
19145
  break variant;
19146
+ case 'IMAGE_GENERATION':
19147
+ promptResult = await llmTools.callImageGenerationModel(prompt);
19148
+ break variant;
19124
19149
  // <- case [🤖]:
19125
19150
  default:
19126
19151
  throw new PipelineExecutionError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -19157,12 +19182,13 @@
19157
19182
  }
19158
19183
  }
19159
19184
  catch (error) {
19185
+ assertsError(error);
19160
19186
  // If validation throws an unexpected error, don't cache
19161
19187
  shouldCache = false;
19162
19188
  if (isVerbose) {
19163
19189
  console.info('Not caching result due to validation error for key:', key, {
19164
19190
  content: promptResult.content,
19165
- validationError: error instanceof Error ? error.message : String(error),
19191
+ validationError: serializeError(error),
19166
19192
  });
19167
19193
  }
19168
19194
  }
@@ -19208,6 +19234,11 @@
19208
19234
  return /* not await */ callCommonModel(prompt);
19209
19235
  };
19210
19236
  }
19237
+ if (llmTools.callImageGenerationModel !== undefined) {
19238
+ proxyTools.callImageGenerationModel = async (prompt) => {
19239
+ return /* not await */ callCommonModel(prompt);
19240
+ };
19241
+ }
19211
19242
  // <- Note: [🤖]
19212
19243
  return proxyTools;
19213
19244
  }