@promptbook/node 0.65.0-2 → 0.65.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +244 -108
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/node.index.d.ts +0 -2
  6. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  7. package/esm/typings/src/_packages/types.index.d.ts +16 -2
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
  11. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
  17. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
  18. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
  19. package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
  20. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
  21. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  22. package/package.json +3 -2
  23. package/umd/index.umd.js +246 -112
  24. package/umd/index.umd.js.map +1 -1
  25. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
package/esm/index.es.js CHANGED
@@ -7,16 +7,17 @@ import parserHtml from 'prettier/parser-html';
  import hexEncoder from 'crypto-js/enc-hex';
  import sha256 from 'crypto-js/sha256';
  import { join } from 'path/posix';
+ import * as dotenv from 'dotenv';
+ import { io } from 'socket.io-client';
  import Anthropic from '@anthropic-ai/sdk';
  import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
  import OpenAI from 'openai';
- import * as dotenv from 'dotenv';
 
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.65.0-1';
+ var PROMPTBOOK_VERSION = '0.65.0-2';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
@@ -691,7 +692,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -6265,6 +6266,213 @@ function createCollectionFromDirectory(path, options) {
  * TODO: [🖇] What about symlinks? Maybe option isSymlinksFollowed
  */
 
+ /**
+ * This error type indicates that you try to use a feature that is not available in the current environment
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
+ __extends(EnvironmentMismatchError, _super);
+ function EnvironmentMismatchError(message) {
+ var _this = _super.call(this, message) || this;
+ _this.name = 'EnvironmentMismatchError';
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
+ return _this;
+ }
+ return EnvironmentMismatchError;
+ }(Error));
+
+ /**
+ * @@@
+ *
+ * @@@ .env
+ *
+ * It looks for environment variables:
+ * - `process.env.OPENAI_API_KEY`
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
+ *
+ * @returns @@@
+ * @public exported from `@promptbook/node`
+ */
+ function createLlmToolsFromConfigurationFromEnv() {
+ if (!isRunningInNode()) {
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
+ }
+ var llmToolsConfiguration = [];
+ if (typeof process.env.OPENAI_API_KEY === 'string') {
+ llmToolsConfiguration.push({
+ title: 'OpenAI (from env)',
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
+ options: {
+ apiKey: process.env.OPENAI_API_KEY,
+ },
+ });
+ }
+ if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
+ llmToolsConfiguration.push({
+ title: 'Claude (from env)',
+ packageName: '@promptbook/antrhopic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: {
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
+ },
+ });
+ }
+ // <- Note: [🦑] Add here new LLM provider
+ return llmToolsConfiguration;
+ }
+ /**
+ * TODO: Add Azure OpenAI
+ * TODO: [🧠][🍛]
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * TODO: This should be maybe not under `_common` but under `utils`
+ * TODO: [🧠] Maybe pass env as argument
+ */
+
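The new `createLlmToolsFromConfigurationFromEnv` only assembles a configuration array; it does not instantiate any client. A minimal Node.js usage sketch, assuming the keys sit in a local `.env` (`createLlmToolsFromEnv`, also exported from `@promptbook/node`, presumably wraps this step together with `createLlmToolsFromConfiguration`):

```ts
import * as dotenv from 'dotenv';
import { createLlmToolsFromConfigurationFromEnv } from '@promptbook/node';

// Load OPENAI_API_KEY and/or ANTHROPIC_CLAUDE_API_KEY into process.env
dotenv.config();

// Build the configuration array from whatever keys are present
const configuration = createLlmToolsFromConfigurationFromEnv();

// e.g. [ { title: 'OpenAI (from env)', packageName: '@promptbook/openai',
//          className: 'OpenAiExecutionTools', options: { apiKey: '...' } } ]
console.info(configuration.map(({ title }) => title));
```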
+ /**
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ *
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
+ * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ *
+ * @see https://github.com/webgptorg/promptbook#remote-server
+ * @public exported from `@promptbook/remote-client`
+ */
+ var RemoteLlmExecutionTools = /** @class */ (function () {
+ function RemoteLlmExecutionTools(options) {
+ this.options = options;
+ }
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
+ get: function () {
+ // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
+ return 'Remote server';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models by your remote server';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Creates a connection to the remote proxy server.
+ */
+ RemoteLlmExecutionTools.prototype.makeConnection = function () {
+ var _this = this;
+ return new Promise(function (resolve, reject) {
+ var socket = io(_this.options.remoteUrl, {
+ path: _this.options.path,
+ // path: `${this.remoteUrl.pathname}/socket.io`,
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
+ });
+ // console.log('Connecting to', this.options.remoteUrl.href, { socket });
+ socket.on('connect', function () {
+ resolve(socket);
+ });
+ setTimeout(function () {
+ reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
+ }, 60000 /* <- TODO: Timeout to config */);
+ });
+ };
+ /**
+ * Calls remote proxy server to use a chat model
+ */
+ RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDD8B Remote callChatModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ /**
+ * Calls remote proxy server to use a completion model
+ */
+ RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDCAC Remote callCompletionModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ /**
+ * Calls remote proxy server to use a embedding model
+ */
+ RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ // <- Note: [🤖] callXxxModel
+ /**
+ * Calls remote proxy server to use both completion or chat model
+ */
+ RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var socket, promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.makeConnection()];
+ case 1:
+ socket = _a.sent();
+ if (this.options.isAnonymous) {
+ socket.emit('request', {
+ llmToolsConfiguration: this.options.llmToolsConfiguration,
+ prompt: prompt,
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+ });
+ }
+ else {
+ socket.emit('request', {
+ clientId: this.options.clientId,
+ prompt: prompt,
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+ });
+ }
+ return [4 /*yield*/, new Promise(function (resolve, reject) {
+ socket.on('response', function (response) {
+ resolve(response.promptResult);
+ socket.disconnect();
+ });
+ socket.on('error', function (error) {
+ reject(new PipelineExecutionError(error.errorMessage));
+ socket.disconnect();
+ });
+ })];
+ case 2:
+ promptResult = _a.sent();
+ socket.disconnect();
+ return [2 /*return*/, promptResult];
+ }
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ */
+ RemoteLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, [
+ /* !!! */
+ ]];
+ });
+ });
+ };
+ return RemoteLlmExecutionTools;
+ }());
+ /**
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
+ * TODO: [🍓] Allow to list compatible models with each variant
+ * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
+ */
+
  /**
  * Helper of usage compute
  *
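For orientation, a hedged client-side sketch of the new `RemoteLlmExecutionTools`: the option names (`remoteUrl`, `path`, `isAnonymous`, `llmToolsConfiguration`, `clientId`) are taken from the code above, their exact types live in the new `RemoteLlmExecutionToolsOptions.d.ts` (file 17 in the list), and the server address is hypothetical:

```ts
import { RemoteLlmExecutionTools } from '@promptbook/remote-client';

// Anonymous mode: the client ships its own provider configuration and the
// remote server only proxies the calls (no clientId needed).
const llmTools = new RemoteLlmExecutionTools({
    remoteUrl: 'https://example.com', // <- hypothetical server URL
    path: '/socket.io',               // <- hypothetical socket.io path
    isAnonymous: true,
    llmToolsConfiguration: [
        {
            title: 'Anthropic Claude (proxied)',
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools',
            options: { apiKey: 'sk-ant-api03-...' },
        },
    ],
});

// Each call opens a socket.io connection (currently 'polling' transport only),
// emits a 'request' and resolves with the 'response' payload's promptResult.
const result = await llmTools.callChatModel(prompt); // `prompt` object elided here
```

In non-anonymous mode the same class sends `clientId` instead of `llmToolsConfiguration`, and the server supplies the API keys.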
@@ -6394,6 +6602,7 @@ var ANTHROPIC_CLAUDE_MODELS = [
  * Execution Tools for calling Anthropic Claude API.
  *
  * @public exported from `@promptbook/anthropic-claude`
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
  */
  var AnthropicClaudeExecutionTools = /** @class */ (function () {
  /**
@@ -6402,11 +6611,12 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * @param options which are relevant are directly passed to the Anthropic Claude client
  */
  function AnthropicClaudeExecutionTools(options) {
- if (options === void 0) { options = {}; }
+ if (options === void 0) { options = { isProxied: false }; }
  this.options = options;
  // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
  var anthropicOptions = __assign({}, options);
  delete anthropicOptions.isVerbose;
+ delete anthropicOptions.isProxied;
  this.client = new Anthropic(anthropicOptions);
  }
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
@@ -6609,8 +6819,32 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🍜] Auto use anonymous server in browser
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
+ */
+
+ /**
+ * Execution Tools for calling Anthropic Claude API.
+ *
+ * @public exported from `@promptbook/anthropic-claude`
+ */
+ function createAnthropicClaudeExecutionTools(options) {
+ if (options.isProxied) {
+ return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
+ {
+ title: 'Anthropic Claude (proxied)',
+ packageName: '@promptbook/anthropic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: __assign(__assign({}, options), { isProxied: false }),
+ },
+ ] }));
+ }
+ return new AnthropicClaudeExecutionTools(options);
+ }
+ /**
+ * TODO: !!!!!! Make this with all LLM providers
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  */
 
  /**
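The new factory is the replacement that the `@deprecated` tag above points to. A sketch of both modes, with option names taken from the code above (the remote fields are hypothetical values; per the TODO [🍜], a default anonymous server is not wired up yet):

```ts
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Direct mode: equivalent to the deprecated `new AnthropicClaudeExecutionTools(...)`
const direct = createAnthropicClaudeExecutionTools({
    isProxied: false,
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
});

// Proxied mode: the same options are wrapped into an anonymous
// RemoteLlmExecutionTools configuration; the API key is forwarded to the
// remote server instead of being used to call Anthropic directly.
const proxied = createAnthropicClaudeExecutionTools({
    isProxied: true,
    apiKey: 'sk-ant-api03-...',
    remoteUrl: 'https://example.com', // <- hypothetical
    path: '/socket.io',               // <- hypothetical
});
```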
@@ -7552,47 +7786,15 @@ var OpenAiExecutionTools = /** @class */ (function () {
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
 
- /**
- * @public exported from `@promptbook/node`
- */
- var LLM_CONFIGURATION_BOILERPLATES = [
- {
- title: 'Open AI',
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- options: {
- apiKey: 'sk-',
- },
- },
- {
- title: 'Anthropic Claude',
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: {
- apiKey: 'sk-ant-api03-',
- },
- },
- {
- title: 'Azure Open AI',
- packageName: '@promptbook/azure-openai',
- className: 'AzureOpenAiExecutionTools',
- options: {
- // TODO: !!!> resourceName
- // TODO: !!!> deploymentName
- apiKey: 'sk-',
- },
- },
- // <- Note: [🦑] Add here new LLM provider
- ];
  /**
  * @private internal type for `createLlmToolsFromConfiguration`
  */
  var EXECUTION_TOOLS_CLASSES = {
- getOpenAiExecutionTools: function (options) {
+ createOpenAiExecutionTools: function (options) {
  return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
  },
- getAnthropicClaudeExecutionTools: function (options) { return new AnthropicClaudeExecutionTools(options); },
- getAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
+ createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
+ createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
  // <- Note: [🦑] Add here new LLM provider
  };
  /**
@@ -7600,72 +7802,6 @@ var EXECUTION_TOOLS_CLASSES = {
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
  */
 
- /**
- * This error type indicates that you try to use a feature that is not available in the current environment
- *
- * @public exported from `@promptbook/core`
- */
- var EnvironmentMismatchError = /** @class */ (function (_super) {
- __extends(EnvironmentMismatchError, _super);
- function EnvironmentMismatchError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'EnvironmentMismatchError';
- Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
- return _this;
- }
- return EnvironmentMismatchError;
- }(Error));
-
- /**
- * @@@
- *
- * @@@ .env
- *
- * It looks for environment variables:
- * - `process.env.OPENAI_API_KEY`
- * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
- *
- * @returns @@@
- * @public exported from `@promptbook/node`
- */
- function createLlmToolsFromConfigurationFromEnv() {
- if (!isRunningInNode()) {
- throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
- }
- var llmToolsConfiguration = [];
- if (typeof process.env.OPENAI_API_KEY === 'string') {
- llmToolsConfiguration.push({
- title: 'OpenAI (from env)',
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- options: {
- apiKey: process.env.OPENAI_API_KEY,
- },
- });
- }
- if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
- llmToolsConfiguration.push({
- title: 'Claude (from env)',
- packageName: '@promptbook/antrhopic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: {
- apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
- },
- });
- }
- // <- Note: [🦑] Add here new LLM provider
- return llmToolsConfiguration;
- }
- /**
- * TODO: Add Azure OpenAI
- * TODO: [🧠][🍛]
- * TODO: [🧠] Is there some meaningfull way how to test this util
- * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
- * TODO: This should be maybe not under `_common` but under `utils`
- * TODO: [🧠] Maybe pass env as argument
- */
-
  /**
  * @@@
  *
@@ -7679,7 +7815,7 @@ function createLlmToolsFromConfiguration(configuration, options) {
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
  dotenv.config();
  var llmTools = configuration.map(function (llmConfiguration) {
- return EXECUTION_TOOLS_CLASSES["get".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
+ return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
  });
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
  }
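Since `EXECUTION_TOOLS_CLASSES` now registers `create*` keys, the dispatch string changes from `"get" + className` to `"create" + className`. A small sketch of what the lookup resolves to, using a hypothetical configuration entry:

```ts
// Hypothetical entry, shaped like the output of createLlmToolsFromConfigurationFromEnv():
const llmConfiguration = {
    title: 'Claude (from env)',
    className: 'AnthropicClaudeExecutionTools',
    options: { apiKey: 'sk-ant-api03-...' },
};

// 'create' + 'AnthropicClaudeExecutionTools' === 'createAnthropicClaudeExecutionTools',
// which is exactly the key registered in EXECUTION_TOOLS_CLASSES above
const factoryKey = `create${llmConfiguration.className}`;
```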
@@ -7855,5 +7991,5 @@ var FilesStorage = /** @class */ (function () {
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
  */
 
- export { FilesStorage, LLM_CONFIGURATION_BOILERPLATES, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromConfigurationFromEnv, createLlmToolsFromEnv };
+ export { FilesStorage, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromConfigurationFromEnv, createLlmToolsFromEnv };
  //# sourceMappingURL=index.es.js.map
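The export list confirms that `LLM_CONFIGURATION_BOILERPLATES` is gone from `@promptbook/node`'s public surface. A hypothetical consumer would migrate roughly like this:

```ts
// Before (0.65.0-2):
// import { LLM_CONFIGURATION_BOILERPLATES } from '@promptbook/node';

// After (0.65.0-3): derive the configuration from environment variables instead
import { createLlmToolsFromConfigurationFromEnv } from '@promptbook/node';

const configuration = createLlmToolsFromConfigurationFromEnv();
```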