@promptbook/node 0.65.0-2 → 0.65.0-4

Files changed (25)
  1. package/esm/index.es.js +245 -109
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/node.index.d.ts +0 -2
  6. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  7. package/esm/typings/src/_packages/types.index.d.ts +16 -2
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
  11. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
  17. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
  18. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
  19. package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
  20. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
  21. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  22. package/package.json +3 -2
  23. package/umd/index.umd.js +247 -113
  24. package/umd/index.umd.js.map +1 -1
  25. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
package/esm/index.es.js CHANGED
@@ -7,16 +7,17 @@ import parserHtml from 'prettier/parser-html';
  import hexEncoder from 'crypto-js/enc-hex';
  import sha256 from 'crypto-js/sha256';
  import { join } from 'path/posix';
+ import * as dotenv from 'dotenv';
+ import { io } from 'socket.io-client';
  import Anthropic from '@anthropic-ai/sdk';
  import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
  import OpenAI from 'openai';
- import * as dotenv from 'dotenv';

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.65.0-1';
+ var PROMPTBOOK_VERSION = '0.65.0-3';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -691,7 +692,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-1",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -6265,6 +6266,214 @@ function createCollectionFromDirectory(path, options) {
  * TODO: [🖇] What about symlinks? Maybe option isSymlinksFollowed
  */

+ /**
+ * This error type indicates that you try to use a feature that is not available in the current environment
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
+ __extends(EnvironmentMismatchError, _super);
+ function EnvironmentMismatchError(message) {
+ var _this = _super.call(this, message) || this;
+ _this.name = 'EnvironmentMismatchError';
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
+ return _this;
+ }
+ return EnvironmentMismatchError;
+ }(Error));
+
+ /**
+ * @@@
+ *
+ * @@@ .env
+ *
+ * It looks for environment variables:
+ * - `process.env.OPENAI_API_KEY`
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
+ *
+ * @returns @@@
+ * @public exported from `@promptbook/node`
+ */
+ function createLlmToolsFromConfigurationFromEnv() {
+ if (!isRunningInNode()) {
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
+ }
+ dotenv.config();
+ var llmToolsConfiguration = [];
+ if (typeof process.env.OPENAI_API_KEY === 'string') {
+ llmToolsConfiguration.push({
+ title: 'OpenAI (from env)',
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
+ options: {
+ apiKey: process.env.OPENAI_API_KEY,
+ },
+ });
+ }
+ if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
+ llmToolsConfiguration.push({
+ title: 'Claude (from env)',
+ packageName: '@promptbook/antrhopic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: {
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
+ },
+ });
+ }
+ // <- Note: [🦑] Add here new LLM provider
+ return llmToolsConfiguration;
+ }
+ /**
+ * TODO: Add Azure OpenAI
+ * TODO: [🧠][🍛]
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * TODO: This should be maybe not under `_common` but under `utils`
+ * TODO: [🧠] Maybe pass env as argument
+ */
+
+ /**
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ *
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
+ * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ *
+ * @see https://github.com/webgptorg/promptbook#remote-server
+ * @public exported from `@promptbook/remote-client`
+ */
+ var RemoteLlmExecutionTools = /** @class */ (function () {
+ function RemoteLlmExecutionTools(options) {
+ this.options = options;
+ }
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
+ get: function () {
+ // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
+ return 'Remote server';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models by your remote server';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Creates a connection to the remote proxy server.
+ */
+ RemoteLlmExecutionTools.prototype.makeConnection = function () {
+ var _this = this;
+ return new Promise(function (resolve, reject) {
+ var socket = io(_this.options.remoteUrl, {
+ path: _this.options.path,
+ // path: `${this.remoteUrl.pathname}/socket.io`,
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
+ });
+ // console.log('Connecting to', this.options.remoteUrl.href, { socket });
+ socket.on('connect', function () {
+ resolve(socket);
+ });
+ setTimeout(function () {
+ reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
+ }, 60000 /* <- TODO: Timeout to config */);
+ });
+ };
+ /**
+ * Calls remote proxy server to use a chat model
+ */
+ RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDD8B Remote callChatModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ /**
+ * Calls remote proxy server to use a completion model
+ */
+ RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDCAC Remote callCompletionModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ /**
+ * Calls remote proxy server to use a embedding model
+ */
+ RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ // <- Note: [🤖] callXxxModel
+ /**
+ * Calls remote proxy server to use both completion or chat model
+ */
+ RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var socket, promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.makeConnection()];
+ case 1:
+ socket = _a.sent();
+ if (this.options.isAnonymous) {
+ socket.emit('request', {
+ llmToolsConfiguration: this.options.llmToolsConfiguration,
+ prompt: prompt,
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+ });
+ }
+ else {
+ socket.emit('request', {
+ clientId: this.options.clientId,
+ prompt: prompt,
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+ });
+ }
+ return [4 /*yield*/, new Promise(function (resolve, reject) {
+ socket.on('response', function (response) {
+ resolve(response.promptResult);
+ socket.disconnect();
+ });
+ socket.on('error', function (error) {
+ reject(new PipelineExecutionError(error.errorMessage));
+ socket.disconnect();
+ });
+ })];
+ case 2:
+ promptResult = _a.sent();
+ socket.disconnect();
+ return [2 /*return*/, promptResult];
+ }
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ */
+ RemoteLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, [
+ /* !!! */
+ ]];
+ });
+ });
+ };
+ return RemoteLlmExecutionTools;
+ }());
+ /**
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
+ * TODO: [🍓] Allow to list compatible models with each variant
+ * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
+ */
+
  /**
  * Helper of usage compute
  *
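Worth noting about the block added above: `createLlmToolsFromConfigurationFromEnv` only *describes* the providers it finds in `.env`; instantiation happens later in `createLlmToolsFromConfiguration` (see the hunk near the end of this file). A minimal usage sketch, assuming only the `@promptbook/node` export visible in this diff:

```ts
import { createLlmToolsFromConfigurationFromEnv } from '@promptbook/node';

// Reads .env via dotenv.config() and returns a plain configuration array, e.g.:
// [{ title: 'OpenAI (from env)', packageName: '@promptbook/openai',
//    className: 'OpenAiExecutionTools', options: { apiKey: 'sk-...' } }]
const configuration = createLlmToolsFromConfigurationFromEnv();

// Because the configuration is plain data, RemoteLlmExecutionTools can forward
// it to an anonymous remote server in its `request` payload
// (see the `isAnonymous` branch of `callCommonModel` above).
console.info(configuration.map(({ title }) => title));
```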
@@ -6394,6 +6603,7 @@ var ANTHROPIC_CLAUDE_MODELS = [
  * Execution Tools for calling Anthropic Claude API.
  *
  * @public exported from `@promptbook/anthropic-claude`
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
  */
  var AnthropicClaudeExecutionTools = /** @class */ (function () {
  /**
@@ -6402,11 +6612,12 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * @param options which are relevant are directly passed to the Anthropic Claude client
  */
  function AnthropicClaudeExecutionTools(options) {
- if (options === void 0) { options = {}; }
+ if (options === void 0) { options = { isProxied: false }; }
  this.options = options;
  // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
  var anthropicOptions = __assign({}, options);
  delete anthropicOptions.isVerbose;
+ delete anthropicOptions.isProxied;
  this.client = new Anthropic(anthropicOptions);
  }
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
@@ -6609,8 +6820,32 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🍜] Auto use anonymous server in browser
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
+ */
+
+ /**
+ * Execution Tools for calling Anthropic Claude API.
+ *
+ * @public exported from `@promptbook/anthropic-claude`
+ */
+ function createAnthropicClaudeExecutionTools(options) {
+ if (options.isProxied) {
+ return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
+ {
+ title: 'Anthropic Claude (proxied)',
+ packageName: '@promptbook/anthropic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: __assign(__assign({}, options), { isProxied: false }),
+ },
+ ] }));
+ }
+ return new AnthropicClaudeExecutionTools(options);
+ }
+ /**
+ * TODO: !!!!!! Make this with all LLM providers
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  */

  /**
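The new factory above is the entry point for the proxied mode. A hedged sketch of both branches, with option names taken from this diff; the `remoteUrl` and `path` values are placeholders, since the default anonymous server is still an open TODO `[🍜]`:

```ts
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Direct mode: equivalent to `new AnthropicClaudeExecutionTools(options)`
// (the class itself is now marked @deprecated in favour of this factory).
const direct = createAnthropicClaudeExecutionTools({
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY!,
});

// Proxied mode: returns a RemoteLlmExecutionTools with `isAnonymous: true`,
// so the key travels inside `llmToolsConfiguration` to the remote server
// instead of being used in the browser. URL and path below are placeholders.
const proxied = createAnthropicClaudeExecutionTools({
    isProxied: true,
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY!,
    remoteUrl: 'https://example.org' /* <- placeholder */,
    path: '/socket.io' /* <- placeholder */,
});
```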
@@ -7552,47 +7787,15 @@ var OpenAiExecutionTools = /** @class */ (function () {
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */

- /**
- * @public exported from `@promptbook/node`
- */
- var LLM_CONFIGURATION_BOILERPLATES = [
- {
- title: 'Open AI',
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- options: {
- apiKey: 'sk-',
- },
- },
- {
- title: 'Anthropic Claude',
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: {
- apiKey: 'sk-ant-api03-',
- },
- },
- {
- title: 'Azure Open AI',
- packageName: '@promptbook/azure-openai',
- className: 'AzureOpenAiExecutionTools',
- options: {
- // TODO: !!!> resourceName
- // TODO: !!!> deploymentName
- apiKey: 'sk-',
- },
- },
- // <- Note: [🦑] Add here new LLM provider
- ];
  /**
  * @private internal type for `createLlmToolsFromConfiguration`
  */
  var EXECUTION_TOOLS_CLASSES = {
- getOpenAiExecutionTools: function (options) {
+ createOpenAiExecutionTools: function (options) {
  return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
  },
- getAnthropicClaudeExecutionTools: function (options) { return new AnthropicClaudeExecutionTools(options); },
- getAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
+ createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
+ createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
  // <- Note: [🦑] Add here new LLM provider
  };
  /**
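The `[🦑]` marker is the extension point, and the `get` to `create` key rename matters because `createLlmToolsFromConfiguration` (later hunk) builds the lookup key from a configuration entry's `className`. A hypothetical illustration of that contract, with types invented here for clarity:

```ts
// Invented helper type; the real typings live in types.index.d.ts (also changed in this diff).
type LlmConfigurationEntry = {
    title: string;
    packageName: string;
    className: string; // e.g. 'AnthropicClaudeExecutionTools'
    options: Record<string, unknown>;
};

// The dispatch contract: factory key = 'create' + className.
function resolveFactoryKey(entry: LlmConfigurationEntry): string {
    return `create${entry.className}`; // -> 'createAnthropicClaudeExecutionTools'
}
```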
@@ -7600,72 +7803,6 @@ var EXECUTION_TOOLS_CLASSES = {
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
  */

- /**
- * This error type indicates that you try to use a feature that is not available in the current environment
- *
- * @public exported from `@promptbook/core`
- */
- var EnvironmentMismatchError = /** @class */ (function (_super) {
- __extends(EnvironmentMismatchError, _super);
- function EnvironmentMismatchError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'EnvironmentMismatchError';
- Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
- return _this;
- }
- return EnvironmentMismatchError;
- }(Error));
-
- /**
- * @@@
- *
- * @@@ .env
- *
- * It looks for environment variables:
- * - `process.env.OPENAI_API_KEY`
- * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
- *
- * @returns @@@
- * @public exported from `@promptbook/node`
- */
- function createLlmToolsFromConfigurationFromEnv() {
- if (!isRunningInNode()) {
- throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
- }
- var llmToolsConfiguration = [];
- if (typeof process.env.OPENAI_API_KEY === 'string') {
- llmToolsConfiguration.push({
- title: 'OpenAI (from env)',
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- options: {
- apiKey: process.env.OPENAI_API_KEY,
- },
- });
- }
- if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
- llmToolsConfiguration.push({
- title: 'Claude (from env)',
- packageName: '@promptbook/antrhopic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: {
- apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
- },
- });
- }
- // <- Note: [🦑] Add here new LLM provider
- return llmToolsConfiguration;
- }
- /**
- * TODO: Add Azure OpenAI
- * TODO: [🧠][🍛]
- * TODO: [🧠] Is there some meaningfull way how to test this util
- * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
- * TODO: This should be maybe not under `_common` but under `utils`
- * TODO: [🧠] Maybe pass env as argument
- */
-
  /**
  * @@@
  *
@@ -7677,9 +7814,8 @@ function createLlmToolsFromConfigurationFromEnv() {
  function createLlmToolsFromConfiguration(configuration, options) {
  if (options === void 0) { options = {}; }
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
- dotenv.config();
  var llmTools = configuration.map(function (llmConfiguration) {
- return EXECUTION_TOOLS_CLASSES["get".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
+ return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
  });
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
  }
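Also visible here: `dotenv.config()` moved out of this function and into `createLlmToolsFromConfigurationFromEnv`, so calling it with an explicit configuration no longer touches `.env`. A hedged sketch of a direct call; the entry shape is taken from this diff, while the import path and the key value are assumptions:

```ts
// Assumed import path; only the compiled bundle is visible in this diff.
import { createLlmToolsFromConfiguration } from '@promptbook/core';

const llmTools = createLlmToolsFromConfiguration(
    [
        {
            title: 'Claude',
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools',
            options: { apiKey: 'sk-ant-api03-...' /* <- placeholder */ },
        },
    ],
    { isVerbose: true },
);
// Internally this resolves EXECUTION_TOOLS_CLASSES['createAnthropicClaudeExecutionTools'],
// i.e. the factory that may return a proxied RemoteLlmExecutionTools,
// and joins all instances via joinLlmExecutionTools.
```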
@@ -7855,5 +7991,5 @@ var FilesStorage = /** @class */ (function () {
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
  */

- export { FilesStorage, LLM_CONFIGURATION_BOILERPLATES, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromConfigurationFromEnv, createLlmToolsFromEnv };
+ export { FilesStorage, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromConfigurationFromEnv, createLlmToolsFromEnv };
  //# sourceMappingURL=index.es.js.map