@promptbook/node 0.65.0-1 → 0.65.0-3

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Files changed (26)
  1. package/esm/index.es.js +246 -113
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/node.index.d.ts +0 -4
  6. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  7. package/esm/typings/src/_packages/types.index.d.ts +16 -2
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfiguration.d.ts +2 -2
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
  11. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
  12. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
  13. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
  15. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
  17. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
  18. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
  19. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
  20. package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
  21. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
  22. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  23. package/package.json +3 -2
  24. package/umd/index.umd.js +248 -118
  25. package/umd/index.umd.js.map +1 -1
  26. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path/posix'), require('@anthropic-ai/sdk'), require('@azure/openai'), require('openai'), require('dotenv')) :
- typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path/posix', '@anthropic-ai/sdk', '@azure/openai', 'openai', 'dotenv'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.posix, global.Anthropic, global.openai, global.OpenAI, global.dotenv));
- })(this, (function (exports, colors, promises, path, spaceTrim, prettier, parserHtml, hexEncoder, sha256, posix, Anthropic, openai, OpenAI, dotenv) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path/posix'), require('dotenv'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('@azure/openai'), require('openai')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path/posix', 'dotenv', 'socket.io-client', '@anthropic-ai/sdk', '@azure/openai', 'openai'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.posix, global.dotenv, global.socket_ioClient, global.Anthropic, global.openai, global.OpenAI));
+ })(this, (function (exports, colors, promises, path, spaceTrim, prettier, parserHtml, hexEncoder, sha256, posix, dotenv, socket_ioClient, Anthropic, openai, OpenAI) { 'use strict';

  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

@@ -29,15 +29,15 @@
  var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
+ var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
  var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
  var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
- var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.65.0-0';
+ var PROMPTBOOK_VERSION = '0.65.0-2';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -712,7 +712,7 @@
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-2",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -6286,6 +6286,213 @@
  * TODO: [🖇] What about symlinks? Maybe option isSymlinksFollowed
  */

+ /**
+ * This error type indicates that you try to use a feature that is not available in the current environment
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
+ __extends(EnvironmentMismatchError, _super);
+ function EnvironmentMismatchError(message) {
+ var _this = _super.call(this, message) || this;
+ _this.name = 'EnvironmentMismatchError';
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
+ return _this;
+ }
+ return EnvironmentMismatchError;
+ }(Error));
+
+ /**
+ * @@@
+ *
+ * @@@ .env
+ *
+ * It looks for environment variables:
+ * - `process.env.OPENAI_API_KEY`
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
+ *
+ * @returns @@@
+ * @public exported from `@promptbook/node`
+ */
+ function createLlmToolsFromConfigurationFromEnv() {
+ if (!isRunningInNode()) {
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
+ }
+ var llmToolsConfiguration = [];
+ if (typeof process.env.OPENAI_API_KEY === 'string') {
+ llmToolsConfiguration.push({
+ title: 'OpenAI (from env)',
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
+ options: {
+ apiKey: process.env.OPENAI_API_KEY,
+ },
+ });
+ }
+ if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
+ llmToolsConfiguration.push({
+ title: 'Claude (from env)',
+ packageName: '@promptbook/antrhopic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: {
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
+ },
+ });
+ }
+ // <- Note: [🦑] Add here new LLM provider
+ return llmToolsConfiguration;
+ }
+ /**
+ * TODO: Add Azure OpenAI
+ * TODO: [🧠][🍛]
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * TODO: This should be maybe not under `_common` but under `utils`
+ * TODO: [🧠] Maybe pass env as argument
+ */
+
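The new `createLlmToolsFromConfigurationFromEnv` above only builds the configuration array; it pairs with `createLlmToolsFromConfiguration` (relocated to `@promptbook/core` later in this diff) to produce usable tools. A minimal sketch of the composition, assuming the import paths implied by the `@public exported from` annotations:

```ts
// Sketch only — import paths follow the `@public exported from` annotations
// in this diff and are not verified against the published typings.
import { createLlmToolsFromConfigurationFromEnv } from '@promptbook/node';
import { createLlmToolsFromConfiguration } from '@promptbook/core';

// Node.js only — throws EnvironmentMismatchError elsewhere.
// Picks up OPENAI_API_KEY and ANTHROPIC_CLAUDE_API_KEY from process.env.
const configuration = createLlmToolsFromConfigurationFromEnv();

// Instantiates one ExecutionTools entry per provider and joins them
// via joinLlmExecutionTools under the hood.
const llmTools = createLlmToolsFromConfiguration(configuration, { isVerbose: true });
```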
+ /**
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ *
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
+ * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ *
+ * @see https://github.com/webgptorg/promptbook#remote-server
+ * @public exported from `@promptbook/remote-client`
+ */
+ var RemoteLlmExecutionTools = /** @class */ (function () {
+ function RemoteLlmExecutionTools(options) {
+ this.options = options;
+ }
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
+ get: function () {
+ // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
+ return 'Remote server';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models by your remote server';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Creates a connection to the remote proxy server.
+ */
+ RemoteLlmExecutionTools.prototype.makeConnection = function () {
+ var _this = this;
+ return new Promise(function (resolve, reject) {
+ var socket = socket_ioClient.io(_this.options.remoteUrl, {
+ path: _this.options.path,
+ // path: `${this.remoteUrl.pathname}/socket.io`,
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
+ });
+ // console.log('Connecting to', this.options.remoteUrl.href, { socket });
+ socket.on('connect', function () {
+ resolve(socket);
+ });
+ setTimeout(function () {
+ reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
+ }, 60000 /* <- TODO: Timeout to config */);
+ });
+ };
+ /**
+ * Calls remote proxy server to use a chat model
+ */
+ RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDD8B Remote callChatModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ /**
+ * Calls remote proxy server to use a completion model
+ */
+ RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDCAC Remote callCompletionModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ /**
+ * Calls remote proxy server to use a embedding model
+ */
+ RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
+ if (this.options.isVerbose) {
+ console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+ }
+ return /* not await */ this.callCommonModel(prompt);
+ };
+ // <- Note: [🤖] callXxxModel
+ /**
+ * Calls remote proxy server to use both completion or chat model
+ */
+ RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var socket, promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.makeConnection()];
+ case 1:
+ socket = _a.sent();
+ if (this.options.isAnonymous) {
+ socket.emit('request', {
+ llmToolsConfiguration: this.options.llmToolsConfiguration,
+ prompt: prompt,
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+ });
+ }
+ else {
+ socket.emit('request', {
+ clientId: this.options.clientId,
+ prompt: prompt,
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+ });
+ }
+ return [4 /*yield*/, new Promise(function (resolve, reject) {
+ socket.on('response', function (response) {
+ resolve(response.promptResult);
+ socket.disconnect();
+ });
+ socket.on('error', function (error) {
+ reject(new PipelineExecutionError(error.errorMessage));
+ socket.disconnect();
+ });
+ })];
+ case 2:
+ promptResult = _a.sent();
+ socket.disconnect();
+ return [2 /*return*/, promptResult];
+ }
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ */
+ RemoteLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, [
+ /* !!! */
+ ]];
+ });
+ });
+ };
+ return RemoteLlmExecutionTools;
+ }());
+ /**
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
+ * TODO: [🍓] Allow to list compatible models with each variant
+ * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
+ */
+
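The wire protocol above is small: one `request` emit carrying either a `clientId` or, in the new anonymous mode, a full `llmToolsConfiguration`, answered by a single `response` or `error` event. A hedged client-side sketch of the two modes; the URL, path, and ids are placeholders, not library defaults:

```ts
// Sketch of the two connection modes visible in `callCommonModel` above.
import { RemoteLlmExecutionTools } from '@promptbook/remote-client';

// Classic mode: the server holds the API keys; the client only identifies itself.
const viaServer = new RemoteLlmExecutionTools({
    remoteUrl: 'https://promptbook.example.com', // <- placeholder
    path: '/socket.io', // <- placeholder
    clientId: 'my-app', // <- placeholder
});

// Anonymous mode (new in this release): the client ships its own provider
// configuration with every request instead of a clientId.
const anonymous = new RemoteLlmExecutionTools({
    remoteUrl: 'https://promptbook.example.com', // <- placeholder
    path: '/socket.io', // <- placeholder
    isAnonymous: true,
    llmToolsConfiguration: [
        {
            title: 'Claude (anonymous)',
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools',
            options: { apiKey: 'sk-ant-api03-...' }, // <- placeholder
        },
    ],
});
```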
  /**
  * Helper of usage compute
  *
@@ -6415,6 +6622,7 @@
  * Execution Tools for calling Anthropic Claude API.
  *
  * @public exported from `@promptbook/anthropic-claude`
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
  */
  var AnthropicClaudeExecutionTools = /** @class */ (function () {
  /**
@@ -6423,11 +6631,12 @@
  * @param options which are relevant are directly passed to the Anthropic Claude client
  */
  function AnthropicClaudeExecutionTools(options) {
- if (options === void 0) { options = {}; }
+ if (options === void 0) { options = { isProxied: false }; }
  this.options = options;
  // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
  var anthropicOptions = __assign({}, options);
  delete anthropicOptions.isVerbose;
+ delete anthropicOptions.isProxied;
  this.client = new Anthropic__default["default"](anthropicOptions);
  }
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
@@ -6630,8 +6839,32 @@
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🍜] Auto use anonymous server in browser
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
+ */
+
+ /**
+ * Execution Tools for calling Anthropic Claude API.
+ *
+ * @public exported from `@promptbook/anthropic-claude`
+ */
+ function createAnthropicClaudeExecutionTools(options) {
+ if (options.isProxied) {
+ return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
+ {
+ title: 'Anthropic Claude (proxied)',
+ packageName: '@promptbook/anthropic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: __assign(__assign({}, options), { isProxied: false }),
+ },
+ ] }));
+ }
+ return new AnthropicClaudeExecutionTools(options);
+ }
+ /**
+ * TODO: !!!!!! Make this with all LLM providers
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  */
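With the constructor now deprecated, the factory above becomes the intended entry point; `isProxied: true` reuses `RemoteLlmExecutionTools` in the anonymous mode shown earlier. A sketch — note the [🍜] TODO above: a default anonymous `remoteUrl`/`path` is not wired up yet, so the remote options here are assumptions:

```ts
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Direct mode — equivalent to the deprecated `new AnthropicClaudeExecutionTools(...)`.
const direct = createAnthropicClaudeExecutionTools({
    isProxied: false,
    apiKey: 'sk-ant-api03-...', // <- placeholder
});

// Proxied mode — the same options travel to an anonymous remote server
// (with `isProxied` stripped) instead of calling the Anthropic API locally.
const proxied = createAnthropicClaudeExecutionTools({
    isProxied: true,
    apiKey: 'sk-ant-api03-...', // <- placeholder
    remoteUrl: 'https://promptbook.example.com', // <- assumption, see TODO [🍜]
    path: '/socket.io', // <- assumption
});
```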
 
  /**
@@ -7573,47 +7806,15 @@
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */

- /**
- * @public exported from `@promptbook/node`
- */
- var LLM_CONFIGURATION_BOILERPLATES = [
- {
- title: 'Open AI',
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- options: {
- apiKey: 'sk-',
- },
- },
- {
- title: 'Anthropic Claude',
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: {
- apiKey: 'sk-ant-api03-',
- },
- },
- {
- title: 'Azure Open AI',
- packageName: '@promptbook/azure-openai',
- className: 'AzureOpenAiExecutionTools',
- options: {
- // TODO: !!!> resourceName
- // TODO: !!!> deploymentName
- apiKey: 'sk-',
- },
- },
- // <- Note: [🦑] Add here new LLM provider
- ];
  /**
  * @private internal type for `createLlmToolsFromConfiguration`
  */
  var EXECUTION_TOOLS_CLASSES = {
- getOpenAiExecutionTools: function (options) {
+ createOpenAiExecutionTools: function (options) {
  return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
  },
- getAnthropicClaudeExecutionTools: function (options) { return new AnthropicClaudeExecutionTools(options); },
- getAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
+ createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
+ createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
  // <- Note: [🦑] Add here new LLM provider
  };
  /**
@@ -7621,101 +7822,32 @@
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place

- /**
- * This error type indicates that you try to use a feature that is not available in the current environment
- *
- * @public exported from `@promptbook/core`
- */
- var EnvironmentMismatchError = /** @class */ (function (_super) {
- __extends(EnvironmentMismatchError, _super);
- function EnvironmentMismatchError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'EnvironmentMismatchError';
- Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
- return _this;
- }
- return EnvironmentMismatchError;
- }(Error));
-

  /**
  * @@@
  *
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
  *
  * @returns @@@
- * @public exported from `@promptbook/node`
+ * @public exported from `@promptbook/core`
  */
  function createLlmToolsFromConfiguration(configuration, options) {
  if (options === void 0) { options = {}; }
- if (!isRunningInNode()) {
- throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
- }
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
  dotenv__namespace.config();
  var llmTools = configuration.map(function (llmConfiguration) {
- return EXECUTION_TOOLS_CLASSES["get".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
+ return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
  });
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
  }
  /**
+ * TODO: [🎌] Togethere with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
  * TODO: [🧠][🎌] Dynamically install required providers
  * TODO: @@@ write discussion about this - wizzard
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
  * TODO: [🧠] Is there some meaningfull way how to test this util
- * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
  * TODO: This should be maybe not under `_common` but under `utils`
  */
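Since the registry is now keyed as `create${className}`, a configuration entry's `className` must match one of the three registered providers. A sketch of calling the relocated function with an explicit configuration, using the same entry shape that `createLlmToolsFromConfigurationFromEnv` builds; values are placeholders:

```ts
// Sketch only; the import path follows the new `@public exported from
// @promptbook/core` annotation above.
import { createLlmToolsFromConfiguration } from '@promptbook/core';

const llmTools = createLlmToolsFromConfiguration(
    [
        {
            title: 'OpenAI',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools', // <- dispatched via EXECUTION_TOOLS_CLASSES['create' + className]
            options: { apiKey: 'sk-...' }, // <- placeholder
        },
    ],
    { isVerbose: false },
);
```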
 
- /**
- * @@@
- *
- * @@@ .env
- *
- * It looks for environment variables:
- * - `process.env.OPENAI_API_KEY`
- * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
- *
- * @returns @@@
- * @public exported from `@promptbook/node`
- */
- function createLlmToolsFromConfigurationFromEnv() {
- if (!isRunningInNode()) {
- throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
- }
- var llmToolsConfiguration = [];
- if (typeof process.env.OPENAI_API_KEY === 'string') {
- llmToolsConfiguration.push({
- title: 'OpenAI (from env)',
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- options: {
- apiKey: process.env.OPENAI_API_KEY,
- },
- });
- }
- if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
- llmToolsConfiguration.push({
- title: 'Claude (from env)',
- packageName: '@promptbook/antrhopic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: {
- apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
- },
- });
- }
- // <- Note: [🦑] Add here new LLM provider
- return llmToolsConfiguration;
- }
- /**
- * TODO: Add Azure OpenAI
- * TODO: [🧠][🍛]
- * TODO: [🧠] Is there some meaningfull way how to test this util
- * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
- * TODO: This should be maybe not under `_common` but under `utils`
- * TODO: [🧠] Maybe pass env as argument
- */
-
  /**
  * @@@
  *
@@ -7880,10 +8012,8 @@
  */

  exports.FilesStorage = FilesStorage;
- exports.LLM_CONFIGURATION_BOILERPLATES = LLM_CONFIGURATION_BOILERPLATES;
  exports.PROMPTBOOK_VERSION = PROMPTBOOK_VERSION;
  exports.createCollectionFromDirectory = createCollectionFromDirectory;
- exports.createLlmToolsFromConfiguration = createLlmToolsFromConfiguration;
  exports.createLlmToolsFromConfigurationFromEnv = createLlmToolsFromConfigurationFromEnv;
  exports.createLlmToolsFromEnv = createLlmToolsFromEnv;
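Net effect of the export changes for consumers of `@promptbook/node`: `LLM_CONFIGURATION_BOILERPLATES` is gone, `createLlmToolsFromConfiguration` moved to `@promptbook/core`, and `createLlmToolsFromConfigurationFromEnv` stays. A sketch of the resulting imports, assuming the `@public exported from` annotations in this diff match the published entry points:

```ts
// Assumed import locations per this diff's annotations; not verified
// against the published 0.65.0-3 typings.
import { createLlmToolsFromConfigurationFromEnv, createLlmToolsFromEnv } from '@promptbook/node';
import { createLlmToolsFromConfiguration, EnvironmentMismatchError } from '@promptbook/core';
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';
import { RemoteLlmExecutionTools } from '@promptbook/remote-client';
```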