@promptbook/cli 0.65.0-2 → 0.65.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +178 -10
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/node.index.d.ts +0 -2
- package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
- package/esm/typings/src/_packages/types.index.d.ts +16 -2
- package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
- package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
- package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
- package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
- package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
- package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
- package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
- package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
- package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
- package/esm/typings/src/types/typeAliases.d.ts +6 -0
- package/package.json +2 -1
- package/umd/index.umd.js +181 -14
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
package/umd/index.umd.js
CHANGED
```diff
@@ -1,8 +1,8 @@
 (function (global, factory) {
-    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path/posix'), require('
-    typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path/posix', '
-    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.posix, global.
-    })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, posix,
+    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path/posix'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('@azure/openai'), require('openai'), require('dotenv'), require('glob-promise')) :
+    typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path/posix', 'socket.io-client', '@anthropic-ai/sdk', '@azure/openai', 'openai', 'dotenv', 'glob-promise'], factory) :
+    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.posix, global.socket_ioClient, global.Anthropic, global.openai, global.OpenAI, global.dotenv, global.glob));
+    })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, posix, socket_ioClient, Anthropic, openai, OpenAI, dotenv, glob) { 'use strict';

 function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

@@ -30,16 +30,16 @@
 var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
 var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
 var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
-var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
 var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
 var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
+var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
 var glob__default = /*#__PURE__*/_interopDefaultLegacy(glob);

 // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.65.0-
+var PROMPTBOOK_VERSION = '0.65.0-3';
 // TODO: !!!! List here all the versions and annotate + put into script

 /*! *****************************************************************************
@@ -860,7 +860,7 @@
 });
 }

-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-3",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
```
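For orientation, the minified `PipelineCollection` entries added above all share one shape. The following TypeScript sketch is inferred purely from the field names visible in that line; the interface name is ours, and the real declarations live in `@promptbook/types`:

```ts
// Sketch of one PipelineCollection entry, inferred from the minified line above.
// Field names come from the diff; the interface name itself is hypothetical.
interface PipelineJsonSketch {
    title: string;             // e.g. 'Prepare Knowledge from Markdown'
    pipelineUrl: string;       // e.g. 'https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md'
    promptbookVersion: string; // now '0.65.0-3'
    parameters: Array<{
        name: string;
        description: string;
        isInput: boolean;
        isOutput: boolean;
    }>;
    promptTemplates: Array<{
        blockType: 'PROMPT_TEMPLATE';
        name: string;
        title: string;
        modelRequirements: { modelVariant: 'CHAT'; modelName: string };
        content: string;        // the prompt itself, with {parameter} placeholders
        expectations?: { words?: { min: number; max: number } }; // only on some templates
        expectFormat?: 'JSON';                                   // only on some templates
        dependentParameterNames: ReadonlyArray<string>;
        resultingParameterName: string;
    }>;
    knowledgeSources: ReadonlyArray<unknown>;
    knowledgePieces: ReadonlyArray<unknown>;
    personas: ReadonlyArray<unknown>;
    preparations: ReadonlyArray<unknown>;
    sourceFile: string; // e.g. './promptbook-collection/prepare-knowledge-from-markdown.ptbk.md'
}
```

Each of the four bundled pipelines (knowledge extraction, keywords, title, persona preparation) is one such object, now stamped with `promptbookVersion: "0.65.0-3"`.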
```diff
@@ -6588,6 +6588,147 @@
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
  */

+/**
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ *
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
+ * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ *
+ * @see https://github.com/webgptorg/promptbook#remote-server
+ * @public exported from `@promptbook/remote-client`
+ */
+var RemoteLlmExecutionTools = /** @class */ (function () {
+    function RemoteLlmExecutionTools(options) {
+        this.options = options;
+    }
+    Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
+        get: function () {
+            // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
+            return 'Remote server';
+        },
+        enumerable: false,
+        configurable: true
+    });
+    Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
+        get: function () {
+            return 'Use all models by your remote server';
+        },
+        enumerable: false,
+        configurable: true
+    });
+    /**
+     * Creates a connection to the remote proxy server.
+     */
+    RemoteLlmExecutionTools.prototype.makeConnection = function () {
+        var _this = this;
+        return new Promise(function (resolve, reject) {
+            var socket = socket_ioClient.io(_this.options.remoteUrl, {
+                path: _this.options.path,
+                // path: `${this.remoteUrl.pathname}/socket.io`,
+                transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
+            });
+            // console.log('Connecting to', this.options.remoteUrl.href, { socket });
+            socket.on('connect', function () {
+                resolve(socket);
+            });
+            setTimeout(function () {
+                reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
+            }, 60000 /* <- TODO: Timeout to config */);
+        });
+    };
+    /**
+     * Calls remote proxy server to use a chat model
+     */
+    RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
+        if (this.options.isVerbose) {
+            console.info("\uD83D\uDD8B Remote callChatModel call");
+        }
+        return /* not await */ this.callCommonModel(prompt);
+    };
+    /**
+     * Calls remote proxy server to use a completion model
+     */
+    RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
+        if (this.options.isVerbose) {
+            console.info("\uD83D\uDCAC Remote callCompletionModel call");
+        }
+        return /* not await */ this.callCommonModel(prompt);
+    };
+    /**
+     * Calls remote proxy server to use a embedding model
+     */
+    RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
+        if (this.options.isVerbose) {
+            console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+        }
+        return /* not await */ this.callCommonModel(prompt);
+    };
+    // <- Note: [🤖] callXxxModel
+    /**
+     * Calls remote proxy server to use both completion or chat model
+     */
+    RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
+        return __awaiter(this, void 0, void 0, function () {
+            var socket, promptResult;
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, this.makeConnection()];
+                    case 1:
+                        socket = _a.sent();
+                        if (this.options.isAnonymous) {
+                            socket.emit('request', {
+                                llmToolsConfiguration: this.options.llmToolsConfiguration,
+                                prompt: prompt,
+                                // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+                            });
+                        }
+                        else {
+                            socket.emit('request', {
+                                clientId: this.options.clientId,
+                                prompt: prompt,
+                                // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
+                            });
+                        }
+                        return [4 /*yield*/, new Promise(function (resolve, reject) {
+                                socket.on('response', function (response) {
+                                    resolve(response.promptResult);
+                                    socket.disconnect();
+                                });
+                                socket.on('error', function (error) {
+                                    reject(new PipelineExecutionError(error.errorMessage));
+                                    socket.disconnect();
+                                });
+                            })];
+                    case 2:
+                        promptResult = _a.sent();
+                        socket.disconnect();
+                        return [2 /*return*/, promptResult];
+                }
+            });
+        });
+    };
+    /**
+     * List all available models that can be used
+     */
+    RemoteLlmExecutionTools.prototype.listModels = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                return [2 /*return*/, [
+                        /* !!! */
+                    ]];
+            });
+        });
+    };
+    return RemoteLlmExecutionTools;
+}());
+/**
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
+ * TODO: [🍓] Allow to list compatible models with each variant
+ * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
+ */
+
 /**
  * Helper of usage compute
  *
```
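The added `RemoteLlmExecutionTools` is the client half of the remote-server setup: every `callChatModel` / `callCompletionModel` / `callEmbeddingModel` call funnels into `callCommonModel`, which opens a socket.io connection, emits a `request` event (carrying `llmToolsConfiguration` in anonymous mode, or `clientId` in collection mode), and resolves on the `response` event. A minimal usage sketch, assuming the option names from the code above; the URL, path, and client id are placeholders:

```ts
import { RemoteLlmExecutionTools } from '@promptbook/remote-client';
import type { Prompt } from '@promptbook/types';

// Collection mode: the server holds the API keys; the client only identifies itself.
const remoteTools = new RemoteLlmExecutionTools({
    remoteUrl: 'https://example.com:4460', // <- hypothetical server URL (no default yet, see TODO [🍜])
    path: '/promptbook/socket.io',         // <- hypothetical socket.io path
    clientId: 'my-client-id',              // <- hypothetical client id
    isVerbose: true,
});

// Opens a socket (polling transport only for now, per the TODO above), emits
// `request`, resolves on `response`, rejects on `error`, disconnects either way.
const result = await remoteTools.callChatModel({
    title: 'Example prompt',
    content: 'Write one sentence about proxy servers.',
    modelRequirements: { modelVariant: 'CHAT' },
} as Prompt); // <- cast: a real Prompt carries more fields than this sketch fills in
```

Note that `listModels` is still a stub in this build (it returns `[ /* !!! */ ]`), so model listing over the wire is not implemented yet.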
```diff
@@ -6717,6 +6858,7 @@
  * Execution Tools for calling Anthropic Claude API.
  *
  * @public exported from `@promptbook/anthropic-claude`
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
  */
 var AnthropicClaudeExecutionTools = /** @class */ (function () {
     /**
@@ -6725,11 +6867,12 @@
      * @param options which are relevant are directly passed to the Anthropic Claude client
      */
     function AnthropicClaudeExecutionTools(options) {
-        if (options === void 0) { options = {}; }
+        if (options === void 0) { options = { isProxied: false }; }
         this.options = options;
         // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
         var anthropicOptions = __assign({}, options);
         delete anthropicOptions.isVerbose;
+        delete anthropicOptions.isProxied;
         this.client = new Anthropic__default["default"](anthropicOptions);
     }
     Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
@@ -6932,8 +7075,32 @@
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🍜] Auto use anonymous server in browser
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
+ */
+
+/**
+ * Execution Tools for calling Anthropic Claude API.
+ *
+ * @public exported from `@promptbook/anthropic-claude`
+ */
+function createAnthropicClaudeExecutionTools(options) {
+    if (options.isProxied) {
+        return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
+                {
+                    title: 'Anthropic Claude (proxied)',
+                    packageName: '@promptbook/anthropic-claude',
+                    className: 'AnthropicClaudeExecutionTools',
+                    options: __assign(__assign({}, options), { isProxied: false }),
+                },
+            ] }));
+    }
+    return new AnthropicClaudeExecutionTools(options);
+}
+/**
+ * TODO: !!!!!! Make this with all LLM providers
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  */

 /**
```
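Together with the `@deprecated` tag added above, the intended migration is from the constructor to the new factory. A hedged sketch (the env-var name and server URL are placeholders; per the TODO there is no default anonymous server URL yet, so the remote options presumably pass through the spread):

```ts
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Before (now deprecated): new AnthropicClaudeExecutionTools({ apiKey });

// Direct mode: equivalent to the old constructor.
const directTools = createAnthropicClaudeExecutionTools({
    isProxied: false,
    apiKey: process.env.ANTHROPIC_API_KEY, // <- hypothetical env-var name
});

// Proxied mode: wraps the same options in an anonymous RemoteLlmExecutionTools,
// shipping them as llmToolsConfiguration so the remote server constructs the
// real AnthropicClaudeExecutionTools (with isProxied reset to false).
const proxiedTools = createAnthropicClaudeExecutionTools({
    isProxied: true,
    apiKey: process.env.ANTHROPIC_API_KEY, // <- hypothetical env-var name
    remoteUrl: 'https://example.com:4460', // <- hypothetical; no default anonymous server yet (TODO [🍜])
    path: '/promptbook/socket.io',         // <- hypothetical
});
```

Design-wise, the factory lets the same provider package serve both browser (proxied, keys never used locally against the API) and server (direct) callers; the `delete anthropicOptions.isProxied` in the constructor keeps the extra flag out of the Anthropic SDK options.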
```diff
@@ -7879,11 +8046,11 @@
  * @private internal type for `createLlmToolsFromConfiguration`
  */
 var EXECUTION_TOOLS_CLASSES = {
-
+    createOpenAiExecutionTools: function (options) {
         return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
     },
-
-
+    createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
+    createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
     // <- Note: [🦑] Add here new LLM provider
 };
 /**
```
```diff
@@ -7902,9 +8069,8 @@
 function createLlmToolsFromConfiguration(configuration, options) {
     if (options === void 0) { options = {}; }
     var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
-    dotenv__namespace.config();
     var llmTools = configuration.map(function (llmConfiguration) {
-        return EXECUTION_TOOLS_CLASSES["
+        return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
     });
     return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
 }
```
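With the registry keys renamed to `create*`, the lookup becomes plain string concatenation: `EXECUTION_TOOLS_CLASSES['create' + className]`. A sketch of a configuration that would drive it; the entry shape mirrors the `llmToolsConfiguration` built by `createAnthropicClaudeExecutionTools` above, while the import path is an assumption:

```ts
import { createLlmToolsFromConfiguration } from '@promptbook/core'; // <- assumed export location

const llmTools = createLlmToolsFromConfiguration(
    [
        {
            title: 'OpenAI',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools', // -> EXECUTION_TOOLS_CLASSES['createOpenAiExecutionTools']
            options: { apiKey: '...' },        // <- placeholder key
        },
        {
            title: 'Anthropic Claude',
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools', // -> 'create' + className
            options: { apiKey: '...', isProxied: false }, // <- placeholder key
        },
    ],
    { isVerbose: true },
);
// The per-provider tools are then merged via joinLlmExecutionTools(...).
```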
```diff
@@ -7933,6 +8099,7 @@
     if (!isRunningInNode()) {
         throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
     }
+    dotenv__namespace.config();
     var llmToolsConfiguration = [];
     if (typeof process.env.OPENAI_API_KEY === 'string') {
         llmToolsConfiguration.push({
```
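Moving `dotenv.config()` out of `createLlmToolsFromConfiguration` (previous hunk) and into `createLlmToolsFromEnv` keeps `.env` loading in the one entry point that already guards with `isRunningInNode()`, so the configuration-driven path stays usable in browsers. A hedged usage sketch; the export location is an assumption, and only `OPENAI_API_KEY` is visible in this hunk (other provider keys are presumably scanned in the elided lines):

```ts
// .env (hypothetical):
// OPENAI_API_KEY=sk-...

import { createLlmToolsFromEnv } from '@promptbook/node'; // <- assumed export location

// Throws EnvironmentMismatchError outside Node.js; inside Node it now loads
// .env itself (dotenv.config()) before scanning process.env for provider keys.
const llmTools = createLlmToolsFromEnv();
```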