@promptbook/pdf 0.71.0-16 → 0.72.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/esm/index.es.js +231 -205
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  7. package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  8. package/esm/typings/src/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  9. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +1 -1
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +0 -56
  11. package/esm/typings/src/execution/utils/usage-constants.d.ts +127 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +37 -0
  17. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +14 -0
  18. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -1
  19. package/esm/typings/src/llm-providers/openai/createOpenAiAssistantExecutionTools.d.ts +15 -0
  20. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +9 -0
  21. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +9 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  23. package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  24. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts +1 -1
  27. package/package.json +2 -2
  28. package/umd/index.umd.js +235 -208
  29. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -3,7 +3,6 @@ import { format } from 'prettier';
3
3
  import parserHtml from 'prettier/parser-html';
4
4
  import { basename, join } from 'path';
5
5
  import { forTime } from 'waitasecond';
6
- import { readFile } from 'fs/promises';
7
6
  import { SHA256 } from 'crypto-js';
8
7
  import hexEncoder from 'crypto-js/enc-hex';
9
8
  import { lookup } from 'mime-types';
@@ -13,7 +12,7 @@ import { unparse, parse } from 'papaparse';
13
12
  /**
14
13
  * The version of the Promptbook library
15
14
  */
16
- var PROMPTBOOK_VERSION = '0.71.0-15';
15
+ var PROMPTBOOK_VERSION = '0.72.0-8';
17
16
  // TODO: [main] !!!! List here all the versions and annotate + put into script
18
17
 
19
18
  /*! *****************************************************************************
@@ -1955,127 +1954,6 @@ function serializeError(error) {
1955
1954
  };
1956
1955
  }
1957
1956
 
1958
- /**
1959
- * @@@
1960
- *
1961
- * @public exported from `@promptbook/utils`
1962
- */
1963
- function deepClone(objectValue) {
1964
- return JSON.parse(JSON.stringify(objectValue));
1965
- /*
1966
- TODO: [🧠] Is there a better implementation?
1967
- > const propertyNames = Object.getOwnPropertyNames(objectValue);
1968
- > for (const propertyName of propertyNames) {
1969
- > const value = (objectValue as really_any)[propertyName];
1970
- > if (value && typeof value === 'object') {
1971
- > deepClone(value);
1972
- > }
1973
- > }
1974
- > return Object.assign({}, objectValue);
1975
- */
1976
- }
1977
- /**
1978
- * TODO: [🧠] Is there a way how to meaningfully test this utility
1979
- */
1980
-
1981
- /**
1982
- * @@@
1983
- *
1984
- * @public exported from `@promptbook/core`
1985
- */
1986
- var ZERO_USAGE = $deepFreeze({
1987
- price: { value: 0 },
1988
- input: {
1989
- tokensCount: { value: 0 },
1990
- charactersCount: { value: 0 },
1991
- wordsCount: { value: 0 },
1992
- sentencesCount: { value: 0 },
1993
- linesCount: { value: 0 },
1994
- paragraphsCount: { value: 0 },
1995
- pagesCount: { value: 0 },
1996
- },
1997
- output: {
1998
- tokensCount: { value: 0 },
1999
- charactersCount: { value: 0 },
2000
- wordsCount: { value: 0 },
2001
- sentencesCount: { value: 0 },
2002
- linesCount: { value: 0 },
2003
- paragraphsCount: { value: 0 },
2004
- pagesCount: { value: 0 },
2005
- },
2006
- });
2007
- /**
2008
- * Function `addUsage` will add multiple usages into one
2009
- *
2010
- * Note: If you provide 0 values, it returns ZERO_USAGE
2011
- *
2012
- * @public exported from `@promptbook/core`
2013
- */
2014
- function addUsage() {
2015
- var usageItems = [];
2016
- for (var _i = 0; _i < arguments.length; _i++) {
2017
- usageItems[_i] = arguments[_i];
2018
- }
2019
- return usageItems.reduce(function (acc, item) {
2020
- var e_1, _a, e_2, _b;
2021
- var _c;
2022
- acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
2023
- try {
2024
- for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
2025
- var key = _e.value;
2026
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2027
- //@ts-ignore
2028
- if (item.input[key]) {
2029
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2030
- //@ts-ignore
2031
- acc.input[key].value += item.input[key].value || 0;
2032
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2033
- //@ts-ignore
2034
- if (item.input[key].isUncertain) {
2035
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2036
- //@ts-ignore
2037
- acc.input[key].isUncertain = true;
2038
- }
2039
- }
2040
- }
2041
- }
2042
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
2043
- finally {
2044
- try {
2045
- if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
2046
- }
2047
- finally { if (e_1) throw e_1.error; }
2048
- }
2049
- try {
2050
- for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
2051
- var key = _g.value;
2052
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2053
- //@ts-ignore
2054
- if (item.output[key]) {
2055
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2056
- //@ts-ignore
2057
- acc.output[key].value += item.output[key].value || 0;
2058
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2059
- //@ts-ignore
2060
- if (item.output[key].isUncertain) {
2061
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2062
- //@ts-ignore
2063
- acc.output[key].isUncertain = true;
2064
- }
2065
- }
2066
- }
2067
- }
2068
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
2069
- finally {
2070
- try {
2071
- if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
2072
- }
2073
- finally { if (e_2) throw e_2.error; }
2074
- }
2075
- return acc;
2076
- }, deepClone(ZERO_USAGE));
2077
- }
2078
-
2079
1957
  /**
2080
1958
  * Async version of Array.forEach
2081
1959
  *
@@ -2153,91 +2031,57 @@ function forEachAsync(array, options, callbackfunction) {
2153
2031
  }
2154
2032
 
2155
2033
  /**
2156
- * Intercepts LLM tools and counts total usage of the tools
2034
+ * Represents the usage with no resources consumed
2157
2035
  *
2158
- * @param llmTools LLM tools to be intercepted with usage counting
2159
- * @returns LLM tools with same functionality with added total cost counting
2160
2036
  * @public exported from `@promptbook/core`
2161
2037
  */
2162
- function countTotalUsage(llmTools) {
2163
- var _this = this;
2164
- var totalUsage = ZERO_USAGE;
2165
- var proxyTools = {
2166
- get title() {
2167
- // TODO: [🧠] Maybe put here some suffix
2168
- return llmTools.title;
2169
- },
2170
- get description() {
2171
- // TODO: [🧠] Maybe put here some suffix
2172
- return llmTools.description;
2173
- },
2174
- checkConfiguration: function () {
2175
- return __awaiter(this, void 0, void 0, function () {
2176
- return __generator(this, function (_a) {
2177
- return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
2178
- });
2179
- });
2180
- },
2181
- listModels: function () {
2182
- return /* not await */ llmTools.listModels();
2183
- },
2184
- getTotalUsage: function () {
2185
- // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
2186
- return totalUsage;
2187
- },
2188
- };
2189
- if (llmTools.callChatModel !== undefined) {
2190
- proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
2191
- var promptResult;
2192
- return __generator(this, function (_a) {
2193
- switch (_a.label) {
2194
- case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
2195
- case 1:
2196
- promptResult = _a.sent();
2197
- totalUsage = addUsage(totalUsage, promptResult.usage);
2198
- return [2 /*return*/, promptResult];
2199
- }
2200
- });
2201
- }); };
2202
- }
2203
- if (llmTools.callCompletionModel !== undefined) {
2204
- proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
2205
- var promptResult;
2206
- return __generator(this, function (_a) {
2207
- switch (_a.label) {
2208
- case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
2209
- case 1:
2210
- promptResult = _a.sent();
2211
- totalUsage = addUsage(totalUsage, promptResult.usage);
2212
- return [2 /*return*/, promptResult];
2213
- }
2214
- });
2215
- }); };
2216
- }
2217
- if (llmTools.callEmbeddingModel !== undefined) {
2218
- proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
2219
- var promptResult;
2220
- return __generator(this, function (_a) {
2221
- switch (_a.label) {
2222
- case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
2223
- case 1:
2224
- promptResult = _a.sent();
2225
- totalUsage = addUsage(totalUsage, promptResult.usage);
2226
- return [2 /*return*/, promptResult];
2227
- }
2228
- });
2229
- }); };
2230
- }
2231
- // <- Note: [🤖]
2232
- return proxyTools;
2233
- }
2038
+ var ZERO_USAGE = $deepFreeze({
2039
+ price: { value: 0 },
2040
+ input: {
2041
+ tokensCount: { value: 0 },
2042
+ charactersCount: { value: 0 },
2043
+ wordsCount: { value: 0 },
2044
+ sentencesCount: { value: 0 },
2045
+ linesCount: { value: 0 },
2046
+ paragraphsCount: { value: 0 },
2047
+ pagesCount: { value: 0 },
2048
+ },
2049
+ output: {
2050
+ tokensCount: { value: 0 },
2051
+ charactersCount: { value: 0 },
2052
+ wordsCount: { value: 0 },
2053
+ sentencesCount: { value: 0 },
2054
+ linesCount: { value: 0 },
2055
+ paragraphsCount: { value: 0 },
2056
+ pagesCount: { value: 0 },
2057
+ },
2058
+ });
2234
2059
  /**
2235
- * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
2236
- * TODO: [🧠] Is there some meaningfull way how to test this util
2237
- * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
2238
- * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
2239
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
2060
+ * Represents the usage with unknown resources consumed
2061
+ *
2062
+ * @public exported from `@promptbook/core`
2240
2063
  */
2064
+ $deepFreeze({
2065
+ price: { value: 0, isUncertain: true },
2066
+ input: {
2067
+ tokensCount: { value: 0, isUncertain: true },
2068
+ charactersCount: { value: 0, isUncertain: true },
2069
+ wordsCount: { value: 0, isUncertain: true },
2070
+ sentencesCount: { value: 0, isUncertain: true },
2071
+ linesCount: { value: 0, isUncertain: true },
2072
+ paragraphsCount: { value: 0, isUncertain: true },
2073
+ pagesCount: { value: 0, isUncertain: true },
2074
+ },
2075
+ output: {
2076
+ tokensCount: { value: 0, isUncertain: true },
2077
+ charactersCount: { value: 0, isUncertain: true },
2078
+ wordsCount: { value: 0, isUncertain: true },
2079
+ sentencesCount: { value: 0, isUncertain: true },
2080
+ linesCount: { value: 0, isUncertain: true },
2081
+ paragraphsCount: { value: 0, isUncertain: true },
2082
+ pagesCount: { value: 0, isUncertain: true },
2083
+ },
2084
+ });
2241
2085
 
2242
2086
  /**
2243
2087
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -2532,6 +2376,188 @@ function joinLlmExecutionTools() {
2532
2376
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
2533
2377
  */
2534
2378
 
2379
+ /**
2380
+ * @@@
2381
+ *
2382
+ * @public exported from `@promptbook/utils`
2383
+ */
2384
+ function deepClone(objectValue) {
2385
+ return JSON.parse(JSON.stringify(objectValue));
2386
+ /*
2387
+ TODO: [🧠] Is there a better implementation?
2388
+ > const propertyNames = Object.getOwnPropertyNames(objectValue);
2389
+ > for (const propertyName of propertyNames) {
2390
+ > const value = (objectValue as really_any)[propertyName];
2391
+ > if (value && typeof value === 'object') {
2392
+ > deepClone(value);
2393
+ > }
2394
+ > }
2395
+ > return Object.assign({}, objectValue);
2396
+ */
2397
+ }
2398
+ /**
2399
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
2400
+ */
2401
+
2402
+ /**
2403
+ * Function `addUsage` will add multiple usages into one
2404
+ *
2405
+ * Note: If you provide 0 values, it returns ZERO_USAGE
2406
+ *
2407
+ * @public exported from `@promptbook/core`
2408
+ */
2409
+ function addUsage() {
2410
+ var usageItems = [];
2411
+ for (var _i = 0; _i < arguments.length; _i++) {
2412
+ usageItems[_i] = arguments[_i];
2413
+ }
2414
+ return usageItems.reduce(function (acc, item) {
2415
+ var e_1, _a, e_2, _b;
2416
+ var _c;
2417
+ acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
2418
+ try {
2419
+ for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
2420
+ var key = _e.value;
2421
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2422
+ //@ts-ignore
2423
+ if (item.input[key]) {
2424
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2425
+ //@ts-ignore
2426
+ acc.input[key].value += item.input[key].value || 0;
2427
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2428
+ //@ts-ignore
2429
+ if (item.input[key].isUncertain) {
2430
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2431
+ //@ts-ignore
2432
+ acc.input[key].isUncertain = true;
2433
+ }
2434
+ }
2435
+ }
2436
+ }
2437
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
2438
+ finally {
2439
+ try {
2440
+ if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
2441
+ }
2442
+ finally { if (e_1) throw e_1.error; }
2443
+ }
2444
+ try {
2445
+ for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
2446
+ var key = _g.value;
2447
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2448
+ //@ts-ignore
2449
+ if (item.output[key]) {
2450
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2451
+ //@ts-ignore
2452
+ acc.output[key].value += item.output[key].value || 0;
2453
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2454
+ //@ts-ignore
2455
+ if (item.output[key].isUncertain) {
2456
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
2457
+ //@ts-ignore
2458
+ acc.output[key].isUncertain = true;
2459
+ }
2460
+ }
2461
+ }
2462
+ }
2463
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
2464
+ finally {
2465
+ try {
2466
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
2467
+ }
2468
+ finally { if (e_2) throw e_2.error; }
2469
+ }
2470
+ return acc;
2471
+ }, deepClone(ZERO_USAGE));
2472
+ }
2473
+
2474
+ /**
2475
+ * Intercepts LLM tools and counts total usage of the tools
2476
+ *
2477
+ * @param llmTools LLM tools to be intercepted with usage counting
2478
+ * @returns LLM tools with same functionality with added total cost counting
2479
+ * @public exported from `@promptbook/core`
2480
+ */
2481
+ function countTotalUsage(llmTools) {
2482
+ var _this = this;
2483
+ var totalUsage = ZERO_USAGE;
2484
+ var proxyTools = {
2485
+ get title() {
2486
+ // TODO: [🧠] Maybe put here some suffix
2487
+ return llmTools.title;
2488
+ },
2489
+ get description() {
2490
+ // TODO: [🧠] Maybe put here some suffix
2491
+ return llmTools.description;
2492
+ },
2493
+ checkConfiguration: function () {
2494
+ return __awaiter(this, void 0, void 0, function () {
2495
+ return __generator(this, function (_a) {
2496
+ return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
2497
+ });
2498
+ });
2499
+ },
2500
+ listModels: function () {
2501
+ return /* not await */ llmTools.listModels();
2502
+ },
2503
+ getTotalUsage: function () {
2504
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
2505
+ return totalUsage;
2506
+ },
2507
+ };
2508
+ if (llmTools.callChatModel !== undefined) {
2509
+ proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
2510
+ var promptResult;
2511
+ return __generator(this, function (_a) {
2512
+ switch (_a.label) {
2513
+ case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
2514
+ case 1:
2515
+ promptResult = _a.sent();
2516
+ totalUsage = addUsage(totalUsage, promptResult.usage);
2517
+ return [2 /*return*/, promptResult];
2518
+ }
2519
+ });
2520
+ }); };
2521
+ }
2522
+ if (llmTools.callCompletionModel !== undefined) {
2523
+ proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
2524
+ var promptResult;
2525
+ return __generator(this, function (_a) {
2526
+ switch (_a.label) {
2527
+ case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
2528
+ case 1:
2529
+ promptResult = _a.sent();
2530
+ totalUsage = addUsage(totalUsage, promptResult.usage);
2531
+ return [2 /*return*/, promptResult];
2532
+ }
2533
+ });
2534
+ }); };
2535
+ }
2536
+ if (llmTools.callEmbeddingModel !== undefined) {
2537
+ proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
2538
+ var promptResult;
2539
+ return __generator(this, function (_a) {
2540
+ switch (_a.label) {
2541
+ case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
2542
+ case 1:
2543
+ promptResult = _a.sent();
2544
+ totalUsage = addUsage(totalUsage, promptResult.usage);
2545
+ return [2 /*return*/, promptResult];
2546
+ }
2547
+ });
2548
+ }); };
2549
+ }
2550
+ // <- Note: [🤖]
2551
+ return proxyTools;
2552
+ }
2553
+ /**
2554
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
2555
+ * TODO: [🧠] Is there some meaningfull way how to test this util
2556
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
2557
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
2558
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
2559
+ */
2560
+
2535
2561
  /**
2536
2562
  * Takes an item or an array of items and returns an array of items
2537
2563
  *
@@ -3096,7 +3122,7 @@ function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
3096
3122
  switch (_c.label) {
3097
3123
  case 0:
3098
3124
  _b = (_a = JSON).parse;
3099
- return [4 /*yield*/, readFile(filename_1, 'utf-8')];
3125
+ return [4 /*yield*/, tools.fs.readFile(filename_1, 'utf-8')];
3100
3126
  case 1: return [2 /*return*/, _b.apply(_a, [_c.sent()])];
3101
3127
  }
3102
3128
  });
@@ -3106,7 +3132,7 @@ function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
3106
3132
  return __awaiter(this, void 0, void 0, function () {
3107
3133
  return __generator(this, function (_a) {
3108
3134
  switch (_a.label) {
3109
- case 0: return [4 /*yield*/, readFile(filename_1, 'utf-8')];
3135
+ case 0: return [4 /*yield*/, tools.fs.readFile(filename_1, 'utf-8')];
3110
3136
  case 1: return [2 /*return*/, _a.sent()];
3111
3137
  }
3112
3138
  });