@promptbook/pdf 0.71.0-17 → 0.72.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/esm/index.es.js +229 -202
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  7. package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  8. package/esm/typings/src/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  9. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +1 -1
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +0 -56
  11. package/esm/typings/src/execution/utils/usage-constants.d.ts +127 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +37 -0
  17. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +14 -0
  18. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -1
  19. package/esm/typings/src/llm-providers/openai/createOpenAiAssistantExecutionTools.d.ts +15 -0
  20. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +9 -0
  21. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +9 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  23. package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  24. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts +1 -1
  27. package/package.json +2 -2
  28. package/umd/index.umd.js +229 -202
  29. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -12,7 +12,7 @@ import { unparse, parse } from 'papaparse';
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.71.0-16';
+ var PROMPTBOOK_VERSION = '0.72.0-8';
  // TODO: [main] !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -1954,127 +1954,6 @@ function serializeError(error) {
  };
  }

- /**
- * @@@
- *
- * @public exported from `@promptbook/utils`
- */
- function deepClone(objectValue) {
- return JSON.parse(JSON.stringify(objectValue));
- /*
- TODO: [🧠] Is there a better implementation?
- > const propertyNames = Object.getOwnPropertyNames(objectValue);
- > for (const propertyName of propertyNames) {
- > const value = (objectValue as really_any)[propertyName];
- > if (value && typeof value === 'object') {
- > deepClone(value);
- > }
- > }
- > return Object.assign({}, objectValue);
- */
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
- /**
- * @@@
- *
- * @public exported from `@promptbook/core`
- */
- var ZERO_USAGE = $deepFreeze({
- price: { value: 0 },
- input: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
- },
- output: {
- tokensCount: { value: 0 },
- charactersCount: { value: 0 },
- wordsCount: { value: 0 },
- sentencesCount: { value: 0 },
- linesCount: { value: 0 },
- paragraphsCount: { value: 0 },
- pagesCount: { value: 0 },
- },
- });
- /**
- * Function `addUsage` will add multiple usages into one
- *
- * Note: If you provide 0 values, it returns ZERO_USAGE
- *
- * @public exported from `@promptbook/core`
- */
- function addUsage() {
- var usageItems = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- usageItems[_i] = arguments[_i];
- }
- return usageItems.reduce(function (acc, item) {
- var e_1, _a, e_2, _b;
- var _c;
- acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
- try {
- for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
- var key = _e.value;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.input[key]) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.input[key].value += item.input[key].value || 0;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.input[key].isUncertain) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.input[key].isUncertain = true;
- }
- }
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
- }
- finally { if (e_1) throw e_1.error; }
- }
- try {
- for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
- var key = _g.value;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.output[key]) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.output[key].value += item.output[key].value || 0;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.output[key].isUncertain) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.output[key].isUncertain = true;
- }
- }
- }
- }
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
- finally {
- try {
- if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
- }
- finally { if (e_2) throw e_2.error; }
- }
- return acc;
- }, deepClone(ZERO_USAGE));
- }
-
  /**
  * Async version of Array.forEach
  *
@@ -2152,91 +2031,57 @@ function forEachAsync(array, options, callbackfunction) {
  }

  /**
- * Intercepts LLM tools and counts total usage of the tools
+ * Represents the usage with no resources consumed
  *
- * @param llmTools LLM tools to be intercepted with usage counting
- * @returns LLM tools with same functionality with added total cost counting
  * @public exported from `@promptbook/core`
  */
- function countTotalUsage(llmTools) {
- var _this = this;
- var totalUsage = ZERO_USAGE;
- var proxyTools = {
- get title() {
- // TODO: [🧠] Maybe put here some suffix
- return llmTools.title;
- },
- get description() {
- // TODO: [🧠] Maybe put here some suffix
- return llmTools.description;
- },
- checkConfiguration: function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
- });
- });
- },
- listModels: function () {
- return /* not await */ llmTools.listModels();
- },
- getTotalUsage: function () {
- // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
- return totalUsage;
- },
- };
- if (llmTools.callChatModel !== undefined) {
- proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
- case 1:
- promptResult = _a.sent();
- totalUsage = addUsage(totalUsage, promptResult.usage);
- return [2 /*return*/, promptResult];
- }
- });
- }); };
- }
- if (llmTools.callCompletionModel !== undefined) {
- proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
- case 1:
- promptResult = _a.sent();
- totalUsage = addUsage(totalUsage, promptResult.usage);
- return [2 /*return*/, promptResult];
- }
- });
- }); };
- }
- if (llmTools.callEmbeddingModel !== undefined) {
- proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
- case 1:
- promptResult = _a.sent();
- totalUsage = addUsage(totalUsage, promptResult.usage);
- return [2 /*return*/, promptResult];
- }
- });
- }); };
- }
- // <- Note: [🤖]
- return proxyTools;
- }
+ var ZERO_USAGE = $deepFreeze({
+ price: { value: 0 },
+ input: {
+ tokensCount: { value: 0 },
+ charactersCount: { value: 0 },
+ wordsCount: { value: 0 },
+ sentencesCount: { value: 0 },
+ linesCount: { value: 0 },
+ paragraphsCount: { value: 0 },
+ pagesCount: { value: 0 },
+ },
+ output: {
+ tokensCount: { value: 0 },
+ charactersCount: { value: 0 },
+ wordsCount: { value: 0 },
+ sentencesCount: { value: 0 },
+ linesCount: { value: 0 },
+ paragraphsCount: { value: 0 },
+ pagesCount: { value: 0 },
+ },
+ });
  /**
- * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
- * TODO: [🧠] Is there some meaningfull way how to test this util
- * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
- * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * Represents the usage with unknown resources consumed
+ *
+ * @public exported from `@promptbook/core`
  */
+ $deepFreeze({
+ price: { value: 0, isUncertain: true },
+ input: {
+ tokensCount: { value: 0, isUncertain: true },
+ charactersCount: { value: 0, isUncertain: true },
+ wordsCount: { value: 0, isUncertain: true },
+ sentencesCount: { value: 0, isUncertain: true },
+ linesCount: { value: 0, isUncertain: true },
+ paragraphsCount: { value: 0, isUncertain: true },
+ pagesCount: { value: 0, isUncertain: true },
+ },
+ output: {
+ tokensCount: { value: 0, isUncertain: true },
+ charactersCount: { value: 0, isUncertain: true },
+ wordsCount: { value: 0, isUncertain: true },
+ sentencesCount: { value: 0, isUncertain: true },
+ linesCount: { value: 0, isUncertain: true },
+ paragraphsCount: { value: 0, isUncertain: true },
+ pagesCount: { value: 0, isUncertain: true },
+ },
+ });

  /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -2531,6 +2376,188 @@ function joinLlmExecutionTools() {
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */

+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function deepClone(objectValue) {
+ return JSON.parse(JSON.stringify(objectValue));
+ /*
+ TODO: [🧠] Is there a better implementation?
+ > const propertyNames = Object.getOwnPropertyNames(objectValue);
+ > for (const propertyName of propertyNames) {
+ > const value = (objectValue as really_any)[propertyName];
+ > if (value && typeof value === 'object') {
+ > deepClone(value);
+ > }
+ > }
+ > return Object.assign({}, objectValue);
+ */
+ }
+ /**
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
+ */
+
+ /**
+ * Function `addUsage` will add multiple usages into one
+ *
+ * Note: If you provide 0 values, it returns ZERO_USAGE
+ *
+ * @public exported from `@promptbook/core`
+ */
+ function addUsage() {
+ var usageItems = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ usageItems[_i] = arguments[_i];
+ }
+ return usageItems.reduce(function (acc, item) {
+ var e_1, _a, e_2, _b;
+ var _c;
+ acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
+ try {
+ for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
+ var key = _e.value;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.input[key]) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.input[key].value += item.input[key].value || 0;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.input[key].isUncertain) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.input[key].isUncertain = true;
+ }
+ }
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ try {
+ for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var key = _g.value;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.output[key]) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.output[key].value += item.output[key].value || 0;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.output[key].isUncertain) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.output[key].isUncertain = true;
+ }
+ }
+ }
+ }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
+ }
+ finally { if (e_2) throw e_2.error; }
+ }
+ return acc;
+ }, deepClone(ZERO_USAGE));
+ }
+
+ /**
+ * Intercepts LLM tools and counts total usage of the tools
+ *
+ * @param llmTools LLM tools to be intercepted with usage counting
+ * @returns LLM tools with same functionality with added total cost counting
+ * @public exported from `@promptbook/core`
+ */
+ function countTotalUsage(llmTools) {
+ var _this = this;
+ var totalUsage = ZERO_USAGE;
+ var proxyTools = {
+ get title() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.title;
+ },
+ get description() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.description;
+ },
+ checkConfiguration: function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
+ });
+ });
+ },
+ listModels: function () {
+ return /* not await */ llmTools.listModels();
+ },
+ getTotalUsage: function () {
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
+ return totalUsage;
+ },
+ };
+ if (llmTools.callChatModel !== undefined) {
+ proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callCompletionModel !== undefined) {
+ proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callEmbeddingModel !== undefined) {
+ proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ // <- Note: [🤖]
+ return proxyTools;
+ }
+ /**
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
+
  /**
  * Takes an item or an array of items and returns an array of items
  *
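
Note: the hunks above mostly relocate the usage helpers within the bundle rather than change their behavior. For context, here is a minimal usage sketch of `countTotalUsage` and `addUsage`, assuming they are imported from `@promptbook/core` as their `@public exported from` annotations state; the `llmTools` instance and `prompt` object are placeholders for whatever provider tools and prompt you construct elsewhere, not part of this diff.

    import { addUsage, countTotalUsage } from '@promptbook/core';

    // `llmTools` is assumed to be an existing LlmExecutionTools instance (e.g. OpenAI tools).
    // Wrapping it keeps the same interface but accumulates usage of every call internally.
    const llmToolsWithUsage = countTotalUsage(llmTools);

    // Calls pass through unchanged; each result's `usage` is added to the running total.
    // (`callChatModel` is only present when the underlying tools support chat models.)
    const result = await llmToolsWithUsage.callChatModel(prompt);

    // `getTotalUsage()` returns the usage accumulated so far.
    console.log(llmToolsWithUsage.getTotalUsage().price.value);

    // `addUsage` merges any number of usage records into one (with no arguments it yields ZERO_USAGE).
    const combined = addUsage(result.usage, llmToolsWithUsage.getTotalUsage());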