@promptbook/openai 0.70.0-1 → 0.72.0-0

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (51)
  1. package/README.md +21 -57
  2. package/esm/index.es.js +290 -5
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/_packages/core.index.d.ts +4 -2
  6. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  8. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  9. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  10. package/esm/typings/src/commands/FOREACH/ForeachCommand.d.ts +1 -6
  11. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +1 -2
  12. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +1 -16
  13. package/esm/typings/src/config.d.ts +2 -2
  14. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  15. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  16. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  17. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +1 -1
  18. package/esm/typings/src/execution/utils/addUsage.d.ts +0 -56
  19. package/esm/typings/src/execution/utils/usage-constants.d.ts +127 -0
  20. package/esm/typings/src/knowledge/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  21. package/esm/typings/src/knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  22. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  23. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -2
  26. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  28. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +3 -2
  30. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  31. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +37 -0
  33. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +14 -0
  34. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -2
  35. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  36. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  37. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  38. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  39. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  40. package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  41. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +1 -1
  42. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +1 -1
  43. package/esm/typings/src/storage/files-storage/FilesStorage.d.ts +1 -1
  44. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  45. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  46. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  47. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  48. package/package.json +2 -2
  49. package/umd/index.umd.js +290 -4
  50. package/umd/index.umd.js.map +1 -1
  51. package/esm/typings/src/personas/preparePersona.test.d.ts +0 -1
package/README.md CHANGED
@@ -202,15 +202,21 @@ See the other models available in the Promptbook package:
 
 Rest of the documentation is common for **entire promptbook ecosystem**:
 
+# ✨ New Features
+
+- ✨ **Support [OpenAI o1 model](https://openai.com/o1/)**
+
 ## 🤍 The Promptbook Whitepaper
 
+
+
 If you have a simple, single prompt for ChatGPT, GPT-4, Anthropic Claude, Google Gemini, Llama 2, or whatever, it doesn't matter how you integrate it. Whether it's calling a REST API directly, using the SDK, hardcoding the prompt into the source code, or importing a text file, the process remains the same.
 
 But often you will struggle with the limitations of LLMs, such as hallucinations, off-topic responses, poor quality output, language drift, word repetition repetition repetition repetition or misuse, lack of context, or just plain w𝒆𝐢rd responses. When this happens, you generally have three options:
 
 1. **Fine-tune** the model to your specifications or even train your own.
 2. **Prompt-engineer** the prompt to the best shape you can achieve.
-3. Use **multiple prompts** in a [pipeline](https://github.com/webgptorg/promptbook/discussions/64) to get the best result.
+3. Orchestrate **multiple prompts** in a [pipeline](https://github.com/webgptorg/promptbook/discussions/64) to get the best result.
 
 In all of these situations, but especially in 3., the Promptbook library can make your life easier.
 
@@ -222,7 +228,9 @@ In all of these situations, but especially in 3., the Promptbook library can mak
 - Promptbook has built in versioning. You can test multiple **A/B versions** of pipelines and see which one works best.
 - Promptbook is designed to do [**RAG** (Retrieval-Augmented Generation)](https://github.com/webgptorg/promptbook/discussions/41) and other advanced techniques. You can use **knowledge** to improve the quality of the output.
 
-## 🧔 Promptbook _(for prompt-engeneers)_
+
+
+## 🧔 Pipeline _(for prompt-engeneers)_
 
 **P**romp**t** **b**oo**k** markdown file (or `.ptbk.md` file) is document that describes a **pipeline** - a series of prompts that are chained together to form somewhat reciepe for transforming natural language input.
 
@@ -539,6 +547,8 @@ The following glossary is used to clarify certain concepts:
 - When you want to **version** your prompts and **test multiple versions**
 - When you want to **log** the execution of prompts and backtrace the issues
 
+[See more](https://github.com/webgptorg/promptbook/discussions/111)
+
 ### ➖ When not to use
 
 - When you have already implemented single simple prompt and it works fine for your job
@@ -548,6 +558,8 @@ The following glossary is used to clarify certain concepts:
 - When your main focus is on something other than text - like images, audio, video, spreadsheets _(other media types may be added in the future, [see discussion](https://github.com/webgptorg/promptbook/discussions/103))_
 - When you need to use recursion _([see the discussion](https://github.com/webgptorg/promptbook/discussions/38))_
 
+[See more](https://github.com/webgptorg/promptbook/discussions/112)
+
 ## 🐜 Known issues
 
 - [🤸‍♂️ Iterations not working yet](https://github.com/webgptorg/promptbook/discussions/55)
@@ -560,63 +572,15 @@ The following glossary is used to clarify certain concepts:
 
 ## ❔ FAQ
 
-
-
 If you have a question [start a discussion](https://github.com/webgptorg/promptbook/discussions/), [open an issue](https://github.com/webgptorg/promptbook/issues) or [write me an email](https://www.pavolhejny.com/contact).
 
-### Why not just use the OpenAI SDK / Anthropic Claude SDK / ...?
-
-Different levels of abstraction. OpenAI library is for direct use of OpenAI API. This library is for a higher level of abstraction. It define pipelines that are independent of the underlying library, LLM model, or even LLM provider.
-
-### How is it different from the Langchain library?
-
-Langchain is primarily aimed at ML developers working in Python. This library is for developers working in javascript/typescript and creating applications for end users.
-
-We are considering creating a bridge/converter between these two libraries.
-
-
-
-### Promptbooks vs. OpenAI`s GPTs
-
-GPTs are chat assistants that can be assigned to specific tasks and materials. But they are still chat assistants. Promptbooks are a way to orchestrate many more predefined tasks to have much tighter control over the process. Promptbooks are not a good technology for creating human-like chatbots, GPTs are not a good technology for creating outputs with specific requirements.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-### Where should I store my promptbooks?
-
-If you use raw SDKs, you just put prompts in the sourcecode, mixed in with typescript, javascript, python or whatever programming language you use.
-
-If you use promptbooks, you can store them in several places, each with its own advantages and disadvantages:
-
-1. As **source code**, typically git-committed. In this case you can use the versioning system and the promptbooks will be tightly coupled with the version of the application. You still get the power of promptbooks, as you separate the concerns of the prompt-engineer and the programmer.
-
-2. As data in a **database** In this case, promptbooks are like posts / articles on the blog. They can be modified independently of the application. You don't need to redeploy the application to change the promptbooks. You can have multiple versions of promptbooks for each user. You can have a web interface for non-programmers to create and modify promptbooks. But you lose the versioning system and you still have to consider the interface between the promptbooks and the application _(= input and output parameters)_.
-
-3. In a **configuration** in environment variables. This is a good way to store promptbooks if you have an application with multiple deployments and you want to have different but simple promptbooks for each deployment and you don't need to change them often.
-
-### What should I do when I need same promptbook in multiple human languages?
-
-A single promptbook can be written for several _(human)_ languages at once. However, we recommend that you have separate promptbooks for each language.
-
-In large language models, you will get better results if you have prompts in the same language as the user input.
-
-The best way to manage this is to have suffixed promptbooks like `write-website-content.en.ptbk.md` and `write-website-content.cs.ptbk.md` for each supported language.
-
-
-
-
+- [❔ Why not just use the OpenAI SDK / Anthropic Claude SDK / ...?](https://github.com/webgptorg/promptbook/discussions/114)
+- [❔ How is it different from the OpenAI`s GPTs?](https://github.com/webgptorg/promptbook/discussions/118)
+- [❔ How is it different from the Langchain?](https://github.com/webgptorg/promptbook/discussions/115)
+- [❔ How is it different from the DSPy?](https://github.com/webgptorg/promptbook/discussions/117)
+- [❔ How is it different from _anything_?](https://github.com/webgptorg/promptbook/discussions?discussions_q=is%3Aopen+label%3A%22Promptbook+vs%22)
+- [❔ Is Promptbook using RAG _(Retrieval-Augmented Generation)_?](https://github.com/webgptorg/promptbook/discussions/123)
+- [❔ Is Promptbook using function calling?](https://github.com/webgptorg/promptbook/discussions/124)
 
 ## ⌚ Changelog
 
package/esm/index.es.js CHANGED
@@ -6,8 +6,8 @@ import spaceTrim$1, { spaceTrim } from 'spacetrim';
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.70.0-0';
-// TODO: !!!! List here all the versions and annotate + put into script
+var PROMPTBOOK_VERSION = '0.68.5';
+// TODO:[main] !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
 Copyright (c) Microsoft Corporation.
@@ -340,7 +340,7 @@ function checkSerializableAsJson(name, value) {
 }
 /**
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
  */
 
@@ -1196,6 +1196,7 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
             prompt: computeUsage("$5.00 / 1M tokens"),
             output: computeUsage("$15.00 / 1M tokens"),
         },
+        //TODO:[main] !!!!!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
     },
     /**/
     /**/
@@ -1210,6 +1211,51 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
     },
     /**/
     /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'o1-preview',
+        modelName: 'o1-preview',
+        pricing: {
+            prompt: computeUsage("$15.00 / 1M tokens"),
+            output: computeUsage("$60.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'o1-preview-2024-09-12',
+        modelName: 'o1-preview-2024-09-12',
+        // <- TODO:[main] !!!!!! Some better system to organize theese date suffixes and versions
+        pricing: {
+            prompt: computeUsage("$15.00 / 1M tokens"),
+            output: computeUsage("$60.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'o1-mini',
+        modelName: 'o1-mini',
+        pricing: {
+            prompt: computeUsage("$3.00 / 1M tokens"),
+            output: computeUsage("$12.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'o1-mini-2024-09-12',
+        modelName: 'o1-mini-2024-09-12',
+        pricing: {
+            prompt: computeUsage("$3.00 / 1M tokens"),
+            output: computeUsage("$12.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
     {
         modelVariant: 'CHAT',
         modelTitle: 'gpt-3.5-turbo-16k-0613',
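
The four `o1` entries above follow the same record shape as the existing `OPENAI_MODELS` entries, so callers can price a request against them as soon as this version ships. A minimal sketch (not part of the diff) of estimating a per-call cost, assuming `pricing.prompt` and `pricing.output` hold per-token USD rates, which is what `computeUsage("$15.00 / 1M tokens")` appears to produce:

```ts
import { OPENAI_MODELS } from '@promptbook/openai';

// Estimate the cost of a call with 1000 prompt tokens and 500 output tokens
// for each newly added o1 model. Models without a `pricing` record are skipped.
for (const { modelName, pricing } of OPENAI_MODELS) {
    if (!modelName.startsWith('o1-') || pricing === undefined) {
        continue;
    }
    const costUsd = 1_000 * pricing.prompt + 500 * pricing.output;
    console.info(`${modelName}: ~$${costUsd.toFixed(4)} per call`); // e.g. o1-preview: ~$0.0450
}
```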
@@ -1619,7 +1665,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  * @public exported from `@promptbook/openai`
  */
 var createOpenAiExecutionTools = Object.assign(function (options) {
-    // TODO: [🧠] !!!! If browser, auto add `dangerouslyAllowBrowser`
+    // TODO: [🧠][main] !!!! If browser, auto add `dangerouslyAllowBrowser`
     if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
         options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
     }
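
With the hunk above, constructing the OpenAI tools in a browser no longer requires the caller to pass `dangerouslyAllowBrowser` explicitly. A sketch of the call site (the key is a placeholder; shipping a real key to the browser remains unsafe, which is what the flag's name warns about):

```ts
import { createOpenAiExecutionTools } from '@promptbook/openai';

// When this runs in a browser or web worker, the factory now injects
// `dangerouslyAllowBrowser: true` before handing the options to the OpenAI client.
const tools = createOpenAiExecutionTools({
    apiKey: 'sk-...', // <- placeholder, assumed to be passed through to the OpenAI client
});
```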
@@ -1633,6 +1679,245 @@ var createOpenAiExecutionTools = Object.assign(function (options) {
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
 
+/**
+ * This error type indicates that some part of the code is not implemented yet
+ *
+ * @public exported from `@promptbook/core`
+ */
+var NotYetImplementedError = /** @class */ (function (_super) {
+    __extends(NotYetImplementedError, _super);
+    function NotYetImplementedError(message) {
+        var _this = _super.call(this, spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This feature is not implemented yet but it will be soon.\n\n If you want speed up the implementation or just read more, look here:\n https://github.com/webgptorg/promptbook\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
+        _this.name = 'NotYetImplementedError';
+        Object.setPrototypeOf(_this, NotYetImplementedError.prototype);
+        return _this;
+    }
+    return NotYetImplementedError;
+}(Error));
+
+/**
+ * Represents the usage with no resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+$deepFreeze({
+    price: { value: 0 },
+    input: {
+        tokensCount: { value: 0 },
+        charactersCount: { value: 0 },
+        wordsCount: { value: 0 },
+        sentencesCount: { value: 0 },
+        linesCount: { value: 0 },
+        paragraphsCount: { value: 0 },
+        pagesCount: { value: 0 },
+    },
+    output: {
+        tokensCount: { value: 0 },
+        charactersCount: { value: 0 },
+        wordsCount: { value: 0 },
+        sentencesCount: { value: 0 },
+        linesCount: { value: 0 },
+        paragraphsCount: { value: 0 },
+        pagesCount: { value: 0 },
+    },
+});
+/**
+ * Represents the usage with unknown resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+var UNCERTAIN_USAGE = $deepFreeze({
+    price: { value: 0, isUncertain: true },
+    input: {
+        tokensCount: { value: 0, isUncertain: true },
+        charactersCount: { value: 0, isUncertain: true },
+        wordsCount: { value: 0, isUncertain: true },
+        sentencesCount: { value: 0, isUncertain: true },
+        linesCount: { value: 0, isUncertain: true },
+        paragraphsCount: { value: 0, isUncertain: true },
+        pagesCount: { value: 0, isUncertain: true },
+    },
+    output: {
+        tokensCount: { value: 0, isUncertain: true },
+        charactersCount: { value: 0, isUncertain: true },
+        wordsCount: { value: 0, isUncertain: true },
+        sentencesCount: { value: 0, isUncertain: true },
+        linesCount: { value: 0, isUncertain: true },
+        paragraphsCount: { value: 0, isUncertain: true },
+        pagesCount: { value: 0, isUncertain: true },
+    },
+});
+
+/**
+ * Execution Tools for calling OpenAI API Assistants
+ *
+ * This is usefull for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
+ *
+ * @public exported from `@promptbook/openai`
+ */
+var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
+    __extends(OpenAiAssistantExecutionTools, _super);
+    /**
+     * Creates OpenAI Execution Tools.
+     *
+     * @param options which are relevant are directly passed to the OpenAI client
+     */
+    function OpenAiAssistantExecutionTools(options) {
+        if (options === void 0) { options = {}; }
+        return _super.call(this, options) || this;
+    }
+    Object.defineProperty(OpenAiAssistantExecutionTools.prototype, "title", {
+        get: function () {
+            return 'OpenAI Assistant';
+        },
+        enumerable: false,
+        configurable: true
+    });
+    Object.defineProperty(OpenAiAssistantExecutionTools.prototype, "description", {
+        get: function () {
+            return 'Use single assistant provided by OpenAI';
+        },
+        enumerable: false,
+        configurable: true
+    });
+    /**
+     * Calls OpenAI API to use a chat model.
+     */
+    OpenAiAssistantExecutionTools.prototype.callChatModel = function (prompt) {
+        var _a, _b, _c;
+        return __awaiter(this, void 0, void 0, function () {
+            var content, parameters, modelRequirements /*, format*/, client, _d, _e, key, rawPromptContent, rawRequest, start, complete, stream, rawResponse, resultContent, usage;
+            var e_1, _f;
+            var _this = this;
+            return __generator(this, function (_g) {
+                switch (_g.label) {
+                    case 0:
+                        if (this.options.isVerbose) {
+                            console.info('💬 OpenAI callChatModel call', { prompt: prompt });
+                        }
+                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _g.sent();
+                        // TODO: [☂] Use here more modelRequirements
+                        if (modelRequirements.modelVariant !== 'CHAT') {
+                            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+                        }
+                        try {
+                            // TODO: [👨‍👨‍👧‍👧] Remove:
+                            for (_d = __values(['maxTokens', 'modelName', 'seed', 'temperature']), _e = _d.next(); !_e.done; _e = _d.next()) {
+                                key = _e.value;
+                                if (modelRequirements[key] !== undefined) {
+                                    throw new NotYetImplementedError("In `OpenAiAssistantExecutionTools` you cannot specify `".concat(key, "`"));
+                                }
+                            }
+                        }
+                        catch (e_1_1) { e_1 = { error: e_1_1 }; }
+                        finally {
+                            try {
+                                if (_e && !_e.done && (_f = _d.return)) _f.call(_d);
+                            }
+                            finally { if (e_1) throw e_1.error; }
+                        }
+                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: 'assistant' }));
+                        rawRequest = {
+                            // [👨‍👨‍👧‍👧] ...modelSettings,
+                            assistant_id: 'asst_CJCZzFCbBL0f2D4OWMXVTdBB',
+                            // <- Note: This is not a private information, just ID of the assistant which is accessible only with correct API key
+                            thread: {
+                                messages: [
+                                    // TODO: !!!!!! Unhardcode
+                                    // TODO: !!!!!! Allow threads to be passed
+                                    { role: 'user', content: 'What is the meaning of life? I want breathtaking speech.' },
+                                ],
+                            },
+                            // !!!!!! user: this.options.user,
+                        };
+                        start = getCurrentIsoDate();
+                        if (this.options.isVerbose) {
+                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+                        }
+                        return [4 /*yield*/, client.beta.threads.createAndRunStream(rawRequest)];
+                    case 2:
+                        stream = _g.sent();
+                        stream.on('connect', function () {
+                            if (_this.options.isVerbose) {
+                                console.info('connect', stream.currentEvent);
+                            }
+                        });
+                        stream.on('messageDelta', function (messageDelta) {
+                            var _a;
+                            if (_this.options.isVerbose &&
+                                messageDelta &&
+                                messageDelta.content &&
+                                messageDelta.content[0] &&
+                                messageDelta.content[0].type === 'text') {
+                                console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
+                            }
+                            // TODO: !!!!!! report progress
+                        });
+                        stream.on('messageCreated', function (message) {
+                            if (_this.options.isVerbose) {
+                                console.info('messageCreated', message);
+                            }
+                        });
+                        stream.on('messageDone', function (message) {
+                            if (_this.options.isVerbose) {
+                                console.info('messageDone', message);
+                            }
+                        });
+                        return [4 /*yield*/, stream.finalMessages()];
+                    case 3:
+                        rawResponse = _g.sent();
+                        if (this.options.isVerbose) {
+                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+                        }
+                        if (rawResponse.length !== 1) {
+                            throw new PipelineExecutionError("There is NOT 1 BUT ".concat(rawResponse.length, " finalMessages from OpenAI"));
+                        }
+                        if (rawResponse[0].content.length !== 1) {
+                            throw new PipelineExecutionError("There is NOT 1 BUT ".concat(rawResponse[0].content.length, " finalMessages content from OpenAI"));
+                        }
+                        if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
+                            throw new PipelineExecutionError("There is NOT 'text' BUT ".concat((_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type, " finalMessages content type from OpenAI"));
+                        }
+                        resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
+                        // <- TODO: !!!!!! There are also annotations, maybe use them
+                        // eslint-disable-next-line prefer-const
+                        complete = getCurrentIsoDate();
+                        usage = UNCERTAIN_USAGE;
+                        // TODO: !!!!!!> = computeOpenAiUsage(content, resultContent || '', rawResponse);
+                        if (resultContent === null) {
+                            throw new PipelineExecutionError('No response message from OpenAI');
+                        }
+                        return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiAssistantExecutionTools ChatPromptResult', {
+                                content: resultContent,
+                                modelName: 'assistant',
+                                // <- TODO: !!!!!! Can we detect really used model: rawResponse.model || modelName,
+                                timing: {
+                                    start: start,
+                                    complete: complete,
+                                },
+                                usage: usage,
+                                rawPromptContent: rawPromptContent,
+                                rawRequest: rawRequest,
+                                rawResponse: rawResponse,
+                                // <- [🗯]
+                            })];
+                }
+            });
+        });
+    };
+    return OpenAiAssistantExecutionTools;
+}(OpenAiExecutionTools));
+/**
+ * TODO: !!!!!! DO not use colors - can be used in browser
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
+ * TODO: Maybe make custom OpenAiError
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ */
+
 /**
  * @@@
  *
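
Within the hunk above, `UNCERTAIN_USAGE` is a zero-valued usage record with every counter flagged `isUncertain: true`; the assistant tools return it because they cannot yet compute real token counts (see the `computeOpenAiUsage` TODO). A sketch of how such a record propagates, assuming `addUsage` and `UNCERTAIN_USAGE` are both exported from `@promptbook/core`, as the relocated `usage-constants.d.ts` / `addUsage.d.ts` typings in the file list suggest:

```ts
import { addUsage, UNCERTAIN_USAGE } from '@promptbook/core';

// Summing usage records: because every counter in UNCERTAIN_USAGE carries
// `isUncertain: true`, the total should stay flagged as an estimate instead
// of silently claiming an exact price.
const total = addUsage(UNCERTAIN_USAGE, UNCERTAIN_USAGE);
console.info(`~$${total.price.value}`, total.price.isUncertain ? '(uncertain)' : '');
```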
@@ -1705,5 +1990,5 @@ var _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools)
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
 
-export { OPENAI_MODELS, OpenAiExecutionTools, PROMPTBOOK_VERSION, _OpenAiRegistration, createOpenAiExecutionTools };
+export { OPENAI_MODELS, OpenAiAssistantExecutionTools, OpenAiExecutionTools, PROMPTBOOK_VERSION, _OpenAiRegistration, createOpenAiExecutionTools };
 //# sourceMappingURL=index.es.js.map
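
The export hunk above makes `OpenAiAssistantExecutionTools` publicly importable. A hypothetical smoke test of the new class, assuming the prompt object only needs the three fields `callChatModel` destructures and that `apiKey` is forwarded to the OpenAI client; per the TODOs in the class, the assistant id and the thread message are still hardcoded inside `callChatModel`, so this mainly exercises the plumbing:

```ts
import { OpenAiAssistantExecutionTools } from '@promptbook/openai';

const assistant = new OpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY, // <- assumed option name
    isVerbose: true, // logs rawRequest/rawResponse and stream events, as seen above
});

const result = await assistant.callChatModel({
    content: 'Write a breathtaking speech about the meaning of life',
    parameters: {},
    // Only `modelVariant: 'CHAT'` is accepted; specifying `modelName`,
    // `temperature`, `seed` or `maxTokens` throws NotYetImplementedError.
    modelRequirements: { modelVariant: 'CHAT' },
});
console.info(result.content);
```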