@promptbook/openai 0.44.0-2 → 0.44.0-21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. package/README.md +716 -2
  2. package/esm/typings/_packages/utils.index.d.ts +5 -3
  3. package/esm/typings/config.d.ts +4 -0
  4. package/esm/typings/execution/plugins/natural-execution-tools/mocked/fakeTextToExpectations.d.ts +9 -1
  5. package/esm/typings/execution/plugins/natural-execution-tools/mocked/fakeTextToExpectations.test.d.ts +1 -0
  6. package/esm/typings/execution/plugins/natural-execution-tools/mocked/faked-completion.test.d.ts +1 -0
  7. package/esm/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionToolsOptions.d.ts +5 -1
  8. package/esm/typings/execution/plugins/script-execution-tools/javascript/utils/unknownToString.d.ts +7 -0
  9. package/esm/typings/execution/utils/checkExpectations.d.ts +25 -0
  10. package/esm/typings/execution/utils/checkExpectations.test.d.ts +1 -0
  11. package/esm/typings/types/Prompt.d.ts +5 -0
  12. package/esm/typings/types/PromptbookJson/PromptTemplateJson.d.ts +2 -0
  13. package/package.json +2 -2
  14. package/umd/typings/_packages/utils.index.d.ts +5 -3
  15. package/umd/typings/config.d.ts +4 -0
  16. package/umd/typings/execution/plugins/natural-execution-tools/mocked/fakeTextToExpectations.d.ts +9 -1
  17. package/umd/typings/execution/plugins/natural-execution-tools/mocked/fakeTextToExpectations.test.d.ts +1 -0
  18. package/umd/typings/execution/plugins/natural-execution-tools/mocked/faked-completion.test.d.ts +1 -0
  19. package/umd/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionToolsOptions.d.ts +5 -1
  20. package/umd/typings/execution/plugins/script-execution-tools/javascript/utils/unknownToString.d.ts +7 -0
  21. package/umd/typings/execution/utils/checkExpectations.d.ts +25 -0
  22. package/umd/typings/execution/utils/checkExpectations.test.d.ts +1 -0
  23. package/umd/typings/types/Prompt.d.ts +5 -0
  24. package/umd/typings/types/PromptbookJson/PromptTemplateJson.d.ts +2 -0
package/README.md CHANGED
@@ -1,5 +1,719 @@
1
- # 📖 Promptbook
1
+ # ![Promptbook logo - cube with letters P and B](./other/design/logo-h1.png) Promptbook
2
2
 
3
3
  Library to supercharge your use of large language models
4
4
 
5
- [Read the manual](https://github.com/webgptorg/promptbook)
5
+ <!--Badges-->
6
+ <!--⚠️WARNING: This section was generated by https://github.com/hejny/batch-project-editor/blob/main/src/workflows/800-badges/badges.ts so every manual change will be overwritten.-->
7
+
8
+ [![License of 📖 Prompt book](https://img.shields.io/github/license/webgptorg/promptbook.svg?style=flat)](https://github.com/webgptorg/promptbook/blob/main/LICENSE)
9
+ [![Known Vulnerabilities](https://snyk.io/test/github/webgptorg/promptbook/badge.svg)](https://snyk.io/test/github/webgptorg/promptbook)
10
+ [![Issues](https://img.shields.io/github/issues/webgptorg/promptbook.svg?style=flat)](https://github.com/webgptorg/promptbook/issues)
11
+ [![Socket Badge](https://socket.dev/api/badge/npm/package/@promptbook/openai)](https://socket.dev/npm/package/@promptbook/openai)
12
+
13
+ <!--/Badges-->
14
+
15
+ ## 📦 Package `@promptbook/openai`
16
+
17
+ - Promptbook is [divided into several packages](#-packages), all published from a [single monorepo](https://github.com/webgptorg/promptbook).
18
+ - This package `@promptbook/openai` is one part of the promptbook ecosystem.
19
+
20
+ To install this package, run:
21
+
22
+ ```bash
23
+ npm i @promptbook/openai
24
+ ```
25
+
26
+ A wrapper around [OpenAI's SDK](https://www.npmjs.com/package/openai) that makes it easier to use OpenAI models inside promptbooks.
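For illustration, here is a minimal sketch of constructing the wrapper. Option names such as `apiKey` and `isVerbose` are assumptions; see `OpenAiExecutionToolsOptions` in the package typings for the exact shape.

```typescript
import { OpenAiExecutionTools } from '@promptbook/openai';

// Implements the NaturalExecutionTools interface used by promptbook executors.
// `apiKey` / `isVerbose` are assumed option names - verify against the typings.
const naturalExecutionTools = new OpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY!,
    isVerbose: true,
});
// Pass `naturalExecutionTools` as the natural execution tool when creating an executor (see the Executor section below).
```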
27
+
28
+ <!--!!! Similar wrappers-->
29
+
30
+
31
+ ---
32
+
33
+ The rest of the documentation is common to the entire Promptbook ecosystem:
34
+
35
+ <!--
36
+ TODO: Probbably remove this section only in packages
37
+ > ⚠ Warning: This library is still in early development.
38
+ -->
39
+
40
+ ## 🤍 Whitepaper
41
+
42
+ When you have a simple, single prompt for ChatGPT, GPT-4, Anthropic Claude, Google Gemini, Llama 2, or whatever, it doesn't matter how it is integrated. Whether it's the direct calling of a REST API, using the SDK, hardcoding the prompt in the source code, or importing a text file, the process remains the same.
43
+
44
+ If you need something more advanced or want to extend the capabilities of LLMs, you generally have three ways to proceed:
45
+
46
+ 1. **Fine-tune** the model to your specifications or even train your own.
47
+ 2. **Prompt-engineer** the prompt to the best shape you can achieve.
48
+ 3. Use **multiple prompts** in a pipeline to get the best result.
49
+
50
+ In any of these situations, but especially in (3), the Promptbook library can make your life easier and act as an **orchestrator for your prompts**.
51
+
52
+ - **Separation of concerns** between prompt engineer and programmer; between code files and prompt files; and between prompts, templates, templating pipelines, and their execution logic.
53
+ - Set up a **common format** for prompts that is interchangeable between projects and language/technology stacks.
54
+ - **Preprocessing** and cleaning the input data from the user.
55
+ - Use default values - **Jokers** to bypass some parts of the pipeline.
56
+ - **Expect** some specific output from the model.
57
+ - **Retry** mismatched outputs.
58
+ - **Combine** multiple models together.
59
+ - **User interaction** - interactive dialogs between the model and the user.
60
+ - Leverage **external** sources (like ChatGPT plugins or OpenAI's GPTs).
61
+ - Simplify your code to be **DRY** and not repeat all the boilerplate code for each prompt.
62
+ - **Versioning** of promptbooks
63
+ - **Reuse** parts of promptbooks in/between projects.
64
+ - Run the LLM **optimally** in parallel, with the best _cost/quality_ ratio or _speed/quality_ ratio.
65
+ - **Execution report** to see what happened during the execution.
66
+ - **Logging** the results of the promptbooks.
67
+ - _(Not ready yet)_ **Caching** calls to LLMs to save money and time.
68
+ - _(Not ready yet)_ Extend one prompt book from another one.
69
+ - _(Not ready yet)_ Leverage the **streaming** to make super cool UI/UX.
70
+ - _(Not ready yet)_ **A/B testing** to determine which prompt works best for the job.
71
+
72
+ ![WebGPT](./other/screencasts/screencast-fiabciakcmgepblmdkmemdbbkilneeeh-2023.10.26-21_46_17.gif)
73
+
74
+ ## 🧔 Promptbook _(for prompt engineers)_
75
+
76
+ A **P**romp**t** **b**oo**k** markdown file (**PTBK** for short, or `.ptbk.md`) is a document that describes a series of prompts chained together to form a recipe for transforming natural language input. Inside a PTBK you can use chat prompts, completion prompts, scripting, or trigger an interaction with the user to ask for additional information.
77
+
78
+ - Multiple promptbooks form a library that becomes a **part of your application codebase**.
79
+ - These promptbooks are designed so that they **can be written by non-programmers**.
80
+
81
+ <!-- TODO: [🧠] Make some more clear escaping -->
82
+
83
+ ### Sample:
84
+
85
+ File `write-website-content.ptbk.md`:
86
+
87
+ <!------------------------[ Sample: ]------------------------>
88
+
89
+ > # 🌍 Create website content
90
+ >
91
+ > Instructions for creating web page content.
92
+ >
93
+ > - PROMPTBOOK URL https://promptbook.webgpt.com/en/write-website-content.ptbk.md@v0.1.0
94
+ > - PROMPTBOOK VERSION 0.0.1
95
+ > - INPUT  PARAM `{rawTitle}` Automatically suggested a site name or empty text
96
+ > - INPUT  PARAM `{rawAssigment}` Automatically generated site entry from image recognition
97
+ > - OUTPUT PARAM `{content}` Web content
98
+ > - OUTPUT PARAM `{keywords}` Keywords
99
+ >
100
+ > ## 👤 Specifying the assigment
101
+ >
102
+ > What is your web about?
103
+ >
104
+ > - PROMPT DIALOG
105
+ >
106
+ > ```
107
+ > {rawAssigment}
108
+ > ```
109
+ >
110
+ > `-> {assigment}` Website assignment and specification
111
+ >
112
+ > ## ✨ Improving the title
113
+ >
114
+ > - MODEL VARIANT Chat
115
+ > - MODEL NAME `gpt-4`
116
+ > - POSTPROCESSING `unwrapResult`
117
+ >
118
+ > ```
119
+ > As an experienced marketing specialist, you have been entrusted with improving the name of your client's business.
120
+ >
121
+ > A suggested name from a client:
122
+ > "{rawTitle}"
123
+ >
124
+ > Assignment from customer:
125
+ >
126
+ > > {assigment}
127
+ >
128
+ > ## Instructions:
129
+ >
130
+ > - Write only one name suggestion
131
+ > - The name will be used on the website, business cards, visuals, etc.
132
+ > ```
133
+ >
134
+ > `-> {enhancedTitle}` Enhanced title
135
+ >
136
+ > ## 👤 Website title approval
137
+ >
138
+ > Is the title for your website okay?
139
+ >
140
+ > - PROMPT DIALOG
141
+ >
142
+ > ```
143
+ > {enhancedTitle}
144
+ > ```
145
+ >
146
+ > `-> {title}` Title for the website
147
+ >
148
+ > ## 🐰 Cunning subtitle
149
+ >
150
+ > - MODEL VARIANT Chat
151
+ > - MODEL NAME `gpt-4`
152
+ > - POSTPROCESSING `unwrapResult`
153
+ >
154
+ > ```
155
+ > As an experienced copywriter, you have been entrusted with creating a claim for the "{title}" web page.
156
+ >
157
+ > A website assignment from a customer:
158
+ >
159
+ > > {assigment}
160
+ >
161
+ > ## Instructions:
162
+ >
163
+ > - Write only one name suggestion
164
+ > - Claim will be used on website, business cards, visuals, etc.
165
+ > - Claim should be punchy, funny, original
166
+ > ```
167
+ >
168
+ > `-> {claim}` Claim for the web
169
+ >
170
+ > ## 🚦 Keyword analysis
171
+ >
172
+ > - MODEL VARIANT Chat
173
+ > - MODEL NAME `gpt-4`
174
+ >
175
+ > ```
176
+ > As an experienced SEO specialist, you have been entrusted with creating keywords for the website "{title}".
177
+ >
178
+ > Website assignment from the customer:
179
+ >
180
+ > > {assigment}
181
+ >
182
+ > ## Instructions:
183
+ >
184
+ > - Write a list of keywords
185
+ > - Keywords are in basic form
186
+ >
187
+ > ## Example:
188
+ >
189
+ > - Ice cream
190
+ > - Olomouc
191
+ > - Quality
192
+ > - Family
193
+ > - Tradition
194
+ > - Italy
195
+ > - Craft
196
+ >
197
+ > ```
198
+ >
199
+ > `-> {keywords}` Keywords
200
+ >
201
+ > ## 🔗 Combine the beginning
202
+ >
203
+ > - SIMPLE TEMPLATE
204
+ >
205
+ > ```
206
+ >
207
+ > # {title}
208
+ >
209
+ > > {claim}
210
+ >
211
+ > ```
212
+ >
213
+ > `-> {contentBeginning}` Beginning of web content
214
+ >
215
+ > ## 🖋 Write the content
216
+ >
217
+ > - MODEL VARIANT Completion
218
+ > - MODEL NAME `gpt-3.5-turbo-instruct`
219
+ >
220
+ > ```
221
+ > As an experienced copywriter and web designer, you have been entrusted with creating text for a new website {title}.
222
+ >
223
+ > A website assignment from a customer:
224
+ >
225
+ > > {assigment}
226
+ >
227
+ > ## Instructions:
228
+ >
229
+ > - Text formatting is in Markdown
230
+ > - Be concise and to the point
231
+ > - Use keywords, but they should be naturally in the text
232
+ > - This is the complete content of the page, so don't forget all the important information and elements the page should contain
233
+ > - Use headings, bullets, text formatting
234
+ >
235
+ > ## Keywords:
236
+ >
237
+ > {keywords}
238
+ >
239
+ > ## Web Content:
240
+ >
241
+ > {contentBeginning}
242
+ > ```
243
+ >
244
+ > `-> {contentBody}` Middle of the web content
245
+ >
246
+ > ## 🔗 Combine the content
247
+ >
248
+ > - SIMPLE TEMPLATE
249
+ >
250
+ > ```markdown
251
+ > {contentBeginning}
252
+ >
253
+ > {contentBody}
254
+ > ```
255
+ >
256
+ > `-> {content}`
257
+
258
+ <!------------------------[ /Sample ]------------------------>
259
+
260
+ The following scheme shows how the promptbook above is executed:
261
+
262
+ ```mermaid
263
+ %% 🔮 Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
264
+
265
+ flowchart LR
266
+ subgraph "🌍 Create website content"
267
+
268
+ direction TB
269
+
270
+ input((Input)):::input
271
+ templateSpecifyingTheAssigment(👤 Specifying the assigment)
272
+ input--"{rawAssigment}"-->templateSpecifyingTheAssigment
273
+ templateImprovingTheTitle(✨ Improving the title)
274
+ input--"{rawTitle}"-->templateImprovingTheTitle
275
+ templateSpecifyingTheAssigment--"{assigment}"-->templateImprovingTheTitle
276
+ templateWebsiteTitleApproval(👤 Website title approval)
277
+ templateImprovingTheTitle--"{enhancedTitle}"-->templateWebsiteTitleApproval
278
+ templateCunningSubtitle(🐰 Cunning subtitle)
279
+ templateWebsiteTitleApproval--"{title}"-->templateCunningSubtitle
280
+ templateSpecifyingTheAssigment--"{assigment}"-->templateCunningSubtitle
281
+ templateKeywordAnalysis(🚦 Keyword analysis)
282
+ templateWebsiteTitleApproval--"{title}"-->templateKeywordAnalysis
283
+ templateSpecifyingTheAssigment--"{assigment}"-->templateKeywordAnalysis
284
+ templateCombineTheBeginning(🔗 Combine the beginning)
285
+ templateWebsiteTitleApproval--"{title}"-->templateCombineTheBeginning
286
+ templateCunningSubtitle--"{claim}"-->templateCombineTheBeginning
287
+ templateWriteTheContent(🖋 Write the content)
288
+ templateWebsiteTitleApproval--"{title}"-->templateWriteTheContent
289
+ templateSpecifyingTheAssigment--"{assigment}"-->templateWriteTheContent
290
+ templateKeywordAnalysis--"{keywords}"-->templateWriteTheContent
291
+ templateCombineTheBeginning--"{contentBeginning}"-->templateWriteTheContent
292
+ templateCombineTheContent(🔗 Combine the content)
293
+ templateCombineTheBeginning--"{contentBeginning}"-->templateCombineTheContent
294
+ templateWriteTheContent--"{contentBody}"-->templateCombineTheContent
295
+
296
+ templateCombineTheContent--"{content}"-->output
297
+ output((Output)):::output
298
+
299
+ classDef input color: grey;
300
+ classDef output color: grey;
301
+
302
+ end;
303
+ ```
304
+
305
+ [More template samples](./samples/templates/)
306
+
307
+ _Note: The sample uses [postprocessing functions](#postprocessing-functions) such as `unwrapResult` to postprocess the results._
308
+
309
+ ## 📦 Packages
310
+
311
+ This library is divided into several packages, all published from a [single monorepo](https://github.com/webgptorg/promptbook):
312
+
313
+ <!--[🔠]-->
314
+
315
+ - **[@promptbook/core](https://www.npmjs.com/package/@promptbook/core)** - Core of the library, it contains the main logic for promptbooks
316
+ - **[@promptbook/utils](https://www.npmjs.com/package/@promptbook/utils)** - Utility functions used in the library but also useful for individual use in preprocessing and postprocessing LLM inputs and outputs
317
+ - _(Not finished)_ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizard for creating+running promptbooks in single line
318
+ - **[@promptbook/execute-javascript](https://www.npmjs.com/package/@promptbook/execute-javascript)** - Execution tools for javascript inside promptbooks
319
+ - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
320
+ - **[@promptbook/remote-client](https://www.npmjs.com/package/@promptbook/remote-client)** - Remote client for remote execution of promptbooks
321
+ - **[@promptbook/remote-server](https://www.npmjs.com/package/@promptbook/remote-server)** - Remote server for remote execution of promptbooks
322
+ - **[@promptbook/types](https://www.npmjs.com/package/@promptbook/types)** - Just typescript types used in the library
323
+ - **[@promptbook/cli](https://www.npmjs.com/package/@promptbook/cli)** - Command line interface utilities for promptbooks
324
+
325
+ ## 📚 Dictionary
326
+
327
+ The following glossary is used to clarify certain basic concepts:
328
+
329
+ ### Prompt
330
+
331
+ A prompt is text along with model requirements, but without any execution or templating logic.
332
+
333
+ For example:
334
+
335
+ ```json
336
+ {
337
+ "request": "Which sound does a cat make?",
338
+ "modelRequirements": {
339
+ "variant": "CHAT"
340
+ }
341
+ }
342
+ ```
343
+
344
+ ```json
345
+ {
346
+ "request": "I am a cat.\nI like to eat fish.\nI like to sleep.\nI like to play with a ball.\nI l",
347
+ "modelRequirements": {
348
+ "variant": "COMPLETION"
349
+ }
350
+ }
351
+ ```
352
+
353
+ ### Prompt Template
354
+
355
+ Similar concept to Prompt, but with templating logic.
356
+
357
+ For example:
358
+
359
+ ```json
360
+ {
361
+ "request": "Which sound does a {animalName} make?",
362
+ "modelRequirements": {
363
+ "variant": "CHAT"
364
+ }
365
+ }
366
+ ```
367
+
368
+ ### Model Requirements
369
+
370
+ Abstract way to specify the LLM.
371
+ It does not specify the LLM with concrete version itself, only the requirements for the LLM.
372
+ _NOT chatgpt-3.5-turbo BUT CHAT variant of GPT-3.5._
373
+
374
+ For example:
375
+
376
+ ```json
377
+ {
378
+ "variant": "CHAT",
379
+ "version": "GPT-3.5",
380
+ "temperature": 0.7
381
+ }
382
+ ```
383
+
384
+ ### Execution type
385
+
386
+ Each block of a promptbook can have a different execution type.
387
+ It is specified in the list of requirements for the block.
388
+ By default, it is `Prompt template`.
389
+
390
+ - _(default)_ `Prompt template` The block is a prompt template and is executed by LLM (OpenAI, Azure,...)
391
+ - `SIMPLE TEMPLATE` The block is a simple text template which is just filled with parameters
392
+ - `Script` The block is a script that is executed by a script runtime; the runtime is determined by the block type. Currently only `javascript` is supported, but we plan to add `python` and `typescript` in the future.
393
+ - `PROMPT DIALOG` Ask user for input
394
+
395
+ ### Parameters
396
+
397
+ Parameters are values placed in the prompt template and replaced to create the prompt.
398
+ Together they form a simple key-value object.
399
+
400
+ ```json
401
+ {
402
+ "animalName": "cat",
403
+ "animalSound": "Meow!"
404
+ }
405
+ ```
406
+
407
+ There are three types of template parameters, depending on how they are used in the promptbook:
408
+
409
+ - **INPUT PARAMETER**s are required to execute the promptbook.
410
+ - **Intermediate parameters** are used internally in the promptbook.
411
+ - **OUTPUT PARAMETER**s are explicitly marked and are returned as the result of the promptbook execution.
412
+
413
+ _Note: A parameter can be both intermediate and output at the same time._
414
+
415
+ ### Promptbook
416
+
417
+ Promptbook is the **core concept of this library**.
418
+ It represents a series of prompt templates chained together to form a **pipeline** / one big prompt template with input and result parameters.
419
+
420
+ Internally it can have multiple formats:
421
+
422
+ - **.ptbk.md file** in custom markdown format described above
423
+ - _(concept)_ **.ptbk** format, a custom file extension based on markdown
424
+ - _(internal)_ **JSON** format, parsed from the .ptbk.md file
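For illustration, here is a sketch of going from the markdown form to the internal JSON form. The helper name `promptbookStringToJson` (assumed to be exported from `@promptbook/core`) and its synchronous signature are assumptions; verify against the current exports.

```typescript
import { readFile } from 'node:fs/promises';
// Assumed @promptbook/core export for parsing .ptbk.md files:
import { promptbookStringToJson } from '@promptbook/core';

// .ptbk.md (markdown written by a prompt engineer) -> internal JSON (consumed by executors)
const promptbookString = await readFile('./write-website-content.ptbk.md', 'utf-8');
const promptbookJson = promptbookStringToJson(promptbookString);
console.log(promptbookJson);
```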
425
+
426
+ ### Promptbook **Library**
427
+
428
+ Library of all promptbooks used in your application.
429
+
430
+ <!-- TODO: !!! Write more -->
431
+
432
+ ### Prompt Result
433
+
434
+ Prompt result is the simplest concept of execution.
435
+ It is the result of executing one prompt _(NOT a template)_.
436
+
437
+ For example:
438
+
439
+ ```json
440
+ {
441
+ "response": "Meow!",
442
+ "model": "chatgpt-3.5-turbo"
443
+ }
444
+ ```
445
+
446
+ ### Execution Tools
447
+
448
+ `ExecutionTools` is an interface which contains all the tools needed to execute prompts.
449
+ It contains 3 subtools:
450
+
451
+ - `NaturalExecutionTools`
452
+ - `ScriptExecutionTools`
453
+ - `UserInterfaceTools`
454
+
455
+ Which are described below:
456
+
457
+ #### Natural Execution Tools
458
+
459
+ `NaturalExecutionTools` is a container for all the tools needed to execute prompts against large language models like GPT-4.
460
+ On its interface it exposes common methods for prompt execution.
461
+ Internally it calls OpenAI, Azure, GPU, proxy, cache, logging,...
462
+
463
+ `NaturalExecutionTools` is an abstract interface that is implemented by concrete execution tools:
464
+
465
+ - `OpenAiExecutionTools`
466
+ - _(Not implemented yet)_ `AnthropicClaudeExecutionTools`
467
+ - _(Not implemented yet)_ `AzureOpenAiExecutionTools`
468
+ - _(Not implemented yet)_ `BardExecutionTools`
469
+ - _(Not implemented yet)_ `LamaExecutionTools`
470
+ - _(Not implemented yet)_ `GpuExecutionTools`
471
+ - A special case is `RemoteNaturalExecutionTools`, which connects to a remote server and runs one of the above execution tools on that server.
472
+ - The second special case is `MockedEchoNaturalExecutionTools`, which is used for testing and mocking.
473
+ - The third special case is `LogNaturalExecutionToolsWrapper`, which is technically also an execution tool, but it is really a proxy wrapper around other execution tools that logs all calls made to them.
474
+
475
+ #### Script Execution Tools
476
+
477
+ `ScriptExecutionTools` is an abstract container that represents all the tools needed to EXECUTE SCRIPTs. It is implemented by concrete execution tools:
478
+
479
+ - `JavascriptExecutionTools` is a wrapper around the `vm2` module that executes javascript code in a sandbox.
480
+ - `JavascriptEvalExecutionTools` is a wrapper around the `eval` function that executes javascript. It is used for testing and mocking and is **NOT intended for production use** due to its unsafe nature; use `JavascriptExecutionTools` instead.
481
+ - _(Not implemented yet)_ `TypescriptExecutionTools` executes typescript code in a sandbox.
482
+ - _(Not implemented yet)_ `PythonExecutionTools` executes python code in a sandbox.
483
+
484
+ There are [postprocessing functions](#postprocessing-functions) that can be used to postprocess the result.
485
+
486
+ #### User Interface Tools
487
+
488
+ `UserInterfaceTools` is an abstract container that represents all the tools needed to interact with the user. It is implemented by concrete execution tools:
489
+
490
+ - _(Not implemented yet)_ `ConsoleInterfaceTools` is a wrapper around `readline` module that interacts with the user via console.
491
+ - `SimplePromptInterfaceTools` is a wrapper around the synchronous `window.prompt` function that interacts with the user via a browser prompt. It is used for testing and mocking and is **NOT intended for production use** due to its synchronous nature.
492
+ - `CallbackInterfaceTools` delegates the user interaction to an async callback function. You need to provide your own implementation of this callback and bind it to your UI. <!-- <- TODO: Provide here a way how to do it with some our plugin -->
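For illustration, here is a sketch of wiring `CallbackInterfaceTools` to your own UI. The import path and the exact shape of the dialog object passed to the callback are assumptions.

```typescript
import { createInterface } from 'node:readline/promises';
import { CallbackInterfaceTools } from '@promptbook/core'; // assumed import path

// Every PROMPT DIALOG step in a promptbook is routed through this async callback,
// so you can render it with any UI and resolve it with the user's answer.
const userInterfaceTools = new CallbackInterfaceTools({
    // `dialog` carries the dialog title, message and default value (assumed shape)
    callback: async (dialog) => {
        const readline = createInterface({ input: process.stdin, output: process.stdout });
        const answer = await readline.question(`${dialog.promptMessage ?? ''} `);
        readline.close();
        return answer;
    },
});
```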
493
+
494
+ ### Executor
495
+
496
+ Executor is a simple async function that takes **input parameters** and returns **output parameters**.
497
+ It is constructed by combining execution tools with a promptbook so they can execute together.
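For illustration, here is a minimal sketch of building and running an executor. The names `createPromptbookExecutor` and `promptbookStringToJson`, the `tools` keys, and the result shape are assumptions about the `@promptbook/core` API of this era; check the current exports before copying.

```typescript
import { readFile } from 'node:fs/promises';
// Assumed @promptbook/core exports:
import { CallbackInterfaceTools, createPromptbookExecutor, promptbookStringToJson } from '@promptbook/core';
import { JavascriptEvalExecutionTools } from '@promptbook/execute-javascript';
import { OpenAiExecutionTools } from '@promptbook/openai';

// Parse the .ptbk.md source into the internal JSON format
const promptbook = promptbookStringToJson(await readFile('./write-website-content.ptbk.md', 'utf-8'));

// Combine the promptbook with concrete execution tools (the `tools` keys are assumed)
const executor = createPromptbookExecutor({
    promptbook,
    tools: {
        natural: new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY! }),
        script: [new JavascriptEvalExecutionTools({})],
        userInterface: new CallbackInterfaceTools({ callback: async () => 'OK' }),
    },
});

// Input parameters in, output parameters out (result shape assumed)
const result = await executor({ rawTitle: 'Ice cream shop', rawAssigment: 'Selling artisan ice cream' });
console.log(result.outputParameters);
```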
498
+
499
+ ### 🃏 Jokers
500
+
501
+ A joker is a previously defined parameter that is used to bypass some parts of the pipeline.
502
+ If a joker is present in the template, it is checked to see whether it meets the requirements (without postprocessing), and if so, it is used instead of executing that prompt template. There can be multiple jokers in a prompt template; if so, they are checked in order and the first one that meets the requirements is used.
503
+
504
+ If none of the jokers meet the requirements, the prompt template is executed as usual.
505
+
506
+ This can be useful, for example, if you want to use some predefined data, or if you want to use some data from the user but you are not sure whether it is in a suitable form.
507
+
508
+ When using jokers, you must have at least one minimal expectation. Without a minimal expectation, the joker would always pass because there is nothing to check, which makes no logical sense.
509
+
510
+ Look at [jokers.ptbk.md](samples/templates/41-jokers.ptbk.md) sample.
511
+
512
+ ### Postprocessing functions
513
+
514
+ You can define postprocessing functions when creating `JavascriptEvalExecutionTools`:
515
+
516
+ ```typescript
517
+ // A sketch (assumed options shape) - custom `functions` become available to POSTPROCESSING commands:
+ const tools = new JavascriptEvalExecutionTools({
518
+     functions: { removeDashes: (value: string) => value.replace(/-/g, '') },
+ });
+ ```
519
+
520
+ Additionally, there are some useful built-in string-manipulation functions, which are [listed here](src/execution/plugins/script-execution-tools/javascript/JavascriptEvalExecutionTools.ts).
521
+
522
+ ### Expectations
523
+
524
+ The `Expect` command describes the desired output of the prompt template (after postprocessing).
525
+ It can set limits for the maximum/minimum length of the output, measured in characters, words, sentences, paragraphs,...
526
+
527
+ _Note: LLMs work with tokens, not characters, but in Promptbooks we want to use some human-recognisable and cross-model interoperable units._
528
+
529
+ ```markdown
530
+ # ✨ Sample: Expectations
531
+
532
+ - PROMPTBOOK URL https://promptbook.example.com/samples/postprocessing-2.ptbk.md@v1
533
+ - PROMPTBOOK VERSION 1.0.0
534
+ - INPUT  PARAMETER {yourName} Name of the hero
535
+
536
+ ## 💬 Question
537
+
538
+ - EXPECT MAX 30 CHARACTERS
539
+ - EXPECT MIN 2 CHARACTERS
540
+ - EXPECT MAX 3 WORDS
541
+ - EXPECT EXACTLY 1 SENTENCE
542
+ - EXPECT EXACTLY 1 LINE
543
+
544
+ ...
545
+ ```
546
+
547
+ There are two types of expectations which are not strictly symmetrical:
548
+
549
+ #### Minimal expectations
550
+
551
+ - `EXPECT MIN 0 ...` is not a valid minimal expectation; it makes no sense.
552
+ - `EXPECT JSON` is both a minimal and a maximal expectation
553
+ - When you use a `JOKER` in the same prompt template, you need at least one minimal expectation
554
+
555
+ #### Maximal expectations
556
+
557
+ - `EXPECT MAX 0 ...` is a valid maximal expectation. For example, you can expect 0 pages and 2 sentences.
558
+ - `EXPECT JSON` is both a minimal and a maximal expectation
559
+
560
+ Look at [expectations.ptbk.md](samples/templates/45-expectations.ptbk.md) and [expect-json.ptbk.md](samples/templates/45-expect-json.ptbk.md) samples for more.
561
+
562
+ <!--
563
+ ### New
564
+ [🥻] Insert here when making new command
565
+ -->
566
+
567
+ ### Execution report
568
+
569
+ Execution report is a simple object or markdown that contains information about the execution of the promptbook.
570
+
571
+ [See the example of such a report](/samples/templates/50-advanced.report.md)
572
+
573
+ <!-- TODO: Write more -->
574
+
575
+ ### Remote server
576
+
577
+ Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
578
+
579
+ You can simply use `RemoteExecutionTools` in client-side javascript and connect to your remote server.
580
+ This is useful when you want to keep all the logic on the browser side without exposing your API keys, or when you do not want to rely on the customer's GPU.
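For illustration, here is a sketch of the client side. The class name `RemoteNaturalExecutionTools` comes from the list above; the import path and the `remoteUrl` option name are assumptions.

```typescript
import { RemoteNaturalExecutionTools } from '@promptbook/remote-client'; // assumed import path

// Prompts are forwarded to your remote server, so the API keys stay server-side.
// `remoteUrl` is an assumed option name - check the package typings.
const natural = new RemoteNaturalExecutionTools({
    remoteUrl: 'https://your-promptbook-server.example.com/',
});
// Use `natural` in place of e.g. OpenAiExecutionTools when assembling an executor.
```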
581
+
582
+ ## 👨‍💻 Usage and integration _(for developers)_
583
+
584
+ <!--
585
+
586
+ TODO: [🧙‍♂️]
587
+
588
+ ### 🧙‍♂️ Using wizzard
589
+
590
+ First you need to install this library:
591
+
592
+ ```bash
593
+ npm install --save @promptbook/wizzard
594
+ ```
595
+
596
+ > TODO: !! Write the Wizzard sample
597
+
598
+ [Usage samples](./samples/usage/)
599
+
600
+ -->
601
+
602
+ ### 🔌 Usage in Typescript / Javascript
603
+
604
+ - [Simple usage](./samples/usage/simple-script)
605
+ - [Usage with client and remote server](./samples/usage/remote)
606
+
607
+ ## ❔ FAQ
608
+
609
+ If you have a question [start a discussion](https://github.com/webgptorg/promptbook/discussions/), [open an issue](https://github.com/webgptorg/promptbook/issues) or [write me an email](https://www.pavolhejny.com/contact).
610
+
611
+ ### Why not just use the OpenAI SDK / Anthropic Claude SDK / ...?
612
+
613
+ They operate at different levels of abstraction. The OpenAI library is for direct use of the OpenAI API. This library works at a higher level of abstraction: it is for creating prompt templates and promptbooks that are independent of the underlying library, LLM model, or even LLM provider.
614
+
615
+ ### How is it different from the Langchain library?
616
+
617
+ Langchain is primarily aimed at ML developers working in Python. This library is for developers working in javascript/typescript and creating applications for end users.
618
+
619
+ We are considering creating a bridge/converter between these two libraries.
620
+
621
+ <!--
622
+
623
+ ==========
624
+ Include:
625
+ - Langchain is the python library and JavaScript is on second place
626
+ - Langchain primarily focused on making templates, not on combining templates into larger structures
627
+ - at the language level it distinguishes between chat and completion, I need to mix the two into one template pipeline
628
+ - for a non-programmer it's quite hard to work with such a thing and write templates - I would much prefer a system that allows non-technical people to write templates (of which there are many more on the market than free pythonists)
629
+ - The focus of promptbooks is primarily on building user applications, not the data processing, training or autogpt.
630
+ -->
631
+
632
+ ### Promptbooks vs. OpenAI's GPTs
633
+
634
+ GPTs are chat assistants that can be assigned to specific tasks and materials. But they are still chat assistants. Promptbooks are a way to orchestrate many more predefined tasks to have much tighter control over the process. Promptbooks are not a good technology for creating human-like chatbots; GPTs are not a good technology for creating outputs with specific requirements.
635
+
636
+ <!--
637
+ TODO:!!!
638
+ ### Promptbooks vs. Semantic Kernel
639
+
640
+
641
+ -->
642
+
643
+ <!--
644
+ TODO:
645
+ ### Promptbooks vs. Langtail
646
+
647
+
648
+ -->
649
+
650
+ <!--
651
+ TODO:
652
+ ### Promptbooks vs. Evidentally AI
653
+
654
+ Logging and monitoring
655
+
656
+ -->
657
+
658
+ ### Where should I store my promptbooks?
659
+
660
+ If you use raw SDKs, you just put prompts in the source code, mixed in with typescript, javascript, python, or whatever programming language you use.
661
+
662
+ If you use promptbooks, you can store them in several places, each with its own advantages and disadvantages:
663
+
664
+ 1. As **source code**, typically git-committed. In this case you can use the versioning system and the promptbooks will be tightly coupled with the version of the application. You still get the power of promptbooks, as you separate the concerns of the prompt-engineer and the programmer.
665
+
666
+ 2. As data in a **database**. In this case, promptbooks are like blog posts / articles. They can be modified independently of the application. You don't need to redeploy the application to change the promptbooks. You can have multiple versions of promptbooks for each user. You can have a web interface for non-programmers to create and modify promptbooks. But you lose the versioning system, and you still have to consider the interface between the promptbooks and the application _(= input and output parameters)_.
667
+
668
+ 3. In a **configuration** in environment variables. This is a good way to store promptbooks if you have an application with multiple deployments and you want to have different but simple promptbooks for each deployment and you don't need to change them often.
669
+
670
+ ### What should I do when I need the same promptbook in multiple human languages?
671
+
672
+ A single promptbook can be written for several _(human)_ languages at once. However, we recommend that you have separate promptbooks for each language.
673
+
674
+ In large language models, you will get better results if you have prompts in the same language as the user input.
675
+
676
+ The best way to manage this is to have suffixed promptbooks like `write-website-content.en.ptbk.md` and `write-website-content.cs.ptbk.md` for each supported language.
677
+
678
+ <!--
679
+ TODO: (Maybe)
680
+ ### Why you need to explicitly specify input and output parameters?
681
+ -->
682
+
683
+ <!--
684
+
685
+
686
+
687
+ !!!!
688
+
689
+
690
+
691
+ unit testing
692
+
693
+ escaping
694
+
695
+ how i get block into prompt
696
+
697
+
698
+ ## 🚷 Limitations
699
+
700
+ function calling
701
+ system message
702
+ iterations
703
+
704
+ -->
705
+
706
+ ## ⌚ Changelog
707
+
708
+ See [CHANGELOG.md](./CHANGELOG.md)
709
+
710
+ <!--Contributing-->
711
+ <!--⚠️WARNING: This section was generated by https://github.com/hejny/batch-project-editor/blob/main/src/workflows/810-contributing/contributing.ts so every manual change will be overwritten.-->
712
+
713
+ ## 🖋️ Contributing
714
+
715
+ I am open to pull requests, feedback, and suggestions. Or if you like this utility, you can [☕ buy me a coffee](https://www.buymeacoffee.com/hejny) or [donate via cryptocurrencies](https://github.com/hejny/hejny/blob/main/documents/crypto.md).
716
+
717
+ You can also ⭐ star the promptbook package, [follow me on GitHub](https://github.com/hejny) or [various other social networks](https://www.pavolhejny.com/contact/).
718
+
719
+ <!--/Contributing-->
@@ -1,6 +1,7 @@
1
1
  import { prettifyPromptbookString } from '../conversion/prettify/prettifyPromptbookString';
2
2
  import { parseNumber } from '../conversion/utils/parseNumber';
3
3
  import { assertsExecutionSuccessful } from '../execution/assertsExecutionSuccessful';
4
+ import { checkExpectations, isPassingExpectations } from '../execution/utils/checkExpectations';
4
5
  import { replaceParameters } from '../execution/utils/replaceParameters';
5
6
  import { executionReportJsonToString } from '../types/execution-report/executionReportJsonToString';
6
7
  import { ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults } from '../types/execution-report/ExecutionReportStringOptions';
@@ -40,20 +41,21 @@ import { removeQuotes } from '../utils/removeQuotes';
40
41
  import { trimCodeBlock } from '../utils/trimCodeBlock';
41
42
  import { trimEndOfCodeBlock } from '../utils/trimEndOfCodeBlock';
42
43
  import { unwrapResult } from '../utils/unwrapResult';
43
- export { assertsExecutionSuccessful, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, extractAllBlocksFromMarkdown, // <- [🌻]
44
+ export { assertsExecutionSuccessful, checkExpectations, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, extractAllBlocksFromMarkdown, // <- [🌻]
44
45
  extractAllListItemsFromMarkdown, extractBlock, // <- [🌻]
45
- extractOneBlockFromMarkdown, isValidJsonString, parseNumber, // <- [🌻]
46
+ extractOneBlockFromMarkdown, isPassingExpectations, isValidJsonString, parseNumber, // <- [🌻]
46
47
  prettifyPromptbookString, removeContentComments, removeEmojis, removeMarkdownFormatting, removeQuotes, replaceParameters, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
47
48
  export { countCharacters, countLines, countPages, countParagraphs, countSentences, CountUtils, countWords };
48
49
  export { splitIntoSentences };
49
50
  export declare const normalizeTo: {
50
51
  camelCase: typeof normalizeTo_camelCase;
51
52
  PascalCase: typeof normalizeTo_PascalCase;
52
- 'SCREAMING-CASE': typeof normalizeTo_SCREAMING_CASE;
53
+ SCREAMING_CASE: typeof normalizeTo_SCREAMING_CASE;
53
54
  snake_case: typeof normalizeTo_snake_case;
54
55
  'kebab-case': typeof normalizeToKebabCase;
55
56
  };
56
57
  export { capitalize, decapitalize, DIACRITIC_VARIANTS_LETTERS, IKeywords, isValidKeyword, nameToUriPart, nameToUriParts, normalizeTo_camelCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_snake_case, normalizeToKebabCase, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, };
57
58
  /**
58
59
  * TODO: [🧠] Maybe create some indipendent package like `markdown-tools` from both here exported and @private utilities
60
+ * Note: [🕙] It does not make sense to have simple lower / UPPER case normalization
59
61
  */
@@ -2,3 +2,7 @@
2
2
  * The maximum number of iterations for a loops
3
3
  */
4
4
  export declare const LOOP_LIMIT = 1000;
5
+ /**
6
+ * The maximum number of iterations for loops that add characters one by one
7
+ */
8
+ export declare const CHARACTER_LOOP_LIMIT = 100000;
@@ -1,7 +1,15 @@
1
1
  import type { Expectations } from '../../../../types/PromptbookJson/PromptTemplateJson';
2
+ import { PostprocessingFunction } from '../../script-execution-tools/javascript/JavascriptExecutionToolsOptions';
2
3
  /**
3
4
  * Gets the expectations and creates a fake text that meets the expectations
4
5
  *
6
+ * Note: You can provide postprocessing functions to modify the text before checking the expectations
7
+ * The result will be the text BEFORE the postprocessing
8
+ *
5
9
  * @private internal util for MockedFackedNaturalExecutionTools
6
10
  */
7
- export declare function $fakeTextToExpectations(expectations: Expectations): string;
11
+ export declare function $fakeTextToExpectations(expectations: Expectations, postprocessing?: Array<PostprocessingFunction>): Promise<string>;
12
+ /**
13
+ * TODO: Implement better - create FakeLLM from this
14
+ * TODO: [💝] Unite object for expecting amount and format - use here also a format
15
+ */
@@ -15,8 +15,12 @@ export type JavascriptExecutionToolsOptions = CommonExecutionToolsOptions & {
15
15
  * Note: There are also some built-in functions available:
16
16
  * @see ./JavascriptEvalExecutionTools.ts
17
17
  */
18
- functions?: Record<string_javascript_name, ((value: string) => Promisable<string>) | Function>;
18
+ functions?: Record<string_javascript_name, PostprocessingFunction>;
19
19
  };
20
+ /**
21
+ * Function that can be used to postprocess the output of the LLM
22
+ */
23
+ export type PostprocessingFunction = ((value: string) => Promisable<string>) | Function;
20
24
  /**
21
25
  * TODO: [🧠][💙] Distinct between options passed into ExecutionTools and to ExecutionTools.execute
22
26
  */
@@ -0,0 +1,7 @@
1
+ /**
2
+ * Converts anything to string that can be used for debugging and logging
3
+ *
4
+ * @param value String value for logging
5
+ * @private Internal util
6
+ */
7
+ export declare function unknownToString(value: unknown): string;
@@ -0,0 +1,25 @@
1
+ import type { Expectations } from '../../types/PromptbookJson/PromptTemplateJson';
2
+ /**
3
+ * Function checkExpectations will check if the expectations on a given value are met
4
+ *
5
+ * Note: There are two similar functions:
6
+ * - `checkExpectations` which throws an error if the expectations are not met
7
+ * - `isPassingExpectations` which returns a boolean
8
+ *
9
+ * @throws {ExpectError} if the expectations are not met
10
+ * @returns {void} Nothing
11
+ */
12
+ export declare function checkExpectations(expectations: Expectations, value: string): void;
13
+ /**
14
+ * Function isPassingExpectations will check if the expectations on a given value are met
15
+ *
16
+ * Note: There are two similar functions:
17
+ * - `checkExpectations` which throws an error if the expectations are not met
18
+ * - `isPassingExpectations` which returns a boolean
19
+ *
20
+ * @returns {boolean} True if the expectations are met
21
+ */
22
+ export declare function isPassingExpectations(expectations: Expectations, value: string): boolean;
23
+ /**
24
+ * TODO: [💝] Unite object for expecting amount and format
25
+ */
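For illustration, here is a hedged sketch of using these two helpers (exported via the utils index per the hunk above). The literal shape of the `Expectations` object below, per-unit `{ min?, max? }` limits, is an assumption inferred from the `EXPECT` commands in the README.

```typescript
import { checkExpectations, isPassingExpectations } from '@promptbook/utils';

// Assumed Expectations shape: per-unit { min?, max? } limits
const expectations = { words: { max: 3 }, sentences: { min: 1, max: 1 } };

console.log(isPassingExpectations(expectations, 'Hello world!')); // -> true
checkExpectations(expectations, 'This answer is far too long to pass the limits above.'); // throws ExpectError
```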
@@ -1,4 +1,5 @@
1
1
  import type { string_name, string_prompt, string_promptbook_url_with_hashtemplate, string_title } from '.././types/typeAliases';
2
+ import { PostprocessingFunction } from '../execution/plugins/script-execution-tools/javascript/JavascriptExecutionToolsOptions';
2
3
  import type { ModelRequirements } from './ModelRequirements';
3
4
  import type { Expectations } from './PromptbookJson/PromptTemplateJson';
4
5
  /**
@@ -24,6 +25,10 @@ export type Prompt = {
24
25
  * Requirements for the model
25
26
  */
26
27
  readonly modelRequirements: ModelRequirements;
28
+ /**
29
+ * List of postprocessing steps that are executed after the prompt
30
+ */
31
+ readonly postprocessing?: Array<PostprocessingFunction>;
27
32
  /**
28
33
  * Expectations for the answer
29
34
  *
@@ -121,6 +121,7 @@ interface PromptTemplateJsonCommon {
121
121
  * Expect this format of the answer
122
122
  *
123
123
  * Note: Expectations are performed after all postprocessing steps
124
+ * @deprecated [💝]
124
125
  */
125
126
  readonly expectFormat?: ExpectFormatCommand['format'];
126
127
  /**
@@ -130,5 +131,6 @@ interface PromptTemplateJsonCommon {
130
131
  }
131
132
  export {};
132
133
  /**
134
+ * TODO: [💝] Unite object for expecting amount and format - remove expectFormat
133
135
  * TODO: use one helper type> (string_prompt | string_javascript | string_markdown) & string_template
134
136
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/openai",
3
- "version": "0.44.0-2",
3
+ "version": "0.44.0-21",
4
4
  "description": "Library to supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -38,7 +38,7 @@
38
38
  "openai": "4.2.0"
39
39
  },
40
40
  "peerDependencies": {
41
- "@promptbook/core": "0.44.0-2"
41
+ "@promptbook/core": "0.44.0-21"
42
42
  },
43
43
  "main": "./umd/index.umd.js",
44
44
  "module": "./esm/index.es.js",
@@ -1,6 +1,7 @@
1
1
  import { prettifyPromptbookString } from '../conversion/prettify/prettifyPromptbookString';
2
2
  import { parseNumber } from '../conversion/utils/parseNumber';
3
3
  import { assertsExecutionSuccessful } from '../execution/assertsExecutionSuccessful';
4
+ import { checkExpectations, isPassingExpectations } from '../execution/utils/checkExpectations';
4
5
  import { replaceParameters } from '../execution/utils/replaceParameters';
5
6
  import { executionReportJsonToString } from '../types/execution-report/executionReportJsonToString';
6
7
  import { ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults } from '../types/execution-report/ExecutionReportStringOptions';
@@ -40,20 +41,21 @@ import { removeQuotes } from '../utils/removeQuotes';
40
41
  import { trimCodeBlock } from '../utils/trimCodeBlock';
41
42
  import { trimEndOfCodeBlock } from '../utils/trimEndOfCodeBlock';
42
43
  import { unwrapResult } from '../utils/unwrapResult';
43
- export { assertsExecutionSuccessful, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, extractAllBlocksFromMarkdown, // <- [🌻]
44
+ export { assertsExecutionSuccessful, checkExpectations, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, extractAllBlocksFromMarkdown, // <- [🌻]
44
45
  extractAllListItemsFromMarkdown, extractBlock, // <- [🌻]
45
- extractOneBlockFromMarkdown, isValidJsonString, parseNumber, // <- [🌻]
46
+ extractOneBlockFromMarkdown, isPassingExpectations, isValidJsonString, parseNumber, // <- [🌻]
46
47
  prettifyPromptbookString, removeContentComments, removeEmojis, removeMarkdownFormatting, removeQuotes, replaceParameters, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
47
48
  export { countCharacters, countLines, countPages, countParagraphs, countSentences, CountUtils, countWords };
48
49
  export { splitIntoSentences };
49
50
  export declare const normalizeTo: {
50
51
  camelCase: typeof normalizeTo_camelCase;
51
52
  PascalCase: typeof normalizeTo_PascalCase;
52
- 'SCREAMING-CASE': typeof normalizeTo_SCREAMING_CASE;
53
+ SCREAMING_CASE: typeof normalizeTo_SCREAMING_CASE;
53
54
  snake_case: typeof normalizeTo_snake_case;
54
55
  'kebab-case': typeof normalizeToKebabCase;
55
56
  };
56
57
  export { capitalize, decapitalize, DIACRITIC_VARIANTS_LETTERS, IKeywords, isValidKeyword, nameToUriPart, nameToUriParts, normalizeTo_camelCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_snake_case, normalizeToKebabCase, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, };
57
58
  /**
58
59
  * TODO: [🧠] Maybe create some indipendent package like `markdown-tools` from both here exported and @private utilities
60
+ * Note: [🕙] It does not make sense to have simple lower / UPPER case normalization
59
61
  */
@@ -2,3 +2,7 @@
2
2
  * The maximum number of iterations for a loops
3
3
  */
4
4
  export declare const LOOP_LIMIT = 1000;
5
+ /**
6
+ * The maximum number of iterations for loops that add characters one by one
7
+ */
8
+ export declare const CHARACTER_LOOP_LIMIT = 100000;
@@ -1,7 +1,15 @@
1
1
  import type { Expectations } from '../../../../types/PromptbookJson/PromptTemplateJson';
2
+ import { PostprocessingFunction } from '../../script-execution-tools/javascript/JavascriptExecutionToolsOptions';
2
3
  /**
3
4
  * Gets the expectations and creates a fake text that meets the expectations
4
5
  *
6
+ * Note: You can provide postprocessing functions to modify the text before checking the expectations
7
+ * The result will be the text BEFORE the postprocessing
8
+ *
5
9
  * @private internal util for MockedFackedNaturalExecutionTools
6
10
  */
7
- export declare function $fakeTextToExpectations(expectations: Expectations): string;
11
+ export declare function $fakeTextToExpectations(expectations: Expectations, postprocessing?: Array<PostprocessingFunction>): Promise<string>;
12
+ /**
13
+ * TODO: Implement better - create FakeLLM from this
14
+ * TODO: [💝] Unite object for expecting amount and format - use here also a format
15
+ */
@@ -15,8 +15,12 @@ export type JavascriptExecutionToolsOptions = CommonExecutionToolsOptions & {
15
15
  * Note: There are also some built-in functions available:
16
16
  * @see ./JavascriptEvalExecutionTools.ts
17
17
  */
18
- functions?: Record<string_javascript_name, ((value: string) => Promisable<string>) | Function>;
18
+ functions?: Record<string_javascript_name, PostprocessingFunction>;
19
19
  };
20
+ /**
21
+ * Function that can be used to postprocess the output of the LLM
22
+ */
23
+ export type PostprocessingFunction = ((value: string) => Promisable<string>) | Function;
20
24
  /**
21
25
  * TODO: [🧠][💙] Distinct between options passed into ExecutionTools and to ExecutionTools.execute
22
26
  */
@@ -0,0 +1,7 @@
1
+ /**
2
+ * Converts anything to string that can be used for debugging and logging
3
+ *
4
+ * @param value String value for logging
5
+ * @private Internal util
6
+ */
7
+ export declare function unknownToString(value: unknown): string;
@@ -0,0 +1,25 @@
1
+ import type { Expectations } from '../../types/PromptbookJson/PromptTemplateJson';
2
+ /**
3
+ * Function checkExpectations will check if the expectations on a given value are met
4
+ *
5
+ * Note: There are two similar functions:
6
+ * - `checkExpectations` which throws an error if the expectations are not met
7
+ * - `isPassingExpectations` which returns a boolean
8
+ *
9
+ * @throws {ExpectError} if the expectations are not met
10
+ * @returns {void} Nothing
11
+ */
12
+ export declare function checkExpectations(expectations: Expectations, value: string): void;
13
+ /**
14
+ * Function isPassingExpectations will check if the expectations on a given value are met
15
+ *
16
+ * Note: There are two similar functions:
17
+ * - `checkExpectations` which throws an error if the expectations are not met
18
+ * - `isPassingExpectations` which returns a boolean
19
+ *
20
+ * @returns {boolean} True if the expectations are met
21
+ */
22
+ export declare function isPassingExpectations(expectations: Expectations, value: string): boolean;
23
+ /**
24
+ * TODO: [💝] Unite object for expecting amount and format
25
+ */
@@ -1,4 +1,5 @@
1
1
  import type { string_name, string_prompt, string_promptbook_url_with_hashtemplate, string_title } from '.././types/typeAliases';
2
+ import { PostprocessingFunction } from '../execution/plugins/script-execution-tools/javascript/JavascriptExecutionToolsOptions';
2
3
  import type { ModelRequirements } from './ModelRequirements';
3
4
  import type { Expectations } from './PromptbookJson/PromptTemplateJson';
4
5
  /**
@@ -24,6 +25,10 @@ export type Prompt = {
24
25
  * Requirements for the model
25
26
  */
26
27
  readonly modelRequirements: ModelRequirements;
28
+ /**
29
+ * List of postprocessing steps that are executed after the prompt
30
+ */
31
+ readonly postprocessing?: Array<PostprocessingFunction>;
27
32
  /**
28
33
  * Expectations for the answer
29
34
  *
@@ -121,6 +121,7 @@ interface PromptTemplateJsonCommon {
121
121
  * Expect this format of the answer
122
122
  *
123
123
  * Note: Expectations are performed after all postprocessing steps
124
+ * @deprecated [💝]
124
125
  */
125
126
  readonly expectFormat?: ExpectFormatCommand['format'];
126
127
  /**
@@ -130,5 +131,6 @@ interface PromptTemplateJsonCommon {
130
131
  }
131
132
  export {};
132
133
  /**
134
+ * TODO: [💝] Unite object for expecting amount and format - remove expectFormat
133
135
  * TODO: use one helper type> (string_prompt | string_javascript | string_markdown) & string_template
134
136
  */