@promptbook/cli 0.95.0 → 0.98.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -25,6 +25,10 @@ Write AI applications using plain human language across multiple models and plat
25
25
 
26
26
 
27
27
 
28
+ <blockquote style="color: #ff8811">
29
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at the <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
30
+ </blockquote>
31
+
28
32
  ## 📦 Package `@promptbook/cli`
29
33
 
30
34
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
@@ -110,6 +114,8 @@ Rest of the documentation is common for **entire promptbook ecosystem**:
110
114
 
111
115
  During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
112
116
 
117
+
118
+
113
119
  It's a revolution of writing software in **plain human language** that is understandable and executable by both humans and machines – and it's going to change everything!
114
120
 
115
121
  The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
@@ -235,6 +241,8 @@ Join our growing community of developers and users:
235
241
 
236
242
  _A concise, Markdown-based DSL for crafting AI workflows and automations._
237
243
 
244
+
245
+
238
246
  ### Introduction
239
247
 
240
248
  Book is a Markdown-based language that simplifies the creation of AI applications, workflows, and automations. With human-readable commands, you can define inputs, outputs, personas, knowledge sources, and actions—without needing model-specific details.
@@ -284,6 +292,8 @@ Personas can have access to different knowledge, tools and actions. They can als
284
292
 
285
293
  - [PERSONA](https://github.com/webgptorg/promptbook/blob/main/documents/commands/PERSONA.md)
286
294
 
295
+
296
+
287
297
  ### **3. How:** Knowledge, Instruments and Actions
288
298
 
289
299
  The resources used by the personas are used to do the work.
@@ -383,6 +393,8 @@ The following glossary is used to clarify certain concepts:
383
393
 
384
394
  _Note: This section is not a complete dictionary, more a list of general AI / LLM terms that have a connection with Promptbook_
385
395
 
396
+
397
+
386
398
  ### 💯 Core concepts
387
399
 
388
400
  - [📚 Collection of pipelines](https://github.com/webgptorg/promptbook/discussions/65)
package/esm/index.es.js CHANGED
@@ -47,7 +47,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
47
47
  * @generated
48
48
  * @see https://github.com/webgptorg/promptbook
49
49
  */
50
- const PROMPTBOOK_ENGINE_VERSION = '0.95.0';
50
+ const PROMPTBOOK_ENGINE_VERSION = '0.98.0-2';
51
51
  /**
52
52
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
53
53
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2838,21 +2838,39 @@ function cacheLlmTools(llmTools, options = {}) {
2838
2838
  }
2839
2839
  // TODO: [🧠] !!5 How to do timing in mixed cache / non-cache situation
2840
2840
  // promptResult.timing: FromtoItems
2841
- await storage.setItem(key, {
2842
- date: $getCurrentDate(),
2843
- promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
2844
- bookVersion: BOOK_LANGUAGE_VERSION,
2845
- prompt: {
2846
- ...prompt,
2847
- parameters: Object.entries(parameters).length === Object.entries(relevantParameters).length
2848
- ? parameters
2849
- : {
2850
- ...relevantParameters,
2851
- note: `<- Note: Only relevant parameters are stored in the cache`,
2852
- },
2853
- },
2854
- promptResult,
2855
- });
2841
+ // Check if the result is valid and should be cached
2842
+ // A result is considered failed if:
2843
+ // 1. It has a content property that is null or undefined
2844
+ // 2. It has an error property that is truthy
2845
+ // 3. It has a success property that is explicitly false
2846
+ const isFailedResult = promptResult.content === null ||
2847
+ promptResult.content === undefined ||
2848
+ promptResult.error ||
2849
+ promptResult.success === false;
2850
+ if (!isFailedResult) {
2851
+ await storage.setItem(key, {
2852
+ date: $getCurrentDate(),
2853
+ promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
2854
+ bookVersion: BOOK_LANGUAGE_VERSION,
2855
+ prompt: {
2856
+ ...prompt,
2857
+ parameters: Object.entries(parameters).length === Object.entries(relevantParameters).length
2858
+ ? parameters
2859
+ : {
2860
+ ...relevantParameters,
2861
+ note: `<- Note: Only relevant parameters are stored in the cache`,
2862
+ },
2863
+ },
2864
+ promptResult,
2865
+ });
2866
+ }
2867
+ else if (isVerbose) {
2868
+ console.info('Not caching failed result for key:', key, {
2869
+ content: promptResult.content,
2870
+ error: promptResult.error,
2871
+ success: promptResult.success,
2872
+ });
2873
+ }
2856
2874
  return promptResult;
2857
2875
  };
2858
2876
  if (llmTools.callChatModel !== undefined) {
@@ -14339,6 +14357,66 @@ function startRemoteServer(options) {
14339
14357
  response.setHeader('X-Powered-By', 'Promptbook engine');
14340
14358
  next();
14341
14359
  });
14360
+ // Note: OpenAI-compatible chat completions endpoint
14361
+ app.post('/v1/chat/completions', async (request, response) => {
14362
+ // TODO: !!!! Make more promptbook-native:
14363
+ try {
14364
+ const params = request.body;
14365
+ const { model, messages } = params;
14366
+ // Convert messages to a single prompt
14367
+ const prompt = messages
14368
+ .map((message) => `${message.role}: ${message.content}`)
14369
+ .join('\n');
14370
+ // Get pipeline for the book
14371
+ if (!collection) {
14372
+ throw new Error('No collection available');
14373
+ }
14374
+ const pipeline = await collection.getPipelineByUrl(model);
14375
+ const pipelineExecutor = createPipelineExecutor({
14376
+ pipeline,
14377
+ tools: await getExecutionToolsFromIdentification({
14378
+ isAnonymous: true,
14379
+ llmToolsConfiguration: [],
14380
+ }),
14381
+ });
14382
+ // Execute the pipeline with the prompt content as input
14383
+ const result = await pipelineExecutor({ prompt }).asPromise({ isCrashedOnError: true });
14384
+ if (!result.isSuccessful) {
14385
+ throw new Error(`Failed to execute book: ${result.errors.join(', ')}`);
14386
+ }
14387
+ // Return the result in OpenAI-compatible format
14388
+ response.json({
14389
+ id: 'chatcmpl-' + Math.random().toString(36).substring(2),
14390
+ object: 'chat.completion',
14391
+ created: Math.floor(Date.now() / 1000),
14392
+ model,
14393
+ choices: [
14394
+ {
14395
+ index: 0,
14396
+ message: {
14397
+ role: 'assistant',
14398
+ content: result.outputParameters.response,
14399
+ },
14400
+ finish_reason: 'stop',
14401
+ },
14402
+ ],
14403
+ usage: {
14404
+ prompt_tokens: 0,
14405
+ completion_tokens: 0,
14406
+ total_tokens: 0,
14407
+ },
14408
+ });
14409
+ }
14410
+ catch (error) {
14411
+ response.status(500).json({
14412
+ error: {
14413
+ message: error instanceof Error ? error.message : 'Unknown error',
14414
+ type: 'server_error',
14415
+ code: 'internal_error',
14416
+ },
14417
+ });
14418
+ }
14419
+ });
14342
14420
  // TODO: [🥺] Expose openapiJson to consumer and also allow to add new routes
14343
14421
  app.use(OpenApiValidator.middleware({
14344
14422
  apiSpec: openapiJson,