@promptbook/cli 0.69.0 → 0.69.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -21,7 +21,7 @@ import OpenAI from 'openai';
21
21
  /**
22
22
  * The version of the Promptbook library
23
23
  */
24
- var PROMPTBOOK_VERSION = '0.69.0-21';
24
+ var PROMPTBOOK_VERSION = '0.69.0';
25
25
  // TODO: [main] !!!! List here all the versions and annotate + put into script
26
26
 
27
27
  /*! *****************************************************************************
@@ -10030,6 +10030,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
10030
10030
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
10031
10031
  return __awaiter(this, void 0, void 0, function () {
10032
10032
  var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
10033
+ var _this = this;
10033
10034
  return __generator(this, function (_a) {
10034
10035
  switch (_a.label) {
10035
10036
  case 0:
@@ -10068,7 +10069,12 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
10068
10069
  if (this.options.isVerbose) {
10069
10070
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
10070
10071
  }
10071
- return [4 /*yield*/, client.messages.create(rawRequest)];
10072
+ return [4 /*yield*/, client.messages.create(rawRequest).catch(function (error) {
10073
+ if (_this.options.isVerbose) {
10074
+ console.info(colors.bgRed('error'), error);
10075
+ }
10076
+ throw error;
10077
+ })];
10072
10078
  case 2:
10073
10079
  rawResponse = _a.sent();
10074
10080
  if (this.options.isVerbose) {
@@ -10141,7 +10147,14 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
10141
10147
  if (this.options.isVerbose) {
10142
10148
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
10143
10149
  }
10144
- const rawResponse = await this.client.completions.create(rawRequest);
10150
+ const rawResponse = await this.client.completions.create(rawRequest).catch((error) => {
10151
+ if (this.options.isVerbose) {
10152
+ console.info(colors.bgRed('error'), error);
10153
+ }
10154
+ throw error;
10155
+ });
10156
+
10157
+
10145
10158
  if (this.options.isVerbose) {
10146
10159
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
10147
10160
  }
@@ -10781,6 +10794,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
10781
10794
  var _a, _b;
10782
10795
  return __awaiter(this, void 0, void 0, function () {
10783
10796
  var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
10797
+ var _this = this;
10784
10798
  return __generator(this, function (_c) {
10785
10799
  switch (_c.label) {
10786
10800
  case 0:
@@ -10827,7 +10841,12 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
10827
10841
  console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
10828
10842
  }
10829
10843
  rawRequest = [modelName, messages, modelSettings];
10830
- return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
10844
+ return [4 /*yield*/, this.withTimeout(client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))).catch(function (error) {
10845
+ if (_this.options.isVerbose) {
10846
+ console.info(colors.bgRed('error'), error);
10847
+ }
10848
+ throw error;
10849
+ })];
10831
10850
  case 3:
10832
10851
  rawResponse = _c.sent();
10833
10852
  if (this.options.isVerbose) {
@@ -10879,6 +10898,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
10879
10898
  var _a, _b;
10880
10899
  return __awaiter(this, void 0, void 0, function () {
10881
10900
  var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
10901
+ var _this = this;
10882
10902
  return __generator(this, function (_c) {
10883
10903
  switch (_c.label) {
10884
10904
  case 0:
@@ -10917,7 +10937,12 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
10917
10937
  [rawPromptContent],
10918
10938
  modelSettings,
10919
10939
  ];
10920
- return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
10940
+ return [4 /*yield*/, this.withTimeout(client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))).catch(function (error) {
10941
+ if (_this.options.isVerbose) {
10942
+ console.info(colors.bgRed('error'), error);
10943
+ }
10944
+ throw error;
10945
+ })];
10921
10946
  case 3:
10922
10947
  rawResponse = _c.sent();
10923
10948
  if (this.options.isVerbose) {
@@ -10960,6 +10985,22 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
10960
10985
  });
10961
10986
  };
10962
10987
  // <- Note: [🤖] callXxxModel
10988
+ /**
10989
+ * Library `@azure/openai` has bug/weird behavior that it does not throw error but hangs forever
10990
+ *
10991
+ * This method wraps the promise with timeout
10992
+ */
10993
+ AzureOpenAiExecutionTools.prototype.withTimeout = function (promise) {
10994
+ return new Promise(function (resolve, reject) {
10995
+ var timeout = setTimeout(function () {
10996
+ reject(new PipelineExecutionError('Timeout'));
10997
+ }, CONNECTION_TIMEOUT_MS);
10998
+ promise.then(function (result) {
10999
+ clearTimeout(timeout);
11000
+ resolve(result);
11001
+ }, reject);
11002
+ });
11003
+ };
10963
11004
  /**
10964
11005
  * Changes Azure error (which is not proper Error but object) to proper Error
10965
11006
  */
@@ -11185,6 +11226,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
11185
11226
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
11186
11227
  return __awaiter(this, void 0, void 0, function () {
11187
11228
  var content, parameters, modelRequirements, format, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
11229
+ var _this = this;
11188
11230
  return __generator(this, function (_a) {
11189
11231
  switch (_a.label) {
11190
11232
  case 0:
@@ -11231,7 +11273,12 @@ var OpenAiExecutionTools = /** @class */ (function () {
11231
11273
  if (this.options.isVerbose) {
11232
11274
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
11233
11275
  }
11234
- return [4 /*yield*/, client.chat.completions.create(rawRequest)];
11276
+ return [4 /*yield*/, client.chat.completions.create(rawRequest).catch(function (error) {
11277
+ if (_this.options.isVerbose) {
11278
+ console.info(colors.bgRed('error'), error);
11279
+ }
11280
+ throw error;
11281
+ })];
11235
11282
  case 2:
11236
11283
  rawResponse = _a.sent();
11237
11284
  if (this.options.isVerbose) {
@@ -11274,6 +11321,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
11274
11321
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
11275
11322
  return __awaiter(this, void 0, void 0, function () {
11276
11323
  var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
11324
+ var _this = this;
11277
11325
  return __generator(this, function (_a) {
11278
11326
  switch (_a.label) {
11279
11327
  case 0:
@@ -11303,7 +11351,12 @@ var OpenAiExecutionTools = /** @class */ (function () {
11303
11351
  if (this.options.isVerbose) {
11304
11352
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
11305
11353
  }
11306
- return [4 /*yield*/, client.completions.create(rawRequest)];
11354
+ return [4 /*yield*/, client.completions.create(rawRequest).catch(function (error) {
11355
+ if (_this.options.isVerbose) {
11356
+ console.info(colors.bgRed('error'), error);
11357
+ }
11358
+ throw error;
11359
+ })];
11307
11360
  case 2:
11308
11361
  rawResponse = _a.sent();
11309
11362
  if (this.options.isVerbose) {
@@ -11343,6 +11396,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
11343
11396
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
11344
11397
  return __awaiter(this, void 0, void 0, function () {
11345
11398
  var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
11399
+ var _this = this;
11346
11400
  return __generator(this, function (_a) {
11347
11401
  switch (_a.label) {
11348
11402
  case 0:
@@ -11367,7 +11421,12 @@ var OpenAiExecutionTools = /** @class */ (function () {
11367
11421
  if (this.options.isVerbose) {
11368
11422
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
11369
11423
  }
11370
- return [4 /*yield*/, client.embeddings.create(rawRequest)];
11424
+ return [4 /*yield*/, client.embeddings.create(rawRequest).catch(function (error) {
11425
+ if (_this.options.isVerbose) {
11426
+ console.info(colors.bgRed('error'), error);
11427
+ }
11428
+ throw error;
11429
+ })];
11371
11430
  case 2:
11372
11431
  rawResponse = _a.sent();
11373
11432
  if (this.options.isVerbose) {