@promptbook/openai 0.100.0-45 → 0.100.0-46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,30 +1,16 @@
1
- export type ChatMessage = UserChatMessage | PromptbookPersonaChatMessage;
2
- export interface UserChatMessage {
3
- id: string;
4
- date: Date;
5
- from: 'USER';
6
- content: string;
7
- isComplete: boolean;
8
- expectedAnswer?: string;
9
- isVoiceCall?: boolean;
10
- }
11
- export interface PromptbookPersonaChatMessage {
1
+ import type { string_markdown } from '../../../types/typeAliases';
2
+ import type { string_name } from '../../../types/typeAliases';
3
+ /**
4
+ * A message in the chat
5
+ *
6
+ * @public exported from `@promptbook/components`
7
+ */
8
+ export type ChatMessage = {
12
9
  id: string;
13
10
  date: Date;
14
- from: 'PROMPTBOOK_PERSONA';
15
- avatar?: string | {
16
- src: string;
17
- width?: number;
18
- height?: number;
19
- };
20
- content: string;
21
- isComplete: boolean;
11
+ from: string_name;
12
+ content: string_markdown;
13
+ isComplete?: boolean;
22
14
  expectedAnswer?: string;
23
15
  isVoiceCall?: boolean;
24
- }
25
- export interface CompleteChatMessage {
26
- isComplete: true;
27
- }
28
- /**
29
- * TODO: [🧠] ACRY Rename PROMPTBOOK_PERSONA + USER, Teacher, teacher to sth else
30
- */
16
+ };
@@ -0,0 +1,12 @@
1
+ import type { string_name } from '../../../types/typeAliases';
2
+ import type { string_url_image } from '../../../types/typeAliases';
3
+ /**
4
+ * A participant in the chat
5
+ *
6
+ * @public exported from `@promptbook/components`
7
+ */
8
+ export type ChatParticipant = {
9
+ name: string_name;
10
+ avatarUrl?: string_url_image;
11
+ color: string;
12
+ };
@@ -301,6 +301,13 @@ export declare const DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME = "getPipelin
301
301
  * @public exported from `@promptbook/core`
302
302
  */
303
303
  export declare const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
304
+ /**
305
+ * API request timeout in milliseconds
306
+ * Can be overridden via API_REQUEST_TIMEOUT environment variable
307
+ *
308
+ * @public exported from `@promptbook/core`
309
+ */
310
+ export declare const API_REQUEST_TIMEOUT: number;
304
311
  /**
305
312
  * URL of the Promptbook logo
306
313
  *
@@ -110,6 +110,14 @@ export type AbstractTask<TTaskResult extends AbstractTaskResult> = {
110
110
  * Gets a promise that resolves with the task result
111
111
  */
112
112
  asPromise(options?: {
113
+ /**
114
+ * Whether the task throws on error
115
+ *
116
+ * - If `true`, the returned promise will reject when an error occurs
117
+ * - If `false`, the promise will resolve with an object containing all listed errors, warnings, and the partial result
118
+ *
119
+ * @default true
120
+ */
113
121
  readonly isCrashedOnError?: boolean;
114
122
  }): Promise<TTaskResult>;
115
123
  /**
@@ -82,6 +82,14 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
82
82
  * Default model for completion variant.
83
83
  */
84
84
  protected abstract getDefaultEmbeddingModel(): AvailableModel;
85
+ /**
86
+ * Makes a request with retry logic for network errors like ECONNRESET
87
+ */
88
+ private makeRequestWithRetry;
89
+ /**
90
+ * Determines if an error is retryable (network-related errors)
91
+ */
92
+ private isRetryableNetworkError;
85
93
  }
86
94
  /**
87
95
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
@@ -0,0 +1,5 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
3
+ /**
4
+ * Note: [⚫] Code in this file should never be published in any package
5
+ */
@@ -0,0 +1,21 @@
1
+ import type { really_any } from './really_any';
2
+ /**
3
+ * Does nothing, but preserves the function in the bundle
4
+ * Compiler is tricked into thinking the function is used
5
+ *
6
+ * @param value any function to preserve
7
+ * @returns nothing
8
+ * @private within the repository
9
+ */
10
+ export declare function $preserve(...value: Array<really_any>): void;
11
+ /**
12
+ * DO NOT USE THIS FUNCTION
13
+ * The only purpose of this function is to trick the compiler and JavaScript engine
14
+ * that `_preserved` array can be used in the future and should not be garbage collected
15
+ *
16
+ * @private internal for `preserve`
17
+ */
18
+ export declare function __DO_NOT_USE_getPreserved(): Array<really_any>;
19
+ /**
20
+ * Note: [💞] Ignore a discrepancy between file name and entity name
21
+ */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.100.0-44`).
18
+ * It follows semantic versioning (e.g., `0.100.0-45`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/openai",
3
- "version": "0.100.0-45",
3
+ "version": "0.100.0-46",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -102,7 +102,7 @@
102
102
  "module": "./esm/index.es.js",
103
103
  "typings": "./esm/typings/src/_packages/openai.index.d.ts",
104
104
  "peerDependencies": {
105
- "@promptbook/core": "0.100.0-45"
105
+ "@promptbook/core": "0.100.0-46"
106
106
  },
107
107
  "dependencies": {
108
108
  "bottleneck": "^2.19.5",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-45';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-46';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -316,6 +316,13 @@
316
316
  * @public exported from `@promptbook/core`
317
317
  */
318
318
  const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
319
+ /**
320
+ * API request timeout in milliseconds
321
+ * Can be overridden via API_REQUEST_TIMEOUT environment variable
322
+ *
323
+ * @public exported from `@promptbook/core`
324
+ */
325
+ const API_REQUEST_TIMEOUT = parseInt(process.env.API_REQUEST_TIMEOUT || '90000');
319
326
  /**
320
327
  * Note: [💞] Ignore a discrepancy between file name and entity name
321
328
  * TODO: [🧠][🧜‍♂️] Maybe join remoteServerUrl and path into single value
@@ -1959,7 +1966,18 @@
1959
1966
  const openAiOptions = { ...this.options };
1960
1967
  delete openAiOptions.isVerbose;
1961
1968
  delete openAiOptions.userId;
1962
- this.client = new OpenAI__default["default"](openAiOptions);
1969
+ // Enhanced configuration for better ECONNRESET handling
1970
+ const enhancedOptions = {
1971
+ ...openAiOptions,
1972
+ timeout: API_REQUEST_TIMEOUT,
1973
+ maxRetries: CONNECTION_RETRIES_LIMIT,
1974
+ defaultHeaders: {
1975
+ Connection: 'keep-alive',
1976
+ 'Keep-Alive': 'timeout=30, max=100',
1977
+ ...openAiOptions.defaultHeaders,
1978
+ },
1979
+ };
1980
+ this.client = new OpenAI__default["default"](enhancedOptions);
1963
1981
  }
1964
1982
  return this.client;
1965
1983
  }
@@ -2047,7 +2065,7 @@
2047
2065
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2048
2066
  }
2049
2067
  const rawResponse = await this.limiter
2050
- .schedule(() => client.chat.completions.create(rawRequest))
2068
+ .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
2051
2069
  .catch((error) => {
2052
2070
  assertsError(error);
2053
2071
  if (this.options.isVerbose) {
@@ -2123,7 +2141,7 @@
2123
2141
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2124
2142
  }
2125
2143
  const rawResponse = await this.limiter
2126
- .schedule(() => client.completions.create(rawRequest))
2144
+ .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
2127
2145
  .catch((error) => {
2128
2146
  assertsError(error);
2129
2147
  if (this.options.isVerbose) {
@@ -2187,7 +2205,7 @@
2187
2205
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2188
2206
  }
2189
2207
  const rawResponse = await this.limiter
2190
- .schedule(() => client.embeddings.create(rawRequest))
2208
+ .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
2191
2209
  .catch((error) => {
2192
2210
  assertsError(error);
2193
2211
  if (this.options.isVerbose) {
@@ -2245,6 +2263,76 @@
2245
2263
  }
2246
2264
  return model;
2247
2265
  }
2266
+ // <- Note: [🤖] getDefaultXxxModel
2267
+ /**
2268
+ * Makes a request with retry logic for network errors like ECONNRESET
2269
+ */
2270
+ async makeRequestWithRetry(requestFn) {
2271
+ let lastError;
2272
+ for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
2273
+ try {
2274
+ return await requestFn();
2275
+ }
2276
+ catch (error) {
2277
+ assertsError(error);
2278
+ lastError = error;
2279
+ // Check if this is a retryable network error
2280
+ const isRetryableError = this.isRetryableNetworkError(error);
2281
+ if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
2282
+ if (this.options.isVerbose) {
2283
+ console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
2284
+ }
2285
+ throw error;
2286
+ }
2287
+ // Calculate exponential backoff delay
2288
+ const baseDelay = 1000; // 1 second
2289
+ const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
2290
+ const jitterDelay = Math.random() * 500; // Add some randomness
2291
+ const totalDelay = backoffDelay + jitterDelay;
2292
+ if (this.options.isVerbose) {
2293
+ console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
2294
+ }
2295
+ // Wait before retrying
2296
+ await new Promise((resolve) => setTimeout(resolve, totalDelay));
2297
+ }
2298
+ }
2299
+ throw lastError;
2300
+ }
2301
+ /**
2302
+ * Determines if an error is retryable (network-related errors)
2303
+ */
2304
+ isRetryableNetworkError(error) {
2305
+ const errorMessage = error.message.toLowerCase();
2306
+ const errorCode = error.code;
2307
+ // Network connection errors that should be retried
2308
+ const retryableErrors = [
2309
+ 'econnreset',
2310
+ 'enotfound',
2311
+ 'econnrefused',
2312
+ 'etimedout',
2313
+ 'socket hang up',
2314
+ 'network error',
2315
+ 'fetch failed',
2316
+ 'connection reset',
2317
+ 'connection refused',
2318
+ 'timeout',
2319
+ ];
2320
+ // Check error message
2321
+ if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
2322
+ return true;
2323
+ }
2324
+ // Check error code
2325
+ if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
2326
+ return true;
2327
+ }
2328
+ // Check for specific HTTP status codes that are retryable
2329
+ const errorWithStatus = error;
2330
+ const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
2331
+ if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
2332
+ return true;
2333
+ }
2334
+ return false;
2335
+ }
2248
2336
  }
2249
2337
  /**
2250
2338
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`