te.js 2.1.5 → 2.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,6 +7,8 @@
7
7
  import { env } from 'tej-env';
8
8
 
9
9
// Canonical value lists for the errors.llm config fields; all three are
// exported below so callers can validate or display the accepted options.
const MESSAGE_TYPES = /** @type {const} */ (['endUser', 'developer']);
const LLM_MODES = /** @type {const} */ (['sync', 'async']);
const LLM_CHANNELS = /** @type {const} */ (['console', 'log', 'both']);
10
12
 
11
13
  /**
12
14
  * Normalize messageType to 'endUser' | 'developer'.
@@ -14,14 +16,56 @@ const MESSAGE_TYPES = /** @type {const} */ (['endUser', 'developer']);
14
16
  * @returns {'endUser'|'developer'}
15
17
  */
16
18
/**
 * Normalize messageType to 'endUser' | 'developer'.
 * Accepts any casing and surrounding whitespace; 'developer' and 'dev'
 * map to 'developer', everything else (endUser, end_user, empty, unknown)
 * falls back to 'endUser'.
 * @param {string} v
 * @returns {'endUser'|'developer'}
 */
function normalizeMessageType(v) {
  const normalized = String(v ?? '').trim().toLowerCase();
  return normalized === 'developer' || normalized === 'dev'
    ? 'developer'
    : 'endUser';
}
21
25
 
26
+ /**
27
+ * Normalize mode to 'sync' | 'async'.
28
+ * @param {string} v
29
+ * @returns {'sync'|'async'}
30
+ */
31
/**
 * Normalize mode to 'sync' | 'async'.
 * Case-insensitive, whitespace-tolerant; anything other than 'async'
 * (including empty/null) resolves to 'sync'.
 * @param {string} v
 * @returns {'sync'|'async'}
 */
function normalizeMode(v) {
  const normalized = String(v ?? '').trim().toLowerCase();
  return normalized === 'async' ? 'async' : 'sync';
}
38
+
39
+ /**
40
+ * Normalize channel to 'console' | 'log' | 'both'.
41
+ * @param {string} v
42
+ * @returns {'console'|'log'|'both'}
43
+ */
44
/**
 * Normalize channel to 'console' | 'log' | 'both'.
 * Case-insensitive, whitespace-tolerant; unrecognized or empty values
 * default to 'console'.
 * @param {string} v
 * @returns {'console'|'log'|'both'}
 */
function normalizeChannel(v) {
  switch (String(v ?? '').trim().toLowerCase()) {
    case 'log':
      return 'log';
    case 'both':
      return 'both';
    default:
      return 'console';
  }
}
52
+
22
53
  /**
23
54
  * Resolve errors.llm config from env (feature-specific then LLM_ fallback).
24
- * @returns {{ enabled: boolean, baseURL: string, apiKey: string, model: string, messageType: 'endUser'|'developer' }}
55
+ * @returns {{
56
+ * enabled: boolean,
57
+ * baseURL: string,
58
+ * apiKey: string,
59
+ * model: string,
60
+ * messageType: 'endUser'|'developer',
61
+ * mode: 'sync'|'async',
62
+ * timeout: number,
63
+ * channel: 'console'|'log'|'both',
64
+ * logFile: string,
65
+ * rateLimit: number,
66
+ * cache: boolean,
67
+ * cacheTTL: number,
68
+ * }}
25
69
  */
26
70
  export function getErrorsLlmConfig() {
27
71
  const enabledRaw = env('ERRORS_LLM_ENABLED') ?? '';
@@ -45,11 +89,50 @@ export function getErrorsLlmConfig() {
45
89
  env('LLM_APIKEY') ??
46
90
  '';
47
91
 
48
- const model =
49
- env('ERRORS_LLM_MODEL') ?? env('LLM_MODEL') ?? '';
92
+ const model = env('ERRORS_LLM_MODEL') ?? env('LLM_MODEL') ?? '';
50
93
 
51
94
  const messageTypeRaw =
52
- env('ERRORS_LLM_MESSAGE_TYPE') ?? env('ERRORS_LLM_MESSAGETYPE') ?? env('LLM_MESSAGE_TYPE') ?? env('LLM_MESSAGETYPE') ?? '';
95
+ env('ERRORS_LLM_MESSAGE_TYPE') ??
96
+ env('ERRORS_LLM_MESSAGETYPE') ??
97
+ env('LLM_MESSAGE_TYPE') ??
98
+ env('LLM_MESSAGETYPE') ??
99
+ '';
100
+
101
+ const modeRaw = env('ERRORS_LLM_MODE') ?? env('LLM_MODE') ?? '';
102
+
103
+ const timeoutRaw = env('ERRORS_LLM_TIMEOUT') ?? env('LLM_TIMEOUT') ?? '';
104
+ const timeoutNum = Number(timeoutRaw);
105
+ const timeout =
106
+ !timeoutRaw || isNaN(timeoutNum) || timeoutNum <= 0 ? 10000 : timeoutNum;
107
+
108
+ const channelRaw = env('ERRORS_LLM_CHANNEL') ?? env('LLM_CHANNEL') ?? '';
109
+
110
+ const logFile =
111
+ String(env('ERRORS_LLM_LOG_FILE') ?? '').trim() || './errors.llm.log';
112
+
113
+ const rateLimitRaw =
114
+ env('ERRORS_LLM_RATE_LIMIT') ?? env('LLM_RATE_LIMIT') ?? '';
115
+ const rateLimitNum = Number(rateLimitRaw);
116
+ const rateLimit =
117
+ !rateLimitRaw || isNaN(rateLimitNum) || rateLimitNum <= 0
118
+ ? 10
119
+ : Math.floor(rateLimitNum);
120
+
121
+ const cacheRaw = env('ERRORS_LLM_CACHE') ?? '';
122
+ const cache =
123
+ cacheRaw === false ||
124
+ cacheRaw === 'false' ||
125
+ cacheRaw === '0' ||
126
+ cacheRaw === 0
127
+ ? false
128
+ : true;
129
+
130
+ const cacheTTLRaw = env('ERRORS_LLM_CACHE_TTL') ?? '';
131
+ const cacheTTLNum = Number(cacheTTLRaw);
132
+ const cacheTTL =
133
+ !cacheTTLRaw || isNaN(cacheTTLNum) || cacheTTLNum <= 0
134
+ ? 3600000
135
+ : cacheTTLNum;
53
136
 
54
137
  return {
55
138
  enabled: Boolean(enabled),
@@ -57,18 +140,35 @@ export function getErrorsLlmConfig() {
57
140
  apiKey: String(apiKey ?? '').trim(),
58
141
  model: String(model ?? '').trim(),
59
142
  messageType: normalizeMessageType(messageTypeRaw || 'endUser'),
143
+ mode: normalizeMode(modeRaw),
144
+ timeout,
145
+ channel: normalizeChannel(channelRaw),
146
+ logFile,
147
+ rateLimit,
148
+ cache,
149
+ cacheTTL,
60
150
  };
61
151
  }
62
152
 
63
- export { MESSAGE_TYPES };
153
+ export { MESSAGE_TYPES, LLM_MODES, LLM_CHANNELS };
64
154
 
65
155
  /**
66
156
  * Validate errors.llm when enabled: require baseURL, apiKey, and model (after LLM_ fallback).
157
+ * Also warns about misconfigurations (e.g. channel set with sync mode).
67
158
  * Call at takeoff. Throws if enabled but config is invalid; no-op otherwise.
68
159
  * @throws {Error} If errors.llm.enabled is true but any of baseURL, apiKey, or model is missing
69
160
  */
70
161
  export function validateErrorsLlmAtTakeoff() {
71
- const { enabled, baseURL, apiKey, model } = getErrorsLlmConfig();
162
+ const {
163
+ enabled,
164
+ baseURL,
165
+ apiKey,
166
+ model,
167
+ mode,
168
+ channel,
169
+ rateLimit,
170
+ cacheTTL,
171
+ } = getErrorsLlmConfig();
72
172
  if (!enabled) return;
73
173
 
74
174
  const missing = [];
@@ -81,4 +181,34 @@ export function validateErrorsLlmAtTakeoff() {
81
181
  `errors.llm is enabled but required config is missing: ${missing.join(', ')}. Set these env vars or disable errors.llm.enabled.`,
82
182
  );
83
183
  }
184
+
185
+ // Warn about channel set while mode is sync (channel only applies in async mode).
186
+ const channelRaw = String(
187
+ env('ERRORS_LLM_CHANNEL') ?? env('LLM_CHANNEL') ?? '',
188
+ ).trim();
189
+ if (mode === 'sync' && channelRaw) {
190
+ console.warn(
191
+ `[Tejas] errors.llm: channel="${channel}" is set but mode is "sync" — channel output only applies in async mode. Set ERRORS_LLM_MODE=async to use it.`,
192
+ );
193
+ }
194
+
195
+ // Warn about invalid numeric values that were silently reset to defaults.
196
+ const rateLimitRaw = String(
197
+ env('ERRORS_LLM_RATE_LIMIT') ?? env('LLM_RATE_LIMIT') ?? '',
198
+ ).trim();
199
+ if (
200
+ rateLimitRaw &&
201
+ (isNaN(Number(rateLimitRaw)) || Number(rateLimitRaw) <= 0)
202
+ ) {
203
+ console.warn(
204
+ `[Tejas] errors.llm: rateLimit value "${rateLimitRaw}" is invalid; defaulting to 10.`,
205
+ );
206
+ }
207
+
208
+ const cacheTTLRaw = String(env('ERRORS_LLM_CACHE_TTL') ?? '').trim();
209
+ if (cacheTTLRaw && (isNaN(Number(cacheTTLRaw)) || Number(cacheTTLRaw) <= 0)) {
210
+ console.warn(
211
+ `[Tejas] errors.llm: cacheTTL value "${cacheTTLRaw}" is invalid; defaulting to 3600000.`,
212
+ );
213
+ }
84
214
  }