@far-world-labs/verblets 0.1.4 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/.github/workflows/ci.yml +38 -43
  2. package/.vitest.config.examples.js +4 -0
  3. package/DEVELOPING.md +1 -1
  4. package/package.json +9 -9
  5. package/scripts/clear-redis.js +74 -0
  6. package/src/chains/conversation/README.md +26 -0
  7. package/src/chains/conversation/index.examples.js +398 -0
  8. package/src/chains/conversation/index.js +126 -0
  9. package/src/chains/conversation/index.spec.js +148 -0
  10. package/src/chains/conversation/turn-policies.js +93 -0
  11. package/src/chains/conversation/turn-policies.md +123 -0
  12. package/src/chains/conversation/turn-policies.spec.js +135 -0
  13. package/src/chains/expect/index.js +34 -0
  14. package/src/chains/intersections/README.md +20 -6
  15. package/src/chains/intersections/index.examples.js +9 -8
  16. package/src/chains/intersections/index.js +39 -187
  17. package/src/chains/llm-logger/README.md +291 -133
  18. package/src/chains/llm-logger/index.js +451 -65
  19. package/src/chains/llm-logger/index.spec.js +85 -24
  20. package/src/chains/llm-logger/schema.json +105 -0
  21. package/src/chains/set-interval/index.examples.js +34 -6
  22. package/src/chains/set-interval/index.js +53 -32
  23. package/src/chains/themes/index.js +2 -2
  24. package/src/constants/common.js +7 -1
  25. package/src/constants/models.js +21 -9
  26. package/src/index.js +14 -4
  27. package/src/lib/assert/README.md +84 -0
  28. package/src/lib/assert/index.js +50 -0
  29. package/src/lib/ring-buffer/README.md +50 -428
  30. package/src/lib/ring-buffer/index.js +148 -987
  31. package/src/lib/ring-buffer/index.spec.js +388 -0
  32. package/src/verblets/conversation-turn/README.md +33 -0
  33. package/src/verblets/conversation-turn/index.examples.js +218 -0
  34. package/src/verblets/conversation-turn/index.js +68 -0
  35. package/src/verblets/conversation-turn/index.spec.js +77 -0
  36. package/src/verblets/conversation-turn-multi/README.md +31 -0
  37. package/src/verblets/conversation-turn-multi/index.examples.js +160 -0
  38. package/src/verblets/conversation-turn-multi/index.js +104 -0
  39. package/src/verblets/conversation-turn-multi/index.spec.js +63 -0
  40. package/src/verblets/intent/index.examples.js +1 -1
  41. package/src/verblets/intersection/index.js +46 -5
  42. package/src/verblets/people-list/README.md +28 -0
  43. package/src/verblets/people-list/index.examples.js +184 -0
  44. package/src/verblets/people-list/index.js +44 -0
  45. package/src/verblets/people-list/index.spec.js +49 -0
  46. package/scripts/version-bump.js +0 -33
package/src/chains/llm-logger/index.spec.js CHANGED
@@ -1,6 +1,8 @@
  import { beforeEach, describe, expect, it, vi, afterEach } from 'vitest';
+
+ vi.useFakeTimers();
  import { createLLMLogger, createConsoleWriter, createFileWriter } from './index.js';
- import { setLogger, resetLogger } from '../../lib/logger-service/index.js';
+ import { resetLogger } from '../../lib/logger-service/index.js';
 
  // Mock console methods
  const mockConsoleLog = vi.fn();
@@ -58,15 +60,18 @@ describe('LLM Logger - Factory Pattern', () => {
    describe('Global Logger Service Integration', () => {
      it('should work as global logger', async () => {
        const logger = createLLMLogger({
+         immediateFlush: true, // Enable immediate flushing for tests
          lanes: [
            {
-             laneId: 'console',
+             laneId: 'global',
              writer: createConsoleWriter('[GLOBAL] '),
            },
          ],
        });
 
-       setLogger(logger);
+       logger.log('test log');
+       logger.info('test info');
+       logger.error('test error');
 
        // Use global logger service methods
        const { log, info, error } = await import('../../lib/logger-service/index.js');
@@ -76,11 +81,17 @@ describe('LLM Logger - Factory Pattern', () => {
        error('test error');
 
        // Allow flush loops to complete
-       await new Promise((resolve) => setTimeout(resolve, 150));
+       await vi.advanceTimersByTimeAsync(150);
 
-       expect(mockConsoleLog).toHaveBeenCalledWith('[GLOBAL] test log');
-       expect(mockConsoleLog).toHaveBeenCalledWith('[GLOBAL] test info');
-       expect(mockConsoleLog).toHaveBeenCalledWith('[GLOBAL] test error');
+       expect(mockConsoleLog).toHaveBeenCalledWith(
+         expect.stringContaining('[GLOBAL] {"data":"test log"')
+       );
+       expect(mockConsoleLog).toHaveBeenCalledWith(
+         expect.stringContaining('[GLOBAL] {"data":"test info"')
+       );
+       expect(mockConsoleLog).toHaveBeenCalledWith(
+         expect.stringContaining('[GLOBAL] {"data":"test error"')
+       );
      });
    });
 
@@ -125,16 +136,17 @@ describe('LLM Logger - Factory Pattern', () => {
      const infoWriter = vi.fn();
 
      const logger = createLLMLogger({
+       immediateFlush: true, // Enable immediate flushing for tests
        lanes: [
          {
            laneId: 'errors',
-           filters: (log) => log.meta.get('level') === 'error',
            writer: errorWriter,
+           filters: (log) => log.meta.get('level') === 'error',
          },
          {
            laneId: 'info',
-           filters: (log) => log.meta.get('level') === 'info',
            writer: infoWriter,
+           filters: (log) => log.meta.get('level') === 'info',
          },
        ],
      });
@@ -144,16 +156,27 @@ describe('LLM Logger - Factory Pattern', () => {
      logger.debug('debug message'); // Should not match any lane
 
      // Allow flush loops to complete
-     await new Promise((resolve) => setTimeout(resolve, 150));
-
-     expect(errorWriter).toHaveBeenCalledWith(['error message']);
-     expect(infoWriter).toHaveBeenCalledWith(['info message']);
+     await vi.advanceTimersByTimeAsync(150);
+
+     expect(errorWriter).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'error',
+         data: 'error message',
+       }),
+     ]);
+     expect(infoWriter).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'info',
+         data: 'info message',
+       }),
+     ]);
    });
 
    it('should handle lanes without filters', async () => {
      const allWriter = vi.fn();
 
      const logger = createLLMLogger({
+       immediateFlush: true, // Enable immediate flushing for tests
        lanes: [
          {
            laneId: 'all',
@@ -166,11 +189,21 @@ describe('LLM Logger - Factory Pattern', () => {
      logger.info({ type: 'object log' });
 
      // Allow flush loops to complete
-     await new Promise((resolve) => setTimeout(resolve, 150));
+     await vi.advanceTimersByTimeAsync(150);
 
      expect(allWriter).toHaveBeenCalledTimes(2);
-     expect(allWriter).toHaveBeenCalledWith(['string log']);
-     expect(allWriter).toHaveBeenCalledWith(['{"type":"object log"}']);
+     expect(allWriter).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'log',
+         data: 'string log',
+       }),
+     ]);
+     expect(allWriter).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'info',
+         type: 'object log',
+       }),
+     ]);
    });
  });
 
@@ -211,7 +244,13 @@ describe('LLM Logger - Factory Pattern', () => {
      // Manual flush
      logger.flush();
 
-     expect(writer).toHaveBeenCalledWith(['test message']);
+     // Check that writer received object with expected properties
+     expect(writer).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'log',
+         data: 'test message',
+       }),
+     ]);
    });
 
    it('should clear ring buffer and lane buffers', () => {
@@ -277,6 +316,7 @@ describe('LLM Logger - Factory Pattern', () => {
      const writer = vi.fn();
 
      const logger = createLLMLogger({
+       immediateFlush: true, // Enable immediate flushing for tests
        lanes: [
          {
            laneId: 'mixed',
@@ -291,18 +331,39 @@ describe('LLM Logger - Factory Pattern', () => {
      logger.log(null);
 
      // Allow flush loops to complete
-     await new Promise((resolve) => setTimeout(resolve, 150));
-
-     expect(writer).toHaveBeenCalledWith(['string']);
-     expect(writer).toHaveBeenCalledWith(['{"object":"data"}']);
-     expect(writer).toHaveBeenCalledWith(['123']);
-     expect(writer).toHaveBeenCalledWith(['null']);
+     await vi.advanceTimersByTimeAsync(150);
+
+     expect(writer).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'log',
+         data: 'string',
+       }),
+     ]);
+     expect(writer).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'log',
+         object: 'data',
+       }),
+     ]);
+     expect(writer).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'log',
+         data: 123,
+       }),
+     ]);
+     expect(writer).toHaveBeenCalledWith([
+       expect.objectContaining({
+         level: 'log',
+         data: null,
+       }),
+     ]);
    });
 
    it('should handle high-volume logging', async () => {
      const writer = vi.fn();
 
      const logger = createLLMLogger({
+       immediateFlush: true, // Enable immediate flushing for tests
        ringBufferSize: 100,
        lanes: [
          {
@@ -318,7 +379,7 @@ describe('LLM Logger - Factory Pattern', () => {
      }
 
      // Allow flush loops to complete
-     await new Promise((resolve) => setTimeout(resolve, 200));
+     await vi.advanceTimersByTimeAsync(200);
 
      // Should have processed all logs
      expect(writer).toHaveBeenCalledTimes(50);
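
Note: the rewritten assertions pin down the 0.2.0 writer contract — lane writers receive batches of structured entries (`{ level, data, ... }`) rather than pre-serialized strings, and `immediateFlush: true` bypasses the timer-driven flush loop. A minimal consumer-side sketch of that contract; the factory options and filter shape come from the spec above, while the deep-import path is only an assumption for illustration:

```js
import { createLLMLogger } from '@far-world-labs/verblets/src/chains/llm-logger/index.js'; // path is illustrative

// Writers are handed arrays of structured entries, not strings.
const errorWriter = (entries) => {
  for (const entry of entries) {
    console.error(`[errors] ${entry.level}:`, entry.data);
  }
};

const logger = createLLMLogger({
  immediateFlush: true, // flush on write instead of waiting for the flush loop
  lanes: [
    {
      laneId: 'errors',
      writer: errorWriter,
      filters: (log) => log.meta.get('level') === 'error', // same filter shape as the spec
    },
  ],
});

logger.error('database connection lost'); // routed to the errors lane
```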
package/src/chains/llm-logger/schema.json ADDED
@@ -0,0 +1,105 @@
+ {
+   "$schema": "http://json-schema.org/draft-07/schema#",
+   "title": "LLM Logger Bulk Adjustments Schema",
+   "description": "Schema for bulk log adjustments returned by LLM processors",
+   "type": "array",
+   "items": {
+     "type": "object",
+     "properties": {
+       "logId": {
+         "type": "string",
+         "description": "Unique identifier of the log entry to adjust"
+       },
+       "adjustments": {
+         "type": "object",
+         "description": "Key-value pairs of JSON paths to values for non-destructive enhancement",
+         "patternProperties": {
+           "^[a-zA-Z0-9_.]+$": {
+             "description": "JSON path (e.g., 'analysis.sentiment', 'meta.category') mapped to any value"
+           }
+         },
+         "additionalProperties": true,
+         "examples": [
+           {
+             "analysis.sentiment": "positive",
+             "analysis.confidence": 0.95,
+             "category.type": "user-activity",
+             "metadata.processed": true
+           }
+         ]
+       },
+       "aiMeta": {
+         "type": "object",
+         "description": "AI-specific metadata that controls processing but is not included in output",
+         "properties": {
+           "skip": {
+             "type": "boolean",
+             "description": "Whether this log should be skipped in output lanes",
+             "default": false
+           },
+           "confidence": {
+             "type": "number",
+             "minimum": 0,
+             "maximum": 1,
+             "description": "Confidence score for the analysis (0.0 to 1.0)"
+           },
+           "processingOrder": {
+             "type": "integer",
+             "description": "Suggested processing order for multi-stage processing"
+           },
+           "tags": {
+             "type": "array",
+             "items": {
+               "type": "string"
+             },
+             "description": "Tags for categorization and filtering"
+           },
+           "priority": {
+             "type": "string",
+             "enum": ["low", "medium", "high", "critical"],
+             "description": "Priority level for the log entry"
+           }
+         },
+         "additionalProperties": true
+       }
+     },
+     "required": ["logId"],
+     "additionalProperties": false
+   },
+   "examples": [
+     [
+       {
+         "logId": "1750305969596-u6n7hyd9v",
+         "adjustments": {
+           "analysis.sentiment": "positive",
+           "analysis.confidence": 0.95,
+           "analysis.keywords": ["user", "authentication", "successful"],
+           "category.type": "user-activity",
+           "category.subcategory": "authentication"
+         },
+         "aiMeta": {
+           "skip": false,
+           "confidence": 0.95,
+           "tags": ["auth", "success"],
+           "priority": "medium"
+         }
+       },
+       {
+         "logId": "1750305969597-6t0ys0zat",
+         "adjustments": {
+           "analysis.sentiment": "negative",
+           "analysis.confidence": 0.88,
+           "analysis.error_type": "connection",
+           "category.type": "system-error",
+           "category.severity": "high"
+         },
+         "aiMeta": {
+           "skip": false,
+           "confidence": 0.88,
+           "tags": ["error", "database", "connection"],
+           "priority": "high"
+         }
+       }
+     ]
+   ]
+ }
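
Note: since LLM processors are expected to return data matching this schema, the contract can be enforced at runtime with any draft-07 validator. A sketch using Ajv — Ajv is not a dependency declared in this diff, just a common choice for illustration:

```js
import { readFileSync } from 'node:fs';
import Ajv from 'ajv'; // illustrative; not a dependency added by this release

const schema = JSON.parse(readFileSync(new URL('./schema.json', import.meta.url), 'utf8'));
const validate = new Ajv().compile(schema);

// Payload shape taken from the schema's own examples.
const adjustments = [
  {
    logId: '1750305969596-u6n7hyd9v',
    adjustments: { 'analysis.sentiment': 'positive', 'analysis.confidence': 0.95 },
    aiMeta: { skip: false, confidence: 0.95, tags: ['auth', 'success'], priority: 'medium' },
  },
];

if (!validate(adjustments)) {
  console.error(validate.errors); // lists the offending instance path per failure
}
```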
package/src/chains/set-interval/index.examples.js CHANGED
@@ -1,4 +1,4 @@
- import { describe, it } from 'vitest';
+ import { describe, it, expect } from 'vitest';
  import setInterval from './index.js';
  import { longTestTimeout } from '../../constants/common.js';
 
@@ -6,14 +6,28 @@ describe('setInterval (example)', () => {
    it(
      'adjusts meditation sessions using wearable stress levels',
      async () => {
+       const results = [];
        const stop = setInterval({
          prompt:
            'Current stress level: {stress}. Start at 3 min. If stress > 70, shorten by 1 min; if below 30, lengthen by 2 min.',
          getData: () => ({ stress: Math.floor(Math.random() * 100) }),
-         onTick: ({ data }) => console.log(`Meditation session with stress level: ${data.stress}`),
+         onTick: ({ data }) => {
+           results.push(data);
+         },
        });
-       await new Promise((r) => setTimeout(r, 5000));
+
+       // Wait longer to allow for LLM processing time
+       await new Promise((r) => setTimeout(r, 10000));
        stop();
+
+       // Assert that the function executed and collected stress data
+       expect(results.length).toBeGreaterThan(0);
+       expect(
+         results.every(
+           (result) =>
+             typeof result.stress === 'number' && result.stress >= 0 && result.stress <= 100
+         )
+       ).toBe(true);
      },
      longTestTimeout
    );
@@ -21,15 +35,29 @@ describe('setInterval (example)', () => {
    it(
      'paces game events to match player skill',
      async () => {
+       const results = [];
+
        const stop = setInterval({
          prompt:
            'Player win rate: {winRate}%. Begin at 10 sec. If winRate > 80, decrease by 2 sec; if under 40, increase by 5 sec.',
          getData: () => ({ winRate: Math.floor(Math.random() * 100) }),
-         onTick: ({ data }) =>
-           console.log(`Game event triggered for player with ${data.winRate}% win rate`),
+         onTick: ({ data }) => {
+           results.push(data);
+         },
        });
-       await new Promise((r) => setTimeout(r, 5000));
+
+       // Wait longer to allow for LLM processing time
+       await new Promise((r) => setTimeout(r, 10000));
        stop();
+
+       // Assert that the function executed and collected win rate data
+       expect(results.length).toBeGreaterThan(0);
+       expect(
+         results.every(
+           (result) =>
+             typeof result.winRate === 'number' && result.winRate >= 0 && result.winRate <= 100
+         )
+       ).toBe(true);
      },
      longTestTimeout
    );
package/src/chains/set-interval/index.js CHANGED
@@ -74,18 +74,19 @@ export default function setInterval({
    const step = async () => {
      if (!active) return;
 
-     // Get data for AI decision making
-     lastResult = await getData({
-       count,
-       lastInvocationResult: lastResult,
-       initial,
-     });
+     try {
+       // Get data for AI decision making
+       lastResult = await getData({
+         count,
+         lastInvocationResult: lastResult,
+         initial,
+       });
 
-     // Replace {variable} placeholders in the prompt with actual values from lastResult
-     const processedPrompt = templateReplace(prompt, lastResult);
+       // Replace {variable} placeholders in the prompt with actual values from lastResult
+       const processedPrompt = templateReplace(prompt, lastResult);
 
-     // Always invoke the prompt to determine the next interval
-     const intervalPrompt = `${contentIsInstructions} ${processedPrompt}
+       // Always invoke the prompt to determine the next interval
+       const intervalPrompt = `${contentIsInstructions} ${processedPrompt}
 
  ${explainAndSeparate} ${explainAndSeparatePrimitive}
 
@@ -95,30 +96,50 @@ ${wrapVariable(history, { tag: 'history', title: 'History:', forceHTML: true })}
  ${wrapVariable(count, { tag: 'count', title: 'Count:' })}
  Next wait:`;
 
-     const intervalText = await chatGPT(intervalPrompt, {
-       modelOptions: model ? { modelName: model, ...llm } : { ...llm },
-       ...options,
-     });
-
-     history.push(intervalText);
-     if (history.length > historySize) history.shift();
-
-     const delay = await toMs(intervalText, { llm, ...options });
-
-     // Call onTick callback if provided - this is when the tick happens
-     if (onTick) {
-       const nextTime = new Date(Date.now() + delay);
-       await onTick({
-         timingString: intervalText,
-         data: lastResult,
-         nextDate: nextTime,
+       const intervalText = await chatGPT(intervalPrompt, {
+         modelOptions: model ? { modelName: model, ...llm } : { ...llm },
+         ...options,
        });
-     }
-
-     count += 1;
 
-     // Schedule the next iteration
-     timer = setTimeout(step, delay);
+       history.push(intervalText);
+       if (history.length > historySize) history.shift();
+
+       const delay = await toMs(intervalText, { llm, ...options });
+
+       // Call onTick callback if provided - this is when the tick happens
+       if (onTick) {
+         const nextTime = new Date(Date.now() + delay);
+         await onTick({
+           timingString: intervalText,
+           data: lastResult,
+           nextDate: nextTime,
+         });
+       }
+
+       count += 1;
+
+       // Schedule the next iteration
+       timer = setTimeout(step, delay);
+     } catch (error) {
+       console.error('Error in setInterval step:', error);
+
+       // Call onTick with the data we have, even if LLM failed
+       if (onTick && lastResult) {
+         await onTick({
+           timingString: 'error - using fallback',
+           data: lastResult,
+           nextDate: new Date(Date.now() + 1000), // 1 second fallback
+           error: error.message,
+         });
+       }
+
+       count += 1;
+
+       // Continue with a fallback delay of 1 second
+       if (active) {
+         timer = setTimeout(step, 1000);
+       }
+     }
    };
 
    // Start immediately - the prompt will determine the first interval
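
Note: the new try/catch changes the failure mode — a throwing `getData` or LLM call no longer kills the loop; `onTick` still fires (when data exists) with an `error` field, and the next attempt is scheduled one second out. A sketch of what a caller can now observe; the callback fields come from the code above, the surrounding setup is illustrative:

```js
const queue = []; // illustrative data source

const stop = setInterval({
  prompt: 'Queue depth: {depth}. Start at 5 sec; poll faster as depth grows.',
  getData: () => ({ depth: queue.length }),
  onTick: ({ timingString, data, nextDate, error }) => {
    if (error) {
      // LLM call failed: timingString is 'error - using fallback' and the
      // retry is scheduled ~1 second out instead of the loop dying.
      console.warn(`degraded tick (${error}); retrying at ${nextDate.toISOString()}`);
      return;
    }
    console.log(`next wait "${timingString}" at depth ${data.depth}`);
  },
});

// Later: stop() cancels the loop.
```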
package/src/chains/themes/index.js CHANGED
@@ -11,7 +11,7 @@ export default async function themes(text, config = {}) {
    const { chunkSize = 5, topN, llm, ...options } = config;
    const pieces = splitText(text);
    const reducePrompt =
-     'Update the accumulator with short themes from this text. Avoid duplicates. Return a comma-separated list of themes.';
+     'Update the accumulator with short themes from this text. Avoid duplicates. Return ONLY a comma-separated list of themes with no explanation or additional text.';
    const firstPass = await bulkReduce(shuffle(pieces), reducePrompt, { chunkSize, llm, ...options });
    const rawThemes = firstPass
      .split(',')
@@ -19,7 +19,7 @@ export default async function themes(text, config = {}) {
      .filter(Boolean);
 
    const limitText = topN ? `Limit to the top ${topN} themes.` : 'Return all meaningful themes.';
-   const refinePrompt = `Refine the accumulator by merging similar themes. ${limitText} Return a comma-separated list.`;
+   const refinePrompt = `Refine the accumulator by merging similar themes. ${limitText} Return ONLY a comma-separated list with no explanation or additional text.`;
    const final = await bulkReduce(rawThemes, refinePrompt, { chunkSize, llm, ...options });
    return final
      .split(',')
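
Note: the "Return ONLY ..." phrasing matters because both passes split the model's reply on commas, so any explanatory prose would be parsed as bogus theme entries. Calling convention per the signature above, with illustrative input:

```js
import themes from './index.js';

const articleText = '...long document text...'; // illustrative
const topThemes = await themes(articleText, { topN: 5, chunkSize: 5 });
// topThemes is a plain array of theme strings, e.g. ['pricing', 'onboarding', ...],
// kept clean by the stricter comma-separated-list-only prompts.
```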
package/src/constants/common.js CHANGED
@@ -1,7 +1,13 @@
- export const longTestTimeout = 120000;
+ export const longTestTimeout = 10 * 60 * 1000; // 10 minutes
 
  export const maxRetries = 3;
 
  export const retryDelay = 1000;
 
  export const debugToObject = process.env.DEBUG_TO_OBJECT ?? false;
+
+ // Utility to conditionally skip long-running examples
+ // Set ENABLE_LONG_EXAMPLES=true to run all examples
+ // Set ENABLE_LONG_EXAMPLES=false or leave unset to skip long examples
+ export const shouldRunLongExamples =
+   process.env.ENABLE_LONG_EXAMPLES === 'true' || process.env.ENABLE_LONG_EXAMPLES === '1';
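
Note: `shouldRunLongExamples` pairs naturally with Vitest's conditional helpers. One way a suite might consume it — sketched here, not something this diff itself adds:

```js
import { describe, it } from 'vitest';
import { longTestTimeout, shouldRunLongExamples } from '../../constants/common.js';

describe('expensive example', () => {
  // Runs only when ENABLE_LONG_EXAMPLES=true (or =1) is set in the environment.
  it.runIf(shouldRunLongExamples)(
    'exercises a full LLM round trip',
    async () => {
      // ...long-running body...
    },
    longTestTimeout
  );
});
```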
package/src/constants/models.js CHANGED
@@ -1,9 +1,10 @@
- // Importing dotenv config to load environment variables from .env file
- // eslint-disable-next-line no-unused-vars
- import dotenv from 'dotenv/config';
+ // Environment variables loaded in index.js
 
  const _models = {};
 
+ // Function to get API key at runtime
+ const getOpenAIKey = () => process.env.OPENAI_API_KEY;
+
  const systemPrompt = `You are a superintelligent processing unit, answering prompts with precise instructions.
  You are a small but critical component in a complex system, so your role in giving quality outputs to your given inputs and instructions is critical.
  You must obey those instructions to the letter at all costs--do not deviate or add your own interpretation or flair. Stick to the instructions.
@@ -45,7 +46,9 @@ _models.fastCheapMulti = {
    maxContextWindow: 128_000,
    maxOutputTokens: 16_384,
    requestTimeout: 20_000,
-   apiKey: process.env.OPENAI_API_KEY ?? 'undefined',
+   get apiKey() {
+     return getOpenAIKey();
+   },
    apiUrl: 'https://api.openai.com/',
    systemPrompt,
  };
@@ -59,7 +62,9 @@ _models.goodMulti = {
    maxContextWindow: 128_000,
    maxOutputTokens: 16_384,
    requestTimeout: 20_000,
-   apiKey: process.env.OPENAI_API_KEY ?? 'undefined',
+   get apiKey() {
+     return getOpenAIKey();
+   },
    apiUrl: 'https://api.openai.com/',
    systemPrompt,
  };
@@ -73,7 +78,9 @@ _models.fastCheapReasoningMulti = {
    maxContextWindow: 128_000,
    maxOutputTokens: 16_384,
    requestTimeout: 40_000,
-   apiKey: process.env.OPENAI_API_KEY ?? 'undefined',
+   get apiKey() {
+     return getOpenAIKey();
+   },
    apiUrl: 'https://api.openai.com/',
    systemPrompt,
  };
@@ -87,7 +94,9 @@ _models.reasoningNoImage = {
    maxContextWindow: 200_000,
    maxOutputTokens: 100_000,
    requestTimeout: 120_000,
-   apiKey: process.env.OPENAI_API_KEY ?? 'undefined',
+   get apiKey() {
+     return getOpenAIKey();
+   },
    apiUrl: 'https://api.openai.com/',
    systemPrompt,
  };
@@ -132,7 +141,9 @@ _models.privacy = {
    apiUrl: (process.env.OPENWEBUI_API_URL ?? '').endsWith('/')
      ? process.env.OPENWEBUI_API_URL
      : `${process.env.OPENWEBUI_API_URL}/`,
-   apiKey: process.env.OPENWEBUI_API_KEY ?? 'undefined',
+   get apiKey() {
+     return process.env.OPENWEBUI_API_KEY;
+   },
    systemPrompt,
    modelOptions: {
      stop: ['</s>'],
@@ -145,7 +156,8 @@ if (process.env.NODE_ENV !== 'test') {
  }
 
  const secondsInDay = 60 * 60 * 24;
- export const cacheTTL = process.env.CHATGPT_CACHE_TTL ?? secondsInDay;
+ const secondsInYear = secondsInDay * 365; // 365 days
+ export const cacheTTL = process.env.CHATGPT_CACHE_TTL ?? secondsInYear;
 
  // Caching can be disabled by setting DISABLE_CACHE=true
  // By default, caching is enabled when Redis is available and working
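
Note: the switch from a captured `apiKey:` value to a `get apiKey()` accessor is what makes the dotenv move safe. This module can now be evaluated before `dotenv.config()` runs in index.js; a plain property would freeze whatever `process.env` held at import time, while a getter re-reads it on every access. The difference in miniature:

```js
// Before any dotenv.config() call, OPENAI_API_KEY may be unset.
const eager = { apiKey: process.env.OPENAI_API_KEY }; // captures undefined forever
const lazy = {
  get apiKey() {
    return process.env.OPENAI_API_KEY; // evaluated at access time
  },
};

process.env.OPENAI_API_KEY = 'sk-example'; // stands in for dotenv.config() running later

console.log(eager.apiKey); // undefined
console.log(lazy.apiKey); // 'sk-example'
```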
package/src/index.js CHANGED
@@ -1,6 +1,6 @@
- // Importing dotenv config to load environment variables from .env file
- // eslint-disable-next-line no-unused-vars
- import dotenv from 'dotenv/config';
+ // Load environment variables from .env file FIRST
+ import dotenv from 'dotenv';
+ dotenv.config();
 
  import chatGPT from './lib/chatgpt/index.js';
 
@@ -33,6 +33,8 @@ import themes from './chains/themes/index.js';
  import test from './chains/test/index.js';
 
  import testAdvice from './chains/test-advice/index.js';
+ import Conversation from './chains/conversation/index.js';
+ import * as turnPolicies from './chains/conversation/turn-policies.js';
 
  import schemas from './json-schemas/index.js';
  import * as common from './constants/common.js';
@@ -84,12 +86,16 @@ import schemaOrg from './verblets/schema-org/index.js';
  import nameSimilarTo from './verblets/name-similar-to/index.js';
 
  import name from './verblets/name/index.js';
+ import peopleList from './verblets/people-list/index.js';
 
  import toObject from './verblets/to-object/index.js';
 
  import listMap from './verblets/list-map/index.js';
  import listFind from './verblets/list-find/index.js';
 
+ import conversationTurn from './verblets/conversation-turn/index.js';
+ import conversationTurnMulti from './verblets/conversation-turn-multi/index.js';
+
  import bulkGroup from './chains/bulk-group/index.js';
 
  import listGroup from './verblets/list-group/index.js';
@@ -146,6 +152,7 @@ export const verblets = {
    schemaOrg,
    nameSimilarTo,
    name,
+   peopleList,
    toObject,
    listMap,
    listFind,
@@ -166,6 +173,9 @@ export const verblets = {
    setInterval,
    test,
    testAdvice,
+   Conversation,
+   conversationTurn,
+   conversationTurnMulti,
    bulkScore,
    filterAmbiguous,
    bulkGroup,
@@ -188,6 +198,6 @@ export const constants = {
    models,
  };
 
- export { prompts, schemas };
+ export { prompts, schemas, turnPolicies };
 
  export default chatGPT;
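
Note: the net effect for consumers of the package entry point is that `turnPolicies` joins the named exports, and the conversation primitives plus `peopleList` become reachable through the `verblets` map. A usage sketch, assuming the package's default entry resolves to this file:

```js
import chatGPT, { verblets, turnPolicies } from '@far-world-labs/verblets';

// New in 0.2.0, per the export map above:
const { Conversation, conversationTurn, conversationTurnMulti, peopleList } = verblets;

console.log(Object.keys(turnPolicies)); // whichever policies turn-policies.js exports

const summary = await chatGPT('Summarize this release in one sentence.');
```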