@recombine-ai/engine 0.1.1 → 0.3.0
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- package/build/index.d.ts +5 -3
- package/build/index.d.ts.map +1 -1
- package/build/index.js +5 -4
- package/build/lib/ai.d.ts +323 -78
- package/build/lib/ai.d.ts.map +1 -1
- package/build/lib/ai.js +237 -186
- package/build/lib/bosun/agent.d.ts +42 -21
- package/build/lib/bosun/agent.d.ts.map +1 -1
- package/build/lib/bosun/agent.js +27 -4
- package/build/lib/interfaces.d.ts +19 -23
- package/build/lib/interfaces.d.ts.map +1 -1
- package/changelog.md +11 -0
- package/package.json +4 -3
- package/readme.md +1 -1
package/build/index.d.ts
CHANGED

@@ -1,5 +1,7 @@
-…
-export { …
-export …
+import { AIEngine } from './lib/ai';
+export { AIEngine } from './lib/ai';
+export declare const createAIEngine: typeof AIEngine.createAIEngine;
+export { delayFactory, Schedule } from './lib/schedule';
+export { Scheduler, Logger } from './lib/interfaces';
 export * from './lib/bosun';
 //# sourceMappingURL=index.d.ts.map
package/build/index.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,…
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AAEnC,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AAEnC,eAAO,MAAM,cAAc,gCAA0B,CAAA;AAErD,OAAO,EAAE,YAAY,EAAE,QAAQ,EAAE,MAAM,gBAAgB,CAAA;AAEvD,OAAO,EAAE,SAAS,EAAE,MAAM,EAAE,MAAM,kBAAkB,CAAA;AAEpD,cAAc,aAAa,CAAA"}
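For consumers, the practical effect of the index changes above is that the engine factory and its namespace are now importable from the package root. A minimal sketch of that usage (hypothetical consumer code; assumes `@recombine-ai/engine` resolves to this build):

```typescript
import { createAIEngine, AIEngine } from '@recombine-ai/engine';

// createAIEngine is the namespace's factory, re-exported at the top level.
const engine = createAIEngine();
const conversation: AIEngine.Conversation = engine.createConversation();
conversation.addMessage('user', 'Hi!');
```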
package/build/index.js
CHANGED

@@ -14,10 +14,11 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.delayFactory = exports.createAIEngine = void 0;
-…
-…
+exports.delayFactory = exports.createAIEngine = exports.AIEngine = void 0;
+const ai_1 = require("./lib/ai");
+var ai_2 = require("./lib/ai");
+Object.defineProperty(exports, "AIEngine", { enumerable: true, get: function () { return ai_2.AIEngine; } });
+exports.createAIEngine = ai_1.AIEngine.createAIEngine;
 var schedule_1 = require("./lib/schedule");
 Object.defineProperty(exports, "delayFactory", { enumerable: true, get: function () { return schedule_1.delayFactory; } });
-__exportStar(require("./lib/interfaces"), exports);
 __exportStar(require("./lib/bosun"), exports);
package/build/lib/ai.d.ts
CHANGED

@@ -1,88 +1,333 @@
 import { ZodSchema } from 'zod';
-import { Logger …
+import { Logger } from './interfaces';
 import { SendAction } from './bosun/action';
-export …
-…
-…
+export declare namespace AIEngine {
+    type BasicModel = 'o3-mini-2025-01-31' | 'o1-preview-2024-09-12' | 'gpt-4o-2024-11-20' | 'o1-2024-12-17';
+    interface ProgrammaticStep {
+        /** Step name for debugging */
+        name: string;
+        /** Determines if the step should be run or not */
+        runIf?: (messages: Conversation) => boolean | Promise<boolean>;
+        /** Content of the step */
+        execute: () => Promise<unknown>;
+        /** Error handler called if an error occurred in the `execute` function */
+        onError: (error: string) => Promise<unknown>;
+    }
+    interface LLMStep {
+        /** Step name for debugging */
+        name: string;
+        /** Determines if the step should be run or not */
+        runIf?: (messages: Conversation) => boolean | Promise<boolean>;
+        /** LLM to use. Defaults to gpt-4o */
+        model?: BasicModel;
+        /**
+         * Prompt can be a simple string or a link to a file, loaded with the `loadFile` function, which
+         * takes a path to the file relative to the `src/use-cases` directory. Should be Nunjucks-compatible.
+         */
+        prompt: string | File;
+        /**
+         * Schema for structured LLM output using the {@link zod https://zod.dev/}
+         * library.
+         */
+        schema?: ZodSchema;
+        /** Exclude directives from message history passed to the LLM for this step */
+        ignoreDirectives?: boolean;
+        /**
+         * Additional data to be inserted into the prompt. Accessible via Nunjucks variables.
+         * @example
+         * ```
+         * prompt: "Hello {{ name }}, your score is {{ score }}"
+         * context: { name: "John", score: 42 }
+         * ```
+         */
+        context?: Record<string, unknown>;
+        /**
+         * Function to execute with the LLM's response. Use {@link setProposedReply} to use the LLM's output as the proposed reply,
+         * or use a combination of {@link getProposedReply} and {@link setProposedReply} to substitute parts of the string.
+         * @example
+         * ```
+         * // Use LLM output directly as reply
+         * execute: (reply) => messages.setProposedReply(reply)
+         *
+         * // Substitute tokens in LLM output
+         * execute: (reply) => {
+         *     const withLink = reply.replace('<PAYMENT_LINK>', 'https://payment.example.com/123')
+         *     messages.setProposedReply(withLink)
+         * }
+         * ```
+         */
+        execute: (reply: string) => Promise<unknown>;
+        /**
+         * Check a condition: whether the `execute` function should run or not
+         * @deprecated use `runIf` to check if the step should be run; use an `if` inside `execute` to check
+         * if it should be executed
+         **/
+        shouldExecute?: (reply: string) => boolean | Promise<boolean>;
+        /**
+         * When provided, throws an error if the step is invoked more times than `maxAttempts`.
+         * The number of attempts taken is reset when `shouldExecute` returns `false`. Useful to limit
+         * rewinds by reviewers. NOTE that it doesn't work on steps without a `shouldExecute` method.
+         */
+        maxAttempts?: number;
+        /** Error handler called if an error occurred during the LLM API call or in the `execute` function */
+        onError: (error: string) => Promise<unknown>;
+    }
     /**
-     * …
-     * takes a path to the file relative to `src/use-cases` directory.
+     * An AI workflow composed of steps.
      */
-…
+    interface Workflow {
+        /**
+         * Terminates the workflow, preventing further steps from being executed.
+         */
+        terminate: () => void;
+        /**
+         * Runs the workflow with a given conversation context.
+         * Executes steps sequentially until completion or termination.
+         * @param messages - The conversation context for the workflow
+         * @returns The proposed reply if the workflow completes, or null if terminated
+         */
+        run: (messages: Conversation) => Promise<string | null>;
+        /**
+         * Rewinds the workflow execution to a specific step.
+         * @param step - The step to rewind to
+         */
+        rewindTo: (step: LLMStep | ProgrammaticStep) => void;
+        /**
+         * Registers a callback to be executed before each step.
+         * @param callback - Async function to execute before each step
+         */
+        beforeEach: (callback: () => Promise<unknown>) => void;
+    }
     /**
-     * …
-     * …
+     * The main interface for the AI Engine.
+     *
+     * @example
+     * ```typescript
+     * import { AIEngine } from './lib/ai'
+     *
+     * // Create a new AI engine instance
+     * const ai = AIEngine.createAIEngine()
+     *
+     * // Create a conversation
+     * const conversation = ai.createConversation()
+     * conversation.addMessage('user', 'I need help with my order')
+     *
+     * // Define workflow steps
+     * const killswitch = ai.createStep({
+     *     name: 'killswitch',
+     *     prompt: ai.loadFile('prompts/killswitch.njk'),
+     *     execute: async (reply) => {
+     *         const result = JSON.parse(reply)
+     *         if (result.terminate) {
+     *             conversation.addDirective(`Terminating workflow: ${result.reason}`)
+     *             return workflow.terminate()
+     *         }
+     *     },
+     *     onError: async (error) => conversation.addDirective(`Error in killswitch: ${error}`)
+     * })
+     *
+     * const analyzeIntent = ai.createStep({
+     *     name: 'analyze-intent',
+     *     prompt: ai.loadFile('prompts/analyze-intent.njk'),
+     *     execute: async (reply) => {
+     *         const intent = JSON.parse(reply)
+     *         conversation.addDirective(`User intent is: ${intent.category}`)
+     *     },
+     *     onError: async (error) => conversation.addDirective(`Error analyzing intent: ${error}`)
+     * })
+     *
+     * const mainReply = ai.createStep({
+     *     name: 'main-reply',
+     *     prompt: ai.loadFile('prompts/generate-response.njk'),
+     *     execute: async (reply) => conversation.setProposedReply(reply),
+     *     onError: async (error) => conversation.setProposedReply(`I'm sorry, I'm having trouble right now.`)
+     * })
+     *
+     * // Create and run the workflow
+     * const workflow = await ai.createWorkflow(killswitch, analyzeIntent, mainReply)
+     * const response = await workflow.run(conversation)
+     * console.log(response)
+     * ```
      */
-…
+    interface AIEngine {
+        /**
+         * Creates a workflow from a sequence of steps.
+         * @param steps - An array of LLM or programmatic steps to be executed in order.
+         * @returns A Promise that resolves to the created Workflow.
+         */
+        createWorkflow: (...steps: Array<LLMStep | ProgrammaticStep>) => Promise<Workflow>;
+        /**
+         * Creates a step that can be used in a workflow.
+         * @param step - The LLM or programmatic step to create.
+         * @returns The created step of the same type as the input.
+         */
+        createStep: <T extends LLMStep | ProgrammaticStep>(step: T) => T;
+        /**
+         * Loads a file from the specified path.
+         * @param path - The path to the file to load.
+         * @returns The loaded File object.
+         */
+        loadFile: (path: string) => File;
+        /**
+         * Creates a new conversation instance.
+         * @param messages - Optional initial messages for the conversation.
+         * @returns A new Conversation object.
+         */
+        createConversation: (messages?: Message[]) => Conversation;
+    }
     /**
-     * …
-     * …
-     * …
+     * Represents a conversation between a user and an AI agent.
+     * Provides methods to manage the conversation flow, format messages, and convert the conversation to a string representation.
+     *
+     * @example
+     * ```typescript
+     * // Create a new conversation instance
+     * const conversation = new Conversation();
+     *
+     * // Set names for the participants
+     * conversation.setUserName("Client");
+     * conversation.setAgentName("Support");
+     *
+     * // Add messages to the conversation
+     * conversation.addMessage("user", "I need help with my account");
+     * conversation.addDirective("Ask for account details");
+     *
+     * // Get the conversation as a string to feed to an LLM
+     * const conversationText = conversation.toString();
+     * // Output:
+     * // Client: I need help with my account
+     * // System: Ask for account details
+     * ```
      */
-…
-}
-…
+    interface Conversation {
+        /**
+         * Sets the name of the user in the conversation to be used in {@link toString}.
+         * @param name - The name to set for the user.
+         */
+        setUserName(name: string): void;
+        /**
+         * Sets the name of the AI agent in the conversation to be used in {@link toString}.
+         * @param name - The name to set for the agent.
+         */
+        setAgentName(name: string): void;
+        /**
+         * Converts the conversation to a string representation to be fed to an LLM.
+         * @param ignoreDirectives - Whether to ignore directives in the string output.
+         * @returns The string representation of the conversation.
+         */
+        toString: (ignoreDirectives?: boolean) => string;
+        /**
+         * Adds a directive message to the conversation.
+         * @param message - The directive message to add.
+         * @example
+         * ```
+         * // Add a directive to guide the LLM response
+         * conversation.addDirective("Ask the user for their preferred date and time for the reservation");
+         *
+         * // The resulting conversation string might look like:
+         * // User: I'd like to book a table at your restaurant.
+         * // System: Ask the user for their preferred date and time for the reservation
+         * ```
+         */
+        addDirective: (message: string) => void;
+        /**
+         * Adds a message from a specified sender to the conversation.
+         * @param name - The sender of the message.
+         * @param message - The content of the message.
+         */
+        addMessage: (name: Message['sender'], message: string) => void;
+        /**
+         * Sets a custom formatter for directive messages.
+         * @param formatter - A function that takes a Message and returns a formatted string.
+         */
+        setDirectiveFormatter: (formatter: (message: Message) => string) => void;
+        /**
+         * Sets a custom formatter for proposed messages.
+         * @param formatter - A function that takes a message string and returns a formatted string.
+         */
+        setProposedMessageFormatter: (formatter: (message: string) => string) => void;
+        /**
+         * Sets a proposed reply message.
+         * @param message - The proposed reply message.
+         */
+        setProposedReply: (message: string) => void;
+        /**
+         * Gets the current proposed reply message.
+         * @returns The proposed reply message, or null if none exists.
+         */
+        getProposedReply: () => string | null;
+        /**
+         * Gets the history of all messages in the conversation.
+         * @returns An array of Message objects representing the conversation history.
+         */
+        getHistory: () => Message[];
+    }
+    /**
+     * Represents a message in a conversation between a user and an agent, or a system message.
+     * Messages can contain text and optionally an image URL. To be used in the {@link Conversation} interface.
+     */
+    interface Message {
+        /** The sender of the message, which can be one of the following: 'user', 'agent', or 'system' */
+        sender: 'user' | 'agent' | 'system';
+        /** The text content of the message */
+        text: string;
+        /** Optional URL of an image associated with the message */
+        imageUrl?: string;
+    }
+    interface File {
         content: () => Promise<string>;
-}
-…
+    }
+    /**
+     * Configuration options for the Engine.
+     */
+    interface EngineConfig {
+        /**
+         * Optional token storage object that provides access to authentication tokens.
+         * @property {object} tokenStorage - Object containing a method to retrieve the token.
+         * @property {() => Promise<string | null>} tokenStorage.getToken - Function that returns a promise resolving to an authentication token or null.
+         */
+        tokenStorage?: {
+            getToken: () => Promise<string | null>;
+        };
+        /**
+         * Optional base URL path for resolving paths to prompts.
+         */
+        basePath?: string;
+        /**
+         * Optional logger instance for handling log messages.
+         */
+        logger?: Logger;
+        /**
+         * Optional function for sending actions.
+         */
+        sendAction?: SendAction;
+    }
+    /**
+     * Creates an AI Engine with the given configuration.
+     *
+     * The AI Engine provides utilities for creating and running conversational workflows
+     * with large language models, specifically OpenAI GPT models.
+     *
+     * @returns An AIEngine instance.
+     *
+     * @example
+     * ```ts
+     * const engine = createAIEngine({
+     *     logger: customLogger,
+     *     basePath: '/path/to/prompts'
+     * });
+     *
+     * const workflow = await engine.createWorkflow(
+     *     engine.createStep({
+     *         name: 'generate-response',
+     *         prompt: engine.loadFile('prompts/response.txt'),
+     *         execute: (response) => conversation.setProposedReply(response)
+     *     })
+     * );
+     *
+     * const reply = await workflow.run(conversation);
+     * ```
+     */
+    function createAIEngine(cfg?: EngineConfig): AIEngine;
+}
 //# sourceMappingURL=ai.d.ts.map
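The `maxAttempts`/`shouldExecute`/`rewindTo` trio above is documented but never demonstrated together. A hypothetical sketch of the reviewer pattern those declarations describe — a step that rewinds the workflow to an earlier step until a draft passes, bounded by `maxAttempts` (assumes the `ai`, `conversation`, `workflow`, and `mainReply` bindings from the JSDoc example; `prompts/review.njk` and the `{ ok: boolean }` reply shape are invented):

```typescript
// Hypothetical reviewer step. When shouldExecute returns true, the attempt
// counter increments and execute() rewinds; when it returns false, the
// counter resets and the workflow moves on past this step.
const review = ai.createStep({
    name: 'review',
    prompt: ai.loadFile('prompts/review.njk'),
    maxAttempts: 3, // only enforced because shouldExecute is present
    shouldExecute: (reply) => JSON.parse(reply).ok === false,
    execute: async () => workflow.rewindTo(mainReply),
    onError: async (error) => conversation.addDirective(`Review failed: ${error}`),
});
```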
package/build/lib/ai.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"ai.d.ts","sourceRoot":"","sources":["../../src/lib/ai.ts"],"names":[],"mappings":"AAMA,OAAO,EAAE,SAAS,EAAE,MAAM,KAAK,CAAA;AAE/B,OAAO,EAAE,MAAM,EAAE,…
+{"version":3,"file":"ai.d.ts","sourceRoot":"","sources":["../../src/lib/ai.ts"],"names":[],"mappings":"AAMA,OAAO,EAAE,SAAS,EAAE,MAAM,KAAK,CAAA;AAE/B,OAAO,EAAE,MAAM,EAAE,MAAM,cAAc,CAAA;AACrC,OAAO,EAAc,UAAU,EAAE,MAAM,gBAAgB,CAAA;AAGvD,yBAAiB,QAAQ,CAAC;IACtB,KAAY,UAAU,GAChB,oBAAoB,GACpB,uBAAuB,GACvB,mBAAmB,GACnB,eAAe,CAAA;IAErB,UAAiB,gBAAgB;QAC7B,8BAA8B;QAC9B,IAAI,EAAE,MAAM,CAAA;QAEZ,kDAAkD;QAClD,KAAK,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,KAAK,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC,CAAA;QAE9D,0BAA0B;QAC1B,OAAO,EAAE,MAAM,OAAO,CAAC,OAAO,CAAC,CAAA;QAE/B,6EAA6E;QAC7E,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,CAAC,CAAA;KAC/C;IAED,UAAiB,OAAO;QACpB,8BAA8B;QAC9B,IAAI,EAAE,MAAM,CAAA;QAEZ,kDAAkD;QAClD,KAAK,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,KAAK,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC,CAAA;QAE9D,qCAAqC;QACrC,KAAK,CAAC,EAAE,UAAU,CAAA;QAElB;;;WAGG;QACH,MAAM,EAAE,MAAM,GAAG,IAAI,CAAA;QAErB;;;WAGG;QACH,MAAM,CAAC,EAAE,SAAS,CAAA;QAElB,8EAA8E;QAC9E,gBAAgB,CAAC,EAAE,OAAO,CAAA;QAE1B;;;;;;;WAOG;QACH,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEjC;;;;;;;;;;;;;;WAcG;QACH,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,CAAC,CAAA;QAE5C;;;;YAII;QACJ,aAAa,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC,CAAA;QAE7D;;;;WAIG;QACH,WAAW,CAAC,EAAE,MAAM,CAAA;QAEpB,6FAA6F;QAC7F,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,CAAC,CAAA;KAC/C;IAED;;OAEG;IACH,UAAiB,QAAQ;QACrB;;WAEG;QACH,SAAS,EAAE,MAAM,IAAI,CAAA;QAErB;;;;;WAKG;QACH,GAAG,EAAE,CAAC,QAAQ,EAAE,YAAY,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAA;QAEvD;;;WAGG;QACH,QAAQ,EAAE,CAAC,IAAI,EAAE,OAAO,GAAG,gBAAgB,KAAK,IAAI,CAAA;QAEpD;;;WAGG;QACH,UAAU,EAAE,CAAC,QAAQ,EAAE,MAAM,OAAO,CAAC,OAAO,CAAC,KAAK,IAAI,CAAA;KACzD;IAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAkDG;IACH,UAAiB,QAAQ;QACrB;;;;WAIG;QACH,cAAc,EAAE,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC,OAAO,GAAG,gBAAgB,CAAC,KAAK,OAAO,CAAC,QAAQ,CAAC,CAAC;QAEnF;;;;WAIG;QACH,UAAU,EAAE,CAAC,CAAC,SAAS,OAAO,GAAG,gBAAgB,EAAE,IAAI,EAAE,CAAC,KAAK,CAAC,CAAC;QAEjE;;;;WAIG;QACH,QAAQ,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;QAEjC;;;;WAIG;QACH,kBAAkB,EAAE,CAAC,QAAQ,CAAC,EAAE,OAAO,EAAE,KAAK,YAAY,CAAC;KAC9D;IAED;;;;;;;;;;;;;;;;;;;;;;;OAuBG;IACH,UAAiB,YAAY;QACzB;;;WAGG;QACH,WAAW,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI,CAAA;QAE/B;;;WAGG;QACH,YAAY,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI,CAAA;QAEhC;;;;WAIG;QACH,QAAQ,EAAE,CAAC,gBAAgB,CAAC,EAAE,OAAO,KAAK,MAAM,CAAA;QAEhD;;;;;;;;;;;;WAYG;QACH,YAAY,EAAE,CAAC,OAAO,EAAE,MAAM,KAAK,IAAI,CAAA;QAEvC;;;;WAIG;QACH,UAAU,EAAE,CAAC,IAAI,EAAE,OAAO,CAAC,QAAQ,CAAC,EAAE,OAAO,EAAE,MAAM,KAAK,IAAI,CAAA;QAE9D;;;WAGG;QACH,qBAAqB,EAAE,CAAC,SAAS,EAAE,CAAC,OAAO,EAAE,OAAO,KAAK,MAAM,KAAK,IAAI,CAAA;QAExE;;;WAGG;QACH,2BAA2B,EAAE,CAAC,SAAS,EAAE,CAAC,OAAO,EAAE,MAAM,KAAK,MAAM,KAAK,IAAI,CAAA;QAE7E;;;WAGG;QACH,gBAAgB,EAAE,CAAC,OAAO,EAAE,MAAM,KAAK,IAAI,CAAA;QAE3C;;;WAGG;QACH,gBAAgB,EAAE,MAAM,MAAM,GAAG,IAAI,CAAA;QAErC;;;WAGG;QACH,UAAU,EAAE,MAAM,OAAO,EAAE,CAAA;KAC9B;IAED;;;OAGG;IACH,UAAiB,OAAO;QACpB,iGAAiG;QACjG,MAAM,EAAE,MAAM,GAAG,OAAO,GAAG,QAAQ,CAAA;QACnC,sCAAsC;QACtC,IAAI,EAAE,MAAM,CAAA;QACZ,2DAA2D;QAC3D,QAAQ,CAAC,EAAE,MAAM,CAAA;KACpB;IAED,UAAiB,IAAI;QACjB,OAAO,EAAE,MAAM,OAAO,CAAC,MAAM,CAAC,CAAA;KACjC;IAED;;OAEG;IACH,UAAiB,YAAY;QACzB;;;;WAIG;QACH,YAAY,CAAC,EAAE;YAAE,QAAQ,EAAE,MAAM,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAA;SAAE,CAAA;QACzD;;WAEG;QACH,QAAQ,CAAC,EAAE,MAAM,CAAA;QACjB;;WAEG;QACH,MAAM,CAAC,EAAE,MAAM,CAAA;QACf;;WAEG;QACH,UAAU,CAAC,EAAE,UAAU,CAAA;KAC1B;IAED;;;;;;;;;;;;;;;;;;;;;;;;;OAyBG;IACH,SAAgB,cAAc,CAAC,GAAG,GAAE,YAAiB,GAAG,QAAQ,CAoP/D;CA6BJ"}
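One behavioral detail worth calling out before the compiled source below: `runLLM` short-circuits when the resolved API key is the literal string `__TESTING__`, returning a canned response instead of calling OpenAI. A hypothetical offline-test sketch built on that hook (the step name, prompt string, and messages are invented):

```typescript
import { createAIEngine } from '@recombine-ai/engine';

// Hypothetical offline test: with the '__TESTING__' token, no OpenAI call is made.
const engine = createAIEngine({
    tokenStorage: { getToken: async () => '__TESTING__' },
});
const conversation = engine.createConversation();
conversation.addMessage('user', 'ping');

const workflow = await engine.createWorkflow(
    engine.createStep({
        name: 'echo',
        prompt: 'Reply to the user.',
        execute: async (reply) => conversation.setProposedReply(reply),
        onError: async () => conversation.setProposedReply('fallback'),
    }),
);
console.log(await workflow.run(conversation)); // "canned response"
```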
package/build/lib/ai.js
CHANGED

@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.…
+exports.AIEngine = void 0;
 // cspell:words lstripBlocks
 const fs_1 = __importDefault(require("fs"));
 const openai_1 = __importDefault(require("openai"));

@@ -12,189 +12,264 @@ const nunjucks_1 = __importDefault(require("nunjucks"));
 const zod_to_json_schema_1 = require("zod-to-json-schema");
 const action_1 = require("./bosun/action");
 const core_1 = require("openai/core");
-…
+var AIEngine;
+(function (AIEngine) {
+    /**
+     * Creates an AI Engine with the given configuration.
+     *
+     * The AI Engine provides utilities for creating and running conversational workflows
+     * with large language models, specifically OpenAI GPT models.
+     *
+     * @returns An AIEngine instance.
+     *
+     * @example
+     * ```ts
+     * const engine = createAIEngine({
+     *     logger: customLogger,
+     *     basePath: '/path/to/prompts'
+     * });
+     *
+     * const workflow = await engine.createWorkflow(
+     *     engine.createStep({
+     *         name: 'generate-response',
+     *         prompt: engine.loadFile('prompts/response.txt'),
+     *         execute: (response) => conversation.setProposedReply(response)
+     *     })
+     * );
+     *
+     * const reply = await workflow.run(conversation);
+     * ```
+     */
+    function createAIEngine(cfg = {}) {
+        const logger = cfg.logger || globalThis.console;
+        const basePath = cfg.basePath || process.cwd();
+        const tokenStorage = cfg.tokenStorage || {
+            async getToken() {
+                if (process.env.OPENAI_API_KEY) {
+                    return process.env.OPENAI_API_KEY;
                 }
-…
-            })
-                .filter((msg) => msg !== null)
-                .join('\n') + (proposedReply ? `\n${proposedFormatter(proposedReply)}` : ''),
-            addMessage: (sender, text) => messages.push({ sender, text }),
-            addDirective: (message) => {
-                logger.debug(`AI Core: add directive: ${message}`);
-                messages.push({ sender: 'system', text: message });
-            },
-            directiveFormat: (formatter) => {
-                directivesFormatter = formatter;
-            },
-            proposedMessageFormat: (formatter) => {
-                proposedFormatter = formatter;
-            },
-            setProposedReply: (message) => (proposedReply = message),
-            getProposedReply: () => proposedReply,
-            getHistory: () => messages,
-            setUserName: (name) => {
-                names.user = name;
-            },
-            setAgentName: (name) => {
-                names.agent = name;
+                throw new Error('OpenAI API key is not set');
             },
         };
-…
-        }
-…
-            break;
+        function createStep(step) {
+            return step;
+        }
+        function getConversation(messages = []) {
+            let directivesFormatter = (message) => `${message.sender}: ${message.text}`;
+            let proposedFormatter = (message) => `Proposed reply: ${message}`;
+            let proposedReply = null;
+            const names = {
+                agent: 'Agent',
+                user: 'User',
+                system: 'System',
+            };
+            return {
+                toString: (ignoreDirectives = false) => messages
+                    .map((msg) => {
+                    if (msg.sender === 'system') {
+                        return ignoreDirectives ? null : directivesFormatter(msg);
                     }
-…
+                    return `${names[msg.sender]}: ${msg.text}`;
+                })
+                    .filter((msg) => msg !== null)
+                    .join('\n') +
+                    (proposedReply ? `\n${proposedFormatter(proposedReply)}` : ''),
+                addMessage: (sender, text) => messages.push({ sender, text }),
+                addDirective: (message) => {
+                    logger.debug(`AI Engine: add directive: ${message}`);
+                    messages.push({ sender: 'system', text: message });
+                },
+                setDirectiveFormatter: (formatter) => {
+                    directivesFormatter = formatter;
+                },
+                setProposedMessageFormatter: (formatter) => {
+                    proposedFormatter = formatter;
+                },
+                setProposedReply: (message) => (proposedReply = message),
+                getProposedReply: () => proposedReply,
+                getHistory: () => messages,
+                setUserName: (name) => {
+                    names.user = name;
+                },
+                setAgentName: (name) => {
+                    names.agent = name;
+                },
+            };
+        }
+        async function createWorkflow(...steps) {
+            const apiKey = await tokenStorage.getToken();
+            let shouldRun = true;
+            let currentStep = 0;
+            let beforeEachCallback = async () => Promise.resolve(null);
+            const attempts = new Map();
+            return {
+                terminate: () => {
+                    logger.debug('AI Engine: Terminating conversation...');
+                    shouldRun = false;
+                },
+                run: async (messages) => {
+                    for (; currentStep < steps.length; currentStep++) {
+                        await beforeEachCallback();
+                        const step = steps[currentStep];
+                        if (!shouldRun) {
+                            break;
                         }
-…
+                        if (!step.runIf || (await step.runIf(messages))) {
+                            const action = (0, action_1.makeAction)(cfg.sendAction, 'AI', step.name);
+                            await action('started');
+                            logger.debug(`AI Engine: Step: ${step.name}`);
+                            if ('prompt' in step) {
+                                await runStep(step, messages);
+                            }
+                            else {
+                                await runDumbStep(step, messages);
+                            }
+                            await action('completed');
                         }
-        await action('completed');
                     }
+                    return shouldRun ? messages.getProposedReply() : null;
+                },
+                rewindTo: (step) => {
+                    const index = steps.indexOf(step);
+                    if (index === -1) {
+                        throw new Error(`Step ${step.name} not found`);
+                    }
+                    if (index > currentStep) {
+                        throw new Error(`Cannot rewind to a step ahead of the current step`);
+                    }
+                    currentStep = index - 1; // -1 because it will be incremented in the loop definition
+                },
+                beforeEach(callback) {
+                    beforeEachCallback = callback;
+                },
+            };
+            async function runStep(step, messages) {
+                if (!apiKey) {
+                    throw new Error('OpenAI API key is not set');
                 }
-…
+                try {
+                    let response = null;
+                    let prompt = typeof step.prompt === 'string' ? step.prompt : await step.prompt.content();
+                    logger.debug('AI Engine: context', step.context);
+                    logger.debug('AI Engine: messages', messages.toString(step.ignoreDirectives || false));
+                    if (step.context) {
+                        nunjucks_1.default.configure({
+                            autoescape: true,
+                            trimBlocks: true,
+                            lstripBlocks: true,
+                        });
+                        prompt = nunjucks_1.default.renderString(prompt, step.context);
+                    }
+                    response = await runLLM(apiKey, prompt, messages.toString(step.ignoreDirectives || false), step.schema, step.model);
+                    if (!response) {
+                        throw new Error('No response from OpenAI');
+                    }
+                    logger.debug(`AI Engine: response: ${response}`);
+                    if (typeof step.shouldExecute === 'function') {
+                        if (await step.shouldExecute(response)) {
+                            logger.debug(`AI Engine: executing`);
+                            checkAttempts(step);
+                            await step.execute(response);
+                        }
+                        else {
+                            resetAttempts(step);
+                            logger.debug(`AI Engine: skipping`);
+                        }
+                    }
+                    else {
+                        logger.debug(`AI Engine: replying`);
+                        await step.execute(response);
+                    }
                 }
-…
+                catch (error) {
+                    // FIXME: this doesn't terminate the workflow
+                    await step.onError(error.message);
+                    shouldRun = false;
                 }
-        currentStep = index - 1; // -1 because it will be incremented in the loop definition
-    },
-    beforeEach(callback) {
-        beforeEachCallback = callback;
-    },
-};
-async function runStep(step, messages) {
-    if (!apiKey) {
-        throw new Error('OpenAI API key is not set');
             }
-…
-    if (step.context) {
-        nunjucks_1.default.configure({ autoescape: true, trimBlocks: true, lstripBlocks: true });
-        prompt = nunjucks_1.default.renderString(prompt, step.context);
+            async function runDumbStep(step, messages) {
+                try {
+                    if (!step.runIf || (await step.runIf(messages))) {
+                        await step.execute();
+                    }
                 }
-…
+                catch (error) {
+                    console.error(`AI Engine: error in dumb step ${step.name}: ${error.message}`);
+                    await step.onError(error.message);
+                    shouldRun = false;
                 }
-…
-        await step.execute(response);
+            }
+            function checkAttempts(step) {
+                if (step.maxAttempts) {
+                    if (!attempts.has(step)) {
+                        attempts.set(step, 0);
                     }
-…
+                    attempts.set(step, attempts.get(step) + 1);
+                    if (attempts.get(step) > step.maxAttempts) {
+                        throw new Error(`Max attempts reached for step ${step.name}`);
                     }
                 }
-    else {
-        logger.debug(`AI Core: replying`);
-        await step.execute(response);
-    }
             }
-…
-        await step.onError(error.message);
-        shouldRun = false;
+            function resetAttempts(step) {
+                attempts.set(step, 0);
             }
         }
-async function …
-…
+        async function runLLM(apiKey, systemPrompt, messages, schema, model = 'gpt-4o-2024-11-20') {
+            logger.debug('AI Engine: model:', model);
+            logger.debug('----------- RENDERED PROMPT ---------------');
+            logger.debug(systemPrompt);
+            logger.debug('-------------------------------------------');
+            if (apiKey === '__TESTING__') {
+                await (0, core_1.sleep)(100);
+                return schema
+                    ? JSON.stringify({ message: 'canned response', reasons: [] })
+                    : 'canned response';
             }
-…
+            const client = new openai_1.default({ apiKey });
+            const response = await client.chat.completions.create({
+                messages: [
+                    { role: 'system', content: systemPrompt },
+                    { role: 'user', content: messages },
+                ],
+                ...getOpenAiOptions(model, schema),
+            });
+            if (!response.choices[0].message.content) {
+                throw new Error('No response from OpenAI');
             }
+            return response.choices[0].message.content;
         }
-function …
+        function loadFile(path) {
+            // NOTE: there probably will be S3 loading stuff here
+            return {
+                content: async () => {
+                    logger.debug('AI Engine: loading prompt:', path);
+                    return fs_1.default.promises.readFile((0, path_1.join)(basePath, path), 'utf-8');
+                },
+            };
         }
+        return {
+            createWorkflow: createWorkflow,
+            createStep,
+            loadFile,
+            createConversation: getConversation,
+        };
     }
-…
+    AIEngine.createAIEngine = createAIEngine;
+    function getOpenAiOptions(model, schema) {
+        const options = {
+            model,
+        };
+        const isReasoningModel = ['o3-', 'o1-', 'o1-preview-'].some((m) => model.startsWith(m));
+        if (isReasoningModel) {
+            if (!model.startsWith('o1-preview-')) {
+                options.reasoning_effort = 'high';
+            }
+        }
+        else {
+            options.temperature = 0.1;
         }
-const client = new openai_1.default({ apiKey });
-logger.log('----------- RENDERED PROMPT ---------------');
-logger.log(systemPrompt);
-logger.log('---------------------------------------');
-let format = { type: 'text' };
         if (schema) {
-…
+            options.response_format = {
                 type: 'json_schema',
                 json_schema: {
                     name: 'detector_response',

@@ -202,33 +277,9 @@ function createAIEngine(cfg = {}) {
                 },
             };
         }
-…
-        { role: 'system', content: systemPrompt },
-        { role: 'user', content: messages },
-    ],
-    model: 'gpt-4o',
-    response_format: format,
-    temperature: 0.1,
-});
-if (!response.choices[0].message.content) {
-    throw new Error('No response from OpenAI');
+        else {
+            options.response_format = { type: 'text' };
         }
-return …
-}
-function loadFile(path) {
-    // NOTE: there probably will be S3 loading stuff here
-    return {
-        content: async () => {
-            logger.debug('AI Core: loading prompt:', path);
-            return fs_1.default.promises.readFile((0, path_1.join)(basePath, path), 'utf-8');
-        },
-    };
+        return options;
     }
-…
-    createWorkflow: createWorkflow,
-    createStep,
-    loadFile,
-    makeMessagesList,
-};
-}
+})(AIEngine || (exports.AIEngine = AIEngine = {}));

package/build/lib/bosun/agent.d.ts
CHANGED

@@ -1,25 +1,46 @@
-import { …
-import { Logger, …
+import { AIEngine } from '../ai';
+import { Logger, Scheduler } from '../interfaces';
 import { SendAction } from './action';
 import { Context } from './context';
-…
+/**
+ * Bosun is a UI for testing Recombine AI agents. It enables testing complex agent interactions with multiple steps, error handling, and state management.
+ *
+ * @example
+ * ```typescript
+ * // In workflows.ts
+ * const agents = {
+ *     "testbot": createTestAgentFactory((props) => {
+ *         return {
+ *             start: async () => { ... },
+ *             reactOnMessage: async () => { ... },
+ *             respondToMessage: async () => { ... }
+ *         }
+ *     })
+ * }
+ *
+ * export agents;
+ * ```
+ */
+export declare namespace Bosun {
+    type DefaultContext = Record<string, any>;
+    export interface TesAgentFactoryProps<CTX extends DefaultContext = DefaultContext> {
+        logger: Logger;
+        scheduler: Scheduler.Scheduler;
+        ai: AIEngine.AIEngine;
+        getMessages: () => AIEngine.Message[];
+        sendMessage: (message: string) => Promise<void>;
+        sendAction: SendAction;
+        ctx: Context<CTX>;
+    }
+    export interface TestAgent {
+        start: () => Promise<unknown>;
+        reactOnMessage: () => Promise<unknown>;
+        respondToMessage: () => Promise<unknown>;
+        isAssigned: () => Promise<boolean>;
+        onFatalError: (error: Error) => Promise<unknown>;
+    }
+    export type TestAgentFactory<T extends DefaultContext = DefaultContext> = (props: TesAgentFactoryProps<T>) => TestAgent;
+    export function createTestAgentFactory<T extends DefaultContext>(creator: TestAgentFactory<T>): TestAgentFactory<T>;
+    export {};
 }
-export interface TestAgent {
-    start: () => Promise<unknown>;
-    reactOnMessage: () => Promise<unknown>;
-    respondToMessage: () => Promise<unknown>;
-    isAssigned: () => Promise<boolean>;
-    onFatalError: (error: Error) => Promise<unknown>;
-}
-export type TestAgentFactory<T extends DefaultContext = DefaultContext> = (props: TesAgentFactoryProps<T>) => TestAgent;
-export declare function createTestAgentFactory<T extends DefaultContext>(creator: TestAgentFactory<T>): TestAgentFactory<T>;
-export {};
 //# sourceMappingURL=agent.d.ts.map

package/build/lib/bosun/agent.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../../../src/lib/bosun/agent.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,OAAO,CAAA;AAChC,OAAO,EAAE,MAAM,EAAE,…
+{"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../../../src/lib/bosun/agent.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,OAAO,CAAA;AAChC,OAAO,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,eAAe,CAAA;AACjD,OAAO,EAAE,UAAU,EAAE,MAAM,UAAU,CAAA;AACrC,OAAO,EAAE,OAAO,EAAE,MAAM,WAAW,CAAA;AAEnC;;;;;;;;;;;;;;;;;;GAkBG;AACH,yBAAiB,KAAK,CAAA;IAClB,KAAK,cAAc,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;IACzC,MAAM,WAAW,oBAAoB,CAAC,GAAG,SAAS,cAAc,GAAG,cAAc;QAC7E,MAAM,EAAE,MAAM,CAAA;QACd,SAAS,EAAE,SAAS,CAAC,SAAS,CAAA;QAC9B,EAAE,EAAE,QAAQ,CAAC,QAAQ,CAAA;QACrB,WAAW,EAAE,MAAM,QAAQ,CAAC,OAAO,EAAE,CAAA;QACrC,WAAW,EAAE,CAAC,OAAO,EAAE,MAAM,KAAK,OAAO,CAAC,IAAI,CAAC,CAAA;QAC/C,UAAU,EAAE,UAAU,CAAA;QACtB,GAAG,EAAE,OAAO,CAAC,GAAG,CAAC,CAAA;KACpB;IAED,MAAM,WAAW,SAAS;QACtB,KAAK,EAAE,MAAM,OAAO,CAAC,OAAO,CAAC,CAAA;QAC7B,cAAc,EAAE,MAAM,OAAO,CAAC,OAAO,CAAC,CAAA;QACtC,gBAAgB,EAAE,MAAM,OAAO,CAAC,OAAO,CAAC,CAAA;QACxC,UAAU,EAAE,MAAM,OAAO,CAAC,OAAO,CAAC,CAAA;QAClC,YAAY,EAAE,CAAC,KAAK,EAAE,KAAK,KAAK,OAAO,CAAC,OAAO,CAAC,CAAA;KACnD;IAED,MAAM,MAAM,gBAAgB,CAAC,CAAC,SAAS,cAAc,GAAG,cAAc,IAAI,CACtE,KAAK,EAAE,oBAAoB,CAAC,CAAC,CAAC,KAC7B,SAAS,CAAA;IAEd,MAAM,UAAU,sBAAsB,CAAC,CAAC,SAAS,cAAc,EAAE,OAAO,EAAE,gBAAgB,CAAC,CAAC,CAAC,uBAE5F;;CAEJ"}
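The declarations above type-check test agents; at runtime (agent.js below) `createTestAgentFactory` is just an identity function. A hypothetical agent that fills in the `...` bodies elided in the JSDoc example (all message text is invented; assumes the bosun barrel re-exports `Bosun` from the package root):

```typescript
import { Bosun } from '@recombine-ai/engine';

// Hypothetical test agent; method bodies are illustrative only.
const testbot = Bosun.createTestAgentFactory(({ logger, getMessages, sendMessage }) => ({
    start: async () => sendMessage('Hi, I would like to reschedule my appointment.'),
    reactOnMessage: async () => logger.debug('last message:', getMessages().at(-1)?.text),
    respondToMessage: async () => sendMessage('Tomorrow afternoon works.'),
    isAssigned: async () => true,
    onFatalError: async (error) => logger.error('test agent crashed:', error),
}));
```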
package/build/lib/bosun/agent.js
CHANGED

@@ -1,6 +1,29 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.…
-…
+exports.Bosun = void 0;
+/**
+ * Bosun is a UI for testing Recombine AI agents. It enables testing complex agent interactions with multiple steps, error handling, and state management.
+ *
+ * @example
+ * ```typescript
+ * // In workflows.ts
+ * const agents = {
+ *     "testbot": createTestAgentFactory((props) => {
+ *         return {
+ *             start: async () => { ... },
+ *             reactOnMessage: async () => { ... },
+ *             respondToMessage: async () => { ... }
+ *         }
+ *     })
+ * }
+ *
+ * export agents;
+ * ```
+ */
+var Bosun;
+(function (Bosun) {
+    function createTestAgentFactory(creator) {
+        return creator;
+    }
+    Bosun.createTestAgentFactory = createTestAgentFactory;
+})(Bosun || (exports.Bosun = Bosun = {}));

package/build/lib/interfaces.d.ts
CHANGED

@@ -3,30 +3,26 @@ export interface Logger {
     debug: (...args: any[]) => void;
     error: (...args: any[]) => void;
 }
-/**
- * A function that schedules an action to be executed in the future
- * @param delay – a date when the action should be executed use {@link delayFactory} to create a
- * date
- * @param phone – user's phone
- */
-type ScheduleAction = (delay: Date, phone: string) => Promise<void>;
-export interface Scheduler {
+export declare namespace Scheduler {
     /**
-     * …
-     * @param …
-     * …
-     * @…
+     * A function that schedules an action to be executed in the future
+     * @param delay – a date when the action should be executed use {@link delayFactory} to create a
+     * date
+     * @param phone – user's phone
      */
-…
+    type ScheduleAction = (delay: Date, phone: string) => Promise<void>;
+    interface Scheduler {
+        /**
+         * Register a delayed action handler.
+         * @param actionName – a unique (inside one use-case) name for the action
+         * @param action – a function that will be called when the action is triggered
+         * @returns a function to schedule the action
+         */
+        registerAction: (actionName: string, action: (phone: string) => Promise<unknown>) => ScheduleAction;
+        /**
+         * Removes all actions for the given phone that were not executed yet.
+         */
+        clearAllPendingActions: (phone: string) => Promise<unknown>;
+    }
 }
-export {};
 //# sourceMappingURL=interfaces.d.ts.map

package/build/lib/interfaces.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"interfaces.d.ts","sourceRoot":"","sources":["../../src/lib/interfaces.ts"],"names":[],"mappings":"AAAA,MAAM,WAAW,MAAM;IACnB,GAAG,EAAE,CAAC,GAAG,IAAI,EAAE,GAAG,EAAE,KAAK,IAAI,CAAA;IAC7B,KAAK,EAAE,CAAC,GAAG,IAAI,EAAE,GAAG,EAAE,KAAK,IAAI,CAAA;IAC/B,KAAK,EAAE,CAAC,GAAG,IAAI,EAAE,GAAG,EAAE,KAAK,IAAI,CAAA;CAClC;AAED;;;;;…
+{"version":3,"file":"interfaces.d.ts","sourceRoot":"","sources":["../../src/lib/interfaces.ts"],"names":[],"mappings":"AAAA,MAAM,WAAW,MAAM;IACnB,GAAG,EAAE,CAAC,GAAG,IAAI,EAAE,GAAG,EAAE,KAAK,IAAI,CAAA;IAC7B,KAAK,EAAE,CAAC,GAAG,IAAI,EAAE,GAAG,EAAE,KAAK,IAAI,CAAA;IAC/B,KAAK,EAAE,CAAC,GAAG,IAAI,EAAE,GAAG,EAAE,KAAK,IAAI,CAAA;CAClC;AAED,yBAAiB,SAAS,CAAC;IACvB;;;;;OAKG;IACH,KAAY,cAAc,GAAG,CAAC,KAAK,EAAE,IAAI,EAAE,KAAK,EAAE,MAAM,KAAK,OAAO,CAAC,IAAI,CAAC,CAAA;IAC1E,UAAiB,SAAS;QACtB;;;;;WAKG;QACH,cAAc,EAAE,CACZ,UAAU,EAAE,MAAM,EAClB,MAAM,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,CAAC,KAC1C,cAAc,CAAA;QAEnB;;WAEG;QACH,sBAAsB,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,CAAC,CAAA;KAC9D;CACJ"}
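A hypothetical sketch of how the `Scheduler` namespace above is meant to be wired up (the action name, phone number, and handler body are invented; `scheduler` is assumed to be an injected implementation, e.g. the one Bosun passes to test agents):

```typescript
import { Scheduler } from '@recombine-ai/engine';

async function demo(scheduler: Scheduler.Scheduler) {
    // registerAction returns the scheduling function for the named action.
    const scheduleFollowUp = scheduler.registerAction('follow-up', async (phone) => {
        console.log(`sending follow-up to ${phone}`);
    });
    // A ScheduleAction takes the execution Date (the docs point to delayFactory
    // for building it) and the user's phone.
    await scheduleFollowUp(new Date(Date.now() + 60 * 60 * 1000), '+15550100');
    // Drop anything still pending for that user.
    await scheduler.clearAllPendingActions('+15550100');
}
```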
package/changelog.md
ADDED

@@ -0,0 +1,11 @@
+# Changelog
+
+### 0.2.0 → 0.3.0 (unstable)
+
+Breaking changes:
+
+- Break down the library into namespaces: AIEngine, Scheduler
+- Models → BasicModel
+- Step → LLMStep & ProgrammaticStep
+- makeMessagesList → getConversation
+- Deprecation of shouldExecute (discouraged to use if there's no `maxAttempts` in a step)
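In code, the renames in this changelog mostly mean adding the `AIEngine.` namespace qualifier. A hypothetical before/after for an older caller (the pre-0.3.0 names are inferred from the removed lines and rename list in this diff):

```typescript
// Before (inferred): top-level exports such as Step, Models, and an
// engine.makeMessagesList() method.
// import { createAIEngine, Step, Models } from '@recombine-ai/engine';

// 0.3.0: the factory is still top-level, but the types live on the namespace,
// and conversations come from engine.createConversation().
import { createAIEngine, AIEngine } from '@recombine-ai/engine';

const engine = createAIEngine();
const step: AIEngine.LLMStep = engine.createStep({
    name: 'greet',
    prompt: 'Say hello.',
    execute: async (reply) => { /* use reply */ },
    onError: async () => { /* handle the error */ },
});
```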
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@recombine-ai/engine",
-  "version": "0.…
+  "version": "0.3.0",
   "description": "Recombine AI engine for creating conversational AI agents",
   "main": "build/index.js",
   "types": "build/index.d.ts",

@@ -11,7 +11,8 @@
     "dev": "tsc -w",
     "build": "tsc",
     "prepublishOnly": "npm run build",
-    "test": "vitest"
+    "test": "vitest",
+    "docs": "cd docusaurus && yarn build --out-dir ../docs && cd .."
   },
   "devDependencies": {
     "@types/node": "^22.8.1",

@@ -23,7 +24,7 @@
   "dependencies": {
     "nunjucks": "^3.2.4",
     "openai": "^4.68.4",
-    "zod": "…
+    "zod": "3.23.8",
     "zod-to-json-schema": "^3.23.5"
   }
 }