connectonion 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/LICENSE +21 -0
  2. package/README.md +362 -0
  3. package/dist/connect.d.ts +35 -0
  4. package/dist/connect.d.ts.map +1 -0
  5. package/dist/connect.js +149 -0
  6. package/dist/console.d.ts +30 -0
  7. package/dist/console.d.ts.map +1 -0
  8. package/dist/console.js +124 -0
  9. package/dist/core/agent.d.ts +233 -0
  10. package/dist/core/agent.d.ts.map +1 -0
  11. package/dist/core/agent.js +500 -0
  12. package/dist/examples/comprehensive-test.js +314 -0
  13. package/dist/examples/simple-test.js +80 -0
  14. package/dist/history/index.d.ts +42 -0
  15. package/dist/history/index.d.ts.map +1 -0
  16. package/dist/history/index.js +140 -0
  17. package/dist/index.d.ts +20 -0
  18. package/dist/index.d.ts.map +1 -0
  19. package/dist/index.js +53 -0
  20. package/dist/llm/anthropic.d.ts +23 -0
  21. package/dist/llm/anthropic.d.ts.map +1 -0
  22. package/dist/llm/anthropic.js +139 -0
  23. package/dist/llm/gemini.d.ts +20 -0
  24. package/dist/llm/gemini.d.ts.map +1 -0
  25. package/dist/llm/gemini.js +136 -0
  26. package/dist/llm/index.d.ts +18 -0
  27. package/dist/llm/index.d.ts.map +1 -0
  28. package/dist/llm/index.js +76 -0
  29. package/dist/llm/llm-do.d.ts +8 -0
  30. package/dist/llm/llm-do.d.ts.map +1 -0
  31. package/dist/llm/llm-do.js +25 -0
  32. package/dist/llm/noop.d.ts +16 -0
  33. package/dist/llm/noop.d.ts.map +1 -0
  34. package/dist/llm/noop.js +23 -0
  35. package/dist/llm/openai.d.ts +21 -0
  36. package/dist/llm/openai.d.ts.map +1 -0
  37. package/dist/llm/openai.js +131 -0
  38. package/dist/src/core/agent.js +368 -0
  39. package/dist/src/history/index.js +140 -0
  40. package/dist/src/index.js +34 -0
  41. package/dist/src/llm/index.js +22 -0
  42. package/dist/src/llm/openai.js +78 -0
  43. package/dist/src/tools/tool-utils.js +348 -0
  44. package/dist/src/types.js +8 -0
  45. package/dist/tools/email.d.ts +13 -0
  46. package/dist/tools/email.d.ts.map +1 -0
  47. package/dist/tools/email.js +98 -0
  48. package/dist/tools/replay.d.ts +19 -0
  49. package/dist/tools/replay.d.ts.map +1 -0
  50. package/dist/tools/replay.js +62 -0
  51. package/dist/tools/tool-executor.d.ts +58 -0
  52. package/dist/tools/tool-executor.d.ts.map +1 -0
  53. package/dist/tools/tool-executor.js +100 -0
  54. package/dist/tools/tool-utils.d.ts +133 -0
  55. package/dist/tools/tool-utils.d.ts.map +1 -0
  56. package/dist/tools/tool-utils.js +380 -0
  57. package/dist/tools/xray.d.ts +58 -0
  58. package/dist/tools/xray.d.ts.map +1 -0
  59. package/dist/tools/xray.js +110 -0
  60. package/dist/trust/index.d.ts +26 -0
  61. package/dist/trust/index.d.ts.map +1 -0
  62. package/dist/trust/index.js +47 -0
  63. package/dist/trust/tools.d.ts +4 -0
  64. package/dist/trust/tools.d.ts.map +1 -0
  65. package/dist/trust/tools.js +71 -0
  66. package/dist/types.d.ts +141 -0
  67. package/dist/types.d.ts.map +1 -0
  68. package/dist/types.js +10 -0
  69. package/package.json +63 -0
@@ -0,0 +1,131 @@
"use strict";
/**
 * @purpose OpenAI LLM provider with support for GPT models, O-series models, and OpenOnion managed keys via configurable baseURL
 * @llm-note
 * Dependencies: imports from [openai npm package, src/types.ts] | imported by [src/llm/index.ts, src/index.ts] | tested by [tests/e2e/realProviders.test.ts]
 * Data flow: receives Message[] + FunctionSchema[] → converts to OpenAI format → calls client.chat.completions.create() → parses tool_calls → returns LLMResponse
 * State/Effects: makes HTTP POST to OpenAI API (or baseURL override) | reads env OPENAI_API_KEY/OPENONION_API_KEY | no persistent state
 * Integration: implements LLM interface | exposes complete(), structuredComplete() | default model 'o4-mini' | supports baseURL override for co/* models via createLLM factory
 * Performance: direct API call, no caching | tool_choice: 'auto' for parallel tool execution
 */
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAILLM = void 0;
const openai_1 = __importDefault(require("openai"));
class OpenAILLM {
    /**
     * @param apiKey - explicit API key; falls back to env OPENAI_API_KEY, then OPENONION_API_KEY
     * @param model - model identifier (default 'o4-mini')
     * @param options - optional settings; options.baseURL overrides the API endpoint
     *                  (env OPENAI_BASE_URL / OPENONION_BASE_URL are used when not given)
     * @throws Error when no API key can be resolved
     */
    constructor(apiKey, model = 'o4-mini', options) {
        const key = apiKey || process.env.OPENAI_API_KEY || process.env.OPENONION_API_KEY;
        if (!key) {
            throw new Error('OpenAI-compatible API key required. Set OPENAI_API_KEY (or OPENONION_API_KEY) or pass apiKey.');
        }
        // Allow overriding base URL to support managed/proxy endpoints
        const envBase = process.env.OPENAI_BASE_URL || process.env.OPENONION_BASE_URL;
        const baseURL = options?.baseURL || envBase;
        this.client = new openai_1.default(baseURL ? { apiKey: key, baseURL } : { apiKey: key });
        this.model = model;
    }
    /**
     * Run one chat completion, optionally advertising tools to the model.
     *
     * @param messages - conversation history in the SDK's Message format
     * @param tools - optional array of FunctionSchema objects the model may call
     * @returns object with { content, toolCalls, rawResponse }
     * @throws Error when the API response contains no choices, or when a tool
     *         call carries arguments that are not valid JSON
     */
    async complete(messages, tools) {
        const params = {
            model: this.model,
            messages: this.convertMessages(messages),
        };
        if (tools && tools.length > 0) {
            params.tools = tools.map(tool => ({
                type: 'function',
                function: tool,
            }));
            // 'auto' lets the model decide whether (and how many) tools to call
            params.tool_choice = 'auto';
        }
        const response = await this.client.chat.completions.create(params);
        // Guard: an empty `choices` array would otherwise surface as an opaque
        // TypeError; fail with a clear message instead (matches the optional
        // chaining style already used in structuredComplete's fallback).
        const message = response.choices?.[0]?.message;
        if (!message) {
            throw new Error('OpenAI API response contained no choices');
        }
        // Parse tool calls requested by the model
        const toolCalls = [];
        if (message.tool_calls) {
            for (const tc of message.tool_calls) {
                let parsedArgs;
                try {
                    parsedArgs = JSON.parse(tc.function.arguments);
                }
                catch (e) {
                    // Models occasionally emit truncated/invalid JSON arguments;
                    // surface which tool failed rather than a bare SyntaxError.
                    throw new Error(`Malformed JSON arguments for tool call '${tc.function.name}': ${String(e)}`);
                }
                toolCalls.push({
                    name: tc.function.name,
                    arguments: parsedArgs,
                    id: tc.id,
                });
            }
        }
        return {
            content: message.content,
            toolCalls,
            rawResponse: response,
        };
    }
    /**
     * Convert SDK Message objects into the shape expected by the OpenAI chat API
     * (stringifying tool-call arguments, forwarding name/tool_call_id when set).
     */
    convertMessages(messages) {
        return messages.map(msg => {
            const converted = {
                role: msg.role,
                content: msg.content,
            };
            if (msg.name) {
                converted.name = msg.name;
            }
            if (msg.tool_calls) {
                converted.tool_calls = msg.tool_calls.map(tc => ({
                    id: tc.id,
                    type: 'function',
                    function: {
                        name: tc.name,
                        // OpenAI expects arguments as a JSON string, not an object
                        arguments: JSON.stringify(tc.arguments),
                    },
                }));
            }
            if (msg.tool_call_id) {
                converted.tool_call_id = msg.tool_call_id;
            }
            return converted;
        });
    }
    /**
     * Request a structured (schema-conforming) completion.
     *
     * Strategy: try the SDK's responses.parse() API when available; otherwise
     * fall back to a plain chat completion instructed to return only JSON.
     *
     * @param messages - conversation history
     * @param schema - target schema (passed to the SDK or embedded in the prompt)
     * @returns the parsed object
     * @throws Error when neither path yields parseable JSON
     */
    async structuredComplete(messages, schema) {
        // Prefer responses.parse if available in the SDK
        const anyClient = this.client;
        try {
            if (anyClient.responses && typeof anyClient.responses.parse === 'function') {
                const response = await anyClient.responses.parse({
                    model: this.model,
                    input: this.convertMessages(messages),
                    // Not all SDKs accept arbitrary objects; if it errors, fallback below.
                    text_format: schema,
                });
                // Try common shapes for parsed output
                if (response?.output_parsed)
                    return response.output_parsed;
                if (response?.parsed)
                    return response.parsed;
            }
        }
        catch (_err) {
            // Fall through to chat JSON fallback
        }
        // Fallback: ask the model to return only JSON matching the schema
        const system = `You are a JSON generator. Return ONLY valid minified JSON matching this schema: ${JSON.stringify(schema)}. No prose.`;
        const openAIMessages = this.convertMessages([
            { role: 'system', content: system },
            ...messages,
        ]);
        const res = await this.client.chat.completions.create({
            model: this.model,
            messages: openAIMessages,
            temperature: 0,
        });
        const content = res.choices?.[0]?.message?.content || '{}';
        try {
            return JSON.parse(content);
        }
        catch (e) {
            // Try to extract JSON substring if the model added extra text
            const match = content.match(/\{[\s\S]*\}/);
            if (match)
                return JSON.parse(match[0]);
            throw new Error(`Failed to parse structured JSON: ${String(e)} | content=${content}`);
        }
    }
}
exports.OpenAILLM = OpenAILLM;
@@ -0,0 +1,368 @@
"use strict";
/**
 * Core Agent implementation for ConnectOnion TypeScript SDK
 *
 * The Agent class is the main orchestrator that combines LLM capabilities
 * with tool execution to create powerful AI agents that can perform actions.
 */
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.Agent = void 0;
const llm_1 = require("../llm");
const history_1 = require("../history");
const tool_utils_1 = require("../tools/tool-utils");
const dotenv = __importStar(require("dotenv"));
// Load environment variables from .env file
dotenv.config();
/**
 * Agent class - The core of ConnectOnion
 *
 * An Agent combines:
 * - LLM for intelligence and reasoning
 * - Tools for taking actions
 * - History for behavior tracking
 * - System prompts for personality
 *
 * @example
 * ```typescript
 * // Create a simple agent with a calculator tool
 * function add(a: number, b: number): number {
 *   return a + b;
 * }
 *
 * const agent = new Agent({
 *   name: 'calculator',
 *   tools: [add],
 *   systemPrompt: 'You are a helpful math assistant.'
 * });
 *
 * const response = await agent.input('What is 5 plus 3?');
 * console.log(response); // "5 plus 3 equals 8"
 * ```
 */
class Agent {
    /**
     * Creates a new Agent instance
     *
     * @param config - Configuration object for the agent
     * @param config.name - Unique name for the agent (used for behavior tracking)
     * @param config.llm - Optional custom LLM instance
     * @param config.tools - Array of tools (functions, class instances, or Tool objects)
     * @param config.systemPrompt - System prompt defining agent behavior
     * @param config.apiKey - API key for LLM provider (uses env var if not provided)
     * @param config.model - Model to use (default: 'gpt-4o-mini')
     * @param config.maxIterations - Max iterations for tool calling (default: 10)
     */
    constructor(config) {
        this.name = config.name;
        this.systemPrompt = config.systemPrompt || `You are ${config.name}, a helpful AI assistant.`;
        // ?? (not ||) so an explicit numeric value is never silently replaced
        this.maxIterations = config.maxIterations ?? 10;
        // Process tools: convert functions, class instances, etc. to Tool objects
        this.tools = (0, tool_utils_1.processTools)(config.tools || []);
        // Create a map for O(1) tool lookup by name
        this.toolMap = new Map(this.tools.map(tool => [tool.name, tool]));
        // Initialize behavior history tracking
        // All behaviors are saved to ~/.connectonion/agents/{name}/behavior.json
        this.history = new history_1.History(this.name);
        // Initialize LLM - either use provided instance or create one
        if (config.llm) {
            this.llm = config.llm;
        }
        else {
            // Create LLM based on model name (currently supports OpenAI)
            this.llm = (0, llm_1.createLLM)(config.model || 'gpt-4o-mini', config.apiKey);
        }
        // Trust parameter handling can be added later if needed
    }
    /**
     * Process user input and generate a response
     *
     * The agent will:
     * 1. Process the input prompt
     * 2. Decide whether to use tools
     * 3. Execute any necessary tool calls
     * 4. Generate a final response
     *
     * If the iteration budget runs out while the model is still requesting
     * tools, the most recent assistant text (if any) is returned instead of
     * an empty string, so partial progress is not silently discarded.
     *
     * @param prompt - The user's input prompt
     * @param maxIterations - Override the default max iterations for this request
     * @returns The agent's response as a string
     *
     * @example
     * ```typescript
     * const response = await agent.input('What is the weather in NYC?');
     *
     * // With custom iteration limit for complex tasks
     * const report = await agent.input('Analyze this data and create a report', 20);
     * ```
     */
    async input(prompt, maxIterations) {
        // ?? (not ||) so a caller-supplied value always wins over the default
        const iterations = maxIterations ?? this.maxIterations;
        // Record the input in history
        this.history.addInput(prompt);
        // Initialize conversation with system prompt and user input
        const messages = [
            { role: 'system', content: this.systemPrompt },
            { role: 'user', content: prompt }
        ];
        // Convert tools to OpenAI-compatible function schemas
        const toolSchemas = this.tools.map(tool => tool.toFunctionSchema());
        let finalResponse = '';
        // Main execution loop - allows for multiple rounds of tool calling
        for (let i = 0; i < iterations; i++) {
            // Call LLM with current conversation and available tools
            const llmResponse = await this.llm.complete(messages, toolSchemas);
            // Record the LLM response in history
            this.history.addLLMResponse(llmResponse);
            // Add assistant's response to the conversation
            if (llmResponse.content || llmResponse.toolCalls.length > 0) {
                const assistantMessage = {
                    role: 'assistant',
                    content: llmResponse.content || '',
                };
                // Include tool calls in the message if present
                if (llmResponse.toolCalls.length > 0) {
                    assistantMessage.tool_calls = llmResponse.toolCalls;
                }
                messages.push(assistantMessage);
            }
            // Process tool calls if the LLM requested any
            if (llmResponse.toolCalls.length > 0) {
                // Keep the latest assistant text so an exhausted iteration
                // budget does not return an empty string
                if (llmResponse.content) {
                    finalResponse = llmResponse.content;
                }
                // Execute all tool calls in parallel for efficiency
                const toolResults = await this.executeToolCalls(llmResponse.toolCalls);
                // Add tool results back to the conversation
                // This allows the LLM to see the results and continue reasoning
                for (const result of toolResults) {
                    messages.push({
                        role: 'tool',
                        content: JSON.stringify(result.result),
                        tool_call_id: result.callId,
                    });
                }
            }
            else {
                // No more tool calls - we have our final response
                finalResponse = llmResponse.content || '';
                break;
            }
        }
        // Record the final output in history
        this.history.addOutput(finalResponse);
        return finalResponse;
    }
    /**
     * Execute multiple tool calls in parallel
     *
     * @param toolCalls - Array of tool calls to execute
     * @returns Array of results with their corresponding call IDs
     *
     * @private
     */
    async executeToolCalls(toolCalls) {
        // Execute all tool calls in parallel using Promise.all
        // This significantly improves performance when multiple tools are called
        const results = await Promise.all(toolCalls.map(async (toolCall) => {
            const result = await this.executeToolCall(toolCall.name, toolCall.arguments, toolCall.id);
            return { result, callId: toolCall.id };
        }));
        return results;
    }
    /**
     * Execute a single tool call
     *
     * Never throws: unknown tools and tool errors are converted into status
     * objects so the LLM can observe the failure and recover.
     *
     * @param name - Name of the tool to execute
     * @param args - Arguments to pass to the tool
     * @param callId - Unique identifier for this tool call
     * @returns The result of the tool execution
     *
     * @private
     */
    async executeToolCall(name, args, callId) {
        // Look up the tool by name
        const tool = this.toolMap.get(name);
        // Handle case where tool doesn't exist
        if (!tool) {
            const result = {
                status: 'not_found',
                error: `Tool ${name} not found`,
            };
            // Record the failed tool call in history
            this.history.addToolCall(name, args, result, callId);
            return result;
        }
        try {
            // Execute the tool with provided arguments
            const output = await tool.run(args);
            const result = {
                status: 'success',
                result: output,
            };
            // Record successful tool call in history
            this.history.addToolCall(name, args, result, callId);
            return result;
        }
        catch (error) {
            // Handle tool execution errors gracefully
            const result = {
                status: 'error',
                error: error instanceof Error ? error.message : String(error),
            };
            // Record the error in history
            this.history.addToolCall(name, args, result, callId);
            return result;
        }
    }
    /**
     * Get the complete conversation history
     *
     * @returns Array of all behavior entries
     *
     * @example
     * ```typescript
     * const history = agent.getHistory();
     * const toolCalls = history.filter(h => h.type === 'tool_call');
     * ```
     */
    getHistory() {
        return this.history.getBehaviors();
    }
    /**
     * Clear the conversation history
     *
     * This removes all recorded behaviors for this agent.
     * Use with caution as this action cannot be undone.
     */
    clearHistory() {
        this.history.clear();
    }
    /**
     * Get all available tools for this agent
     *
     * @returns Array of Tool objects
     */
    getTools() {
        return this.tools;
    }
    /**
     * Dynamically add a new tool to the agent
     *
     * @param tool - Tool to add (function, class instance, or Tool object)
     *
     * @example
     * ```typescript
     * function newTool(param: string): string {
     *   return `Processed: ${param}`;
     * }
     * agent.addTool(newTool);
     * ```
     */
    addTool(tool) {
        const processed = (0, tool_utils_1.processTools)([tool]);
        for (const t of processed) {
            this.tools.push(t);
            this.toolMap.set(t.name, t);
        }
    }
    /**
     * Remove a tool by name
     *
     * @param name - Name of the tool to remove
     * @returns true if the tool was found and removed, false otherwise
     */
    removeTool(name) {
        const index = this.tools.findIndex(t => t.name === name);
        if (index !== -1) {
            this.tools.splice(index, 1);
            this.toolMap.delete(name);
            return true;
        }
        return false;
    }
}
exports.Agent = Agent;
@@ -0,0 +1,140 @@
"use strict";
/**
 * History tracking for ConnectOnion TypeScript SDK
 */
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.History = void 0;
const fs = __importStar(require("fs"));
const path = __importStar(require("path"));
const os = __importStar(require("os"));
/**
 * Persistent per-agent behavior log.
 *
 * Entries are appended in memory and flushed to
 * ~/.connectonion/agents/{name}/behavior.json after every mutation.
 */
class History {
    /**
     * @param name - agent name; determines the on-disk storage directory
     */
    constructor(name) {
        this.behaviors = [];
        // Behaviors are stored under ~/.connectonion/agents/{name}/
        const agentDir = path.join(os.homedir(), '.connectonion', 'agents', name);
        if (!fs.existsSync(agentDir)) {
            fs.mkdirSync(agentDir, { recursive: true });
        }
        this.behaviorPath = path.join(agentDir, 'behavior.json');
        this.loadFromDisk();
    }
    /**
     * Populate this.behaviors from an existing behavior.json, if present.
     * Any read/parse problem leaves the log empty and logs a warning.
     */
    loadFromDisk() {
        if (!fs.existsSync(this.behaviorPath)) {
            return;
        }
        try {
            const raw = fs.readFileSync(this.behaviorPath, 'utf-8');
            const parsed = JSON.parse(raw);
            // Ensure behaviors is always an array
            if (Array.isArray(parsed)) {
                this.behaviors = parsed;
            }
            else {
                console.warn('Behavior file does not contain an array, initializing empty');
                this.behaviors = [];
            }
        }
        catch (error) {
            console.warn(`Failed to load existing behaviors: ${error}`);
            this.behaviors = [];
        }
    }
    /**
     * Append a timestamped entry and persist immediately.
     */
    add(type, data) {
        this.behaviors.push({
            timestamp: new Date().toISOString(),
            type,
            data,
        });
        this.save();
    }
    /** Record a user input. */
    addInput(prompt) {
        this.add('input', { prompt });
    }
    /** Record a raw LLM response. */
    addLLMResponse(response) {
        this.add('llm_response', response);
    }
    /** Record a tool invocation together with its result. */
    addToolCall(name, args, result, callId) {
        this.add('tool_call', {
            name,
            arguments: args,
            result,
            call_id: callId,
        });
    }
    /** Record the agent's final output. */
    addOutput(output) {
        this.add('output', { output });
    }
    /** All recorded entries, oldest first. */
    getBehaviors() {
        return this.behaviors;
    }
    /** Drop every entry and persist the now-empty log. */
    clear() {
        this.behaviors = [];
        this.save();
    }
    /**
     * Flush the in-memory log to disk; failures are logged, never thrown.
     */
    save() {
        try {
            const serialized = JSON.stringify(this.behaviors, null, 2);
            fs.writeFileSync(this.behaviorPath, serialized, 'utf-8');
        }
        catch (error) {
            console.error(`Failed to save behaviors: ${error}`);
        }
    }
}
exports.History = History;
@@ -0,0 +1,34 @@
"use strict";
/**
 * ConnectOnion TypeScript SDK
 * A framework for creating AI agents with behavior tracking
 */
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __exportStar = (this && this.__exportStar) || function(m, exports) {
    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.processTools = exports.extractMethodsFromInstance = exports.isClassInstance = exports.createToolFromFunction = exports.History = exports.OpenAILLM = exports.createLLM = exports.Agent = void 0;
// Small helper: re-export `name` as a live getter so consumers always see
// the current binding from the source module.
var reexport = function (name, getter) {
    Object.defineProperty(exports, name, { enumerable: true, get: getter });
};
var coreAgentModule = require("./core/agent");
reexport("Agent", function () { return coreAgentModule.Agent; });
var llmModule = require("./llm");
reexport("createLLM", function () { return llmModule.createLLM; });
reexport("OpenAILLM", function () { return llmModule.OpenAILLM; });
var historyModule = require("./history");
reexport("History", function () { return historyModule.History; });
var toolUtilsModule = require("./tools/tool-utils");
reexport("createToolFromFunction", function () { return toolUtilsModule.createToolFromFunction; });
reexport("isClassInstance", function () { return toolUtilsModule.isClassInstance; });
reexport("extractMethodsFromInstance", function () { return toolUtilsModule.extractMethodsFromInstance; });
reexport("processTools", function () { return toolUtilsModule.processTools; });
// Forward every named export from ./types as well
__exportStar(require("./types"), exports);
@@ -0,0 +1,22 @@
"use strict";
/**
 * LLM factory and exports for ConnectOnion TypeScript SDK
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAILLM = void 0;
exports.createLLM = createLLM;
const openai_1 = require("./openai");
var openai_2 = require("./openai");
Object.defineProperty(exports, "OpenAILLM", { enumerable: true, get: function () { return openai_2.OpenAILLM; } });
/**
 * Create an LLM instance based on the model name.
 *
 * Only OpenAI-backed models are supported today, so every model name maps to
 * OpenAILLM. (The former `gpt-`/`o1-` prefix check was redundant: both the
 * matching branch and the fallthrough constructed the identical instance.)
 * When additional providers (Anthropic, Google, ...) are added, dispatch on
 * the model prefix here.
 *
 * @param model - model identifier (default 'gpt-4o-mini')
 * @param apiKey - optional API key; OpenAILLM falls back to env vars when omitted
 * @returns a configured OpenAILLM instance
 */
function createLLM(model = 'gpt-4o-mini', apiKey) {
    return new openai_1.OpenAILLM(apiKey, model);
}