@kigathi/ai-agents 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Chege Kigathi
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,91 @@
1
+ # @kigathi/ai-agents
2
+
3
+ Unified Agents/Bots SDK for Responses API.
4
+
5
+ ## Install
6
+
7
+ ```bash
8
+ npm install @kigathi/ai-agents
9
+ ```
10
+
11
+ ## Direct OpenAI mode
12
+
13
+ ```js
14
+ import { createClient } from '@kigathi/ai-agents';
15
+
16
+ const sdk = createClient({
17
+ apiKey: process.env.OPENAI_API_KEY,
18
+ orgId: process.env.OPENAI_ORG_ID,
19
+ projectId: process.env.OPENAI_PROJECT_ID,
20
+ });
21
+
22
+ sdk.registerTool({
23
+ name: 'lookup_order',
24
+ type: 'function',
25
+ description: 'Find order by order number',
26
+ parameters_schema: {
27
+ type: 'object',
28
+ properties: { order_number: { type: 'string' } },
29
+ required: ['order_number'],
30
+ },
31
+ handler: async ({ order_number }) => ({ order_number, status: 'processing' }),
32
+ });
33
+
34
+ sdk.createAgent({
35
+ name: 'support-bot',
36
+ model: 'gpt-4.1-mini',
37
+ instructions: 'You are a concise support assistant.',
38
+ tools: ['lookup_order'],
39
+ });
40
+
41
+ const result = await sdk.run({ agent: 'support-bot', message: 'Check order AX-4420' });
42
+ console.log(result.output_text);
43
+ ```
44
+
45
+ ## Streaming
46
+
47
+ ```js
48
+ for await (const delta of sdk.runStream({
49
+ agent: 'support-bot',
50
+ message: 'Give me a short summary of today\'s ticket updates',
51
+ })) {
52
+ process.stdout.write(delta);
53
+ }
54
+ ```
55
+
56
+ ## Backend proxy mode (frontend-safe)
57
+
58
+ ```js
59
+ const sdk = createClient({
60
+ backendUrl: 'https://api.example.com',
61
+ mode: 'proxy',
62
+ });
63
+
64
+ const result = await sdk.run({
65
+ agent: 'support-bot',
66
+ message: 'Start claim #99',
67
+ conversation_id: 1234,
68
+ });
69
+ ```
70
+
71
+ ## Full sample apps
72
+
73
+ - `../examples/lyre-ai-agents-node/express-chat` - Express server + Tailwind widget UI
74
+ - `../examples/lyre-ai-agents-node/nuxt-chat` - Nuxt 3 app + server API route + Tailwind
75
+ - `../examples/lyre-ai-agents-node/sveltekit-chat` - SvelteKit app + server endpoint + Tailwind
76
+
77
+ All three use `@kigathi/ai-agents` in `proxy` mode against Axis backend so conversation/message/cost metadata stays in Axis.
78
+
79
+ ## Publishing to npm
80
+
81
+ This package is configured for public publishing as a scoped package via:
82
+
83
+ - `name: @kigathi/ai-agents`
84
+ - `publishConfig.access: public`
85
+
86
+ Before the first publish, make sure you:
87
+
88
+ 1. Pick and add a license file.
89
+ 2. Log in with `npm login`.
90
+ 3. Run `npm pack --dry-run` to inspect the publish artifact.
91
+ 4. Publish with `npm publish`.
package/package.json ADDED
@@ -0,0 +1,41 @@
1
+ {
2
+ "name": "@kigathi/ai-agents",
3
+ "version": "0.1.0",
4
+ "description": "Forward-only Agents/Bots SDK on top of OpenAI Responses API.",
5
+ "type": "module",
6
+ "sideEffects": false,
7
+ "main": "src/index.js",
8
+ "exports": {
9
+ ".": "./src/index.js"
10
+ },
11
+ "engines": {
12
+ "node": ">=18"
13
+ },
14
+ "files": [
15
+ "src",
16
+ "README.md"
17
+ ],
18
+ "keywords": [
19
+ "openai",
20
+ "agents",
21
+ "bots",
22
+ "responses-api",
23
+ "sdk",
24
+ "ai"
25
+ ],
26
+ "repository": {
27
+ "type": "git",
28
+ "url": "git+https://github.com/kigathi-chege/lyre-ai-agents-node.git"
29
+ },
30
+ "bugs": {
31
+ "url": "https://github.com/kigathi-chege/lyre-ai-agents-node/issues"
32
+ },
33
+ "homepage": "https://github.com/kigathi-chege/lyre-ai-agents-node#readme",
34
+ "license": "MIT",
35
+ "publishConfig": {
36
+ "access": "public"
37
+ },
38
+ "dependencies": {
39
+ "openai": "^4.100.0"
40
+ }
41
+ }
@@ -0,0 +1,37 @@
1
+ import { resolveRemoteAgent } from "./backend.js";
2
+
3
/**
 * Register a tool on the client so agents can reference it by name.
 *
 * @param {object} client - Internal client state (holds the `tools` Map).
 * @param {object} tool - Tool definition; `name` is the registry key.
 * @returns {object} The registered tool definition.
 * @throws {Error} If the tool has no usable `name`.
 */
export function registerTool(client, tool) {
  // Guard against silently registering under an unusable key such as
  // `undefined`, which would make the tool unreachable by name.
  if (typeof tool?.name !== "string" || tool.name.length === 0) {
    throw new Error("registerTool: tool.name must be a non-empty string");
  }
  client.tools.set(tool.name, tool);
  return tool;
}
7
+
8
/**
 * Build an agent definition from `definition` and index it on the client.
 * The agent becomes reachable by its id, the string form of its id, and
 * its name. Missing optional fields fall back to empty defaults.
 */
export function createAgent(client, definition) {
  const {
    id,
    name,
    model,
    instructions,
    temperature,
    max_output_tokens: maxOutputTokens,
    tools,
    metadata,
  } = definition;

  const agent = {
    id: id || name,
    name,
    model,
    instructions: instructions || "",
    temperature,
    max_output_tokens: maxOutputTokens,
    tools: tools || [],
    metadata: metadata || {},
  };

  // Index under every key callers are likely to use for lookup.
  for (const key of [agent.id, String(agent.id), agent.name]) {
    client.agents.set(key, agent);
  }
  return agent;
}
25
+
26
/**
 * Resolve an agent reference to a concrete agent definition.
 * Accepts a full agent object (anything with a `model`), a locally
 * registered id/name, or a backend-resolvable identifier.
 *
 * @throws {Error} When no local or remote agent matches `input`.
 */
export async function resolveAgent(client, input) {
  // A fully-formed agent object is used as-is.
  if (typeof input === "object" && input?.model) {
    return input;
  }

  const key = String(input);
  const fromLocal = client.agents.get(input) || client.agents.get(key);
  if (fromLocal) {
    return fromLocal;
  }

  const fromBackend = await resolveRemoteAgent(client, input);
  if (fromBackend) {
    return fromBackend;
  }

  throw new Error(`Unknown agent: ${input}`);
}
@@ -0,0 +1,70 @@
1
/**
 * POST an event to the backend events endpoint, best-effort.
 * Does nothing when no backend URL is configured; all failures are
 * swallowed so event sync can never break a run.
 */
export async function maybeSyncEvent(client, event) {
  const { backendUrl } = client.config;
  if (!backendUrl) {
    return;
  }

  try {
    await fetch(`${backendUrl}/api/ai-agents/events`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(event),
    });
  } catch {
    // Event sync is best effort; transport/serialization errors ignored.
  }
}
16
+
17
/**
 * Send an event to the backend ingestion endpoint and return its JSON
 * reply. Returns null when no backend is configured or when the backend
 * rejects the request (non-2xx status).
 */
export async function ingestEvent(client, body) {
  const { backendUrl } = client.config;
  if (!backendUrl) {
    return null;
  }

  const response = await fetch(`${backendUrl}/api/ai-agents/events`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });

  return response.ok ? await response.json() : null;
}
32
+
33
/**
 * Resolve an agent definition from the backend, caching results per key.
 * Returns null when no backend is configured or when resolution fails.
 */
export async function resolveRemoteAgent(client, input) {
  if (!client.config.backendUrl) {
    return null;
  }

  const cacheKey = String(input);
  const hit = client.backendAgents.get(cacheKey);
  if (hit) {
    return hit;
  }

  const response = await fetch(
    `${client.config.backendUrl}/api/ai-agents/agents/resolve`,
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ agent: input }),
    },
  );

  if (!response.ok) {
    return null;
  }

  const remote = await response.json();

  // Normalize the backend payload into the local agent shape.
  const resolved = {
    id: remote.id ?? input,
    name: remote.name ?? cacheKey,
    model: remote.model,
    instructions: remote.instructions || "",
    temperature: remote.temperature,
    max_output_tokens: remote.max_output_tokens,
    tools: remote.tools || [],
    metadata: remote.metadata || {},
  };

  // Cache under both the requested key and the canonical id so later
  // lookups by either form hit the cache.
  client.backendAgents.set(cacheKey, resolved);
  if (resolved.id !== undefined && resolved.id !== null) {
    client.backendAgents.set(String(resolved.id), resolved);
  }
  return resolved;
}
@@ -0,0 +1,11 @@
1
/**
 * Default per-model pricing in USD per million tokens, used for cost
 * estimation when the caller does not supply its own pricing table.
 * Frozen (including nested entries) so shared client configs cannot
 * mutate it accidentally.
 */
export const DEFAULT_PRICING = Object.freeze({
  "gpt-4.1": Object.freeze({
    prompt_per_million: 2.0,
    completion_per_million: 8.0,
  }),
  "gpt-4.1-mini": Object.freeze({
    prompt_per_million: 0.4,
    completion_per_million: 1.6,
  }),
  "gpt-4.1-nano": Object.freeze({
    prompt_per_million: 0.1,
    completion_per_million: 0.4,
  }),
});
@@ -0,0 +1,79 @@
1
/**
 * Build the Responses API `input` array for a run.
 *
 * When a previous response id exists only the new user turn is sent (the
 * API threads prior context server-side); otherwise the trailing window
 * of `params.messages` (default 30 entries) is included ahead of the new
 * user turn. `params.messages` is never mutated.
 */
export function normalizeMessages(agent, params, previousResponseId = null) {
  const userTurn = {
    role: "user",
    content: [{ type: "input_text", text: params.message }],
  };

  if (previousResponseId) {
    return [userTurn];
  }

  const windowSize = params.max_history_messages || 30;
  const input = (params.messages || []).slice(-windowSize);
  input.push(userTurn);
  return input;
}
19
+
20
/**
 * Estimate the USD cost of a completion from token counts.
 *
 * @param {object} pricing - Map of model name -> { prompt_per_million, completion_per_million }.
 * @param {string} model - Model identifier to look up.
 * @param {number} promptTokens - Input token count.
 * @param {number} completionTokens - Output token count.
 * @returns {number} Cost in USD rounded to 8 decimal places; 0 for unknown models.
 */
export function calculateCost(pricing, model, promptTokens, completionTokens) {
  // Object.hasOwn avoids matching inherited keys such as "toString" when
  // the pricing table is a plain object, which previously produced NaN.
  if (!pricing || !Object.hasOwn(pricing, model)) return 0;
  const modelPricing = pricing[model];
  if (!modelPricing) return 0;

  const promptCost =
    (promptTokens / 1_000_000) * (modelPricing.prompt_per_million || 0);
  const completionCost =
    (completionTokens / 1_000_000) * (modelPricing.completion_per_million || 0);
  return Number((promptCost + completionCost).toFixed(8));
}
30
+
31
/**
 * Translate the agent's tool list into Responses API tool descriptors.
 * When the agent lists no tools, every registered tool is offered. Names
 * without a local registration are treated as builtin tool types.
 */
export function buildResponseTools(client, agent) {
  const names = agent.tools?.length ? agent.tools : [...client.tools.keys()];

  return names.map((name) => {
    const registered = client.tools.get(name) || { type: "builtin", name };
    if (registered.type === "builtin") {
      // Builtins are referenced purely by type, e.g. { type: "web_search" }.
      return { type: registered.name };
    }
    return {
      type: "function",
      name: registered.name,
      description: registered.description || "",
      parameters: registered.parameters_schema || {
        type: "object",
        properties: {},
      },
    };
  });
}
49
+
50
/**
 * Concatenate all `output_text` fragments from a Responses API result,
 * joined with newlines and trimmed. Returns "" when there is no text.
 */
export function extractOutputText(response) {
  const pieces = (response.output || []).flatMap((item) =>
    (item.content || [])
      .filter((part) => part.type === "output_text" && part.text)
      .map((part) => part.text),
  );
  return pieces.join("\n").trim();
}
60
+
61
/**
 * Parse a JSON string, degrading to an empty object when the input is
 * missing or malformed. Empty/null input parses as "{}".
 */
export function safeJson(value) {
  const text = value || "{}";
  try {
    return JSON.parse(text);
  } catch {
    // Malformed tool arguments degrade to an empty object.
    return {};
  }
}
68
+
69
/**
 * Return the id of the first `message` item in a response's output,
 * or null when no such item (with an id) exists.
 */
export function extractOutputMessageId(response) {
  for (const item of response?.output || []) {
    if (item?.type === "message" && item?.id) {
      return item.id;
    }
  }
  return null;
}
74
+
75
/**
 * Detach a promise so a rejection never surfaces as an unhandled
 * rejection. Used for best-effort persistence/event ingestion.
 */
export function fireAndForget(pending) {
  const swallow = () => {
    // Intentionally ignored: async persistence is best-effort.
  };
  pending.catch(swallow);
}
@@ -0,0 +1,37 @@
1
/**
 * Lazily construct (and memoize on `client.openai`) the OpenAI SDK client
 * for direct mode. Returns null in any non-direct mode, where OpenAI
 * credentials are never used.
 */
export async function getOpenAIClient(client) {
  const { mode, apiKey, orgId, projectId } = client.config;
  if (mode !== "direct") {
    return null;
  }

  if (!client.openai) {
    const OpenAI = await resolveOpenAIConstructor();
    client.openai = new OpenAI({
      apiKey,
      organization: orgId,
      project: projectId,
    });
  }

  return client.openai;
}
19
+
20
/**
 * Locate the `openai` package constructor. A normal dynamic import is
 * tried first; if that fails, the package is resolved from the consuming
 * app's node_modules via createRequire, so the SDK still works when
 * `openai` is installed only by the application.
 *
 * @throws {Error} When the package cannot be loaded either way (the
 *   original import failure is attached as `cause`).
 */
export async function resolveOpenAIConstructor() {
  const pickConstructor = (mod) => mod.default || mod.OpenAI || mod;

  try {
    return pickConstructor(await import("openai"));
  } catch (firstError) {
    try {
      const { createRequire } = await import("node:module");
      const requireFromApp = createRequire(`${process.cwd()}/package.json`);
      return pickConstructor(requireFromApp("openai"));
    } catch {
      throw new Error(
        "Failed to load `openai` package for direct mode. Install it in the consuming app (`npm i openai`) or use proxy mode.",
        { cause: firstError },
      );
    }
  }
}
@@ -0,0 +1,78 @@
1
+ import { ingestEvent } from "./backend.js";
2
+
3
/**
 * Coerce a backend id to a finite number, or null when the value is not
 * meaningfully numeric.
 *
 * Rejects null/undefined/empty strings explicitly: `Number(null)` and
 * `Number("")` both evaluate to 0, which would otherwise turn a missing
 * id into the valid-looking numeric id 0.
 */
function asNumericId(value) {
  if (value === null || value === undefined || value === "") {
    return null;
  }
  const parsed = Number(value);
  return Number.isFinite(parsed) ? parsed : null;
}
7
+
8
/**
 * Upsert the user's message into the backend (direct mode).
 * Returns the conversation id echoed by the backend, falling back to the
 * caller-supplied id (or null when the conversation is still unknown).
 */
export async function persistDirectUserMessage(
  client,
  { agent, params, conversationId, replyingTo },
) {
  const numericAgentId = asNumericId(agent.id);

  // A stable key lets the backend dedupe retries of the same user turn.
  const fallbackKey = [
    "user",
    agent.id,
    conversationId || "new",
    replyingTo || "none",
    String(params.message || "").trim(),
  ].join(":");
  const idempotencyKey = params.idempotency_key || fallbackKey;

  const payload = {
    agent_id: numericAgentId,
    conversation_id: conversationId ?? null,
    external_id: replyingTo || undefined,
    role: "user",
    message: params.message,
    user_id: params.user_id ?? null,
    metadata: params.metadata || {},
    source_message_id: params.client_message_id || null,
  };

  const res = await ingestEvent(client, {
    event_name: "agent.message.upsert",
    idempotency_key: idempotencyKey,
    process_now: true,
    agent_id: numericAgentId,
    conversation_id: conversationId ?? null,
    payload,
    metadata: params.metadata || {},
  });

  return res?.conversation_id ?? conversationId ?? null;
}
39
+
40
/**
 * Upsert the assistant's reply into the backend (direct mode), including
 * token usage and the OpenAI response/message identifiers.
 * Returns the backend's conversation id when available, falling back to
 * the caller-supplied id (or null).
 */
export async function persistDirectAssistantMessage(
  client,
  { agent, params, conversationId, response, responseId, outputMessageId, text },
) {
  const usage = response?.usage || {};
  const numericAgentId = asNumericId(agent.id);

  // Prefer the response id for idempotency; fall back to the message id
  // or the text itself so retries of the same reply stay deduplicated.
  const idempotencyKey =
    params.idempotency_key_response ||
    `assistant:${agent.id}:${conversationId || "new"}:${responseId || outputMessageId || text}`;

  const payload = {
    agent_id: numericAgentId,
    conversation_id: conversationId ?? null,
    external_id: responseId || undefined,
    role: "assistant",
    message: text,
    model: agent.model,
    usage: {
      input_tokens: usage.input_tokens || 0,
      output_tokens: usage.output_tokens || 0,
      total_tokens: usage.total_tokens || 0,
    },
    source_message_id: outputMessageId || responseId || null,
    metadata: {
      ...(params.metadata || {}),
      openai_response_id: responseId || null,
      openai_message_id: outputMessageId || null,
    },
  };

  const res = await ingestEvent(client, {
    event_name: "agent.message.upsert",
    idempotency_key: idempotencyKey,
    process_now: true,
    agent_id: numericAgentId,
    conversation_id: conversationId ?? null,
    payload,
    metadata: params.metadata || {},
  });

  return res?.conversation_id ?? conversationId ?? null;
}
@@ -0,0 +1,66 @@
1
/**
 * Derive the cache key used to store per-conversation client state.
 * Priority: explicit `conversation_key`, then `conversation_id`, then a
 * composite of the agent reference and the (possibly anonymous) user.
 */
export function resolveConversationStateKey(agent, params) {
  if (params.conversation_key) {
    return String(params.conversation_key);
  }
  if (params.conversation_id != null) {
    return `conversation:${params.conversation_id}`;
  }

  const isScalarAgentRef =
    typeof params.agent === "string" || typeof params.agent === "number";
  const agentKey = isScalarAgentRef
    ? String(params.agent)
    : String(agent?.id ?? agent?.name ?? "default-agent");

  const userKey =
    params.user_id != null ? `user:${params.user_id}` : "user:anon";
  return `${agentKey}:${userKey}`;
}
19
+
20
/**
 * Fetch (or lazily create) the mutable state record for a conversation
 * key: conversation id, last response id, a transcript window, and the
 * serialized persistence chain.
 */
export function getConversationState(client, key) {
  let state = client.conversationState.get(key);
  if (!state) {
    state = {
      conversation_id: null,
      last_response_id: null,
      messages: [],
      persistence_chain: Promise.resolve(),
    };
    client.conversationState.set(key, state);
  }
  return state;
}
31
+
32
/**
 * Pick the message history for a request: a non-empty explicit
 * `params.messages` wins; otherwise fall back to the cached state's
 * transcript (or an empty list).
 */
export function resolveMessagesForRequest(params, state) {
  const explicit = params.messages;
  if (Array.isArray(explicit) && explicit.length > 0) {
    return explicit;
  }
  return Array.isArray(state.messages) ? state.messages : [];
}
38
+
39
/**
 * Fold a completed turn back into cached conversation state: remember the
 * conversation id, the id to thread the next request from, and a bounded
 * window of the transcript.
 */
export function updateConversationStateAfterCompletion(
  state,
  { userText, assistantText, conversationId, responseId, outputMessageId, maxHistory },
) {
  if (conversationId != null) {
    state.conversation_id = conversationId;
  }

  // Prefer the response id; the output message id is a usable fallback.
  const threadId = responseId || outputMessageId || null;
  if (threadId) {
    state.last_response_id = threadId;
  }

  if (!userText && !assistantText) {
    return;
  }

  const transcript = Array.isArray(state.messages) ? state.messages : [];
  if (userText) {
    transcript.push({ role: "user", content: userText });
  }
  if (assistantText) {
    transcript.push({ role: "assistant", content: assistantText });
  }

  // Trim to the newest `maxHistory` entries (always keeping at least one).
  const keep = Math.max(1, Number(maxHistory || 30));
  state.messages = transcript.slice(-keep);
}
package/src/index.js ADDED
@@ -0,0 +1,62 @@
1
+ import { DEFAULT_PRICING } from "./core/defaults.js";
2
+ import { createAgent as createAgentImpl, registerTool as registerToolImpl } from "./core/agents.js";
3
+ import { runDirect, runStreamDirect } from "./modes/direct.js";
4
+ import { runProxy, runStreamProxy } from "./modes/proxy.js";
5
+
6
/**
 * Create an SDK handle.
 *
 * Mode selection: an explicit `config.mode` wins; otherwise a backend URL
 * without an API key implies `proxy`, and anything else is `direct`.
 * The returned object exposes helpers bound to the internal client, plus
 * `raw` for advanced/low-level access.
 */
export function createClient(config = {}) {
  const inferredMode = config.backendUrl && !config.apiKey ? "proxy" : "direct";
  const mode = config.mode || inferredMode;

  const client = {
    config: {
      backendUrl: config.backendUrl,
      apiKey: config.apiKey,
      orgId: config.orgId,
      projectId: config.projectId,
      mode,
      pricing: config.pricing || DEFAULT_PRICING,
    },
    tools: new Map(),
    agents: new Map(),
    backendAgents: new Map(),
    conversationState: new Map(),
    openai: null,
  };

  return {
    registerTool: (tool) => registerTool(client, tool),
    createAgent: (agent) => createAgent(client, agent),
    run: (params) => run(client, params),
    runStream: (params) => runStream(client, params),
    raw: client,
  };
}
34
+
35
/**
 * Module-level registerTool: accepts either the SDK handle returned by
 * createClient (unwrapped via `.raw`) or the raw client object.
 */
export function registerTool(clientOrSdk, tool) {
  const target = clientOrSdk.raw || clientOrSdk;
  return registerToolImpl(target, tool);
}
39
+
40
/**
 * Module-level createAgent: accepts either the SDK handle returned by
 * createClient (unwrapped via `.raw`) or the raw client object.
 */
export function createAgent(clientOrSdk, definition) {
  const target = clientOrSdk.raw || clientOrSdk;
  return createAgentImpl(target, definition);
}
44
+
45
/**
 * Execute a non-streaming run, dispatching on the client's mode
 * (proxy -> backend, otherwise direct OpenAI).
 */
export async function run(clientOrSdk, params) {
  const client = clientOrSdk.raw || clientOrSdk;
  return client.config.mode === "proxy"
    ? await runProxy(client, params)
    : await runDirect(client, params);
}
53
+
54
/**
 * Execute a streaming run, yielding text deltas from whichever mode the
 * client is configured for.
 */
export async function* runStream(clientOrSdk, params) {
  const client = clientOrSdk.raw || clientOrSdk;
  const source =
    client.config.mode === "proxy"
      ? runStreamProxy(client, params)
      : runStreamDirect(client, params);
  yield* source;
}
@@ -0,0 +1,279 @@
1
+ import { resolveAgent } from "../core/agents.js";
2
+ import { maybeSyncEvent } from "../core/backend.js";
3
+ import { getOpenAIClient } from "../core/openai.js";
4
+ import {
5
+ buildResponseTools,
6
+ calculateCost,
7
+ extractOutputMessageId,
8
+ extractOutputText,
9
+ fireAndForget,
10
+ normalizeMessages,
11
+ safeJson,
12
+ } from "../core/helpers.js";
13
+ import {
14
+ getConversationState,
15
+ resolveConversationStateKey,
16
+ resolveMessagesForRequest,
17
+ updateConversationStateAfterCompletion,
18
+ } from "../core/state.js";
19
+ import {
20
+ persistDirectAssistantMessage,
21
+ persistDirectUserMessage,
22
+ } from "../core/persistence.js";
23
+
24
/**
 * Append a persistence task to the conversation's serialized work queue.
 * Tasks run strictly in order; a failing task resolves to null so one
 * bad write never wedges the chain for later tasks.
 */
function enqueuePersistence(state, task, onResolved) {
  const tail = state.persistence_chain || Promise.resolve();
  const link = tail
    .then(task)
    .then((value) => {
      onResolved?.(value);
      return value;
    })
    .catch(() => null);

  state.persistence_chain = link;
  return link;
}
39
+
40
/**
 * Run an agent to completion in direct (OpenAI) mode.
 *
 * Resolves the agent, optionally persists the user turn to the backend
 * (serialized per conversation, fire-and-forget), then loops on the
 * Responses API: any function_call items are executed against locally
 * registered tools and their outputs fed back until the model returns a
 * plain text answer or the iteration budget is exhausted.
 *
 * @returns {Promise<object>} conversation/response ids, output text,
 *   usage counters, estimated cost, and the raw final response.
 * @throws {Error} When the tool-call loop exceeds `maxToolIterations`.
 */
export async function runDirect(client, params) {
  const agent = await resolveAgent(client, params.agent);
  const stateKey = resolveConversationStateKey(agent, params);
  const state = getConversationState(client, stateKey);

  // Caller-supplied ids win over whatever the cached state remembers.
  const persistedConversationId =
    params.conversation_id ?? state.conversation_id ?? null;
  const previousResponseId =
    params.replying_to ||
    params.previous_response_id ||
    state.last_response_id ||
    null;

  if (client.config.backendUrl) {
    // Persist the user turn in the background; the backend may allocate
    // a conversation id that we fold back into cached state.
    const persistUser = enqueuePersistence(
      state,
      () =>
        persistDirectUserMessage(client, {
          agent,
          params,
          conversationId: persistedConversationId,
          replyingTo: previousResponseId,
        }),
      (resolvedId) => {
        if (resolvedId != null) {
          state.conversation_id = resolvedId;
        }
      },
    );
    fireAndForget(persistUser);
  }

  const openai = await getOpenAIClient(client);
  const history = normalizeMessages(
    agent,
    { ...params, messages: resolveMessagesForRequest(params, state) },
    previousResponseId,
  );

  const maxIterations = params.maxToolIterations || 8;
  let finalResponse = null;

  for (let iteration = 0; iteration < maxIterations; iteration += 1) {
    finalResponse = await openai.responses.create({
      model: agent.model,
      instructions: agent.instructions || undefined,
      input: history,
      // Thread from the prior response only on the first request; later
      // iterations carry tool outputs in `history` instead.
      // NOTE(review): later iterations append only function_call_output
      // items (without echoing the model's function_call items or a
      // previous_response_id) — confirm the Responses API accepts this
      // input shape for multi-step tool loops.
      previous_response_id:
        previousResponseId && iteration === 0 ? previousResponseId : undefined,
      tools: buildResponseTools(client, agent),
      temperature: agent.temperature,
      max_output_tokens: agent.max_output_tokens,
    });

    const functionCalls = (finalResponse.output || []).filter(
      (item) => item.type === "function_call",
    );

    if (!functionCalls.length) {
      // Terminal answer: persist it, update cached state, and return.
      const text = extractOutputText(finalResponse);
      const responseId = finalResponse?.id || null;
      const outputMessageId = extractOutputMessageId(finalResponse);

      if (client.config.backendUrl) {
        const persistAssistant = enqueuePersistence(
          state,
          () =>
            persistDirectAssistantMessage(client, {
              agent,
              params,
              conversationId: persistedConversationId,
              response: finalResponse,
              responseId,
              outputMessageId,
              text,
            }),
          (resolvedId) => {
            if (resolvedId != null) {
              state.conversation_id = resolvedId;
            }
          },
        );
        fireAndForget(persistAssistant);
      }

      updateConversationStateAfterCompletion(state, {
        userText: String(params.message || ""),
        assistantText: text,
        conversationId: persistedConversationId,
        responseId,
        outputMessageId,
        maxHistory: params.max_history_messages || 30,
      });

      const usage = finalResponse.usage;
      return {
        conversation_id:
          persistedConversationId ?? state.conversation_id ?? null,
        output_text: text,
        response_id: responseId,
        output_message_id: outputMessageId,
        usage: {
          prompt_tokens: usage?.input_tokens || 0,
          completion_tokens: usage?.output_tokens || 0,
          total_tokens: usage?.total_tokens || 0,
        },
        cost_usd: calculateCost(
          client.config.pricing,
          agent.model,
          usage?.input_tokens || 0,
          usage?.output_tokens || 0,
        ),
        raw: finalResponse,
      };
    }

    // Execute each requested tool locally and feed the outputs back.
    for (const call of functionCalls) {
      const tool = client.tools.get(call.name);
      const args = safeJson(call.arguments);

      const result =
        !tool || typeof tool.handler !== "function"
          ? { error: `Tool not registered: ${call.name}` }
          : await tool.handler(args, params.context || {});

      fireAndForget(
        maybeSyncEvent(client, {
          event_name: "AgentToolCalled",
          payload: {
            agent_id: agent.id,
            conversation_id: persistedConversationId,
            tool_name: call.name,
            tool_arguments: args,
            tool_result: result,
          },
        }),
      );

      history.push({
        type: "function_call_output",
        call_id: call.call_id,
        output: JSON.stringify(result),
      });
    }
  }

  throw new Error("Tool call loop exceeded max iterations");
}
187
+
188
/**
 * Run an agent in direct mode and yield output-text deltas as they
 * stream. Mirrors runDirect's persistence and state handling, but issues
 * a single streaming Responses API call (tool-call loops are not handled
 * on this path).
 */
export async function* runStreamDirect(client, params) {
  const agent = await resolveAgent(client, params.agent);
  const stateKey = resolveConversationStateKey(agent, params);
  const state = getConversationState(client, stateKey);

  const persistedConversationId =
    params.conversation_id ?? state.conversation_id ?? null;
  const previousResponseId =
    params.replying_to ||
    params.previous_response_id ||
    state.last_response_id ||
    null;

  if (client.config.backendUrl) {
    // Persist the user turn in the background, serialized per conversation.
    const persistUser = enqueuePersistence(
      state,
      () =>
        persistDirectUserMessage(client, {
          agent,
          params,
          conversationId: persistedConversationId,
          replyingTo: previousResponseId,
        }),
      (resolvedId) => {
        if (resolvedId != null) {
          state.conversation_id = resolvedId;
        }
      },
    );
    fireAndForget(persistUser);
  }

  const openai = await getOpenAIClient(client);
  const stream = await openai.responses.stream({
    model: agent.model,
    instructions: agent.instructions || undefined,
    input: normalizeMessages(
      agent,
      { ...params, messages: resolveMessagesForRequest(params, state) },
      previousResponseId,
    ),
    previous_response_id: previousResponseId || undefined,
    tools: buildResponseTools(client, agent),
    temperature: agent.temperature,
    max_output_tokens: agent.max_output_tokens,
  });

  // Accumulate the full text while forwarding each delta to the caller.
  let text = "";
  for await (const event of stream) {
    if (event.type === "response.output_text.delta") {
      text += event.delta || "";
      yield event.delta;
    }
  }

  const final = await stream.finalResponse();
  const responseId = final?.id || null;
  const outputMessageId = extractOutputMessageId(final);

  if (client.config.backendUrl) {
    const persistAssistant = enqueuePersistence(
      state,
      () =>
        persistDirectAssistantMessage(client, {
          agent,
          params,
          conversationId: persistedConversationId,
          response: final,
          responseId,
          outputMessageId,
          text: text || extractOutputText(final),
        }),
      (resolvedId) => {
        if (resolvedId != null) {
          state.conversation_id = resolvedId;
        }
      },
    );
    fireAndForget(persistAssistant);
  }

  updateConversationStateAfterCompletion(state, {
    userText: String(params.message || ""),
    assistantText: text || extractOutputText(final),
    conversationId: persistedConversationId,
    responseId,
    outputMessageId,
    maxHistory: params.max_history_messages || 30,
  });
}
@@ -0,0 +1,30 @@
1
/**
 * Forward a run request to the configured backend and return its JSON
 * result.
 * @throws {Error} On any non-2xx status.
 */
export async function runProxy(client, params) {
  const url = `${client.config.backendUrl}/api/ai-agents/run`;
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(params),
  });

  if (!response.ok) {
    throw new Error(`Proxy run failed: ${response.status}`);
  }
  return await response.json();
}
11
+
12
/**
 * Forward a streaming run to the backend and yield decoded text chunks
 * as they arrive.
 * @throws {Error} When the request fails or the response has no body.
 */
export async function* runStreamProxy(client, params) {
  const url = `${client.config.backendUrl}/api/ai-agents/stream`;
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(params),
  });

  if (!response.ok || !response.body) {
    throw new Error(`Proxy stream failed: ${response.status}`);
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  for (;;) {
    const chunk = await reader.read();
    if (chunk.done) {
      break;
    }
    // `stream: true` keeps multi-byte characters split across chunks intact.
    yield decoder.decode(chunk.value, { stream: true });
  }
}