chainlesschain 0.40.2 → 0.40.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +22 -15
- package/package.json +1 -1
- package/src/lib/agent-core.js +861 -0
- package/src/lib/chat-core.js +177 -0
- package/src/lib/interaction-adapter.js +177 -0
- package/src/lib/interactive-planner.js +524 -0
- package/src/lib/llm-providers.js +9 -1
- package/src/lib/slot-filler.js +465 -0
- package/src/lib/task-model-selector.js +5 -5
- package/src/lib/ws-agent-handler.js +403 -0
- package/src/lib/ws-chat-handler.js +145 -0
- package/src/lib/ws-server.js +280 -1
- package/src/lib/ws-session-manager.js +363 -0
- package/src/repl/agent-repl.js +159 -11
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Chat Core — transport-independent streaming chat logic
|
|
3
|
+
*
|
|
4
|
+
* Extracted from chat-repl.js so that both the terminal REPL and the
|
|
5
|
+
* WebSocket chat handler can consume the same streaming API.
|
|
6
|
+
*
|
|
7
|
+
* Key exports:
|
|
8
|
+
* - chatStream — async generator yielding response-token / response-complete events
|
|
9
|
+
* - streamOllama / streamOpenAI — low-level streaming helpers
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { BUILT_IN_PROVIDERS } from "./llm-providers.js";
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* Stream a response from Ollama
|
|
16
|
+
*/
|
|
17
|
+
/**
 * Stream a chat response from an Ollama server.
 *
 * POSTs to `${baseUrl}/api/chat` with `stream: true` and parses the
 * newline-delimited JSON reply, invoking `onToken` for every content
 * fragment as it arrives.
 *
 * @param {Array} messages - chat messages in { role, content } form
 * @param {string} model - Ollama model name
 * @param {string} baseUrl - e.g. "http://localhost:11434"
 * @param {function(string): void} onToken - called with each token
 * @returns {Promise<string>} the full concatenated response
 * @throws {Error} when the HTTP response is not ok
 */
export async function streamOllama(messages, model, baseUrl, onToken) {
  const response = await fetch(`${baseUrl}/api/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model,
      messages,
      stream: true,
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama error: ${response.status} ${response.statusText}`);
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullResponse = "";
  // Carry-over for a JSON line split across chunk boundaries; without it,
  // a token whose JSON object straddles two reads would be silently dropped.
  let buffer = "";

  const consume = (line) => {
    if (!line) return;
    try {
      const json = JSON.parse(line);
      if (json.message?.content) {
        fullResponse += json.message.content;
        onToken(json.message.content);
      }
    } catch {
      // Malformed line, skip
    }
  };

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    // The last element may be an incomplete line; keep it for the next chunk.
    buffer = lines.pop();
    for (const line of lines) consume(line);
  }

  // Flush any bytes still held by the decoder, then process the final line.
  buffer += decoder.decode();
  consume(buffer);

  return fullResponse;
}
|
|
58
|
+
|
|
59
|
+
/**
|
|
60
|
+
* Stream a response from OpenAI-compatible API
|
|
61
|
+
*/
|
|
62
|
+
/**
 * Stream a chat response from an OpenAI-compatible API.
 *
 * POSTs to `${baseUrl}/chat/completions` with `stream: true` and parses
 * the server-sent-event lines ("data: {...}" terminated by "data: [DONE]"),
 * invoking `onToken` for every delta content fragment.
 *
 * @param {Array} messages - chat messages in { role, content } form
 * @param {string} model - model name
 * @param {string} baseUrl - API base, e.g. "https://api.openai.com/v1"
 * @param {string} apiKey - bearer token
 * @param {function(string): void} onToken - called with each token
 * @returns {Promise<string>} the full concatenated response
 * @throws {Error} when the HTTP response is not ok
 */
export async function streamOpenAI(messages, model, baseUrl, apiKey, onToken) {
  const response = await fetch(`${baseUrl}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model,
      messages,
      stream: true,
    }),
  });

  if (!response.ok) {
    throw new Error(`API error: ${response.status} ${response.statusText}`);
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullResponse = "";
  // Carry-over for an SSE line split across chunk boundaries; without it,
  // a "data: {...}" event straddling two reads would be silently dropped.
  let buffer = "";

  const consume = (line) => {
    if (!line.startsWith("data: ")) return;
    const data = line.slice(6);
    if (data === "[DONE]") return;
    try {
      const json = JSON.parse(data);
      const content = json.choices?.[0]?.delta?.content;
      if (content) {
        fullResponse += content;
        onToken(content);
      }
    } catch {
      // Malformed event, skip
    }
  };

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    // The last element may be an incomplete line; keep it for the next chunk.
    buffer = lines.pop();
    for (const line of lines) consume(line);
  }

  // Flush any bytes still held by the decoder, then process the final line.
  buffer += decoder.decode();
  consume(buffer);

  return fullResponse;
}
|
|
111
|
+
|
|
112
|
+
/**
|
|
113
|
+
* Async generator that streams a chat response.
|
|
114
|
+
*
|
|
115
|
+
* Yields events:
|
|
116
|
+
* { type: "response-token", token }
|
|
117
|
+
* { type: "response-complete", content }
|
|
118
|
+
*
|
|
119
|
+
* @param {Array} messages
|
|
120
|
+
* @param {object} options - provider, model, baseUrl, apiKey
|
|
121
|
+
*/
|
|
122
|
+
/**
 * Async generator that streams a chat response.
 *
 * Yields events:
 *   { type: "response-token", token }      — one per token, as it arrives
 *   { type: "response-complete", content } — once, with the full text
 *
 * Tokens are forwarded through an internal queue so consumers receive them
 * while the provider is still responding, instead of being buffered and
 * released in a single burst after completion.
 *
 * @param {Array} messages
 * @param {object} options - provider, model, baseUrl, apiKey
 */
export async function* chatStream(messages, options) {
  const { provider, model, baseUrl, apiKey } = options;

  // Producer side: provider callbacks push tokens here and wake the consumer.
  const queue = [];
  let wake = null;
  const onToken = (token) => {
    queue.push(token);
    if (wake) {
      wake();
      wake = null;
    }
  };

  const producer = (async () => {
    if (provider === "ollama") {
      return streamOllama(messages, model, baseUrl, onToken);
    }
    const providerDef = BUILT_IN_PROVIDERS[provider];
    // The stored baseUrl defaults to Ollama's URL; treat that as "unset"
    // for cloud providers and fall back to the provider's own endpoint.
    const url =
      baseUrl !== "http://localhost:11434"
        ? baseUrl
        : providerDef?.baseUrl || "https://api.openai.com/v1";
    const key =
      apiKey ||
      (providerDef?.apiKeyEnv ? process.env[providerDef.apiKeyEnv] : null);
    if (!key) {
      throw new Error(
        `API key required for ${provider} (set ${providerDef?.apiKeyEnv || "API key"})`,
      );
    }
    return streamOpenAI(messages, model, url, key, onToken);
  })();

  // Track completion eagerly so a provider failure is never left as an
  // unhandled rejection while the consumer is parked on the wake promise.
  let finished = false;
  let failure = null;
  let fullResponse = "";
  const settled = producer
    .then(
      (content) => {
        fullResponse = content;
      },
      (err) => {
        failure = err;
      },
    )
    .finally(() => {
      finished = true;
      if (wake) {
        wake();
        wake = null;
      }
    });

  // Consumer side: drain the queue, parking until woken when it runs dry.
  while (!finished || queue.length > 0) {
    if (queue.length === 0) {
      await new Promise((resolve) => {
        wake = resolve;
      });
      continue;
    }
    yield { type: "response-token", token: queue.shift() };
  }

  await settled;
  if (failure) throw failure;

  yield { type: "response-complete", content: fullResponse };
}
|
|
158
|
+
|
|
159
|
+
/**
|
|
160
|
+
* Non-streaming version: chatStream but collects tokens and returns full response.
|
|
161
|
+
* Yields events incrementally via the onEvent callback.
|
|
162
|
+
*
|
|
163
|
+
* @param {Array} messages
|
|
164
|
+
* @param {object} options - provider, model, baseUrl, apiKey
|
|
165
|
+
* @param {function} [onEvent] - called with each event { type, token?, content? }
|
|
166
|
+
* @returns {Promise<string>} full response
|
|
167
|
+
*/
|
|
168
|
+
/**
 * Convenience wrapper around chatStream: drives the generator to completion,
 * forwarding every event to the optional callback, and resolves with the
 * final response text.
 *
 * @param {Array} messages
 * @param {object} options - provider, model, baseUrl, apiKey
 * @param {function} [onEvent] - called with each event { type, token?, content? }
 * @returns {Promise<string>} full response
 */
export async function chatWithStreaming(messages, options, onEvent) {
  let finalContent = "";
  const events = chatStream(messages, options);
  for await (const event of events) {
    if (onEvent) onEvent(event);
    if (event.type === "response-complete") {
      finalContent = event.content;
    }
  }
  return finalContent;
}
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Interaction Adapter — abstraction layer for user interaction
|
|
3
|
+
*
|
|
4
|
+
* Unifies terminal REPL and WebSocket modes so that agent-core, slot-filler,
|
|
5
|
+
* and interactive-planner can ask the user questions without knowing the
|
|
6
|
+
* transport.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { createHash } from "crypto";
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* Base class — subclasses must implement askInput, askSelect, askConfirm, emit.
|
|
13
|
+
*/
|
|
14
|
+
/**
 * Abstract interaction surface. Concrete transports (terminal, WebSocket)
 * override askInput / askSelect / askConfirm / emit; the base ask* methods
 * reject so a missing override is caught immediately.
 */
export class InteractionAdapter {
  /** Build the standard "not implemented" error for an abstract method. */
  #unimplemented(method, question) {
    return new Error(`${method} not implemented: ${question}`);
  }

  /**
   * Ask the user for free-form text.
   * @param {string} question @param {object} [_options]
   * @returns {Promise<string>}
   */
  async askInput(question, _options) {
    throw this.#unimplemented("askInput", question);
  }

  /**
   * Ask the user to pick one of several choices.
   * @param {string} question @param {Array<{name:string,value:string}>} _choices
   * @returns {Promise<string>}
   */
  async askSelect(question, _choices) {
    throw this.#unimplemented("askSelect", question);
  }

  /**
   * Ask the user a yes/no question.
   * @param {string} question @param {boolean} [_defaultVal]
   * @returns {Promise<boolean>}
   */
  async askConfirm(question, _defaultVal) {
    throw this.#unimplemented("askConfirm", question);
  }

  /** Emit an event to the consumer (terminal stdout or WebSocket client); no-op by default. */
  emit(_eventType, _data) {}
}
|
|
33
|
+
|
|
34
|
+
// ─── Terminal mode ────────────────────────────────────────────────────────
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* Terminal adapter — wraps @inquirer/prompts via prompts.js
|
|
38
|
+
*/
|
|
39
|
+
/**
 * Terminal-backed adapter. Lazily loads ./prompts.js (which wraps
 * @inquirer/prompts) the first time a question is asked, then caches it.
 */
export class TerminalInteractionAdapter extends InteractionAdapter {
  constructor() {
    super();
    // Cached prompts module; populated on first use by _loadPrompts().
    this._prompts = null;
  }

  /** Import and cache the prompts module on first use. */
  async _loadPrompts() {
    if (!this._prompts) {
      this._prompts = await import("./prompts.js");
    }
    return this._prompts;
  }

  /** Ask for free-form text on the terminal. */
  async askInput(question, options = {}) {
    const prompts = await this._loadPrompts();
    const initial = options.default || "";
    return prompts.askInput(question, initial);
  }

  /** Present a selection list on the terminal. */
  async askSelect(question, choices) {
    const prompts = await this._loadPrompts();
    return prompts.askSelect(question, choices);
  }

  /** Ask a yes/no question on the terminal. */
  async askConfirm(question, defaultVal = true) {
    const prompts = await this._loadPrompts();
    return prompts.askConfirm(question, defaultVal);
  }

  // Terminal callers print to process.stdout directly, so structured
  // events are a no-op here.
  emit(_eventType, _data) {}
}
|
|
72
|
+
|
|
73
|
+
// ─── WebSocket mode ───────────────────────────────────────────────────────
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* WebSocket adapter — sends question messages to the client and waits for
|
|
77
|
+
* session-answer responses.
|
|
78
|
+
*/
|
|
79
|
+
/**
 * WebSocket adapter — sends question messages to the client and waits for
 * session-answer responses (delivered back via resolveAnswer()).
 */
export class WebSocketInteractionAdapter extends InteractionAdapter {
  /**
   * @param {import("ws").WebSocket} ws
   * @param {string} sessionId
   */
  constructor(ws, sessionId) {
    super();
    this.ws = ws;
    this.sessionId = sessionId;
    /** @type {Map<string, {resolve: Function, reject: Function, timer: object}>} */
    this._pending = new Map();
  }

  /** Generate a unique request id */
  _requestId() {
    return `q-${Date.now()}-${createHash("sha256").update(Math.random().toString()).digest("hex").slice(0, 6)}`;
  }

  /**
   * Ask a question over WebSocket and wait for the answer.
   * @param {string} questionType - "input" | "select" | "confirm"
   * @param {string} question
   * @param {object} [extra] - choices, default, etc.
   * @returns {Promise<string|boolean>}
   */
  _ask(questionType, question, extra = {}) {
    return new Promise((resolve, reject) => {
      const requestId = this._requestId();

      // Fail the question after 5 minutes so an unresponsive client
      // cannot leave the caller hanging forever.
      const timer = setTimeout(
        () => {
          if (this._pending.has(requestId)) {
            this._pending.delete(requestId);
            reject(new Error("Question timed out"));
          }
        },
        5 * 60 * 1000,
      );
      // Don't let a pending question keep the Node process alive on its own.
      timer.unref?.();

      // Keep the timer with the pending entry so resolveAnswer can cancel it.
      this._pending.set(requestId, { resolve, reject, timer });

      this._sendWs({
        type: "question",
        sessionId: this.sessionId,
        requestId,
        questionType,
        question,
        ...extra,
      });
    });
  }

  /** Ask for free-form text over the socket. */
  async askInput(question, options = {}) {
    return this._ask("input", question, { default: options.default || "" });
  }

  /** Ask the client to pick one of several choices. */
  async askSelect(question, choices) {
    return this._ask("select", question, { choices });
  }

  /** Ask a yes/no question; client answers are normalized to boolean. */
  async askConfirm(question, defaultVal = true) {
    const answer = await this._ask("confirm", question, {
      default: defaultVal,
    });
    // Normalize to boolean (clients may answer with a string)
    if (typeof answer === "boolean") return answer;
    return answer === "true" || answer === "yes" || answer === "y";
  }

  /**
   * Called by ws-server when a session-answer message arrives.
   * Resolves the corresponding pending promise and cancels its timeout.
   */
  resolveAnswer(requestId, answer) {
    const pending = this._pending.get(requestId);
    if (pending) {
      this._pending.delete(requestId);
      // Cancel the 5-minute timer so an answered question doesn't keep the
      // timer (and the closures it captures) alive for the full window.
      clearTimeout(pending.timer);
      pending.resolve(answer);
    }
  }

  /** Forward a structured event to the client, tagged with the session id. */
  emit(eventType, data) {
    this._sendWs({
      type: eventType,
      sessionId: this.sessionId,
      ...data,
    });
  }

  /** Best-effort send: silently dropped if the socket is not open. */
  _sendWs(data) {
    if (this.ws.readyState === this.ws.OPEN) {
      try {
        this.ws.send(JSON.stringify(data));
      } catch (_err) {
        // Connection may have closed between the readyState check and send
      }
    }
  }
}
|