@tjamescouch/gro 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +20 -0
- package/README.md +218 -0
- package/_base.md +44 -0
- package/gro +198 -0
- package/owl/behaviors/agentic-turn.md +43 -0
- package/owl/components/cli.md +37 -0
- package/owl/components/drivers.md +29 -0
- package/owl/components/mcp.md +33 -0
- package/owl/components/memory.md +35 -0
- package/owl/components/session.md +35 -0
- package/owl/constraints.md +32 -0
- package/owl/product.md +28 -0
- package/owl/proposals/cooperative-scheduler.md +106 -0
- package/package.json +22 -0
- package/providers/claude.sh +50 -0
- package/providers/gemini.sh +36 -0
- package/providers/openai.py +85 -0
- package/src/drivers/anthropic.ts +215 -0
- package/src/drivers/index.ts +5 -0
- package/src/drivers/streaming-openai.ts +245 -0
- package/src/drivers/types.ts +33 -0
- package/src/errors.ts +97 -0
- package/src/logger.ts +28 -0
- package/src/main.ts +827 -0
- package/src/mcp/client.ts +147 -0
- package/src/mcp/index.ts +2 -0
- package/src/memory/advanced-memory.ts +263 -0
- package/src/memory/agent-memory.ts +61 -0
- package/src/memory/agenthnsw.ts +122 -0
- package/src/memory/index.ts +6 -0
- package/src/memory/simple-memory.ts +41 -0
- package/src/memory/vector-index.ts +30 -0
- package/src/session.ts +150 -0
- package/src/tools/agentpatch.ts +89 -0
- package/src/tools/bash.ts +61 -0
- package/src/utils/rate-limiter.ts +60 -0
- package/src/utils/retry.ts +32 -0
- package/src/utils/timed-fetch.ts +29 -0
- package/tests/errors.test.ts +246 -0
- package/tests/memory.test.ts +186 -0
- package/tests/rate-limiter.test.ts +76 -0
- package/tests/retry.test.ts +138 -0
- package/tests/timed-fetch.test.ts +104 -0
- package/tsconfig.json +13 -0
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Streaming OpenAI-compatible chat driver.
|
|
3
|
+
* Works with OpenAI, Anthropic (via proxy), LM Studio, Ollama, etc.
|
|
4
|
+
*/
|
|
5
|
+
import { Logger } from "../logger.js";
|
|
6
|
+
import { asError } from "../errors.js";
|
|
7
|
+
import { rateLimiter } from "../utils/rate-limiter.js";
|
|
8
|
+
import { timedFetch } from "../utils/timed-fetch.js";
|
|
9
|
+
import { MAX_RETRIES, isRetryable, retryDelay, sleep } from "../utils/retry.js";
|
|
10
|
+
import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall } from "./types.js";
|
|
11
|
+
|
|
12
|
+
/** Configuration for the streaming OpenAI-compatible chat driver. */
export interface OpenAiDriverConfig {
  // Server base URL; trailing slashes are stripped and "/v1/chat/completions" is appended.
  baseUrl: string;
  // Default model id; can be overridden per call via opts.model.
  model: string;
  // Optional bearer token, sent as "Authorization: Bearer <key>" when present.
  apiKey?: string;
  // Per-request timeout in milliseconds (driver defaults to 2 hours when omitted).
  timeoutMs?: number;
}
|
|
18
|
+
|
|
19
|
+
function yieldToLoop(): Promise<void> {
|
|
20
|
+
return new Promise<void>((resolve) =>
|
|
21
|
+
typeof (globalThis as any).setImmediate === "function"
|
|
22
|
+
? (globalThis as any).setImmediate(resolve)
|
|
23
|
+
: setTimeout(resolve, 0)
|
|
24
|
+
);
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
class YieldBudget {
|
|
28
|
+
private bytesSince = 0;
|
|
29
|
+
private last = Date.now();
|
|
30
|
+
constructor(
|
|
31
|
+
private readonly byteBudget = 1024,
|
|
32
|
+
private readonly msBudget = 8
|
|
33
|
+
) {}
|
|
34
|
+
async maybe(extraBytes = 0): Promise<void> {
|
|
35
|
+
this.bytesSince += extraBytes;
|
|
36
|
+
const now = Date.now();
|
|
37
|
+
if (this.bytesSince >= this.byteBudget || (now - this.last) >= this.msBudget) {
|
|
38
|
+
this.bytesSince = 0;
|
|
39
|
+
this.last = now;
|
|
40
|
+
await yieldToLoop();
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/**
 * Build a ChatDriver for an OpenAI-compatible `/v1/chat/completions`
 * endpoint, always requesting `stream: true` and parsing the SSE response
 * incrementally. Handles streaming callbacks, retries on retryable HTTP
 * statuses, abort-signal linking, and a non-SSE JSON fallback.
 */
export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
  // Strip trailing slashes so we never produce "//v1/...".
  const base = cfg.baseUrl.replace(/\/+$/, "");
  const endpoint = `${base}/v1/chat/completions`;
  // Default timeout: 2 hours. Used for both the fetch and the outer abort timer.
  const defaultTimeout = cfg.timeoutMs ?? 2 * 60 * 60 * 1000;

  /**
   * Perform one chat completion.
   * opts is untyped here; this body reads: model, tools, signal, onToken,
   * onReasoningToken, onToolCallDelta (ChatDriver declares a subset — note
   * `signal` is accepted here but not part of the declared opts type).
   */
  async function chat(messages: ChatMessage[], opts?: any): Promise<ChatOutput> {
    // Shared rate limit across all calls under the "llm-ask" key.
    await rateLimiter.limit("llm-ask", 1);
    Logger.debug("streaming messages out", messages);

    // Internal controller aborts the fetch on either the caller's signal or the timer.
    const controller = new AbortController();
    const userSignal: AbortSignal | undefined = opts?.signal;
    const linkAbort = () => controller.abort();
    if (userSignal) {
      if (userSignal.aborted) controller.abort();
      else userSignal.addEventListener("abort", linkAbort, { once: true });
    }
    const timer = setTimeout(() => controller.abort(), defaultTimeout);

    const model = opts?.model ?? cfg.model;
    // Only send tools when a non-empty array was provided.
    const tools = Array.isArray(opts?.tools) && opts.tools.length ? opts.tools : undefined;
    const onToken: ((t: string) => void) | undefined = opts?.onToken;
    const onReasoningToken: ((t: string) => void) | undefined = opts?.onReasoningToken;
    const onToolCallDelta: ((t: ChatToolCall) => void) | undefined = opts?.onToolCallDelta;

    const headers: Record<string, string> = { "Content-Type": "application/json" };
    if (cfg.apiKey) headers["Authorization"] = `Bearer ${cfg.apiKey}`;

    const payload: any = { model, messages, stream: true };
    if (tools) {
      payload.tools = tools;
      payload.tool_choice = "auto";
    }

    try {
      // Retry loop: re-POST on retryable HTTP statuses up to MAX_RETRIES.
      let res!: Response;
      for (let attempt = 0; ; attempt++) {
        res = await timedFetch(endpoint, {
          method: "POST",
          headers,
          body: JSON.stringify(payload),
          signal: controller.signal,
          where: "driver:openai:stream",
          timeoutMs: defaultTimeout,
        });

        if (res.ok) break;

        if (isRetryable(res.status) && attempt < MAX_RETRIES) {
          const delay = retryDelay(attempt);
          Logger.warn(`OpenAI ${res.status}, retry ${attempt + 1}/${MAX_RETRIES} in ${Math.round(delay)}ms`);
          await sleep(delay);
          continue;
        }

        // Non-retryable (or retries exhausted): surface status plus body text.
        const text = await res.text().catch(() => "");
        throw new Error(`OpenAI chat (stream) failed (${res.status}): ${text}`);
      }

      // Fallback: some servers ignore stream:true and reply with plain JSON.
      const ct = (res.headers.get("content-type") || "").toLowerCase();
      if (!ct.includes("text/event-stream")) {
        const data = await res.json().catch(() => ({}));
        const choice = data?.choices?.[0];
        const msg = choice?.message || {};
        const content = typeof msg?.content === "string" ? msg.content : "";
        const toolCalls: ChatToolCall[] = Array.isArray(msg?.tool_calls) ? msg.tool_calls : [];
        // Deliver the entire body as one "token" so streaming callers still see output.
        if (content && onToken) onToken(content);
        return { text: content, reasoning: msg?.reasoning || undefined, toolCalls };
      }

      // SSE streaming
      const decoder = new TextDecoder("utf-8");
      // Yield to the event loop roughly every 1KB or 8ms to stay responsive.
      const yb = new YieldBudget(1024, 8);
      let buf = "";        // unparsed SSE residue carried across chunks
      let fullText = "";   // accumulated content deltas
      let fullReasoning = ""; // accumulated reasoning deltas
      // Tool calls accumulate per delta `index`; fragments are concatenated.
      const toolByIndex = new Map<number, ChatToolCall>();

      /** Parse one SSE event block and fold its delta into the accumulators. */
      const pumpEvent = async (rawEvent: string) => {
        // Keep only the "data:" lines of the event; other SSE fields are ignored.
        const dataLines = rawEvent
          .split("\n")
          .map((l) => l.trim())
          .filter((l) => l.startsWith("data:"))
          .map((l) => l.replace(/^data:\s?/, ""));

        if (!dataLines.length) return;
        const joined = dataLines.join("\n").trim();
        // "[DONE]" is the stream-terminator sentinel.
        if (!joined || joined === "[DONE]") return;

        // NOTE: shadows the outer request `payload`.
        let payload: any;
        try { payload = JSON.parse(joined); } catch { return; } // skip malformed events

        const delta = payload?.choices?.[0]?.delta;
        if (!delta) return;

        // Content tokens: forward in <=512-char pieces, yielding between pieces.
        if (typeof delta.content === "string" && delta.content.length) {
          fullText += delta.content;
          if (onToken) {
            let s = delta.content;
            while (s.length) {
              const piece = s.slice(0, 512);
              s = s.slice(512);
              try { onToken(piece); } catch {} // callback errors never kill the stream
              await yb.maybe(piece.length);
            }
          } else {
            await yb.maybe(delta.content.length);
          }
        }

        // Reasoning tokens: same chunked forwarding as content.
        if (typeof delta.reasoning === "string" && delta.reasoning.length) {
          fullReasoning += delta.reasoning;
          if (onReasoningToken) {
            let s = delta.reasoning;
            while (s.length) {
              const piece = s.slice(0, 512);
              s = s.slice(512);
              try { onReasoningToken(piece); } catch {}
              await yb.maybe(piece.length);
            }
          } else {
            await yb.maybe(delta.reasoning.length);
          }
        }

        // Tool-call deltas: merge fragments into one entry per index.
        if (Array.isArray(delta.tool_calls)) {
          for (const item of delta.tool_calls) {
            const idx: number = typeof item?.index === "number" ? item.index : 0;
            const prev = toolByIndex.get(idx) ?? {
              id: "", type: "function", function: { name: "", arguments: "" }
            };
            if (typeof item.id === "string" && item.id) prev.id = item.id;
            if (typeof item.type === "string" && item.type) (prev as any).type = item.type;
            const f = item?.function ?? {};
            // name/arguments arrive in pieces and are concatenated.
            if (typeof f.name === "string" && f.name) prev.function.name += f.name;
            if (typeof f.arguments === "string" && f.arguments) prev.function.arguments += f.arguments;
            toolByIndex.set(idx, prev);
            if (onToolCallDelta) {
              // Receives the accumulated call so far, not just this fragment.
              try { onToolCallDelta(prev); } catch {}
            }
            await yb.maybe(64);
          }
        }
      };

      const body: any = res.body;

      // Three transports: WHATWG ReadableStream, async-iterable (Node-style),
      // or a fully-buffered text fallback. Events are separated by "\n\n".
      if (body && typeof body.getReader === "function") {
        const reader = body.getReader();
        try {
          while (true) {
            const { value, done } = await reader.read();
            if (done) break;
            buf += decoder.decode(value, { stream: true });
            await yb.maybe((value as Uint8Array)?.byteLength ?? 0);
            let sepIdx: number;
            while ((sepIdx = buf.indexOf("\n\n")) !== -1) {
              const rawEvent = buf.slice(0, sepIdx).trim();
              buf = buf.slice(sepIdx + 2);
              if (rawEvent) await pumpEvent(rawEvent);
            }
          }
          // Flush a trailing event that lacked the final separator.
          if (buf.trim()) await pumpEvent(buf.trim());
        } finally {
          reader.cancel().catch(() => {});
        }
      } else if (body && typeof body[Symbol.asyncIterator] === "function") {
        for await (const chunk of body as AsyncIterable<Uint8Array>) {
          buf += decoder.decode(chunk, { stream: true });
          await yb.maybe(chunk.byteLength);
          let sepIdx: number;
          while ((sepIdx = buf.indexOf("\n\n")) !== -1) {
            const rawEvent = buf.slice(0, sepIdx).trim();
            buf = buf.slice(sepIdx + 2);
            if (rawEvent) await pumpEvent(rawEvent);
          }
        }
        if (buf.trim()) await pumpEvent(buf.trim());
      } else {
        // No streaming body at all: parse the whole response text in one pass.
        const txt = await res.text();
        for (const part of txt.split("\n\n").map((s) => s.trim())) {
          if (part) await pumpEvent(part);
        }
      }

      // Emit tool calls in delta-index order.
      const toolCalls: ChatToolCall[] = Array.from(toolByIndex.entries())
        .sort((a, b) => a[0] - b[0])
        .map(([, v]) => v);

      return { text: fullText, reasoning: fullReasoning || undefined, toolCalls };
    } catch (e: unknown) {
      const wrapped = asError(e);
      // AbortError here means the timer (or the caller's signal) fired.
      if (wrapped.name === "AbortError") Logger.debug("timeout(stream)", { ms: defaultTimeout });
      throw wrapped;
    } finally {
      clearTimeout(timer);
      // Unlink from the caller's signal so we don't leak listeners.
      if (userSignal) userSignal.removeEventListener("abort", linkAbort);
    }
  }

  return { chat };
}
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
/** A single message in a chat transcript exchanged with a ChatDriver. */
export interface ChatMessage {
  // Role string; left open as plain string rather than a literal union.
  // "tool" is one valid value (see tool_call_id) — TODO confirm the full set.
  role: string;
  // Sender identifier; semantics are set by callers elsewhere — verify against main.ts.
  from: string;
  // Message body text.
  content: string;
  // Optional model reasoning text accompanying the content.
  reasoning?: string;
  /** When role==="tool", the id of the tool call being answered. */
  tool_call_id?: string;
  /** Optional: tool/function name for clarity. */
  name?: string;
}
|
|
11
|
+
|
|
12
|
+
/** A tool call emitted by a chat model (possibly partially accumulated while streaming). */
export interface ChatToolCall {
  // Provider-assigned id; may still be "" while streaming deltas accumulate.
  id: string;
  type: "function" | "custom";
  // Function name plus its arguments as a raw (possibly partial) JSON string.
  function: { name: string; arguments: string };
  // Optional raw payload; its producer is not visible here — confirm before relying on it.
  raw?: string;
}
|
|
18
|
+
|
|
19
|
+
/** The final result of one chat completion. */
export interface ChatOutput {
  // Full assistant text (for streaming drivers, the concatenation of content deltas).
  text: string;
  // Tool calls requested by the model; empty array when none.
  toolCalls: ChatToolCall[];
  // Full reasoning text, when the provider supplied any.
  reasoning?: string;
}
|
|
24
|
+
|
|
25
|
+
/** Minimal chat-completion contract implemented by each provider backend. */
export interface ChatDriver {
  chat(messages: ChatMessage[], opts?: {
    // Override the driver's configured model for this call.
    model?: string;
    // Provider-format tool definitions; shape is provider-specific (untyped here).
    tools?: any[];
    // Called with each streamed content fragment.
    onToken?: (s: string) => void;
    // Called with each streamed reasoning fragment.
    onReasoningToken?: (s: string) => void;
    // Called with a tool call as its delta fragments arrive.
    onToolCallDelta?: (s: ChatToolCall) => void;
  }): Promise<ChatOutput>;
}
|
package/src/errors.ts
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Structured error types for gro.
|
|
3
|
+
*
|
|
4
|
+
* All error boundaries should wrap errors using GroError instead of
|
|
5
|
+
* stringifying with e.message. This preserves stack traces, enables
|
|
6
|
+
* retry logic, and provides consistent logging fields.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
export type GroErrorKind =
|
|
10
|
+
| "provider_error"
|
|
11
|
+
| "tool_error"
|
|
12
|
+
| "config_error"
|
|
13
|
+
| "mcp_error"
|
|
14
|
+
| "timeout_error"
|
|
15
|
+
| "session_error";
|
|
16
|
+
|
|
17
|
+
export interface GroError extends Error {
|
|
18
|
+
kind: GroErrorKind;
|
|
19
|
+
provider?: string;
|
|
20
|
+
model?: string;
|
|
21
|
+
request_id?: string;
|
|
22
|
+
retryable: boolean;
|
|
23
|
+
latency_ms?: number;
|
|
24
|
+
cause?: unknown;
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
/**
|
|
28
|
+
* Create a GroError with structured fields.
|
|
29
|
+
*/
|
|
30
|
+
export function groError(
|
|
31
|
+
kind: GroErrorKind,
|
|
32
|
+
message: string,
|
|
33
|
+
opts: {
|
|
34
|
+
provider?: string;
|
|
35
|
+
model?: string;
|
|
36
|
+
request_id?: string;
|
|
37
|
+
retryable?: boolean;
|
|
38
|
+
latency_ms?: number;
|
|
39
|
+
cause?: unknown;
|
|
40
|
+
} = {},
|
|
41
|
+
): GroError {
|
|
42
|
+
const err = new Error(message) as GroError;
|
|
43
|
+
err.kind = kind;
|
|
44
|
+
err.retryable = opts.retryable ?? false;
|
|
45
|
+
if (opts.provider) err.provider = opts.provider;
|
|
46
|
+
if (opts.model) err.model = opts.model;
|
|
47
|
+
if (opts.request_id) err.request_id = opts.request_id;
|
|
48
|
+
if (opts.latency_ms !== undefined) err.latency_ms = opts.latency_ms;
|
|
49
|
+
if (opts.cause !== undefined) err.cause = opts.cause;
|
|
50
|
+
return err;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
/**
|
|
54
|
+
* Normalize an unknown thrown value into an Error.
|
|
55
|
+
* Handles strings, objects, nulls — the full JS throw spectrum.
|
|
56
|
+
*/
|
|
57
|
+
export function asError(e: unknown): Error {
|
|
58
|
+
if (e instanceof Error) return e;
|
|
59
|
+
if (typeof e === "string") return new Error(e.slice(0, 1024));
|
|
60
|
+
if (e === null || e === undefined) return new Error("Unknown error");
|
|
61
|
+
try {
|
|
62
|
+
const s = String(e);
|
|
63
|
+
return new Error(s.length > 1024 ? s.slice(0, 1024) + "..." : s);
|
|
64
|
+
} catch {
|
|
65
|
+
return new Error("Unknown error (unstringifiable)");
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
/**
|
|
70
|
+
* Check if an error is a GroError with structured fields.
|
|
71
|
+
*/
|
|
72
|
+
export function isGroError(e: unknown): e is GroError {
|
|
73
|
+
return e instanceof Error && "kind" in e && "retryable" in e;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
/**
|
|
77
|
+
* Format a GroError for structured logging.
|
|
78
|
+
* Returns a plain object suitable for JSON.stringify or structured loggers.
|
|
79
|
+
*/
|
|
80
|
+
export function errorLogFields(e: GroError): Record<string, unknown> {
|
|
81
|
+
const fields: Record<string, unknown> = {
|
|
82
|
+
kind: e.kind,
|
|
83
|
+
message: e.message,
|
|
84
|
+
retryable: e.retryable,
|
|
85
|
+
};
|
|
86
|
+
if (e.provider) fields.provider = e.provider;
|
|
87
|
+
if (e.model) fields.model = e.model;
|
|
88
|
+
if (e.request_id) fields.request_id = e.request_id;
|
|
89
|
+
if (e.latency_ms !== undefined) fields.latency_ms = e.latency_ms;
|
|
90
|
+
if (e.cause) {
|
|
91
|
+
const cause = asError(e.cause);
|
|
92
|
+
fields.cause_message = cause.message;
|
|
93
|
+
if (cause.stack) fields.cause_stack = cause.stack;
|
|
94
|
+
}
|
|
95
|
+
if (e.stack) fields.stack = e.stack;
|
|
96
|
+
return fields;
|
|
97
|
+
}
|
package/src/logger.ts
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
export const C = {
|
|
2
|
+
reset: "\x1b[0m",
|
|
3
|
+
bold: (s: string) => `\x1b[1m${s}\x1b[0m`,
|
|
4
|
+
red: (s: string) => `\x1b[31m${s}\x1b[0m`,
|
|
5
|
+
green: (s: string) => `\x1b[32m${s}\x1b[0m`,
|
|
6
|
+
yellow: (s: string) => `\x1b[33m${s}\x1b[0m`,
|
|
7
|
+
blue: (s: string) => `\x1b[34m${s}\x1b[0m`,
|
|
8
|
+
magenta: (s: string) => `\x1b[35m${s}\x1b[0m`,
|
|
9
|
+
cyan: (s: string) => `\x1b[36m${s}\x1b[0m`,
|
|
10
|
+
gray: (s: string) => `\x1b[90m${s}\x1b[0m`,
|
|
11
|
+
};
|
|
12
|
+
|
|
13
|
+
function writeRaw(s: string) {
|
|
14
|
+
const g: any = globalThis as any;
|
|
15
|
+
if (g?.Bun?.stdout?.write) { g.Bun.stdout.write(s); return; }
|
|
16
|
+
g?.process?.stdout?.write?.(s);
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
export class Logger {
|
|
20
|
+
static info(...a: any[]) { console.log(...a); }
|
|
21
|
+
static warn(...a: any[]) { console.warn(...a); }
|
|
22
|
+
static error(...a: any[]) { console.error(...a); }
|
|
23
|
+
static debug(...a: any[]) {
|
|
24
|
+
if ((process.env.GRO_LOG_LEVEL ?? "").toUpperCase() === "DEBUG") console.log(...a);
|
|
25
|
+
}
|
|
26
|
+
static streamInfo(s: string) { writeRaw(s); }
|
|
27
|
+
static endStreamLine(suffix = "") { writeRaw(suffix + "\n"); }
|
|
28
|
+
}
|