wauldo 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,1212 @@
1
"use strict";

// esbuild CommonJS interop scaffolding (bundler-generated helper set).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;

// Register every entry of `all` on `target` as an enumerable lazy getter,
// so export values resolve at access time rather than at definition time.
var __export = (target, all) => {
  for (var exportName in all) {
    __defProp(target, exportName, { get: all[exportName], enumerable: true });
  }
};

// Copy the own properties of `source` onto `dest` as live getters, skipping
// `skipKey` and any key `dest` already owns; the original enumerability of
// each property is preserved.
var __copyProps = (dest, source, skipKey, descriptor) => {
  const copyable = (source && typeof source === "object") || typeof source === "function";
  if (copyable) {
    for (const propName of __getOwnPropNames(source)) {
      if (__hasOwnProp.call(dest, propName) || propName === skipKey) continue;
      descriptor = __getOwnPropDesc(source, propName);
      __defProp(dest, propName, {
        get: () => source[propName],
        enumerable: !descriptor || descriptor.enumerable
      });
    }
  }
  return dest;
};

// Wrap an ESM-style namespace object as a CommonJS exports object with the
// `__esModule` marker set.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Package entry point. Every public symbol is registered on the namespace
// object as a lazy getter (values resolve at access time), then the
// namespace is exposed as the CommonJS module surface.
var index_exports = {};
__export(index_exports, {
  AgentClient: () => AgentClient,
  ConnectionError: () => ConnectionError,
  Conversation: () => Conversation,
  HttpClient: () => HttpClient,
  MockHttpClient: () => MockHttpClient,
  ServerError: () => ServerError,
  TimeoutError: () => TimeoutError,
  ToolNotFoundError: () => ToolNotFoundError,
  ValidationError: () => ValidationError,
  WauldoError: () => WauldoError
});
module.exports = __toCommonJS(index_exports);
35
+
36
+ // src/errors.ts
37
var WauldoError = class _WauldoError extends Error {
  code;
  data;
  /**
   * Base error type for all wauldo failures.
   * @param {string} message - Human-readable description.
   * @param {number} [code] - Optional JSON-RPC style error code.
   * @param {unknown} [data] - Optional server-provided error payload.
   */
  constructor(message, code, data) {
    super(message);
    this.name = "WauldoError";
    this.code = code;
    this.data = data;
    // Restore the prototype chain so `instanceof` works when the build
    // targets down-level JavaScript.
    Object.setPrototypeOf(this, _WauldoError.prototype);
  }
  /** Render as "[code] message" when a code is present, else the message. */
  toString() {
    return this.code === void 0 ? this.message : `[${this.code}] ${this.message}`;
  }
};
var ConnectionError = class _ConnectionError extends WauldoError {
  /**
   * Raised when the MCP server cannot be reached or its process fails.
   * @param {string} [message] - Defaults to a generic connect failure.
   */
  constructor(message = "Failed to connect to MCP server") {
    super(message, -32000);
    this.name = "ConnectionError";
    Object.setPrototypeOf(this, _ConnectionError.prototype);
  }
};
var ServerError = class _ServerError extends WauldoError {
  /**
   * Raised when the server returns a JSON-RPC error response.
   * @param {string} message - Server-reported error message.
   * @param {number} code - Server-reported error code.
   * @param {unknown} [data] - Server-reported error payload.
   */
  constructor(message, code, data) {
    super(message, code, data);
    this.name = "ServerError";
    Object.setPrototypeOf(this, _ServerError.prototype);
  }
};
var ValidationError = class _ValidationError extends WauldoError {
  field;
  /**
   * Raised for invalid client-side input (JSON-RPC code -32602).
   * @param {string} message - What was invalid.
   * @param {string} [field] - Name of the offending parameter.
   */
  constructor(message, field) {
    super(message, -32602);
    this.name = "ValidationError";
    this.field = field;
    Object.setPrototypeOf(this, _ValidationError.prototype);
  }
};
var TimeoutError = class _TimeoutError extends WauldoError {
  timeout;
  /**
   * Raised when a request exceeds its deadline (code -32001).
   * @param {string} [message] - Defaults to "Operation timed out".
   * @param {number} [timeout] - The timeout that elapsed, in milliseconds.
   */
  constructor(message = "Operation timed out", timeout) {
    super(message, -32001);
    this.name = "TimeoutError";
    this.timeout = timeout;
    Object.setPrototypeOf(this, _TimeoutError.prototype);
  }
};
var ToolNotFoundError = class _ToolNotFoundError extends WauldoError {
  toolName;
  /**
   * Raised when an unknown tool name is requested (code -32601).
   * @param {string} toolName - The tool that could not be found.
   */
  constructor(toolName) {
    super(`Tool not found: ${toolName}`, -32601);
    this.name = "ToolNotFoundError";
    this.toolName = toolName;
    Object.setPrototypeOf(this, _ToolNotFoundError.prototype);
  }
};
95
+
96
+ // src/transport.ts
97
+ var import_node_child_process = require("child_process");
98
+ var import_node_fs = require("fs");
99
+ var import_node_path = require("path");
100
+ var import_node_os = require("os");
101
+ var import_node_readline = require("readline");
102
var StdioTransport = class {
  // Path to the MCP server binary; null means "discover lazily on connect".
  serverPath;
  // Default per-request timeout in milliseconds.
  timeout;
  // Spawned server child process (null while disconnected).
  process = null;
  // Monotonic JSON-RPC request id counter.
  requestId = 0;
  // readline interface over the child's stdout (null while disconnected).
  readline = null;
  // In-flight connect() promise; de-duplicates concurrent connect calls.
  connectingPromise = null;
  // Set by disconnect() so late process/readline events are ignored.
  disconnected = false;
  // Pending requests keyed by id -> { resolve, reject, timer }.
  responseQueue = /* @__PURE__ */ new Map();
  /**
   * @param serverPath - Optional explicit path to the server binary;
   *   when omitted the binary is searched for lazily (see findServer).
   * @param timeout - Default request timeout in ms (30s).
   */
  constructor(serverPath, timeout = 3e4) {
    this.serverPath = serverPath ?? null;
    this.timeout = timeout;
  }
  /**
   * Find MCP server binary in common locations: local cargo target dirs
   * (release then debug, then the parent directory) and ~/.cargo/bin.
   * @returns The first existing candidate path.
   * @throws {ConnectionError} When no candidate exists.
   */
  findServer() {
    const searchPaths = [
      (0, import_node_path.join)(process.cwd(), "target", "release", "wauldo-mcp"),
      (0, import_node_path.join)(process.cwd(), "target", "debug", "wauldo-mcp"),
      (0, import_node_path.join)(process.cwd(), "..", "target", "release", "wauldo-mcp"),
      (0, import_node_path.join)((0, import_node_os.homedir)(), ".cargo", "bin", "wauldo-mcp")
    ];
    for (const path of searchPaths) {
      if ((0, import_node_fs.existsSync)(path)) {
        return path;
      }
    }
    throw new ConnectionError(
      'MCP server binary not found. Please provide serverPath or install with "cargo install".'
    );
  }
  /**
   * Get server path, finding it lazily if needed; the discovered path is
   * cached on the instance for subsequent connects.
   */
  getServerPath() {
    if (this.serverPath === null) {
      this.serverPath = this.findServer();
    }
    return this.serverPath;
  }
  /**
   * Connect to MCP server. Idempotent: a no-op when already connected, and
   * concurrent callers share the same in-flight connection attempt.
   */
  async connect() {
    if (this.process !== null) {
      return;
    }
    if (this.connectingPromise !== null) {
      return this.connectingPromise;
    }
    this.connectingPromise = this.doConnect();
    try {
      await this.connectingPromise;
    } finally {
      // Always clear so a failed attempt can be retried.
      this.connectingPromise = null;
    }
  }
  // Spawn the server, wire up stdout line handling and process events,
  // then perform the MCP initialize handshake. On handshake failure the
  // transport is torn down before the error propagates.
  async doConnect() {
    const serverPath = this.getServerPath();
    try {
      this.process = (0, import_node_child_process.spawn)(serverPath, [], {
        stdio: ["pipe", "pipe", "pipe"]
      });
    } catch (error) {
      // NOTE(review): spawn() usually reports failures via the 'error'
      // event rather than throwing synchronously, so this catch is a
      // belt-and-braces guard — confirm against Node child_process docs.
      throw new ConnectionError(`Failed to start server: ${error}`);
    }
    if (!this.process.stdout || !this.process.stdin) {
      throw new ConnectionError("Failed to get stdio handles");
    }
    this.readline = (0, import_node_readline.createInterface)({
      input: this.process.stdout,
      crlfDelay: Infinity
    });
    this.disconnected = false;
    // Each stdout line is expected to be one JSON-RPC response.
    this.readline.on("line", (line) => {
      if (!this.disconnected) this.handleResponse(line);
    });
    this.process.on("error", (error) => {
      if (!this.disconnected) this.handleError(new ConnectionError(`Server error: ${error.message}`));
    });
    this.process.on("close", (code) => {
      // A clean exit (code 0) after disconnect is expected; anything else
      // fails all in-flight requests.
      if (!this.disconnected && code !== 0) {
        this.handleError(new ConnectionError(`Server exited with code ${code}`));
      }
    });
    try {
      await this.initialize();
    } catch (err) {
      this.disconnect();
      throw err;
    }
  }
  /**
   * Disconnect from MCP server: close the reader, kill the child process,
   * and reject every pending request with a ConnectionError.
   */
  disconnect() {
    this.disconnected = true;
    if (this.readline) {
      this.readline.close();
      this.readline = null;
    }
    if (this.process) {
      this.process.kill();
      this.process = null;
    }
    for (const [, pending] of this.responseQueue) {
      clearTimeout(pending.timer);
      pending.reject(new ConnectionError("Connection closed"));
    }
    this.responseQueue.clear();
  }
  /**
   * Handle one incoming response line: match it to its pending request by
   * id and settle that request. Unparseable lines and responses with no
   * matching pending request are deliberately ignored (best-effort).
   */
  handleResponse(line) {
    try {
      const response = JSON.parse(line);
      const pending = this.responseQueue.get(response.id);
      if (pending) {
        clearTimeout(pending.timer);
        this.responseQueue.delete(response.id);
        if (response.error) {
          pending.reject(
            new ServerError(
              response.error.message,
              response.error.code,
              response.error.data
            )
          );
        } else {
          pending.resolve(response.result);
        }
      }
    } catch {
    }
  }
  /**
   * Handle a transport-level error by failing every pending request with
   * the same error. Note: this does not tear down the process itself.
   */
  handleError(error) {
    for (const [, pending] of this.responseQueue) {
      clearTimeout(pending.timer);
      pending.reject(error);
    }
    this.responseQueue.clear();
  }
  /**
   * Send the MCP initialize handshake request (protocol 2024-11-05).
   */
  async initialize() {
    await this.request("initialize", {
      protocolVersion: "2024-11-05",
      capabilities: {},
      clientInfo: { name: "wauldo-typescript", version: "0.1.0" }
    });
  }
  /**
   * Send a JSON-RPC request and wait for its response.
   * @param method - JSON-RPC method name.
   * @param params - Optional params object (omitted from the wire when falsy).
   * @param timeout - Optional per-call timeout override in ms.
   * @returns The response's `result` field.
   * @throws {ConnectionError} When not connected or the write fails.
   * @throws {TimeoutError} When no response arrives within the timeout.
   * @throws {ServerError} When the server returns a JSON-RPC error.
   */
  async request(method, params, timeout) {
    if (!this.process || !this.process.stdin) {
      throw new ConnectionError("Not connected. Call connect() first.");
    }
    this.requestId++;
    const id = this.requestId;
    const request = {
      jsonrpc: "2.0",
      id,
      method
    };
    if (params) {
      request.params = params;
    }
    // Newline-delimited JSON over the child's stdin.
    const requestData = JSON.stringify(request) + "\n";
    return new Promise((resolve, reject) => {
      const timeoutMs = timeout ?? this.timeout;
      // `settled` guards against double settlement from the race between
      // the timeout timer, the write callback, and the response handler.
      let settled = false;
      const safeReject = (err) => {
        if (settled) return;
        settled = true;
        clearTimeout(timer);
        this.responseQueue.delete(id);
        reject(err);
      };
      const timer = setTimeout(() => {
        safeReject(new TimeoutError(`Request timed out after ${timeoutMs}ms`, timeoutMs));
      }, timeoutMs);
      // Register before writing so an immediate response cannot be missed.
      this.responseQueue.set(id, {
        resolve: (value) => {
          if (!settled) {
            settled = true;
            clearTimeout(timer);
            resolve(value);
          }
        },
        reject: safeReject,
        timer
      });
      this.process.stdin.write(requestData, (error) => {
        if (error) {
          safeReject(new ConnectionError(`Failed to send request: ${error.message}`));
        }
      });
    });
  }
};
309
+
310
+ // src/client.ts
311
/**
 * Parse a JSON payload into a normalized list of chunk records.
 *
 * Looks up `primaryKey` first, then `fallbackKey`, on the parsed object.
 * Non-object entries are dropped; each kept entry is normalized to
 * { id, content, position, priority } with `position` defaulting to the
 * entry's index among the kept records and `priority` to "medium".
 * Returns [] for malformed JSON or a non-array payload.
 */
function parseChunkList(raw, primaryKey, fallbackKey) {
  let parsed;
  try {
    parsed = JSON.parse(raw);
  } catch {
    return [];
  }
  const list = parsed?.[primaryKey] ?? parsed?.[fallbackKey] ?? [];
  if (!Array.isArray(list)) {
    return [];
  }
  const records = [];
  for (const entry of list) {
    if (entry === null || typeof entry !== "object") {
      continue;
    }
    records.push({
      id: String(entry.id ?? ""),
      content: String(entry.content ?? ""),
      // records.length is the index of this entry among kept objects.
      position: Number(entry.position ?? records.length),
      priority: String(entry.priority ?? "medium")
    });
  }
  return records;
}
/** Parse a chunking response (prefers the "chunks" key). */
function parseChunks(raw) {
  return parseChunkList(raw, "chunks", "results");
}
/** Parse a retrieval response (prefers the "results" key). */
function parseRetrievalResults(raw) {
  return parseChunkList(raw, "results", "chunks");
}
332
var AgentClient = class {
  // StdioTransport used for all MCP traffic.
  transport;
  // When true, methods connect automatically on first use.
  autoConnect;
  // Tracks whether connect() has completed (client-side view only).
  connected = false;
  /**
   * @param options - { serverPath?, timeout? (ms, default 30s), autoConnect? (default true) }
   */
  constructor(options = {}) {
    this.transport = new StdioTransport(
      options.serverPath,
      options.timeout ?? 3e4
    );
    this.autoConnect = options.autoConnect ?? true;
  }
  /**
   * Connect to MCP server.
   * @returns this, to allow `await new AgentClient().connect()` chaining.
   */
  async connect() {
    await this.transport.connect();
    this.connected = true;
    return this;
  }
  /**
   * Disconnect from MCP server.
   */
  disconnect() {
    this.transport.disconnect();
    this.connected = false;
  }
  /**
   * Ensure client is connected; auto-connects when enabled, otherwise
   * throws a ConnectionError.
   */
  async ensureConnected() {
    if (!this.connected) {
      if (this.autoConnect) {
        await this.connect();
      } else {
        throw new ConnectionError("Not connected. Call connect() first.");
      }
    }
  }
  // Tool discovery
  /**
   * List all available tools via the MCP "tools/list" method.
   * @returns The server's tool descriptors (empty array when absent).
   */
  async listTools() {
    await this.ensureConnected();
    const result = await this.transport.request("tools/list");
    return result.tools ?? [];
  }
  /**
   * Call a tool by name via "tools/call".
   * @param name - Tool name as reported by listTools().
   * @param args - Tool arguments object.
   * @returns The text of the first content item, or "" when none.
   */
  async callTool(name, args) {
    await this.ensureConnected();
    const result = await this.transport.request("tools/call", {
      name,
      arguments: args
    });
    const content = result.content;
    // Only the first content item's text is surfaced; additional items,
    // if any, are ignored.
    if (content && content.length > 0 && content[0]) {
      return content[0].text ?? "";
    }
    return "";
  }
  // Reasoning
  /**
   * Perform Tree-of-Thought reasoning on a problem.
   *
   * @param problem - Non-empty problem statement.
   * @param options - { depth? (1-10, default 3), branches? (1-10, default 3) }
   * @returns Parsed reasoning result; see parseReasoningResult.
   * @throws {ValidationError} On empty problem or out-of-range depth/branches.
   *
   * @example
   * ```typescript
   * const result = await client.reason(
   *   "What's the best sorting algorithm for nearly sorted data?",
   *   { depth: 4, branches: 3 }
   * );
   * console.log(result.solution);
   * ```
   */
  async reason(problem, options = {}) {
    const { depth = 3, branches = 3 } = options;
    if (!problem.trim()) {
      throw new ValidationError("Problem cannot be empty", "problem");
    }
    if (depth < 1 || depth > 10) {
      throw new ValidationError("Depth must be between 1 and 10", "depth");
    }
    if (branches < 1 || branches > 10) {
      throw new ValidationError("Branches must be between 1 and 10", "branches");
    }
    const content = await this.callTool("reason_tree_of_thought", {
      problem,
      depth,
      branches
    });
    return this.parseReasoningResult(content, problem, depth, branches);
  }
  // Normalize the tool output: prefer a structured JSON payload with a
  // `solution` field; otherwise scan plain text for a "Solution:" /
  // "Best path:" marker and take the first non-blank line after it.
  // The text fallback assumes the server's output format — TODO confirm.
  parseReasoningResult(content, problem, depth, branches) {
    try {
      const data = JSON.parse(content);
      if (data.solution !== void 0) {
        return {
          problem: data.problem ?? problem,
          solution: data.solution,
          thoughtTree: data.thought_tree ?? content,
          depth: data.depth ?? depth,
          branches: data.branches ?? branches,
          rawContent: content
        };
      }
    } catch {
    }
    const lines = content.split("\n");
    let solution = "";
    let inSolution = false;
    for (const line of lines) {
      if (line.includes("Solution:") || line.includes("Best path:")) {
        inSolution = true;
        continue;
      }
      if (inSolution && line.trim()) {
        solution = line.trim();
        break;
      }
    }
    return {
      problem,
      solution: solution || "See thought tree for analysis",
      thoughtTree: content,
      depth,
      branches,
      rawContent: content
    };
  }
  // Concept extraction
  /**
   * Extract concepts from text or code.
   *
   * @param text - Non-empty input text.
   * @param sourceType - "text" (default) or "code".
   * @returns Parsed concept list; see parseConceptResult.
   * @throws {ValidationError} On empty text.
   *
   * @example
   * ```typescript
   * const result = await client.extractConcepts(code, 'code');
   * for (const concept of result.concepts) {
   *   console.log(`${concept.name}: ${concept.weight}`);
   * }
   * ```
   */
  async extractConcepts(text, sourceType = "text") {
    if (!text.trim()) {
      throw new ValidationError("Text cannot be empty", "text");
    }
    const content = await this.callTool("extract_concepts", {
      text,
      source_type: sourceType
    });
    return this.parseConceptResult(content, sourceType);
  }
  // Normalize the tool output: prefer a JSON payload with a `concepts`
  // array; otherwise fall back to scraping "- name: ..." bullet lines,
  // assigning each a default type/weight. The bullet format is an
  // assumption about the server's plain-text output — TODO confirm.
  parseConceptResult(content, sourceType) {
    try {
      const data = JSON.parse(content);
      if (Array.isArray(data.concepts)) {
        return {
          concepts: data.concepts.map((c) => ({
            name: String(c.name ?? ""),
            conceptType: String(c.concept_type ?? "Entity"),
            weight: Number(c.weight ?? 0.8)
          })),
          sourceType: ["text", "code"].includes(String(data.source_type)) ? String(data.source_type) : sourceType,
          rawContent: content
        };
      }
    } catch {
    }
    const concepts = [];
    const lines = content.split("\n");
    for (const line of lines) {
      if (line.trim().startsWith("- ")) {
        const name = line.trim().slice(2).split(":")[0]?.trim();
        if (name) {
          concepts.push({
            name,
            conceptType: "Entity",
            weight: 0.8
          });
        }
      }
    }
    return {
      concepts,
      sourceType,
      rawContent: content
    };
  }
  // Long context management
  /**
   * Split a document into manageable chunks.
   * @param content - Non-empty document text.
   * @param chunkSize - Target chunk size (default 512; units defined by the server).
   * @throws {ValidationError} On empty content.
   */
  async chunkDocument(content, chunkSize = 512) {
    if (!content.trim()) {
      throw new ValidationError("Content cannot be empty", "content");
    }
    const result = await this.callTool("manage_long_context", {
      operation: "chunk",
      content,
      chunk_size: chunkSize
    });
    const chunks = parseChunks(result);
    return {
      chunks,
      totalChunks: chunks.length,
      rawContent: result
    };
  }
  /**
   * Retrieve relevant context for a query.
   * @param query - Non-empty query string.
   * @param topK - Maximum number of results (default 5).
   * @throws {ValidationError} On empty query.
   */
  async retrieveContext(query, topK = 5) {
    if (!query.trim()) {
      throw new ValidationError("Query cannot be empty", "query");
    }
    const result = await this.callTool("manage_long_context", {
      operation: "retrieve",
      query,
      top_k: topK
    });
    return {
      query,
      results: parseRetrievalResults(result),
      rawContent: result
    };
  }
  /**
   * Summarize document content.
   * @param content - Non-empty document text.
   * @returns The raw summary text from the tool.
   * @throws {ValidationError} On empty content.
   */
  async summarize(content) {
    if (!content.trim()) {
      throw new ValidationError("Content cannot be empty", "content");
    }
    return this.callTool("manage_long_context", {
      operation: "summarize",
      content
    });
  }
  // Knowledge graph
  /**
   * Search the knowledge graph.
   * Note: `nodes` is always returned empty here; callers must read the
   * server output from `rawContent`.
   * @throws {ValidationError} On empty query.
   */
  async searchKnowledge(query, limit = 10) {
    if (!query.trim()) {
      throw new ValidationError("Query cannot be empty", "query");
    }
    const result = await this.callTool("query_knowledge_graph", {
      operation: "search",
      query,
      limit
    });
    return {
      operation: "search",
      nodes: [],
      rawContent: result
    };
  }
  /**
   * Add concepts from text to knowledge graph.
   * Note: `nodes` is always returned empty; see `rawContent` for output.
   * @throws {ValidationError} On empty text.
   */
  async addToKnowledge(text) {
    if (!text.trim()) {
      throw new ValidationError("Text cannot be empty", "text");
    }
    const result = await this.callTool("query_knowledge_graph", {
      operation: "add",
      text
    });
    return {
      operation: "add",
      nodes: [],
      rawContent: result
    };
  }
  /**
   * Get knowledge graph statistics.
   * Note: `nodes` is always returned empty; see `rawContent` for output.
   */
  async knowledgeStats() {
    const result = await this.callTool("query_knowledge_graph", {
      operation: "stats"
    });
    return {
      operation: "stats",
      nodes: [],
      rawContent: result
    };
  }
  // Task planning
  /**
   * Break down a task into actionable steps.
   *
   * @param task - Non-empty task description.
   * @param options - { context? (default ""), maxSteps? (1-20, default 10),
   *   detailLevel? (default "normal") }
   * @returns Parsed plan; see parsePlanResult.
   * @throws {ValidationError} On empty task or out-of-range maxSteps.
   *
   * @example
   * ```typescript
   * const plan = await client.planTask(
   *   "Implement user authentication",
   *   { context: "Using JWT tokens", detailLevel: "detailed" }
   * );
   * for (const step of plan.steps) {
   *   console.log(`${step.number}. ${step.title}`);
   * }
   * ```
   */
  async planTask(task, options = {}) {
    const {
      context = "",
      maxSteps = 10,
      detailLevel = "normal"
    } = options;
    if (!task.trim()) {
      throw new ValidationError("Task cannot be empty", "task");
    }
    if (maxSteps < 1 || maxSteps > 20) {
      throw new ValidationError("maxSteps must be between 1 and 20", "maxSteps");
    }
    const content = await this.callTool("plan_task", {
      task,
      context,
      max_steps: maxSteps,
      detail_level: detailLevel
    });
    return this.parsePlanResult(content, task);
  }
  // Normalize the tool output: prefer a JSON payload with a `steps`
  // array; otherwise scrape a markdown-ish layout ("**Category**:",
  // numbered "1. Title" lines, "**Estimated total effort**:"). The
  // markdown layout is an assumption about the server's plain-text
  // format — TODO confirm against the server implementation.
  parsePlanResult(content, task) {
    try {
      const data = JSON.parse(content);
      if (Array.isArray(data.steps)) {
        return {
          task: data.task ?? task,
          category: data.category ?? "General",
          steps: data.steps.map((s, i) => ({
            number: Number(s.number ?? i + 1),
            title: String(s.title ?? ""),
            description: String(s.description ?? ""),
            priority: String(s.priority ?? "Medium"),
            effort: String(s.effort ?? ""),
            dependencies: Array.isArray(s.dependencies) ? s.dependencies.map(String) : []
          })),
          totalEffort: String(data.total_effort ?? ""),
          rawContent: content
        };
      }
    } catch {
    }
    const steps = [];
    let category = "General";
    let totalEffort = "";
    let currentStep = 0;
    // Matches "1. Step title" style lines.
    const stepPattern = /^(\d+)\.\s+(.+)$/;
    const lines = content.split("\n");
    for (const line of lines) {
      const trimmed = line.trim();
      if (trimmed.startsWith("**Category**:")) {
        category = trimmed.slice("**Category**:".length).trim() || "General";
        continue;
      }
      const match = stepPattern.exec(trimmed);
      if (match) {
        // Steps are renumbered sequentially regardless of the number in
        // the text, so gaps in the source numbering are closed.
        currentStep++;
        const title = match[2]?.trim() ?? "";
        if (title) {
          steps.push({
            number: currentStep,
            title,
            description: "",
            priority: "Medium",
            effort: "",
            dependencies: []
          });
        }
        continue;
      }
      if (trimmed.startsWith("**Estimated total effort**:")) {
        totalEffort = trimmed.slice("**Estimated total effort**:".length).trim();
      }
    }
    return {
      task,
      category,
      steps,
      totalEffort,
      rawContent: content
    };
  }
};
716
+
717
+ // src/conversation.ts
718
var Conversation = class {
  // Client used to send chat requests (anything exposing chat()).
  client;
  // Full ordered message history: system, user, assistant turns.
  history = [];
  // Model name sent with every request.
  model;
  /**
   * @param client - The HttpClient (or compatible) used for chat calls.
   * @param options - Optional { system?: string, model?: string }.
   */
  constructor(client, options) {
    this.client = client;
    this.model = options?.model ?? "default";
    const systemPrompt = options?.system;
    if (systemPrompt) {
      this.history.push({ role: "system", content: systemPrompt });
    }
  }
  /**
   * Send a user message and get the assistant reply.
   * Both the user message and the assistant reply are appended to history.
   *
   * @param message - The user message to send
   * @returns The assistant's reply content string
   *
   * @example
   * ```typescript
   * const conv = client.conversation({ system: 'You are helpful' });
   * const reply = await conv.say('What is TypeScript?');
   * const followUp = await conv.say('Show me an example'); // includes prior context
   * ```
   */
  async say(message) {
    this.history.push({ role: "user", content: message });
    let response;
    try {
      response = await this.client.chat({
        model: this.model,
        messages: [...this.history]
      });
    } catch (err) {
      // Roll back the optimistically appended user turn so a failed call
      // leaves the history exactly as it was.
      this.history.pop();
      throw err;
    }
    const assistantReply = response.choices[0]?.message?.content ?? "";
    this.history.push({ role: "assistant", content: assistantReply });
    return assistantReply;
  }
  /**
   * Return a copy of the full conversation history.
   *
   * @returns An array of ChatMessage objects (system, user, assistant turns)
   *
   * @example
   * ```typescript
   * const history = conv.getHistory();
   * console.log(`${history.length} messages in conversation`);
   * ```
   */
  getHistory() {
    return [...this.history];
  }
  /**
   * Clear user and assistant messages, preserving the system prompt (if any).
   *
   * @example
   * ```typescript
   * conv.clear();
   * // System prompt is preserved; user/assistant messages are removed.
   * ```
   */
  clear() {
    const preserved = this.history.find((m) => m.role === "system");
    this.history = preserved !== void 0 ? [preserved] : [];
  }
};
787
+
788
+ // src/retry_fetch.ts
789
// Status codes worth retrying: rate limiting and transient upstream errors.
var RETRYABLE_STATUSES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]);
/**
 * Issue an HTTP request with timeout, automatic retries and exponential
 * backoff.
 *
 * Retries cover network-level failures (fetch rejects with TypeError) and
 * the statuses in RETRYABLE_STATUSES, up to config.maxRetries additional
 * attempts. Timeouts raised by AbortSignal are NOT retried — they propagate
 * immediately.
 *
 * @param config - { baseUrl, headers, timeoutMs, maxRetries, retryBackoffMs,
 *   onLog?, onRequest?, onResponse?, onError? }
 * @param method - HTTP verb ("GET", "POST", ...).
 * @param path - Path appended verbatim to config.baseUrl.
 * @param body - Optional JSON-serializable body (skipped for GET/HEAD).
 * @param overrideTimeoutMs - Optional per-call timeout override.
 * @returns The parsed JSON response body.
 * @throws Error on a non-retryable status or once retries are exhausted.
 *   config.onError is notified exactly once per failed call.
 */
async function fetchWithRetry(config, method, path, body, overrideTimeoutMs) {
  const effectiveTimeout = overrideTimeoutMs ?? config.timeoutMs;
  let lastError;
  for (let attempt = 0; attempt <= config.maxRetries; attempt++) {
    try {
      config.onLog?.("debug", `${method} ${path} (attempt ${attempt + 1})`);
      config.onRequest?.(method, path);
      const start = Date.now();
      const options = {
        method,
        headers: config.headers,
        signal: AbortSignal.timeout(effectiveTimeout)
      };
      if (body !== void 0 && method !== "GET" && method !== "HEAD") {
        options.body = JSON.stringify(body);
      }
      const resp = await fetch(`${config.baseUrl}${path}`, options);
      const durationMs = Date.now() - start;
      if (resp.ok) {
        config.onLog?.("debug", `${method} ${path} -> ${resp.status}`);
        config.onResponse?.(resp.status, durationMs);
        return resp.json();
      }
      config.onResponse?.(resp.status, durationMs);
      if (RETRYABLE_STATUSES.has(resp.status) && attempt < config.maxRetries) {
        const waitMs = computeBackoff(config.retryBackoffMs, attempt, resp);
        config.onLog?.("warn", `${method} ${path} -> ${resp.status}, retrying in ${waitMs}ms`);
        await sleep(waitMs);
        lastError = new Error(`HTTP ${resp.status}: ${await resp.text()}`);
        continue;
      }
      const text = await resp.text();
      config.onLog?.("error", `${method} ${path} -> ${resp.status}: ${text}`);
      // Fix: throw without calling onError here — the catch block below is
      // the single notification point. Previously this path invoked
      // config.onError twice for the same failure (once here, once in the
      // catch that re-observed the thrown error).
      throw new Error(`HTTP ${resp.status}: ${text}`);
    } catch (err) {
      // Network-level failures surface as TypeError from fetch; retry them.
      if (err instanceof TypeError && attempt < config.maxRetries) {
        const waitMs = config.retryBackoffMs * Math.pow(2, attempt);
        config.onLog?.("warn", `${method} ${path} network error, retrying in ${waitMs}ms`);
        await sleep(waitMs);
        lastError = err;
        continue;
      }
      if (err instanceof Error) {
        config.onError?.(err);
      }
      throw err;
    }
  }
  // Every attempt was consumed by a retryable failure.
  const finalErr = lastError ?? new Error("Request failed after retries");
  config.onError?.(finalErr);
  throw finalErr;
}
/**
 * Compute the wait before the next retry attempt.
 *
 * Honors the Retry-After response header in both RFC 9110 forms —
 * delay-seconds and HTTP-date — and falls back to exponential backoff
 * (retryBackoffMs * 2^attempt) when the header is absent or unusable.
 *
 * @param retryBackoffMs - Base backoff in milliseconds.
 * @param attempt - Zero-based attempt index.
 * @param resp - The HTTP response whose headers are consulted.
 * @returns Milliseconds to wait.
 */
function computeBackoff(retryBackoffMs, attempt, resp) {
  const retryAfter = resp.headers.get("Retry-After");
  if (retryAfter) {
    const seconds = Number(retryAfter);
    if (!Number.isNaN(seconds) && seconds > 0) {
      return seconds * 1e3;
    }
    // Retry-After may also be an HTTP-date (RFC 9110 §10.2.3); previously
    // that form was silently ignored.
    const dateMs = Date.parse(retryAfter);
    if (!Number.isNaN(dateMs)) {
      const delta = dateMs - Date.now();
      if (delta > 0) {
        return delta;
      }
    }
  }
  return retryBackoffMs * Math.pow(2, attempt);
}
/** Promise-based delay helper. */
function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
857
+
858
+ // src/sse_parser.ts
859
/**
 * Parse an OpenAI-style Server-Sent-Events stream, yielding each content
 * delta string as it arrives.
 *
 * Only lines of the form `data: <json>` are considered; `data: [DONE]`
 * terminates the stream. Malformed JSON payloads are logged and skipped.
 *
 * Fix: a final event left in the buffer when the stream ends WITHOUT a
 * trailing newline is now flushed (previously it was silently dropped).
 *
 * @param body - The ReadableStream from a fetch response.
 * @yields Content delta strings from choices[0].delta.content.
 */
async function* parseSSEStream(body) {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  // Sentinel distinguishing "[DONE]" from "nothing to yield".
  const DONE = Symbol("done");
  // Extract the content delta from one SSE line: undefined to skip,
  // DONE to stop, or the content string to yield.
  const extract = (line) => {
    const trimmed = line.trim();
    if (!trimmed.startsWith("data: ")) return undefined;
    const payload = trimmed.slice(6);
    if (payload === "[DONE]") return DONE;
    try {
      const chunk = JSON.parse(payload);
      const content = chunk?.choices?.[0]?.delta?.content;
      if (typeof content === "string") return content;
    } catch (e) {
      console.warn("[wauldo] Malformed SSE chunk skipped:", String(e).slice(0, 100));
    }
    return undefined;
  };
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        // Flush any bytes buffered inside the streaming decoder.
        const remaining = decoder.decode();
        if (remaining) buffer += remaining;
        break;
      }
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      // The last element may be a partial line; keep it for the next read.
      buffer = lines.pop() ?? "";
      for (const line of lines) {
        const result = extract(line);
        if (result === DONE) return;
        if (result !== undefined) yield result;
      }
    }
    // Flush a final event that arrived without a trailing newline.
    if (buffer) {
      const result = extract(buffer);
      if (result !== undefined && result !== DONE) yield result;
    }
  } finally {
    reader.releaseLock();
  }
}
894
+
895
+ // src/http_client.ts
896
/**
 * Guard against absent HTTP payloads.
 * @param data - Parsed response body.
 * @param typeName - Label used in the error message.
 * @returns The payload, unchanged, when present (falsy values like 0 and "" pass).
 * @throws {ServerError} When the payload is null or undefined.
 */
function validateResponse(data, typeName) {
  // `== null` intentionally matches both null and undefined.
  if (data == null) {
    throw new ServerError(`Invalid ${typeName}: response is null`, 0);
  }
  return data;
}
902
var HttpClient = class {
  // Shared config passed to fetchWithRetry: baseUrl, headers, timeouts,
  // retry policy, and optional observability hooks.
  retryConfig;
  /**
   * @param config - { baseUrl? (default http://localhost:3000), apiKey?,
   *   headers?, timeoutMs? (default 120s), maxRetries? (default 3),
   *   retryBackoffMs? (default 1s), onLog?, onRequest?, onResponse?, onError? }
   */
  constructor(config = {}) {
    // Strip one trailing slash so `${baseUrl}${path}` joins cleanly.
    const baseUrl = (config.baseUrl ?? "http://localhost:3000").replace(/\/$/, "");
    const headers = { "Content-Type": "application/json" };
    if (config.apiKey) {
      headers["Authorization"] = `Bearer ${config.apiKey}`;
    }
    if (config.headers) {
      // Caller-supplied headers win over the defaults above.
      Object.assign(headers, config.headers);
    }
    this.retryConfig = {
      baseUrl,
      headers,
      timeoutMs: config.timeoutMs ?? 12e4,
      maxRetries: config.maxRetries ?? 3,
      retryBackoffMs: config.retryBackoffMs ?? 1e3,
      onLog: config.onLog,
      onRequest: config.onRequest,
      onResponse: config.onResponse,
      onError: config.onError
    };
  }
  // ── OpenAI-compatible endpoints ──────────────────────────────────────
  /** GET /v1/models — List available LLM models */
  async listModels() {
    const data = await fetchWithRetry(this.retryConfig, "GET", "/v1/models");
    return validateResponse(data, "ModelList");
  }
  /**
   * POST /v1/chat/completions — Chat completion (non-streaming).
   *
   * @param request - The chat request (model, messages, temperature, etc.)
   * @param options - Optional per-request overrides (e.g. timeoutMs)
   * @returns The full chat completion response
   *
   * @example
   * ```typescript
   * const resp = await client.chat({
   *   model: 'qwen2.5:7b',
   *   messages: [{ role: 'user', content: 'Hello' }],
   * });
   * console.log(resp.choices[0]?.message?.content);
   * ```
   */
  async chat(request, options) {
    const data = await fetchWithRetry(
      this.retryConfig,
      "POST",
      "/v1/chat/completions",
      // stream is forced off here; use chatStream() for SSE streaming.
      { ...request, stream: false },
      options?.timeoutMs
    );
    return validateResponse(data, "ChatResponse");
  }
  /**
   * Convenience: single message chat, returns content string.
   * @param model - Model name.
   * @param message - The single user message.
   * @returns The assistant reply content, or "" when absent.
   */
  async chatSimple(model, message) {
    const resp = await this.chat({
      model,
      messages: [{ role: "user", content: message }]
    });
    return resp.choices[0]?.message?.content ?? "";
  }
  /**
   * POST /v1/chat/completions — SSE streaming, yields content chunks.
   * Note: this path does NOT retry; errors propagate after a single attempt.
   * @param request - The chat request; `stream: true` is forced.
   * @param options - Optional per-request overrides (e.g. timeoutMs).
   */
  async *chatStream(request, options) {
    const cfg = this.retryConfig;
    const effectiveTimeout = options?.timeoutMs ?? cfg.timeoutMs;
    cfg.onRequest?.("POST", "/v1/chat/completions");
    const start = Date.now();
    let resp;
    try {
      resp = await fetch(`${cfg.baseUrl}/v1/chat/completions`, {
        method: "POST",
        headers: { ...cfg.headers },
        body: JSON.stringify({ ...request, stream: true }),
        signal: AbortSignal.timeout(effectiveTimeout)
      });
    } catch (err) {
      if (err instanceof Error) cfg.onError?.(err);
      throw err;
    }
    if (!resp.ok) {
      const body = await resp.text();
      const err = new ServerError(`HTTP ${resp.status}: ${body}`, resp.status);
      cfg.onError?.(err);
      throw err;
    }
    // onResponse reflects time-to-headers, not time-to-last-chunk.
    cfg.onResponse?.(resp.status, Date.now() - start);
    if (!resp.body) throw new ServerError("No response body for streaming", 0);
    yield* parseSSEStream(resp.body);
  }
  /**
   * POST /v1/embeddings — Generate text embeddings.
   * @param input - Text or array of texts to embed.
   * @param model - Optional embedding model name.
   */
  async embeddings(input, model) {
    const data = await fetchWithRetry(
      this.retryConfig,
      "POST",
      "/v1/embeddings",
      { input, model }
    );
    return validateResponse(data, "EmbeddingResponse");
  }
  // ── RAG endpoints ────────────────────────────────────────────────────
  /**
   * POST /v1/upload — Upload document for RAG indexing.
   *
   * @param content - The document text to index
   * @param filename - Optional filename for the document
   * @param options - Optional per-request overrides (e.g. timeoutMs)
   * @returns Upload confirmation with document_id and chunks_count
   */
  async ragUpload(content, filename, options) {
    const body = { content };
    if (filename) body["filename"] = filename;
    const data = await fetchWithRetry(
      this.retryConfig,
      "POST",
      "/v1/upload",
      body,
      options?.timeoutMs
    );
    return validateResponse(data, "RagUploadResponse");
  }
  /**
   * POST /v1/query — Query RAG knowledge base.
   * @param query - Query string.
   * @param topK - Number of results to retrieve (default 5).
   * @param options - Optional { debug?, qualityMode? } flags forwarded to
   *   the server as `debug` / `quality_mode`.
   */
  async ragQuery(query, topK = 5, options) {
    const body = { query, top_k: topK };
    if (options?.debug) body.debug = true;
    if (options?.qualityMode) body.quality_mode = options.qualityMode;
    const data = await fetchWithRetry(
      this.retryConfig,
      "POST",
      "/v1/query",
      body
    );
    return validateResponse(data, "RagQueryResponse");
  }
  // ── Conversation & RAG helpers ────────────────────────────────────────
  /**
   * Create a stateful conversation that tracks message history automatically.
   *
   * @param options - Optional system prompt and model name
   * @returns A Conversation instance bound to this client
   *
   * @example
   * ```typescript
   * const conv = client.conversation({ system: 'You are a TypeScript expert' });
   * const reply = await conv.say('What are generics?');
   * ```
   */
  conversation(options) {
    return new Conversation(this, options);
  }
  /**
   * Upload text to RAG, then query it — one-shot Q&A over a document.
   *
   * @param question - The question to ask about the document
   * @param text - The document text to index and query
   * @param source - Optional source name (defaults to 'document')
   * @returns The answer string
   */
  async ragAsk(question, text, source = "document") {
    await this.ragUpload(text, source);
    const result = await this.ragQuery(question, 3);
    // Fall back to the serialized sources when no direct answer is given.
    return result.answer ?? JSON.stringify(result.sources);
  }
  // ── Orchestrator endpoints ───────────────────────────────────────────
  /** POST /v1/orchestrator/execute — Route to best specialist agent */
  async orchestrate(prompt) {
    const data = await fetchWithRetry(
      this.retryConfig,
      "POST",
      "/v1/orchestrator/execute",
      { prompt }
    );
    return validateResponse(data, "OrchestratorResponse");
  }
  /** POST /v1/orchestrator/parallel — Run all 4 specialists in parallel */
  async orchestrateParallel(prompt) {
    const data = await fetchWithRetry(
      this.retryConfig,
      "POST",
      "/v1/orchestrator/parallel",
      { prompt }
    );
    return validateResponse(data, "OrchestratorResponse");
  }
};
1088
+
1089
+ // src/mock_client.ts
1090
// Default canned chat completion served by MockHttpClient until a caller
// overrides it with withChatResponse(); mirrors the OpenAI-style
// chat.completion response shape used by the real client.
var DEFAULT_CHAT = {
  id: "mock-1",
  object: "chat.completion",
  created: 0,
  model: "mock-model",
  choices: [{ index: 0, message: { role: "assistant", content: "Mock reply" }, finish_reason: "stop" }],
  usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }
};
1098
// Default model listing served by MockHttpClient.listModels() until a caller
// overrides it with withModels(); matches the OpenAI-style list envelope.
var DEFAULT_MODELS = {
  object: "list",
  data: [{ id: "mock-model", object: "model", created: 0, owned_by: "mock" }]
};
1102
var MockHttpClient = class {
  // Canned responses (overridable via the with* builders) and a log of
  // every method invocation, in order, for test assertions.
  chatResponse = DEFAULT_CHAT;
  modelList = DEFAULT_MODELS;
  calls = [];
  /**
   * Set the canned response returned by `chat()` and `chatSimple()`.
   *
   * @param response - The ChatResponse to hand back on subsequent chat calls
   * @returns `this`, so configuration calls can be chained
   *
   * @example
   * ```typescript
   * const mock = new MockHttpClient().withChatResponse({
   *   id: 'test-1', object: 'chat.completion', created: 0, model: 'test',
   *   choices: [{ index: 0, message: { role: 'assistant', content: 'Hi' }, finish_reason: 'stop' }],
   *   usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
   * });
   * ```
   */
  withChatResponse(response) {
    this.chatResponse = response;
    return this;
  }
  /**
   * Set the model list returned by `listModels()`.
   *
   * @param models - ModelInfo objects to expose
   * @returns `this`, so configuration calls can be chained
   *
   * @example
   * ```typescript
   * const mock = new MockHttpClient().withModels([
   *   { id: 'gpt-4', object: 'model', created: 0, owned_by: 'openai' },
   * ]);
   * ```
   */
  withModels(models) {
    this.modelList = { object: "list", data: models };
    return this;
  }
  async listModels() {
    this.record("listModels");
    return this.modelList;
  }
  async chat(request, _options) {
    this.record("chat", request);
    return this.chatResponse;
  }
  async chatSimple(model, message) {
    this.record("chatSimple", model, message);
    const first = this.chatResponse.choices[0];
    return first?.message?.content ?? "";
  }
  // Streams the canned reply one whitespace-delimited word at a time,
  // each chunk carrying a trailing space.
  async *chatStream(_request, _options) {
    this.record("chatStream", _request);
    const text = this.chatResponse.choices[0]?.message?.content ?? "";
    yield* text.split(" ").map((word) => word + " ");
  }
  async embeddings(input, model) {
    this.record("embeddings", input, model);
    const texts = Array.isArray(input) ? input : [input];
    return {
      data: texts.map((_, idx) => ({ embedding: [0.1, 0.2, 0.3], index: idx })),
      model,
      usage: { prompt_tokens: 5, total_tokens: 5 }
    };
  }
  async ragUpload(content, filename, _options) {
    this.record("ragUpload", content, filename);
    return { document_id: "mock-doc-1", chunks_count: 1 };
  }
  async ragQuery(query, topK = 5, options) {
    this.record("ragQuery", query, topK, options);
    return { answer: `Mock answer for: ${query}`, sources: [] };
  }
  async orchestrate(prompt) {
    this.record("orchestrate", prompt);
    return { final_output: `Mock orchestration: ${prompt}` };
  }
  async orchestrateParallel(prompt) {
    this.record("orchestrateParallel", prompt);
    return { final_output: `Mock parallel: ${prompt}` };
  }
  conversation(options) {
    this.record("conversation", options);
    return new Conversation(this, options);
  }
  async ragAsk(question, text, source = "document") {
    this.record("ragAsk", question, text, source);
    await this.ragUpload(text, source);
    const { answer } = await this.ragQuery(question, 3);
    return answer;
  }
  // Append one invocation record; inspected via `calls` in tests.
  record(method, ...args) {
    this.calls.push({ method, args });
  }
};
1200
// Annotate the CommonJS export names for ESM import in node:
// The `0 &&` makes this dead code at runtime; Node's cjs-module-lexer still
// parses the literal statically so `import { AgentClient } from 'wauldo'`
// works from ESM. Do not remove or restructure this statement.
0 && (module.exports = {
  AgentClient,
  ConnectionError,
  Conversation,
  HttpClient,
  MockHttpClient,
  ServerError,
  TimeoutError,
  ToolNotFoundError,
  ValidationError,
  WauldoError
});