wauldo 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,590 @@
1
/**
 * Type definitions for Wauldo SDK
 */

/** Construction options for the MCP agent client. */
interface ClientOptions {
  /** Path to MCP server binary */
  serverPath?: string;
  /** Default timeout in milliseconds */
  timeout?: number;
  /** Automatically connect on first operation */
  autoConnect?: boolean;
}

/** Tuning options for Tree-of-Thought reasoning. */
interface ReasoningOptions {
  /** Depth of the thought tree (1-10) */
  depth?: number;
  /** Number of branches at each level (1-10) */
  branches?: number;
}

/** Parsed result of a reasoning call. */
interface ReasoningResult {
  /** The problem statement that was analyzed. */
  problem: string;
  /** The final solution text. */
  solution: string;
  /** Textual rendering of the explored thought tree. */
  thoughtTree: string;
  /** Tree depth actually used. */
  depth: number;
  /** Branching factor actually used. */
  branches: number;
  /** Raw, unparsed tool output. */
  rawContent: string;
}

/** Kind of input passed to concept extraction. */
type SourceType = 'text' | 'code';

/** A single concept extracted from text or code. */
interface Concept {
  name: string;
  /** Free-form category label for the concept. */
  conceptType: string;
  /** Importance weight — scale/range not specified here; confirm with server docs. */
  weight: number;
  description?: string;
}

/** Result of extractConcepts(). */
interface ConceptResult {
  concepts: Concept[];
  /** Whether the input was treated as prose or code. */
  sourceType: SourceType;
  /** Raw, unparsed tool output. */
  rawContent: string;
}

/** One piece of a chunked document. */
interface Chunk {
  id: string;
  content: string;
  /** Position within the document — base (0 or 1) not specified here; confirm with server docs. */
  position: number;
  priority: string;
}

/** Result of chunkDocument(). */
interface ChunkResult {
  chunks: Chunk[];
  totalChunks: number;
  /** Raw, unparsed tool output. */
  rawContent: string;
}

/** Result of retrieveContext(). */
interface RetrievalResult {
  /** The query that was issued. */
  query: string;
  /** Matching chunks — presumably ordered by relevance; confirm with server docs. */
  results: Chunk[];
  /** Raw, unparsed tool output. */
  rawContent: string;
}

/** A node in the knowledge graph. */
interface GraphNode {
  id: string;
  name: string;
  /** Free-form node category label. */
  nodeType: string;
  weight: number;
}

/** Result of a knowledge-graph operation (search / add / stats). */
interface KnowledgeGraphResult {
  /** Name of the operation that produced this result. */
  operation: string;
  nodes: GraphNode[];
  /** Graph statistics; optional — presumably populated by stats-style operations. */
  stats?: Record<string, unknown>;
  /** Raw, unparsed tool output. */
  rawContent: string;
}

/** How verbose each generated plan step should be. */
type DetailLevel = 'brief' | 'normal' | 'detailed';

/** A single step in a generated task plan. */
interface PlanStep {
  /** Step number — presumably 1-based; confirm against the parser. */
  number: number;
  title: string;
  description: string;
  priority: string;
  /** Effort estimate for this step (free-form string). */
  effort: string;
  /** Prerequisite steps — exact format (titles vs numbers) not specified here. */
  dependencies: string[];
}

/** Result of planTask(). */
interface PlanResult {
  /** The task that was planned. */
  task: string;
  /** Category the planner assigned to the task. */
  category: string;
  steps: PlanStep[];
  /** Aggregate effort estimate (free-form string). */
  totalEffort: string;
  /** Raw, unparsed tool output. */
  rawContent: string;
}

/** Options for planTask(). */
interface PlanOptions {
  /** Additional context or constraints */
  context?: string;
  /** Maximum number of steps (1-20) */
  maxSteps?: number;
  /** Level of detail for each step */
  detailLevel?: DetailLevel;
}

/** An MCP tool advertised by the server. */
interface ToolDefinition {
  name: string;
  description: string;
  /** Tool argument schema (MCP convention is JSON Schema — confirm against server). */
  inputSchema: Record<string, unknown>;
}

/** One content item inside a tool response (text only). */
interface ToolContent {
  type: 'text';
  text: string;
}

/** Envelope returned by an MCP tool call. */
interface CallToolResponse {
  content: ToolContent[];
  /** True when the tool reported an error. */
  isError?: boolean;
}
103
+
104
/**
 * Wauldo client implementation
 */

/**
 * Client for Wauldo MCP Server
 *
 * @example
 * ```typescript
 * const client = new AgentClient();
 * await client.connect();
 *
 * const result = await client.reason("How to optimize this algorithm?");
 * console.log(result.solution);
 *
 * client.disconnect();
 * ```
 */
declare class AgentClient {
  /** Transport used to communicate with the MCP server process. */
  private readonly transport;
  /** Connect implicitly on first operation — presumably mirrors ClientOptions.autoConnect. */
  private readonly autoConnect;
  /** Current connection state. */
  private connected;
  /**
   * @param options - Server path, default timeout and auto-connect behavior
   */
  constructor(options?: ClientOptions);
  /**
   * Connect to MCP server
   *
   * @returns This client, enabling `await new AgentClient().connect()` chaining
   */
  connect(): Promise<this>;
  /**
   * Disconnect from MCP server
   */
  disconnect(): void;
  /**
   * Ensure client is connected
   */
  private ensureConnected;
  /**
   * List all available tools
   */
  listTools(): Promise<ToolDefinition[]>;
  /**
   * Call a tool by name
   *
   * @param name - Tool name as reported by listTools()
   * @param args - Tool arguments matching the tool's input schema
   * @returns The tool's textual response
   */
  callTool(name: string, args: Record<string, unknown>): Promise<string>;
  /**
   * Perform Tree-of-Thought reasoning on a problem
   *
   * @example
   * ```typescript
   * const result = await client.reason(
   *   "What's the best sorting algorithm for nearly sorted data?",
   *   { depth: 4, branches: 3 }
   * );
   * console.log(result.solution);
   * ```
   */
  reason(problem: string, options?: ReasoningOptions): Promise<ReasoningResult>;
  /** Internal: parses raw tool output into a ReasoningResult. */
  private parseReasoningResult;
  /**
   * Extract concepts from text or code
   *
   * @example
   * ```typescript
   * const result = await client.extractConcepts(code, 'code');
   * for (const concept of result.concepts) {
   *   console.log(`${concept.name}: ${concept.weight}`);
   * }
   * ```
   */
  extractConcepts(text: string, sourceType?: SourceType): Promise<ConceptResult>;
  /** Internal: parses raw tool output into a ConceptResult. */
  private parseConceptResult;
  /**
   * Split a document into manageable chunks
   */
  chunkDocument(content: string, chunkSize?: number): Promise<ChunkResult>;
  /**
   * Retrieve relevant context for a query
   */
  retrieveContext(query: string, topK?: number): Promise<RetrievalResult>;
  /**
   * Summarize document content
   */
  summarize(content: string): Promise<string>;
  /**
   * Search the knowledge graph
   */
  searchKnowledge(query: string, limit?: number): Promise<KnowledgeGraphResult>;
  /**
   * Add concepts from text to knowledge graph
   */
  addToKnowledge(text: string): Promise<KnowledgeGraphResult>;
  /**
   * Get knowledge graph statistics
   */
  knowledgeStats(): Promise<KnowledgeGraphResult>;
  /**
   * Break down a task into actionable steps
   *
   * @example
   * ```typescript
   * const plan = await client.planTask(
   *   "Implement user authentication",
   *   { context: "Using JWT tokens", detailLevel: "detailed" }
   * );
   * for (const step of plan.steps) {
   *   console.log(`${step.number}. ${step.title}`);
   * }
   * ```
   */
  planTask(task: string, options?: PlanOptions): Promise<PlanResult>;
  /** Internal: parses raw tool output into a PlanResult. */
  private parsePlanResult;
}
215
+
216
/**
 * HTTP API types for OpenAI-compatible endpoints
 */

/** A single chat message (OpenAI wire format). */
interface ChatMessage {
  role: 'system' | 'user' | 'assistant';
  /** Message text; optional to match the OpenAI schema. */
  content?: string;
  /** Optional participant name, as in the OpenAI schema. */
  name?: string;
}

/** Request body for POST /v1/chat/completions. */
interface ChatRequest {
  model: string;
  messages: ChatMessage[];
  temperature?: number;
  max_tokens?: number;
  /** When true the server streams the response (see HttpClient.chatStream). */
  stream?: boolean;
  top_p?: number;
  /** Stop sequences that end generation. */
  stop?: string[];
}

/** Token accounting block in a chat response. */
interface ChatUsage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

/** One completion choice in a chat response. */
interface ChatChoice {
  index: number;
  message: ChatMessage;
  /** Why generation stopped (e.g. 'stop'); may be null. */
  finish_reason: string | null;
}

/** Response body for POST /v1/chat/completions (non-streaming). */
interface ChatResponse {
  id: string;
  /** Object tag, e.g. 'chat.completion'. */
  object: string;
  /** Creation timestamp — presumably Unix seconds, per OpenAI convention. */
  created: number;
  model: string;
  choices: ChatChoice[];
  usage: ChatUsage;
}

/** One model entry from GET /v1/models. */
interface ModelInfo {
  id: string;
  /** Object tag, e.g. 'model'. */
  object: string;
  created: number;
  owned_by: string;
}

/** Response body for GET /v1/models. */
interface ModelList {
  /** Object tag — presumably 'list', per OpenAI convention. */
  object: string;
  data: ModelInfo[];
}

/** One embedding vector in an embeddings response. */
interface EmbeddingData {
  embedding: number[];
  /** Index of the corresponding input string. */
  index: number;
}

/** Token accounting for an embeddings request. */
interface EmbeddingUsage {
  prompt_tokens: number;
  total_tokens: number;
}

/** Response body for POST /v1/embeddings. */
interface EmbeddingResponse {
  data: EmbeddingData[];
  model: string;
  usage: EmbeddingUsage;
}

/** Response body for POST /v1/upload (RAG indexing). */
interface RagUploadResponse {
  document_id: string;
  /** Number of chunks the document was split into. */
  chunks_count: number;
}

/** One retrieved source backing a RAG answer. */
interface RagSource {
  document_id: string;
  /** Text of the retrieved chunk. */
  content: string;
  /** Relevance score — presumably higher is more relevant; confirm with server docs. */
  score: number;
  chunk_id?: string;
  metadata?: Record<string, unknown>;
}

/** Audit trail for RAG responses — verification and accountability */
interface RagAuditInfo {
  /** Numeric answer confidence; labeled via `confidence_label`. */
  confidence: number;
  /** Retrieval strategy/path taken — exact values defined by the server. */
  retrieval_path: string;
  sources_evaluated: number;
  sources_used: number;
  best_score: number;
  /** Whether the answer is grounded in retrieved sources. */
  grounded: boolean;
  /** Human-readable label for `confidence`. */
  confidence_label: string;
  model: string;
  latency_ms: number;
  /** Retrieval funnel diagnostics (v1.6.5+) */
  candidates_found?: number;
  candidates_after_tenant?: number;
  candidates_after_score?: number;
  query_type?: string;
}

/** Response body for POST /v1/query. */
interface RagQueryResponse {
  answer: string;
  sources: RagSource[];
  /** Full audit trail — always present on v1.6.5+ servers */
  audit?: RagAuditInfo;
  /** Top-level confidence — presumably mirrors audit.confidence; may be absent on older servers. */
  confidence?: number;
  /** Top-level groundedness — presumably mirrors audit.grounded; may be absent on older servers. */
  grounded?: boolean;
}

/** Response body from the orchestrator endpoints. */
interface OrchestratorResponse {
  final_output: string;
}

/** Minimal interface required by Conversation — implemented by both HttpClient and MockHttpClient */
interface ChatClientLike {
  chat(request: ChatRequest, options?: RequestOptions): Promise<ChatResponse>;
}

/** Log levels emitted by HttpClient */
type LogLevel = 'debug' | 'warn' | 'error';

/** Configuration for HttpClient. */
interface HttpClientConfig {
  /** Server base URL. */
  baseUrl?: string;
  /** API key sent with each request — presumably as a bearer token; confirm in implementation. */
  apiKey?: string;
  /** Extra headers added to every request (e.g. X-RapidAPI-Key) */
  headers?: Record<string, string>;
  /** Default per-request timeout in milliseconds. */
  timeoutMs?: number;
  /** Maximum retry attempts for failed requests. */
  maxRetries?: number;
  /** Backoff delay between retries, in milliseconds. */
  retryBackoffMs?: number;
  /** Optional callback invoked on request lifecycle events */
  onLog?: (level: LogLevel, message: string) => void;
  /** Called before each HTTP request is sent */
  onRequest?: (method: string, path: string) => void;
  /** Called after each successful HTTP response */
  onResponse?: (status: number, durationMs: number) => void;
  /** Called when an HTTP request fails (after all retries exhausted) */
  onError?: (error: Error) => void;
}

/** Options that can be passed per-request to override defaults */
interface RequestOptions {
  /** Override the default timeout for this specific request (milliseconds) */
  timeoutMs?: number;
}
341
+
342
/**
 * Conversation helper — manages chat history automatically.
 */

declare class Conversation {
  /** Client used to send each chat request. */
  private client;
  /** Accumulated messages: optional system prompt plus user/assistant turns. */
  private history;
  /** Model name sent with each request. */
  private model;
  /**
   * @param client - Any object implementing ChatClientLike (HttpClient or MockHttpClient)
   * @param options - Optional system prompt and model name
   */
  constructor(client: ChatClientLike, options?: {
    system?: string;
    model?: string;
  });
  /**
   * Send a user message and get the assistant reply.
   * Both the user message and the assistant reply are appended to history.
   *
   * @param message - The user message to send
   * @returns The assistant's reply content string
   *
   * @example
   * ```typescript
   * const conv = client.conversation({ system: 'You are helpful' });
   * const reply = await conv.say('What is TypeScript?');
   * const followUp = await conv.say('Show me an example'); // includes prior context
   * ```
   */
  say(message: string): Promise<string>;
  /**
   * Return a copy of the full conversation history.
   *
   * @returns An array of ChatMessage objects (system, user, assistant turns)
   *
   * @example
   * ```typescript
   * const history = conv.getHistory();
   * console.log(`${history.length} messages in conversation`);
   * ```
   */
  getHistory(): ChatMessage[];
  /**
   * Clear user and assistant messages, preserving the system prompt (if any).
   *
   * @example
   * ```typescript
   * conv.clear();
   * // System prompt is preserved; user/assistant messages are removed.
   * ```
   */
  clear(): void;
}
392
+
393
/**
 * HTTP client for Wauldo REST API (OpenAI-compatible)
 *
 * Uses Node 18+ built-in fetch — zero external dependencies.
 */

declare class HttpClient {
  /** Retry/backoff settings derived from HttpClientConfig (maxRetries, retryBackoffMs). */
  private retryConfig;
  /**
   * @param config - Base URL, auth, timeout, retry and logging-hook settings
   */
  constructor(config?: HttpClientConfig);
  /** GET /v1/models — List available LLM models */
  listModels(): Promise<ModelList>;
  /**
   * POST /v1/chat/completions — Chat completion (non-streaming).
   *
   * @param request - The chat request (model, messages, temperature, etc.)
   * @param options - Optional per-request overrides (e.g. timeoutMs)
   * @returns The full chat completion response
   *
   * @example
   * ```typescript
   * const resp = await client.chat({
   *   model: 'qwen2.5:7b',
   *   messages: [{ role: 'user', content: 'Hello' }],
   * });
   * console.log(resp.choices[0]?.message?.content);
   * ```
   */
  chat(request: ChatRequest, options?: RequestOptions): Promise<ChatResponse>;
  /** Convenience: single message chat, returns content string */
  chatSimple(model: string, message: string): Promise<string>;
  /** POST /v1/chat/completions — SSE streaming, yields content chunks */
  chatStream(request: ChatRequest, options?: RequestOptions): AsyncGenerator<string>;
  /** POST /v1/embeddings — Generate text embeddings */
  embeddings(input: string | string[], model: string): Promise<EmbeddingResponse>;
  /**
   * POST /v1/upload — Upload document for RAG indexing.
   *
   * @param content - The document text to index
   * @param filename - Optional filename for the document
   * @param options - Optional per-request overrides (e.g. timeoutMs)
   * @returns Upload confirmation with document_id and chunks_count
   */
  ragUpload(content: string, filename?: string, options?: RequestOptions): Promise<RagUploadResponse>;
  /** POST /v1/query — Query RAG knowledge base */
  ragQuery(query: string, topK?: number, options?: {
    debug?: boolean;
    qualityMode?: string;
  }): Promise<RagQueryResponse>;
  /**
   * Create a stateful conversation that tracks message history automatically.
   *
   * @param options - Optional system prompt and model name
   * @returns A Conversation instance bound to this client
   *
   * @example
   * ```typescript
   * const conv = client.conversation({ system: 'You are a TypeScript expert' });
   * const reply = await conv.say('What are generics?');
   * ```
   */
  conversation(options?: {
    system?: string;
    model?: string;
  }): Conversation;
  /**
   * Upload text to RAG, then query it — one-shot Q&A over a document.
   *
   * @param question - The question to ask about the document
   * @param text - The document text to index and query
   * @param source - Optional source name (defaults to 'document')
   * @returns The answer string
   */
  ragAsk(question: string, text: string, source?: string): Promise<string>;
  /** POST /v1/orchestrator/execute — Route to best specialist agent */
  orchestrate(prompt: string): Promise<OrchestratorResponse>;
  /** POST /v1/orchestrator/parallel — Run all 4 specialists in parallel */
  orchestrateParallel(prompt: string): Promise<OrchestratorResponse>;
}
471
+
472
/**
 * Mock HTTP client for testing without a running server
 */

/**
 * Mock implementation of HttpClient for offline testing.
 * Records all method calls in the `calls` array for assertions.
 *
 * @example
 * ```typescript
 * const mock = new MockHttpClient();
 * const result = await mock.chat({ model: 'test', messages: [] });
 * console.log(mock.calls); // [{ method: 'chat', args: [...] }]
 * ```
 */
declare class MockHttpClient {
  /** Canned response for chat()/chatSimple(); configured via withChatResponse(). */
  private chatResponse;
  /** Canned model list for listModels(); configured via withModels(). */
  private modelList;
  /** Every method invocation, in call order, for test assertions. */
  readonly calls: Array<{
    method: string;
    args: unknown[];
  }>;
  /**
   * Configure the response returned by `chat()` and `chatSimple()`.
   *
   * @param response - The ChatResponse to return on subsequent chat calls
   * @returns `this` for method chaining
   *
   * @example
   * ```typescript
   * const mock = new MockHttpClient().withChatResponse({
   *   id: 'test-1', object: 'chat.completion', created: 0, model: 'test',
   *   choices: [{ index: 0, message: { role: 'assistant', content: 'Hi' }, finish_reason: 'stop' }],
   *   usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
   * });
   * ```
   */
  withChatResponse(response: ChatResponse): this;
  /**
   * Configure the model list returned by `listModels()`.
   *
   * @param models - Array of ModelInfo objects
   * @returns `this` for method chaining
   *
   * @example
   * ```typescript
   * const mock = new MockHttpClient().withModels([
   *   { id: 'gpt-4', object: 'model', created: 0, owned_by: 'openai' },
   * ]);
   * ```
   */
  withModels(models: ModelInfo[]): this;
  /** Mirrors HttpClient.listModels; returns the configured model list. */
  listModels(): Promise<ModelList>;
  /** Mirrors HttpClient.chat; returns the configured canned response. */
  chat(request: ChatRequest, _options?: RequestOptions): Promise<ChatResponse>;
  /** Mirrors HttpClient.chatSimple; derived from the configured chat response. */
  chatSimple(model: string, message: string): Promise<string>;
  /** Mirrors HttpClient.chatStream. */
  chatStream(_request: ChatRequest, _options?: RequestOptions): AsyncGenerator<string>;
  /** Mirrors HttpClient.embeddings. */
  embeddings(input: string | string[], model: string): Promise<EmbeddingResponse>;
  /** Mirrors HttpClient.ragUpload. */
  ragUpload(content: string, filename?: string, _options?: RequestOptions): Promise<RagUploadResponse>;
  /** Mirrors HttpClient.ragQuery. */
  ragQuery(query: string, topK?: number, options?: {
    debug?: boolean;
    qualityMode?: string;
  }): Promise<RagQueryResponse>;
  /** Mirrors HttpClient.orchestrate. */
  orchestrate(prompt: string): Promise<OrchestratorResponse>;
  /** Mirrors HttpClient.orchestrateParallel. */
  orchestrateParallel(prompt: string): Promise<OrchestratorResponse>;
  /** Mirrors HttpClient.conversation; returns a Conversation bound to this mock. */
  conversation(options?: {
    system?: string;
    model?: string;
  }): Conversation;
  /** Mirrors HttpClient.ragAsk. */
  ragAsk(question: string, text: string, source?: string): Promise<string>;
  /** Internal: append a { method, args } entry to `calls`. */
  private record;
}
543
+
544
/**
 * Custom errors for Wauldo SDK
 */
/**
 * Base error class for all Wauldo errors
 */
declare class WauldoError extends Error {
  /** Optional numeric error code supplied at construction, if any. */
  readonly code: number | undefined;
  /** Optional extra error payload supplied at construction. */
  readonly data: unknown;
  constructor(message: string, code?: number, data?: unknown);
  /** String representation of the error. */
  toString(): string;
}
/**
 * Thrown when connection to MCP server fails
 */
declare class ConnectionError extends WauldoError {
  constructor(message?: string);
}
/**
 * Thrown when server returns an error response
 */
declare class ServerError extends WauldoError {
  constructor(message: string, code?: number, data?: unknown);
}
/**
 * Thrown when input validation fails
 */
declare class ValidationError extends WauldoError {
  /** Name of the offending input field, when known. */
  readonly field: string | undefined;
  constructor(message: string, field?: string);
}
/**
 * Thrown when operation times out
 */
declare class TimeoutError extends WauldoError {
  /** The timeout that elapsed — presumably milliseconds; confirm with implementation. */
  readonly timeout: number | undefined;
  constructor(message?: string, timeout?: number);
}
/**
 * Thrown when requested tool is not available
 */
declare class ToolNotFoundError extends WauldoError {
  /** Name of the tool that was requested but not found. */
  readonly toolName: string;
  constructor(toolName: string);
}
589
+
590
/** Public API of the Wauldo SDK: runtime classes plus type-only exports. */
export { AgentClient, type CallToolResponse, type ChatChoice, type ChatClientLike, type ChatMessage, type ChatRequest, type ChatResponse, type ChatUsage, type Chunk, type ChunkResult, type ClientOptions, type Concept, type ConceptResult, ConnectionError, Conversation, type DetailLevel, type EmbeddingData, type EmbeddingResponse, type EmbeddingUsage, type GraphNode, HttpClient, type HttpClientConfig, type KnowledgeGraphResult, type LogLevel, MockHttpClient, type ModelInfo, type ModelList, type OrchestratorResponse, type PlanOptions, type PlanResult, type PlanStep, type RagAuditInfo, type RagQueryResponse, type RagSource, type RagUploadResponse, type ReasoningOptions, type ReasoningResult, type RequestOptions, type RetrievalResult, ServerError, type SourceType, TimeoutError, type ToolContent, type ToolDefinition, ToolNotFoundError, ValidationError, WauldoError };