@contractspec/module.ai-chat 3.2.0 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -18,10 +18,15 @@ This module provides a reusable AI chat system that can be integrated into CLI,
18
18
  - **Usage Tracking**: Integrated metering and cost tracking
19
19
  - **UI Components**: React components for chat interfaces
20
20
 
21
+ ## Bundle Spec Alignment (07_ai_native_chat)
22
+
23
+ This module aligns with `specs/contractspec_modules_bundle_spec_2026-03-08`. `useChat` and `ChatContainer` provide the assistant slot UI for bundle surfaces. `AiChatFeature` (key `ai-chat`, version `1.0.0`) matches `ModuleBundleSpec.requires`. The `tools` option on `UseChatOptions` is wired to `streamText`; use `requireApproval: true` for tools that need user confirmation (requires server route for full support).
24
+
21
25
  ## Related Packages
22
26
 
23
27
  - `@contractspec/lib.ai-providers` — Shared provider abstraction (types, factory, validation)
24
28
  - `@contractspec/lib.ai-agent` — Agent orchestration and tool execution
29
+ - `@contractspec/lib.surface-runtime` — Bundle surfaces (optional peer when used in PM workbench)
25
30
 
26
31
  ## Providers
27
32
 
@@ -94,6 +99,71 @@ function VibeCodingChat() {
94
99
  }
95
100
  ```
96
101
 
102
+ ### AI SDK Parity
103
+
104
+ This module aligns with the [Vercel AI SDK](https://sdk.vercel.ai) and AI Elements feature set:
105
+
106
+ - **fullStream**: Reasoning, tools, and sources from `streamText` fullStream
107
+ - **Tools**: Pass `tools` to `ChatServiceConfig` or `useChat`; supports `requireApproval` for approval workflow
108
+ - **Message parts**: `ChatMessage` renders reasoning (collapsible), sources (citations), and tool invocations
109
+ - **Markdown**: Inline links and code blocks in message content
110
+
111
+ ### Server Route (Full AI SDK + Tool Approval)
112
+
113
+ For full AI SDK compatibility including tool approval, use `createChatRoute` with `@ai-sdk/react` useChat:
114
+
115
+ ```ts
116
+ // app/api/chat/route.ts (Next.js App Router)
117
+ import { createChatRoute } from '@contractspec/module.ai-chat/core';
118
+ import { createProvider } from '@contractspec/lib.ai-providers';
119
+
120
+ const provider = createProvider({
121
+ provider: 'openai',
122
+ apiKey: process.env.OPENAI_API_KEY,
123
+ model: 'gpt-4o',
124
+ });
125
+
126
+ export const POST = createChatRoute({ provider });
127
+ ```
128
+
129
+ ```tsx
130
+ // Client: use @ai-sdk/react useChat with DefaultChatTransport
131
+ import { useChat } from '@ai-sdk/react';
132
+ import { DefaultChatTransport } from 'ai';
133
+ const { messages, sendMessage } = useChat({
134
+ transport: new DefaultChatTransport({ api: '/api/chat' }),
135
+ });
136
+ ```
137
+
138
+ The custom `useChat` from this module works with `ChatService` for simple deployments (no tools, no approval). For tools with `requireApproval`, use the server route pattern above.
139
+
140
+ ### Optional AI Elements
141
+
142
+ Apps can optionally use [AI Elements](https://elements.ai-sdk.dev) for UI. This module does not depend on ai-elements; provide an adapter from `ChatMessage` to `UIMessage` when integrating.
143
+
144
+ ### useCompletion (Non-Chat Completion)
145
+
146
+ For inline suggestions, single-prompt completion, or other non-conversational use cases:
147
+
148
+ ```tsx
149
+ import { useCompletion } from '@contractspec/module.ai-chat/presentation/hooks';
150
+ // or: import { useCompletion } from '@ai-sdk/react';
151
+
152
+ const { completion, complete, isLoading } = useCompletion({
153
+ api: '/api/completion',
154
+ });
155
+ ```
156
+
157
+ Use `createCompletionRoute` for the API endpoint (see `createChatRoute` pattern).
158
+
159
+ ### streamObject / generateObject
160
+
161
+ For structured output (schema-driven generation), use the AI SDK directly: `streamObject` and `generateObject` from `ai`. This module focuses on chat; add `useObject` or equivalent in a separate module when needed.
162
+
163
+ ### Voice / Speech
164
+
165
+ Speech Input, Transcription, Voice Selector, and related UI are planned as a separate submodule or feature flag. Track via roadmap.
166
+
97
167
  ## Architecture
98
168
 
99
169
  ```
@@ -135,6 +135,9 @@ class ChatService {
135
135
  systemPrompt;
136
136
  maxHistoryMessages;
137
137
  onUsage;
138
+ tools;
139
+ sendReasoning;
140
+ sendSources;
138
141
  constructor(config) {
139
142
  this.provider = config.provider;
140
143
  this.context = config.context;
@@ -142,6 +145,9 @@ class ChatService {
142
145
  this.systemPrompt = config.systemPrompt ?? DEFAULT_SYSTEM_PROMPT;
143
146
  this.maxHistoryMessages = config.maxHistoryMessages ?? 20;
144
147
  this.onUsage = config.onUsage;
148
+ this.tools = config.tools;
149
+ this.sendReasoning = config.sendReasoning ?? false;
150
+ this.sendSources = config.sendSources ?? false;
145
151
  }
146
152
  async send(options) {
147
153
  let conversation;
@@ -166,13 +172,14 @@ class ChatService {
166
172
  status: "completed",
167
173
  attachments: options.attachments
168
174
  });
169
- const prompt = this.buildPrompt(conversation, options);
175
+ const messages = this.buildMessages(conversation, options);
170
176
  const model = this.provider.getModel();
171
177
  try {
172
178
  const result = await generateText({
173
179
  model,
174
- prompt,
175
- system: this.systemPrompt
180
+ messages,
181
+ system: this.systemPrompt,
182
+ tools: this.tools
176
183
  });
177
184
  const assistantMessage = await this.store.appendMessage(conversation.id, {
178
185
  role: "assistant",
@@ -228,33 +235,106 @@ class ChatService {
228
235
  content: "",
229
236
  status: "streaming"
230
237
  });
231
- const prompt = this.buildPrompt(conversation, options);
238
+ const messages = this.buildMessages(conversation, options);
232
239
  const model = this.provider.getModel();
233
- const self = {
234
- systemPrompt: this.systemPrompt,
235
- store: this.store
236
- };
240
+ const systemPrompt = this.systemPrompt;
241
+ const tools = this.tools;
242
+ const store = this.store;
243
+ const onUsage = this.onUsage;
237
244
  async function* streamGenerator() {
238
245
  let fullContent = "";
246
+ let fullReasoning = "";
247
+ const toolCallsMap = new Map;
248
+ const sources = [];
239
249
  try {
240
250
  const result = streamText({
241
251
  model,
242
- prompt,
243
- system: self.systemPrompt
252
+ messages,
253
+ system: systemPrompt,
254
+ tools
244
255
  });
245
- for await (const chunk of result.textStream) {
246
- fullContent += chunk;
247
- yield { type: "text", content: chunk };
256
+ for await (const part of result.fullStream) {
257
+ if (part.type === "text-delta") {
258
+ const text = part.text ?? "";
259
+ if (text) {
260
+ fullContent += text;
261
+ yield { type: "text", content: text };
262
+ }
263
+ } else if (part.type === "reasoning-delta") {
264
+ const text = part.text ?? "";
265
+ if (text) {
266
+ fullReasoning += text;
267
+ yield { type: "reasoning", content: text };
268
+ }
269
+ } else if (part.type === "source") {
270
+ const src = part;
271
+ const source = {
272
+ id: src.id,
273
+ title: src.title ?? "",
274
+ url: src.url,
275
+ type: "web"
276
+ };
277
+ sources.push(source);
278
+ yield { type: "source", source };
279
+ } else if (part.type === "tool-call") {
280
+ const toolCall = {
281
+ id: part.toolCallId,
282
+ name: part.toolName,
283
+ args: part.input ?? {},
284
+ status: "running"
285
+ };
286
+ toolCallsMap.set(part.toolCallId, toolCall);
287
+ yield { type: "tool_call", toolCall };
288
+ } else if (part.type === "tool-result") {
289
+ const tc = toolCallsMap.get(part.toolCallId);
290
+ if (tc) {
291
+ tc.result = part.output;
292
+ tc.status = "completed";
293
+ }
294
+ yield {
295
+ type: "tool_result",
296
+ toolResult: {
297
+ toolCallId: part.toolCallId,
298
+ toolName: part.toolName,
299
+ result: part.output
300
+ }
301
+ };
302
+ } else if (part.type === "tool-error") {
303
+ const tc = toolCallsMap.get(part.toolCallId);
304
+ if (tc) {
305
+ tc.status = "error";
306
+ tc.error = part.error ?? "Tool execution failed";
307
+ }
308
+ } else if (part.type === "finish") {
309
+ const usage = part.usage;
310
+ const inputTokens = usage?.inputTokens ?? 0;
311
+ const outputTokens = usage?.completionTokens ?? 0;
312
+ await store.updateMessage(conversation.id, assistantMessage.id, {
313
+ content: fullContent,
314
+ status: "completed",
315
+ reasoning: fullReasoning || undefined,
316
+ sources: sources.length > 0 ? sources : undefined,
317
+ toolCalls: toolCallsMap.size > 0 ? Array.from(toolCallsMap.values()) : undefined,
318
+ usage: usage ? { inputTokens, outputTokens } : undefined
319
+ });
320
+ onUsage?.({ inputTokens, outputTokens });
321
+ yield {
322
+ type: "done",
323
+ usage: usage ? { inputTokens, outputTokens } : undefined
324
+ };
325
+ return;
326
+ }
248
327
  }
249
- await self.store.updateMessage(conversation.id, assistantMessage.id, {
328
+ await store.updateMessage(conversation.id, assistantMessage.id, {
250
329
  content: fullContent,
251
- status: "completed"
330
+ status: "completed",
331
+ reasoning: fullReasoning || undefined,
332
+ sources: sources.length > 0 ? sources : undefined,
333
+ toolCalls: toolCallsMap.size > 0 ? Array.from(toolCallsMap.values()) : undefined
252
334
  });
253
- yield {
254
- type: "done"
255
- };
335
+ yield { type: "done" };
256
336
  } catch (error) {
257
- await self.store.updateMessage(conversation.id, assistantMessage.id, {
337
+ await store.updateMessage(conversation.id, assistantMessage.id, {
258
338
  content: fullContent,
259
339
  status: "error",
260
340
  error: {
@@ -289,48 +369,128 @@ class ChatService {
289
369
  async deleteConversation(conversationId) {
290
370
  return this.store.delete(conversationId);
291
371
  }
292
- buildPrompt(conversation, options) {
293
- let prompt = "";
372
+ buildMessages(conversation, _options) {
294
373
  const historyStart = Math.max(0, conversation.messages.length - this.maxHistoryMessages);
374
+ const messages = [];
295
375
  for (let i = historyStart;i < conversation.messages.length; i++) {
296
376
  const msg = conversation.messages[i];
297
377
  if (!msg)
298
378
  continue;
299
- if (msg.role === "user" || msg.role === "assistant") {
300
- prompt += `${msg.role === "user" ? "User" : "Assistant"}: ${msg.content}
301
-
302
- `;
303
- }
304
- }
305
- let content = options.content;
306
- if (options.attachments?.length) {
307
- const attachmentInfo = options.attachments.map((a) => {
308
- if (a.type === "file" || a.type === "code") {
309
- return `
379
+ if (msg.role === "user") {
380
+ let content = msg.content;
381
+ if (msg.attachments?.length) {
382
+ const attachmentInfo = msg.attachments.map((a) => {
383
+ if (a.type === "file" || a.type === "code") {
384
+ return `
310
385
 
311
386
  ### ${a.name}
312
387
  \`\`\`
313
- ${a.content}
388
+ ${a.content ?? ""}
314
389
  \`\`\``;
315
- }
316
- return `
390
+ }
391
+ return `
317
392
 
318
393
  [Attachment: ${a.name}]`;
319
- }).join("");
320
- content += attachmentInfo;
394
+ }).join("");
395
+ content += attachmentInfo;
396
+ }
397
+ messages.push({ role: "user", content });
398
+ } else if (msg.role === "assistant") {
399
+ if (msg.toolCalls?.length) {
400
+ messages.push({
401
+ role: "assistant",
402
+ content: msg.content || "",
403
+ toolCalls: msg.toolCalls.map((tc) => ({
404
+ type: "tool-call",
405
+ toolCallId: tc.id,
406
+ toolName: tc.name,
407
+ args: tc.args
408
+ }))
409
+ });
410
+ messages.push({
411
+ role: "tool",
412
+ content: msg.toolCalls.map((tc) => ({
413
+ type: "tool-result",
414
+ toolCallId: tc.id,
415
+ toolName: tc.name,
416
+ output: tc.result
417
+ }))
418
+ });
419
+ } else {
420
+ messages.push({ role: "assistant", content: msg.content });
421
+ }
422
+ }
321
423
  }
322
- prompt += `User: ${content}
323
-
324
- Assistant:`;
325
- return prompt;
424
+ return messages;
326
425
  }
327
426
  }
328
427
  function createChatService(config) {
329
428
  return new ChatService(config);
330
429
  }
430
+ // src/core/create-chat-route.ts
431
+ import {
432
+ convertToModelMessages,
433
+ streamText as streamText2
434
+ } from "ai";
435
+ var DEFAULT_SYSTEM_PROMPT2 = `You are a helpful AI assistant.`;
436
+ function createChatRoute(options) {
437
+ const { provider, systemPrompt = DEFAULT_SYSTEM_PROMPT2, tools } = options;
438
+ return async (req) => {
439
+ if (req.method !== "POST") {
440
+ return new Response("Method not allowed", { status: 405 });
441
+ }
442
+ let body;
443
+ try {
444
+ body = await req.json();
445
+ } catch {
446
+ return new Response("Invalid JSON body", { status: 400 });
447
+ }
448
+ const messages = body.messages ?? [];
449
+ if (!Array.isArray(messages) || messages.length === 0) {
450
+ return new Response("messages array required", { status: 400 });
451
+ }
452
+ const model = provider.getModel();
453
+ const result = streamText2({
454
+ model,
455
+ messages: convertToModelMessages(messages),
456
+ system: systemPrompt,
457
+ tools
458
+ });
459
+ return result.toUIMessageStreamResponse();
460
+ };
461
+ }
462
+ // src/core/create-completion-route.ts
463
+ import { streamText as streamText3 } from "ai";
464
+ function createCompletionRoute(options) {
465
+ const { provider, systemPrompt } = options;
466
+ return async (req) => {
467
+ if (req.method !== "POST") {
468
+ return new Response("Method not allowed", { status: 405 });
469
+ }
470
+ let body;
471
+ try {
472
+ body = await req.json();
473
+ } catch {
474
+ return new Response("Invalid JSON body", { status: 400 });
475
+ }
476
+ const prompt = body.prompt ?? "";
477
+ if (!prompt || typeof prompt !== "string") {
478
+ return new Response("prompt string required", { status: 400 });
479
+ }
480
+ const model = provider.getModel();
481
+ const result = streamText3({
482
+ model,
483
+ prompt,
484
+ system: systemPrompt
485
+ });
486
+ return result.toTextStreamResponse();
487
+ };
488
+ }
331
489
  export {
332
490
  createInMemoryConversationStore,
491
+ createCompletionRoute,
333
492
  createChatService,
493
+ createChatRoute,
334
494
  InMemoryConversationStore,
335
495
  ChatService
336
496
  };