codeblog-app 1.6.3 → 1.6.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/src/ai/chat.ts +50 -78
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "$schema": "https://json.schemastore.org/package.json",
3
3
  "name": "codeblog-app",
4
- "version": "1.6.3",
4
+ "version": "1.6.4",
5
5
  "description": "CLI client for CodeBlog — the forum where AI writes the posts",
6
6
  "type": "module",
7
7
  "license": "MIT",
package/src/ai/chat.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { streamText, type CoreMessage } from "ai"
1
+ import { streamText, type CoreMessage, type CoreToolMessage, type CoreAssistantMessage } from "ai"
2
2
  import { AIProvider } from "./provider"
3
3
  import { chatTools } from "./tools"
4
4
  import { Log } from "../util/log"
@@ -35,92 +35,64 @@ export namespace AIChat {
35
35
  onToolResult?: (name: string, result: unknown) => void
36
36
  }
37
37
 
38
+ // Convert our simple messages to CoreMessage[] for AI SDK
39
+ // Only user/assistant text messages — tool history is handled by maxSteps internally
40
+ function toCoreMessages(messages: Message[]): CoreMessage[] {
41
+ return messages
42
+ .filter((m) => m.role === "user" || m.role === "assistant")
43
+ .map((m) => ({ role: m.role as "user" | "assistant", content: m.content }))
44
+ }
45
+
38
46
  export async function stream(messages: Message[], callbacks: StreamCallbacks, modelID?: string, signal?: AbortSignal) {
39
47
  const model = await AIProvider.getModel(modelID)
40
48
  log.info("streaming", { model: modelID || AIProvider.DEFAULT_MODEL, messages: messages.length })
41
49
 
42
- const history: CoreMessage[] = messages.map((m) => ({ role: m.role, content: m.content }))
50
+ const coreMessages = toCoreMessages(messages)
43
51
  let full = ""
44
52
 
45
- // Manual multi-step loop (maxSteps=1 per call, we handle tool follow-up ourselves)
46
- // This is needed because openai-compatible providers don't support automatic multi-step
47
- for (let step = 0; step < 5; step++) {
48
- if (signal?.aborted) break
49
-
50
- const result = streamText({
51
- model,
52
- system: SYSTEM_PROMPT,
53
- messages: history,
54
- tools: chatTools,
55
- maxSteps: 1,
56
- abortSignal: signal,
57
- })
58
-
59
- const calls: Array<{ id: string; name: string; input: unknown; output: unknown }> = []
60
-
61
- try {
62
- for await (const part of result.fullStream) {
63
- if (signal?.aborted) break
64
- switch (part.type) {
65
- case "text-delta": {
66
- const delta = (part as any).text ?? (part as any).textDelta ?? ""
67
- if (delta) { full += delta; callbacks.onToken?.(delta) }
68
- break
69
- }
70
- case "tool-call": {
71
- const input = (part as any).input ?? (part as any).args
72
- callbacks.onToolCall?.(part.toolName, input)
73
- calls.push({ id: part.toolCallId, name: part.toolName, input, output: undefined })
74
- break
75
- }
76
- case "tool-result": {
77
- const output = (part as any).output ?? (part as any).result ?? {}
78
- const name = (part as any).toolName
79
- callbacks.onToolResult?.(name, output)
80
- const match = calls.find((c) => c.name === name && c.output === undefined)
81
- if (match) match.output = output
82
- break
83
- }
84
- case "error": {
85
- const msg = part.error instanceof Error ? part.error.message : String(part.error)
86
- log.error("stream part error", { error: msg })
87
- callbacks.onError?.(part.error instanceof Error ? part.error : new Error(msg))
88
- break
89
- }
53
+ const result = streamText({
54
+ model,
55
+ system: SYSTEM_PROMPT,
56
+ messages: coreMessages,
57
+ tools: chatTools,
58
+ maxSteps: 5,
59
+ abortSignal: signal,
60
+ })
61
+
62
+ try {
63
+ for await (const part of result.fullStream) {
64
+ if (signal?.aborted) break
65
+ switch (part.type) {
66
+ case "text-delta": {
67
+ const delta = (part as any).text ?? (part as any).textDelta ?? ""
68
+ if (delta) { full += delta; callbacks.onToken?.(delta) }
69
+ break
70
+ }
71
+ case "tool-call": {
72
+ const input = (part as any).input ?? (part as any).args
73
+ callbacks.onToolCall?.(part.toolName, input)
74
+ break
75
+ }
76
+ case "tool-result": {
77
+ const output = (part as any).output ?? (part as any).result ?? {}
78
+ const name = (part as any).toolName
79
+ callbacks.onToolResult?.(name, output)
80
+ break
81
+ }
82
+ case "error": {
83
+ const msg = part.error instanceof Error ? part.error.message : String(part.error)
84
+ log.error("stream part error", { error: msg })
85
+ callbacks.onError?.(part.error instanceof Error ? part.error : new Error(msg))
86
+ break
90
87
  }
91
88
  }
92
- } catch (err) {
93
- const error = err instanceof Error ? err : new Error(String(err))
94
- log.error("stream error", { error: error.message })
95
- if (callbacks.onError) callbacks.onError(error)
96
- else throw error
97
- return full
98
89
  }
99
-
100
- // No tool calls this step done
101
- if (calls.length === 0) break
102
-
103
- // Append assistant + tool messages in AI SDK v6 format for next round
104
- history.push({
105
- role: "assistant",
106
- content: calls.map((c) => ({
107
- type: "tool-call" as const,
108
- toolCallId: c.id,
109
- toolName: c.name,
110
- input: c.input ?? {},
111
- })),
112
- } as any)
113
- history.push({
114
- role: "tool",
115
- content: calls.map((c) => ({
116
- type: "tool-result" as const,
117
- toolCallId: c.id,
118
- toolName: c.name,
119
- output: { type: "json", value: c.output ?? {} },
120
- })),
121
- } as any)
122
-
123
- log.info("tool round done, sending follow-up", { step, tools: calls.map((c) => c.name) })
90
+ } catch (err) {
91
+ const error = err instanceof Error ? err : new Error(String(err))
92
+ log.error("stream error", { error: error.message })
93
+ if (callbacks.onError) callbacks.onError(error)
94
+ else throw error
95
+ return full
124
96
  }
125
97
 
126
98
  callbacks.onFinish?.(full || "(No response)")