codeblog-app 2.2.6 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/package.json +9 -7
  2. package/src/ai/__tests__/chat.test.ts +11 -2
  3. package/src/ai/__tests__/compat.test.ts +46 -0
  4. package/src/ai/__tests__/home.ai-stream.integration.test.ts +77 -0
  5. package/src/ai/__tests__/provider-registry.test.ts +61 -0
  6. package/src/ai/__tests__/provider.test.ts +58 -18
  7. package/src/ai/__tests__/stream-events.test.ts +152 -0
  8. package/src/ai/chat.ts +200 -88
  9. package/src/ai/configure.ts +13 -4
  10. package/src/ai/models.ts +26 -0
  11. package/src/ai/provider-registry.ts +150 -0
  12. package/src/ai/provider.ts +99 -137
  13. package/src/ai/stream-events.ts +64 -0
  14. package/src/ai/tools.ts +10 -6
  15. package/src/ai/types.ts +105 -0
  16. package/src/auth/index.ts +3 -1
  17. package/src/auth/oauth.ts +17 -2
  18. package/src/cli/__tests__/commands.test.ts +6 -2
  19. package/src/cli/cmd/ai.ts +10 -0
  20. package/src/cli/cmd/setup.ts +275 -5
  21. package/src/cli/ui.ts +131 -24
  22. package/src/config/index.ts +38 -1
  23. package/src/index.ts +4 -1
  24. package/src/mcp/__tests__/client.test.ts +2 -2
  25. package/src/mcp/__tests__/e2e.ts +10 -6
  26. package/src/mcp/client.ts +33 -63
  27. package/src/storage/chat.ts +3 -1
  28. package/src/tui/__tests__/input-intent.test.ts +27 -0
  29. package/src/tui/__tests__/stream-assembler.test.ts +33 -0
  30. package/src/tui/ai-stream.ts +28 -0
  31. package/src/tui/app.tsx +27 -1
  32. package/src/tui/commands.ts +41 -7
  33. package/src/tui/context/theme.tsx +2 -1
  34. package/src/tui/input-intent.ts +26 -0
  35. package/src/tui/routes/home.tsx +590 -190
  36. package/src/tui/routes/setup.tsx +20 -8
  37. package/src/tui/stream-assembler.ts +49 -0
  38. package/src/util/log.ts +3 -1
  39. package/tsconfig.json +1 -1
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "$schema": "https://json.schemastore.org/package.json",
3
3
  "name": "codeblog-app",
4
- "version": "2.2.6",
4
+ "version": "2.3.1",
5
5
  "description": "CLI client for CodeBlog — the forum where AI writes the posts",
6
6
  "type": "module",
7
7
  "license": "MIT",
@@ -49,18 +49,20 @@
49
49
  "./*": "./src/*.ts"
50
50
  },
51
51
  "devDependencies": {
52
+ "@babel/core": "^7.28.4",
52
53
  "@tsconfig/bun": "1.0.9",
54
+ "@types/babel__core": "^7.20.5",
53
55
  "@types/bun": "1.3.9",
54
56
  "@types/yargs": "17.0.33",
55
57
  "drizzle-kit": "1.0.0-beta.12-a5629fb",
56
58
  "typescript": "5.8.2"
57
59
  },
58
60
  "optionalDependencies": {
59
- "codeblog-app-darwin-arm64": "2.2.6",
60
- "codeblog-app-darwin-x64": "2.2.6",
61
- "codeblog-app-linux-arm64": "2.2.6",
62
- "codeblog-app-linux-x64": "2.2.6",
63
- "codeblog-app-windows-x64": "2.2.6"
61
+ "codeblog-app-darwin-arm64": "2.3.1",
62
+ "codeblog-app-darwin-x64": "2.3.1",
63
+ "codeblog-app-linux-arm64": "2.3.1",
64
+ "codeblog-app-linux-x64": "2.3.1",
65
+ "codeblog-app-windows-x64": "2.3.1"
64
66
  },
65
67
  "dependencies": {
66
68
  "@ai-sdk/anthropic": "^3.0.44",
@@ -71,7 +73,7 @@
71
73
  "@opentui/core": "^0.1.79",
72
74
  "@opentui/solid": "^0.1.79",
73
75
  "ai": "^6.0.86",
74
- "codeblog-mcp": "^2.1.5",
76
+ "codeblog-mcp": "2.2.0",
75
77
  "drizzle-orm": "1.0.0-beta.12-a5629fb",
76
78
  "fuzzysort": "^3.1.0",
77
79
  "hono": "4.10.7",
@@ -40,7 +40,7 @@ function makeToolCallStreamResult() {
40
40
  }
41
41
  }
42
42
 
43
- let streamFactory = () => makeStreamResult()
43
+ let streamFactory: () => { fullStream: AsyncGenerator<any, void, unknown> } = () => makeStreamResult()
44
44
 
45
45
  mock.module("ai", () => ({
46
46
  streamText: () => streamFactory(),
@@ -52,6 +52,15 @@ mock.module("ai", () => ({
52
52
  mock.module("../provider", () => ({
53
53
  AIProvider: {
54
54
  getModel: mock(() => Promise.resolve({ id: "test-model" })),
55
+ resolveModelCompat: mock(() => Promise.resolve({
56
+ providerID: "openai-compatible",
57
+ modelID: "test-model",
58
+ api: "openai-compatible",
59
+ compatProfile: "openai-compatible",
60
+ cacheKey: "openai-compatible:openai-compatible",
61
+ stripParallelToolCalls: true,
62
+ normalizeToolSchema: true,
63
+ })),
55
64
  DEFAULT_MODEL: "test-model",
56
65
  },
57
66
  }))
@@ -69,7 +78,7 @@ describe("AIChat", () => {
69
78
  // ---------------------------------------------------------------------------
70
79
 
71
80
  test("Message type accepts user, assistant, system roles", () => {
72
- const messages: AIChat.Message[] = [
81
+ const messages = [
73
82
  { role: "user", content: "hello" },
74
83
  { role: "assistant", content: "hi" },
75
84
  { role: "system", content: "you are a bot" },
@@ -0,0 +1,46 @@
1
+ import { describe, test, expect } from "bun:test"
2
+ import { patchRequestByCompat, resolveCompat } from "../types"
3
+
4
+ describe("AI Compat", () => {
5
+ test("resolveCompat uses openai-compatible preset by default", () => {
6
+ const compat = resolveCompat({
7
+ providerID: "openai-compatible",
8
+ modelID: "deepseek-chat",
9
+ })
10
+ expect(compat.api).toBe("openai-compatible")
11
+ expect(compat.stripParallelToolCalls).toBe(true)
12
+ expect(compat.normalizeToolSchema).toBe(true)
13
+ })
14
+
15
+ test("resolveCompat honors provider config override", () => {
16
+ const compat = resolveCompat({
17
+ providerID: "openai-compatible",
18
+ modelID: "claude-sonnet-4-20250514",
19
+ providerConfig: { api_key: "x", api: "anthropic", compat_profile: "anthropic" },
20
+ })
21
+ expect(compat.api).toBe("anthropic")
22
+ expect(compat.compatProfile).toBe("anthropic")
23
+ expect(compat.stripParallelToolCalls).toBe(false)
24
+ })
25
+
26
+ test("patchRequestByCompat removes parallel_tool_calls and fixes schema", () => {
27
+ const compat = resolveCompat({
28
+ providerID: "openai-compatible",
29
+ modelID: "qwen3-coder",
30
+ })
31
+ const body = {
32
+ parallel_tool_calls: true,
33
+ tools: [
34
+ {
35
+ function: {
36
+ parameters: {},
37
+ },
38
+ },
39
+ ],
40
+ }
41
+ const patched = patchRequestByCompat(compat, body)
42
+ expect(patched.parallel_tool_calls).toBeUndefined()
43
+ expect(patched.tools[0]!.function.parameters.type).toBe("object")
44
+ expect(patched.tools[0]!.function.parameters.properties).toEqual({})
45
+ })
46
+ })
@@ -0,0 +1,77 @@
1
+ import { describe, test, expect, mock } from "bun:test"
2
+ import { resolveAssistantContent } from "../../tui/ai-stream"
3
+
4
+ const streamTextMock = mock(() => ({
5
+ fullStream: (async function* () {
6
+ yield { type: "tool-call", toolName: "scan_sessions", args: { limit: 1 } }
7
+ yield { type: "tool-result", toolName: "scan_sessions", result: { sessions: [{ id: "s1" }] } }
8
+ })(),
9
+ }))
10
+
11
+ mock.module("ai", () => ({
12
+ streamText: streamTextMock,
13
+ stepCountIs: (n: number) => ({ type: "step-count", count: n }),
14
+ tool: (config: any) => config,
15
+ jsonSchema: (schema: any) => schema,
16
+ }))
17
+
18
+ mock.module("../../mcp/client", () => ({
19
+ McpBridge: {
20
+ listTools: mock(async () => ({ tools: [] })),
21
+ callToolJSON: mock(async () => ({})),
22
+ },
23
+ }))
24
+
25
+ mock.module("../provider", () => ({
26
+ AIProvider: {
27
+ getModel: mock(async () => ({ id: "test-model" })),
28
+ resolveModelCompat: mock(async () => ({
29
+ providerID: "openai-compatible",
30
+ modelID: "test-model",
31
+ api: "openai-compatible",
32
+ compatProfile: "openai-compatible",
33
+ cacheKey: "openai-compatible:openai-compatible",
34
+ stripParallelToolCalls: true,
35
+ normalizeToolSchema: true,
36
+ })),
37
+ DEFAULT_MODEL: "test-model",
38
+ },
39
+ }))
40
+
41
+ const { AIChat } = await import("../chat")
42
+
43
+ describe("home ai stream integration (equivalent)", () => {
44
+ test("tool-only run stays single-stream and produces structured fallback content", async () => {
45
+ let finishText = ""
46
+ await AIChat.stream(
47
+ [{ role: "user", content: "scan now" }],
48
+ {
49
+ onFinish: (text) => { finishText = text },
50
+ },
51
+ )
52
+
53
+ expect(streamTextMock).toHaveBeenCalledTimes(1)
54
+ expect(finishText).toBe("(No response)")
55
+
56
+ const content = resolveAssistantContent({
57
+ finalText: "",
58
+ aborted: false,
59
+ abortByUser: false,
60
+ hasToolCalls: true,
61
+ toolResults: [{ name: "scan_sessions", result: "{\"sessions\":[{\"id\":\"s1\"}]}" }],
62
+ })
63
+ expect(content).toContain("Tool execution completed:")
64
+ expect(content).toContain("scan_sessions")
65
+ })
66
+
67
+ test("abort state is rendered consistently", () => {
68
+ const content = resolveAssistantContent({
69
+ finalText: "partial answer",
70
+ aborted: true,
71
+ abortByUser: true,
72
+ hasToolCalls: false,
73
+ toolResults: [],
74
+ })
75
+ expect(content).toContain("(interrupted)")
76
+ })
77
+ })
@@ -0,0 +1,61 @@
1
+ import { describe, test, expect, beforeEach } from "bun:test"
2
+ import { routeModel } from "../provider-registry"
3
+
4
+ describe("provider-registry", () => {
5
+ beforeEach(() => {
6
+ delete process.env.ANTHROPIC_API_KEY
7
+ delete process.env.OPENAI_API_KEY
8
+ delete process.env.GOOGLE_GENERATIVE_AI_API_KEY
9
+ delete process.env.OPENAI_COMPATIBLE_API_KEY
10
+ })
11
+
12
+ test("routes explicit provider/model first", async () => {
13
+ const route = await routeModel("openai/gpt-4o", {
14
+ api_url: "https://codeblog.ai",
15
+ providers: {
16
+ openai: { api_key: "sk-openai" },
17
+ },
18
+ })
19
+ expect(route.providerID).toBe("openai")
20
+ expect(route.modelID).toBe("gpt-4o")
21
+ })
22
+
23
+ test("routes by default_provider for unknown model", async () => {
24
+ const route = await routeModel("deepseek-chat", {
25
+ api_url: "https://codeblog.ai",
26
+ default_provider: "openai-compatible",
27
+ providers: {
28
+ "openai-compatible": {
29
+ api_key: "sk-compat",
30
+ base_url: "https://api.deepseek.com",
31
+ api: "openai-compatible",
32
+ compat_profile: "openai-compatible",
33
+ },
34
+ },
35
+ })
36
+ expect(route.providerID).toBe("openai-compatible")
37
+ expect(route.modelID).toBe("deepseek-chat")
38
+ })
39
+
40
+ test("unknown model throws deterministic actionable error", async () => {
41
+ await expect(routeModel("unknown-model-x", {
42
+ api_url: "https://codeblog.ai",
43
+ providers: {
44
+ openai: { api_key: "sk-openai" },
45
+ },
46
+ })).rejects.toThrow('Unknown model "unknown-model-x"')
47
+ })
48
+
49
+ test("multi-provider routing is deterministic by prefix", async () => {
50
+ const route = await routeModel("gpt-4o-mini", {
51
+ api_url: "https://codeblog.ai",
52
+ default_provider: "openai-compatible",
53
+ providers: {
54
+ openai: { api_key: "sk-openai" },
55
+ "openai-compatible": { api_key: "sk-compat", base_url: "https://api.deepseek.com" },
56
+ },
57
+ })
58
+ expect(route.providerID).toBe("openai")
59
+ expect(route.modelID).toBe("gpt-4o-mini")
60
+ })
61
+ })
@@ -1,26 +1,56 @@
1
- import { describe, test, expect, beforeEach, afterEach } from "bun:test"
2
- import { AIProvider } from "../provider"
1
+ import fs from "fs/promises"
2
+ import os from "os"
3
+ import path from "path"
4
+ import { describe, test, expect, beforeAll, beforeEach, afterEach, afterAll } from "bun:test"
5
+ import { Config } from "../../config"
3
6
 
4
7
  describe("AIProvider", () => {
5
8
  const originalEnv = { ...process.env }
6
-
7
- beforeEach(() => {
8
- // Clean up env vars before each test
9
- delete process.env.ANTHROPIC_API_KEY
10
- delete process.env.ANTHROPIC_AUTH_TOKEN
11
- delete process.env.OPENAI_API_KEY
12
- delete process.env.GOOGLE_GENERATIVE_AI_API_KEY
13
- delete process.env.GOOGLE_API_KEY
14
- delete process.env.OPENAI_COMPATIBLE_API_KEY
15
- delete process.env.ANTHROPIC_BASE_URL
16
- delete process.env.OPENAI_BASE_URL
17
- delete process.env.OPENAI_API_BASE
18
- delete process.env.GOOGLE_API_BASE_URL
19
- delete process.env.OPENAI_COMPATIBLE_BASE_URL
9
+ const testHome = path.join(os.tmpdir(), `codeblog-provider-test-${process.pid}-${Date.now()}`)
10
+ const configFile = path.join(testHome, ".config", "codeblog", "config.json")
11
+ const xdgData = path.join(testHome, ".local", "share")
12
+ const xdgCache = path.join(testHome, ".cache")
13
+ const xdgConfig = path.join(testHome, ".config")
14
+ const xdgState = path.join(testHome, ".local", "state")
15
+ const envKeys = [
16
+ "ANTHROPIC_API_KEY",
17
+ "ANTHROPIC_AUTH_TOKEN",
18
+ "OPENAI_API_KEY",
19
+ "GOOGLE_GENERATIVE_AI_API_KEY",
20
+ "GOOGLE_API_KEY",
21
+ "OPENAI_COMPATIBLE_API_KEY",
22
+ "ANTHROPIC_BASE_URL",
23
+ "OPENAI_BASE_URL",
24
+ "OPENAI_API_BASE",
25
+ "GOOGLE_API_BASE_URL",
26
+ "OPENAI_COMPATIBLE_BASE_URL",
27
+ ]
28
+ let AIProvider: (typeof import("../provider"))["AIProvider"]
29
+
30
+ beforeAll(async () => {
31
+ process.env.CODEBLOG_TEST_HOME = testHome
32
+ process.env.XDG_DATA_HOME = xdgData
33
+ process.env.XDG_CACHE_HOME = xdgCache
34
+ process.env.XDG_CONFIG_HOME = xdgConfig
35
+ process.env.XDG_STATE_HOME = xdgState
36
+ process.env.CODEBLOG_AI_PROVIDER_REGISTRY_V2 = "0"
37
+ await fs.mkdir(path.dirname(configFile), { recursive: true })
38
+ await fs.writeFile(configFile, "{}\n")
39
+ ;({ AIProvider } = await import("../provider"))
40
+ })
41
+
42
+ beforeEach(async () => {
43
+ process.env.CODEBLOG_TEST_HOME = testHome
44
+ process.env.XDG_DATA_HOME = xdgData
45
+ process.env.XDG_CACHE_HOME = xdgCache
46
+ process.env.XDG_CONFIG_HOME = xdgConfig
47
+ process.env.XDG_STATE_HOME = xdgState
48
+ process.env.CODEBLOG_AI_PROVIDER_REGISTRY_V2 = "0"
49
+ for (const key of envKeys) delete process.env[key]
50
+ await fs.writeFile(configFile, "{}\n")
20
51
  })
21
52
 
22
53
  afterEach(() => {
23
- // Restore original env
24
54
  for (const key of Object.keys(process.env)) {
25
55
  if (!(key in originalEnv)) delete process.env[key]
26
56
  }
@@ -29,6 +59,10 @@ describe("AIProvider", () => {
29
59
  }
30
60
  })
31
61
 
62
+ afterAll(async () => {
63
+ await fs.rm(testHome, { recursive: true, force: true })
64
+ })
65
+
32
66
  // ---------------------------------------------------------------------------
33
67
  // BUILTIN_MODELS
34
68
  // ---------------------------------------------------------------------------
@@ -171,7 +205,13 @@ describe("AIProvider", () => {
171
205
  // ---------------------------------------------------------------------------
172
206
 
173
207
  test("getModel throws when no API key for builtin model", async () => {
174
- expect(AIProvider.getModel("gpt-4o")).rejects.toThrow("No API key for openai")
208
+ const load = Config.load
209
+ Config.load = async () => ({ api_url: "https://codeblog.ai" })
210
+ try {
211
+ await expect(AIProvider.getModel("gpt-4o")).rejects.toThrow("No API key for openai")
212
+ } finally {
213
+ Config.load = load
214
+ }
175
215
  })
176
216
 
177
217
  test("getModel falls back to provider with base_url for unknown model", async () => {
@@ -0,0 +1,152 @@
1
+ import { describe, test, expect, beforeEach, mock } from "bun:test"
2
+
3
+ const mockListTools = mock(async () => ({ tools: [] }))
4
+
5
+ mock.module("../../mcp/client", () => ({
6
+ McpBridge: {
7
+ listTools: mockListTools,
8
+ callToolJSON: mock(async () => ({})),
9
+ },
10
+ }))
11
+
12
+ let streamFactory: () => { fullStream: AsyncGenerator<any, void, unknown> } = () => ({
13
+ fullStream: (async function* () {
14
+ yield { type: "text-delta", textDelta: "hello" }
15
+ })(),
16
+ })
17
+
18
+ mock.module("ai", () => ({
19
+ streamText: () => streamFactory(),
20
+ stepCountIs: (n: number) => ({ type: "step-count", count: n }),
21
+ tool: (config: any) => config,
22
+ jsonSchema: (schema: any) => schema,
23
+ }))
24
+
25
+ mock.module("../provider", () => ({
26
+ AIProvider: {
27
+ getModel: mock(async () => ({ id: "test-model" })),
28
+ resolveModelCompat: mock(async () => ({
29
+ providerID: "openai-compatible",
30
+ modelID: "test-model",
31
+ api: "openai-compatible",
32
+ compatProfile: "openai-compatible",
33
+ cacheKey: "openai-compatible:openai-compatible",
34
+ stripParallelToolCalls: true,
35
+ normalizeToolSchema: true,
36
+ })),
37
+ DEFAULT_MODEL: "test-model",
38
+ },
39
+ }))
40
+
41
+ const { AIChat } = await import("../chat")
42
+
43
+ describe("stream events", () => {
44
+ beforeEach(() => {
45
+ mockListTools.mockClear()
46
+ })
47
+
48
+ test("emits ordered run-start -> deltas -> run-finish sequence", async () => {
49
+ streamFactory = () => ({
50
+ fullStream: (async function* () {
51
+ yield { type: "text-delta", textDelta: "Hello " }
52
+ yield { type: "text-delta", textDelta: "World" }
53
+ })(),
54
+ })
55
+
56
+ const events: any[] = []
57
+ for await (const event of AIChat.streamEvents([{ role: "user", content: "hi" }])) {
58
+ events.push(event)
59
+ }
60
+
61
+ expect(events.map((e: any) => e.type)).toEqual(["run-start", "text-delta", "text-delta", "run-finish"])
62
+ expect(events.every((e: any) => e.runId === events[0]!.runId)).toBe(true)
63
+ expect(events.map((e: any) => e.seq)).toEqual([1, 2, 3, 4])
64
+ })
65
+
66
+ test("tool-start and tool-result are paired", async () => {
67
+ streamFactory = () => ({
68
+ fullStream: (async function* () {
69
+ yield { type: "tool-call", toolName: "scan_sessions", args: { limit: 5 } }
70
+ yield { type: "tool-result", toolName: "scan_sessions", result: { sessions: [] } }
71
+ yield { type: "text-delta", textDelta: "done" }
72
+ })(),
73
+ })
74
+
75
+ const starts: string[] = []
76
+ const results: string[] = []
77
+ const ids: Array<{ start?: string; result?: string }> = []
78
+ for await (const event of AIChat.streamEvents([{ role: "user", content: "scan" }])) {
79
+ if (event.type === "tool-start") {
80
+ starts.push(event.name)
81
+ ids.push({ start: event.callID })
82
+ }
83
+ if (event.type === "tool-result") {
84
+ results.push(event.name)
85
+ ids[0] = { ...ids[0], result: event.callID }
86
+ }
87
+ }
88
+
89
+ expect(starts).toEqual(["scan_sessions"])
90
+ expect(results).toEqual(["scan_sessions"])
91
+ expect(ids[0]?.start).toBe(ids[0]?.result)
92
+ })
93
+
94
+ test("abort keeps lifecycle consistent and marks run-finish.aborted", async () => {
95
+ streamFactory = () => ({
96
+ fullStream: (async function* () {
97
+ yield { type: "text-delta", textDelta: "partial" }
98
+ await Bun.sleep(40)
99
+ yield { type: "text-delta", textDelta: " late" }
100
+ })(),
101
+ })
102
+
103
+ const ctrl = new AbortController()
104
+ let seenFinish = false
105
+ for await (const event of AIChat.streamEvents([{ role: "user", content: "stop test" }], undefined, ctrl.signal)) {
106
+ if (event.type === "text-delta") ctrl.abort()
107
+ if (event.type === "run-finish") {
108
+ seenFinish = true
109
+ expect(event.aborted).toBe(true)
110
+ }
111
+ }
112
+
113
+ expect(seenFinish).toBe(true)
114
+ })
115
+
116
+ test("error part is surfaced and lifecycle still finishes", async () => {
117
+ streamFactory = () => ({
118
+ fullStream: (async function* () {
119
+ yield { type: "error", error: new Error("boom") }
120
+ })(),
121
+ })
122
+
123
+ const types: string[] = []
124
+ for await (const event of AIChat.streamEvents([{ role: "user", content: "error path" }])) {
125
+ types.push(event.type)
126
+ if (event.type === "error") expect(event.error.message).toBe("boom")
127
+ }
128
+
129
+ expect(types).toContain("error")
130
+ expect(types[types.length - 1]).toBe("run-finish")
131
+ })
132
+
133
+ test("tool timeout emits error and still reaches run-finish", async () => {
134
+ streamFactory = () => ({
135
+ fullStream: (async function* () {
136
+ yield { type: "tool-call", toolName: "scan_sessions", args: { limit: 1 } }
137
+ await Bun.sleep(40)
138
+ })(),
139
+ })
140
+
141
+ const seen: string[] = []
142
+ for await (const event of AIChat.streamEvents([{ role: "user", content: "scan" }], undefined, undefined, {
143
+ toolTimeoutMs: 15,
144
+ idleTimeoutMs: 1000,
145
+ })) {
146
+ seen.push(event.type)
147
+ }
148
+
149
+ expect(seen).toContain("error")
150
+ expect(seen[seen.length - 1]).toBe("run-finish")
151
+ })
152
+ })