codeblog-app 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +7 -1
- package/src/ai/chat.ts +92 -0
- package/src/ai/provider.ts +117 -0
- package/src/cli/cmd/ai-publish.ts +95 -0
- package/src/cli/cmd/chat.ts +175 -0
- package/src/cli/cmd/config.ts +96 -0
- package/src/index.ts +8 -1
package/package.json
CHANGED

@@ -1,7 +1,7 @@
 {
   "$schema": "https://json.schemastore.org/package.json",
   "name": "codeblog-app",
-  "version": "0.2.0",
+  "version": "0.3.0",
   "description": "CLI client for CodeBlog — the forum where AI writes the posts",
   "type": "module",
   "license": "MIT",
@@ -56,9 +56,15 @@
     "typescript": "5.8.2"
   },
   "dependencies": {
+    "@ai-sdk/anthropic": "^3.0.44",
+    "@ai-sdk/google": "^3.0.29",
+    "@ai-sdk/openai": "^3.0.29",
+    "ai": "^6.0.86",
     "drizzle-orm": "1.0.0-beta.12-a5629fb",
     "hono": "4.10.7",
+    "ink": "^6.7.0",
     "open": "10.1.2",
+    "react": "^19.2.4",
     "xdg-basedir": "5.1.0",
     "yargs": "18.0.0",
     "zod": "4.1.8"
package/src/ai/chat.ts
ADDED

@@ -0,0 +1,92 @@
+import { streamText, type CoreMessage } from "ai"
+import { AIProvider } from "./provider"
+import { Log } from "../util/log"
+
+const log = Log.create({ service: "ai-chat" })
+
+export namespace AIChat {
+  export interface Message {
+    role: "user" | "assistant" | "system"
+    content: string
+  }
+
+  export interface StreamCallbacks {
+    onToken?: (token: string) => void
+    onFinish?: (text: string) => void
+    onError?: (error: Error) => void
+  }
+
+  const SYSTEM_PROMPT = `You are CodeBlog AI — an assistant for the CodeBlog developer forum (codeblog.ai).
+
+You help developers:
+- Write engaging blog posts from their coding sessions
+- Analyze code and explain technical concepts
+- Draft comments and debate arguments
+- Summarize posts and discussions
+- Generate tags and titles for posts
+
+Write casually like a dev talking to another dev. Be specific, opinionated, and genuine.
+Use code examples when relevant. Think Juejin / HN / Linux.do vibes — not a conference paper.`
+
+  export async function stream(messages: Message[], callbacks: StreamCallbacks, modelID?: string) {
+    const model = await AIProvider.getModel(modelID)
+
+    log.info("streaming", { model: modelID || AIProvider.DEFAULT_MODEL, messages: messages.length })
+
+    const coreMessages: CoreMessage[] = messages.map((m) => ({
+      role: m.role,
+      content: m.content,
+    }))
+
+    const result = streamText({
+      model,
+      system: SYSTEM_PROMPT,
+      messages: coreMessages,
+    })
+
+    let full = ""
+    for await (const chunk of result.textStream) {
+      full += chunk
+      callbacks.onToken?.(chunk)
+    }
+    callbacks.onFinish?.(full)
+    return full
+  }
+
+  export async function generate(prompt: string, modelID?: string): Promise<string> {
+    let result = ""
+    await stream([{ role: "user", content: prompt }], { onFinish: (text) => (result = text) }, modelID)
+    return result
+  }
+
+  export async function analyzeAndPost(sessionContent: string, modelID?: string): Promise<{ title: string; content: string; tags: string[]; summary: string }> {
+    const prompt = `Analyze this coding session and write a blog post about it.
+
+The post should:
+- Have a catchy, dev-friendly title (like HN or Juejin)
+- Tell a story: what you were doing, what went wrong/right, what you learned
+- Include relevant code snippets
+- Be casual and genuine, written in first person
+- End with key takeaways
+
+Also provide:
+- 3-8 relevant tags (lowercase, hyphenated)
+- A one-line summary/hook
+
+Session content:
+${sessionContent.slice(0, 50000)}
+
+Respond in this exact JSON format:
+{
+  "title": "...",
+  "content": "... (markdown)",
+  "tags": ["tag1", "tag2"],
+  "summary": "..."
+}`
+
+    const raw = await generate(prompt, modelID)
+    const jsonMatch = raw.match(/\{[\s\S]*\}/)
+    if (!jsonMatch) throw new Error("AI did not return valid JSON")
+    return JSON.parse(jsonMatch[0])
+  }
+}
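For context, here is a minimal sketch of how other CLI code could drive this new module; the wrapper function and the import path are assumptions, but `stream()`, its callback shape, and the returned full text are as defined above.

```ts
import { AIChat } from "./ai/chat"

// Hypothetical caller: stream a reply token-by-token to stdout,
// letting AIProvider fall back to its default model.
async function demo() {
  const reply = await AIChat.stream(
    [{ role: "user", content: "Turn my last debugging session into a post outline" }],
    {
      onToken: (token) => process.stdout.write(token),
      onFinish: () => process.stdout.write("\n"),
    },
  )
  // stream() also resolves with the full text, so the reply can be
  // reused without a second request.
  console.log(`received ${reply.length} characters`)
}

demo()
```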
package/src/ai/provider.ts
ADDED

@@ -0,0 +1,117 @@
+import { createAnthropic } from "@ai-sdk/anthropic"
+import { createOpenAI } from "@ai-sdk/openai"
+import { createGoogleGenerativeAI } from "@ai-sdk/google"
+import { type LanguageModel } from "ai"
+import { Config } from "../config"
+import { Log } from "../util/log"
+
+const log = Log.create({ service: "ai-provider" })
+
+export namespace AIProvider {
+  export type ProviderID = "anthropic" | "openai" | "google"
+
+  export interface ModelInfo {
+    id: string
+    providerID: ProviderID
+    name: string
+    contextWindow: number
+    outputTokens: number
+  }
+
+  export const MODELS: Record<string, ModelInfo> = {
+    "claude-sonnet-4-20250514": {
+      id: "claude-sonnet-4-20250514",
+      providerID: "anthropic",
+      name: "Claude Sonnet 4",
+      contextWindow: 200000,
+      outputTokens: 16384,
+    },
+    "claude-3-5-haiku-20241022": {
+      id: "claude-3-5-haiku-20241022",
+      providerID: "anthropic",
+      name: "Claude 3.5 Haiku",
+      contextWindow: 200000,
+      outputTokens: 8192,
+    },
+    "gpt-4o": {
+      id: "gpt-4o",
+      providerID: "openai",
+      name: "GPT-4o",
+      contextWindow: 128000,
+      outputTokens: 16384,
+    },
+    "gpt-4o-mini": {
+      id: "gpt-4o-mini",
+      providerID: "openai",
+      name: "GPT-4o Mini",
+      contextWindow: 128000,
+      outputTokens: 16384,
+    },
+    "gemini-2.5-flash": {
+      id: "gemini-2.5-flash",
+      providerID: "google",
+      name: "Gemini 2.5 Flash",
+      contextWindow: 1048576,
+      outputTokens: 65536,
+    },
+  }
+
+  export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
+
+  export async function getApiKey(providerID: ProviderID): Promise<string | undefined> {
+    const env: Record<ProviderID, string> = {
+      anthropic: "ANTHROPIC_API_KEY",
+      openai: "OPENAI_API_KEY",
+      google: "GOOGLE_GENERATIVE_AI_API_KEY",
+    }
+    const envKey = process.env[env[providerID]]
+    if (envKey) return envKey
+
+    const cfg = await Config.load()
+    const providers = (cfg as Record<string, unknown>).providers as Record<string, { api_key?: string }> | undefined
+    return providers?.[providerID]?.api_key
+  }
+
+  export async function getModel(modelID?: string): Promise<LanguageModel> {
+    const id = modelID || (await getConfiguredModel()) || DEFAULT_MODEL
+    const info = MODELS[id]
+    if (!info) throw new Error(`Unknown model: ${id}. Available: ${Object.keys(MODELS).join(", ")}`)
+
+    const apiKey = await getApiKey(info.providerID)
+    if (!apiKey) {
+      throw new Error(
+        `No API key for ${info.providerID}. Set ${info.providerID === "anthropic" ? "ANTHROPIC_API_KEY" : info.providerID === "openai" ? "OPENAI_API_KEY" : "GOOGLE_GENERATIVE_AI_API_KEY"} or run: codeblog config --provider ${info.providerID} --api-key <key>`,
+      )
+    }
+
+    log.info("loading model", { model: id, provider: info.providerID })
+
+    if (info.providerID === "anthropic") {
+      const provider = createAnthropic({ apiKey })
+      return provider(id)
+    }
+    if (info.providerID === "openai") {
+      const provider = createOpenAI({ apiKey })
+      return provider(id)
+    }
+    if (info.providerID === "google") {
+      const provider = createGoogleGenerativeAI({ apiKey })
+      return provider(id)
+    }
+    throw new Error(`Unsupported provider: ${info.providerID}`)
+  }
+
+  async function getConfiguredModel(): Promise<string | undefined> {
+    const cfg = await Config.load()
+    return (cfg as Record<string, unknown>).model as string | undefined
+  }
+
+  export async function available(): Promise<Array<{ model: ModelInfo; hasKey: boolean }>> {
+    const result: Array<{ model: ModelInfo; hasKey: boolean }> = []
+    for (const model of Object.values(MODELS)) {
+      const key = await getApiKey(model.providerID)
+      result.push({ model, hasKey: !!key })
+    }
+    return result
+  }
+}
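A small sketch of how the provider layer might be queried before opening a chat session; the surrounding script is hypothetical, but `MODELS`, `available()`, `getModel()`, and `DEFAULT_MODEL` are the exports defined above.

```ts
import { AIProvider } from "./ai/provider"

// Hypothetical pre-flight check before starting a chat.
async function preflight() {
  // List every built-in model and whether a key was found
  // (environment variable first, then the config file).
  for (const { model, hasKey } of await AIProvider.available()) {
    console.log(`${hasKey ? "ready" : "missing key"}  ${model.id}  (${model.providerID})`)
  }

  // getModel() throws with a setup hint when the provider has no API key,
  // so resolving the default model doubles as a configuration check.
  await AIProvider.getModel(AIProvider.DEFAULT_MODEL)
  console.log(`default model: ${AIProvider.MODELS[AIProvider.DEFAULT_MODEL].name}`)
}

preflight().catch((err) => console.error(err.message))
```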
package/src/cli/cmd/ai-publish.ts
ADDED

@@ -0,0 +1,95 @@
+import type { CommandModule } from "yargs"
+import { AIChat } from "../../ai/chat"
+import { Posts } from "../../api/posts"
+import { scanAll, parseSession, registerAllScanners } from "../../scanner"
+import { UI } from "../ui"
+
+export const AIPublishCommand: CommandModule = {
+  command: "ai-publish",
+  aliases: ["ap"],
+  describe: "AI-powered publish — scan sessions, let AI write the post",
+  builder: (yargs) =>
+    yargs
+      .option("model", {
+        alias: "m",
+        describe: "AI model to use",
+        type: "string",
+      })
+      .option("dry-run", {
+        describe: "Preview without publishing",
+        type: "boolean",
+        default: false,
+      })
+      .option("limit", {
+        describe: "Max sessions to scan",
+        type: "number",
+        default: 10,
+      }),
+  handler: async (args) => {
+    try {
+      UI.info("Scanning IDE sessions...")
+      registerAllScanners()
+      const sessions = scanAll(args.limit as number)
+
+      if (sessions.length === 0) {
+        UI.warn("No IDE sessions found.")
+        return
+      }
+
+      console.log(` Found ${UI.Style.TEXT_HIGHLIGHT}${sessions.length}${UI.Style.TEXT_NORMAL} sessions`)
+      console.log("")
+
+      // Pick the best session
+      const best = sessions[0]
+      console.log(` ${UI.Style.TEXT_NORMAL_BOLD}Selected:${UI.Style.TEXT_NORMAL} ${best.title}`)
+      console.log(` ${UI.Style.TEXT_DIM}${best.source} · ${best.project}${UI.Style.TEXT_NORMAL}`)
+      console.log("")
+
+      // Parse session content
+      const parsed = parseSession(best.filePath, best.source, 50)
+      if (!parsed || parsed.turns.length < 2) {
+        UI.warn("Session too short to generate a post.")
+        return
+      }
+
+      const content = parsed.turns
+        .map((t) => `[${t.role}]: ${t.content.slice(0, 2000)}`)
+        .join("\n\n")
+
+      UI.info("AI is writing your post...")
+      console.log("")
+
+      process.stdout.write(` ${UI.Style.TEXT_DIM}`)
+      const result = await AIChat.analyzeAndPost(content, args.model as string | undefined)
+      process.stdout.write(UI.Style.TEXT_NORMAL)
+
+      console.log(` ${UI.Style.TEXT_NORMAL_BOLD}Title:${UI.Style.TEXT_NORMAL} ${result.title}`)
+      console.log(` ${UI.Style.TEXT_DIM}Tags: ${result.tags.join(", ")}${UI.Style.TEXT_NORMAL}`)
+      console.log(` ${UI.Style.TEXT_DIM}Summary: ${result.summary}${UI.Style.TEXT_NORMAL}`)
+      console.log("")
+
+      if (args.dryRun) {
+        console.log(` ${UI.Style.TEXT_WARNING}[DRY RUN]${UI.Style.TEXT_NORMAL} Preview:`)
+        console.log("")
+        console.log(result.content.slice(0, 1000))
+        if (result.content.length > 1000) console.log(` ${UI.Style.TEXT_DIM}... (${result.content.length} chars total)${UI.Style.TEXT_NORMAL}`)
+        console.log("")
+        return
+      }
+
+      UI.info("Publishing to CodeBlog...")
+      const post = await Posts.create({
+        title: result.title,
+        content: result.content,
+        tags: result.tags,
+        summary: result.summary,
+        source_session: best.filePath,
+      })
+
+      UI.success(`Published! Post ID: ${post.post.id}`)
+    } catch (err) {
+      UI.error(`AI publish failed: ${err instanceof Error ? err.message : String(err)}`)
+      process.exitCode = 1
+    }
+  },
+}
package/src/cli/cmd/chat.ts
ADDED

@@ -0,0 +1,175 @@
+import type { CommandModule } from "yargs"
+import { AIChat } from "../../ai/chat"
+import { AIProvider } from "../../ai/provider"
+import { UI } from "../ui"
+import readline from "readline"
+
+export const ChatCommand: CommandModule = {
+  command: "chat",
+  aliases: ["c"],
+  describe: "Interactive AI chat — write posts, analyze code, browse the forum",
+  builder: (yargs) =>
+    yargs
+      .option("model", {
+        alias: "m",
+        describe: "Model to use (e.g. claude-sonnet-4-20250514, gpt-4o)",
+        type: "string",
+      })
+      .option("prompt", {
+        alias: "p",
+        describe: "Single prompt (non-interactive mode)",
+        type: "string",
+      }),
+  handler: async (args) => {
+    const modelID = args.model as string | undefined
+
+    // Non-interactive: single prompt
+    if (args.prompt) {
+      try {
+        await AIChat.stream(
+          [{ role: "user", content: args.prompt as string }],
+          {
+            onToken: (token) => process.stdout.write(token),
+            onFinish: () => process.stdout.write("\n"),
+            onError: (err) => UI.error(err.message),
+          },
+          modelID,
+        )
+      } catch (err) {
+        UI.error(err instanceof Error ? err.message : String(err))
+        process.exitCode = 1
+      }
+      return
+    }
+
+    // Interactive REPL
+    const modelInfo = AIProvider.MODELS[modelID || AIProvider.DEFAULT_MODEL]
+    const modelName = modelInfo?.name || modelID || AIProvider.DEFAULT_MODEL
+
+    console.log("")
+    console.log(` ${UI.Style.TEXT_HIGHLIGHT_BOLD}CodeBlog AI Chat${UI.Style.TEXT_NORMAL}`)
+    console.log(` ${UI.Style.TEXT_DIM}Model: ${modelName}${UI.Style.TEXT_NORMAL}`)
+    console.log(` ${UI.Style.TEXT_DIM}Type your message. Commands: /help /model /clear /exit${UI.Style.TEXT_NORMAL}`)
+    console.log("")
+
+    const messages: AIChat.Message[] = []
+    const rl = readline.createInterface({
+      input: process.stdin,
+      output: process.stdout,
+      prompt: `${UI.Style.TEXT_HIGHLIGHT}❯ ${UI.Style.TEXT_NORMAL}`,
+    })
+
+    let currentModel = modelID
+
+    rl.prompt()
+
+    rl.on("line", async (line) => {
+      const input = line.trim()
+      if (!input) {
+        rl.prompt()
+        return
+      }
+
+      // Handle commands
+      if (input.startsWith("/")) {
+        const cmd = input.split(" ")[0]
+        const rest = input.slice(cmd.length).trim()
+
+        if (cmd === "/exit" || cmd === "/quit" || cmd === "/q") {
+          console.log("")
+          UI.info("Bye!")
+          rl.close()
+          return
+        }
+
+        if (cmd === "/clear") {
+          messages.length = 0
+          console.log(` ${UI.Style.TEXT_DIM}Chat history cleared${UI.Style.TEXT_NORMAL}`)
+          rl.prompt()
+          return
+        }
+
+        if (cmd === "/model") {
+          if (rest) {
+            if (AIProvider.MODELS[rest]) {
+              currentModel = rest
+              console.log(` ${UI.Style.TEXT_SUCCESS}Model: ${AIProvider.MODELS[rest].name}${UI.Style.TEXT_NORMAL}`)
+            } else {
+              console.log(` ${UI.Style.TEXT_DANGER}Unknown model: ${rest}${UI.Style.TEXT_NORMAL}`)
+              console.log(` ${UI.Style.TEXT_DIM}Available: ${Object.keys(AIProvider.MODELS).join(", ")}${UI.Style.TEXT_NORMAL}`)
+            }
+          } else {
+            const current = AIProvider.MODELS[currentModel || AIProvider.DEFAULT_MODEL]
+            console.log(` ${UI.Style.TEXT_DIM}Current: ${current?.name || currentModel || AIProvider.DEFAULT_MODEL}${UI.Style.TEXT_NORMAL}`)
+            console.log(` ${UI.Style.TEXT_DIM}Available: ${Object.keys(AIProvider.MODELS).join(", ")}${UI.Style.TEXT_NORMAL}`)
+          }
+          rl.prompt()
+          return
+        }
+
+        if (cmd === "/help") {
+          console.log("")
+          console.log(` ${UI.Style.TEXT_NORMAL_BOLD}Commands${UI.Style.TEXT_NORMAL}`)
+          console.log(` ${UI.Style.TEXT_DIM}/model [id]${UI.Style.TEXT_NORMAL} Switch or show model`)
+          console.log(` ${UI.Style.TEXT_DIM}/clear${UI.Style.TEXT_NORMAL} Clear chat history`)
+          console.log(` ${UI.Style.TEXT_DIM}/exit${UI.Style.TEXT_NORMAL} Exit chat`)
+          console.log("")
+          console.log(` ${UI.Style.TEXT_NORMAL_BOLD}Tips${UI.Style.TEXT_NORMAL}`)
+          console.log(` ${UI.Style.TEXT_DIM}Ask me to write a blog post, analyze code, draft comments,${UI.Style.TEXT_NORMAL}`)
+          console.log(` ${UI.Style.TEXT_DIM}summarize discussions, or generate tags and titles.${UI.Style.TEXT_NORMAL}`)
+          console.log("")
+          rl.prompt()
+          return
+        }
+
+        console.log(` ${UI.Style.TEXT_DIM}Unknown command: ${cmd}. Type /help${UI.Style.TEXT_NORMAL}`)
+        rl.prompt()
+        return
+      }
+
+      // Send message to AI
+      messages.push({ role: "user", content: input })
+
+      console.log("")
+      process.stdout.write(` ${UI.Style.TEXT_INFO}`)
+
+      try {
+        let response = ""
+        await AIChat.stream(
+          messages,
+          {
+            onToken: (token) => {
+              process.stdout.write(token)
+              response += token
+            },
+            onFinish: () => {
+              process.stdout.write(UI.Style.TEXT_NORMAL)
+              console.log("")
+              console.log("")
+            },
+            onError: (err) => {
+              process.stdout.write(UI.Style.TEXT_NORMAL)
+              console.log("")
+              UI.error(err.message)
+            },
+          },
+          currentModel,
+        )
+        messages.push({ role: "assistant", content: response })
+      } catch (err) {
+        process.stdout.write(UI.Style.TEXT_NORMAL)
+        console.log("")
+        UI.error(err instanceof Error ? err.message : String(err))
+      }
+
+      rl.prompt()
+    })
+
+    rl.on("close", () => {
+      process.exit(0)
+    })
+
+    // Keep process alive
+    await new Promise(() => {})
+  },
+}
package/src/cli/cmd/config.ts
ADDED

@@ -0,0 +1,96 @@
+import type { CommandModule } from "yargs"
+import { Config } from "../../config"
+import { AIProvider } from "../../ai/provider"
+import { UI } from "../ui"
+
+export const ConfigCommand: CommandModule = {
+  command: "config",
+  describe: "Configure AI provider and model settings",
+  builder: (yargs) =>
+    yargs
+      .option("provider", {
+        describe: "AI provider: anthropic, openai, google",
+        type: "string",
+      })
+      .option("api-key", {
+        describe: "API key for the provider",
+        type: "string",
+      })
+      .option("model", {
+        describe: "Default model ID",
+        type: "string",
+      })
+      .option("list", {
+        describe: "List available models and their status",
+        type: "boolean",
+        default: false,
+      }),
+  handler: async (args) => {
+    try {
+      if (args.list) {
+        const models = await AIProvider.available()
+        console.log("")
+        console.log(` ${UI.Style.TEXT_NORMAL_BOLD}Available Models${UI.Style.TEXT_NORMAL}`)
+        console.log("")
+        for (const { model, hasKey } of models) {
+          const status = hasKey ? `${UI.Style.TEXT_SUCCESS}✓${UI.Style.TEXT_NORMAL}` : `${UI.Style.TEXT_DIM}✗${UI.Style.TEXT_NORMAL}`
+          console.log(` ${status} ${UI.Style.TEXT_NORMAL_BOLD}${model.name}${UI.Style.TEXT_NORMAL} ${UI.Style.TEXT_DIM}(${model.id})${UI.Style.TEXT_NORMAL}`)
+          console.log(` ${UI.Style.TEXT_DIM}${model.providerID} · ${(model.contextWindow / 1000).toFixed(0)}k context${UI.Style.TEXT_NORMAL}`)
+        }
+        console.log("")
+        console.log(` ${UI.Style.TEXT_DIM}✓ = API key configured, ✗ = needs key${UI.Style.TEXT_NORMAL}`)
+        console.log(` ${UI.Style.TEXT_DIM}Set key: codeblog config --provider anthropic --api-key sk-...${UI.Style.TEXT_NORMAL}`)
+        console.log("")
+        return
+      }
+
+      if (args.provider && args.apiKey) {
+        const provider = args.provider as string
+        if (!["anthropic", "openai", "google"].includes(provider)) {
+          UI.error("Provider must be: anthropic, openai, or google")
+          process.exitCode = 1
+          return
+        }
+        const cfg = await Config.load() as Record<string, unknown>
+        const providers = (cfg.providers || {}) as Record<string, Record<string, string>>
+        providers[provider] = { ...providers[provider], api_key: args.apiKey as string }
+        await Config.save({ ...cfg, providers } as unknown as Config.CodeblogConfig)
+        UI.success(`${provider} API key saved`)
+        return
+      }
+
+      if (args.model) {
+        const model = args.model as string
+        if (!AIProvider.MODELS[model]) {
+          UI.error(`Unknown model: ${model}. Run: codeblog config --list`)
+          process.exitCode = 1
+          return
+        }
+        const cfg = await Config.load() as Record<string, unknown>
+        await Config.save({ ...cfg, model } as unknown as Config.CodeblogConfig)
+        UI.success(`Default model set to ${model}`)
+        return
+      }
+
+      // Show current config
+      const cfg = await Config.load() as Record<string, unknown>
+      const model = (cfg.model as string) || AIProvider.DEFAULT_MODEL
+      const providers = (cfg.providers || {}) as Record<string, Record<string, string>>
+
+      console.log("")
+      console.log(` ${UI.Style.TEXT_NORMAL_BOLD}Current Config${UI.Style.TEXT_NORMAL}`)
+      console.log("")
+      console.log(` Model: ${UI.Style.TEXT_HIGHLIGHT}${model}${UI.Style.TEXT_NORMAL}`)
+      console.log(` API URL: ${cfg.api_url || "https://codeblog.ai"}`)
+      console.log("")
+      for (const [id, p] of Object.entries(providers)) {
+        const masked = p.api_key ? p.api_key.slice(0, 8) + "..." : "not set"
+        console.log(` ${id}: ${UI.Style.TEXT_DIM}${masked}${UI.Style.TEXT_NORMAL}`)
+      }
+      console.log("")
+    } catch (err) {
+      UI.error(`Config failed: ${err instanceof Error ? err.message : String(err)}`)
+      process.exitCode = 1
+    }
+  },
+}
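For orientation, the config this command writes and the provider layer reads back presumably ends up shaped roughly as sketched here. The field names come from the code above (`model`, `api_url`, `providers.<id>.api_key`); the concrete values and the file's on-disk location (handled by Config.load/Config.save, which are outside this diff) are illustrative assumptions.

```ts
// Illustrative shape only; real values and storage location depend on Config.save.
const exampleConfig = {
  api_url: "https://codeblog.ai",      // default the config command falls back to when unset
  model: "gemini-2.5-flash",           // picked up by AIProvider.getConfiguredModel()
  providers: {
    anthropic: { api_key: "sk-..." },  // read back by AIProvider.getApiKey("anthropic")
  },
}
```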
package/src/index.ts
CHANGED

@@ -27,8 +27,11 @@ import { FollowCommand } from "./cli/cmd/follow"
 import { MyPostsCommand } from "./cli/cmd/myposts"
 import { EditCommand } from "./cli/cmd/edit"
 import { DeleteCommand } from "./cli/cmd/delete"
+import { ChatCommand } from "./cli/cmd/chat"
+import { ConfigCommand } from "./cli/cmd/config"
+import { AIPublishCommand } from "./cli/cmd/ai-publish"
 
-const VERSION = "0.2.0"
+const VERSION = "0.3.0"
 
 process.on("unhandledRejection", (e) => {
   Log.Default.error("rejection", {
@@ -93,6 +96,10 @@ const cli = yargs(hideBin(process.argv))
   // Scan & Publish
   .command(ScanCommand)
   .command(PublishCommand)
+  .command(AIPublishCommand)
+  // AI
+  .command(ChatCommand)
+  .command(ConfigCommand)
   // Account
   .command(NotificationsCommand)
   .command(DashboardCommand)