harness-auto-docs 0.3.2 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,68 @@
1
+ # Design: Multi-Provider Model Support
2
+
3
+ **Date:** 2026-04-03
4
+ **Status:** Approved
5
+ **Scope:** Add support for Qwen, 智谱 (Zhipu), DeepSeek, 豆包 (Doubao), Kimi, and Grok AI models
6
+
7
+ ---
8
+
9
+ ## Overview
10
+
11
+ Extend `harness-auto-docs` to support six additional AI model providers. Each provider exposes an OpenAI-compatible Chat Completions API, so each will be implemented as an individual provider class (matching the existing `openai.ts` pattern) pointing to the provider's base URL.
12
+
13
+ Each provider uses its own dedicated API key environment variable — keys are not shared.
14
+
15
+ ---
16
+
17
+ ## New Provider Files
18
+
19
+ Six new files under `src/ai/`, one per provider:
20
+
21
+ | File | Class | Model prefix | Base URL |
22
+ |------|-------|-------------|---------|
23
+ | `src/ai/qwen.ts` | `QwenProvider` | `qwen-` | `https://dashscope.aliyuncs.com/compatible-mode/v1` |
24
+ | `src/ai/zhipu.ts` | `ZhipuProvider` | `glm-` | `https://open.bigmodel.cn/api/paas/v4` |
25
+ | `src/ai/deepseek.ts` | `DeepSeekProvider` | `deepseek-` | `https://api.deepseek.com` |
26
+ | `src/ai/doubao.ts` | `DoubaoProvider` | `doubao-` | `https://ark.cn-beijing.volces.com/api/v3` |
27
+ | `src/ai/kimi.ts` | `KimiProvider` | `moonshot-` | `https://api.moonshot.cn/v1` |
28
+ | `src/ai/grok.ts` | `GrokProvider` | `grok-` | `https://api.x.ai/v1` |
29
+
30
+ Each file is structurally identical to `src/ai/openai.ts`, using the `openai` SDK with a custom `baseURL`.
31
+
32
+ ---
33
+
34
+ ## Environment Variables
35
+
36
+ Each provider requires its own API key. Missing keys cause the CLI to exit with a clear error.
37
+
38
+ | Provider | Env var |
39
+ |----------|---------|
40
+ | Qwen | `QWEN_API_KEY` |
41
+ | 智谱 | `ZHIPU_API_KEY` |
42
+ | DeepSeek | `DEEPSEEK_API_KEY` |
43
+ | 豆包 | `DOUBAO_API_KEY` |
44
+ | Kimi | `KIMI_API_KEY` |
45
+ | Grok | `GROK_API_KEY` |
46
+
47
+ ---
48
+
49
+ ## `selectAI` Changes (`src/cli.ts`)
50
+
51
+ Extend the `selectAI` function with new prefix branches. Each branch reads the provider-specific env var via `requireEnv` and instantiates the corresponding provider.
52
+
53
+ The error message for unknown models is updated to list all supported prefixes.
54
+
55
+ ---
56
+
57
+ ## README Updates
58
+
59
+ Add a table row for each new provider in the "Supported models" section, showing the model prefix, env var, and example model name.
60
+
61
+ ---
62
+
63
+ ## What Is Not Changed
64
+
65
+ - `AIProvider` interface — no changes needed
66
+ - Existing providers (`AnthropicProvider`, `OpenAIProvider`, `MiniMaxProvider`) — untouched
67
+ - Platform providers, diff logic, generator — untouched
68
+ - New tests: one mock-based unit test per provider (the classes are thin wrappers, so each test only verifies base-URL wiring and response/null-content handling)
@@ -27,6 +27,14 @@ jobs:
27
27
  - name: Generate and PR Harness Engineering docs
28
28
  run: npx harness-auto-docs@latest
29
29
  env:
30
- AI_MODEL: claude-sonnet-4-6 # or gpt-4o
31
- AI_API_KEY: ${{ secrets.AI_API_KEY }}
30
+ AI_MODEL: claude-sonnet-4-6 # or gpt-4o, MiniMax-Text-01, qwen-turbo, etc.
31
+ ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} # for claude-* models
32
+ # OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} # for gpt-* models
33
+ # MINIMAX_API_KEY: ${{ secrets.MINIMAX_API_KEY }} # for MiniMax-* models
34
+ # QWEN_API_KEY: ${{ secrets.QWEN_API_KEY }} # for qwen-* models
35
+ # ZHIPU_API_KEY: ${{ secrets.ZHIPU_API_KEY }} # for glm-* models
36
+ # DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }} # for deepseek-* models
37
+ # DOUBAO_API_KEY: ${{ secrets.DOUBAO_API_KEY }} # for doubao-* models
38
+ # KIMI_API_KEY: ${{ secrets.KIMI_API_KEY }} # for moonshot-* models
39
+ # GROK_API_KEY: ${{ secrets.GROK_API_KEY }} # for grok-* models
32
40
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "harness-auto-docs",
3
- "version": "0.3.2",
3
+ "version": "0.3.3",
4
4
  "description": "Auto-generate Harness Engineering docs on git tag",
5
5
  "type": "module",
6
6
  "bin": {
@@ -0,0 +1,20 @@
1
+ import OpenAI from 'openai';
2
+ import type { AIProvider } from './interface.js';
3
+
4
+ export class DeepSeekProvider implements AIProvider {
5
+ private client: OpenAI;
6
+ private model: string;
7
+
8
+ constructor(apiKey: string, model: string) {
9
+ this.client = new OpenAI({ apiKey, baseURL: 'https://api.deepseek.com' });
10
+ this.model = model;
11
+ }
12
+
13
+ async generate(prompt: string): Promise<string> {
14
+ const completion = await this.client.chat.completions.create({
15
+ model: this.model,
16
+ messages: [{ role: 'user', content: prompt }],
17
+ });
18
+ return completion.choices[0].message.content ?? '';
19
+ }
20
+ }
@@ -0,0 +1,20 @@
1
+ import OpenAI from 'openai';
2
+ import type { AIProvider } from './interface.js';
3
+
4
+ export class DoubaoProvider implements AIProvider {
5
+ private client: OpenAI;
6
+ private model: string;
7
+
8
+ constructor(apiKey: string, model: string) {
9
+ this.client = new OpenAI({ apiKey, baseURL: 'https://ark.cn-beijing.volces.com/api/v3' });
10
+ this.model = model;
11
+ }
12
+
13
+ async generate(prompt: string): Promise<string> {
14
+ const completion = await this.client.chat.completions.create({
15
+ model: this.model,
16
+ messages: [{ role: 'user', content: prompt }],
17
+ });
18
+ return completion.choices[0].message.content ?? '';
19
+ }
20
+ }
package/src/ai/grok.ts ADDED
@@ -0,0 +1,20 @@
1
+ import OpenAI from 'openai';
2
+ import type { AIProvider } from './interface.js';
3
+
4
+ export class GrokProvider implements AIProvider {
5
+ private client: OpenAI;
6
+ private model: string;
7
+
8
+ constructor(apiKey: string, model: string) {
9
+ this.client = new OpenAI({ apiKey, baseURL: 'https://api.x.ai/v1' });
10
+ this.model = model;
11
+ }
12
+
13
+ async generate(prompt: string): Promise<string> {
14
+ const completion = await this.client.chat.completions.create({
15
+ model: this.model,
16
+ messages: [{ role: 'user', content: prompt }],
17
+ });
18
+ return completion.choices[0].message.content ?? '';
19
+ }
20
+ }
package/src/ai/kimi.ts ADDED
@@ -0,0 +1,20 @@
1
+ import OpenAI from 'openai';
2
+ import type { AIProvider } from './interface.js';
3
+
4
+ export class KimiProvider implements AIProvider {
5
+ private client: OpenAI;
6
+ private model: string;
7
+
8
+ constructor(apiKey: string, model: string) {
9
+ this.client = new OpenAI({ apiKey, baseURL: 'https://api.moonshot.cn/v1' });
10
+ this.model = model;
11
+ }
12
+
13
+ async generate(prompt: string): Promise<string> {
14
+ const completion = await this.client.chat.completions.create({
15
+ model: this.model,
16
+ messages: [{ role: 'user', content: prompt }],
17
+ });
18
+ return completion.choices[0].message.content ?? '';
19
+ }
20
+ }
package/src/ai/qwen.ts ADDED
@@ -0,0 +1,20 @@
1
+ import OpenAI from 'openai';
2
+ import type { AIProvider } from './interface.js';
3
+
4
+ export class QwenProvider implements AIProvider {
5
+ private client: OpenAI;
6
+ private model: string;
7
+
8
+ constructor(apiKey: string, model: string) {
9
+ this.client = new OpenAI({ apiKey, baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1' });
10
+ this.model = model;
11
+ }
12
+
13
+ async generate(prompt: string): Promise<string> {
14
+ const completion = await this.client.chat.completions.create({
15
+ model: this.model,
16
+ messages: [{ role: 'user', content: prompt }],
17
+ });
18
+ return completion.choices[0].message.content ?? '';
19
+ }
20
+ }
@@ -0,0 +1,20 @@
1
+ import OpenAI from 'openai';
2
+ import type { AIProvider } from './interface.js';
3
+
4
+ export class ZhipuProvider implements AIProvider {
5
+ private client: OpenAI;
6
+ private model: string;
7
+
8
+ constructor(apiKey: string, model: string) {
9
+ this.client = new OpenAI({ apiKey, baseURL: 'https://open.bigmodel.cn/api/paas/v4' });
10
+ this.model = model;
11
+ }
12
+
13
+ async generate(prompt: string): Promise<string> {
14
+ const completion = await this.client.chat.completions.create({
15
+ model: this.model,
16
+ messages: [{ role: 'user', content: prompt }],
17
+ });
18
+ return completion.choices[0].message.content ?? '';
19
+ }
20
+ }
package/src/cli.ts CHANGED
@@ -9,6 +9,12 @@ import { GitLabProvider } from './providers/gitlab.js';
9
9
  import { AnthropicProvider } from './ai/anthropic.js';
10
10
  import { OpenAIProvider } from './ai/openai.js';
11
11
  import { MiniMaxProvider } from './ai/minimax.js';
12
+ import { QwenProvider } from './ai/qwen.js';
13
+ import { ZhipuProvider } from './ai/zhipu.js';
14
+ import { DeepSeekProvider } from './ai/deepseek.js';
15
+ import { DoubaoProvider } from './ai/doubao.js';
16
+ import { KimiProvider } from './ai/kimi.js';
17
+ import { GrokProvider } from './ai/grok.js';
12
18
  import type { AIProvider } from './ai/interface.js';
13
19
  import type { PlatformProvider } from './providers/interface.js';
14
20
 
@@ -36,11 +42,19 @@ function detectPlatform(): PlatformProvider {
36
42
  return new GitHubProvider(token);
37
43
  }
38
44
 
39
- function selectAI(model: string, apiKey: string): AIProvider {
40
- if (model.startsWith('claude-')) return new AnthropicProvider(apiKey, model);
41
- if (model.startsWith('gpt-')) return new OpenAIProvider(apiKey, model);
42
- if (model.startsWith('MiniMax-')) return new MiniMaxProvider(apiKey, model);
43
- console.error(`Error: Unknown model "${model}". Use a claude-*, gpt-*, or MiniMax-* model.`);
45
+ function selectAI(model: string): AIProvider {
46
+ if (model.startsWith('claude-')) return new AnthropicProvider(requireEnv('ANTHROPIC_API_KEY'), model);
47
+ if (model.startsWith('gpt-')) return new OpenAIProvider(requireEnv('OPENAI_API_KEY'), model);
48
+ if (model.startsWith('MiniMax-')) return new MiniMaxProvider(requireEnv('MINIMAX_API_KEY'), model);
49
+ if (model.startsWith('qwen-')) return new QwenProvider(requireEnv('QWEN_API_KEY'), model);
50
+ if (model.startsWith('glm-')) return new ZhipuProvider(requireEnv('ZHIPU_API_KEY'), model);
51
+ if (model.startsWith('deepseek-')) return new DeepSeekProvider(requireEnv('DEEPSEEK_API_KEY'), model);
52
+ if (model.startsWith('doubao-')) return new DoubaoProvider(requireEnv('DOUBAO_API_KEY'), model);
53
+ if (model.startsWith('moonshot-')) return new KimiProvider(requireEnv('KIMI_API_KEY'), model); // Kimi is the product; Moonshot AI is the provider
54
+ if (model.startsWith('grok-')) return new GrokProvider(requireEnv('GROK_API_KEY'), model);
55
+ console.error(
56
+ `Error: Unknown model "${model}". Supported prefixes: claude-*, gpt-*, MiniMax-*, qwen-*, glm-*, deepseek-*, doubao-*, moonshot-*, grok-*`
57
+ );
44
58
  process.exit(1);
45
59
  }
46
60
 
@@ -55,7 +69,6 @@ function getDefaultBranch(): string {
55
69
 
56
70
  async function main() {
57
71
  const model = requireEnv('AI_MODEL');
58
- const apiKey = requireEnv('AI_API_KEY');
59
72
 
60
73
  const diff = extractDiff();
61
74
 
@@ -66,7 +79,7 @@ async function main() {
66
79
 
67
80
  console.log(`Generating docs: ${diff.prevTag} → ${diff.currentTag}`);
68
81
 
69
- const ai = selectAI(model, apiKey);
82
+ const ai = selectAI(model);
70
83
  const targets = selectTargets(diff.fileGroups, diff.changedFiles);
71
84
  console.log(`Updating ${targets.length} documents: ${targets.join(', ')}`);
72
85
 
@@ -0,0 +1,30 @@
1
+ import { describe, it, expect, vi } from 'vitest';
2
+ import { DeepSeekProvider } from '../../src/ai/deepseek.js';
3
+
4
+ const mockCreate = vi.fn().mockResolvedValue({
5
+ choices: [{ message: { content: 'deepseek generated content' } }],
6
+ });
7
+
8
+ vi.mock('openai', () => ({
9
+ default: vi.fn().mockImplementation(({ baseURL }: { baseURL: string }) => {
10
+ if (baseURL !== 'https://api.deepseek.com') {
11
+ throw new Error(`unexpected baseURL: ${baseURL}`);
12
+ }
13
+ return { chat: { completions: { create: mockCreate } } };
14
+ }),
15
+ }));
16
+
17
+ describe('DeepSeekProvider', () => {
18
+ it('returns text content from the API response', async () => {
19
+ const provider = new DeepSeekProvider('test-key', 'deepseek-chat');
20
+ const result = await provider.generate('write docs for this diff');
21
+ expect(result).toBe('deepseek generated content');
22
+ });
23
+
24
+ it('returns empty string when message content is null', async () => {
25
+ mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: null } }] });
26
+ const provider = new DeepSeekProvider('test-key', 'deepseek-chat');
27
+ const result = await provider.generate('prompt');
28
+ expect(result).toBe('');
29
+ });
30
+ });
@@ -0,0 +1,30 @@
1
+ import { describe, it, expect, vi } from 'vitest';
2
+ import { DoubaoProvider } from '../../src/ai/doubao.js';
3
+
4
+ const mockCreate = vi.fn().mockResolvedValue({
5
+ choices: [{ message: { content: 'doubao generated content' } }],
6
+ });
7
+
8
+ vi.mock('openai', () => ({
9
+ default: vi.fn().mockImplementation(({ baseURL }: { baseURL: string }) => {
10
+ if (baseURL !== 'https://ark.cn-beijing.volces.com/api/v3') {
11
+ throw new Error(`unexpected baseURL: ${baseURL}`);
12
+ }
13
+ return { chat: { completions: { create: mockCreate } } };
14
+ }),
15
+ }));
16
+
17
+ describe('DoubaoProvider', () => {
18
+ it('returns text content from the API response', async () => {
19
+ const provider = new DoubaoProvider('test-key', 'doubao-pro-4k');
20
+ const result = await provider.generate('write docs for this diff');
21
+ expect(result).toBe('doubao generated content');
22
+ });
23
+
24
+ it('returns empty string when message content is null', async () => {
25
+ mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: null } }] });
26
+ const provider = new DoubaoProvider('test-key', 'doubao-pro-4k');
27
+ const result = await provider.generate('prompt');
28
+ expect(result).toBe('');
29
+ });
30
+ });
@@ -0,0 +1,30 @@
1
+ import { describe, it, expect, vi } from 'vitest';
2
+ import { GrokProvider } from '../../src/ai/grok.js';
3
+
4
+ const mockCreate = vi.fn().mockResolvedValue({
5
+ choices: [{ message: { content: 'grok generated content' } }],
6
+ });
7
+
8
+ vi.mock('openai', () => ({
9
+ default: vi.fn().mockImplementation(({ baseURL }: { baseURL: string }) => {
10
+ if (baseURL !== 'https://api.x.ai/v1') {
11
+ throw new Error(`unexpected baseURL: ${baseURL}`);
12
+ }
13
+ return { chat: { completions: { create: mockCreate } } };
14
+ }),
15
+ }));
16
+
17
+ describe('GrokProvider', () => {
18
+ it('returns text content from the API response', async () => {
19
+ const provider = new GrokProvider('test-key', 'grok-2');
20
+ const result = await provider.generate('write docs for this diff');
21
+ expect(result).toBe('grok generated content');
22
+ });
23
+
24
+ it('returns empty string when message content is null', async () => {
25
+ mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: null } }] });
26
+ const provider = new GrokProvider('test-key', 'grok-2');
27
+ const result = await provider.generate('prompt');
28
+ expect(result).toBe('');
29
+ });
30
+ });
@@ -0,0 +1,30 @@
1
+ import { describe, it, expect, vi } from 'vitest';
2
+ import { KimiProvider } from '../../src/ai/kimi.js';
3
+
4
+ const mockCreate = vi.fn().mockResolvedValue({
5
+ choices: [{ message: { content: 'kimi generated content' } }],
6
+ });
7
+
8
+ vi.mock('openai', () => ({
9
+ default: vi.fn().mockImplementation(({ baseURL }: { baseURL: string }) => {
10
+ if (baseURL !== 'https://api.moonshot.cn/v1') {
11
+ throw new Error(`unexpected baseURL: ${baseURL}`);
12
+ }
13
+ return { chat: { completions: { create: mockCreate } } };
14
+ }),
15
+ }));
16
+
17
+ describe('KimiProvider', () => {
18
+ it('returns text content from the API response', async () => {
19
+ const provider = new KimiProvider('test-key', 'moonshot-v1-8k');
20
+ const result = await provider.generate('write docs for this diff');
21
+ expect(result).toBe('kimi generated content');
22
+ });
23
+
24
+ it('returns empty string when message content is null', async () => {
25
+ mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: null } }] });
26
+ const provider = new KimiProvider('test-key', 'moonshot-v1-8k');
27
+ const result = await provider.generate('prompt');
28
+ expect(result).toBe('');
29
+ });
30
+ });
@@ -0,0 +1,30 @@
1
+ import { describe, it, expect, vi } from 'vitest';
2
+ import { QwenProvider } from '../../src/ai/qwen.js';
3
+
4
+ const mockCreate = vi.fn().mockResolvedValue({
5
+ choices: [{ message: { content: 'qwen generated content' } }],
6
+ });
7
+
8
+ vi.mock('openai', () => ({
9
+ default: vi.fn().mockImplementation(({ baseURL }: { baseURL: string }) => {
10
+ if (baseURL !== 'https://dashscope.aliyuncs.com/compatible-mode/v1') {
11
+ throw new Error(`unexpected baseURL: ${baseURL}`);
12
+ }
13
+ return { chat: { completions: { create: mockCreate } } };
14
+ }),
15
+ }));
16
+
17
+ describe('QwenProvider', () => {
18
+ it('returns text content from the API response', async () => {
19
+ const provider = new QwenProvider('test-key', 'qwen-turbo');
20
+ const result = await provider.generate('write docs for this diff');
21
+ expect(result).toBe('qwen generated content');
22
+ });
23
+
24
+ it('returns empty string when message content is null', async () => {
25
+ mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: null } }] });
26
+ const provider = new QwenProvider('test-key', 'qwen-turbo');
27
+ const result = await provider.generate('prompt');
28
+ expect(result).toBe('');
29
+ });
30
+ });
@@ -0,0 +1,30 @@
1
+ import { describe, it, expect, vi } from 'vitest';
2
+ import { ZhipuProvider } from '../../src/ai/zhipu.js';
3
+
4
+ const mockCreate = vi.fn().mockResolvedValue({
5
+ choices: [{ message: { content: 'zhipu generated content' } }],
6
+ });
7
+
8
+ vi.mock('openai', () => ({
9
+ default: vi.fn().mockImplementation(({ baseURL }: { baseURL: string }) => {
10
+ if (baseURL !== 'https://open.bigmodel.cn/api/paas/v4') {
11
+ throw new Error(`unexpected baseURL: ${baseURL}`);
12
+ }
13
+ return { chat: { completions: { create: mockCreate } } };
14
+ }),
15
+ }));
16
+
17
+ describe('ZhipuProvider', () => {
18
+ it('returns text content from the API response', async () => {
19
+ const provider = new ZhipuProvider('test-key', 'glm-4');
20
+ const result = await provider.generate('write docs for this diff');
21
+ expect(result).toBe('zhipu generated content');
22
+ });
23
+
24
+ it('returns empty string when message content is null', async () => {
25
+ mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: null } }] });
26
+ const provider = new ZhipuProvider('test-key', 'glm-4');
27
+ const result = await provider.generate('prompt');
28
+ expect(result).toBe('');
29
+ });
30
+ });