@spaceflow/core 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +1176 -0
- package/README.md +105 -0
- package/nest-cli.json +10 -0
- package/package.json +128 -0
- package/rspack.config.mjs +62 -0
- package/src/__mocks__/@opencode-ai/sdk.js +9 -0
- package/src/__mocks__/c12.ts +3 -0
- package/src/app.module.ts +18 -0
- package/src/config/ci.config.ts +29 -0
- package/src/config/config-loader.ts +101 -0
- package/src/config/config-reader.module.ts +16 -0
- package/src/config/config-reader.service.ts +133 -0
- package/src/config/feishu.config.ts +35 -0
- package/src/config/git-provider.config.ts +29 -0
- package/src/config/index.ts +29 -0
- package/src/config/llm.config.ts +110 -0
- package/src/config/schema-generator.service.ts +129 -0
- package/src/config/spaceflow.config.ts +292 -0
- package/src/config/storage.config.ts +33 -0
- package/src/extension-system/extension.interface.ts +221 -0
- package/src/extension-system/index.ts +1 -0
- package/src/index.ts +80 -0
- package/src/locales/en/translation.json +11 -0
- package/src/locales/zh-cn/translation.json +11 -0
- package/src/shared/claude-setup/claude-setup.module.ts +8 -0
- package/src/shared/claude-setup/claude-setup.service.ts +131 -0
- package/src/shared/claude-setup/index.ts +2 -0
- package/src/shared/editor-config/index.ts +23 -0
- package/src/shared/feishu-sdk/feishu-sdk.module.ts +77 -0
- package/src/shared/feishu-sdk/feishu-sdk.service.ts +130 -0
- package/src/shared/feishu-sdk/fieshu-card.service.ts +139 -0
- package/src/shared/feishu-sdk/index.ts +4 -0
- package/src/shared/feishu-sdk/types/card-action.ts +132 -0
- package/src/shared/feishu-sdk/types/card.ts +64 -0
- package/src/shared/feishu-sdk/types/common.ts +22 -0
- package/src/shared/feishu-sdk/types/index.ts +46 -0
- package/src/shared/feishu-sdk/types/message.ts +35 -0
- package/src/shared/feishu-sdk/types/module.ts +21 -0
- package/src/shared/feishu-sdk/types/user.ts +77 -0
- package/src/shared/git-provider/adapters/gitea.adapter.spec.ts +473 -0
- package/src/shared/git-provider/adapters/gitea.adapter.ts +499 -0
- package/src/shared/git-provider/adapters/github.adapter.spec.ts +341 -0
- package/src/shared/git-provider/adapters/github.adapter.ts +830 -0
- package/src/shared/git-provider/adapters/gitlab.adapter.ts +839 -0
- package/src/shared/git-provider/adapters/index.ts +3 -0
- package/src/shared/git-provider/detect-provider.spec.ts +195 -0
- package/src/shared/git-provider/detect-provider.ts +112 -0
- package/src/shared/git-provider/git-provider.interface.ts +188 -0
- package/src/shared/git-provider/git-provider.module.ts +73 -0
- package/src/shared/git-provider/git-provider.service.spec.ts +282 -0
- package/src/shared/git-provider/git-provider.service.ts +309 -0
- package/src/shared/git-provider/index.ts +7 -0
- package/src/shared/git-provider/parse-repo-url.spec.ts +221 -0
- package/src/shared/git-provider/parse-repo-url.ts +155 -0
- package/src/shared/git-provider/types.ts +434 -0
- package/src/shared/git-sdk/git-sdk-diff.utils.spec.ts +344 -0
- package/src/shared/git-sdk/git-sdk-diff.utils.ts +151 -0
- package/src/shared/git-sdk/git-sdk.module.ts +8 -0
- package/src/shared/git-sdk/git-sdk.service.ts +235 -0
- package/src/shared/git-sdk/git-sdk.types.ts +25 -0
- package/src/shared/git-sdk/index.ts +4 -0
- package/src/shared/i18n/i18n.spec.ts +96 -0
- package/src/shared/i18n/i18n.ts +86 -0
- package/src/shared/i18n/index.ts +1 -0
- package/src/shared/i18n/locale-detect.ts +134 -0
- package/src/shared/llm-jsonput/index.ts +94 -0
- package/src/shared/llm-jsonput/types.ts +17 -0
- package/src/shared/llm-proxy/adapters/claude-code.adapter.spec.ts +131 -0
- package/src/shared/llm-proxy/adapters/claude-code.adapter.ts +208 -0
- package/src/shared/llm-proxy/adapters/index.ts +4 -0
- package/src/shared/llm-proxy/adapters/llm-adapter.interface.ts +23 -0
- package/src/shared/llm-proxy/adapters/open-code.adapter.ts +342 -0
- package/src/shared/llm-proxy/adapters/openai.adapter.spec.ts +215 -0
- package/src/shared/llm-proxy/adapters/openai.adapter.ts +153 -0
- package/src/shared/llm-proxy/index.ts +6 -0
- package/src/shared/llm-proxy/interfaces/config.interface.ts +32 -0
- package/src/shared/llm-proxy/interfaces/index.ts +4 -0
- package/src/shared/llm-proxy/interfaces/message.interface.ts +48 -0
- package/src/shared/llm-proxy/interfaces/session.interface.ts +28 -0
- package/src/shared/llm-proxy/llm-proxy.module.ts +140 -0
- package/src/shared/llm-proxy/llm-proxy.service.spec.ts +303 -0
- package/src/shared/llm-proxy/llm-proxy.service.ts +132 -0
- package/src/shared/llm-proxy/llm-session.spec.ts +111 -0
- package/src/shared/llm-proxy/llm-session.ts +109 -0
- package/src/shared/llm-proxy/stream-logger.ts +97 -0
- package/src/shared/logger/index.ts +11 -0
- package/src/shared/logger/logger.interface.ts +93 -0
- package/src/shared/logger/logger.spec.ts +178 -0
- package/src/shared/logger/logger.ts +175 -0
- package/src/shared/logger/renderers/plain.renderer.ts +116 -0
- package/src/shared/logger/renderers/tui.renderer.ts +162 -0
- package/src/shared/mcp/index.ts +332 -0
- package/src/shared/output/index.ts +2 -0
- package/src/shared/output/output.module.ts +9 -0
- package/src/shared/output/output.service.ts +97 -0
- package/src/shared/package-manager/index.ts +115 -0
- package/src/shared/parallel/index.ts +1 -0
- package/src/shared/parallel/parallel-executor.ts +169 -0
- package/src/shared/rspack-config/index.ts +1 -0
- package/src/shared/rspack-config/rspack-config.ts +157 -0
- package/src/shared/source-utils/index.ts +130 -0
- package/src/shared/spaceflow-dir/index.ts +158 -0
- package/src/shared/storage/adapters/file.adapter.ts +113 -0
- package/src/shared/storage/adapters/index.ts +3 -0
- package/src/shared/storage/adapters/memory.adapter.ts +50 -0
- package/src/shared/storage/adapters/storage-adapter.interface.ts +48 -0
- package/src/shared/storage/index.ts +4 -0
- package/src/shared/storage/storage.module.ts +150 -0
- package/src/shared/storage/storage.service.ts +293 -0
- package/src/shared/storage/types.ts +51 -0
- package/src/shared/verbose/index.ts +73 -0
- package/test/app.e2e-spec.ts +22 -0
- package/tsconfig.build.json +4 -0
- package/tsconfig.json +25 -0
- package/tsconfig.skill.json +18 -0
- package/vitest.config.ts +58 -0
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
// Unit tests for OpenAIAdapter. The "openai" SDK module is replaced by a
// vitest auto-mock, so no real network access happens: each test installs a
// fake `chat.completions.create` on a shared mock instance and verifies how
// the adapter maps requests, responses, streams, and errors.
import { vi, type Mock } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { OpenAIAdapter } from "./openai.adapter";
import OpenAI from "openai";

vi.mock("openai");

describe("OpenAIAdapter", () => {
  let adapter: OpenAIAdapter;
  // Mirrors only the SDK surface the adapter touches: chat.completions.create.
  let mockOpenAIInstance: any;

  // Minimal config providing the `openai` section the adapter requires.
  const mockConfig = {
    openai: {
      apiKey: "test-key",
      model: "gpt-4o",
      baseUrl: "https://api.openai.com/v1",
    },
  };

  beforeEach(async () => {
    mockOpenAIInstance = {
      chat: {
        completions: {
          create: vi.fn(),
        },
      },
    };
    // Make `new OpenAI(...)` inside the adapter return our controllable fake.
    (OpenAI as unknown as Mock).mockImplementation(function () {
      return mockOpenAIInstance;
    });

    const module: TestingModule = await Test.createTestingModule({
      providers: [OpenAIAdapter, { provide: "LLM_PROXY_CONFIG", useValue: mockConfig }],
    }).compile();

    adapter = module.get<OpenAIAdapter>(OpenAIAdapter);
  });

  it("should be defined", () => {
    expect(adapter).toBeDefined();
    expect(adapter.name).toBe("openai");
  });

  describe("isConfigured", () => {
    it("should return true if openai config exists", () => {
      expect(adapter.isConfigured()).toBe(true);
    });

    it("should return false if openai config is missing", async () => {
      // Re-compile the module with an empty config to exercise the false path.
      const module: TestingModule = await Test.createTestingModule({
        providers: [OpenAIAdapter, { provide: "LLM_PROXY_CONFIG", useValue: {} }],
      }).compile();
      const unconfiguredAdapter = module.get<OpenAIAdapter>(OpenAIAdapter);
      expect(unconfiguredAdapter.isConfigured()).toBe(false);
    });
  });

  describe("chat", () => {
    it("should call openai.chat.completions.create with correct params", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      const mockResponse = {
        choices: [{ message: { content: "hi" } }],
        model: "gpt-4o",
        usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
      };
      mockOpenAIInstance.chat.completions.create.mockResolvedValue(mockResponse);

      const result = await adapter.chat(messages);

      // Model comes from config; messages are forwarded unchanged.
      expect(mockOpenAIInstance.chat.completions.create).toHaveBeenCalledWith(
        expect.objectContaining({
          model: "gpt-4o",
          messages: [{ role: "user", content: "hello" }],
        }),
      );
      expect(result.content).toBe("hi");
      // snake_case SDK usage fields are mapped to camelCase LlmUsage.
      expect(result.usage?.totalTokens).toBe(15);
    });

    it("should handle API error", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      const apiError = new Error("API Error");
      (apiError as any).status = 401;
      // Force `instanceof OpenAI.APIError` to hold for the mocked error.
      Object.setPrototypeOf(apiError, OpenAI.APIError.prototype);
      mockOpenAIInstance.chat.completions.create.mockRejectedValue(apiError);

      await expect(adapter.chat(messages)).rejects.toThrow("API 错误 (401)");
    });
  });

  describe("chatStream", () => {
    it("should handle streaming response", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      // Async generator stands in for the SDK's streaming iterable.
      const mockStream = (async function* () {
        yield { choices: [{ delta: { content: "h" } }] };
        yield { choices: [{ delta: { content: "i" } }] };
      })();
      mockOpenAIInstance.chat.completions.create.mockResolvedValue(mockStream);

      const stream = adapter.chatStream(messages);
      const events: any[] = [];
      for await (const event of stream) {
        events.push(event);
      }

      // Each delta yields a text event; the final result concatenates them.
      expect(events).toContainEqual({ type: "text", content: "h" });
      expect(events).toContainEqual({ type: "text", content: "i" });
      expect(events).toContainEqual({ type: "result", response: { content: "hi" } });
    });

    it("should handle stream API error", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      const apiError = new Error("Stream Error");
      (apiError as any).status = 500;
      Object.setPrototypeOf(apiError, OpenAI.APIError.prototype);
      mockOpenAIInstance.chat.completions.create.mockRejectedValue(apiError);

      const stream = adapter.chatStream(messages);
      const events: any[] = [];
      for await (const event of stream) {
        events.push(event);
      }

      // API errors are surfaced as an error event rather than thrown.
      expect(events[0]).toMatchObject({
        type: "error",
        message: expect.stringContaining("API 错误 (500)"),
      });
    });

    it("should throw non-API errors in chatStream", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      mockOpenAIInstance.chat.completions.create.mockRejectedValue(new Error("network"));

      // Unlike API errors, unknown errors propagate out of the generator.
      const stream = adapter.chatStream(messages);
      await expect(async () => {
        for await (const _ of stream) {
          /* consume */
        }
      }).rejects.toThrow("network");
    });

    it("should handle chunk with empty delta", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      const mockStream = (async function* () {
        // First chunk has no content and must be skipped silently.
        yield { choices: [{ delta: {} }] };
        yield { choices: [{ delta: { content: "ok" } }] };
      })();
      mockOpenAIInstance.chat.completions.create.mockResolvedValue(mockStream);

      const events: any[] = [];
      for await (const event of adapter.chatStream(messages)) {
        events.push(event);
      }
      expect(events).toContainEqual({ type: "text", content: "ok" });
      expect(events).toContainEqual({ type: "result", response: { content: "ok" } });
    });

    it("should yield error when openai not configured", async () => {
      const module: TestingModule = await Test.createTestingModule({
        providers: [OpenAIAdapter, { provide: "LLM_PROXY_CONFIG", useValue: {} }],
      }).compile();
      const unconfigured = module.get<OpenAIAdapter>(OpenAIAdapter);
      const events: any[] = [];
      for await (const event of unconfigured.chatStream([{ role: "user", content: "hi" }] as any)) {
        events.push(event);
      }
      expect(events[0]).toMatchObject({ type: "error" });
    });
  });

  describe("chat edge cases", () => {
    it("should throw non-API errors in chat", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      mockOpenAIInstance.chat.completions.create.mockRejectedValue(new Error("timeout"));
      await expect(adapter.chat(messages)).rejects.toThrow("timeout");
    });

    it("should throw when openai not configured for chat", async () => {
      const module: TestingModule = await Test.createTestingModule({
        providers: [OpenAIAdapter, { provide: "LLM_PROXY_CONFIG", useValue: {} }],
      }).compile();
      const unconfigured = module.get<OpenAIAdapter>(OpenAIAdapter);
      await expect(unconfigured.chat([{ role: "user", content: "hi" }] as any)).rejects.toThrow(
        "未配置 openai",
      );
    });

    // NOTE(review): despite the name, `choices` is non-empty here — it is the
    // `message.content` field that is missing; the adapter falls back to "".
    it("should return empty content when choices empty", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      mockOpenAIInstance.chat.completions.create.mockResolvedValue({
        choices: [{ message: {} }],
        model: "gpt-4o",
      });
      const result = await adapter.chat(messages);
      expect(result.content).toBe("");
      expect(result.usage).toBeUndefined();
    });

    it("should reuse cached client on second call", async () => {
      const messages = [{ role: "user", content: "hello" }] as any;
      mockOpenAIInstance.chat.completions.create.mockResolvedValue({
        choices: [{ message: { content: "a" } }],
        model: "gpt-4o",
      });
      // Clear constructor calls from beforeEach so only this test's count matters.
      (OpenAI as unknown as Mock).mockClear();
      await adapter.chat(messages);
      await adapter.chat(messages);
      // The adapter caches its client, so the SDK is constructed once.
      expect(OpenAI).toHaveBeenCalledTimes(1);
    });
  });

  it("isSupportJsonSchema should return false", () => {
    expect(adapter.isSupportJsonSchema()).toBe(false);
  });
});
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
import { Injectable, Inject } from "@nestjs/common";
|
|
2
|
+
import OpenAI from "openai";
|
|
3
|
+
import type { LlmAdapter } from "./llm-adapter.interface";
|
|
4
|
+
import type {
|
|
5
|
+
LlmMessage,
|
|
6
|
+
LlmRequestOptions,
|
|
7
|
+
LlmResponse,
|
|
8
|
+
LlmStreamEvent,
|
|
9
|
+
LlmProxyConfig,
|
|
10
|
+
} from "../interfaces";
|
|
11
|
+
import { shouldLog } from "../../verbose";
|
|
12
|
+
|
|
13
|
+
@Injectable()
|
|
14
|
+
export class OpenAIAdapter implements LlmAdapter {
|
|
15
|
+
readonly name = "openai";
|
|
16
|
+
|
|
17
|
+
private client: OpenAI | null = null;
|
|
18
|
+
|
|
19
|
+
constructor(@Inject("LLM_PROXY_CONFIG") private readonly config: LlmProxyConfig) {}
|
|
20
|
+
|
|
21
|
+
isConfigured(): boolean {
|
|
22
|
+
return !!this.config.openai;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
private getClient(): OpenAI {
|
|
26
|
+
if (this.client) {
|
|
27
|
+
return this.client;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
const openaiConf = this.config.openai;
|
|
31
|
+
|
|
32
|
+
if (!openaiConf) {
|
|
33
|
+
throw new Error("[LLMProxy.OpenAIAdapter.getClient] 未配置 openai 设置");
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
this.client = new OpenAI({
|
|
37
|
+
apiKey: openaiConf.apiKey,
|
|
38
|
+
baseURL: openaiConf.baseUrl || undefined,
|
|
39
|
+
});
|
|
40
|
+
|
|
41
|
+
return this.client;
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
async chat(messages: LlmMessage[], options?: LlmRequestOptions): Promise<LlmResponse> {
|
|
45
|
+
const openaiConf = this.config.openai;
|
|
46
|
+
|
|
47
|
+
if (!openaiConf) {
|
|
48
|
+
throw new Error("[LLMProxy.OpenAIAdapter.chat] 未配置 openai 设置");
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
const client = this.getClient();
|
|
52
|
+
const model = options?.model || openaiConf.model;
|
|
53
|
+
|
|
54
|
+
if (shouldLog(options?.verbose, 1)) {
|
|
55
|
+
console.log(
|
|
56
|
+
`[LLMProxy.OpenAIAdapter.chat] 配置: Model=${model}, BaseURL=${openaiConf.baseUrl || "(默认)"}`,
|
|
57
|
+
);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
try {
|
|
61
|
+
const response = await client.chat.completions.create({
|
|
62
|
+
model,
|
|
63
|
+
messages: messages,
|
|
64
|
+
});
|
|
65
|
+
|
|
66
|
+
const content = response.choices[0]?.message?.content || "";
|
|
67
|
+
|
|
68
|
+
if (shouldLog(options?.verbose, 1)) {
|
|
69
|
+
console.log(
|
|
70
|
+
`[LLMProxy.OpenAIAdapter.chat] 响应: Model=${response.model}, Usage=${response.usage?.total_tokens} tokens`,
|
|
71
|
+
);
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
return {
|
|
75
|
+
content,
|
|
76
|
+
usage: response.usage
|
|
77
|
+
? {
|
|
78
|
+
promptTokens: response.usage.prompt_tokens,
|
|
79
|
+
completionTokens: response.usage.completion_tokens,
|
|
80
|
+
totalTokens: response.usage.total_tokens,
|
|
81
|
+
}
|
|
82
|
+
: undefined,
|
|
83
|
+
};
|
|
84
|
+
} catch (error: any) {
|
|
85
|
+
if (error instanceof OpenAI.APIError) {
|
|
86
|
+
throw new Error(
|
|
87
|
+
`[LLMProxy.OpenAIAdapter.chat] API 错误 (${error.status}): ${error.message}\n` +
|
|
88
|
+
`请检查:\n` +
|
|
89
|
+
`1. API Key 是否正确\n` +
|
|
90
|
+
`2. Base URL 是否正确\n` +
|
|
91
|
+
`3. 模型名称是否有效`,
|
|
92
|
+
);
|
|
93
|
+
}
|
|
94
|
+
throw error;
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
async *chatStream(
|
|
99
|
+
messages: LlmMessage[],
|
|
100
|
+
options?: LlmRequestOptions,
|
|
101
|
+
): AsyncIterable<LlmStreamEvent> {
|
|
102
|
+
const openaiConf = this.config.openai;
|
|
103
|
+
|
|
104
|
+
if (!openaiConf) {
|
|
105
|
+
yield { type: "error", message: "[LLMProxy.OpenAIAdapter.chatStream] 未配置 openai 设置" };
|
|
106
|
+
return;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
const client = this.getClient();
|
|
110
|
+
const model = options?.model || openaiConf.model;
|
|
111
|
+
|
|
112
|
+
if (shouldLog(options?.verbose, 1)) {
|
|
113
|
+
console.log(`[LLMProxy.OpenAIAdapter.chatStream] 配置: Model=${model}`);
|
|
114
|
+
}
|
|
115
|
+
try {
|
|
116
|
+
const stream = await client.chat.completions.create({
|
|
117
|
+
model,
|
|
118
|
+
messages: messages,
|
|
119
|
+
stream: true,
|
|
120
|
+
});
|
|
121
|
+
|
|
122
|
+
let fullContent = "";
|
|
123
|
+
|
|
124
|
+
for await (const chunk of stream) {
|
|
125
|
+
const delta = chunk.choices[0]?.delta?.content;
|
|
126
|
+
if (delta) {
|
|
127
|
+
yield { type: "text", content: delta };
|
|
128
|
+
fullContent += delta;
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
yield {
|
|
133
|
+
type: "result",
|
|
134
|
+
response: {
|
|
135
|
+
content: fullContent,
|
|
136
|
+
},
|
|
137
|
+
};
|
|
138
|
+
} catch (error: any) {
|
|
139
|
+
if (error instanceof OpenAI.APIError) {
|
|
140
|
+
yield {
|
|
141
|
+
type: "error",
|
|
142
|
+
message: `[LLMProxy.OpenAIAdapter.chatStream] API 错误 (${error.status}): ${error.message}`,
|
|
143
|
+
};
|
|
144
|
+
} else {
|
|
145
|
+
throw error;
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
isSupportJsonSchema(): boolean {
|
|
151
|
+
return false;
|
|
152
|
+
}
|
|
153
|
+
}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
/** Settings for the Claude Code adapter. */
export interface ClaudeAdapterConfig {
  model?: string;
  baseUrl?: string;
  authToken?: string;
}

/** Settings for the OpenAI adapter; apiKey and model are mandatory. */
export interface OpenAIAdapterConfig {
  model: string;
  baseUrl?: string;
  apiKey: string;
}

/** Settings for the OpenCode adapter. */
export interface OpenCodeAdapterConfig {
  model?: string;
  /** OpenCode server address; defaults to http://localhost:4096 */
  serverUrl?: string;
  /** Cloud-provider API address (written dynamically into opencode.json). */
  baseUrl?: string;
  apiKey?: string;
  providerID?: string;
}

/** Supported adapter identifiers. */
export type LLMMode = "claude-code" | "openai" | "gemini" | "open-code";

/** Root configuration consumed by LlmProxyModule; each section is optional. */
export interface LlmProxyConfig {
  defaultAdapter?: LLMMode;
  claudeCode?: ClaudeAdapterConfig;
  openai?: OpenAIAdapterConfig;
  openCode?: OpenCodeAdapterConfig;
}

// DI token for the config. NOTE(review): providers elsewhere in this package
// inject the string "LLM_PROXY_CONFIG", not this symbol — verify which token
// is intended before using this one.
export const LLM_PROXY_CONFIG = Symbol("LLM_PROXY_CONFIG");
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import type { LlmJsonPut } from "../../llm-jsonput";
|
|
2
|
+
import type { VerboseLevel } from "../../verbose";
|
|
3
|
+
|
|
4
|
+
/** Chat roles accepted by all adapters. */
export type LlmRole = "system" | "user" | "assistant";

/** A single message in a conversation. */
export interface LlmMessage {
  role: LlmRole;
  content: string;
}

/** Per-request options; all fields are optional overrides. */
export interface LlmRequestOptions {
  model?: string;
  jsonSchema?: LlmJsonPut;
  stream?: boolean;
  verbose?: VerboseLevel;
  allowedTools?: string[];
}

/** Token accounting reported by the provider, in camelCase. */
export interface LlmUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}

/** Final result of a chat call. */
export interface LlmResponse {
  content: string;
  // Present only when the request asked for structured (JSON schema) output.
  structuredOutput?: unknown;
  usage?: LlmUsage;
}

/**
 * Discriminated union of events emitted by streaming adapters; `type` is the
 * tag. `text` carries incremental content, `result` the final response, and
 * `error` a human-readable failure message. The remaining variants cover
 * tool/agent activity emitted by richer adapters.
 */
export type LlmStreamEvent =
  | { type: "text"; content: string }
  | {
      type: "tool_use";
      name: string;
      input: unknown;
      status?: string;
      output?: string;
      title?: string;
    }
  | { type: "thought"; content: string }
  | { type: "result"; response: LlmResponse }
  | { type: "error"; message: string }
  | { type: "agent"; name: string; source?: string }
  | { type: "subtask"; agent: string; prompt: string; description: string }
  | { type: "step_start"; snapshot?: string }
  | { type: "step_finish"; reason: string; tokens?: unknown; cost?: number }
  | { type: "reasoning"; content: string };
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
LlmMessage,
|
|
3
|
+
LlmRequestOptions,
|
|
4
|
+
LlmResponse,
|
|
5
|
+
LlmStreamEvent,
|
|
6
|
+
} from "./message.interface";
|
|
7
|
+
import type { VerboseLevel } from "../../verbose";
|
|
8
|
+
|
|
9
|
+
/** Options applied when creating a session. */
export interface SessionOptions {
  systemPrompt?: string;
  model?: string;
  verbose?: VerboseLevel;
}

/**
 * A stateful conversation bound to one adapter: messages sent through it
 * accumulate in the session's history.
 */
export interface LlmSession {
  readonly id: string;
  // Name of the adapter backing this session (e.g. "openai").
  readonly adapterName: string;

  /** Sends one user message and resolves with the full response. */
  send(content: string, options?: LlmRequestOptions): Promise<LlmResponse>;

  /** Streaming variant of {@link send}. */
  sendStream(content: string, options?: LlmRequestOptions): AsyncIterable<LlmStreamEvent>;

  /** Returns the accumulated conversation messages. */
  getHistory(): LlmMessage[];

  /** Discards the accumulated conversation messages. */
  clearHistory(): void;

  /** Replaces the session's system prompt. */
  setSystemPrompt(prompt: string): void;
}
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
import { Module, DynamicModule, Provider, Type } from "@nestjs/common";
|
|
2
|
+
import { LlmProxyService } from "./llm-proxy.service";
|
|
3
|
+
import { ClaudeCodeAdapter } from "./adapters/claude-code.adapter";
|
|
4
|
+
import { OpenAIAdapter } from "./adapters/openai.adapter";
|
|
5
|
+
import { ClaudeSetupModule } from "../claude-setup";
|
|
6
|
+
import type { LlmProxyConfig } from "./interfaces";
|
|
7
|
+
import { OpenCodeAdapter } from "./adapters";
|
|
8
|
+
|
|
9
|
+
/** Synchronous options for LlmProxyModule.forRoot; identical to the config shape. */
export interface LlmProxyModuleOptions extends LlmProxyConfig {}

/**
 * Async registration options for LlmProxyModule.forRootAsync. Exactly one of
 * `useFactory`, `useClass`, or `useExisting` should be provided.
 */
export interface LlmProxyModuleAsyncOptions {
  imports?: any[];
  useFactory?: (...args: any[]) => Promise<LlmProxyConfig> | LlmProxyConfig;
  // DI tokens injected into `useFactory`.
  inject?: any[];
  useClass?: Type<LlmProxyOptionsFactory>;
  useExisting?: Type<LlmProxyOptionsFactory>;
}

/** Factory contract for class-based async configuration. */
export interface LlmProxyOptionsFactory {
  createLlmProxyOptions(): Promise<LlmProxyConfig> | LlmProxyConfig;
}
|
|
22
|
+
|
|
23
|
+
@Module({})
|
|
24
|
+
export class LlmProxyModule {
|
|
25
|
+
static forRoot(options: LlmProxyModuleOptions): DynamicModule {
|
|
26
|
+
const resolvedOptions = this.resolveOpenCodeConfig(options);
|
|
27
|
+
return {
|
|
28
|
+
module: LlmProxyModule,
|
|
29
|
+
imports: [ClaudeSetupModule],
|
|
30
|
+
providers: [
|
|
31
|
+
{
|
|
32
|
+
provide: "LLM_PROXY_CONFIG",
|
|
33
|
+
useValue: resolvedOptions,
|
|
34
|
+
},
|
|
35
|
+
ClaudeCodeAdapter,
|
|
36
|
+
OpenAIAdapter,
|
|
37
|
+
OpenCodeAdapter,
|
|
38
|
+
LlmProxyService,
|
|
39
|
+
],
|
|
40
|
+
exports: [LlmProxyService, ClaudeCodeAdapter, OpenCodeAdapter, OpenAIAdapter],
|
|
41
|
+
};
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
static forRootAsync(options: LlmProxyModuleAsyncOptions): DynamicModule {
|
|
45
|
+
const asyncProviders = this.createAsyncProviders(options);
|
|
46
|
+
|
|
47
|
+
return {
|
|
48
|
+
module: LlmProxyModule,
|
|
49
|
+
imports: [...(options.imports || []), ClaudeSetupModule],
|
|
50
|
+
providers: [
|
|
51
|
+
...asyncProviders,
|
|
52
|
+
ClaudeCodeAdapter,
|
|
53
|
+
OpenAIAdapter,
|
|
54
|
+
OpenCodeAdapter,
|
|
55
|
+
LlmProxyService,
|
|
56
|
+
],
|
|
57
|
+
exports: [LlmProxyService, ClaudeCodeAdapter, OpenCodeAdapter, OpenAIAdapter],
|
|
58
|
+
};
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
private static createAsyncProviders(options: LlmProxyModuleAsyncOptions): Provider[] {
|
|
62
|
+
if (options.useFactory) {
|
|
63
|
+
const originalFactory = options.useFactory;
|
|
64
|
+
return [
|
|
65
|
+
{
|
|
66
|
+
provide: "LLM_PROXY_CONFIG",
|
|
67
|
+
useFactory: async (...args: any[]) => {
|
|
68
|
+
const config = await originalFactory(...args);
|
|
69
|
+
return this.resolveOpenCodeConfig(config);
|
|
70
|
+
},
|
|
71
|
+
inject: options.inject || [],
|
|
72
|
+
},
|
|
73
|
+
];
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
if (options.useClass) {
|
|
77
|
+
return [
|
|
78
|
+
{
|
|
79
|
+
provide: "LLM_PROXY_CONFIG",
|
|
80
|
+
useFactory: async (optionsFactory: LlmProxyOptionsFactory) =>
|
|
81
|
+
optionsFactory.createLlmProxyOptions(),
|
|
82
|
+
inject: [options.useClass],
|
|
83
|
+
},
|
|
84
|
+
{
|
|
85
|
+
provide: options.useClass,
|
|
86
|
+
useClass: options.useClass,
|
|
87
|
+
},
|
|
88
|
+
];
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
if (options.useExisting) {
|
|
92
|
+
return [
|
|
93
|
+
{
|
|
94
|
+
provide: "LLM_PROXY_CONFIG",
|
|
95
|
+
useFactory: async (optionsFactory: LlmProxyOptionsFactory) =>
|
|
96
|
+
optionsFactory.createLlmProxyOptions(),
|
|
97
|
+
inject: [options.useExisting],
|
|
98
|
+
},
|
|
99
|
+
];
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
return [];
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
private static resolveOpenCodeConfig(config: LlmProxyConfig): LlmProxyConfig {
|
|
106
|
+
if (!config.openCode) {
|
|
107
|
+
return config;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
const providerID = config.openCode.providerID || "openai";
|
|
111
|
+
let apiKey = config.openCode.apiKey;
|
|
112
|
+
let baseUrl = config.openCode.baseUrl;
|
|
113
|
+
let model = config.openCode.model;
|
|
114
|
+
|
|
115
|
+
// 根据 providerID 从对应的 adapter 配置中读取缺失的值
|
|
116
|
+
if (providerID === "openai" && config.openai) {
|
|
117
|
+
if (!apiKey) apiKey = config.openai.apiKey;
|
|
118
|
+
if (!baseUrl) baseUrl = config.openai.baseUrl;
|
|
119
|
+
if (!model) model = config.openai.model;
|
|
120
|
+
} else if (providerID === "anthropic" && config.claudeCode) {
|
|
121
|
+
if (!apiKey) apiKey = config.claudeCode.authToken;
|
|
122
|
+
if (!baseUrl) baseUrl = config.claudeCode.baseUrl;
|
|
123
|
+
if (!model) model = config.claudeCode.model;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
// 如果有任何值需要更新
|
|
127
|
+
if (
|
|
128
|
+
apiKey !== config.openCode.apiKey ||
|
|
129
|
+
baseUrl !== config.openCode.baseUrl ||
|
|
130
|
+
model !== config.openCode.model
|
|
131
|
+
) {
|
|
132
|
+
return {
|
|
133
|
+
...config,
|
|
134
|
+
openCode: { ...config.openCode, apiKey, baseUrl, model },
|
|
135
|
+
};
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
return config;
|
|
139
|
+
}
|
|
140
|
+
}
|