@nahisaho/katashiro-llm 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/dist/LLMClient.d.ts +64 -0
- package/dist/LLMClient.d.ts.map +1 -0
- package/dist/LLMClient.js +139 -0
- package/dist/LLMClient.js.map +1 -0
- package/dist/PromptManager.d.ts +66 -0
- package/dist/PromptManager.d.ts.map +1 -0
- package/dist/PromptManager.js +121 -0
- package/dist/PromptManager.js.map +1 -0
- package/dist/TokenCounter.d.ts +43 -0
- package/dist/TokenCounter.d.ts.map +1 -0
- package/dist/TokenCounter.js +100 -0
- package/dist/TokenCounter.js.map +1 -0
- package/dist/index.d.ts +12 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +17 -0
- package/dist/index.js.map +1 -0
- package/dist/providers/AzureOpenAILLMProvider.d.ts +82 -0
- package/dist/providers/AzureOpenAILLMProvider.d.ts.map +1 -0
- package/dist/providers/AzureOpenAILLMProvider.js +339 -0
- package/dist/providers/AzureOpenAILLMProvider.js.map +1 -0
- package/dist/providers/BaseLLMProvider.d.ts +51 -0
- package/dist/providers/BaseLLMProvider.d.ts.map +1 -0
- package/dist/providers/BaseLLMProvider.js +72 -0
- package/dist/providers/BaseLLMProvider.js.map +1 -0
- package/dist/providers/LLMFactory.d.ts +75 -0
- package/dist/providers/LLMFactory.d.ts.map +1 -0
- package/dist/providers/LLMFactory.js +149 -0
- package/dist/providers/LLMFactory.js.map +1 -0
- package/dist/providers/MockLLMProvider.d.ts +57 -0
- package/dist/providers/MockLLMProvider.d.ts.map +1 -0
- package/dist/providers/MockLLMProvider.js +120 -0
- package/dist/providers/MockLLMProvider.js.map +1 -0
- package/dist/providers/OllamaLLMProvider.d.ts +73 -0
- package/dist/providers/OllamaLLMProvider.d.ts.map +1 -0
- package/dist/providers/OllamaLLMProvider.js +242 -0
- package/dist/providers/OllamaLLMProvider.js.map +1 -0
- package/dist/providers/OpenAILLMProvider.d.ts +87 -0
- package/dist/providers/OpenAILLMProvider.d.ts.map +1 -0
- package/dist/providers/OpenAILLMProvider.js +349 -0
- package/dist/providers/OpenAILLMProvider.js.map +1 -0
- package/dist/providers/index.d.ts +17 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +19 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/types.d.ts +251 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +8 -0
- package/dist/types.js.map +1 -0
- package/package.json +51 -0
- package/src/LLMClient.ts +171 -0
- package/src/PromptManager.ts +156 -0
- package/src/TokenCounter.ts +114 -0
- package/src/index.ts +35 -0
- package/src/providers/AzureOpenAILLMProvider.ts +494 -0
- package/src/providers/BaseLLMProvider.ts +110 -0
- package/src/providers/LLMFactory.ts +216 -0
- package/src/providers/MockLLMProvider.ts +173 -0
- package/src/providers/OllamaLLMProvider.ts +322 -0
- package/src/providers/OpenAILLMProvider.ts +500 -0
- package/src/providers/index.ts +35 -0
- package/src/types.ts +268 -0
|
@@ -0,0 +1,500 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI LLM Provider
|
|
3
|
+
*
|
|
4
|
+
* OpenAI API provider for chat completions
|
|
5
|
+
*
|
|
6
|
+
* @requirement REQ-LLM-001
|
|
7
|
+
* @design DES-KATASHIRO-003-LLM
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import type { z, ZodType } from 'zod';
|
|
11
|
+
import type {
|
|
12
|
+
ProviderConfig,
|
|
13
|
+
GenerateRequest,
|
|
14
|
+
GenerateResponse,
|
|
15
|
+
StreamChunk,
|
|
16
|
+
Message,
|
|
17
|
+
TokenUsage,
|
|
18
|
+
ToolCall,
|
|
19
|
+
ToolDefinition,
|
|
20
|
+
FinishReason,
|
|
21
|
+
} from '../types.js';
|
|
22
|
+
import { BaseLLMProvider } from './BaseLLMProvider.js';
|
|
23
|
+
|
|
24
|
+
/**
 * OpenAI provider configuration.
 */
export interface OpenAIProviderConfig extends ProviderConfig {
  /** API key (when omitted, the constructor falls back to the OPENAI_API_KEY env var). */
  apiKey?: string;
  /** Base URL, for custom / OpenAI-compatible endpoints (default: https://api.openai.com/v1). */
  baseUrl?: string;
  /** Model name (default: gpt-4o-mini). */
  model?: string;
  /** Organization ID, sent as the OpenAI-Organization header when set. */
  organization?: string;
}
|
|
37
|
+
|
|
38
|
+
/**
 * OpenAI wire-format chat message (request and response shape).
 */
interface OpenAIMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  /** Text content; may be null (e.g. an assistant turn consisting only of tool calls). */
  content: string | null;
  /** Optional participant name. */
  name?: string;
  /** For role 'tool': id of the tool call this message responds to. */
  tool_call_id?: string;
  /** Function invocations requested by the assistant. */
  tool_calls?: Array<{
    id: string;
    type: 'function';
    function: {
      name: string;
      /** JSON-encoded argument string (not a parsed object). */
      arguments: string;
    };
  }>;
}
|
|
55
|
+
|
|
56
|
+
/**
 * OpenAI Chat Completion response (non-streaming).
 */
interface OpenAIChatResponse {
  id: string;
  object: 'chat.completion';
  /** Unix timestamp (seconds) of creation. */
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: OpenAIMessage;
    finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter';
  }>;
  /** Token accounting for the request/response pair. */
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
|
|
75
|
+
|
|
76
|
+
/**
 * OpenAI streaming chunk (one SSE `data:` payload).
 */
interface OpenAIStreamChunk {
  id: string;
  object: 'chat.completion.chunk';
  created: number;
  model: string;
  choices: Array<{
    index: number;
    /** Incremental delta; every field is optional per chunk. */
    delta: {
      role?: string;
      content?: string;
      /** Tool-call fragments; `arguments` arrives in pieces keyed by `index`. */
      tool_calls?: Array<{
        index: number;
        id?: string;
        type?: 'function';
        function?: {
          name?: string;
          arguments?: string;
        };
      }>;
    };
    /** Null until the final chunk of the choice. */
    finish_reason: string | null;
  }>;
  /** Present only when requested via stream_options.include_usage. */
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
|
|
107
|
+
|
|
108
|
+
/**
 * OpenAI LLM provider.
 *
 * Text generation via the OpenAI API (or any OpenAI-compatible API).
 *
 * @example
 * ```typescript
 * // OpenAI API
 * const provider = new OpenAILLMProvider({
 *   apiKey: process.env.OPENAI_API_KEY,
 *   model: 'gpt-4o',
 * });
 *
 * // OpenAI-compatible API (vLLM, LM Studio, etc.)
 * const localProvider = new OpenAILLMProvider({
 *   baseUrl: 'http://localhost:8000/v1',
 *   model: 'local-model',
 * });
 * ```
 */
export class OpenAILLMProvider extends BaseLLMProvider {
  readonly name = 'openai';
  readonly supportedModels = [
    'gpt-4o',
    'gpt-4o-mini',
    'gpt-4-turbo',
    'gpt-4',
    'gpt-3.5-turbo',
    'o1',
    'o1-mini',
    'o1-preview',
  ];

  private readonly apiKey: string;
  private readonly baseUrl: string;
  private readonly model: string;
  private readonly organization?: string;

  constructor(config: OpenAIProviderConfig = {}) {
    super(config);

    // Resolution order: explicit config → environment → empty string.
    // An empty key defers the failure to the first API call.
    this.apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? '';
    this.baseUrl = config.baseUrl ?? 'https://api.openai.com/v1';
    this.model = config.model ?? config.defaultModel ?? 'gpt-4o-mini';
    this.organization = config.organization;
  }

  protected getDefaultModel(): string {
    return 'gpt-4o-mini';
  }

  /**
   * Convert internal Message objects to the OpenAI wire format.
   * Non-string content is serialized with JSON.stringify.
   */
  private convertMessages(messages: Message[]): OpenAIMessage[] {
    return messages.map((msg) => {
      const converted: OpenAIMessage = {
        role: msg.role,
        content:
          typeof msg.content === 'string'
            ? msg.content
            : JSON.stringify(msg.content),
      };

      // Optional fields are copied only when present, keeping the payload minimal.
      if (msg.name) converted.name = msg.name;
      if (msg.toolCallId) converted.tool_call_id = msg.toolCallId;
      if (msg.toolCalls) {
        converted.tool_calls = msg.toolCalls.map((tc) => ({
          id: tc.id,
          type: tc.type,
          function: tc.function,
        }));
      }

      return converted;
    });
  }

  /**
   * Convert internal tool definitions to the OpenAI `tools` payload.
   * Returns undefined when no tools were supplied.
   */
  private convertTools(tools?: ToolDefinition[]): unknown[] | undefined {
    if (!tools) return undefined;

    return tools.map((tool) => ({
      type: tool.type,
      function: {
        name: tool.function.name,
        description: tool.function.description,
        parameters: tool.function.parameters,
      },
    }));
  }

  /**
   * Map an OpenAI finish_reason string to the internal FinishReason.
   * Unknown or null reasons fall back to 'stop'.
   */
  private convertFinishReason(
    reason: string | null
  ): FinishReason {
    switch (reason) {
      case 'stop':
        return 'stop';
      case 'length':
        return 'length';
      case 'tool_calls':
        return 'tool_calls';
      case 'content_filter':
        return 'content_filter';
      default:
        return 'stop';
    }
  }

  /**
   * Single-shot text generation via POST {baseUrl}/chat/completions.
   *
   * @param request Generation request; request.model overrides the configured model.
   * @returns Normalized response with content, optional tool calls, usage, and finish reason.
   * @throws Error when the HTTP response is not OK, or when no choices are returned.
   */
  async generate(request: GenerateRequest): Promise<GenerateResponse> {
    const url = `${this.baseUrl}/chat/completions`;
    const model = request.model ?? this.model;

    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${this.apiKey}`,
    };

    if (this.organization) {
      headers['OpenAI-Organization'] = this.organization;
    }

    const body: Record<string, unknown> = {
      model,
      messages: this.convertMessages(request.messages),
      temperature: request.temperature,
      max_tokens: request.maxTokens,
      top_p: request.topP,
      stop: request.stopSequences,
      user: request.user,
    };

    if (request.tools) {
      body.tools = this.convertTools(request.tools);
      body.tool_choice = request.toolChoice;
    }

    if (request.responseFormat) {
      body.response_format = {
        type: request.responseFormat.type,
      };
    }

    // Abort the HTTP request after the configured timeout (default 30s).
    const controller = new AbortController();
    const timeoutId = setTimeout(
      () => controller.abort(),
      this.config.timeout ?? 30000
    );

    try {
      const response = await fetch(url, {
        method: 'POST',
        headers,
        body: JSON.stringify(body),
        signal: controller.signal,
      });

      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`OpenAI API error: ${response.status} - ${errorText}`);
      }

      const data = (await response.json()) as OpenAIChatResponse;
      const choice = data.choices[0];

      if (!choice) {
        throw new Error('No choices returned from OpenAI API');
      }

      const toolCalls: ToolCall[] | undefined = choice.message.tool_calls?.map(
        (tc) => ({
          id: tc.id,
          type: tc.type,
          function: tc.function,
        })
      );

      const usage: TokenUsage = {
        promptTokens: data.usage.prompt_tokens,
        completionTokens: data.usage.completion_tokens,
        totalTokens: data.usage.total_tokens,
      };

      return {
        id: data.id,
        model: data.model,
        // content is null when the assistant replies with tool calls only.
        content: choice.message.content ?? '',
        toolCalls,
        usage,
        finishReason: this.convertFinishReason(choice.finish_reason),
      };
    } finally {
      clearTimeout(timeoutId);
    }
  }

  /**
   * Streaming generation via SSE (stream: true).
   *
   * Yields 'content' chunks as deltas arrive, a 'usage' chunk when the server
   * reports usage (requested via stream_options.include_usage), accumulated
   * 'tool_call' chunks once finish_reason === 'tool_calls', and 'done' on [DONE].
   *
   * @throws Error when the HTTP response is not OK or has no body.
   */
  async *generateStream(request: GenerateRequest): AsyncGenerator<StreamChunk> {
    const url = `${this.baseUrl}/chat/completions`;
    const model = request.model ?? this.model;

    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${this.apiKey}`,
    };

    if (this.organization) {
      headers['OpenAI-Organization'] = this.organization;
    }

    const body: Record<string, unknown> = {
      model,
      messages: this.convertMessages(request.messages),
      temperature: request.temperature,
      max_tokens: request.maxTokens,
      top_p: request.topP,
      stop: request.stopSequences,
      user: request.user,
      stream: true,
      // Ask the server to append a usage chunk at the end of the stream.
      stream_options: { include_usage: true },
    };

    if (request.tools) {
      body.tools = this.convertTools(request.tools);
      body.tool_choice = request.toolChoice;
    }

    // NOTE(review): unlike generate(), no timeout/AbortController is applied
    // here — confirm this is intentional for long-lived streams.
    const response = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
    });

    if (!response.ok || !response.body) {
      const errorText = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${errorText}`);
    }

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';

    // Accumulate tool calls across chunks: arguments arrive in fragments
    // keyed by the tool call's index within the choice.
    const toolCallsMap = new Map<
      number,
      { id: string; name: string; arguments: string }
    >();

    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        // SSE frames are newline-delimited; the trailing partial line stays
        // in the buffer until the next read completes it.
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() ?? '';

        for (const line of lines) {
          if (!line.startsWith('data: ')) continue;
          const data = line.slice(6).trim();
          if (data === '[DONE]') {
            yield { type: 'done' };
            continue;
          }

          try {
            const chunk = JSON.parse(data) as OpenAIStreamChunk;
            const delta = chunk.choices[0]?.delta;

            if (delta?.content) {
              yield {
                type: 'content',
                content: delta.content,
              };
            }

            if (delta?.tool_calls) {
              for (const tc of delta.tool_calls) {
                const existing = toolCallsMap.get(tc.index) ?? {
                  id: '',
                  name: '',
                  arguments: '',
                };
                if (tc.id) existing.id = tc.id;
                if (tc.function?.name) existing.name = tc.function.name;
                if (tc.function?.arguments) {
                  existing.arguments += tc.function.arguments;
                }
                toolCallsMap.set(tc.index, existing);
              }
            }

            if (chunk.usage) {
              yield {
                type: 'usage',
                usage: {
                  promptTokens: chunk.usage.prompt_tokens,
                  completionTokens: chunk.usage.completion_tokens,
                  totalTokens: chunk.usage.total_tokens,
                },
              };
            }

            // When tool calls are complete, emit the accumulated calls.
            const finishReason = chunk.choices[0]?.finish_reason;
            if (finishReason === 'tool_calls') {
              for (const [, tc] of toolCallsMap) {
                yield {
                  type: 'tool_call',
                  toolCall: {
                    id: tc.id,
                    type: 'function',
                    function: {
                      name: tc.name,
                      arguments: tc.arguments,
                    },
                  },
                };
              }
            }
          } catch {
            // JSON parse error - skip
            // NOTE(review): this catch also swallows any error raised while
            // handling an already-parsed chunk, not only parse failures —
            // confirm that is acceptable.
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }

  /**
   * Structured output generation: appends the JSON schema as an extra user
   * message, requests json_object response format, and validates the model's
   * output with the given Zod schema.
   *
   * @throws Error if the model output is not valid JSON, or a ZodError if it
   *         fails schema.parse.
   */
  override async generateStructured<T extends ZodType>(
    request: GenerateRequest,
    schema: T
  ): Promise<z.infer<T>> {
    const jsonSchema = this.zodToJsonSchema(schema);

    const enhancedRequest: GenerateRequest = {
      ...request,
      responseFormat: { type: 'json_object' },
      messages: [
        ...request.messages,
        {
          role: 'user',
          content: `Respond with valid JSON matching this schema:\n${JSON.stringify(jsonSchema, null, 2)}`,
        },
      ],
    };

    const response = await this.generate(enhancedRequest);
    const parsed = JSON.parse(response.content);
    return schema.parse(parsed);
  }

  /**
   * Approximate token count (tiktoken-like heuristic).
   * ASCII text: ~4 chars/token; non-ASCII (e.g. Japanese): ~2 chars/token.
   * The model parameter is currently ignored.
   */
  override async countTokens(text: string, _model?: string): Promise<number> {
    const englishChars = text.replace(/[^\x00-\x7F]/g, '').length;
    const nonEnglishChars = text.length - englishChars;
    return Math.ceil(englishChars / 4 + nonEnglishChars / 2);
  }
}
|
|
485
|
+
|
|
486
|
+
/**
|
|
487
|
+
* OpenAI互換プロバイダー(ファクトリ関数)
|
|
488
|
+
*/
|
|
489
|
+
export function createOpenAICompatibleLLMProvider(
|
|
490
|
+
baseUrl: string,
|
|
491
|
+
model: string,
|
|
492
|
+
config: Partial<OpenAIProviderConfig> = {}
|
|
493
|
+
): OpenAILLMProvider {
|
|
494
|
+
return new OpenAILLMProvider({
|
|
495
|
+
...config,
|
|
496
|
+
baseUrl,
|
|
497
|
+
model,
|
|
498
|
+
apiKey: config.apiKey ?? 'not-required',
|
|
499
|
+
});
|
|
500
|
+
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
/**
 * LLM Providers Index
 *
 * Barrel module re-exporting every provider implementation, the provider
 * factory helpers, and their configuration types.
 *
 * @requirement REQ-LLM-001
 * @design DES-KATASHIRO-003-LLM
 */

// Base class all providers extend
export { BaseLLMProvider } from './BaseLLMProvider.js';

// Mock provider (for tests / offline use)
export { MockLLMProvider, type MockProviderConfig } from './MockLLMProvider.js';

// Ollama
export { OllamaLLMProvider } from './OllamaLLMProvider.js';
export type { OllamaProviderConfig } from './OllamaLLMProvider.js';

// OpenAI (and OpenAI-compatible endpoints)
export { OpenAILLMProvider, createOpenAICompatibleLLMProvider } from './OpenAILLMProvider.js';
export type { OpenAIProviderConfig } from './OpenAILLMProvider.js';

// Azure OpenAI
export { AzureOpenAILLMProvider } from './AzureOpenAILLMProvider.js';
export type { AzureOpenAIProviderConfig } from './AzureOpenAILLMProvider.js';

// Factory helpers for constructing providers by type name
export {
  LLMFactory,
  createLLMProvider,
  getDefaultLLMProvider,
} from './LLMFactory.js';
export type {
  LLMProviderType,
  LLMProviderConfigMap,
} from './LLMFactory.js';
|