@twelvehart/supermemory-runtime 1.0.0-next.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +57 -0
- package/README.md +374 -0
- package/dist/index.js +189 -0
- package/dist/mcp/index.js +1132 -0
- package/docker-compose.prod.yml +91 -0
- package/docker-compose.yml +358 -0
- package/drizzle/0000_dapper_the_professor.sql +159 -0
- package/drizzle/0001_api_keys.sql +51 -0
- package/drizzle/meta/0000_snapshot.json +1532 -0
- package/drizzle/meta/_journal.json +13 -0
- package/drizzle.config.ts +20 -0
- package/package.json +114 -0
- package/scripts/add-extraction-job.ts +122 -0
- package/scripts/benchmark-pgvector.ts +122 -0
- package/scripts/bootstrap.sh +209 -0
- package/scripts/check-runtime-pack.ts +111 -0
- package/scripts/claude-mcp-config.ts +336 -0
- package/scripts/docker-entrypoint.sh +183 -0
- package/scripts/doctor.ts +377 -0
- package/scripts/init-db.sql +33 -0
- package/scripts/install.sh +1110 -0
- package/scripts/mcp-setup.ts +271 -0
- package/scripts/migrations/001_create_pgvector_extension.sql +31 -0
- package/scripts/migrations/002_create_memory_embeddings_table.sql +75 -0
- package/scripts/migrations/003_create_hnsw_index.sql +94 -0
- package/scripts/migrations/004_create_memory_embeddings_standalone.sql +70 -0
- package/scripts/migrations/005_create_chunks_table.sql +95 -0
- package/scripts/migrations/006_create_processing_queue.sql +45 -0
- package/scripts/migrations/generate_test_data.sql +42 -0
- package/scripts/migrations/phase1_comprehensive_test.sql +204 -0
- package/scripts/migrations/run_migrations.sh +286 -0
- package/scripts/migrations/test_hnsw_index.sql +255 -0
- package/scripts/pre-commit-secrets +282 -0
- package/scripts/run-extraction-worker.ts +46 -0
- package/scripts/run-phase1-tests.sh +291 -0
- package/scripts/setup.ts +222 -0
- package/scripts/smoke-install.sh +12 -0
- package/scripts/test-health-endpoint.sh +328 -0
- package/src/api/index.ts +2 -0
- package/src/api/middleware/auth.ts +80 -0
- package/src/api/middleware/csrf.ts +308 -0
- package/src/api/middleware/errorHandler.ts +166 -0
- package/src/api/middleware/rateLimit.ts +360 -0
- package/src/api/middleware/validation.ts +514 -0
- package/src/api/routes/documents.ts +286 -0
- package/src/api/routes/profiles.ts +237 -0
- package/src/api/routes/search.ts +71 -0
- package/src/api/stores/index.ts +58 -0
- package/src/config/bootstrap-env.ts +3 -0
- package/src/config/env.ts +71 -0
- package/src/config/feature-flags.ts +25 -0
- package/src/config/index.ts +140 -0
- package/src/config/secrets.config.ts +291 -0
- package/src/db/client.ts +92 -0
- package/src/db/index.ts +73 -0
- package/src/db/postgres.ts +72 -0
- package/src/db/schema/chunks.schema.ts +31 -0
- package/src/db/schema/containers.schema.ts +46 -0
- package/src/db/schema/documents.schema.ts +49 -0
- package/src/db/schema/embeddings.schema.ts +32 -0
- package/src/db/schema/index.ts +11 -0
- package/src/db/schema/memories.schema.ts +72 -0
- package/src/db/schema/profiles.schema.ts +34 -0
- package/src/db/schema/queue.schema.ts +59 -0
- package/src/db/schema/relationships.schema.ts +42 -0
- package/src/db/schema.ts +223 -0
- package/src/db/worker-connection.ts +47 -0
- package/src/index.ts +235 -0
- package/src/mcp/CLAUDE.md +1 -0
- package/src/mcp/index.ts +1380 -0
- package/src/mcp/legacyState.ts +22 -0
- package/src/mcp/rateLimit.ts +358 -0
- package/src/mcp/resources.ts +309 -0
- package/src/mcp/results.ts +104 -0
- package/src/mcp/tools.ts +401 -0
- package/src/queues/config.ts +119 -0
- package/src/queues/index.ts +289 -0
- package/src/sdk/client.ts +225 -0
- package/src/sdk/errors.ts +266 -0
- package/src/sdk/http.ts +560 -0
- package/src/sdk/index.ts +244 -0
- package/src/sdk/resources/base.ts +65 -0
- package/src/sdk/resources/connections.ts +204 -0
- package/src/sdk/resources/documents.ts +163 -0
- package/src/sdk/resources/index.ts +10 -0
- package/src/sdk/resources/memories.ts +150 -0
- package/src/sdk/resources/search.ts +60 -0
- package/src/sdk/resources/settings.ts +36 -0
- package/src/sdk/types.ts +674 -0
- package/src/services/chunking/index.ts +451 -0
- package/src/services/chunking.service.ts +650 -0
- package/src/services/csrf.service.ts +252 -0
- package/src/services/documents.repository.ts +219 -0
- package/src/services/documents.service.ts +191 -0
- package/src/services/embedding.service.ts +404 -0
- package/src/services/extraction.service.ts +300 -0
- package/src/services/extractors/code.extractor.ts +451 -0
- package/src/services/extractors/index.ts +9 -0
- package/src/services/extractors/markdown.extractor.ts +461 -0
- package/src/services/extractors/pdf.extractor.ts +315 -0
- package/src/services/extractors/text.extractor.ts +118 -0
- package/src/services/extractors/url.extractor.ts +243 -0
- package/src/services/index.ts +235 -0
- package/src/services/ingestion.service.ts +177 -0
- package/src/services/llm/anthropic.ts +400 -0
- package/src/services/llm/base.ts +460 -0
- package/src/services/llm/contradiction-detector.service.ts +526 -0
- package/src/services/llm/heuristics.ts +148 -0
- package/src/services/llm/index.ts +309 -0
- package/src/services/llm/memory-classifier.service.ts +383 -0
- package/src/services/llm/memory-extension-detector.service.ts +523 -0
- package/src/services/llm/mock.ts +470 -0
- package/src/services/llm/openai.ts +398 -0
- package/src/services/llm/prompts.ts +438 -0
- package/src/services/llm/types.ts +373 -0
- package/src/services/memory.repository.ts +1769 -0
- package/src/services/memory.service.ts +1338 -0
- package/src/services/memory.types.ts +234 -0
- package/src/services/persistence/index.ts +295 -0
- package/src/services/pipeline.service.ts +509 -0
- package/src/services/profile.repository.ts +436 -0
- package/src/services/profile.service.ts +560 -0
- package/src/services/profile.types.ts +270 -0
- package/src/services/relationships/detector.ts +1128 -0
- package/src/services/relationships/index.ts +268 -0
- package/src/services/relationships/memory-integration.ts +459 -0
- package/src/services/relationships/strategies.ts +132 -0
- package/src/services/relationships/types.ts +370 -0
- package/src/services/search.service.ts +761 -0
- package/src/services/search.types.ts +220 -0
- package/src/services/secrets.service.ts +384 -0
- package/src/services/vectorstore/base.ts +327 -0
- package/src/services/vectorstore/index.ts +444 -0
- package/src/services/vectorstore/memory.ts +286 -0
- package/src/services/vectorstore/migration.ts +295 -0
- package/src/services/vectorstore/mock.ts +403 -0
- package/src/services/vectorstore/pgvector.ts +695 -0
- package/src/services/vectorstore/types.ts +247 -0
- package/src/startup.ts +389 -0
- package/src/types/api.types.ts +193 -0
- package/src/types/document.types.ts +103 -0
- package/src/types/index.ts +241 -0
- package/src/types/profile.base.ts +133 -0
- package/src/utils/errors.ts +447 -0
- package/src/utils/id.ts +15 -0
- package/src/utils/index.ts +101 -0
- package/src/utils/logger.ts +313 -0
- package/src/utils/sanitization.ts +501 -0
- package/src/utils/secret-validation.ts +273 -0
- package/src/utils/synonyms.ts +188 -0
- package/src/utils/validation.ts +581 -0
- package/src/workers/chunking.worker.ts +242 -0
- package/src/workers/embedding.worker.ts +358 -0
- package/src/workers/extraction.worker.ts +346 -0
- package/src/workers/indexing.worker.ts +505 -0
- package/tsconfig.json +38 -0
|
@@ -0,0 +1,398 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI LLM Provider
|
|
3
|
+
*
|
|
4
|
+
* Implements LLM-based memory extraction using OpenAI's GPT models.
|
|
5
|
+
* Uses JSON mode for reliable structured output.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import OpenAI from 'openai'
|
|
9
|
+
import { getLogger } from '../../utils/logger.js'
|
|
10
|
+
import { BaseLLMProvider, LLMError } from './base.js'
|
|
11
|
+
import type {
|
|
12
|
+
OpenAILLMConfig,
|
|
13
|
+
LLMProviderType,
|
|
14
|
+
ExtractedMemory,
|
|
15
|
+
DetectedRelationship,
|
|
16
|
+
ExtractionOptions,
|
|
17
|
+
RelationshipDetectionOptions,
|
|
18
|
+
} from './types.js'
|
|
19
|
+
import { LLMErrorCode } from './types.js'
|
|
20
|
+
import type { MemoryType } from '../../types/index.js'
|
|
21
|
+
import {
|
|
22
|
+
MEMORY_EXTRACTION_SYSTEM_PROMPT,
|
|
23
|
+
MEMORY_EXTRACTION_EXAMPLES,
|
|
24
|
+
RELATIONSHIP_DETECTION_SYSTEM_PROMPT,
|
|
25
|
+
RELATIONSHIP_DETECTION_EXAMPLES,
|
|
26
|
+
generateExtractionPrompt,
|
|
27
|
+
generateRelationshipPrompt,
|
|
28
|
+
normalizeJsonResponse,
|
|
29
|
+
parseExtractionResponse,
|
|
30
|
+
parseRelationshipResponse,
|
|
31
|
+
} from './prompts.js'
|
|
32
|
+
|
|
33
|
+
// Module-level logger scoped to this provider for diagnostics.
const logger = getLogger('OpenAIProvider')
|
|
34
|
+
|
|
35
|
+
// ============================================================================
|
|
36
|
+
// Default Configuration
|
|
37
|
+
// ============================================================================
|
|
38
|
+
|
|
39
|
+
/**
 * Baseline configuration merged under any caller-supplied OpenAI config.
 * Matching keys in the constructor's config argument override these values.
 */
const DEFAULT_OPENAI_CONFIG: Partial<OpenAILLMConfig> = {
  model: 'gpt-4o-mini',
  maxTokens: 2000,
  temperature: 0.1, // low temperature: structured JSON output should be near-deterministic
  timeoutMs: 30000,
  maxRetries: 3,
  retryDelayMs: 1000,
}
|
|
47
|
+
|
|
48
|
+
// ============================================================================
|
|
49
|
+
// OpenAI Provider Implementation
|
|
50
|
+
// ============================================================================
|
|
51
|
+
|
|
52
|
+
export class OpenAILLMProvider extends BaseLLMProvider {
|
|
53
|
+
readonly type: LLMProviderType = 'openai'
|
|
54
|
+
|
|
55
|
+
private client: OpenAI | null = null
|
|
56
|
+
private readonly apiKey?: string
|
|
57
|
+
private readonly model: string
|
|
58
|
+
private readonly baseUrl?: string
|
|
59
|
+
private readonly organization?: string
|
|
60
|
+
|
|
61
|
+
constructor(config: OpenAILLMConfig) {
|
|
62
|
+
super({
|
|
63
|
+
...DEFAULT_OPENAI_CONFIG,
|
|
64
|
+
...config,
|
|
65
|
+
})
|
|
66
|
+
|
|
67
|
+
this.apiKey = config.apiKey
|
|
68
|
+
this.model = config.model ?? DEFAULT_OPENAI_CONFIG.model!
|
|
69
|
+
this.baseUrl = config.baseUrl
|
|
70
|
+
this.organization = config.organization
|
|
71
|
+
|
|
72
|
+
if (this.apiKey) {
|
|
73
|
+
this.client = new OpenAI({
|
|
74
|
+
apiKey: this.apiKey,
|
|
75
|
+
baseURL: this.baseUrl,
|
|
76
|
+
organization: this.organization,
|
|
77
|
+
timeout: this.config.timeoutMs,
|
|
78
|
+
maxRetries: 0, // We handle retries ourselves
|
|
79
|
+
})
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
logger.debug('OpenAI provider initialized', {
|
|
83
|
+
model: this.model,
|
|
84
|
+
hasApiKey: !!this.apiKey,
|
|
85
|
+
})
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
// ============================================================================
|
|
89
|
+
// Availability Check
|
|
90
|
+
// ============================================================================
|
|
91
|
+
|
|
92
|
+
isAvailable(): boolean {
|
|
93
|
+
return !!this.client && !!this.apiKey
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
// ============================================================================
|
|
97
|
+
// Memory Extraction
|
|
98
|
+
// ============================================================================
|
|
99
|
+
|
|
100
|
+
protected async doExtractMemories(
|
|
101
|
+
text: string,
|
|
102
|
+
options: ExtractionOptions
|
|
103
|
+
): Promise<{
|
|
104
|
+
memories: ExtractedMemory[]
|
|
105
|
+
rawResponse?: string
|
|
106
|
+
tokensUsed?: { prompt: number; completion: number; total: number }
|
|
107
|
+
}> {
|
|
108
|
+
if (!this.client) {
|
|
109
|
+
throw LLMError.providerUnavailable('openai')
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
const userPrompt = generateExtractionPrompt(text, options)
|
|
113
|
+
|
|
114
|
+
try {
|
|
115
|
+
const response = await this.client.chat.completions.create({
|
|
116
|
+
model: this.model,
|
|
117
|
+
messages: [
|
|
118
|
+
{
|
|
119
|
+
role: 'system',
|
|
120
|
+
content: `${MEMORY_EXTRACTION_SYSTEM_PROMPT}\n\n${MEMORY_EXTRACTION_EXAMPLES}`,
|
|
121
|
+
},
|
|
122
|
+
{
|
|
123
|
+
role: 'user',
|
|
124
|
+
content: userPrompt,
|
|
125
|
+
},
|
|
126
|
+
],
|
|
127
|
+
response_format: { type: 'json_object' },
|
|
128
|
+
temperature: this.config.temperature,
|
|
129
|
+
max_tokens: this.config.maxTokens,
|
|
130
|
+
})
|
|
131
|
+
|
|
132
|
+
const rawResponse = response.choices[0]?.message?.content
|
|
133
|
+
|
|
134
|
+
if (!rawResponse) {
|
|
135
|
+
throw LLMError.invalidResponse('openai', 'Empty response from model')
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
const parsed = parseExtractionResponse(rawResponse)
|
|
139
|
+
|
|
140
|
+
// Filter by confidence if specified
|
|
141
|
+
let memories: ExtractedMemory[] = parsed.memories.map((m) => ({
|
|
142
|
+
content: m.content,
|
|
143
|
+
type: m.type,
|
|
144
|
+
confidence: m.confidence,
|
|
145
|
+
entities: m.entities.map((e) => ({
|
|
146
|
+
name: e.name,
|
|
147
|
+
type: e.type as 'person' | 'place' | 'organization' | 'date' | 'concept' | 'other',
|
|
148
|
+
mentions: 1,
|
|
149
|
+
})),
|
|
150
|
+
keywords: m.keywords,
|
|
151
|
+
}))
|
|
152
|
+
|
|
153
|
+
if (options.minConfidence) {
|
|
154
|
+
memories = memories.filter((m) => m.confidence >= options.minConfidence!)
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
if (options.maxMemories) {
|
|
158
|
+
memories = memories.slice(0, options.maxMemories)
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
return {
|
|
162
|
+
memories,
|
|
163
|
+
rawResponse,
|
|
164
|
+
tokensUsed: response.usage
|
|
165
|
+
? {
|
|
166
|
+
prompt: response.usage.prompt_tokens,
|
|
167
|
+
completion: response.usage.completion_tokens,
|
|
168
|
+
total: response.usage.total_tokens,
|
|
169
|
+
}
|
|
170
|
+
: undefined,
|
|
171
|
+
}
|
|
172
|
+
} catch (error) {
|
|
173
|
+
throw this.handleOpenAIError(error)
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
// ============================================================================
|
|
178
|
+
// Relationship Detection
|
|
179
|
+
// ============================================================================
|
|
180
|
+
|
|
181
|
+
protected async doDetectRelationships(
|
|
182
|
+
newMemory: { id: string; content: string; type: MemoryType },
|
|
183
|
+
existingMemories: Array<{ id: string; content: string; type: MemoryType }>,
|
|
184
|
+
options: RelationshipDetectionOptions
|
|
185
|
+
): Promise<{
|
|
186
|
+
relationships: DetectedRelationship[]
|
|
187
|
+
supersededMemoryIds: string[]
|
|
188
|
+
}> {
|
|
189
|
+
if (!this.client) {
|
|
190
|
+
throw LLMError.providerUnavailable('openai')
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
// If no existing memories, return empty
|
|
194
|
+
if (existingMemories.length === 0) {
|
|
195
|
+
return { relationships: [], supersededMemoryIds: [] }
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
const userPrompt = generateRelationshipPrompt(newMemory, existingMemories, options)
|
|
199
|
+
|
|
200
|
+
try {
|
|
201
|
+
const response = await this.client.chat.completions.create({
|
|
202
|
+
model: this.model,
|
|
203
|
+
messages: [
|
|
204
|
+
{
|
|
205
|
+
role: 'system',
|
|
206
|
+
content: `${RELATIONSHIP_DETECTION_SYSTEM_PROMPT}\n\n${RELATIONSHIP_DETECTION_EXAMPLES}`,
|
|
207
|
+
},
|
|
208
|
+
{
|
|
209
|
+
role: 'user',
|
|
210
|
+
content: userPrompt,
|
|
211
|
+
},
|
|
212
|
+
],
|
|
213
|
+
response_format: { type: 'json_object' },
|
|
214
|
+
temperature: this.config.temperature,
|
|
215
|
+
max_tokens: this.config.maxTokens,
|
|
216
|
+
})
|
|
217
|
+
|
|
218
|
+
const rawResponse = response.choices[0]?.message?.content
|
|
219
|
+
|
|
220
|
+
if (!rawResponse) {
|
|
221
|
+
throw LLMError.invalidResponse('openai', 'Empty response from model')
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
const parsed = parseRelationshipResponse(rawResponse)
|
|
225
|
+
|
|
226
|
+
// Filter and validate relationships
|
|
227
|
+
let relationships: DetectedRelationship[] = parsed.relationships.map((r) => ({
|
|
228
|
+
sourceMemoryId: r.sourceMemoryId,
|
|
229
|
+
targetMemoryId: r.targetMemoryId,
|
|
230
|
+
type: r.type as DetectedRelationship['type'],
|
|
231
|
+
confidence: r.confidence,
|
|
232
|
+
reason: r.reason,
|
|
233
|
+
}))
|
|
234
|
+
|
|
235
|
+
if (options.minConfidence) {
|
|
236
|
+
relationships = relationships.filter((r) => r.confidence >= options.minConfidence!)
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
if (options.maxRelationships) {
|
|
240
|
+
relationships = relationships.slice(0, options.maxRelationships)
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
return {
|
|
244
|
+
relationships,
|
|
245
|
+
supersededMemoryIds: parsed.supersededMemoryIds,
|
|
246
|
+
}
|
|
247
|
+
} catch (error) {
|
|
248
|
+
throw this.handleOpenAIError(error)
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
// ============================================================================
|
|
253
|
+
// Generic JSON Task
|
|
254
|
+
// ============================================================================
|
|
255
|
+
|
|
256
|
+
protected async doGenerateJson(
|
|
257
|
+
systemPrompt: string,
|
|
258
|
+
userPrompt: string
|
|
259
|
+
): Promise<{
|
|
260
|
+
rawResponse: string
|
|
261
|
+
tokensUsed?: { prompt: number; completion: number; total: number }
|
|
262
|
+
}> {
|
|
263
|
+
if (!this.client) {
|
|
264
|
+
throw LLMError.providerUnavailable('openai')
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
try {
|
|
268
|
+
const response = await this.client.chat.completions.create({
|
|
269
|
+
model: this.model,
|
|
270
|
+
messages: [
|
|
271
|
+
{ role: 'system', content: systemPrompt },
|
|
272
|
+
{ role: 'user', content: userPrompt },
|
|
273
|
+
],
|
|
274
|
+
response_format: { type: 'json_object' },
|
|
275
|
+
temperature: this.config.temperature,
|
|
276
|
+
max_tokens: this.config.maxTokens,
|
|
277
|
+
})
|
|
278
|
+
|
|
279
|
+
const rawResponse = response.choices[0]?.message?.content
|
|
280
|
+
if (!rawResponse) {
|
|
281
|
+
throw LLMError.invalidResponse('openai', 'Empty response from model')
|
|
282
|
+
}
|
|
283
|
+
const normalized = normalizeJsonResponse(rawResponse)
|
|
284
|
+
|
|
285
|
+
return {
|
|
286
|
+
rawResponse: normalized,
|
|
287
|
+
tokensUsed: response.usage
|
|
288
|
+
? {
|
|
289
|
+
prompt: response.usage.prompt_tokens,
|
|
290
|
+
completion: response.usage.completion_tokens,
|
|
291
|
+
total: response.usage.total_tokens,
|
|
292
|
+
}
|
|
293
|
+
: undefined,
|
|
294
|
+
}
|
|
295
|
+
} catch (error) {
|
|
296
|
+
throw this.handleOpenAIError(error)
|
|
297
|
+
}
|
|
298
|
+
}
|
|
299
|
+
|
|
300
|
+
// ============================================================================
|
|
301
|
+
// Error Handling
|
|
302
|
+
// ============================================================================
|
|
303
|
+
|
|
304
|
+
private handleOpenAIError(error: unknown): LLMError {
|
|
305
|
+
if (error instanceof LLMError) {
|
|
306
|
+
return error
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
// Check for OpenAI API errors by checking error structure
|
|
310
|
+
if (this.isOpenAIApiError(error)) {
|
|
311
|
+
const status = error.status
|
|
312
|
+
const message = error.message
|
|
313
|
+
|
|
314
|
+
// Rate limiting
|
|
315
|
+
if (status === 429) {
|
|
316
|
+
const retryAfter = this.parseRetryAfter(error)
|
|
317
|
+
return LLMError.rateLimited('openai', retryAfter)
|
|
318
|
+
}
|
|
319
|
+
|
|
320
|
+
// Authentication errors
|
|
321
|
+
if (status === 401) {
|
|
322
|
+
return LLMError.invalidApiKey('openai')
|
|
323
|
+
}
|
|
324
|
+
|
|
325
|
+
// Content filtering
|
|
326
|
+
if (status === 400 && message.includes('content_filter')) {
|
|
327
|
+
return new LLMError('Content was filtered by OpenAI', LLMErrorCode.CONTENT_FILTERED, 'openai', false)
|
|
328
|
+
}
|
|
329
|
+
|
|
330
|
+
// Token limit
|
|
331
|
+
if (status === 400 && message.includes('maximum context length')) {
|
|
332
|
+
return new LLMError('Token limit exceeded', LLMErrorCode.TOKEN_LIMIT_EXCEEDED, 'openai', false)
|
|
333
|
+
}
|
|
334
|
+
|
|
335
|
+
// Server errors (retryable)
|
|
336
|
+
if (status && status >= 500) {
|
|
337
|
+
return LLMError.providerUnavailable('openai')
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
// Default to invalid response
|
|
341
|
+
return LLMError.invalidResponse('openai', message)
|
|
342
|
+
}
|
|
343
|
+
|
|
344
|
+
// Network or timeout errors
|
|
345
|
+
if (error instanceof Error) {
|
|
346
|
+
if (error.message.includes('timeout')) {
|
|
347
|
+
return LLMError.timeout('openai')
|
|
348
|
+
}
|
|
349
|
+
return new LLMError(error.message, LLMErrorCode.PROVIDER_UNAVAILABLE, 'openai', true)
|
|
350
|
+
}
|
|
351
|
+
|
|
352
|
+
return new LLMError(String(error), LLMErrorCode.PROVIDER_UNAVAILABLE, 'openai', true)
|
|
353
|
+
}
|
|
354
|
+
|
|
355
|
+
/**
|
|
356
|
+
* Type guard for OpenAI API errors
|
|
357
|
+
*/
|
|
358
|
+
private isOpenAIApiError(
|
|
359
|
+
error: unknown
|
|
360
|
+
): error is { status: number; message: string; headers?: Record<string, string> } {
|
|
361
|
+
return (
|
|
362
|
+
error !== null &&
|
|
363
|
+
typeof error === 'object' &&
|
|
364
|
+
'status' in error &&
|
|
365
|
+
typeof (error as Record<string, unknown>).status === 'number' &&
|
|
366
|
+
'message' in error &&
|
|
367
|
+
typeof (error as Record<string, unknown>).message === 'string'
|
|
368
|
+
)
|
|
369
|
+
}
|
|
370
|
+
|
|
371
|
+
private parseRetryAfter(error: {
|
|
372
|
+
status: number
|
|
373
|
+
message: string
|
|
374
|
+
headers?: Record<string, string>
|
|
375
|
+
}): number | undefined {
|
|
376
|
+
// Try to parse retry-after header or message
|
|
377
|
+
if (error.headers?.['retry-after']) {
|
|
378
|
+
const seconds = parseInt(error.headers['retry-after'], 10)
|
|
379
|
+
if (!Number.isNaN(seconds)) {
|
|
380
|
+
return seconds * 1000
|
|
381
|
+
}
|
|
382
|
+
}
|
|
383
|
+
|
|
384
|
+
// Default retry delay for rate limits
|
|
385
|
+
return 60000 // 1 minute
|
|
386
|
+
}
|
|
387
|
+
}
|
|
388
|
+
|
|
389
|
+
// ============================================================================
|
|
390
|
+
// Factory Function
|
|
391
|
+
// ============================================================================
|
|
392
|
+
|
|
393
|
+
/**
|
|
394
|
+
* Create an OpenAI LLM provider
|
|
395
|
+
*/
|
|
396
|
+
export function createOpenAIProvider(config: OpenAILLMConfig): OpenAILLMProvider {
|
|
397
|
+
return new OpenAILLMProvider(config)
|
|
398
|
+
}
|