@mnexium/core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,137 @@
1
+ import type {
2
+ Claim,
3
+ ClaimAssertion,
4
+ ClaimEdge,
5
+ Memory,
6
+ MemoryRecallEvent,
7
+ MemoryRecallStats,
8
+ ResolvedTruthSlot,
9
+ } from "./types";
10
+
11
/**
 * Input payload accepted by {@link CoreStore.createMemory}.
 *
 * Required fields identify the row (`id`, `project_id`, `subject_id`) and carry
 * the memory body (`text`). All other fields are optional; defaults are
 * presumably applied by the store implementation — confirm against the adapter.
 * `embedding` may be explicitly `null` when no vector is available.
 */
export interface CreateMemoryInput {
  id: string;
  project_id: string;
  subject_id: string;
  text: string;
  // Narrowed to the same literal unions as the stored Memory shape.
  kind?: Memory["kind"];
  visibility?: Memory["visibility"];
  importance?: number;
  confidence?: number;
  is_temporal?: boolean;
  tags?: string[];
  metadata?: Record<string, unknown>;
  source_type?: string;
  embedding?: number[] | null;
}
26
+
27
/**
 * Partial patch applied by {@link CoreStore.updateMemory}.
 *
 * Every field is optional; omitted fields are presumably left untouched by the
 * store — confirm against the adapter. Identity fields (`id`, `project_id`,
 * `subject_id`) are intentionally absent: they are supplied separately by the
 * update call and cannot be changed via a patch.
 */
export interface UpdateMemoryInput {
  text?: string;
  kind?: Memory["kind"];
  visibility?: Memory["visibility"];
  importance?: number;
  confidence?: number;
  is_temporal?: boolean;
  tags?: string[];
  metadata?: Record<string, unknown>;
  embedding?: number[] | null;
}
38
+
39
/**
 * Input payload accepted by {@link CoreStore.createClaim}.
 *
 * A claim is a subject/predicate/object triple; `slot` and `claim_type` are
 * optional classifiers. `source_memory_id` / `source_observation_id` link the
 * claim back to its provenance and may be explicitly `null`.
 * `valid_from` / `valid_until` bound the claim's validity window
 * (ISO-8601 strings, presumably — confirm against the adapter).
 */
export interface CreateClaimInput {
  claim_id: string;
  project_id: string;
  subject_id: string;
  predicate: string;
  object_value: string;
  slot?: string;
  claim_type?: string;
  confidence?: number;
  importance?: number;
  tags?: string[];
  source_memory_id?: string | null;
  source_observation_id?: string | null;
  subject_entity?: string;
  valid_from?: string | null;
  valid_until?: string | null;
  embedding?: number[] | null;
}
57
+
58
/**
 * Storage port for the core server: every persistence operation the server
 * needs, grouped into memories, recall telemetry, and claims/truth slots.
 *
 * Methods marked optional (`findDuplicateMemory?`, `findConflictingMemories?`,
 * `supersedeMemories?`) are capability extensions a backend may omit —
 * presumably they require vector-similarity support; callers must feature-check
 * before use. All operations are scoped by `project_id`.
 */
export interface CoreStore {
  // --- Memories -----------------------------------------------------------

  /** Pages a subject's memories; deleted/superseded rows are excluded unless requested. */
  listMemories(args: {
    project_id: string;
    subject_id: string;
    limit: number;
    offset: number;
    include_deleted?: boolean;
    include_superseded?: boolean;
  }): Promise<Memory[]>;
  /**
   * Text + optional vector search. `query_embedding` may be `null` (no vector
   * available); results carry a raw `score` and an `effective_score` whose
   * exact weighting is backend-defined.
   */
  searchMemories(args: {
    project_id: string;
    subject_id: string;
    q: string;
    query_embedding: number[] | null;
    limit: number;
    min_score: number;
  }): Promise<Array<Memory & { score: number; effective_score: number }>>;
  createMemory(input: CreateMemoryInput): Promise<Memory>;
  /** Resolves `null` when the memory does not exist in the project. */
  getMemory(args: { project_id: string; id: string }): Promise<Memory | null>;
  /** Claim assertions derived from a given memory. */
  getMemoryClaims(args: { project_id: string; memory_id: string }): Promise<ClaimAssertion[]>;
  /** Applies a partial patch; resolves `null` when the memory is missing. */
  updateMemory(args: { project_id: string; id: string; patch: UpdateMemoryInput }): Promise<Memory | null>;
  /** `deleted` reports whether a row was actually removed/marked. */
  deleteMemory(args: { project_id: string; id: string }): Promise<{ ok: true; deleted: boolean }>;
  listSupersededMemories(args: {
    project_id: string;
    subject_id: string;
    limit: number;
    offset: number;
  }): Promise<Memory[]>;
  /** Reverses a soft delete/supersede; resolves `null` when the memory is missing. */
  restoreMemory(args: { project_id: string; id: string }): Promise<Memory | null>;
  /** Optional: nearest-neighbor duplicate lookup above `threshold` similarity. */
  findDuplicateMemory?(args: {
    project_id: string;
    subject_id: string;
    embedding: number[];
    threshold: number;
  }): Promise<{ id: string; similarity: number } | null>;
  /** Optional: memories within a similarity band — candidates for conflict resolution. */
  findConflictingMemories?(args: {
    project_id: string;
    subject_id: string;
    embedding: number[];
    min_similarity: number;
    max_similarity: number;
    limit: number;
  }): Promise<Array<{ id: string; similarity: number }>>;
  /** Optional: marks `memory_ids` superseded by `superseded_by`; resolves the affected count. */
  supersedeMemories?(args: {
    project_id: string;
    subject_id: string;
    memory_ids: string[];
    superseded_by: string;
  }): Promise<number>;

  // --- Recall telemetry ---------------------------------------------------

  getRecallEventsByChat(args: { project_id: string; chat_id: string }): Promise<MemoryRecallEvent[]>;
  getRecallEventsByMemory(args: { project_id: string; memory_id: string; limit: number }): Promise<MemoryRecallEvent[]>;
  getMemoryRecallStats(args: { project_id: string; memory_id: string }): Promise<MemoryRecallStats>;

  // --- Claims & truth slots -----------------------------------------------

  createClaim(input: CreateClaimInput): Promise<Claim>;
  getClaim(args: { project_id: string; claim_id: string }): Promise<Claim | null>;
  getAssertionsForClaim(args: { project_id: string; claim_id: string }): Promise<ClaimAssertion[]>;
  getEdgesForClaim(args: { project_id: string; claim_id: string }): Promise<ClaimEdge[]>;
  /** Currently-active resolved slot values for a subject. */
  getCurrentTruth(args: { project_id: string; subject_id: string }): Promise<ResolvedTruthSlot[]>;
  getCurrentSlot(args: { project_id: string; subject_id: string; slot: string }): Promise<ResolvedTruthSlot | null>;
  getSlots(args: { project_id: string; subject_id: string; limit: number }): Promise<Array<ResolvedTruthSlot & { status: string }>>;
  getClaimGraph(args: { project_id: string; subject_id: string; limit: number }): Promise<{ claims: Claim[]; edges: ClaimEdge[] }>;
  /** History across all slots, or a single slot when `slot` is provided; `by_slot` groups claims per slot key. */
  getClaimHistory(args: {
    project_id: string;
    subject_id: string;
    slot?: string | null;
    limit: number;
  }): Promise<{ claims: Claim[]; edges: ClaimEdge[]; by_slot: Record<string, Claim[]> }>;
  /**
   * Retracts a claim with an audit `reason`; `restored_previous` reports
   * whether the slot's prior claim was reinstated.
   */
  retractClaim(args: {
    project_id: string;
    claim_id: string;
    reason: string;
  }): Promise<{
    success: boolean;
    claim_id: string;
    slot: string;
    previous_claim_id: string | null;
    restored_previous: boolean;
  }>;
}
@@ -0,0 +1,138 @@
1
/** Any value representable as JSON (recursive union over the JSON data model). */
export type Json =
  | null
  | boolean
  | number
  | string
  | Json[]
  | { [key: string]: Json };

/** Lifecycle of a memory row: superseded memories are replaced by a newer one. */
export type MemoryStatus = "active" | "superseded";
/** Lifecycle of a claim: retracted claims carry a reason and timestamp. */
export type ClaimStatus = "active" | "retracted";
/** Lifecycle of a truth slot; includes both replacement and retraction. */
export type SlotStatus = "active" | "superseded" | "retracted";
/** Relationship kinds between two claims in the claim graph. */
export type ClaimEdgeType = "supersedes" | "supports" | "duplicates" | "related" | "retracts";
13
+
14
/**
 * A stored memory row scoped to a project and subject.
 *
 * Timestamps are strings (ISO-8601, presumably — confirm against the store
 * adapter). `superseded_by` points at the replacing memory when
 * `status === "superseded"`. The trailing counters track how often the memory
 * has been seen/reinforced; their update policy lives in the store, not here.
 */
export interface Memory {
  id: string;
  project_id: string;
  subject_id: string;
  text: string;
  kind: "fact" | "preference" | "context" | "note" | "event" | "trait";
  visibility: "private" | "shared" | "public";
  importance: number;
  confidence: number;
  // True when the memory describes something time-bound.
  is_temporal: boolean;
  tags: string[];
  metadata: Record<string, Json>;
  status: MemoryStatus;
  // Id of the memory that replaced this one, if any.
  superseded_by: string | null;
  is_deleted: boolean;
  source_type: string;
  created_at: string;
  updated_at: string;
  last_seen_at: string | null;
  seen_count: number;
  reinforcement_count: number;
  last_reinforced_at: string | null;
}
37
+
38
/**
 * A subject/predicate/object statement extracted into the claim graph.
 *
 * `slot` names the truth slot this claim competes for. Provenance is tracked
 * via `source_memory_id` / `source_observation_id`. When retracted,
 * `retracted_at` and `retract_reason` are populated. `valid_from`/`valid_until`
 * bound the claim's validity window.
 */
export interface Claim {
  claim_id: string;
  project_id: string;
  subject_id: string;
  predicate: string;
  object_value: string;
  slot: string;
  claim_type: string;
  confidence: number;
  importance: number;
  tags: string[];
  source_memory_id: string | null;
  source_observation_id: string | null;
  subject_entity: string;
  status: ClaimStatus;
  asserted_at: string;
  updated_at: string;
  retracted_at: string | null;
  retract_reason: string | null;
  valid_from: string | null;
  valid_until: string | null;
}
60
+
61
/**
 * A single observation supporting a claim, with a typed value.
 *
 * Exactly one of the `value_*` columns is expected to be non-null, selected by
 * `object_type` — presumably enforced by the store, not by this type; confirm
 * against the adapter. `first_seen_at`/`last_seen_at` bracket when this
 * assertion was observed.
 */
export interface ClaimAssertion {
  assertion_id: string;
  project_id: string;
  subject_id: string;
  claim_id: string;
  // Memory the assertion was extracted from, when known.
  memory_id: string | null;
  predicate: string;
  // Discriminates which value_* field carries the payload.
  object_type: "string" | "number" | "date" | "json";
  value_string: string | null;
  value_number: number | null;
  value_date: string | null;
  value_json: Json | null;
  confidence: number;
  status: string;
  first_seen_at: string;
  last_seen_at: string;
}
78
+
79
/**
 * A directed, typed edge between two claims in the claim graph
 * (`from_claim_id` --edge_type--> `to_claim_id`).
 *
 * `weight` is a numeric strength; `reason_code`/`reason_text` optionally
 * explain why the edge was created.
 */
export interface ClaimEdge {
  // Numeric (auto-increment, presumably) primary key — unlike string claim ids.
  edge_id: number;
  project_id: string;
  subject_id: string;
  from_claim_id: string;
  to_claim_id: string;
  edge_type: ClaimEdgeType;
  weight: number;
  reason_code: string | null;
  reason_text: string | null;
  created_at: string;
}
91
+
92
/**
 * Persistent per-(project, subject, slot) pointer to the claim currently
 * holding the slot, plus how the slot was last transitioned.
 *
 * `active_claim_id` is null when no claim holds the slot (e.g. after a
 * retraction); `replaced_by_claim_id` records the superseding claim, if any.
 */
export interface SlotState {
  project_id: string;
  subject_id: string;
  slot: string;
  active_claim_id: string | null;
  status: SlotStatus;
  replaced_by_claim_id: string | null;
  updated_at: string;
}
101
+
102
/**
 * Read-model view of a slot joined with its active claim — what
 * {@link CoreStore.getCurrentTruth} / `getCurrentSlot` return.
 *
 * Unlike {@link SlotState}, `active_claim_id` is non-null here: resolved rows
 * only exist for slots that currently have an active claim.
 */
export interface ResolvedTruthSlot {
  slot: string;
  active_claim_id: string;
  predicate: string;
  object_value: string;
  claim_type: string;
  confidence: number;
  tags: string[];
  updated_at: string;
  source_memory_id: string | null;
  source_observation_id: string | null;
}
114
+
115
/**
 * Telemetry record emitted when a memory is recalled into a chat.
 *
 * `memory_text` snapshots the memory body at recall time. `message_index`
 * positions the recall within the chat; `chat_logged` flags whether the chat
 * itself was persisted. `similarity_score` is the retrieval score that
 * triggered the recall.
 */
export interface MemoryRecallEvent {
  event_id: string;
  project_id: string;
  subject_id: string;
  memory_id: string;
  // Snapshot of the recalled memory's text at event time.
  memory_text: string;
  chat_id: string;
  message_index: number;
  chat_logged: boolean;
  similarity_score: number;
  request_type: string;
  model: string;
  metadata: Record<string, Json>;
  recalled_at: string;
}
130
+
131
/**
 * Aggregated recall statistics for one memory, as returned by
 * {@link CoreStore.getMemoryRecallStats}.
 *
 * The `first_recalled_at`/`last_recalled_at` timestamps are null when the
 * memory has never been recalled.
 */
export interface MemoryRecallStats {
  total_recalls: number;
  unique_chats: number;
  unique_subjects: number;
  avg_score: number;
  first_recalled_at: string | null;
  last_recalled_at: string | null;
}
package/src/dev.ts ADDED
@@ -0,0 +1,144 @@
1
+ import "dotenv/config";
2
+ import { Pool } from "pg";
3
+ import { createCoreServer } from "./server/createCoreServer";
4
+ import { PostgresCoreStore } from "./adapters/postgres/PostgresCoreStore";
5
+ import { createOpenAIEmbedder } from "./providers/openaiEmbedding";
6
+ import { createCerebrasClient } from "./providers/cerebras";
7
+ import { createOpenAIChatClient } from "./providers/openaiChat";
8
+ import { createLLMRecallService, createSimpleRecallService } from "./ai/recallService";
9
+ import { createLLMMemoryExtractionService, createSimpleMemoryExtractionService } from "./ai/memoryExtractionService";
10
+ import type { JsonLlmClient } from "./ai/types";
11
+
12
+ function envFlag(raw: string | undefined, fallback = false): boolean {
13
+ const v = String(raw ?? "").trim().toLowerCase();
14
+ if (!v) return fallback;
15
+ if (["1", "true", "yes", "on"].includes(v)) return true;
16
+ if (["0", "false", "no", "off"].includes(v)) return false;
17
+ return fallback;
18
+ }
19
+
20
+ function toErrorMessage(err: unknown): string {
21
+ if (!err) return "unknown_error";
22
+ if (typeof err === "string") return err;
23
+ if (err instanceof AggregateError && Array.isArray(err.errors) && err.errors.length > 0) {
24
+ return `AggregateError: ${err.errors.map((e) => toErrorMessage(e)).join(" | ")}`;
25
+ }
26
+ if (typeof err === "object" && err && Array.isArray((err as any).errors) && (err as any).errors.length > 0) {
27
+ return `AggregateError: ${(err as any).errors.map((e: unknown) => toErrorMessage(e)).join(" | ")}`;
28
+ }
29
+ if (typeof err === "object" && err && (err as any).cause) {
30
+ return `${String((err as any).message || err)} (cause: ${toErrorMessage((err as any).cause)})`;
31
+ }
32
+ return String((err as any)?.message || err);
33
+ }
34
+
35
// --- Dev-server bootstrap -------------------------------------------------
// Fail fast if the Postgres connection settings are incomplete.
const required = ["POSTGRES_HOST", "POSTGRES_DB", "POSTGRES_USER", "POSTGRES_PASSWORD"];
for (const key of required) {
  if (!process.env[key]) {
    throw new Error(`Missing env var: ${key}`);
  }
}

// Connection pool for the Postgres-backed store. POSTGRES_PORT defaults to 5432.
const pool = new Pool({
  host: process.env.POSTGRES_HOST,
  port: Number(process.env.POSTGRES_PORT || 5432),
  database: process.env.POSTGRES_DB,
  user: process.env.POSTGRES_USER,
  password: process.env.POSTGRES_PASSWORD,
});

const store = new PostgresCoreStore(pool);
// Embedder tolerates a missing OPENAI_API_KEY (falls back to empty embeddings).
const embed = createOpenAIEmbedder({
  apiKey: process.env.OPENAI_API_KEY,
  model: process.env.OPENAI_EMBED_MODEL || "text-embedding-3-small",
});

// AI-mode selection inputs. CORE_AI_MODE is one of "cerebras" | "openai" |
// "simple" | "auto" (default); "auto" prefers Cerebras, then OpenAI, then simple.
const configuredAiMode = String(process.env.CORE_AI_MODE || "auto").trim().toLowerCase();
const cerebrasApiKey = String(process.env.CEREBRAS_API || "").trim();
const openaiApiKey = String(process.env.OPENAI_API_KEY || "").trim();
const retrievalModel = String(process.env.RETRIEVAL_MODEL || "").trim();
const useRetrievalExpand = envFlag(process.env.USE_RETRIEVAL_EXPAND, true);
const debugEnabled = envFlag(process.env.CORE_DEBUG, false);

let llmClient: JsonLlmClient | null = null;
let resolvedAiMode: "cerebras" | "openai" | "simple" = "simple";

const wantCerebras = configuredAiMode === "cerebras" || configuredAiMode === "auto";
const wantOpenAI = configuredAiMode === "openai" || configuredAiMode === "auto";
const wantSimple = configuredAiMode === "simple";

// Resolve the LLM client: Cerebras wins when its key is present, then OpenAI,
// otherwise no client (simple mode). An unrecognized CORE_AI_MODE with no keys
// also ends up in simple mode via the initial defaults.
if (wantCerebras && cerebrasApiKey) {
  llmClient = createCerebrasClient({
    apiKey: cerebrasApiKey,
    model: retrievalModel || "gpt-oss-120b",
  });
  resolvedAiMode = "cerebras";
} else if (wantOpenAI && openaiApiKey) {
  llmClient = createOpenAIChatClient({
    apiKey: openaiApiKey,
    model: retrievalModel || "gpt-4o-mini",
  });
  resolvedAiMode = "openai";
} else if (wantSimple || configuredAiMode === "auto") {
  llmClient = null;
  resolvedAiMode = "simple";
}

// Explain the fallback when an explicit mode was requested but its key is missing.
if (configuredAiMode === "cerebras" && !cerebrasApiKey) {
  console.warn("[core] CORE_AI_MODE=cerebras but CEREBRAS_API is missing; falling back to simple mode.");
}
if (configuredAiMode === "openai" && !openaiApiKey) {
  console.warn("[core] CORE_AI_MODE=openai but OPENAI_API_KEY is missing; falling back to simple mode.");
}

// Recall: LLM-expanded retrieval only when a client exists AND expansion is enabled.
const recallService = llmClient
  ? (useRetrievalExpand
    ? createLLMRecallService({
      store,
      embed,
      llm: llmClient,
    })
    : createSimpleRecallService({
      store,
      embed,
    }))
  : createSimpleRecallService({
    store,
    embed,
  });

// Memory extraction mirrors the same LLM-vs-simple split.
const memoryExtractionService = llmClient
  ? createLLMMemoryExtractionService({
    llm: llmClient,
  })
  : createSimpleMemoryExtractionService();

const server = createCoreServer({
  store,
  defaultProjectId: process.env.CORE_DEFAULT_PROJECT_ID || "default-project",
  embed,
  recallService,
  memoryExtractionService,
  debug: debugEnabled,
});

const port = Number(process.env.PORT || 8080);
server.listen(port, () => {
  // Startup banner summarizing the resolved configuration.
  const model = llmClient?.model ? ` (${llmClient.provider}:${llmClient.model})` : "";
  console.log(
    `[core] postgres target: ${process.env.POSTGRES_HOST}:${Number(process.env.POSTGRES_PORT || 5432)}/${process.env.POSTGRES_DB} user=${process.env.POSTGRES_USER}`,
  );
  console.log(`[core] ai mode: ${resolvedAiMode}${model}`);
  console.log(`[core] retrieval expand: ${useRetrievalExpand ? "enabled" : "disabled (simple mode)"}`);
  console.log(`[core] debug: ${debugEnabled ? "enabled" : "disabled"}`);
  console.log(`[core] listening on http://localhost:${port}`);
});

// Best-effort connectivity probe: logs the outcome but never blocks startup.
void pool
  .query("select 1 as ok")
  .then(() => {
    console.log("[core] postgres connectivity check: ok");
  })
  .catch((err) => {
    console.warn(`[core] postgres connectivity check failed: ${toErrorMessage(err)}`);
  });
package/src/index.ts ADDED
@@ -0,0 +1,15 @@
1
// Public entry point for @mnexium/core: re-exports the Postgres store adapter,
// the HTTP server factory, the provider clients (OpenAI / Cerebras), the AI
// recall/extraction services, and the shared contract types.
export { PostgresCoreStore } from "./adapters/postgres/PostgresCoreStore";
export { createCoreServer } from "./server/createCoreServer";
export type { CreateCoreServerOptions } from "./server/createCoreServer";
export { createOpenAIEmbedder } from "./providers/openaiEmbedding";
export { createCerebrasClient } from "./providers/cerebras";
export { createOpenAIChatClient } from "./providers/openaiChat";
export { createLLMRecallService, createSimpleRecallService, createCerebrasRecallService } from "./ai/recallService";
export {
  createLLMMemoryExtractionService,
  createSimpleMemoryExtractionService,
  createCerebrasMemoryExtractionService,
} from "./ai/memoryExtractionService";
export type { JsonLlmClient } from "./ai/types";
// Type-only re-exports of the storage/data contracts.
export type * from "./contracts/types";
export type * from "./contracts/storage";
@@ -0,0 +1,101 @@
1
+ const CEREBRAS_API_URL = "https://api.cerebras.ai/v1/chat/completions";
2
+ const DEFAULT_MODEL = "gpt-oss-120b";
3
+
4
+ export interface CerebrasClientOptions {
5
+ apiKey?: string;
6
+ model?: string;
7
+ }
8
+
9
+ export interface CerebrasCallOptions {
10
+ systemPrompt: string;
11
+ userPrompt: string;
12
+ jsonMode?: boolean;
13
+ timeoutMs?: number;
14
+ temperature?: number;
15
+ }
16
+
17
+ export interface CerebrasClient {
18
+ provider: "cerebras";
19
+ callRaw: (opts: {
20
+ messages: Array<{ role: string; content: string }>;
21
+ jsonMode?: boolean;
22
+ timeoutMs?: number;
23
+ temperature?: number;
24
+ }) => Promise<any>;
25
+ call: (opts: CerebrasCallOptions) => Promise<any | null>;
26
+ model: string;
27
+ }
28
+
29
+ export function createCerebrasClient(options: CerebrasClientOptions = {}): CerebrasClient {
30
+ const apiKey = String(options.apiKey || process.env.CEREBRAS_API || "").trim();
31
+ const model = String(options.model || process.env.RETRIEVAL_MODEL || DEFAULT_MODEL).trim();
32
+
33
+ async function callRaw(opts: {
34
+ messages: Array<{ role: string; content: string }>;
35
+ jsonMode?: boolean;
36
+ timeoutMs?: number;
37
+ temperature?: number;
38
+ }): Promise<any> {
39
+ if (!apiKey) {
40
+ throw new Error("[core:cerebras] CEREBRAS_API env var is not set");
41
+ }
42
+
43
+ const body: Record<string, unknown> = {
44
+ model,
45
+ stream: false,
46
+ messages: opts.messages,
47
+ temperature: Number.isFinite(Number(opts.temperature)) ? Number(opts.temperature) : 0,
48
+ max_tokens: -1,
49
+ seed: 0,
50
+ top_p: 1,
51
+ };
52
+ if (opts.jsonMode !== false) {
53
+ body.response_format = { type: "json_object" };
54
+ }
55
+
56
+ const fetchOptions: RequestInit = {
57
+ method: "POST",
58
+ headers: {
59
+ "Content-Type": "application/json",
60
+ Authorization: `Bearer ${apiKey}`,
61
+ },
62
+ body: JSON.stringify(body),
63
+ };
64
+ if (opts.timeoutMs && typeof AbortSignal.timeout === "function") {
65
+ fetchOptions.signal = AbortSignal.timeout(opts.timeoutMs);
66
+ }
67
+
68
+ const res = await fetch(CEREBRAS_API_URL, fetchOptions);
69
+ if (!res.ok) {
70
+ const errorText = await res.text().catch(() => "");
71
+ throw new Error(`[core:cerebras] API error: ${res.status} - ${errorText}`);
72
+ }
73
+ return res.json();
74
+ }
75
+
76
+ async function call(opts: CerebrasCallOptions): Promise<any | null> {
77
+ const raw = await callRaw({
78
+ messages: [
79
+ { role: "system", content: String(opts.systemPrompt || "") },
80
+ { role: "user", content: String(opts.userPrompt || "") },
81
+ ],
82
+ jsonMode: opts.jsonMode !== false,
83
+ timeoutMs: opts.timeoutMs,
84
+ temperature: opts.temperature,
85
+ });
86
+
87
+ const content = raw?.choices?.[0]?.message?.content || "";
88
+ try {
89
+ return JSON.parse(content);
90
+ } catch {
91
+ return null;
92
+ }
93
+ }
94
+
95
+ return {
96
+ provider: "cerebras",
97
+ callRaw,
98
+ call,
99
+ model,
100
+ };
101
+ }
@@ -0,0 +1,116 @@
1
+ const OPENAI_RESPONSES_URL = "https://api.openai.com/v1/responses";
2
+ const DEFAULT_MODEL = "gpt-4o-mini";
3
+
4
+ export interface OpenAIChatClientOptions {
5
+ apiKey?: string;
6
+ model?: string;
7
+ }
8
+
9
+ export interface OpenAIChatClient {
10
+ provider: "openai";
11
+ model: string;
12
+ callRaw: (opts: {
13
+ systemPrompt: string;
14
+ userPrompt: string;
15
+ jsonMode?: boolean;
16
+ timeoutMs?: number;
17
+ temperature?: number;
18
+ }) => Promise<any>;
19
+ call: (opts: {
20
+ systemPrompt: string;
21
+ userPrompt: string;
22
+ jsonMode?: boolean;
23
+ timeoutMs?: number;
24
+ temperature?: number;
25
+ }) => Promise<any | null>;
26
+ }
27
+
28
+ export function createOpenAIChatClient(options: OpenAIChatClientOptions = {}): OpenAIChatClient {
29
+ const apiKey = String(options.apiKey || process.env.OPENAI_API_KEY || "").trim();
30
+ const model = String(options.model || process.env.RETRIEVAL_MODEL || DEFAULT_MODEL).trim();
31
+
32
+ async function callRaw(opts: {
33
+ systemPrompt: string;
34
+ userPrompt: string;
35
+ jsonMode?: boolean;
36
+ timeoutMs?: number;
37
+ temperature?: number;
38
+ }): Promise<any> {
39
+ if (!apiKey) {
40
+ throw new Error("[core:openai] OPENAI_API_KEY env var is not set");
41
+ }
42
+
43
+ const body: Record<string, unknown> = {
44
+ model,
45
+ input: [
46
+ {
47
+ role: "developer",
48
+ content: [{ type: "input_text", text: String(opts.systemPrompt || "") }],
49
+ },
50
+ {
51
+ role: "user",
52
+ content: [{ type: "input_text", text: String(opts.userPrompt || "") }],
53
+ },
54
+ ],
55
+ store: false,
56
+ reasoning: { effort: "minimal" },
57
+ temperature: Number.isFinite(Number(opts.temperature)) ? Number(opts.temperature) : 0,
58
+ };
59
+
60
+ if (opts.jsonMode !== false) {
61
+ body.text = { format: { type: "json_object" } };
62
+ }
63
+
64
+ const fetchOptions: RequestInit = {
65
+ method: "POST",
66
+ headers: {
67
+ "Content-Type": "application/json",
68
+ Authorization: `Bearer ${apiKey}`,
69
+ },
70
+ body: JSON.stringify(body),
71
+ };
72
+ if (opts.timeoutMs && typeof AbortSignal.timeout === "function") {
73
+ fetchOptions.signal = AbortSignal.timeout(opts.timeoutMs);
74
+ }
75
+
76
+ const res = await fetch(OPENAI_RESPONSES_URL, fetchOptions);
77
+ if (!res.ok) {
78
+ const errorText = await res.text().catch(() => "");
79
+ throw new Error(`[core:openai] API error: ${res.status} - ${errorText}`);
80
+ }
81
+ return res.json();
82
+ }
83
+
84
+ async function call(opts: {
85
+ systemPrompt: string;
86
+ userPrompt: string;
87
+ jsonMode?: boolean;
88
+ timeoutMs?: number;
89
+ temperature?: number;
90
+ }): Promise<any | null> {
91
+ const raw = await callRaw(opts);
92
+
93
+ const outputItem = Array.isArray(raw?.output)
94
+ ? raw.output.find((o: any) => o?.type === "message" && o?.role === "assistant")
95
+ : null;
96
+ const textContent = Array.isArray(outputItem?.content)
97
+ ? outputItem.content.find((c: any) => c?.type === "output_text")
98
+ : null;
99
+
100
+ const content = String(textContent?.text || raw?.output_text || "").trim();
101
+ if (!content) return null;
102
+
103
+ try {
104
+ return JSON.parse(content);
105
+ } catch {
106
+ return null;
107
+ }
108
+ }
109
+
110
+ return {
111
+ provider: "openai",
112
+ model,
113
+ callRaw,
114
+ call,
115
+ };
116
+ }
@@ -0,0 +1,52 @@
1
+ const OPENAI_EMBED_URL = "https://api.openai.com/v1/embeddings";
2
+
3
+ let warnedMissingKey = false;
4
+
5
+ export interface OpenAIEmbedderOptions {
6
+ apiKey?: string;
7
+ model?: string;
8
+ }
9
+
10
+ /**
11
+ * Returns a text->embedding function backed by OpenAI embeddings.
12
+ * If no API key is configured, it safely returns an empty embedding.
13
+ */
14
+ export function createOpenAIEmbedder(options: OpenAIEmbedderOptions = {}) {
15
+ const apiKey = String(options.apiKey || process.env.OPENAI_API_KEY || "").trim();
16
+ const model = String(options.model || process.env.OPENAI_EMBED_MODEL || "text-embedding-3-small").trim();
17
+
18
+ return async function embed(text: string): Promise<number[]> {
19
+ const input = String(text || "").trim();
20
+ if (!input) return [];
21
+
22
+ if (!apiKey) {
23
+ if (!warnedMissingKey) {
24
+ warnedMissingKey = true;
25
+ console.warn("[core] OPENAI_API_KEY not set; embedding-enabled routes will fall back to non-vector behavior.");
26
+ }
27
+ return [];
28
+ }
29
+
30
+ const res = await fetch(OPENAI_EMBED_URL, {
31
+ method: "POST",
32
+ headers: {
33
+ "Content-Type": "application/json",
34
+ Authorization: `Bearer ${apiKey}`,
35
+ },
36
+ body: JSON.stringify({
37
+ model,
38
+ input,
39
+ }),
40
+ });
41
+
42
+ if (!res.ok) {
43
+ const errText = await res.text().catch(() => "");
44
+ throw new Error(`openai_embedding_failed: ${res.status} ${errText}`);
45
+ }
46
+
47
+ const json: any = await res.json();
48
+ const embedding = json?.data?.[0]?.embedding;
49
+ if (!Array.isArray(embedding)) return [];
50
+ return embedding.map((v: unknown) => Number(v)).filter((v: number) => Number.isFinite(v));
51
+ };
52
+ }