@open-matrix/driver-openai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,20 @@
1
+ import { type InferenceDriver, type DriverCapabilities, type InferRequest, type InferResponse, type HealthResult } from "@open-matrix/inference";
2
+ export interface OpenAIDriverConfig {
3
+ apiKey?: string;
4
+ model?: string;
5
+ maxTokens?: number;
6
+ baseUrl?: string;
7
+ }
8
+ export declare class OpenAIDriver implements InferenceDriver {
9
+ readonly id = "openai";
10
+ readonly capabilities: DriverCapabilities;
11
+ private apiKey;
12
+ private model;
13
+ private maxTokens;
14
+ private baseUrl;
15
+ private conversations;
16
+ constructor(config?: OpenAIDriverConfig);
17
+ infer(request: InferRequest): Promise<InferResponse>;
18
+ health(): Promise<HealthResult>;
19
+ dispose(): Promise<void>;
20
+ }
package/dist/index.js ADDED
@@ -0,0 +1,101 @@
1
+ // @open-matrix/driver-openai
2
+ // InferenceDriver implementation for the OpenAI Chat Completions API.
3
+ // Supports: tool calling, multi-turn, structured output.
4
+ // ZERO imports from OmegaLLM internals.
5
+ import { InferenceError, } from "@open-matrix/inference";
6
+ export class OpenAIDriver {
7
+ id = "openai";
8
+ capabilities = {
9
+ oracleProtocol: { reentrant: true, supportedOps: ["eval", "apply", "observe", "match", "return"] },
10
+ tooling: { native: true, format: "openai", maxTools: 128 },
11
+ session: { multiTurn: true, streaming: false, managedSessions: false, maxContext: 128_000 },
12
+ io: { vision: true, audio: false, structuredOutput: true, fileUpload: false, artifacts: false },
13
+ transport: { kind: "api", latency: "seconds" },
14
+ mcp: { client: false, server: false },
15
+ };
16
+ apiKey;
17
+ model;
18
+ maxTokens;
19
+ baseUrl;
20
+ conversations = new Map();
21
+ constructor(config) {
22
+ this.apiKey = config?.apiKey ?? process.env.OPENAI_API_KEY ?? "";
23
+ this.model = config?.model ?? "gpt-4o";
24
+ this.maxTokens = config?.maxTokens ?? 4096;
25
+ this.baseUrl = config?.baseUrl ?? "https://api.openai.com";
26
+ }
27
+ async infer(request) {
28
+ const start = Date.now();
29
+ const model = request.model ?? this.model;
30
+ const maxTokens = request.maxTokens ?? this.maxTokens;
31
+ const messages = [];
32
+ if (request.systemPrompt) {
33
+ messages.push({ role: "system", content: request.systemPrompt });
34
+ }
35
+ // Resume conversation
36
+ if (request.conversationRef && this.conversations.has(request.conversationRef)) {
37
+ messages.push(...this.conversations.get(request.conversationRef));
38
+ }
39
+ messages.push({ role: "user", content: request.prompt });
40
+ const body = {
41
+ model,
42
+ max_tokens: maxTokens,
43
+ messages,
44
+ };
45
+ if (request.temperature !== undefined) {
46
+ body.temperature = request.temperature;
47
+ }
48
+ if (request.tools && request.tools.length > 0) {
49
+ body.tools = request.tools.map(t => ({
50
+ type: "function",
51
+ function: { name: t.name, description: t.description, parameters: t.inputSchema },
52
+ }));
53
+ }
54
+ const resp = await fetch(`${this.baseUrl}/v1/chat/completions`, {
55
+ method: "POST",
56
+ headers: {
57
+ "Content-Type": "application/json",
58
+ "Authorization": `Bearer ${this.apiKey}`,
59
+ },
60
+ body: JSON.stringify(body),
61
+ });
62
+ if (!resp.ok) {
63
+ const text = await resp.text();
64
+ const errorClass = resp.status === 401 ? "auth"
65
+ : resp.status === 429 ? "rate-limit"
66
+ : "network";
67
+ throw new InferenceError(`OpenAI API ${resp.status}: ${text}`, this.id, errorClass);
68
+ }
69
+ const data = await resp.json();
70
+ const latencyMs = Date.now() - start;
71
+ const choice = data.choices?.[0] ?? {};
72
+ const content = choice.message?.content ?? "";
73
+ const convRef = request.conversationRef ?? `conv-${Date.now()}`;
74
+ const existing = request.conversationRef ? (this.conversations.get(request.conversationRef) ?? []) : [];
75
+ this.conversations.set(convRef, [...existing, { role: "user", content: request.prompt }, { role: "assistant", content }]);
76
+ return {
77
+ content,
78
+ conversationRef: convRef,
79
+ usage: data.usage ? { inputTokens: data.usage.prompt_tokens, outputTokens: data.usage.completion_tokens } : undefined,
80
+ latencyMs,
81
+ driverId: this.id,
82
+ };
83
+ }
84
+ async health() {
85
+ if (!this.apiKey)
86
+ return { ok: false, message: "No OPENAI_API_KEY configured" };
87
+ try {
88
+ const start = Date.now();
89
+ const resp = await fetch(`${this.baseUrl}/v1/models`, {
90
+ headers: { "Authorization": `Bearer ${this.apiKey}` },
91
+ });
92
+ return { ok: resp.ok, latencyMs: Date.now() - start };
93
+ }
94
+ catch (e) {
95
+ return { ok: false, message: e.message };
96
+ }
97
+ }
98
+ async dispose() {
99
+ this.conversations.clear();
100
+ }
101
+ }
package/package.json ADDED
@@ -0,0 +1,20 @@
1
+ {
2
+ "name": "@open-matrix/driver-openai",
3
+ "version": "0.1.0",
4
+ "description": "OpenAI inference driver — raw fetch, zero SDK deps",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "files": ["dist"],
9
+ "scripts": {
10
+ "build": "tsc",
11
+ "clean": "rimraf dist"
12
+ },
13
+ "dependencies": {
14
+ "@open-matrix/inference": "^0.1.0"
15
+ },
16
+ "devDependencies": {
17
+ "@types/node": "^20.0.0",
18
+ "typescript": "^5.5.0"
19
+ }
20
+ }