@glueco/plugin-llm-openai 0.1.0
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- package/README.md +189 -0
- package/dist/chunk-6FNYHOB5.mjs +261 -0
- package/dist/chunk-6FNYHOB5.mjs.map +1 -0
- package/dist/chunk-6GMCQHIR.mjs +231 -0
- package/dist/chunk-6GMCQHIR.mjs.map +1 -0
- package/dist/client.d.mts +105 -0
- package/dist/client.d.ts +105 -0
- package/dist/client.js +329 -0
- package/dist/client.js.map +1 -0
- package/dist/client.mjs +62 -0
- package/dist/client.mjs.map +1 -0
- package/dist/contracts.d.mts +1494 -0
- package/dist/contracts.d.ts +1494 -0
- package/dist/contracts.js +299 -0
- package/dist/contracts.js.map +1 -0
- package/dist/contracts.mjs +35 -0
- package/dist/contracts.mjs.map +1 -0
- package/dist/index.d.mts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +520 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +41 -0
- package/dist/index.mjs.map +1 -0
- package/dist/proxy.d.mts +5 -0
- package/dist/proxy.d.ts +5 -0
- package/dist/proxy.js +488 -0
- package/dist/proxy.js.map +1 -0
- package/dist/proxy.mjs +10 -0
- package/dist/proxy.mjs.map +1 -0
- package/package.json +77 -0
package/dist/chunk-6GMCQHIR.mjs
ADDED
@@ -0,0 +1,231 @@
+import {
+  ACTIONS,
+  ChatCompletionRequestSchema,
+  DEFAULT_API_URL,
+  DEFAULT_OPENAI_MODELS,
+  ENFORCEMENT_SUPPORT,
+  PLUGIN_ID,
+  PROVIDER,
+  RESOURCE_TYPE,
+  VERSION
+} from "./chunk-6FNYHOB5.mjs";
+
+// src/proxy.ts
+import { createPluginBase } from "@glueco/shared";
+var OpenAIApiError = class extends Error {
+  constructor(status, body) {
+    super(`OpenAI API error: ${status}`);
+    this.status = status;
+    this.body = body;
+    this.name = "OpenAIApiError";
+  }
+};
+function mapOpenAIError(error) {
+  let parsed = {};
+  try {
+    parsed = JSON.parse(error.body);
+  } catch {
+  }
+  const message = parsed.error?.message || error.body;
+  const code = parsed.error?.code;
+  switch (error.status) {
+    case 400:
+      return { status: 400, code: "BAD_REQUEST", message, retryable: false };
+    case 401:
+      return {
+        status: 401,
+        code: "UNAUTHORIZED",
+        message: "Invalid API key",
+        retryable: false
+      };
+    case 403:
+      return { status: 403, code: "FORBIDDEN", message, retryable: false };
+    case 404:
+      return { status: 404, code: "NOT_FOUND", message, retryable: false };
+    case 429:
+      if (code === "insufficient_quota") {
+        return {
+          status: 429,
+          code: "QUOTA_EXCEEDED",
+          message: "API quota exceeded",
+          retryable: false
+        };
+      }
+      return { status: 429, code: "RATE_LIMITED", message, retryable: true };
+    case 500:
+    case 502:
+    case 503:
+      return {
+        status: error.status,
+        code: "PROVIDER_ERROR",
+        message,
+        retryable: true
+      };
+    default:
+      return {
+        status: error.status,
+        code: "UNKNOWN",
+        message,
+        retryable: false
+      };
+  }
+}
+var openaiPlugin = {
+  ...createPluginBase({
+    id: PLUGIN_ID,
+    resourceType: RESOURCE_TYPE,
+    provider: PROVIDER,
+    version: VERSION,
+    name: "OpenAI LLM",
+    actions: [...ACTIONS],
+    supports: {
+      enforcement: [...ENFORCEMENT_SUPPORT]
+    },
+    // Client contract metadata for SDK-compatible plugins
+    client: {
+      namespace: "openai",
+      actions: {
+        "chat.completions": {
+          description: "Generate chat completions using OpenAI GPT models"
+        }
+      }
+    }
+  }),
+  // Credential schema for UI
+  credentialSchema: {
+    fields: [
+      {
+        name: "apiKey",
+        type: "secret",
+        label: "API Key",
+        description: "Your OpenAI API key (starts with sk-)",
+        required: true
+      },
+      {
+        name: "organization",
+        type: "string",
+        label: "Organization ID",
+        description: "Optional OpenAI organization ID",
+        required: false
+      },
+      {
+        name: "baseUrl",
+        type: "url",
+        label: "Base URL",
+        description: "Custom API base URL (optional, for Azure OpenAI or proxies)",
+        required: false,
+        default: DEFAULT_API_URL
+      }
+    ]
+  },
+  validateAndShape(action, input, constraints) {
+    if (action !== "chat.completions") {
+      return { valid: false, error: `Unsupported action: ${action}` };
+    }
+    const parsed = ChatCompletionRequestSchema.safeParse(input);
+    if (!parsed.success) {
+      return {
+        valid: false,
+        error: `Invalid request: ${parsed.error.errors.map((e) => e.message).join(", ")}`
+      };
+    }
+    const request = parsed.data;
+    const enforcement = {
+      model: request.model,
+      stream: request.stream ?? false,
+      usesTools: Array.isArray(request.tools) && request.tools.length > 0,
+      maxOutputTokens: request.max_tokens ?? request.max_completion_tokens
+    };
+    const allowedModels = constraints.allowedModels ?? [
+      ...DEFAULT_OPENAI_MODELS
+    ];
+    if (!allowedModels.includes(request.model)) {
+      return {
+        valid: false,
+        error: `Model '${request.model}' not allowed. Allowed: ${allowedModels.join(", ")}`
+      };
+    }
+    const maxTokens = constraints.maxOutputTokens ?? 16384;
+    const requestedTokens = request.max_tokens ?? request.max_completion_tokens;
+    if (requestedTokens && requestedTokens > maxTokens) {
+      return {
+        valid: false,
+        error: `max_tokens (${requestedTokens}) exceeds limit (${maxTokens})`
+      };
+    }
+    if (request.stream && constraints.allowStreaming === false) {
+      return {
+        valid: false,
+        error: "Streaming is not allowed for this app"
+      };
+    }
+    const shapedRequest = {
+      ...request,
+      max_tokens: requestedTokens ? Math.min(requestedTokens, maxTokens) : void 0
+      // OpenAI doesn't require max_tokens
+    };
+    return { valid: true, shapedInput: shapedRequest, enforcement };
+  },
+  async execute(action, shapedInput, ctx, options) {
+    const request = shapedInput;
+    const baseUrl = ctx.config?.baseUrl || DEFAULT_API_URL;
+    const organization = ctx.config?.organization;
+    const headers = {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${ctx.secret}`
+    };
+    if (organization) {
+      headers["OpenAI-Organization"] = organization;
+    }
+    const response = await fetch(`${baseUrl}/chat/completions`, {
+      method: "POST",
+      headers,
+      body: JSON.stringify(request),
+      signal: options.signal
+    });
+    if (!response.ok) {
+      const errorBody = await response.text();
+      throw new OpenAIApiError(response.status, errorBody);
+    }
+    if (request.stream) {
+      return {
+        stream: response.body,
+        contentType: "text/event-stream"
+      };
+    } else {
+      const json = await response.json();
+      return {
+        response: json,
+        contentType: "application/json",
+        usage: this.extractUsage(json)
+      };
+    }
+  },
+  extractUsage(response) {
+    const res = response;
+    return {
+      inputTokens: res.usage?.prompt_tokens,
+      outputTokens: res.usage?.completion_tokens,
+      totalTokens: res.usage?.total_tokens,
+      model: res.model
+    };
+  },
+  mapError(error) {
+    if (error instanceof OpenAIApiError) {
+      return mapOpenAIError(error);
+    }
+    return {
+      status: 500,
+      code: "INTERNAL_ERROR",
+      message: error instanceof Error ? error.message : "Unknown error",
+      retryable: false
+    };
+  }
+};
+var proxy_default = openaiPlugin;
+
+export {
+  openaiPlugin,
+  proxy_default
+};
+//# sourceMappingURL=chunk-6GMCQHIR.mjs.map
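For orientation, the sketch below shows how a gateway host might drive the plugin contract compiled in the chunk above: validateAndShape first, then execute, with mapError normalizing failures. The `handleGatewayRequest` wrapper, the constraint values, and the `ctx` contents are illustrative assumptions, not part of the package.

```ts
// Hypothetical host-side harness: exercises the plugin contract shown
// above. Constraint values and ctx contents are invented for illustration.
import openaiPlugin from "@glueco/plugin-llm-openai/proxy";

export async function handleGatewayRequest(input: unknown) {
  // Per-app constraints would normally come from gateway configuration.
  const constraints = {
    allowedModels: ["gpt-4o"],
    maxOutputTokens: 1024,
    allowStreaming: true,
  };

  // Schema validation, model/token/streaming enforcement, request shaping.
  const check = openaiPlugin.validateAndShape("chat.completions", input, constraints);
  if (!check.valid) {
    return { status: 400, body: { error: check.error } };
  }

  // ctx.secret carries the stored API key; config holds the optional
  // credentialSchema fields (organization, baseUrl).
  const ctx = { secret: process.env.OPENAI_API_KEY ?? "", config: {} };

  try {
    const result = await openaiPlugin.execute(
      "chat.completions",
      check.shapedInput,
      ctx,
      { signal: new AbortController().signal },
    );
    return { status: 200, body: result };
  } catch (err) {
    // OpenAIApiError instances are mapped to normalized gateway errors.
    const mapped = openaiPlugin.mapError(err);
    return { status: mapped.status, body: { error: mapped } };
  }
}
```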
package/dist/chunk-6GMCQHIR.mjs.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/proxy.ts"], ...}
(single-line generated source map embedding the original src/proxy.ts source; the sourcesContent and VLQ mappings are machine-generated and elided)
package/dist/client.d.mts
ADDED
@@ -0,0 +1,105 @@
+import { GatewayRequestOptions, GatewayResponse, GatewayStreamResponse, GatewayTransport } from '@glueco/sdk';
+import { ChatCompletionRequest, ChatCompletionResponse } from './contracts.mjs';
+export { ACTIONS, ChatCompletionChoice, ChatCompletionChoiceSchema, ChatCompletionChunk, ChatCompletionChunkSchema, ChatCompletionRequestSchema, ChatCompletionResponseSchema, ChatMessage, ChatMessageSchema, DEFAULT_API_URL, DEFAULT_OPENAI_MODELS, ENFORCEMENT_SUPPORT, OpenAIAction, PLUGIN_ID, PROVIDER, RESOURCE_TYPE, Tool, ToolSchema, Usage, UsageSchema, VERSION } from './contracts.mjs';
+import 'zod';
+
+/**
+ * Options for chat completion requests.
+ */
+interface ChatCompletionOptions extends Omit<GatewayRequestOptions, "stream" | "method"> {
+    /**
+     * Override for custom behavior (advanced usage).
+     */
+    raw?: boolean;
+}
+/**
+ * OpenAI client interface.
+ * Provides typed methods for all supported actions.
+ */
+interface OpenAIClient {
+    /**
+     * Create a chat completion.
+     *
+     * @param request - Chat completion request (OpenAI format)
+     * @param options - Optional request options
+     * @returns Chat completion response
+     *
+     * @example
+     * ```ts
+     * const response = await openaiClient.chatCompletions({
+     *   model: "gpt-4o",
+     *   messages: [
+     *     { role: "system", content: "You are a helpful assistant." },
+     *     { role: "user", content: "What is the capital of France?" }
+     *   ],
+     *   temperature: 0.7,
+     *   max_tokens: 1000
+     * });
+     *
+     * console.log(response.data.choices[0].message.content);
+     * ```
+     */
+    chatCompletions(request: ChatCompletionRequest, options?: ChatCompletionOptions): Promise<GatewayResponse<ChatCompletionResponse>>;
+    /**
+     * Create a streaming chat completion.
+     *
+     * @param request - Chat completion request (stream flag will be set automatically)
+     * @param options - Optional request options
+     * @returns Streaming response with SSE stream
+     *
+     * @example
+     * ```ts
+     * const response = await openaiClient.chatCompletionsStream({
+     *   model: "gpt-4o",
+     *   messages: [{ role: "user", content: "Tell me a story" }]
+     * });
+     *
+     * const reader = response.stream.getReader();
+     * const decoder = new TextDecoder();
+     *
+     * while (true) {
+     *   const { done, value } = await reader.read();
+     *   if (done) break;
+     *   const chunk = decoder.decode(value);
+     *   // Process SSE chunk
+     * }
+     * ```
+     */
+    chatCompletionsStream(request: Omit<ChatCompletionRequest, "stream">, options?: ChatCompletionOptions): Promise<GatewayStreamResponse>;
+    /**
+     * Get the underlying transport for advanced usage.
+     * Useful when you need direct access to the gateway.
+     */
+    readonly transport: GatewayTransport;
+}
+/**
+ * Create a typed OpenAI client.
+ *
+ * @param transport - Gateway transport from SDK
+ * @returns Typed OpenAI client
+ *
+ * @example
+ * ```ts
+ * import { openai } from "@glueco/plugin-llm-openai/client";
+ * import { GatewayClient } from "@glueco/sdk";
+ *
+ * // Setup
+ * const gatewayClient = new GatewayClient({
+ *   keyStorage: new FileKeyStorage('./.gateway/keys.json'),
+ *   configStorage: new FileConfigStorage('./.gateway/config.json'),
+ * });
+ *
+ * // Get transport and create typed client
+ * const transport = await gatewayClient.getTransport();
+ * const openaiClient = openai(transport);
+ *
+ * // Use with full type safety
+ * const response = await openaiClient.chatCompletions({
+ *   model: "gpt-4o",
+ *   messages: [{ role: "user", content: "Hello!" }]
+ * });
+ * ```
+ */
+declare function openai(transport: GatewayTransport): OpenAIClient;
+
+export { type ChatCompletionOptions, ChatCompletionRequest, ChatCompletionResponse, type OpenAIClient, openai as default, openai };
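The GatewayStreamResponse returned by chatCompletionsStream carries the raw SSE body, so consumers must frame and parse it themselves. A minimal sketch, assuming OpenAI-style `data: {...}` events terminated by `data: [DONE]` and passed through the gateway unchanged (multi-line SSE events are not handled; the chunk shape is the OpenAI chat-chunk convention):

```ts
// Hypothetical stream consumer, assuming OpenAI-style SSE framing.
import { openai } from "@glueco/plugin-llm-openai/client";
import type { GatewayTransport } from "@glueco/sdk";

export async function streamText(transport: GatewayTransport): Promise<string> {
  const client = openai(transport);
  const response = await client.chatCompletionsStream({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Tell me a story" }],
  });

  const reader = response.stream.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  let text = "";

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    // SSE events are separated by blank lines; keep any trailing partial.
    const events = buffer.split("\n\n");
    buffer = events.pop() ?? "";

    for (const event of events) {
      const data = event.replace(/^data:\s*/, "").trim();
      if (!data) continue;
      if (data === "[DONE]") return text;
      // Accumulate delta content per the OpenAI chat-chunk shape.
      const chunk = JSON.parse(data);
      text += chunk.choices?.[0]?.delta?.content ?? "";
    }
  }
  return text;
}
```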
package/dist/client.d.ts
ADDED
@@ -0,0 +1,105 @@
+import { GatewayRequestOptions, GatewayResponse, GatewayStreamResponse, GatewayTransport } from '@glueco/sdk';
+import { ChatCompletionRequest, ChatCompletionResponse } from './contracts.js';
+export { ACTIONS, ChatCompletionChoice, ChatCompletionChoiceSchema, ChatCompletionChunk, ChatCompletionChunkSchema, ChatCompletionRequestSchema, ChatCompletionResponseSchema, ChatMessage, ChatMessageSchema, DEFAULT_API_URL, DEFAULT_OPENAI_MODELS, ENFORCEMENT_SUPPORT, OpenAIAction, PLUGIN_ID, PROVIDER, RESOURCE_TYPE, Tool, ToolSchema, Usage, UsageSchema, VERSION } from './contracts.js';
+import 'zod';
(remaining 101 lines are identical to package/dist/client.d.mts above; the two files differ only in importing from './contracts.js' rather than './contracts.mjs')