@kb-labs/adapters-kblabs-gateway 2.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,159 @@
1
+ # @kb-labs/adapters-vibeproxy
2
+
3
+ > Part of [KB Labs](https://github.com/KirillBaranov/kb-labs) ecosystem. Works exclusively within KB Labs platform.
4
+
5
+ VibeProxy local adapter supporting multiple LLM providers (Claude, GPT, etc.) through a unified interface.
6
+
7
+ ## Overview
8
+
9
+ | Property | Value |
10
+ |----------|-------|
11
+ | **Implements** | `ILLM` |
12
+ | **Type** | `core` |
13
+ | **Requires** | None |
14
+ | **Category** | AI |
15
+
16
+ ## Features
17
+
18
+ - **Multi-Provider** - Claude, GPT, Gemini, and more via single interface
19
+ - **Local Proxy** - Route through local VibeProxy server
20
+ - **Function Calling** - Native tool support for all providers
21
+ - **Model Switching** - Change provider by just changing model name
22
+
23
+ ## Installation
24
+
25
+ ```bash
26
+ pnpm add @kb-labs/adapters-vibeproxy
27
+ ```
28
+
29
+ ## Configuration
30
+
31
+ Add to your `kb.config.json`:
32
+
33
+ ```json
34
+ {
35
+ "platform": {
36
+ "adapters": {
37
+ "llm": "@kb-labs/adapters-vibeproxy"
38
+ },
39
+ "adapterOptions": {
40
+ "llm": {
41
+ "baseURL": "http://localhost:8317",
42
+ "apiKey": "any-string",
43
+ "model": "claude-sonnet-4-20250514",
44
+ "timeout": 120000
45
+ }
46
+ }
47
+ }
48
+ }
49
+ ```
50
+
51
+ ### Options
52
+
53
+ | Option | Type | Default | Description |
54
+ |--------|------|---------|-------------|
55
+ | `baseURL` | `string` | `"http://localhost:8317"` | VibeProxy server URL |
56
+ | `apiKey` | `string` | `"any-string"` | API key (any string works for local) |
57
+ | `model` | `string` | `"claude-sonnet-4-20250514"` | Model to use |
58
+ | `timeout` | `number` | `120000` | Request timeout in ms |
59
+
60
+ ## Usage
61
+
62
+ ### Via Platform (Recommended)
63
+
64
+ ```typescript
65
+ import { usePlatform } from '@kb-labs/sdk';
66
+
67
+ const platform = usePlatform();
68
+
69
+ // Chat with Claude
70
+ const response = await platform.llm.chat([
71
+ { role: 'user', content: 'Hello!' }
72
+ ]);
73
+
74
+ // Switch to GPT by changing model
75
+ const gptResponse = await platform.llm.chat(
76
+ [{ role: 'user', content: 'Hello!' }],
77
+ { model: 'gpt-4-turbo' }
78
+ );
79
+
80
+ // Function calling
81
+ const result = await platform.llm.chatWithTools(
82
+ [{ role: 'user', content: 'What time is it?' }],
83
+ [{ name: 'getTime', parameters: { ... } }]
84
+ );
85
+ ```
86
+
87
+ ### Standalone (Testing/Development)
88
+
89
+ ```typescript
90
+ import { createAdapter } from '@kb-labs/adapters-vibeproxy';
91
+
92
+ const llm = createAdapter({
93
+ baseURL: 'http://localhost:8317',
94
+ model: 'claude-sonnet-4-20250514'
95
+ });
96
+
97
+ const response = await llm.chat([
98
+ { role: 'user', content: 'Hello!' }
99
+ ]);
100
+ ```
101
+
102
+ ## Adapter Manifest
103
+
104
+ ```typescript
105
+ {
106
+ id: 'vibeproxy-llm',
107
+ name: 'VibeProxy LLM',
108
+ version: '0.1.0',
109
+ implements: 'ILLM',
110
+ capabilities: {
111
+ streaming: false, // TODO: implement SSE streaming
112
+ custom: {
113
+ functionCalling: true,
114
+ multiProvider: true,
115
+ },
116
+ },
117
+ }
118
+ ```
119
+
120
+ ## Supported Models
121
+
122
+ | Provider | Model Examples |
123
+ |----------|---------------|
124
+ | **Anthropic** | `claude-sonnet-4-20250514`, `claude-3-opus-*` |
125
+ | **OpenAI** | `gpt-4-turbo`, `gpt-3.5-turbo` |
126
+ | **Google** | `gemini-pro`, `gemini-ultra` |
127
+
128
+ ## FAQ
129
+
130
+ <details>
131
+ <summary><strong>Q: How do I start VibeProxy locally?</strong></summary>
132
+
133
+ See VibeProxy documentation for setup instructions. Default port is 8317.
134
+ </details>
135
+
136
+ <details>
137
+ <summary><strong>Q: Why use VibeProxy instead of direct API?</strong></summary>
138
+
139
+ - Single interface for multiple providers
140
+ - Local caching and rate limiting
141
+ - Request logging and analytics
142
+ - Cost tracking across providers
143
+ </details>
144
+
145
+ <details>
146
+ <summary><strong>Q: Is streaming supported?</strong></summary>
147
+
148
+ Not yet. Streaming (SSE) is planned for a future release.
149
+ </details>
150
+
151
+ ## Related Adapters
152
+
153
+ | Adapter | Use Case |
154
+ |---------|----------|
155
+ | `@kb-labs/adapters-openai` | Direct OpenAI API access |
156
+
157
+ ## License
158
+
159
+ [KB Public License v1.1](../../LICENSE) - KB Labs Team
@@ -0,0 +1,3 @@
1
+ export { KBLabsGatewayLLM, KBLabsGatewayLLMConfig, createAdapter, createAdapter as default } from './llm.js';
2
+ export { manifest } from './manifest.js';
3
+ import '@kb-labs/core-platform';
package/dist/index.js ADDED
@@ -0,0 +1,218 @@
1
+ import OpenAI from 'openai';
2
+
3
// src/llm.ts
// Fallbacks applied when KBLabsGatewayLLMConfig omits the corresponding field.
var DEFAULT_GATEWAY_URL = "https://api.kblabs.ru";
var DEFAULT_MAX_TOKENS = 16384;
6
// Extract the numeric `exp` (expiry, seconds since epoch) claim from a JWT.
// Any malformed input — missing payload segment, bad base64url, bad JSON,
// or a non-numeric exp — yields 0, which callers treat as already expired.
function jwtExp(token) {
  try {
    const [, payload = ""] = token.split(".");
    const claims = JSON.parse(Buffer.from(payload, "base64url").toString("utf8"));
    return typeof claims.exp === "number" ? claims.exp : 0;
  } catch {
    return 0;
  }
}
16
// A token counts as expired when absent or when its exp claim falls within
// the next 60 seconds (safety margin for clock skew and in-flight requests).
function tokenExpired(token) {
  if (!token) {
    return true;
  }
  const deadline = Math.floor(Date.now() / 1e3) + 60;
  return jwtExp(token) < deadline;
}
22
// Exchange machine-identity credentials for a fresh gateway access token.
// Throws on any non-2xx response or when the response lacks accessToken.
async function refreshToken(gatewayURL, clientId, clientSecret) {
  const res = await fetch(`${gatewayURL}/auth/token`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ clientId, clientSecret })
  });
  if (!res.ok) {
    throw new Error(`KB Labs Gateway token refresh failed: HTTP ${res.status}`);
  }
  const { accessToken } = await res.json();
  if (!accessToken) {
    throw new Error("KB Labs Gateway token refresh: empty accessToken in response");
  }
  return accessToken;
}
37
// KB Labs Gateway LLM adapter (compiled from src/llm.ts).
//
// Talks to the gateway's OpenAI-compatible endpoint at `${gatewayURL}/llm`.
// When machine-identity credentials (kbClientId/kbClientSecret) are present,
// a short-lived JWT is refreshed transparently before each request.
var KBLabsGatewayLLM = class {
  gatewayURL;
  kbClientId;
  kbClientSecret;
  defaultModel;
  defaultMaxTokens;
  accessToken;
  client;
  constructor(config = {}) {
    this.gatewayURL = config.gatewayURL ?? DEFAULT_GATEWAY_URL;
    this.kbClientId = config.kbClientId;
    this.kbClientSecret = config.kbClientSecret;
    this.defaultModel = config.defaultModel ?? "small";
    this.defaultMaxTokens = config.defaultMaxTokens ?? DEFAULT_MAX_TOKENS;
    // Static-key path: explicit apiKey, then env fallbacks. "pending" is a
    // placeholder that ensureToken() replaces once credentials are exchanged.
    const staticKey = config.apiKey ?? process.env.KB_LABS_API_KEY ?? process.env.OPENAI_API_KEY ?? "pending";
    this.client = new OpenAI({
      apiKey: staticKey,
      baseURL: `${this.gatewayURL}/llm`
    });
  }
  // Neither response caching nor true streaming is implemented yet.
  getProtocolCapabilities() {
    return {
      cache: { supported: false },
      stream: { supported: false }
    };
  }
  // Refresh the JWT (and rebuild the OpenAI client) when running on machine
  // identity and the current token is missing or about to expire. A no-op
  // when a static apiKey is used (no client credentials configured).
  async ensureToken() {
    const hasCredentials = this.kbClientId && this.kbClientSecret;
    if (!hasCredentials || !tokenExpired(this.accessToken)) {
      return;
    }
    this.accessToken = await refreshToken(this.gatewayURL, this.kbClientId, this.kbClientSecret);
    this.client = new OpenAI({
      apiKey: this.accessToken,
      baseURL: `${this.gatewayURL}/llm`
    });
  }
  // Single-turn completion: wraps `prompt` as one user message.
  // Missing usage fields are normalized to 0.
  async complete(prompt, options) {
    await this.ensureToken();
    const request = {
      model: options?.model ?? this.defaultModel,
      max_tokens: options?.maxTokens ?? this.defaultMaxTokens,
      messages: [{ role: "user", content: prompt }]
    };
    if (options?.temperature !== void 0) {
      request.temperature = options.temperature;
    }
    const response = await this.client.chat.completions.create(request);
    const choice = response.choices[0];
    return {
      content: choice?.message?.content ?? "",
      usage: {
        promptTokens: response.usage?.prompt_tokens ?? 0,
        completionTokens: response.usage?.completion_tokens ?? 0
      },
      model: response.model
    };
  }
  // Pseudo-stream: no SSE support yet, so the whole completion is produced
  // first and yielded as a single chunk.
  async *stream(prompt, options) {
    const { content } = await this.complete(prompt, options);
    yield content;
  }
  // Multi-turn chat with OpenAI-style function calling. Tool-call arguments
  // are JSON-decoded; an empty arguments string decodes to {}.
  async chatWithTools(messages, options) {
    await this.ensureToken();
    // toolChoice === "none" suppresses the tools field entirely.
    const tools = options.toolChoice === "none" ? void 0 : options.tools.map((t) => ({
      type: "function",
      function: {
        name: t.name,
        description: t.description,
        parameters: t.inputSchema
      }
    }));
    const response = await this.client.chat.completions.create({
      model: options?.model ?? this.defaultModel,
      max_tokens: options?.maxTokens ?? this.defaultMaxTokens,
      messages: messages.map((m) => this.toOpenAIMessage(m)),
      tools
    });
    const message = response.choices[0]?.message;
    if (!message) {
      return { content: "", toolCalls: [], usage: { promptTokens: 0, completionTokens: 0 }, model: "" };
    }
    return {
      content: message.content ?? "",
      toolCalls: (message.tool_calls ?? []).map((tc) => ({
        id: tc.id,
        name: tc.function.name,
        input: JSON.parse(tc.function.arguments || "{}")
      })),
      usage: {
        promptTokens: response.usage?.prompt_tokens ?? 0,
        completionTokens: response.usage?.completion_tokens ?? 0
      },
      model: response.model
    };
  }
  // Convert a platform-level message into the OpenAI wire format.
  toOpenAIMessage(m) {
    if (m.role === "tool") {
      return { role: "tool", tool_call_id: m.toolCallId ?? "", content: m.content };
    }
    if (m.role === "assistant" && m.toolCalls?.length) {
      const tool_calls = m.toolCalls.map((tc) => ({
        id: tc.id,
        type: "function",
        function: { name: tc.name, arguments: JSON.stringify(tc.input) }
      }));
      return { role: "assistant", content: m.content ?? null, tool_calls };
    }
    return { role: m.role, content: m.content };
  }
};
168
// Factory matching the platform adapter contract; also the default export.
function createAdapter(config) {
  const adapter = new KBLabsGatewayLLM(config);
  return adapter;
}
171
+
172
// src/manifest.ts
// Adapter manifest consumed by the KB Labs platform registry.
var manifest = {
  manifestVersion: "1.0.0",
  id: "kblabs-gateway-llm",
  name: "KB Labs Gateway LLM",
  version: "0.1.0",
  description: "KB Labs Gateway adapter \u2014 OpenAI-compatible LLM proxy with automatic JWT token refresh",
  author: "KB Labs Team",
  license: "KBPL-1.1",
  type: "core",
  implements: "ILLM",
  capabilities: {
    streaming: false,
    custom: {
      functionCalling: true,
      autoTokenRefresh: true
    }
  },
  // Schema-style description of the options accepted by createAdapter().
  configSchema: {
    gatewayURL: { type: "string", default: "https://api.kblabs.ru", description: "KB Labs Gateway base URL" },
    kbClientId: { type: "string", description: "Machine identity client ID (from kb-create --demo)" },
    kbClientSecret: { type: "string", description: "Machine identity client secret (from kb-create --demo)" },
    apiKey: { type: "string", description: "Static Bearer token (alternative to clientId/clientSecret)" },
    defaultModel: { type: "string", default: "small", description: "Default model tier: small | medium | large" }
  }
};
215
+
216
+ export { KBLabsGatewayLLM, createAdapter, createAdapter as default, manifest };
217
+ //# sourceMappingURL=index.js.map
218
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/llm.ts","../src/manifest.ts"],"names":[],"mappings":";;;AAiDA,IAAM,mBAAA,GAAsB,uBAAA;AAC5B,IAAM,kBAAA,GAAqB,KAAA;AAK3B,SAAS,OAAO,KAAA,EAAuB;AACrC,EAAA,IAAI;AACF,IAAA,MAAM,UAAU,KAAA,CAAM,KAAA,CAAM,GAAG,CAAA,CAAE,CAAC,CAAA,IAAK,EAAA;AACvC,IAAA,MAAM,OAAO,MAAA,CAAO,IAAA,CAAK,SAAS,WAAW,CAAA,CAAE,SAAS,MAAM,CAAA;AAC9D,IAAA,MAAM,IAAA,GAAO,IAAA,CAAK,KAAA,CAAM,IAAI,CAAA;AAC5B,IAAA,OAAO,OAAO,IAAA,CAAK,GAAA,KAAQ,QAAA,GAAW,KAAK,GAAA,GAAM,CAAA;AAAA,EACnD,CAAA,CAAA,MAAQ;AACN,IAAA,OAAO,CAAA;AAAA,EACT;AACF;AAGA,SAAS,aAAa,KAAA,EAAoC;AACxD,EAAA,IAAI,CAAC,KAAA,EAAO;AAAC,IAAA,OAAO,IAAA;AAAA,EAAK;AACzB,EAAA,OAAO,MAAA,CAAO,KAAK,CAAA,GAAI,IAAA,CAAK,MAAM,IAAA,CAAK,GAAA,EAAI,GAAI,GAAI,CAAA,GAAI,EAAA;AACzD;AAGA,eAAe,YAAA,CACb,UAAA,EACA,QAAA,EACA,YAAA,EACiB;AACjB,EAAA,MAAM,GAAA,GAAM,MAAM,KAAA,CAAM,CAAA,EAAG,UAAU,CAAA,WAAA,CAAA,EAAe;AAAA,IAClD,MAAA,EAAQ,MAAA;AAAA,IACR,OAAA,EAAS,EAAE,cAAA,EAAgB,kBAAA,EAAmB;AAAA,IAC9C,MAAM,IAAA,CAAK,SAAA,CAAU,EAAE,QAAA,EAAU,cAAc;AAAA,GAChD,CAAA;AACD,EAAA,IAAI,CAAC,IAAI,EAAA,EAAI;AACX,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,2CAAA,EAA8C,GAAA,CAAI,MAAM,CAAA,CAAE,CAAA;AAAA,EAC5E;AACA,EAAA,MAAM,IAAA,GAAQ,MAAM,GAAA,CAAI,IAAA,EAAK;AAC7B,EAAA,IAAI,CAAC,KAAK,WAAA,EAAa;AACrB,IAAA,MAAM,IAAI,MAAM,8DAA8D,CAAA;AAAA,EAChF;AACA,EAAA,OAAO,IAAA,CAAK,WAAA;AACd;AAIO,IAAM,mBAAN,MAAuC;AAAA,EAC3B,UAAA;AAAA,EACA,UAAA;AAAA,EACA,cAAA;AAAA,EACA,YAAA;AAAA,EACA,gBAAA;AAAA,EAET,WAAA;AAAA,EACA,MAAA;AAAA,EAER,WAAA,CAAY,MAAA,GAAiC,EAAC,EAAG;AAC/C,IAAA,IAAA,CAAK,UAAA,GAAa,OAAO,UAAA,IAAc,mBAAA;AACvC,IAAA,IAAA,CAAK,aAAa,MAAA,CAAO,UAAA;AACzB,IAAA,IAAA,CAAK,iBAAiB,MAAA,CAAO,cAAA;AAC7B,IAAA,IAAA,CAAK,YAAA,GAAe,OAAO,YAAA,IAAgB,OAAA;AAC3C,IAAA,IAAA,CAAK,gBAAA,GAAmB,OAAO,gBAAA,IAAoB,kBAAA;AAEnD,IAAA,MAAM,UAAA,GACJ,OAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,eAAA,IAAmB,OAAA,CAAQ,IAAI,cAAA,IAAkB,SAAA;AAEhF,IAAA,IAAA,CAAK,MAAA,GAAS,IAAI,MAAA,CAAO;AAAA,MACvB,MAAA,EAAQ,UAAA;AAAA,MACR,OAAA,EAAS,CAAA,EAAG,IAAA,CAAK,UAAU,CAAA,IAAA;AAAA,KAC5B,CAAA;AAAA,EACH;AAAA,EAEA,uBAAA,GAAmD;AACjD,IAAA,OAAO;AAAA,MAC
L,KAAA,EAAO,EAAE,SAAA,EAAW,KAAA,EAAM;AAAA,MAC1B,MAAA,EAAQ,EAAE,SAAA,EAAW,KAAA;AAAM,KAC7B;AAAA,EACF;AAAA,EAEA,MAAc,WAAA,GAA6B;AACzC,IAAA,IAAI,CAAC,IAAA,CAAK,UAAA,IAAc,CAAC,KAAK,cAAA,EAAgB;AAAC,MAAA;AAAA,IAAO;AACtD,IAAA,IAAI,CAAC,YAAA,CAAa,IAAA,CAAK,WAAW,CAAA,EAAG;AAAC,MAAA;AAAA,IAAO;AAE7C,IAAA,IAAA,CAAK,cAAc,MAAM,YAAA;AAAA,MACvB,IAAA,CAAK,UAAA;AAAA,MACL,IAAA,CAAK,UAAA;AAAA,MACL,IAAA,CAAK;AAAA,KACP;AACA,IAAA,IAAA,CAAK,MAAA,GAAS,IAAI,MAAA,CAAO;AAAA,MACvB,QAAQ,IAAA,CAAK,WAAA;AAAA,MACb,OAAA,EAAS,CAAA,EAAG,IAAA,CAAK,UAAU,CAAA,IAAA;AAAA,KAC5B,CAAA;AAAA,EACH;AAAA,EAEA,MAAM,QAAA,CAAS,MAAA,EAAgB,OAAA,EAA4C;AACzE,IAAA,MAAM,KAAK,WAAA,EAAY;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,EAAS,KAAA,IAAS,IAAA,CAAK,YAAA;AAErC,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACzD,KAAA;AAAA,MACA,UAAA,EAAY,OAAA,EAAS,SAAA,IAAa,IAAA,CAAK,gBAAA;AAAA,MACvC,UAAU,CAAC,EAAE,MAAM,MAAA,EAAQ,OAAA,EAAS,QAAQ,CAAA;AAAA,MAC5C,GAAI,SAAS,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,KAClF,CAAA;AAED,IAAA,MAAM,UAAU,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA,EAAG,SAAS,OAAA,IAAW,EAAA;AACzD,IAAA,OAAO;AAAA,MACL,OAAA;AAAA,MACA,KAAA,EAAO;AAAA,QACL,YAAA,EAAc,QAAA,CAAS,KAAA,EAAO,aAAA,IAAiB,CAAA;AAAA,QAC/C,gBAAA,EAAkB,QAAA,CAAS,KAAA,EAAO,iBAAA,IAAqB;AAAA,OACzD;AAAA,MACA,OAAO,QAAA,CAAS;AAAA,KAClB;AAAA,EACF;AAAA,EAEA,OAAO,MAAA,CAAO,MAAA,EAAgB,OAAA,EAA6C;AACzE,IAAA,MAAM,QAAA,GAAW,MAAM,IAAA,CAAK,QAAA,CAAS,QAAQ,OAAO,CAAA;AACpD,IAAA,MAAM,QAAA,CAAS,OAAA;AAAA,EACjB;AAAA,EAEA,MAAM,aAAA,CACJ,QAAA,EACA,OAAA,EAC8B;AAC9B,IAAA,MAAM,KAAK,WAAA,EAAY;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,EAAS,KAAA,IAAS,IAAA,CAAK,YAAA;AAErC,IAAA,MAAM,cAAA,GAAiB,SAAS,GAAA,CAAI,CAAC,MAAM,IAAA,CAAK,eAAA,CAAgB,CAAC,CAAC,CAAA;AAElE,IAAA,MAAM,KAAA,GAA0C,OAAA,CAAQ,KAAA,CAAM,GAAA,CAAI,CAAC,CAAA,MAAgB;AAAA,MACjF,IAAA,EAAM,UAAA;AAAA,MACN,QAAA,EAAU;AAAA,QACR,MAAM,CAAA,CAAE,IAAA;AAAA,QACR,aAAa,CAAA,CAAE,WAAA;AAAA,QACf,YAAY,CAAA,CAAE;AAAA;AAChB,KACF,CAAE,CAAA;AAEF,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACzD,KAAA;AAAA,MACA,U
AAA,EAAY,OAAA,EAAS,SAAA,IAAa,IAAA,CAAK,gBAAA;AAAA,MACvC,QAAA,EAAU,cAAA;AAAA,MACV,KAAA,EAAO,OAAA,CAAQ,UAAA,KAAe,MAAA,GAAS,KAAA,GAAQ;AAAA,KAChD,CAAA;AAED,IAAA,MAAM,OAAA,GAAU,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA,EAAG,OAAA;AACrC,IAAA,IAAI,CAAC,OAAA,EAAS;AACZ,MAAA,OAAO,EAAE,OAAA,EAAS,EAAA,EAAI,SAAA,EAAW,EAAC,EAAG,KAAA,EAAO,EAAE,YAAA,EAAc,CAAA,EAAG,gBAAA,EAAkB,CAAA,EAAE,EAAG,OAAO,EAAA,EAAG;AAAA,IAClG;AAEA,IAAA,MAAM,aAA4B,OAAA,CAAQ,UAAA,IAAc,EAAC,EAAG,GAAA,CAAI,CAAC,EAAA,MAAQ;AAAA,MACvE,IAAI,EAAA,CAAG,EAAA;AAAA,MACP,IAAA,EAAM,GAAG,QAAA,CAAS,IAAA;AAAA,MAClB,OAAO,IAAA,CAAK,KAAA,CAAM,EAAA,CAAG,QAAA,CAAS,aAAa,IAAI;AAAA,KACjD,CAAE,CAAA;AAEF,IAAA,OAAO;AAAA,MACL,OAAA,EAAS,QAAQ,OAAA,IAAW,EAAA;AAAA,MAC5B,SAAA;AAAA,MACA,KAAA,EAAO;AAAA,QACL,YAAA,EAAc,QAAA,CAAS,KAAA,EAAO,aAAA,IAAiB,CAAA;AAAA,QAC/C,gBAAA,EAAkB,QAAA,CAAS,KAAA,EAAO,iBAAA,IAAqB;AAAA,OACzD;AAAA,MACA,OAAO,QAAA,CAAS;AAAA,KAClB;AAAA,EACF;AAAA,EAEQ,gBAAgB,CAAA,EAAuD;AAC7E,IAAA,IAAI,CAAA,CAAE,SAAS,MAAA,EAAQ;AACrB,MAAA,OAAO;AAAA,QACL,IAAA,EAAM,MAAA;AAAA,QACN,YAAA,EAAc,EAAE,UAAA,IAAc,EAAA;AAAA,QAC9B,SAAS,CAAA,CAAE;AAAA,OACb;AAAA,IACF;AACA,IAAA,IAAI,CAAA,CAAE,IAAA,KAAS,WAAA,IAAe,CAAA,CAAE,WAAW,MAAA,EAAQ;AACjD,MAAA,OAAO;AAAA,QACL,IAAA,EAAM,WAAA;AAAA,QACN,OAAA,EAAS,EAAE,OAAA,IAAW,IAAA;AAAA,QACtB,UAAA,EAAY,CAAA,CAAE,SAAA,CAAU,GAAA,CAAI,CAAC,EAAA,MAAqB;AAAA,UAChD,IAAI,EAAA,CAAG,EAAA;AAAA,UACP,IAAA,EAAM,UAAA;AAAA,UACN,QAAA,EAAU;AAAA,YACR,MAAM,EAAA,CAAG,IAAA;AAAA,YACT,SAAA,EAAW,IAAA,CAAK,SAAA,CAAU,EAAA,CAAG,KAAK;AAAA;AACpC,SACF,CAAE;AAAA,OACJ;AAAA,IACF;AACA,IAAA,OAAO;AAAA,MACL,MAAM,CAAA,CAAE,IAAA;AAAA,MACR,SAAS,CAAA,CAAE;AAAA,KACb;AAAA,EACF;AACF;AAEO,SAAS,cAAc,MAAA,EAAmD;AAC/E,EAAA,OAAO,IAAI,iBAAiB,MAAM,CAAA;AACpC;;;ACtPO,IAAM,QAAA,GAA4B;AAAA,EACvC,eAAA,EAAiB,OAAA;AAAA,EACjB,EAAA,EAAI,oBAAA;AAAA,EACJ,IAAA,EAAM,qBAAA;AAAA,EACN,OAAA,EAAS,OAAA;AAAA,EACT,WAAA,EACE,6FAAA;AAAA,EACF,MAAA,EAAQ,cAAA;AAAA,EACR,OAAA,EAAS,UAAA;AAAA,EACT,IAAA,EAAM,MAAA;AAAA,EACN,UAAA,EAAY,MAAA;AAAA,EACZ,YAAA,EAAc;AAAA,IACZ,SAAA,EAAW,KAAA;AAAA,IACX,MAAA,EAAQ;AAAA,MACN,eAAA,
EAAiB,IAAA;AAAA,MACjB,gBAAA,EAAkB;AAAA;AACpB,GACF;AAAA,EACA,YAAA,EAAc;AAAA,IACZ,UAAA,EAAY;AAAA,MACV,IAAA,EAAM,QAAA;AAAA,MACN,OAAA,EAAS,uBAAA;AAAA,MACT,WAAA,EAAa;AAAA,KACf;AAAA,IACA,UAAA,EAAY;AAAA,MACV,IAAA,EAAM,QAAA;AAAA,MACN,WAAA,EAAa;AAAA,KACf;AAAA,IACA,cAAA,EAAgB;AAAA,MACd,IAAA,EAAM,QAAA;AAAA,MACN,WAAA,EAAa;AAAA,KACf;AAAA,IACA,MAAA,EAAQ;AAAA,MACN,IAAA,EAAM,QAAA;AAAA,MACN,WAAA,EAAa;AAAA,KACf;AAAA,IACA,YAAA,EAAc;AAAA,MACZ,IAAA,EAAM,QAAA;AAAA,MACN,OAAA,EAAS,OAAA;AAAA,MACT,WAAA,EAAa;AAAA;AACf;AAEJ","file":"index.js","sourcesContent":["/**\n * @module @kb-labs/adapters-kblabs-gateway/llm\n *\n * KB Labs Gateway LLM adapter.\n *\n * Implements ILLM against the KB Labs Gateway OpenAI-compatible endpoint:\n * POST <gatewayURL>/llm/v1/chat/completions\n *\n * Authentication: machine identity (clientId + clientSecret) obtained via\n * kb-create --demo. The adapter automatically refreshes the short-lived\n * JWT access token (~15 min) before each request using the stored credentials.\n */\n\nimport OpenAI from \"openai\";\nimport type {\n ILLM,\n LLMOptions,\n LLMResponse,\n LLMMessage,\n LLMToolCallOptions,\n LLMToolCallResponse,\n LLMTool,\n LLMToolCall,\n LLMProtocolCapabilities,\n} from \"@kb-labs/core-platform\";\n\n// ── Config ───────────────────────────────────────────────────────────────────\n\nexport interface KBLabsGatewayLLMConfig {\n /** KB Labs Gateway base URL. Defaults to https://api.kblabs.ru */\n gatewayURL?: string;\n /**\n * Machine identity credentials for automatic JWT token refresh.\n * Populated by kb-create --demo. Replace with your own API key\n * by removing these fields and setting apiKey instead.\n */\n kbClientId?: string;\n kbClientSecret?: string;\n /**\n * Static access token. Used directly if kbClientId/kbClientSecret are absent.\n * Falls back to KB_LABS_API_KEY or OPENAI_API_KEY env vars.\n */\n apiKey?: string;\n /** Default model tier. Values: \"small\" | \"medium\" | \"large\". 
*/\n defaultModel?: string;\n /** Default max output tokens. Overrides the API default (4096). */\n defaultMaxTokens?: number;\n}\n\nconst DEFAULT_GATEWAY_URL = \"https://api.kblabs.ru\";\nconst DEFAULT_MAX_TOKENS = 16_384;\n\n// ── Token refresh ─────────────────────────────────────────────────────────────\n\n/** Decode JWT exp claim. Returns 0 on parse failure. */\nfunction jwtExp(token: string): number {\n try {\n const payload = token.split(\".\")[1] ?? \"\";\n const json = Buffer.from(payload, \"base64url\").toString(\"utf8\");\n const data = JSON.parse(json) as { exp?: number };\n return typeof data.exp === \"number\" ? data.exp : 0;\n } catch {\n return 0;\n }\n}\n\n/** Returns true if token is absent or expires within the next 60 seconds. */\nfunction tokenExpired(token: string | undefined): boolean {\n if (!token) {return true;}\n return jwtExp(token) < Math.floor(Date.now() / 1000) + 60;\n}\n\n/** Exchange clientId/clientSecret for a fresh accessToken. */\nasync function refreshToken(\n gatewayURL: string,\n clientId: string,\n clientSecret: string,\n): Promise<string> {\n const res = await fetch(`${gatewayURL}/auth/token`, {\n method: \"POST\",\n headers: { \"Content-Type\": \"application/json\" },\n body: JSON.stringify({ clientId, clientSecret }),\n });\n if (!res.ok) {\n throw new Error(`KB Labs Gateway token refresh failed: HTTP ${res.status}`);\n }\n const data = (await res.json()) as { accessToken: string };\n if (!data.accessToken) {\n throw new Error(\"KB Labs Gateway token refresh: empty accessToken in response\");\n }\n return data.accessToken;\n}\n\n// ── Adapter ───────────────────────────────────────────────────────────────────\n\nexport class KBLabsGatewayLLM implements ILLM {\n private readonly gatewayURL: string;\n private readonly kbClientId?: string;\n private readonly kbClientSecret?: string;\n private readonly defaultModel: string;\n private readonly defaultMaxTokens: number;\n\n private accessToken?: string;\n private client: 
OpenAI;\n\n constructor(config: KBLabsGatewayLLMConfig = {}) {\n this.gatewayURL = config.gatewayURL ?? DEFAULT_GATEWAY_URL;\n this.kbClientId = config.kbClientId;\n this.kbClientSecret = config.kbClientSecret;\n this.defaultModel = config.defaultModel ?? \"small\";\n this.defaultMaxTokens = config.defaultMaxTokens ?? DEFAULT_MAX_TOKENS;\n\n const initialKey =\n config.apiKey ?? process.env.KB_LABS_API_KEY ?? process.env.OPENAI_API_KEY ?? \"pending\";\n\n this.client = new OpenAI({\n apiKey: initialKey,\n baseURL: `${this.gatewayURL}/llm`,\n });\n }\n\n getProtocolCapabilities(): LLMProtocolCapabilities {\n return {\n cache: { supported: false },\n stream: { supported: false },\n };\n }\n\n private async ensureToken(): Promise<void> {\n if (!this.kbClientId || !this.kbClientSecret) {return;}\n if (!tokenExpired(this.accessToken)) {return;}\n\n this.accessToken = await refreshToken(\n this.gatewayURL,\n this.kbClientId,\n this.kbClientSecret,\n );\n this.client = new OpenAI({\n apiKey: this.accessToken,\n baseURL: `${this.gatewayURL}/llm`,\n });\n }\n\n async complete(prompt: string, options?: LLMOptions): Promise<LLMResponse> {\n await this.ensureToken();\n const model = options?.model ?? this.defaultModel;\n\n const response = await this.client.chat.completions.create({\n model,\n max_tokens: options?.maxTokens ?? this.defaultMaxTokens,\n messages: [{ role: \"user\", content: prompt }],\n ...(options?.temperature !== undefined ? { temperature: options.temperature } : {}),\n });\n\n const content = response.choices[0]?.message?.content ?? \"\";\n return {\n content,\n usage: {\n promptTokens: response.usage?.prompt_tokens ?? 0,\n completionTokens: response.usage?.completion_tokens ?? 
0,\n },\n model: response.model,\n };\n }\n\n async *stream(prompt: string, options?: LLMOptions): AsyncIterable<string> {\n const response = await this.complete(prompt, options);\n yield response.content;\n }\n\n async chatWithTools(\n messages: LLMMessage[],\n options: LLMToolCallOptions,\n ): Promise<LLMToolCallResponse> {\n await this.ensureToken();\n const model = options?.model ?? this.defaultModel;\n\n const openaiMessages = messages.map((m) => this.toOpenAIMessage(m));\n\n const tools: OpenAI.Chat.ChatCompletionTool[] = options.tools.map((t: LLMTool) => ({\n type: \"function\" as const,\n function: {\n name: t.name,\n description: t.description,\n parameters: t.inputSchema as Record<string, unknown>,\n },\n }));\n\n const response = await this.client.chat.completions.create({\n model,\n max_tokens: options?.maxTokens ?? this.defaultMaxTokens,\n messages: openaiMessages,\n tools: options.toolChoice !== \"none\" ? tools : undefined,\n });\n\n const message = response.choices[0]?.message;\n if (!message) {\n return { content: \"\", toolCalls: [], usage: { promptTokens: 0, completionTokens: 0 }, model: \"\" };\n }\n\n const toolCalls: LLMToolCall[] = (message.tool_calls ?? []).map((tc) => ({\n id: tc.id,\n name: tc.function.name,\n input: JSON.parse(tc.function.arguments || \"{}\") as Record<string, unknown>,\n }));\n\n return {\n content: message.content ?? \"\",\n toolCalls,\n usage: {\n promptTokens: response.usage?.prompt_tokens ?? 0,\n completionTokens: response.usage?.completion_tokens ?? 0,\n },\n model: response.model,\n };\n }\n\n private toOpenAIMessage(m: LLMMessage): OpenAI.Chat.ChatCompletionMessageParam {\n if (m.role === \"tool\") {\n return {\n role: \"tool\",\n tool_call_id: m.toolCallId ?? \"\",\n content: m.content,\n };\n }\n if (m.role === \"assistant\" && m.toolCalls?.length) {\n return {\n role: \"assistant\",\n content: m.content ?? 
null,\n tool_calls: m.toolCalls.map((tc: LLMToolCall) => ({\n id: tc.id,\n type: \"function\" as const,\n function: {\n name: tc.name,\n arguments: JSON.stringify(tc.input),\n },\n })),\n };\n }\n return {\n role: m.role as \"user\" | \"assistant\" | \"system\",\n content: m.content,\n };\n }\n}\n\nexport function createAdapter(config?: KBLabsGatewayLLMConfig): KBLabsGatewayLLM {\n return new KBLabsGatewayLLM(config);\n}\n\nexport default createAdapter;\n","import type { AdapterManifest } from \"@kb-labs/core-platform\";\n\nexport const manifest: AdapterManifest = {\n manifestVersion: \"1.0.0\",\n id: \"kblabs-gateway-llm\",\n name: \"KB Labs Gateway LLM\",\n version: \"0.1.0\",\n description:\n \"KB Labs Gateway adapter — OpenAI-compatible LLM proxy with automatic JWT token refresh\",\n author: \"KB Labs Team\",\n license: \"KBPL-1.1\",\n type: \"core\",\n implements: \"ILLM\",\n capabilities: {\n streaming: false,\n custom: {\n functionCalling: true,\n autoTokenRefresh: true,\n },\n },\n configSchema: {\n gatewayURL: {\n type: \"string\",\n default: \"https://api.kblabs.ru\",\n description: \"KB Labs Gateway base URL\",\n },\n kbClientId: {\n type: \"string\",\n description: \"Machine identity client ID (from kb-create --demo)\",\n },\n kbClientSecret: {\n type: \"string\",\n description: \"Machine identity client secret (from kb-create --demo)\",\n },\n apiKey: {\n type: \"string\",\n description: \"Static Bearer token (alternative to clientId/clientSecret)\",\n },\n defaultModel: {\n type: \"string\",\n default: \"small\",\n description: \"Default model tier: small | medium | large\",\n },\n },\n};\n"]}
package/dist/llm.d.ts ADDED
@@ -0,0 +1,54 @@
1
+ import { ILLM, LLMProtocolCapabilities, LLMOptions, LLMResponse, LLMMessage, LLMToolCallOptions, LLMToolCallResponse } from '@kb-labs/core-platform';
2
+
3
+ /**
4
+ * @module @kb-labs/adapters-kblabs-gateway/llm
5
+ *
6
+ * KB Labs Gateway LLM adapter.
7
+ *
8
+ * Implements ILLM against the KB Labs Gateway OpenAI-compatible endpoint:
9
+ * POST <gatewayURL>/llm/v1/chat/completions
10
+ *
11
+ * Authentication: machine identity (clientId + clientSecret) obtained via
12
+ * kb-create --demo. The adapter automatically refreshes the short-lived
13
+ * JWT access token (~15 min) before each request using the stored credentials.
14
+ */
15
+
16
+ /** Constructor options for KBLabsGatewayLLM / createAdapter (all optional). */
+ interface KBLabsGatewayLLMConfig {
17
+ /** KB Labs Gateway base URL. Defaults to https://api.kblabs.ru */
18
+ gatewayURL?: string;
19
+ /**
20
+ * Machine identity credentials for automatic JWT token refresh.
21
+ * Populated by kb-create --demo. Replace with your own API key
22
+ * by removing these fields and setting apiKey instead.
23
+ */
24
+ kbClientId?: string;
25
+ kbClientSecret?: string;
26
+ /**
27
+ * Static access token. Used directly if kbClientId/kbClientSecret are absent.
28
+ * Falls back to KB_LABS_API_KEY or OPENAI_API_KEY env vars.
29
+ */
30
+ apiKey?: string;
31
+ /** Default model tier. Values: "small" | "medium" | "large". */
32
+ defaultModel?: string;
33
+ /** Default max output tokens. Overrides the API default (4096). */
34
+ defaultMaxTokens?: number;
35
+ }
36
+ /**
+  * KB Labs Gateway LLM adapter: implements ILLM against the gateway's
+  * OpenAI-compatible endpoint, refreshing the machine-identity JWT
+  * automatically before requests (see the module header above).
+  */
+ declare class KBLabsGatewayLLM implements ILLM {
37
+ private readonly gatewayURL;
38
+ private readonly kbClientId?;
39
+ private readonly kbClientSecret?;
40
+ private readonly defaultModel;
41
+ private readonly defaultMaxTokens;
42
+ private accessToken?;
43
+ private client;
44
+ constructor(config?: KBLabsGatewayLLMConfig);
45
+ getProtocolCapabilities(): LLMProtocolCapabilities;
46
+ private ensureToken;
47
+ complete(prompt: string, options?: LLMOptions): Promise<LLMResponse>;
48
+ stream(prompt: string, options?: LLMOptions): AsyncIterable<string>;
49
+ chatWithTools(messages: LLMMessage[], options: LLMToolCallOptions): Promise<LLMToolCallResponse>;
50
+ private toOpenAIMessage;
51
+ }
52
+ /** Factory for KBLabsGatewayLLM; also re-exported as the package default. */
+ declare function createAdapter(config?: KBLabsGatewayLLMConfig): KBLabsGatewayLLM;
53
+
54
+ export { KBLabsGatewayLLM, type KBLabsGatewayLLMConfig, createAdapter, createAdapter as default };
package/dist/llm.js ADDED
@@ -0,0 +1,175 @@
1
+ import OpenAI from 'openai';
2
+
3
+ // src/llm.ts
4
+ var DEFAULT_GATEWAY_URL = "https://api.kblabs.ru";
5
+ var DEFAULT_MAX_TOKENS = 16384;
6
/**
 * Extract the `exp` (expiry, unix seconds) claim from a JWT.
 * Returns 0 when the token is malformed or carries no numeric `exp`.
 */
function jwtExp(token) {
  try {
    const [, payloadSegment = ""] = token.split(".");
    const decoded = Buffer.from(payloadSegment, "base64url").toString("utf8");
    const claims = JSON.parse(decoded);
    if (typeof claims.exp === "number") {
      return claims.exp;
    }
    return 0;
  } catch {
    return 0;
  }
}
16
/**
 * Whether `token` is missing or expires within the next 60 seconds.
 * The 60s margin makes us refresh slightly early instead of racing
 * the server-side expiry mid-request.
 */
function tokenExpired(token) {
  if (!token) {
    return true;
  }
  const nowSeconds = Math.floor(Date.now() / 1000);
  return jwtExp(token) < nowSeconds + 60;
}
22
/**
 * Exchange machine-identity credentials for a fresh JWT access token via
 * POST `${gatewayURL}/auth/token`.
 *
 * @throws Error on a non-2xx response or when the body lacks an accessToken.
 */
async function refreshToken(gatewayURL, clientId, clientSecret) {
  const response = await fetch(`${gatewayURL}/auth/token`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ clientId, clientSecret })
  });
  if (!response.ok) {
    throw new Error(`KB Labs Gateway token refresh failed: HTTP ${response.status}`);
  }
  const payload = await response.json();
  const { accessToken } = payload;
  if (!accessToken) {
    throw new Error("KB Labs Gateway token refresh: empty accessToken in response");
  }
  return accessToken;
}
37
/**
 * ILLM adapter backed by the KB Labs Gateway's OpenAI-compatible endpoint
 * (requests go through the OpenAI SDK pointed at `${gatewayURL}/llm`).
 *
 * When machine-identity credentials (kbClientId/kbClientSecret) are present,
 * a short-lived JWT access token is refreshed automatically before each
 * request; otherwise a static apiKey (or env-var fallback) is used as-is.
 */
var KBLabsGatewayLLM = class {
  gatewayURL;
  kbClientId;
  kbClientSecret;
  defaultModel;
  defaultMaxTokens;
  /** Cached short-lived JWT; only populated when machine identity is in use. */
  accessToken;
  /** OpenAI SDK client; rebuilt after every token refresh. */
  client;
  constructor(config = {}) {
    this.gatewayURL = config.gatewayURL ?? DEFAULT_GATEWAY_URL;
    this.kbClientId = config.kbClientId;
    this.kbClientSecret = config.kbClientSecret;
    this.defaultModel = config.defaultModel ?? "small";
    this.defaultMaxTokens = config.defaultMaxTokens ?? DEFAULT_MAX_TOKENS;
    // "pending" is a placeholder: the real key arrives on the first
    // ensureToken() refresh when machine identity is configured.
    const initialKey = config.apiKey ?? process.env.KB_LABS_API_KEY ?? process.env.OPENAI_API_KEY ?? "pending";
    this.client = new OpenAI({
      apiKey: initialKey,
      baseURL: `${this.gatewayURL}/llm`
    });
  }
  /** Protocol capabilities: no prompt caching, no true streaming. */
  getProtocolCapabilities() {
    return {
      cache: { supported: false },
      stream: { supported: false }
    };
  }
  /**
   * Refresh the cached access token (and rebuild the SDK client) when machine
   * identity is configured and the token is missing or near expiry.
   * No-op for static-apiKey usage.
   */
  async ensureToken() {
    if (!this.kbClientId || !this.kbClientSecret) {
      return;
    }
    if (!tokenExpired(this.accessToken)) {
      return;
    }
    this.accessToken = await refreshToken(
      this.gatewayURL,
      this.kbClientId,
      this.kbClientSecret
    );
    // The SDK captures the key at construction time, so a fresh client is
    // required to pick up the refreshed token.
    this.client = new OpenAI({
      apiKey: this.accessToken,
      baseURL: `${this.gatewayURL}/llm`
    });
  }
  /**
   * Single-turn completion: wraps `prompt` as one user message.
   * @returns content, token usage, and the model the gateway actually used.
   */
  async complete(prompt, options) {
    await this.ensureToken();
    const model = options?.model ?? this.defaultModel;
    const response = await this.client.chat.completions.create({
      model,
      max_tokens: options?.maxTokens ?? this.defaultMaxTokens,
      messages: [{ role: "user", content: prompt }],
      ...options?.temperature !== void 0 ? { temperature: options.temperature } : {}
    });
    const content = response.choices[0]?.message?.content ?? "";
    return {
      content,
      usage: {
        promptTokens: response.usage?.prompt_tokens ?? 0,
        completionTokens: response.usage?.completion_tokens ?? 0
      },
      model: response.model
    };
  }
  /**
   * Emulated streaming: performs a full completion and yields the entire
   * content as a single chunk (capabilities advertise stream: unsupported).
   */
  async *stream(prompt, options) {
    const response = await this.complete(prompt, options);
    yield response.content;
  }
  /**
   * Multi-turn chat with function-calling support.
   * Converts platform messages/tools to OpenAI shapes and parses any
   * tool calls out of the response.
   */
  async chatWithTools(messages, options) {
    await this.ensureToken();
    const model = options?.model ?? this.defaultModel;
    const openaiMessages = messages.map((m) => this.toOpenAIMessage(m));
    const tools = options.tools.map((t) => ({
      type: "function",
      function: {
        name: t.name,
        description: t.description,
        parameters: t.inputSchema
      }
    }));
    // NOTE(review): toolChoice values other than "none" are not forwarded as
    // tool_choice to the API — confirm whether "required"/named-tool selection
    // should be passed through.
    const response = await this.client.chat.completions.create({
      model,
      max_tokens: options?.maxTokens ?? this.defaultMaxTokens,
      messages: openaiMessages,
      // FIX: omit `tools` when the list is empty — OpenAI-compatible APIs
      // reject an empty tools array. Previously [] was sent whenever
      // toolChoice !== "none".
      tools: options.toolChoice !== "none" && tools.length > 0 ? tools : void 0
    });
    const message = response.choices[0]?.message;
    if (!message) {
      return { content: "", toolCalls: [], usage: { promptTokens: 0, completionTokens: 0 }, model: "" };
    }
    const toolCalls = (message.tool_calls ?? []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      input: JSON.parse(tc.function.arguments || "{}")
    }));
    return {
      content: message.content ?? "",
      toolCalls,
      usage: {
        promptTokens: response.usage?.prompt_tokens ?? 0,
        completionTokens: response.usage?.completion_tokens ?? 0
      },
      model: response.model
    };
  }
  /**
   * Map a platform LLMMessage to the OpenAI wire shape:
   * tool results get tool_call_id, assistant tool invocations get tool_calls
   * with JSON-stringified arguments, everything else passes through.
   */
  toOpenAIMessage(m) {
    if (m.role === "tool") {
      return {
        role: "tool",
        tool_call_id: m.toolCallId ?? "",
        content: m.content
      };
    }
    if (m.role === "assistant" && m.toolCalls?.length) {
      return {
        role: "assistant",
        content: m.content ?? null,
        tool_calls: m.toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: JSON.stringify(tc.input)
          }
        }))
      };
    }
    return {
      role: m.role,
      content: m.content
    };
  }
};
168
/**
 * Factory entry point: build a KBLabsGatewayLLM from an optional config.
 */
function createAdapter(config) {
  const adapter = new KBLabsGatewayLLM(config);
  return adapter;
}
var llm_default = createAdapter;
172
+
173
+ export { KBLabsGatewayLLM, createAdapter, llm_default as default };
174
+ //# sourceMappingURL=llm.js.map
175
+ //# sourceMappingURL=llm.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/llm.ts"],"names":[],"mappings":";;;AAiDA,IAAM,mBAAA,GAAsB,uBAAA;AAC5B,IAAM,kBAAA,GAAqB,KAAA;AAK3B,SAAS,OAAO,KAAA,EAAuB;AACrC,EAAA,IAAI;AACF,IAAA,MAAM,UAAU,KAAA,CAAM,KAAA,CAAM,GAAG,CAAA,CAAE,CAAC,CAAA,IAAK,EAAA;AACvC,IAAA,MAAM,OAAO,MAAA,CAAO,IAAA,CAAK,SAAS,WAAW,CAAA,CAAE,SAAS,MAAM,CAAA;AAC9D,IAAA,MAAM,IAAA,GAAO,IAAA,CAAK,KAAA,CAAM,IAAI,CAAA;AAC5B,IAAA,OAAO,OAAO,IAAA,CAAK,GAAA,KAAQ,QAAA,GAAW,KAAK,GAAA,GAAM,CAAA;AAAA,EACnD,CAAA,CAAA,MAAQ;AACN,IAAA,OAAO,CAAA;AAAA,EACT;AACF;AAGA,SAAS,aAAa,KAAA,EAAoC;AACxD,EAAA,IAAI,CAAC,KAAA,EAAO;AAAC,IAAA,OAAO,IAAA;AAAA,EAAK;AACzB,EAAA,OAAO,MAAA,CAAO,KAAK,CAAA,GAAI,IAAA,CAAK,MAAM,IAAA,CAAK,GAAA,EAAI,GAAI,GAAI,CAAA,GAAI,EAAA;AACzD;AAGA,eAAe,YAAA,CACb,UAAA,EACA,QAAA,EACA,YAAA,EACiB;AACjB,EAAA,MAAM,GAAA,GAAM,MAAM,KAAA,CAAM,CAAA,EAAG,UAAU,CAAA,WAAA,CAAA,EAAe;AAAA,IAClD,MAAA,EAAQ,MAAA;AAAA,IACR,OAAA,EAAS,EAAE,cAAA,EAAgB,kBAAA,EAAmB;AAAA,IAC9C,MAAM,IAAA,CAAK,SAAA,CAAU,EAAE,QAAA,EAAU,cAAc;AAAA,GAChD,CAAA;AACD,EAAA,IAAI,CAAC,IAAI,EAAA,EAAI;AACX,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,2CAAA,EAA8C,GAAA,CAAI,MAAM,CAAA,CAAE,CAAA;AAAA,EAC5E;AACA,EAAA,MAAM,IAAA,GAAQ,MAAM,GAAA,CAAI,IAAA,EAAK;AAC7B,EAAA,IAAI,CAAC,KAAK,WAAA,EAAa;AACrB,IAAA,MAAM,IAAI,MAAM,8DAA8D,CAAA;AAAA,EAChF;AACA,EAAA,OAAO,IAAA,CAAK,WAAA;AACd;AAIO,IAAM,mBAAN,MAAuC;AAAA,EAC3B,UAAA;AAAA,EACA,UAAA;AAAA,EACA,cAAA;AAAA,EACA,YAAA;AAAA,EACA,gBAAA;AAAA,EAET,WAAA;AAAA,EACA,MAAA;AAAA,EAER,WAAA,CAAY,MAAA,GAAiC,EAAC,EAAG;AAC/C,IAAA,IAAA,CAAK,UAAA,GAAa,OAAO,UAAA,IAAc,mBAAA;AACvC,IAAA,IAAA,CAAK,aAAa,MAAA,CAAO,UAAA;AACzB,IAAA,IAAA,CAAK,iBAAiB,MAAA,CAAO,cAAA;AAC7B,IAAA,IAAA,CAAK,YAAA,GAAe,OAAO,YAAA,IAAgB,OAAA;AAC3C,IAAA,IAAA,CAAK,gBAAA,GAAmB,OAAO,gBAAA,IAAoB,kBAAA;AAEnD,IAAA,MAAM,UAAA,GACJ,OAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,eAAA,IAAmB,OAAA,CAAQ,IAAI,cAAA,IAAkB,SAAA;AAEhF,IAAA,IAAA,CAAK,MAAA,GAAS,IAAI,MAAA,CAAO;AAAA,MACvB,MAAA,EAAQ,UAAA;AAAA,MACR,OAAA,EAAS,CAAA,EAAG,IAAA,CAAK,UAAU,CAAA,IAAA;AAAA,KAC5B,CAAA;AAAA,EACH;AAAA,EAEA,uBAAA,GAAmD;AACjD,IAAA,OAAO;AAAA,MACL,KAAA,EAAO,EAAE,SAAA
,EAAW,KAAA,EAAM;AAAA,MAC1B,MAAA,EAAQ,EAAE,SAAA,EAAW,KAAA;AAAM,KAC7B;AAAA,EACF;AAAA,EAEA,MAAc,WAAA,GAA6B;AACzC,IAAA,IAAI,CAAC,IAAA,CAAK,UAAA,IAAc,CAAC,KAAK,cAAA,EAAgB;AAAC,MAAA;AAAA,IAAO;AACtD,IAAA,IAAI,CAAC,YAAA,CAAa,IAAA,CAAK,WAAW,CAAA,EAAG;AAAC,MAAA;AAAA,IAAO;AAE7C,IAAA,IAAA,CAAK,cAAc,MAAM,YAAA;AAAA,MACvB,IAAA,CAAK,UAAA;AAAA,MACL,IAAA,CAAK,UAAA;AAAA,MACL,IAAA,CAAK;AAAA,KACP;AACA,IAAA,IAAA,CAAK,MAAA,GAAS,IAAI,MAAA,CAAO;AAAA,MACvB,QAAQ,IAAA,CAAK,WAAA;AAAA,MACb,OAAA,EAAS,CAAA,EAAG,IAAA,CAAK,UAAU,CAAA,IAAA;AAAA,KAC5B,CAAA;AAAA,EACH;AAAA,EAEA,MAAM,QAAA,CAAS,MAAA,EAAgB,OAAA,EAA4C;AACzE,IAAA,MAAM,KAAK,WAAA,EAAY;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,EAAS,KAAA,IAAS,IAAA,CAAK,YAAA;AAErC,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACzD,KAAA;AAAA,MACA,UAAA,EAAY,OAAA,EAAS,SAAA,IAAa,IAAA,CAAK,gBAAA;AAAA,MACvC,UAAU,CAAC,EAAE,MAAM,MAAA,EAAQ,OAAA,EAAS,QAAQ,CAAA;AAAA,MAC5C,GAAI,SAAS,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,KAClF,CAAA;AAED,IAAA,MAAM,UAAU,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA,EAAG,SAAS,OAAA,IAAW,EAAA;AACzD,IAAA,OAAO;AAAA,MACL,OAAA;AAAA,MACA,KAAA,EAAO;AAAA,QACL,YAAA,EAAc,QAAA,CAAS,KAAA,EAAO,aAAA,IAAiB,CAAA;AAAA,QAC/C,gBAAA,EAAkB,QAAA,CAAS,KAAA,EAAO,iBAAA,IAAqB;AAAA,OACzD;AAAA,MACA,OAAO,QAAA,CAAS;AAAA,KAClB;AAAA,EACF;AAAA,EAEA,OAAO,MAAA,CAAO,MAAA,EAAgB,OAAA,EAA6C;AACzE,IAAA,MAAM,QAAA,GAAW,MAAM,IAAA,CAAK,QAAA,CAAS,QAAQ,OAAO,CAAA;AACpD,IAAA,MAAM,QAAA,CAAS,OAAA;AAAA,EACjB;AAAA,EAEA,MAAM,aAAA,CACJ,QAAA,EACA,OAAA,EAC8B;AAC9B,IAAA,MAAM,KAAK,WAAA,EAAY;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,EAAS,KAAA,IAAS,IAAA,CAAK,YAAA;AAErC,IAAA,MAAM,cAAA,GAAiB,SAAS,GAAA,CAAI,CAAC,MAAM,IAAA,CAAK,eAAA,CAAgB,CAAC,CAAC,CAAA;AAElE,IAAA,MAAM,KAAA,GAA0C,OAAA,CAAQ,KAAA,CAAM,GAAA,CAAI,CAAC,CAAA,MAAgB;AAAA,MACjF,IAAA,EAAM,UAAA;AAAA,MACN,QAAA,EAAU;AAAA,QACR,MAAM,CAAA,CAAE,IAAA;AAAA,QACR,aAAa,CAAA,CAAE,WAAA;AAAA,QACf,YAAY,CAAA,CAAE;AAAA;AAChB,KACF,CAAE,CAAA;AAEF,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACzD,KAAA;AAAA,MACA,UAAA,EAAY,OAAA,EAAS,SA
AA,IAAa,IAAA,CAAK,gBAAA;AAAA,MACvC,QAAA,EAAU,cAAA;AAAA,MACV,KAAA,EAAO,OAAA,CAAQ,UAAA,KAAe,MAAA,GAAS,KAAA,GAAQ;AAAA,KAChD,CAAA;AAED,IAAA,MAAM,OAAA,GAAU,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA,EAAG,OAAA;AACrC,IAAA,IAAI,CAAC,OAAA,EAAS;AACZ,MAAA,OAAO,EAAE,OAAA,EAAS,EAAA,EAAI,SAAA,EAAW,EAAC,EAAG,KAAA,EAAO,EAAE,YAAA,EAAc,CAAA,EAAG,gBAAA,EAAkB,CAAA,EAAE,EAAG,OAAO,EAAA,EAAG;AAAA,IAClG;AAEA,IAAA,MAAM,aAA4B,OAAA,CAAQ,UAAA,IAAc,EAAC,EAAG,GAAA,CAAI,CAAC,EAAA,MAAQ;AAAA,MACvE,IAAI,EAAA,CAAG,EAAA;AAAA,MACP,IAAA,EAAM,GAAG,QAAA,CAAS,IAAA;AAAA,MAClB,OAAO,IAAA,CAAK,KAAA,CAAM,EAAA,CAAG,QAAA,CAAS,aAAa,IAAI;AAAA,KACjD,CAAE,CAAA;AAEF,IAAA,OAAO;AAAA,MACL,OAAA,EAAS,QAAQ,OAAA,IAAW,EAAA;AAAA,MAC5B,SAAA;AAAA,MACA,KAAA,EAAO;AAAA,QACL,YAAA,EAAc,QAAA,CAAS,KAAA,EAAO,aAAA,IAAiB,CAAA;AAAA,QAC/C,gBAAA,EAAkB,QAAA,CAAS,KAAA,EAAO,iBAAA,IAAqB;AAAA,OACzD;AAAA,MACA,OAAO,QAAA,CAAS;AAAA,KAClB;AAAA,EACF;AAAA,EAEQ,gBAAgB,CAAA,EAAuD;AAC7E,IAAA,IAAI,CAAA,CAAE,SAAS,MAAA,EAAQ;AACrB,MAAA,OAAO;AAAA,QACL,IAAA,EAAM,MAAA;AAAA,QACN,YAAA,EAAc,EAAE,UAAA,IAAc,EAAA;AAAA,QAC9B,SAAS,CAAA,CAAE;AAAA,OACb;AAAA,IACF;AACA,IAAA,IAAI,CAAA,CAAE,IAAA,KAAS,WAAA,IAAe,CAAA,CAAE,WAAW,MAAA,EAAQ;AACjD,MAAA,OAAO;AAAA,QACL,IAAA,EAAM,WAAA;AAAA,QACN,OAAA,EAAS,EAAE,OAAA,IAAW,IAAA;AAAA,QACtB,UAAA,EAAY,CAAA,CAAE,SAAA,CAAU,GAAA,CAAI,CAAC,EAAA,MAAqB;AAAA,UAChD,IAAI,EAAA,CAAG,EAAA;AAAA,UACP,IAAA,EAAM,UAAA;AAAA,UACN,QAAA,EAAU;AAAA,YACR,MAAM,EAAA,CAAG,IAAA;AAAA,YACT,SAAA,EAAW,IAAA,CAAK,SAAA,CAAU,EAAA,CAAG,KAAK;AAAA;AACpC,SACF,CAAE;AAAA,OACJ;AAAA,IACF;AACA,IAAA,OAAO;AAAA,MACL,MAAM,CAAA,CAAE,IAAA;AAAA,MACR,SAAS,CAAA,CAAE;AAAA,KACb;AAAA,EACF;AACF;AAEO,SAAS,cAAc,MAAA,EAAmD;AAC/E,EAAA,OAAO,IAAI,iBAAiB,MAAM,CAAA;AACpC;AAEA,IAAO,WAAA,GAAQ","file":"llm.js","sourcesContent":["/**\n * @module @kb-labs/adapters-kblabs-gateway/llm\n *\n * KB Labs Gateway LLM adapter.\n *\n * Implements ILLM against the KB Labs Gateway OpenAI-compatible endpoint:\n * POST <gatewayURL>/llm/v1/chat/completions\n *\n * Authentication: machine identity (clientId + clientSecret) obtained via\n * 
kb-create --demo. The adapter automatically refreshes the short-lived\n * JWT access token (~15 min) before each request using the stored credentials.\n */\n\nimport OpenAI from \"openai\";\nimport type {\n ILLM,\n LLMOptions,\n LLMResponse,\n LLMMessage,\n LLMToolCallOptions,\n LLMToolCallResponse,\n LLMTool,\n LLMToolCall,\n LLMProtocolCapabilities,\n} from \"@kb-labs/core-platform\";\n\n// ── Config ───────────────────────────────────────────────────────────────────\n\nexport interface KBLabsGatewayLLMConfig {\n /** KB Labs Gateway base URL. Defaults to https://api.kblabs.ru */\n gatewayURL?: string;\n /**\n * Machine identity credentials for automatic JWT token refresh.\n * Populated by kb-create --demo. Replace with your own API key\n * by removing these fields and setting apiKey instead.\n */\n kbClientId?: string;\n kbClientSecret?: string;\n /**\n * Static access token. Used directly if kbClientId/kbClientSecret are absent.\n * Falls back to KB_LABS_API_KEY or OPENAI_API_KEY env vars.\n */\n apiKey?: string;\n /** Default model tier. Values: \"small\" | \"medium\" | \"large\". */\n defaultModel?: string;\n /** Default max output tokens. Overrides the API default (4096). */\n defaultMaxTokens?: number;\n}\n\nconst DEFAULT_GATEWAY_URL = \"https://api.kblabs.ru\";\nconst DEFAULT_MAX_TOKENS = 16_384;\n\n// ── Token refresh ─────────────────────────────────────────────────────────────\n\n/** Decode JWT exp claim. Returns 0 on parse failure. */\nfunction jwtExp(token: string): number {\n try {\n const payload = token.split(\".\")[1] ?? \"\";\n const json = Buffer.from(payload, \"base64url\").toString(\"utf8\");\n const data = JSON.parse(json) as { exp?: number };\n return typeof data.exp === \"number\" ? data.exp : 0;\n } catch {\n return 0;\n }\n}\n\n/** Returns true if token is absent or expires within the next 60 seconds. 
*/\nfunction tokenExpired(token: string | undefined): boolean {\n if (!token) {return true;}\n return jwtExp(token) < Math.floor(Date.now() / 1000) + 60;\n}\n\n/** Exchange clientId/clientSecret for a fresh accessToken. */\nasync function refreshToken(\n gatewayURL: string,\n clientId: string,\n clientSecret: string,\n): Promise<string> {\n const res = await fetch(`${gatewayURL}/auth/token`, {\n method: \"POST\",\n headers: { \"Content-Type\": \"application/json\" },\n body: JSON.stringify({ clientId, clientSecret }),\n });\n if (!res.ok) {\n throw new Error(`KB Labs Gateway token refresh failed: HTTP ${res.status}`);\n }\n const data = (await res.json()) as { accessToken: string };\n if (!data.accessToken) {\n throw new Error(\"KB Labs Gateway token refresh: empty accessToken in response\");\n }\n return data.accessToken;\n}\n\n// ── Adapter ───────────────────────────────────────────────────────────────────\n\nexport class KBLabsGatewayLLM implements ILLM {\n private readonly gatewayURL: string;\n private readonly kbClientId?: string;\n private readonly kbClientSecret?: string;\n private readonly defaultModel: string;\n private readonly defaultMaxTokens: number;\n\n private accessToken?: string;\n private client: OpenAI;\n\n constructor(config: KBLabsGatewayLLMConfig = {}) {\n this.gatewayURL = config.gatewayURL ?? DEFAULT_GATEWAY_URL;\n this.kbClientId = config.kbClientId;\n this.kbClientSecret = config.kbClientSecret;\n this.defaultModel = config.defaultModel ?? \"small\";\n this.defaultMaxTokens = config.defaultMaxTokens ?? DEFAULT_MAX_TOKENS;\n\n const initialKey =\n config.apiKey ?? process.env.KB_LABS_API_KEY ?? process.env.OPENAI_API_KEY ?? 
\"pending\";\n\n this.client = new OpenAI({\n apiKey: initialKey,\n baseURL: `${this.gatewayURL}/llm`,\n });\n }\n\n getProtocolCapabilities(): LLMProtocolCapabilities {\n return {\n cache: { supported: false },\n stream: { supported: false },\n };\n }\n\n private async ensureToken(): Promise<void> {\n if (!this.kbClientId || !this.kbClientSecret) {return;}\n if (!tokenExpired(this.accessToken)) {return;}\n\n this.accessToken = await refreshToken(\n this.gatewayURL,\n this.kbClientId,\n this.kbClientSecret,\n );\n this.client = new OpenAI({\n apiKey: this.accessToken,\n baseURL: `${this.gatewayURL}/llm`,\n });\n }\n\n async complete(prompt: string, options?: LLMOptions): Promise<LLMResponse> {\n await this.ensureToken();\n const model = options?.model ?? this.defaultModel;\n\n const response = await this.client.chat.completions.create({\n model,\n max_tokens: options?.maxTokens ?? this.defaultMaxTokens,\n messages: [{ role: \"user\", content: prompt }],\n ...(options?.temperature !== undefined ? { temperature: options.temperature } : {}),\n });\n\n const content = response.choices[0]?.message?.content ?? \"\";\n return {\n content,\n usage: {\n promptTokens: response.usage?.prompt_tokens ?? 0,\n completionTokens: response.usage?.completion_tokens ?? 0,\n },\n model: response.model,\n };\n }\n\n async *stream(prompt: string, options?: LLMOptions): AsyncIterable<string> {\n const response = await this.complete(prompt, options);\n yield response.content;\n }\n\n async chatWithTools(\n messages: LLMMessage[],\n options: LLMToolCallOptions,\n ): Promise<LLMToolCallResponse> {\n await this.ensureToken();\n const model = options?.model ?? 
this.defaultModel;\n\n const openaiMessages = messages.map((m) => this.toOpenAIMessage(m));\n\n const tools: OpenAI.Chat.ChatCompletionTool[] = options.tools.map((t: LLMTool) => ({\n type: \"function\" as const,\n function: {\n name: t.name,\n description: t.description,\n parameters: t.inputSchema as Record<string, unknown>,\n },\n }));\n\n const response = await this.client.chat.completions.create({\n model,\n max_tokens: options?.maxTokens ?? this.defaultMaxTokens,\n messages: openaiMessages,\n tools: options.toolChoice !== \"none\" ? tools : undefined,\n });\n\n const message = response.choices[0]?.message;\n if (!message) {\n return { content: \"\", toolCalls: [], usage: { promptTokens: 0, completionTokens: 0 }, model: \"\" };\n }\n\n const toolCalls: LLMToolCall[] = (message.tool_calls ?? []).map((tc) => ({\n id: tc.id,\n name: tc.function.name,\n input: JSON.parse(tc.function.arguments || \"{}\") as Record<string, unknown>,\n }));\n\n return {\n content: message.content ?? \"\",\n toolCalls,\n usage: {\n promptTokens: response.usage?.prompt_tokens ?? 0,\n completionTokens: response.usage?.completion_tokens ?? 0,\n },\n model: response.model,\n };\n }\n\n private toOpenAIMessage(m: LLMMessage): OpenAI.Chat.ChatCompletionMessageParam {\n if (m.role === \"tool\") {\n return {\n role: \"tool\",\n tool_call_id: m.toolCallId ?? \"\",\n content: m.content,\n };\n }\n if (m.role === \"assistant\" && m.toolCalls?.length) {\n return {\n role: \"assistant\",\n content: m.content ?? null,\n tool_calls: m.toolCalls.map((tc: LLMToolCall) => ({\n id: tc.id,\n type: \"function\" as const,\n function: {\n name: tc.name,\n arguments: JSON.stringify(tc.input),\n },\n })),\n };\n }\n return {\n role: m.role as \"user\" | \"assistant\" | \"system\",\n content: m.content,\n };\n }\n}\n\nexport function createAdapter(config?: KBLabsGatewayLLMConfig): KBLabsGatewayLLM {\n return new KBLabsGatewayLLM(config);\n}\n\nexport default createAdapter;\n"]}
@@ -0,0 +1,5 @@
1
+ import { AdapterManifest } from '@kb-labs/core-platform';
2
+
3
/** Adapter manifest consumed by the KB Labs platform adapter registry. */
declare const manifest: AdapterManifest;
4
+
5
+ export { manifest };
@@ -0,0 +1,47 @@
1
+ // src/manifest.ts
2
// Adapter manifest: declares identity, capabilities, and the configuration
// surface to the KB Labs platform adapter registry.
var manifest = {
  manifestVersion: "1.0.0",
  // NOTE(review): adapter version lags the package version (2.6.0) — confirm intentional.
  id: "kblabs-gateway-llm",
  name: "KB Labs Gateway LLM",
  version: "0.1.0",
  description: "KB Labs Gateway adapter \u2014 OpenAI-compatible LLM proxy with automatic JWT token refresh",
  author: "KB Labs Team",
  license: "KBPL-1.1",
  type: "core",
  implements: "ILLM",
  capabilities: {
    // stream() is emulated (single yield), so true streaming is not advertised.
    streaming: false,
    custom: {
      functionCalling: true,
      autoTokenRefresh: true
    }
  },
  configSchema: {
    gatewayURL: {
      type: "string",
      default: "https://api.kblabs.ru",
      description: "KB Labs Gateway base URL"
    },
    kbClientId: {
      type: "string",
      description: "Machine identity client ID (from kb-create --demo)"
    },
    kbClientSecret: {
      type: "string",
      description: "Machine identity client secret (from kb-create --demo)"
    },
    apiKey: {
      type: "string",
      description: "Static Bearer token (alternative to clientId/clientSecret)"
    },
    defaultModel: {
      type: "string",
      default: "small",
      description: "Default model tier: small | medium | large"
    },
    // FIX: the adapter honors defaultMaxTokens (see KBLabsGatewayLLMConfig and
    // the constructor fallback to 16384) but the schema previously omitted it.
    defaultMaxTokens: {
      type: "number",
      default: 16384,
      description: "Default max output tokens"
    }
  }
};
44
+
45
+ export { manifest };
46
+ //# sourceMappingURL=manifest.js.map
47
+ //# sourceMappingURL=manifest.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/manifest.ts"],"names":[],"mappings":";AAEO,IAAM,QAAA,GAA4B;AAAA,EACvC,eAAA,EAAiB,OAAA;AAAA,EACjB,EAAA,EAAI,oBAAA;AAAA,EACJ,IAAA,EAAM,qBAAA;AAAA,EACN,OAAA,EAAS,OAAA;AAAA,EACT,WAAA,EACE,6FAAA;AAAA,EACF,MAAA,EAAQ,cAAA;AAAA,EACR,OAAA,EAAS,UAAA;AAAA,EACT,IAAA,EAAM,MAAA;AAAA,EACN,UAAA,EAAY,MAAA;AAAA,EACZ,YAAA,EAAc;AAAA,IACZ,SAAA,EAAW,KAAA;AAAA,IACX,MAAA,EAAQ;AAAA,MACN,eAAA,EAAiB,IAAA;AAAA,MACjB,gBAAA,EAAkB;AAAA;AACpB,GACF;AAAA,EACA,YAAA,EAAc;AAAA,IACZ,UAAA,EAAY;AAAA,MACV,IAAA,EAAM,QAAA;AAAA,MACN,OAAA,EAAS,uBAAA;AAAA,MACT,WAAA,EAAa;AAAA,KACf;AAAA,IACA,UAAA,EAAY;AAAA,MACV,IAAA,EAAM,QAAA;AAAA,MACN,WAAA,EAAa;AAAA,KACf;AAAA,IACA,cAAA,EAAgB;AAAA,MACd,IAAA,EAAM,QAAA;AAAA,MACN,WAAA,EAAa;AAAA,KACf;AAAA,IACA,MAAA,EAAQ;AAAA,MACN,IAAA,EAAM,QAAA;AAAA,MACN,WAAA,EAAa;AAAA,KACf;AAAA,IACA,YAAA,EAAc;AAAA,MACZ,IAAA,EAAM,QAAA;AAAA,MACN,OAAA,EAAS,OAAA;AAAA,MACT,WAAA,EAAa;AAAA;AACf;AAEJ","file":"manifest.js","sourcesContent":["import type { AdapterManifest } from \"@kb-labs/core-platform\";\n\nexport const manifest: AdapterManifest = {\n manifestVersion: \"1.0.0\",\n id: \"kblabs-gateway-llm\",\n name: \"KB Labs Gateway LLM\",\n version: \"0.1.0\",\n description:\n \"KB Labs Gateway adapter — OpenAI-compatible LLM proxy with automatic JWT token refresh\",\n author: \"KB Labs Team\",\n license: \"KBPL-1.1\",\n type: \"core\",\n implements: \"ILLM\",\n capabilities: {\n streaming: false,\n custom: {\n functionCalling: true,\n autoTokenRefresh: true,\n },\n },\n configSchema: {\n gatewayURL: {\n type: \"string\",\n default: \"https://api.kblabs.ru\",\n description: \"KB Labs Gateway base URL\",\n },\n kbClientId: {\n type: \"string\",\n description: \"Machine identity client ID (from kb-create --demo)\",\n },\n kbClientSecret: {\n type: \"string\",\n description: \"Machine identity client secret (from kb-create --demo)\",\n },\n apiKey: {\n type: \"string\",\n description: \"Static Bearer token (alternative to clientId/clientSecret)\",\n },\n defaultModel: {\n 
type: \"string\",\n default: \"small\",\n description: \"Default model tier: small | medium | large\",\n },\n },\n};\n"]}
package/package.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "name": "@kb-labs/adapters-kblabs-gateway",
3
+ "version": "2.6.0",
4
+ "description": "KB Labs Gateway LLM adapter — OpenAI-compatible with automatic JWT token refresh",
5
+ "type": "module",
6
+ "main": "./dist/index.js",
7
+ "types": "./dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "import": "./dist/index.js",
11
+ "types": "./dist/index.d.ts"
12
+ },
13
+ "./llm": {
14
+ "import": "./dist/llm.js",
15
+ "types": "./dist/llm.d.ts"
16
+ }
17
+ },
18
+ "files": [
19
+ "dist",
20
+ "README.md"
21
+ ],
22
+ "sideEffects": false,
23
+ "scripts": {
24
+ "clean": "rimraf dist",
25
+ "build": "tsup",
26
+ "dev": "tsup --watch",
27
+ "type-check": "tsc --noEmit",
28
+ "test": "vitest run --passWithNoTests",
29
+ "test:watch": "vitest",
30
+ "lint": "eslint src --ext .ts",
31
+ "lint:fix": "eslint . --fix"
32
+ },
33
+ "dependencies": {
34
+ "openai": "^4.90.0"
35
+ },
36
+ "peerDependencies": {
37
+ "@kb-labs/core-platform": "^2.6.0"
38
+ },
39
+ "devDependencies": {
40
+ "@kb-labs/core-platform": "workspace:*",
41
+ "@types/node": "^24.3.3",
42
+ "eslint": "^9",
43
+ "rimraf": "^6.0.1",
44
+ "tsup": "^8.5.0",
45
+ "typescript": "^5.6.3",
46
+ "vitest": "^3.2.4",
47
+ "@kb-labs/devkit": "workspace:*"
48
+ },
49
+ "engines": {
50
+ "node": ">=20.0.0",
51
+ "pnpm": ">=9.0.0"
52
+ },
53
+ "kb": {
54
+ "manifest": "./dist/manifest.js"
55
+ }
56
+ }