@flightdev/ai 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +95 -0
- package/dist/adapters/anthropic.d.ts +14 -0
- package/dist/adapters/anthropic.js +118 -0
- package/dist/adapters/anthropic.js.map +1 -0
- package/dist/adapters/ollama.d.ts +13 -0
- package/dist/adapters/ollama.js +97 -0
- package/dist/adapters/ollama.js.map +1 -0
- package/dist/adapters/openai.d.ts +33 -0
- package/dist/adapters/openai.js +147 -0
- package/dist/adapters/openai.js.map +1 -0
- package/dist/index.d.ts +157 -0
- package/dist/index.js +70 -0
- package/dist/index.js.map +1 -0
- package/package.json +61 -0
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024-2026 Flight Contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,95 @@
+# @flightdev/ai
+
+AI utilities for Flight Framework. Integrate with OpenAI, Anthropic, and other LLM providers.
+
+## Installation
+
+```bash
+npm install @flightdev/ai
+```
+
+## Quick Start
+
+```typescript
+import { createAI } from '@flightdev/ai';
+import { openai } from '@flightdev/ai/openai';
+
+const ai = createAI(openai({
+  apiKey: process.env.OPENAI_API_KEY,
+}));
+
+const response = await ai.chat([
+  { role: 'user', content: 'Hello!' }
+]);
+
+console.log(response.content);
+```
+
+## Adapters
+
+### OpenAI
+
+```typescript
+import { openai } from '@flightdev/ai/openai';
+
+const adapter = openai({
+  apiKey: process.env.OPENAI_API_KEY,
+  model: 'gpt-4-turbo',
+});
+```
+
+### Anthropic
+
+```typescript
+import { anthropic } from '@flightdev/ai/anthropic';
+
+const adapter = anthropic({
+  apiKey: process.env.ANTHROPIC_API_KEY,
+  model: 'claude-3-opus-20240229',
+});
+```
+
+### Ollama (Local LLMs)
+
+```typescript
+import { ollama } from '@flightdev/ai/ollama';
+
+const adapter = ollama({
+  baseUrl: 'http://localhost:11434',
+  model: 'llama3.2',
+});
+```
+
+## Streaming
+
+```typescript
+const stream = ai.chatStream([
+  { role: 'user', content: 'Write a poem' }
+]);
+
+for await (const chunk of stream) {
+  if (chunk.content) process.stdout.write(chunk.content);
+}
+```
+
+## React Integration
+
+```tsx
+import { useChat } from '@flightdev/ai/react';
+
+function ChatBox() {
+  const { messages, input, setInput, send, isLoading } = useChat();
+
+  return (
+    <div>
+      {messages.map(m => <div key={m.id}>{m.content}</div>)}
+      <input value={input} onChange={e => setInput(e.target.value)} />
+      <button onClick={send} disabled={isLoading}>Send</button>
+    </div>
+  );
+}
+```
+
+## License
+
+MIT
package/dist/adapters/anthropic.d.ts
ADDED
@@ -0,0 +1,14 @@
+import { AIAdapterFactory } from '../index.js';
+
+/**
+ * Anthropic Adapter for @flightdev/ai
+ */
+
+interface AnthropicConfig {
+    apiKey: string;
+    model?: string;
+    baseUrl?: string;
+}
+declare const anthropic: AIAdapterFactory<AnthropicConfig>;
+
+export { type AnthropicConfig, anthropic, anthropic as default };
package/dist/adapters/anthropic.js
ADDED
@@ -0,0 +1,118 @@
+// src/adapters/anthropic.ts
+var anthropic = (config) => {
+  const {
+    apiKey,
+    model: defaultModel = "claude-3-5-sonnet-20241022",
+    baseUrl = "https://api.anthropic.com/v1"
+  } = config;
+  const headers = {
+    "x-api-key": apiKey,
+    "Content-Type": "application/json",
+    "anthropic-version": "2023-06-01"
+  };
+  async function request(endpoint, body) {
+    const response = await fetch(`${baseUrl}${endpoint}`, {
+      method: "POST",
+      headers,
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.json().catch(() => ({}));
+      throw new Error(`Anthropic error: ${error.error?.message ?? response.statusText}`);
+    }
+    return response.json();
+  }
+  const adapter = {
+    name: "anthropic",
+    async listModels() {
+      return ["claude-3-5-sonnet-20241022", "claude-3-opus-20240229", "claude-3-haiku-20240307"];
+    },
+    async chat(messages, options) {
+      const model = options?.model ?? defaultModel;
+      const systemMsg = messages.find((m) => m.role === "system");
+      const nonSystemMessages = messages.filter((m) => m.role !== "system");
+      const body = {
+        model,
+        messages: nonSystemMessages.map((m) => ({
+          role: m.role === "assistant" ? "assistant" : "user",
+          content: m.content
+        })),
+        max_tokens: options?.maxTokens ?? 4096
+      };
+      if (systemMsg || options?.system) {
+        body.system = options?.system ?? systemMsg?.content;
+      }
+      if (options?.temperature !== void 0) body.temperature = options.temperature;
+      if (options?.stop) body.stop_sequences = options.stop;
+      const response = await request("/messages", body);
+      return {
+        content: response.content.find((c) => c.type === "text")?.text ?? "",
+        finishReason: response.stop_reason === "end_turn" ? "stop" : "length",
+        usage: {
+          promptTokens: response.usage.input_tokens,
+          completionTokens: response.usage.output_tokens,
+          totalTokens: response.usage.input_tokens + response.usage.output_tokens
+        }
+      };
+    },
+    async *chatStream(messages, options) {
+      const model = options?.model ?? defaultModel;
+      const systemMsg = messages.find((m) => m.role === "system");
+      const nonSystemMessages = messages.filter((m) => m.role !== "system");
+      const body = {
+        model,
+        messages: nonSystemMessages.map((m) => ({
+          role: m.role === "assistant" ? "assistant" : "user",
+          content: m.content
+        })),
+        max_tokens: options?.maxTokens ?? 4096,
+        stream: true
+      };
+      if (systemMsg || options?.system) {
+        body.system = options?.system ?? systemMsg?.content;
+      }
+      const response = await fetch(`${baseUrl}/messages`, {
+        method: "POST",
+        headers,
+        body: JSON.stringify(body)
+      });
+      if (!response.ok || !response.body) {
+        throw new Error(`Anthropic stream error: ${response.statusText}`);
+      }
+      const reader = response.body.getReader();
+      const decoder = new TextDecoder();
+      let buffer = "";
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          if (line.startsWith("data: ")) {
+            try {
+              const data = JSON.parse(line.slice(6));
+              if (data.type === "content_block_delta" && data.delta?.text) {
+                yield { content: data.delta.text, done: false };
+              }
+              if (data.type === "message_stop") {
+                yield { done: true };
+                return;
+              }
+            } catch {
+            }
+          }
+        }
+      }
+    },
+    async embed() {
+      throw new Error("Anthropic does not support embeddings. Use a different adapter.");
+    }
+  };
+  return adapter;
+};
+var anthropic_default = anthropic;
+
+export { anthropic, anthropic_default as default };
+//# sourceMappingURL=anthropic.js.map
+//# sourceMappingURL=anthropic.js.map
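For orientation, here is how this compiled adapter is driven directly, without `createAI`. A minimal sketch: it assumes the adapter is exposed at the `@flightdev/ai/anthropic` subpath (the README and the `openai` docblock suggest per-adapter subpaths, but package.json is not shown in this diff) and that `ANTHROPIC_API_KEY` is set.

```typescript
import { anthropic } from '@flightdev/ai/anthropic';

// model and baseUrl fall back to the defaults seen above.
const adapter = anthropic({ apiKey: process.env.ANTHROPIC_API_KEY! });

// chatStream yields { content, done } chunks parsed from the SSE stream.
for await (const chunk of adapter.chatStream(
  [{ role: 'user', content: 'One sentence on TypeScript.' }],
  { maxTokens: 128 }
)) {
  if (!chunk.done && chunk.content) process.stdout.write(chunk.content);
}
```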
package/dist/adapters/anthropic.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../../src/adapters/anthropic.ts"],"names":[],"mappings":";AAoBO,IAAM,SAAA,GAA+C,CAAC,MAAA,KAAW;AACpE,EAAA,MAAM;AAAA,IACF,MAAA;AAAA,IACA,OAAO,YAAA,GAAe,4BAAA;AAAA,IACtB,OAAA,GAAU;AAAA,GACd,GAAI,MAAA;AAEJ,EAAA,MAAM,OAAA,GAAU;AAAA,IACZ,WAAA,EAAa,MAAA;AAAA,IACb,cAAA,EAAgB,kBAAA;AAAA,IAChB,mBAAA,EAAqB;AAAA,GACzB;AAEA,EAAA,eAAe,OAAA,CAAQ,UAAkB,IAAA,EAAiC;AACtE,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,GAAG,OAAO,CAAA,EAAG,QAAQ,CAAA,CAAA,EAAI;AAAA,MAClD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA;AAAA,MACA,IAAA,EAAM,IAAA,CAAK,SAAA,CAAU,IAAI;AAAA,KAC5B,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,KAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,GAAO,KAAA,CAAM,OAAO,EAAC,CAAE,CAAA;AACpD,MAAA,MAAM,IAAI,MAAM,CAAA,iBAAA,EAAoB,KAAA,CAAM,OAAO,OAAA,IAAW,QAAA,CAAS,UAAU,CAAA,CAAE,CAAA;AAAA,IACrF;AAEA,IAAA,OAAO,SAAS,IAAA,EAAK;AAAA,EACzB;AAEA,EAAA,MAAM,OAAA,GAAqB;AAAA,IACvB,IAAA,EAAM,WAAA;AAAA,IAEN,MAAM,UAAA,GAAgC;AAClC,MAAA,OAAO,CAAC,4BAAA,EAA8B,wBAAA,EAA0B,yBAAyB,CAAA;AAAA,IAC7F,CAAA;AAAA,IAEA,MAAM,IAAA,CAAK,QAAA,EAAyB,OAAA,EAA4C;AAC5E,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,YAAA;AAChC,MAAA,MAAM,YAAY,QAAA,CAAS,IAAA,CAAK,CAAA,CAAA,KAAK,CAAA,CAAE,SAAS,QAAQ,CAAA;AACxD,MAAA,MAAM,oBAAoB,QAAA,CAAS,MAAA,CAAO,CAAA,CAAA,KAAK,CAAA,CAAE,SAAS,QAAQ,CAAA;AAElE,MAAA,MAAM,IAAA,GAAgC;AAAA,QAClC,KAAA;AAAA,QACA,QAAA,EAAU,iBAAA,CAAkB,GAAA,CAAI,CAAA,CAAA,MAAM;AAAA,UAClC,IAAA,EAAM,CAAA,CAAE,IAAA,KAAS,WAAA,GAAc,WAAA,GAAc,MAAA;AAAA,UAC7C,SAAS,CAAA,CAAE;AAAA,SACf,CAAE,CAAA;AAAA,QACF,UAAA,EAAY,SAAS,SAAA,IAAa;AAAA,OACtC;AAEA,MAAA,IAAI,SAAA,IAAa,SAAS,MAAA,EAAQ;AAC9B,QAAA,IAAA,CAAK,MAAA,GAAS,OAAA,EAAS,MAAA,IAAU,SAAA,EAAW,OAAA;AAAA,MAChD;AACA,MAAA,IAAI,OAAA,EAAS,WAAA,KAAgB,MAAA,EAAW,IAAA,CAAK,cAAc,OAAA,CAAQ,WAAA;AACnE,MAAA,IAAI,OAAA,EAAS,IAAA,EAAM,IAAA,CAAK,cAAA,GAAiB,OAAA,CAAQ,IAAA;AAEjD,MAAA,MAAM,QAAA,GAAW,MAAM,OAAA,CAAQ,WAAA,EAAa,IAAI,CAAA;AAMhD,MAAA,OAAO;AAAA,QACH,OAAA,EAAS,SAAS,OAAA,CAAQ,IAAA,CAAK,OAAK,CAAA,CAAE,IAAA,KAAS,MAAM,CAAA,EAAG,IAAA,IAAQ,EAAA;AAAA,QAChE,YAAA,EAAc,QAAA,CAAS,WAAA,KAAgB,UAAA,GAAa,MAAA,GAAS,QAAA;AAAA,QAC7D,KAAA,EAAO;AAAA,UACH,YAAA,EAAc,SAAS,KAAA,CAAM,YAAA;AAAA,UAC7B,gBAAA,EAAkB,SAAS,KAAA,CAAM,aAAA;AAAA,UACjC,WAAA,EAAa,QAAA,CAAS,KAAA,CAAM,YAAA,GAAe,SAAS,KAAA,CAAM;AAAA;AAC9D,OACJ;AAAA,IACJ,CAAA;AAAA,IAEA,OAAO,UAAA,CAAW,QAAA,EAAyB,OAAA,EAAmD;AAC1F,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,YAAA;AAChC,MAAA,MAAM,YAAY,QAAA,CAAS,IAAA,CAAK,CAAA,CAAA,KAAK,CAAA,CAAE,SAAS,QAAQ,CAAA;AACxD,MAAA,MAAM,oBAAoB,QAAA,CAAS,MAAA,CAAO,CAAA,CAAA,KAAK,CAAA,CAAE,SAAS,QAAQ,CAAA;AAElE,MAAA,MAAM,IAAA,GAAgC;AAAA,QAClC,KAAA;AAAA,QACA,QAAA,EAAU,iBAAA,CAAkB,GAAA,CAAI,CAAA,CAAA,MAAM;AAAA,UAClC,IAAA,EAAM,CAAA,CAAE,IAAA,KAAS,WAAA,GAAc,WAAA,GAAc,MAAA;AAAA,UAC7C,SAAS,CAAA,CAAE;AAAA,SACf,CAAE,CAAA;AAAA,QACF,UAAA,EAAY,SAAS,SAAA,IAAa,IAAA;AAAA,QAClC,MAAA,EAAQ;AAAA,OACZ;AAEA,MAAA,IAAI,SAAA,IAAa,SAAS,MAAA,EAAQ;AAC9B,QAAA,IAAA,CAAK,MAAA,GAAS,OAAA,EAAS,MAAA,IAAU,SAAA,EAAW,OAAA;AAAA,MAChD;AAEA,MAAA,MAAM,QAAA,GAAW,MAAM,KAAA,CAAM,CAAA,EAAG,OAAO,CAAA,SAAA,CAAA,EAAa;AAAA,QAChD,MAAA,EAAQ,MAAA;AAAA,QACR,OAAA;AAAA,QACA,IAAA,EAAM,IAAA,CAAK,SAAA,CAAU,IAAI;AAAA,OAC5B,CAAA;AAED,MAAA,IAAI,CAAC,QAAA,CAAS,EAAA,IAAM,CAAC,SAAS,IAAA,EAAM;AAChC,QAAA,MAAM,IAAI,KAAA,CAAM,CAAA,wBAAA,EAA2B,QAAA,CAAS,UAAU,CAAA,CAAE,CAAA;AAAA,MACpE;AAEA,MAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,MAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,MAAA,IAAI,MAAA,GAAS,EAAA;AAEb,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAC/B,QA
AA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,IAAI;AACA,cAAA,MAAM,OAAO,IAAA,CAAK,KAAA,CAAM,IAAA,CAAK,KAAA,CAAM,CAAC,CAAC,CAAA;AAIrC,cAAA,IAAI,IAAA,CAAK,IAAA,KAAS,qBAAA,IAAyB,IAAA,CAAK,OAAO,IAAA,EAAM;AACzD,gBAAA,MAAM,EAAE,OAAA,EAAS,IAAA,CAAK,KAAA,CAAM,IAAA,EAAM,MAAM,KAAA,EAAM;AAAA,cAClD;AACA,cAAA,IAAI,IAAA,CAAK,SAAS,cAAA,EAAgB;AAC9B,gBAAA,MAAM,EAAE,MAAM,IAAA,EAAK;AACnB,gBAAA;AAAA,cACJ;AAAA,YACJ,CAAA,CAAA,MAAQ;AAAA,YAER;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA;AAAA,IAEA,MAAM,KAAA,GAAoC;AACtC,MAAA,MAAM,IAAI,MAAM,iEAAiE,CAAA;AAAA,IACrF;AAAA,GACJ;AAEA,EAAA,OAAO,OAAA;AACX;AAEA,IAAO,iBAAA,GAAQ","file":"anthropic.js","sourcesContent":["/**\r\n * Anthropic Adapter for @flightdev/ai\r\n */\r\n\r\nimport type {\r\n AIAdapter,\r\n AIAdapterFactory,\r\n ChatMessage,\r\n ChatOptions,\r\n ChatResult,\r\n StreamChunk,\r\n EmbeddingResult,\r\n} from '../index.js';\r\n\r\nexport interface AnthropicConfig {\r\n apiKey: string;\r\n model?: string;\r\n baseUrl?: string;\r\n}\r\n\r\nexport const anthropic: AIAdapterFactory<AnthropicConfig> = (config) => {\r\n const {\r\n apiKey,\r\n model: defaultModel = 'claude-3-5-sonnet-20241022',\r\n baseUrl = 'https://api.anthropic.com/v1',\r\n } = config;\r\n\r\n const headers = {\r\n 'x-api-key': apiKey,\r\n 'Content-Type': 'application/json',\r\n 'anthropic-version': '2023-06-01',\r\n };\r\n\r\n async function request(endpoint: string, body: unknown): Promise<unknown> {\r\n const response = await fetch(`${baseUrl}${endpoint}`, {\r\n method: 'POST',\r\n headers,\r\n body: JSON.stringify(body),\r\n });\r\n\r\n if (!response.ok) {\r\n const error = await response.json().catch(() => ({})) as { error?: { message?: string } };\r\n throw new Error(`Anthropic error: ${error.error?.message ?? response.statusText}`);\r\n }\r\n\r\n return response.json();\r\n }\r\n\r\n const adapter: AIAdapter = {\r\n name: 'anthropic',\r\n\r\n async listModels(): Promise<string[]> {\r\n return ['claude-3-5-sonnet-20241022', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'];\r\n },\r\n\r\n async chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResult> {\r\n const model = options?.model ?? defaultModel;\r\n const systemMsg = messages.find(m => m.role === 'system');\r\n const nonSystemMessages = messages.filter(m => m.role !== 'system');\r\n\r\n const body: Record<string, unknown> = {\r\n model,\r\n messages: nonSystemMessages.map(m => ({\r\n role: m.role === 'assistant' ? 'assistant' : 'user',\r\n content: m.content,\r\n })),\r\n max_tokens: options?.maxTokens ?? 4096,\r\n };\r\n\r\n if (systemMsg || options?.system) {\r\n body.system = options?.system ?? systemMsg?.content;\r\n }\r\n if (options?.temperature !== undefined) body.temperature = options.temperature;\r\n if (options?.stop) body.stop_sequences = options.stop;\r\n\r\n const response = await request('/messages', body) as {\r\n content: Array<{ type: string; text: string }>;\r\n stop_reason: string;\r\n usage: { input_tokens: number; output_tokens: number };\r\n };\r\n\r\n return {\r\n content: response.content.find(c => c.type === 'text')?.text ?? '',\r\n finishReason: response.stop_reason === 'end_turn' ? 
'stop' : 'length',\r\n usage: {\r\n promptTokens: response.usage.input_tokens,\r\n completionTokens: response.usage.output_tokens,\r\n totalTokens: response.usage.input_tokens + response.usage.output_tokens,\r\n },\r\n };\r\n },\r\n\r\n async *chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncIterable<StreamChunk> {\r\n const model = options?.model ?? defaultModel;\r\n const systemMsg = messages.find(m => m.role === 'system');\r\n const nonSystemMessages = messages.filter(m => m.role !== 'system');\r\n\r\n const body: Record<string, unknown> = {\r\n model,\r\n messages: nonSystemMessages.map(m => ({\r\n role: m.role === 'assistant' ? 'assistant' : 'user',\r\n content: m.content,\r\n })),\r\n max_tokens: options?.maxTokens ?? 4096,\r\n stream: true,\r\n };\r\n\r\n if (systemMsg || options?.system) {\r\n body.system = options?.system ?? systemMsg?.content;\r\n }\r\n\r\n const response = await fetch(`${baseUrl}/messages`, {\r\n method: 'POST',\r\n headers,\r\n body: JSON.stringify(body),\r\n });\r\n\r\n if (!response.ok || !response.body) {\r\n throw new Error(`Anthropic stream error: ${response.statusText}`);\r\n }\r\n\r\n const reader = response.body.getReader();\r\n const decoder = new TextDecoder();\r\n let buffer = '';\r\n\r\n while (true) {\r\n const { done, value } = await reader.read();\r\n if (done) break;\r\n\r\n buffer += decoder.decode(value, { stream: true });\r\n const lines = buffer.split('\\n');\r\n buffer = lines.pop() ?? '';\r\n\r\n for (const line of lines) {\r\n if (line.startsWith('data: ')) {\r\n try {\r\n const data = JSON.parse(line.slice(6)) as {\r\n type: string;\r\n delta?: { type: string; text?: string };\r\n };\r\n if (data.type === 'content_block_delta' && data.delta?.text) {\r\n yield { content: data.delta.text, done: false };\r\n }\r\n if (data.type === 'message_stop') {\r\n yield { done: true };\r\n return;\r\n }\r\n } catch {\r\n // Skip malformed JSON\r\n }\r\n }\r\n }\r\n }\r\n },\r\n\r\n async embed(): Promise<EmbeddingResult[]> {\r\n throw new Error('Anthropic does not support embeddings. Use a different adapter.');\r\n },\r\n };\r\n\r\n return adapter;\r\n};\r\n\r\nexport default anthropic;\r\n"]}
package/dist/adapters/ollama.d.ts
ADDED
@@ -0,0 +1,13 @@
+import { AIAdapterFactory } from '../index.js';
+
+/**
+ * Ollama Adapter for @flightdev/ai (Local LLMs)
+ */
+
+interface OllamaConfig {
+    baseUrl?: string;
+    model?: string;
+}
+declare const ollama: AIAdapterFactory<OllamaConfig>;
+
+export { type OllamaConfig, ollama as default, ollama };
package/dist/adapters/ollama.js
ADDED
@@ -0,0 +1,97 @@
+// src/adapters/ollama.ts
+var ollama = (config = {}) => {
+  const {
+    baseUrl = "http://localhost:11434",
+    model: defaultModel = "llama3.2"
+  } = config;
+  const adapter = {
+    name: "ollama",
+    async listModels() {
+      const response = await fetch(`${baseUrl}/api/tags`);
+      const data = await response.json();
+      return data.models.map((m) => m.name);
+    },
+    async chat(messages, options) {
+      const model = options?.model ?? defaultModel;
+      const allMessages = options?.system ? [{ role: "system", content: options.system }, ...messages] : messages;
+      const response = await fetch(`${baseUrl}/api/chat`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({
+          model,
+          messages: allMessages,
+          stream: false,
+          options: {
+            temperature: options?.temperature,
+            num_predict: options?.maxTokens
+          }
+        })
+      });
+      const data = await response.json();
+      return {
+        content: data.message.content,
+        finishReason: "stop",
+        usage: {
+          promptTokens: data.prompt_eval_count ?? 0,
+          completionTokens: data.eval_count ?? 0,
+          totalTokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0)
+        }
+      };
+    },
+    async *chatStream(messages, options) {
+      const model = options?.model ?? defaultModel;
+      const allMessages = options?.system ? [{ role: "system", content: options.system }, ...messages] : messages;
+      const response = await fetch(`${baseUrl}/api/chat`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({
+          model,
+          messages: allMessages,
+          stream: true
+        })
+      });
+      if (!response.body) throw new Error("No response body");
+      const reader = response.body.getReader();
+      const decoder = new TextDecoder();
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        const lines = decoder.decode(value).split("\n").filter(Boolean);
+        for (const line of lines) {
+          try {
+            const data = JSON.parse(line);
+            if (data.message?.content) {
+              yield { content: data.message.content, done: false };
+            }
+            if (data.done) {
+              yield { done: true };
+              return;
+            }
+          } catch {
+          }
+        }
+      }
+    },
+    async embed(input, options) {
+      const model = options?.model ?? "nomic-embed-text";
+      const inputs = Array.isArray(input) ? input : [input];
+      const results = [];
+      for (const text of inputs) {
+        const response = await fetch(`${baseUrl}/api/embeddings`, {
+          method: "POST",
+          headers: { "Content-Type": "application/json" },
+          body: JSON.stringify({ model, prompt: text })
+        });
+        const data = await response.json();
+        results.push({ embedding: data.embedding });
+      }
+      return results;
+    }
+  };
+  return adapter;
+};
+var ollama_default = ollama;
+
+export { ollama_default as default, ollama };
+//# sourceMappingURL=ollama.js.map
+//# sourceMappingURL=ollama.js.map
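Unlike the Anthropic adapter, `embed` is implemented here: one `/api/embeddings` request per input string, with `nomic-embed-text` as the fallback model. A minimal usage sketch, assuming an Ollama server on the default port and an `@flightdev/ai/ollama` subpath export (the subpath is inferred, not confirmed by this diff):

```typescript
import { ollama } from '@flightdev/ai/ollama';

// Empty config: defaults target http://localhost:11434 and llama3.2.
const local = ollama({});

// One result per input, in input order.
const [first] = await local.embed(['hello world', 'goodbye world']);
console.log(first.embedding.length); // dimensionality of the embedding model
```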
package/dist/adapters/ollama.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../../src/adapters/ollama.ts"],"names":[],"mappings":";AAoBO,IAAM,MAAA,GAAyC,CAAC,MAAA,GAAS,EAAC,KAAM;AACnE,EAAA,MAAM;AAAA,IACF,OAAA,GAAU,wBAAA;AAAA,IACV,OAAO,YAAA,GAAe;AAAA,GAC1B,GAAI,MAAA;AAEJ,EAAA,MAAM,OAAA,GAAqB;AAAA,IACvB,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,UAAA,GAAgC;AAClC,MAAA,MAAM,QAAA,GAAW,MAAM,KAAA,CAAM,CAAA,EAAG,OAAO,CAAA,SAAA,CAAW,CAAA;AAClD,MAAA,MAAM,IAAA,GAAO,MAAM,QAAA,CAAS,IAAA,EAAK;AACjC,MAAA,OAAO,IAAA,CAAK,MAAA,CAAO,GAAA,CAAI,CAAA,CAAA,KAAK,EAAE,IAAI,CAAA;AAAA,IACtC,CAAA;AAAA,IAEA,MAAM,IAAA,CAAK,QAAA,EAAyB,OAAA,EAA4C;AAC5E,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,YAAA;AAChC,MAAA,MAAM,WAAA,GAAc,OAAA,EAAS,MAAA,GACvB,CAAC,EAAE,IAAA,EAAM,QAAA,EAAmB,OAAA,EAAS,OAAA,CAAQ,MAAA,EAAO,EAAG,GAAG,QAAQ,CAAA,GAClE,QAAA;AAEN,MAAA,MAAM,QAAA,GAAW,MAAM,KAAA,CAAM,CAAA,EAAG,OAAO,CAAA,SAAA,CAAA,EAAa;AAAA,QAChD,MAAA,EAAQ,MAAA;AAAA,QACR,OAAA,EAAS,EAAE,cAAA,EAAgB,kBAAA,EAAmB;AAAA,QAC9C,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,UACjB,KAAA;AAAA,UACA,QAAA,EAAU,WAAA;AAAA,UACV,MAAA,EAAQ,KAAA;AAAA,UACR,OAAA,EAAS;AAAA,YACL,aAAa,OAAA,EAAS,WAAA;AAAA,YACtB,aAAa,OAAA,EAAS;AAAA;AAC1B,SACH;AAAA,OACJ,CAAA;AAED,MAAA,MAAM,IAAA,GAAO,MAAM,QAAA,CAAS,IAAA,EAAK;AAOjC,MAAA,OAAO;AAAA,QACH,OAAA,EAAS,KAAK,OAAA,CAAQ,OAAA;AAAA,QACtB,YAAA,EAAc,MAAA;AAAA,QACd,KAAA,EAAO;AAAA,UACH,YAAA,EAAc,KAAK,iBAAA,IAAqB,CAAA;AAAA,UACxC,gBAAA,EAAkB,KAAK,UAAA,IAAc,CAAA;AAAA,UACrC,WAAA,EAAA,CAAc,IAAA,CAAK,iBAAA,IAAqB,CAAA,KAAM,KAAK,UAAA,IAAc,CAAA;AAAA;AACrE,OACJ;AAAA,IACJ,CAAA;AAAA,IAEA,OAAO,UAAA,CAAW,QAAA,EAAyB,OAAA,EAAmD;AAC1F,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,YAAA;AAChC,MAAA,MAAM,WAAA,GAAc,OAAA,EAAS,MAAA,GACvB,CAAC,EAAE,IAAA,EAAM,QAAA,EAAmB,OAAA,EAAS,OAAA,CAAQ,MAAA,EAAO,EAAG,GAAG,QAAQ,CAAA,GAClE,QAAA;AAEN,MAAA,MAAM,QAAA,GAAW,MAAM,KAAA,CAAM,CAAA,EAAG,OAAO,CAAA,SAAA,CAAA,EAAa;AAAA,QAChD,MAAA,EAAQ,MAAA;AAAA,QACR,OAAA,EAAS,EAAE,cAAA,EAAgB,kBAAA,EAAmB;AAAA,QAC9C,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,UACjB,KAAA;AAAA,UACA,QAAA,EAAU,WAAA;AAAA,UACV,MAAA,EAAQ;AAAA,SACX;AAAA,OACJ,CAAA;AAED,MAAA,IAAI,CAAC,QAAA,CAAS,IAAA,EAAM,MAAM,IAAI,MAAM,kBAAkB,CAAA;AAEtD,MAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,MAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAEhC,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAM,KAAA,GAAQ,QAAQ,MAAA,CAAO,KAAK,EAAE,KAAA,CAAM,IAAI,CAAA,CAAE,MAAA,CAAO,OAAO,CAAA;AAC9D,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI;AACA,YAAA,MAAM,IAAA,GAAO,IAAA,CAAK,KAAA,CAAM,IAAI,CAAA;AAC5B,YAAA,IAAI,IAAA,CAAK,SAAS,OAAA,EAAS;AACvB,cAAA,MAAM,EAAE,OAAA,EAAS,IAAA,CAAK,OAAA,CAAQ,OAAA,EAAS,MAAM,KAAA,EAAM;AAAA,YACvD;AACA,YAAA,IAAI,KAAK,IAAA,EAAM;AACX,cAAA,MAAM,EAAE,MAAM,IAAA,EAAK;AACnB,cAAA;AAAA,YACJ;AAAA,UACJ,CAAA,CAAA,MAAQ;AAAA,UAER;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA;AAAA,IAEA,MAAM,KAAA,CAAM,KAAA,EAA0B,OAAA,EAAwD;AAC1F,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,kBAAA;AAChC,MAAA,MAAM,SAAS,KAAA,CAAM,OAAA,CAAQ,KAAK,CAAA,GAAI,KAAA,GAAQ,CAAC,KAAK,CAAA;AACpD,MAAA,MAAM,UAA6B,EAAC;AAEpC,MAAA,KAAA,MAAW,QAAQ,MAAA,EAAQ;AACvB,QAAA,MAAM,QAAA,GAAW,MAAM,KAAA,CAAM,CAAA,EAAG,OAAO,CAAA,eAAA,CAAA,EAAmB;AAAA,UACtD,MAAA,EAAQ,MAAA;AAAA,UACR,OAAA,EAAS,EAAE,cAAA,EAAgB,kBAAA,EAAmB;AAAA,UAC9C,MAAM,IAAA,CAAK,SAAA,CAAU,EAAE,KAAA,EAAO,MAAA,EAAQ,MAAM;AAAA,SAC/C,CAAA;AAED,QAAA,MAAM,IAAA,GAAO,MAAM,QAAA,CAAS,IAAA,EAAK;AACjC,QAAA,OAAA,CAAQ,IAAA,CAAK,EAAE,SAAA,EAAW,IAAA,CAAK,WAAW,CAAA;AAAA,MAC9C;AAEA,MAAA,OAAO,OAAA;AAAA,IACX;AAAA,GACJ;AAEA,EAAA,OAAO,OAAA;AACX;AAEA,IAAO,cAAA,GAAQ","file":"ollama.js","sourcesContent":["/**\r\n * Ollama Adapter for @flightdev/ai (Local LLMs)\r\n */\r\n\r\nimport type {\r\n AIAdapter,\r\n 
AIAdapterFactory,\r\n ChatMessage,\r\n ChatOptions,\r\n ChatResult,\r\n StreamChunk,\r\n EmbeddingOptions,\r\n EmbeddingResult,\r\n} from '../index.js';\r\n\r\nexport interface OllamaConfig {\r\n baseUrl?: string;\r\n model?: string;\r\n}\r\n\r\nexport const ollama: AIAdapterFactory<OllamaConfig> = (config = {}) => {\r\n const {\r\n baseUrl = 'http://localhost:11434',\r\n model: defaultModel = 'llama3.2',\r\n } = config;\r\n\r\n const adapter: AIAdapter = {\r\n name: 'ollama',\r\n\r\n async listModels(): Promise<string[]> {\r\n const response = await fetch(`${baseUrl}/api/tags`);\r\n const data = await response.json() as { models: Array<{ name: string }> };\r\n return data.models.map(m => m.name);\r\n },\r\n\r\n async chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResult> {\r\n const model = options?.model ?? defaultModel;\r\n const allMessages = options?.system\r\n ? [{ role: 'system' as const, content: options.system }, ...messages]\r\n : messages;\r\n\r\n const response = await fetch(`${baseUrl}/api/chat`, {\r\n method: 'POST',\r\n headers: { 'Content-Type': 'application/json' },\r\n body: JSON.stringify({\r\n model,\r\n messages: allMessages,\r\n stream: false,\r\n options: {\r\n temperature: options?.temperature,\r\n num_predict: options?.maxTokens,\r\n },\r\n }),\r\n });\r\n\r\n const data = await response.json() as {\r\n message: { content: string };\r\n done: boolean;\r\n eval_count?: number;\r\n prompt_eval_count?: number;\r\n };\r\n\r\n return {\r\n content: data.message.content,\r\n finishReason: 'stop',\r\n usage: {\r\n promptTokens: data.prompt_eval_count ?? 0,\r\n completionTokens: data.eval_count ?? 0,\r\n totalTokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0),\r\n },\r\n };\r\n },\r\n\r\n async *chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncIterable<StreamChunk> {\r\n const model = options?.model ?? defaultModel;\r\n const allMessages = options?.system\r\n ? [{ role: 'system' as const, content: options.system }, ...messages]\r\n : messages;\r\n\r\n const response = await fetch(`${baseUrl}/api/chat`, {\r\n method: 'POST',\r\n headers: { 'Content-Type': 'application/json' },\r\n body: JSON.stringify({\r\n model,\r\n messages: allMessages,\r\n stream: true,\r\n }),\r\n });\r\n\r\n if (!response.body) throw new Error('No response body');\r\n\r\n const reader = response.body.getReader();\r\n const decoder = new TextDecoder();\r\n\r\n while (true) {\r\n const { done, value } = await reader.read();\r\n if (done) break;\r\n\r\n const lines = decoder.decode(value).split('\\n').filter(Boolean);\r\n for (const line of lines) {\r\n try {\r\n const data = JSON.parse(line) as { message?: { content: string }; done: boolean };\r\n if (data.message?.content) {\r\n yield { content: data.message.content, done: false };\r\n }\r\n if (data.done) {\r\n yield { done: true };\r\n return;\r\n }\r\n } catch {\r\n // Skip\r\n }\r\n }\r\n }\r\n },\r\n\r\n async embed(input: string | string[], options?: EmbeddingOptions): Promise<EmbeddingResult[]> {\r\n const model = options?.model ?? 'nomic-embed-text';\r\n const inputs = Array.isArray(input) ? 
input : [input];\r\n const results: EmbeddingResult[] = [];\r\n\r\n for (const text of inputs) {\r\n const response = await fetch(`${baseUrl}/api/embeddings`, {\r\n method: 'POST',\r\n headers: { 'Content-Type': 'application/json' },\r\n body: JSON.stringify({ model, prompt: text }),\r\n });\r\n\r\n const data = await response.json() as { embedding: number[] };\r\n results.push({ embedding: data.embedding });\r\n }\r\n\r\n return results;\r\n },\r\n };\r\n\r\n return adapter;\r\n};\r\n\r\nexport default ollama;\r\n"]}
package/dist/adapters/openai.d.ts
ADDED
@@ -0,0 +1,33 @@
+import { AIAdapterFactory } from '../index.js';
+
+/**
+ * OpenAI Adapter for @flightdev/ai
+ *
+ * @example
+ * ```typescript
+ * import { createAI } from '@flightdev/ai';
+ * import { openai } from '@flightdev/ai/openai';
+ *
+ * const ai = createAI(openai({
+ *   apiKey: process.env.OPENAI_API_KEY,
+ *   model: 'gpt-4-turbo',
+ * }));
+ * ```
+ */
+
+interface OpenAIConfig {
+    /** OpenAI API key */
+    apiKey: string;
+    /** Default model */
+    model?: string;
+    /** Organization ID */
+    organization?: string;
+    /** Base URL (for Azure OpenAI or proxies) */
+    baseUrl?: string;
+}
+/**
+ * Create an OpenAI adapter
+ */
+declare const openai: AIAdapterFactory<OpenAIConfig>;
+
+export { type OpenAIConfig, openai as default, openai };
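The `baseUrl` field is what makes this adapter usable against any OpenAI-compatible endpoint, not just api.openai.com. A hypothetical sketch (the gateway URL is illustrative, not part of the package):

```typescript
import { openai } from '@flightdev/ai/openai';

// Route requests through an OpenAI-compatible gateway instead of the default host.
const viaGateway = openai({
  apiKey: process.env.OPENAI_API_KEY!,
  baseUrl: 'https://llm-gateway.example.com/v1', // illustrative URL
});
```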
package/dist/adapters/openai.js
ADDED
@@ -0,0 +1,147 @@
+// src/adapters/openai.ts
+var openai = (config) => {
+  const {
+    apiKey,
+    model: defaultModel = "gpt-4-turbo",
+    organization,
+    baseUrl = "https://api.openai.com/v1"
+  } = config;
+  const headers = {
+    "Authorization": `Bearer ${apiKey}`,
+    "Content-Type": "application/json"
+  };
+  if (organization) {
+    headers["OpenAI-Organization"] = organization;
+  }
+  async function request(endpoint, body) {
+    const response = await fetch(`${baseUrl}${endpoint}`, {
+      method: "POST",
+      headers,
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.json().catch(() => ({}));
+      throw new Error(`OpenAI error: ${error.error?.message ?? response.statusText}`);
+    }
+    return response.json();
+  }
+  function mapTools(tools) {
+    if (!tools) return void 0;
+    return tools.map((tool) => ({
+      type: "function",
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: tool.parameters
+      }
+    }));
+  }
+  const adapter = {
+    name: "openai",
+    async listModels() {
+      const response = await fetch(`${baseUrl}/models`, { headers });
+      const data = await response.json();
+      return data.data.map((m) => m.id);
+    },
+    async chat(messages, options) {
+      const model = options?.model ?? defaultModel;
+      const allMessages = options?.system ? [{ role: "system", content: options.system }, ...messages] : messages;
+      const body = {
+        model,
+        messages: allMessages.map((m) => ({
+          role: m.role,
+          content: m.content,
+          ...m.name && { name: m.name },
+          ...m.toolCallId && { tool_call_id: m.toolCallId }
+        }))
+      };
+      if (options?.temperature !== void 0) body.temperature = options.temperature;
+      if (options?.maxTokens) body.max_tokens = options.maxTokens;
+      if (options?.stop) body.stop = options.stop;
+      if (options?.tools) body.tools = mapTools(options.tools);
+      const response = await request("/chat/completions", body);
+      const choice = response.choices[0];
+      if (!choice) {
+        throw new Error("OpenAI: No response choice");
+      }
+      const toolCalls = choice.message.tool_calls?.map((tc) => ({
+        id: tc.id,
+        name: tc.function.name,
+        arguments: JSON.parse(tc.function.arguments)
+      }));
+      return {
+        content: choice.message.content ?? "",
+        toolCalls,
+        finishReason: choice.finish_reason,
+        usage: response.usage ? {
+          promptTokens: response.usage.prompt_tokens,
+          completionTokens: response.usage.completion_tokens,
+          totalTokens: response.usage.total_tokens
+        } : void 0
+      };
+    },
+    async *chatStream(messages, options) {
+      const model = options?.model ?? defaultModel;
+      const allMessages = options?.system ? [{ role: "system", content: options.system }, ...messages] : messages;
+      const body = {
+        model,
+        messages: allMessages,
+        stream: true
+      };
+      if (options?.temperature !== void 0) body.temperature = options.temperature;
+      if (options?.maxTokens) body.max_tokens = options.maxTokens;
+      const response = await fetch(`${baseUrl}/chat/completions`, {
+        method: "POST",
+        headers,
+        body: JSON.stringify(body)
+      });
+      if (!response.ok || !response.body) {
+        throw new Error(`OpenAI stream error: ${response.statusText}`);
+      }
+      const reader = response.body.getReader();
+      const decoder = new TextDecoder();
+      let buffer = "";
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          if (line.startsWith("data: ")) {
+            const data = line.slice(6);
+            if (data === "[DONE]") {
+              yield { done: true };
+              return;
+            }
+            try {
+              const parsed = JSON.parse(data);
+              const content = parsed.choices[0]?.delta?.content;
+              if (content) {
+                yield { content, done: false };
+              }
+            } catch {
+            }
+          }
+        }
+      }
+    },
+    async embed(input, options) {
+      const model = options?.model ?? "text-embedding-3-small";
+      const inputs = Array.isArray(input) ? input : [input];
+      const body = { model, input: inputs };
+      if (options?.dimensions) body.dimensions = options.dimensions;
+      const response = await request("/embeddings", body);
+      return response.data.map((d) => ({
+        embedding: d.embedding,
+        usage: response.usage ? { totalTokens: response.usage.total_tokens } : void 0
+      }));
+    }
+  };
+  return adapter;
+};
+var openai_default = openai;
+
+export { openai_default as default, openai };
+//# sourceMappingURL=openai.js.map
+//# sourceMappingURL=openai.js.map
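This is the only adapter in the package that forwards `tools`: `mapTools` wraps each `ToolDefinition` in OpenAI's `{ type: 'function', function: {...} }` envelope, and `chat` parses any returned `tool_calls` back into `{ id, name, arguments }`. A sketch of a round trip (the weather tool and its schema are hypothetical, invented for illustration):

```typescript
import { openai } from '@flightdev/ai/openai';

const adapter = openai({ apiKey: process.env.OPENAI_API_KEY! });

const result = await adapter.chat(
  [{ role: 'user', content: 'Weather in Oslo?' }],
  {
    tools: [{
      name: 'get_weather', // hypothetical tool
      description: 'Look up current weather for a city',
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    }],
  }
);

if (result.finishReason === 'tool_calls' && result.toolCalls) {
  for (const call of result.toolCalls) {
    console.log(call.name, call.arguments); // e.g. get_weather { city: 'Oslo' }
  }
}
```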
package/dist/adapters/openai.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../../src/adapters/openai.ts"],"names":[],"mappings":";AAiDO,IAAM,MAAA,GAAyC,CAAC,MAAA,KAAW;AAC9D,EAAA,MAAM;AAAA,IACF,MAAA;AAAA,IACA,OAAO,YAAA,GAAe,aAAA;AAAA,IACtB,YAAA;AAAA,IACA,OAAA,GAAU;AAAA,GACd,GAAI,MAAA;AAEJ,EAAA,MAAM,OAAA,GAAkC;AAAA,IACpC,eAAA,EAAiB,UAAU,MAAM,CAAA,CAAA;AAAA,IACjC,cAAA,EAAgB;AAAA,GACpB;AACA,EAAA,IAAI,YAAA,EAAc;AACd,IAAA,OAAA,CAAQ,qBAAqB,CAAA,GAAI,YAAA;AAAA,EACrC;AAEA,EAAA,eAAe,OAAA,CAAQ,UAAkB,IAAA,EAAiC;AACtE,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,GAAG,OAAO,CAAA,EAAG,QAAQ,CAAA,CAAA,EAAI;AAAA,MAClD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA;AAAA,MACA,IAAA,EAAM,IAAA,CAAK,SAAA,CAAU,IAAI;AAAA,KAC5B,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,KAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,GAAO,KAAA,CAAM,OAAO,EAAC,CAAE,CAAA;AACpD,MAAA,MAAM,IAAI,MAAM,CAAA,cAAA,EAAkB,KAAA,CAA2C,OAAO,OAAA,IAAW,QAAA,CAAS,UAAU,CAAA,CAAE,CAAA;AAAA,IACxH;AAEA,IAAA,OAAO,SAAS,IAAA,EAAK;AAAA,EACzB;AAEA,EAAA,SAAS,SAAS,KAAA,EAAiD;AAC/D,IAAA,IAAI,CAAC,OAAO,OAAO,MAAA;AACnB,IAAA,OAAO,KAAA,CAAM,IAAI,CAAA,IAAA,MAAS;AAAA,MACtB,IAAA,EAAM,UAAA;AAAA,MACN,QAAA,EAAU;AAAA,QACN,MAAM,IAAA,CAAK,IAAA;AAAA,QACX,aAAa,IAAA,CAAK,WAAA;AAAA,QAClB,YAAY,IAAA,CAAK;AAAA;AACrB,KACJ,CAAE,CAAA;AAAA,EACN;AAEA,EAAA,MAAM,OAAA,GAAqB;AAAA,IACvB,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,UAAA,GAAgC;AAClC,MAAA,MAAM,QAAA,GAAW,MAAM,KAAA,CAAM,CAAA,EAAG,OAAO,CAAA,OAAA,CAAA,EAAW,EAAE,SAAS,CAAA;AAC7D,MAAA,MAAM,IAAA,GAAO,MAAM,QAAA,CAAS,IAAA,EAAK;AACjC,MAAA,OAAO,IAAA,CAAK,IAAA,CAAK,GAAA,CAAI,CAAA,CAAA,KAAK,EAAE,EAAE,CAAA;AAAA,IAClC,CAAA;AAAA,IAEA,MAAM,IAAA,CAAK,QAAA,EAAyB,OAAA,EAA4C;AAC5E,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,YAAA;AAChC,MAAA,MAAM,WAAA,GAAc,OAAA,EAAS,MAAA,GACvB,CAAC,EAAE,IAAA,EAAM,QAAA,EAAmB,OAAA,EAAS,OAAA,CAAQ,MAAA,EAAO,EAAG,GAAG,QAAQ,CAAA,GAClE,QAAA;AAEN,MAAA,MAAM,IAAA,GAAgC;AAAA,QAClC,KAAA;AAAA,QACA,QAAA,EAAU,WAAA,CAAY,GAAA,CAAI,CAAA,CAAA,MAAM;AAAA,UAC5B,MAAM,CAAA,CAAE,IAAA;AAAA,UACR,SAAS,CAAA,CAAE,OAAA;AAAA,UACX,GAAI,CAAA,CAAE,IAAA,IAAQ,EAAE,IAAA,EAAM,EAAE,IAAA,EAAK;AAAA,UAC7B,GAAI,CAAA,CAAE,UAAA,IAAc,EAAE,YAAA,EAAc,EAAE,UAAA;AAAW,SACrD,CAAE;AAAA,OACN;AAEA,MAAA,IAAI,OAAA,EAAS,WAAA,KAAgB,MAAA,EAAW,IAAA,CAAK,cAAc,OAAA,CAAQ,WAAA;AACnE,MAAA,IAAI,OAAA,EAAS,SAAA,EAAW,IAAA,CAAK,UAAA,GAAa,OAAA,CAAQ,SAAA;AAClD,MAAA,IAAI,OAAA,EAAS,IAAA,EAAM,IAAA,CAAK,IAAA,GAAO,OAAA,CAAQ,IAAA;AACvC,MAAA,IAAI,SAAS,KAAA,EAAO,IAAA,CAAK,KAAA,GAAQ,QAAA,CAAS,QAAQ,KAAK,CAAA;AAEvD,MAAA,MAAM,QAAA,GAAW,MAAM,OAAA,CAAQ,mBAAA,EAAqB,IAAI,CAAA;AAQxD,MAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AACjC,MAAA,IAAI,CAAC,MAAA,EAAQ;AACT,QAAA,MAAM,IAAI,MAAM,4BAA4B,CAAA;AAAA,MAChD;AACA,MAAA,MAAM,SAAA,GAAY,MAAA,CAAO,OAAA,CAAQ,UAAA,EAAY,IAAI,CAAA,EAAA,MAAO;AAAA,QACpD,IAAI,EAAA,CAAG,EAAA;AAAA,QACP,IAAA,EAAM,GAAG,QAAA,CAAS,IAAA;AAAA,QAClB,SAAA,EAAW,IAAA,CAAK,KAAA,CAAM,EAAA,CAAG,SAAS,SAAS;AAAA,OAC/C,CAAE,CAAA;AAEF,MAAA,OAAO;AAAA,QACH,OAAA,EAAS,MAAA,CAAO,OAAA,CAAQ,OAAA,IAAW,EAAA;AAAA,QACnC,SAAA;AAAA,QACA,cAAc,MAAA,CAAO,aAAA;AAAA,QACrB,KAAA,EAAO,SAAS,KAAA,GAAQ;AAAA,UACpB,YAAA,EAAc,SAAS,KAAA,CAAM,aAAA;AAAA,UAC7B,gBAAA,EAAkB,SAAS,KAAA,CAAM,iBAAA;AAAA,UACjC,WAAA,EAAa,SAAS,KAAA,CAAM;AAAA,SAChC,GAAI;AAAA,OACR;AAAA,IACJ,CAAA;AAAA,IAEA,OAAO,UAAA,CAAW,QAAA,EAAyB,OAAA,EAAmD;AAC1F,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,YAAA;AAChC,MAAA,MAAM,WAAA,GAAc,OAAA,EAAS,MAAA,GACvB,CAAC,EAAE,IAAA,EAAM,QAAA,EAAmB,OAAA,EAAS,OAAA,CAAQ,MAAA,EAAO,EAAG,GAAG,QAAQ,CAAA,GAClE,QAAA;AAEN,MAAA,MAAM,IAAA,GAAgC;AAAA,QAClC,KAAA;AAAA,QACA,QAAA,EAAU,WAAA;AAAA,QACV,MAAA,EAAQ;AAAA,OACZ;AAEA,MAAA,IAAI,OAAA,EAAS,WAAA,KAAgB,MAAA,EAAW,IAAA,CAAK,cAAc,OAAA,CAAQ,WAAA;AACnE,MAAA,IAAI,OAAA,EAAS,SAAA,EAAW,IAAA,CAAK,UAAA,GAAa,OAAA,CAAQ,SAAA;AAElD,MAAA,M
AAM,QAAA,GAAW,MAAM,KAAA,CAAM,CAAA,EAAG,OAAO,CAAA,iBAAA,CAAA,EAAqB;AAAA,QACxD,MAAA,EAAQ,MAAA;AAAA,QACR,OAAA;AAAA,QACA,IAAA,EAAM,IAAA,CAAK,SAAA,CAAU,IAAI;AAAA,OAC5B,CAAA;AAED,MAAA,IAAI,CAAC,QAAA,CAAS,EAAA,IAAM,CAAC,SAAS,IAAA,EAAM;AAChC,QAAA,MAAM,IAAI,KAAA,CAAM,CAAA,qBAAA,EAAwB,QAAA,CAAS,UAAU,CAAA,CAAE,CAAA;AAAA,MACjE;AAEA,MAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,MAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,MAAA,IAAI,MAAA,GAAS,EAAA;AAEb,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAC/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,MAAM,IAAA,GAAO,IAAA,CAAK,KAAA,CAAM,CAAC,CAAA;AACzB,YAAA,IAAI,SAAS,QAAA,EAAU;AACnB,cAAA,MAAM,EAAE,MAAM,IAAA,EAAK;AACnB,cAAA;AAAA,YACJ;AACA,YAAA,IAAI;AACA,cAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,IAAI,CAAA;AAG9B,cAAA,MAAM,OAAA,GAAU,MAAA,CAAO,OAAA,CAAQ,CAAC,GAAG,KAAA,EAAO,OAAA;AAC1C,cAAA,IAAI,OAAA,EAAS;AACT,gBAAA,MAAM,EAAE,OAAA,EAAS,IAAA,EAAM,KAAA,EAAM;AAAA,cACjC;AAAA,YACJ,CAAA,CAAA,MAAQ;AAAA,YAER;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA;AAAA,IAEA,MAAM,KAAA,CAAM,KAAA,EAA0B,OAAA,EAAwD;AAC1F,MAAA,MAAM,KAAA,GAAQ,SAAS,KAAA,IAAS,wBAAA;AAChC,MAAA,MAAM,SAAS,KAAA,CAAM,OAAA,CAAQ,KAAK,CAAA,GAAI,KAAA,GAAQ,CAAC,KAAK,CAAA;AAEpD,MAAA,MAAM,IAAA,GAAgC,EAAE,KAAA,EAAO,KAAA,EAAO,MAAA,EAAO;AAC7D,MAAA,IAAI,OAAA,EAAS,UAAA,EAAY,IAAA,CAAK,UAAA,GAAa,OAAA,CAAQ,UAAA;AAEnD,MAAA,MAAM,QAAA,GAAW,MAAM,OAAA,CAAQ,aAAA,EAAe,IAAI,CAAA;AAKlD,MAAA,OAAO,QAAA,CAAS,IAAA,CAAK,GAAA,CAAI,CAAA,CAAA,MAAM;AAAA,QAC3B,WAAW,CAAA,CAAE,SAAA;AAAA,QACb,KAAA,EAAO,SAAS,KAAA,GAAQ,EAAE,aAAa,QAAA,CAAS,KAAA,CAAM,cAAa,GAAI;AAAA,OAC3E,CAAE,CAAA;AAAA,IACN;AAAA,GACJ;AAEA,EAAA,OAAO,OAAA;AACX;AAEA,IAAO,cAAA,GAAQ","file":"openai.js","sourcesContent":["/**\r\n * OpenAI Adapter for @flightdev/ai\r\n * \r\n * @example\r\n * ```typescript\r\n * import { createAI } from '@flightdev/ai';\r\n * import { openai } from '@flightdev/ai/openai';\r\n * \r\n * const ai = createAI(openai({\r\n * apiKey: process.env.OPENAI_API_KEY,\r\n * model: 'gpt-4-turbo',\r\n * }));\r\n * ```\r\n */\r\n\r\nimport type {\r\n AIAdapter,\r\n AIAdapterFactory,\r\n ChatMessage,\r\n ChatOptions,\r\n ChatResult,\r\n StreamChunk,\r\n EmbeddingOptions,\r\n EmbeddingResult,\r\n ToolDefinition,\r\n} from '../index.js';\r\n\r\n// ============================================================================\r\n// Types\r\n// ============================================================================\r\n\r\nexport interface OpenAIConfig {\r\n /** OpenAI API key */\r\n apiKey: string;\r\n /** Default model */\r\n model?: string;\r\n /** Organization ID */\r\n organization?: string;\r\n /** Base URL (for Azure OpenAI or proxies) */\r\n baseUrl?: string;\r\n}\r\n\r\n// ============================================================================\r\n// OpenAI Adapter\r\n// ============================================================================\r\n\r\n/**\r\n * Create an OpenAI adapter\r\n */\r\nexport const openai: AIAdapterFactory<OpenAIConfig> = (config) => {\r\n const {\r\n apiKey,\r\n model: defaultModel = 'gpt-4-turbo',\r\n organization,\r\n baseUrl = 'https://api.openai.com/v1',\r\n } = config;\r\n\r\n const headers: Record<string, string> = {\r\n 'Authorization': `Bearer ${apiKey}`,\r\n 'Content-Type': 'application/json',\r\n };\r\n if (organization) {\r\n 
headers['OpenAI-Organization'] = organization;\r\n }\r\n\r\n async function request(endpoint: string, body: unknown): Promise<unknown> {\r\n const response = await fetch(`${baseUrl}${endpoint}`, {\r\n method: 'POST',\r\n headers,\r\n body: JSON.stringify(body),\r\n });\r\n\r\n if (!response.ok) {\r\n const error = await response.json().catch(() => ({}));\r\n throw new Error(`OpenAI error: ${(error as { error?: { message?: string } }).error?.message ?? response.statusText}`);\r\n }\r\n\r\n return response.json();\r\n }\r\n\r\n function mapTools(tools?: ToolDefinition[]): unknown[] | undefined {\r\n if (!tools) return undefined;\r\n return tools.map(tool => ({\r\n type: 'function',\r\n function: {\r\n name: tool.name,\r\n description: tool.description,\r\n parameters: tool.parameters,\r\n },\r\n }));\r\n }\r\n\r\n const adapter: AIAdapter = {\r\n name: 'openai',\r\n\r\n async listModels(): Promise<string[]> {\r\n const response = await fetch(`${baseUrl}/models`, { headers });\r\n const data = await response.json() as { data: Array<{ id: string }> };\r\n return data.data.map(m => m.id);\r\n },\r\n\r\n async chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResult> {\r\n const model = options?.model ?? defaultModel;\r\n const allMessages = options?.system\r\n ? [{ role: 'system' as const, content: options.system }, ...messages]\r\n : messages;\r\n\r\n const body: Record<string, unknown> = {\r\n model,\r\n messages: allMessages.map(m => ({\r\n role: m.role,\r\n content: m.content,\r\n ...(m.name && { name: m.name }),\r\n ...(m.toolCallId && { tool_call_id: m.toolCallId }),\r\n })),\r\n };\r\n\r\n if (options?.temperature !== undefined) body.temperature = options.temperature;\r\n if (options?.maxTokens) body.max_tokens = options.maxTokens;\r\n if (options?.stop) body.stop = options.stop;\r\n if (options?.tools) body.tools = mapTools(options.tools);\r\n\r\n const response = await request('/chat/completions', body) as {\r\n choices: Array<{\r\n message: { content: string | null; tool_calls?: Array<{ id: string; function: { name: string; arguments: string } }> };\r\n finish_reason: string;\r\n }>;\r\n usage?: { prompt_tokens: number; completion_tokens: number; total_tokens: number };\r\n };\r\n\r\n const choice = response.choices[0];\r\n if (!choice) {\r\n throw new Error('OpenAI: No response choice');\r\n }\r\n const toolCalls = choice.message.tool_calls?.map(tc => ({\r\n id: tc.id,\r\n name: tc.function.name,\r\n arguments: JSON.parse(tc.function.arguments),\r\n }));\r\n\r\n return {\r\n content: choice.message.content ?? '',\r\n toolCalls,\r\n finishReason: choice.finish_reason as ChatResult['finishReason'],\r\n usage: response.usage ? {\r\n promptTokens: response.usage.prompt_tokens,\r\n completionTokens: response.usage.completion_tokens,\r\n totalTokens: response.usage.total_tokens,\r\n } : undefined,\r\n };\r\n },\r\n\r\n async *chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncIterable<StreamChunk> {\r\n const model = options?.model ?? defaultModel;\r\n const allMessages = options?.system\r\n ? 
[{ role: 'system' as const, content: options.system }, ...messages]\r\n : messages;\r\n\r\n const body: Record<string, unknown> = {\r\n model,\r\n messages: allMessages,\r\n stream: true,\r\n };\r\n\r\n if (options?.temperature !== undefined) body.temperature = options.temperature;\r\n if (options?.maxTokens) body.max_tokens = options.maxTokens;\r\n\r\n const response = await fetch(`${baseUrl}/chat/completions`, {\r\n method: 'POST',\r\n headers,\r\n body: JSON.stringify(body),\r\n });\r\n\r\n if (!response.ok || !response.body) {\r\n throw new Error(`OpenAI stream error: ${response.statusText}`);\r\n }\r\n\r\n const reader = response.body.getReader();\r\n const decoder = new TextDecoder();\r\n let buffer = '';\r\n\r\n while (true) {\r\n const { done, value } = await reader.read();\r\n if (done) break;\r\n\r\n buffer += decoder.decode(value, { stream: true });\r\n const lines = buffer.split('\\n');\r\n buffer = lines.pop() ?? '';\r\n\r\n for (const line of lines) {\r\n if (line.startsWith('data: ')) {\r\n const data = line.slice(6);\r\n if (data === '[DONE]') {\r\n yield { done: true };\r\n return;\r\n }\r\n try {\r\n const parsed = JSON.parse(data) as {\r\n choices: Array<{ delta: { content?: string } }>;\r\n };\r\n const content = parsed.choices[0]?.delta?.content;\r\n if (content) {\r\n yield { content, done: false };\r\n }\r\n } catch {\r\n // Skip malformed JSON\r\n }\r\n }\r\n }\r\n }\r\n },\r\n\r\n async embed(input: string | string[], options?: EmbeddingOptions): Promise<EmbeddingResult[]> {\r\n const model = options?.model ?? 'text-embedding-3-small';\r\n const inputs = Array.isArray(input) ? input : [input];\r\n\r\n const body: Record<string, unknown> = { model, input: inputs };\r\n if (options?.dimensions) body.dimensions = options.dimensions;\r\n\r\n const response = await request('/embeddings', body) as {\r\n data: Array<{ embedding: number[] }>;\r\n usage?: { total_tokens: number };\r\n };\r\n\r\n return response.data.map(d => ({\r\n embedding: d.embedding,\r\n usage: response.usage ? { totalTokens: response.usage.total_tokens } : undefined,\r\n }));\r\n },\r\n };\r\n\r\n return adapter;\r\n};\r\n\r\nexport default openai;\r\n"]}
package/dist/index.d.ts
ADDED
@@ -0,0 +1,157 @@
+/**
+ * @flightdev/ai - Agnostic AI/LLM Integration
+ *
+ * Flight provides AI primitives, you choose the provider.
+ *
+ * @example
+ * ```typescript
+ * import { createAI } from '@flightdev/ai';
+ * import { openai } from '@flightdev/ai/openai';
+ *
+ * const ai = createAI(openai({ apiKey: process.env.OPENAI_API_KEY }));
+ *
+ * const response = await ai.chat([
+ *   { role: 'user', content: 'Hello!' }
+ * ]);
+ * ```
+ */
+/** Message role */
+type MessageRole = 'system' | 'user' | 'assistant' | 'tool';
+/** Chat message */
+interface ChatMessage {
+    role: MessageRole;
+    content: string;
+    name?: string;
+    toolCallId?: string;
+}
+/** Tool definition */
+interface ToolDefinition {
+    name: string;
+    description: string;
+    parameters: Record<string, unknown>;
+}
+/** Tool call from assistant */
+interface ToolCall {
+    id: string;
+    name: string;
+    arguments: Record<string, unknown>;
+}
+/** Chat completion options */
+interface ChatOptions {
+    /** Model to use */
+    model?: string;
+    /** Temperature (0-2) */
+    temperature?: number;
+    /** Max tokens to generate */
+    maxTokens?: number;
+    /** Stop sequences */
+    stop?: string[];
+    /** Available tools */
+    tools?: ToolDefinition[];
+    /** System prompt */
+    system?: string;
+    /** Stream response */
+    stream?: boolean;
+}
+/** Chat completion result */
+interface ChatResult {
+    /** Generated content */
+    content: string;
+    /** Tool calls (if any) */
+    toolCalls?: ToolCall[];
+    /** Finish reason */
+    finishReason: 'stop' | 'length' | 'tool_calls' | 'error';
+    /** Usage stats */
+    usage?: {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
+}
+/** Streaming chunk */
+interface StreamChunk {
+    /** Content delta */
+    content?: string;
+    /** Tool call delta */
+    toolCall?: Partial<ToolCall>;
+    /** Is final chunk */
+    done: boolean;
+}
+/** Embedding options */
+interface EmbeddingOptions {
+    model?: string;
+    dimensions?: number;
+}
+/** Embedding result */
+interface EmbeddingResult {
+    embedding: number[];
+    usage?: {
+        totalTokens: number;
+    };
+}
+/**
+ * AI Adapter Interface
+ *
+ * Implement this to create a custom AI provider.
+ */
+interface AIAdapter {
+    /** Adapter name */
+    readonly name: string;
+    /** List available models */
+    listModels(): Promise<string[]>;
+    /** Generate chat completion */
+    chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResult>;
+    /** Generate streaming chat completion */
+    chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncIterable<StreamChunk>;
+    /** Generate embeddings */
+    embed(input: string | string[], options?: EmbeddingOptions): Promise<EmbeddingResult[]>;
+}
+/** Adapter factory type */
+type AIAdapterFactory<TConfig = unknown> = (config: TConfig) => AIAdapter;
+/** AI service options */
+interface AIServiceOptions {
+    /** Default model */
+    defaultModel?: string;
+    /** Default temperature */
+    defaultTemperature?: number;
+    /** Default max tokens */
+    defaultMaxTokens?: number;
+}
+/** AI service */
+interface AIService {
+    /** The underlying adapter */
+    readonly adapter: AIAdapter;
+    /** List available models */
+    listModels(): Promise<string[]>;
+    /** Generate chat completion */
+    chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResult>;
+    /** Generate streaming chat completion */
+    chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncIterable<StreamChunk>;
+    /** Generate embeddings */
+    embed(input: string | string[], options?: EmbeddingOptions): Promise<EmbeddingResult[]>;
+    /** Simple text generation (convenience) */
+    generate(prompt: string, options?: ChatOptions): Promise<string>;
+    /** Create a conversation context */
+    conversation(systemPrompt?: string): Conversation;
+}
+/** Conversation context */
+interface Conversation {
+    /** Message history */
+    readonly messages: ChatMessage[];
+    /** Add a message */
+    add(role: MessageRole, content: string): void;
+    /** Send and get response */
+    send(content: string, options?: ChatOptions): Promise<string>;
+    /** Reset conversation */
+    reset(): void;
+}
+/**
+ * Create an AI service
+ */
+declare function createAI(adapter: AIAdapter, options?: AIServiceOptions): AIService;
+/** Count approximate tokens (rough estimation) */
+declare function estimateTokens(text: string): number;
+/** Truncate text to fit token limit */
+declare function truncateToTokens(text: string, maxTokens: number): string;
+
+export { type AIAdapter, type AIAdapterFactory, type AIService, type AIServiceOptions, type ChatMessage, type ChatOptions, type ChatResult, type Conversation, type EmbeddingOptions, type EmbeddingResult, type MessageRole, type StreamChunk, type ToolCall, type ToolDefinition, createAI, estimateTokens, truncateToTokens };
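`AIAdapter` is the extension point: any object satisfying the interface can be handed to `createAI`. A minimal custom adapter sketch (an echo provider invented for illustration; it exercises every required member of the interface above):

```typescript
import { createAI, type AIAdapter } from '@flightdev/ai';

const echo: AIAdapter = {
  name: 'echo',
  async listModels() {
    return ['echo-1']; // placeholder model name
  },
  async chat(messages) {
    // Reply with the last message's content verbatim.
    const last = messages[messages.length - 1];
    return { content: last?.content ?? '', finishReason: 'stop' };
  },
  async *chatStream(messages) {
    const last = messages[messages.length - 1];
    for (const word of (last?.content ?? '').split(' ')) {
      yield { content: word + ' ', done: false };
    }
    yield { done: true };
  },
  async embed(input) {
    const inputs = Array.isArray(input) ? input : [input];
    return inputs.map(() => ({ embedding: [0] })); // placeholder vectors
  },
};

const ai = createAI(echo);
```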
package/dist/index.js
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
// src/index.ts
|
|
2
|
+
function createAI(adapter, options = {}) {
|
|
3
|
+
const { defaultModel, defaultTemperature, defaultMaxTokens } = options;
|
|
4
|
+
function mergeOptions(opts) {
|
|
5
|
+
return {
|
|
6
|
+
model: opts?.model ?? defaultModel,
|
|
7
|
+
temperature: opts?.temperature ?? defaultTemperature,
|
|
8
|
+
maxTokens: opts?.maxTokens ?? defaultMaxTokens,
|
|
9
|
+
...opts
|
|
10
|
+
};
|
|
11
|
+
}
|
|
12
|
+
const service = {
|
|
13
|
+
adapter,
|
|
14
|
+
listModels() {
|
|
15
|
+
return adapter.listModels();
|
|
16
|
+
},
|
|
17
|
+
chat(messages, options2) {
|
|
18
|
+
return adapter.chat(messages, mergeOptions(options2));
|
|
19
|
+
},
|
|
20
|
+
chatStream(messages, options2) {
|
|
21
|
+
return adapter.chatStream(messages, mergeOptions(options2));
|
|
22
|
+
},
|
|
23
|
+
embed(input, options2) {
|
|
24
|
+
return adapter.embed(input, options2);
|
|
25
|
+
},
|
|
26
|
+
async generate(prompt, options2) {
|
|
27
|
+
const result = await adapter.chat(
|
|
28
|
+
[{ role: "user", content: prompt }],
|
|
29
|
+
mergeOptions(options2)
|
|
30
|
+
);
|
|
31
|
+
return result.content;
|
|
32
|
+
},
|
|
33
|
+
conversation(systemPrompt) {
|
|
34
|
+
const messages = [];
|
|
35
|
+
if (systemPrompt) {
|
|
36
|
+
messages.push({ role: "system", content: systemPrompt });
|
|
37
|
+
}
|
|
38
|
+
return {
|
|
39
|
+
get messages() {
|
|
40
|
+
return [...messages];
|
|
41
|
+
},
|
|
42
|
+
add(role, content) {
|
|
43
|
+
messages.push({ role, content });
|
|
44
|
+
},
|
|
45
|
+
async send(content, options2) {
|
|
46
|
+
messages.push({ role: "user", content });
|
|
47
|
+
const result = await service.chat(messages, options2);
|
|
48
|
+
messages.push({ role: "assistant", content: result.content });
|
|
49
|
+
return result.content;
|
|
50
|
+
},
|
|
51
|
+
reset() {
|
|
52
|
+
messages.length = systemPrompt ? 1 : 0;
|
|
53
|
+
}
|
|
54
|
+
};
|
|
55
|
+
}
|
|
56
|
+
};
|
|
57
|
+
return service;
|
|
58
|
+
}
|
|
59
|
+
function estimateTokens(text) {
|
|
60
|
+
return Math.ceil(text.length / 4);
|
|
61
|
+
}
|
|
62
|
+
function truncateToTokens(text, maxTokens) {
|
|
63
|
+
const estimatedChars = maxTokens * 4;
|
|
64
|
+
if (text.length <= estimatedChars) return text;
|
|
65
|
+
return text.slice(0, estimatedChars - 3) + "...";
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
export { createAI, estimateTokens, truncateToTokens };
|
|
69
|
+
//# sourceMappingURL=index.js.map
|
|
70
|
+
//# sourceMappingURL=index.js.map
|
|
package/dist/index.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts"],"names":["options"],"mappings":";AAgMO,SAAS,QAAA,CAAS,OAAA,EAAoB,OAAA,GAA4B,EAAC,EAAc;AACpF,EAAA,MAAM,EAAE,YAAA,EAAc,kBAAA,EAAoB,gBAAA,EAAiB,GAAI,OAAA;AAE/D,EAAA,SAAS,aAAa,IAAA,EAAiC;AACnD,IAAA,OAAO;AAAA,MACH,KAAA,EAAO,MAAM,KAAA,IAAS,YAAA;AAAA,MACtB,WAAA,EAAa,MAAM,WAAA,IAAe,kBAAA;AAAA,MAClC,SAAA,EAAW,MAAM,SAAA,IAAa,gBAAA;AAAA,MAC9B,GAAG;AAAA,KACP;AAAA,EACJ;AAEA,EAAA,MAAM,OAAA,GAAqB;AAAA,IACvB,OAAA;AAAA,IAEA,UAAA,GAAa;AACT,MAAA,OAAO,QAAQ,UAAA,EAAW;AAAA,IAC9B,CAAA;AAAA,IAEA,IAAA,CAAK,UAAUA,QAAAA,EAAS;AACpB,MAAA,OAAO,OAAA,CAAQ,IAAA,CAAK,QAAA,EAAU,YAAA,CAAaA,QAAO,CAAC,CAAA;AAAA,IACvD,CAAA;AAAA,IAEA,UAAA,CAAW,UAAUA,QAAAA,EAAS;AAC1B,MAAA,OAAO,OAAA,CAAQ,UAAA,CAAW,QAAA,EAAU,YAAA,CAAaA,QAAO,CAAC,CAAA;AAAA,IAC7D,CAAA;AAAA,IAEA,KAAA,CAAM,OAAOA,QAAAA,EAAS;AAClB,MAAA,OAAO,OAAA,CAAQ,KAAA,CAAM,KAAA,EAAOA,QAAO,CAAA;AAAA,IACvC,CAAA;AAAA,IAEA,MAAM,QAAA,CAAS,MAAA,EAAQA,QAAAA,EAAS;AAC5B,MAAA,MAAM,MAAA,GAAS,MAAM,OAAA,CAAQ,IAAA;AAAA,QACzB,CAAC,EAAE,IAAA,EAAM,MAAA,EAAQ,OAAA,EAAS,QAAQ,CAAA;AAAA,QAClC,aAAaA,QAAO;AAAA,OACxB;AACA,MAAA,OAAO,MAAA,CAAO,OAAA;AAAA,IAClB,CAAA;AAAA,IAEA,aAAa,YAAA,EAAc;AACvB,MAAA,MAAM,WAA0B,EAAC;AAEjC,MAAA,IAAI,YAAA,EAAc;AACd,QAAA,QAAA,CAAS,KAAK,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAAA,MAC3D;AAEA,MAAA,OAAO;AAAA,QACH,IAAI,QAAA,GAAW;AACX,UAAA,OAAO,CAAC,GAAG,QAAQ,CAAA;AAAA,QACvB,CAAA;AAAA,QAEA,GAAA,CAAI,MAAmB,OAAA,EAAiB;AACpC,UAAA,QAAA,CAAS,IAAA,CAAK,EAAE,IAAA,EAAM,OAAA,EAAS,CAAA;AAAA,QACnC,CAAA;AAAA,QAEA,MAAM,IAAA,CAAK,OAAA,EAAiBA,QAAAA,EAAuB;AAC/C,UAAA,QAAA,CAAS,IAAA,CAAK,EAAE,IAAA,EAAM,MAAA,EAAQ,SAAS,CAAA;AACvC,UAAA,MAAM,MAAA,GAAS,MAAM,OAAA,CAAQ,IAAA,CAAK,UAAUA,QAAO,CAAA;AACnD,UAAA,QAAA,CAAS,KAAK,EAAE,IAAA,EAAM,aAAa,OAAA,EAAS,MAAA,CAAO,SAAS,CAAA;AAC5D,UAAA,OAAO,MAAA,CAAO,OAAA;AAAA,QAClB,CAAA;AAAA,QAEA,KAAA,GAAQ;AACJ,UAAA,QAAA,CAAS,MAAA,GAAS,eAAe,CAAA,GAAI,CAAA;AAAA,QACzC;AAAA,OACJ;AAAA,IACJ;AAAA,GACJ;AAEA,EAAA,OAAO,OAAA;AACX;AAOO,SAAS,eAAe,IAAA,EAAsB;AAEjD,EAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,MAAA,GAAS,CAAC,CAAA;AACpC;AAGO,SAAS,gBAAA,CAAiB,MAAc,SAAA,EAA2B;AACtE,EAAA,MAAM,iBAAiB,SAAA,GAAY,CAAA;AACnC,EAAA,IAAI,IAAA,CAAK,MAAA,IAAU,cAAA,EAAgB,OAAO,IAAA;AAC1C,EAAA,OAAO,IAAA,CAAK,KAAA,CAAM,CAAA,EAAG,cAAA,GAAiB,CAAC,CAAA,GAAI,KAAA;AAC/C","file":"index.js","sourcesContent":["/**\r\n * @flightdev/ai - Agnostic AI/LLM Integration\r\n * \r\n * Flight provides AI primitives, you choose the provider.\r\n * \r\n * @example\r\n * ```typescript\r\n * import { createAI } from '@flightdev/ai';\r\n * import { openai } from '@flightdev/ai/openai';\r\n * \r\n * const ai = createAI(openai({ apiKey: process.env.OPENAI_API_KEY }));\r\n * \r\n * const response = await ai.chat([\r\n * { role: 'user', content: 'Hello!' 
}\r\n * ]);\r\n * ```\r\n */\r\n\r\n// ============================================================================\r\n// Types\r\n// ============================================================================\r\n\r\n/** Message role */\r\nexport type MessageRole = 'system' | 'user' | 'assistant' | 'tool';\r\n\r\n/** Chat message */\r\nexport interface ChatMessage {\r\n role: MessageRole;\r\n content: string;\r\n name?: string;\r\n toolCallId?: string;\r\n}\r\n\r\n/** Tool definition */\r\nexport interface ToolDefinition {\r\n name: string;\r\n description: string;\r\n parameters: Record<string, unknown>;\r\n}\r\n\r\n/** Tool call from assistant */\r\nexport interface ToolCall {\r\n id: string;\r\n name: string;\r\n arguments: Record<string, unknown>;\r\n}\r\n\r\n/** Chat completion options */\r\nexport interface ChatOptions {\r\n /** Model to use */\r\n model?: string;\r\n /** Temperature (0-2) */\r\n temperature?: number;\r\n /** Max tokens to generate */\r\n maxTokens?: number;\r\n /** Stop sequences */\r\n stop?: string[];\r\n /** Available tools */\r\n tools?: ToolDefinition[];\r\n /** System prompt */\r\n system?: string;\r\n /** Stream response */\r\n stream?: boolean;\r\n}\r\n\r\n/** Chat completion result */\r\nexport interface ChatResult {\r\n /** Generated content */\r\n content: string;\r\n /** Tool calls (if any) */\r\n toolCalls?: ToolCall[];\r\n /** Finish reason */\r\n finishReason: 'stop' | 'length' | 'tool_calls' | 'error';\r\n /** Usage stats */\r\n usage?: {\r\n promptTokens: number;\r\n completionTokens: number;\r\n totalTokens: number;\r\n };\r\n}\r\n\r\n/** Streaming chunk */\r\nexport interface StreamChunk {\r\n /** Content delta */\r\n content?: string;\r\n /** Tool call delta */\r\n toolCall?: Partial<ToolCall>;\r\n /** Is final chunk */\r\n done: boolean;\r\n}\r\n\r\n/** Embedding options */\r\nexport interface EmbeddingOptions {\r\n model?: string;\r\n dimensions?: number;\r\n}\r\n\r\n/** Embedding result */\r\nexport interface EmbeddingResult {\r\n embedding: number[];\r\n usage?: { totalTokens: number };\r\n}\r\n\r\n// ============================================================================\r\n// Adapter Interface\r\n// ============================================================================\r\n\r\n/**\r\n * AI Adapter Interface\r\n * \r\n * Implement this to create a custom AI provider.\r\n */\r\nexport interface AIAdapter {\r\n /** Adapter name */\r\n readonly name: string;\r\n\r\n /** List available models */\r\n listModels(): Promise<string[]>;\r\n\r\n /** Generate chat completion */\r\n chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResult>;\r\n\r\n /** Generate streaming chat completion */\r\n chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncIterable<StreamChunk>;\r\n\r\n /** Generate embeddings */\r\n embed(input: string | string[], options?: EmbeddingOptions): Promise<EmbeddingResult[]>;\r\n}\r\n\r\n/** Adapter factory type */\r\nexport type AIAdapterFactory<TConfig = unknown> = (config: TConfig) => AIAdapter;\r\n\r\n// ============================================================================\r\n// AI Service\r\n// ============================================================================\r\n\r\n/** AI service options */\r\nexport interface AIServiceOptions {\r\n /** Default model */\r\n defaultModel?: string;\r\n /** Default temperature */\r\n defaultTemperature?: number;\r\n /** Default max tokens */\r\n defaultMaxTokens?: number;\r\n}\r\n\r\n/** AI service */\r\nexport interface AIService {\r\n /** 
The underlying adapter */\r\n readonly adapter: AIAdapter;\r\n\r\n /** List available models */\r\n listModels(): Promise<string[]>;\r\n\r\n /** Generate chat completion */\r\n chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResult>;\r\n\r\n /** Generate streaming chat completion */\r\n chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncIterable<StreamChunk>;\r\n\r\n /** Generate embeddings */\r\n embed(input: string | string[], options?: EmbeddingOptions): Promise<EmbeddingResult[]>;\r\n\r\n /** Simple text generation (convenience) */\r\n generate(prompt: string, options?: ChatOptions): Promise<string>;\r\n\r\n /** Create a conversation context */\r\n conversation(systemPrompt?: string): Conversation;\r\n}\r\n\r\n/** Conversation context */\r\nexport interface Conversation {\r\n /** Message history */\r\n readonly messages: ChatMessage[];\r\n\r\n /** Add a message */\r\n add(role: MessageRole, content: string): void;\r\n\r\n /** Send and get response */\r\n send(content: string, options?: ChatOptions): Promise<string>;\r\n\r\n /** Reset conversation */\r\n reset(): void;\r\n}\r\n\r\n// ============================================================================\r\n// Factory\r\n// ============================================================================\r\n\r\n/**\r\n * Create an AI service\r\n */\r\nexport function createAI(adapter: AIAdapter, options: AIServiceOptions = {}): AIService {\r\n const { defaultModel, defaultTemperature, defaultMaxTokens } = options;\r\n\r\n function mergeOptions(opts?: ChatOptions): ChatOptions {\r\n return {\r\n model: opts?.model ?? defaultModel,\r\n temperature: opts?.temperature ?? defaultTemperature,\r\n maxTokens: opts?.maxTokens ?? defaultMaxTokens,\r\n ...opts,\r\n };\r\n }\r\n\r\n const service: AIService = {\r\n adapter,\r\n\r\n listModels() {\r\n return adapter.listModels();\r\n },\r\n\r\n chat(messages, options) {\r\n return adapter.chat(messages, mergeOptions(options));\r\n },\r\n\r\n chatStream(messages, options) {\r\n return adapter.chatStream(messages, mergeOptions(options));\r\n },\r\n\r\n embed(input, options) {\r\n return adapter.embed(input, options);\r\n },\r\n\r\n async generate(prompt, options) {\r\n const result = await adapter.chat(\r\n [{ role: 'user', content: prompt }],\r\n mergeOptions(options)\r\n );\r\n return result.content;\r\n },\r\n\r\n conversation(systemPrompt) {\r\n const messages: ChatMessage[] = [];\r\n\r\n if (systemPrompt) {\r\n messages.push({ role: 'system', content: systemPrompt });\r\n }\r\n\r\n return {\r\n get messages() {\r\n return [...messages];\r\n },\r\n\r\n add(role: MessageRole, content: string) {\r\n messages.push({ role, content });\r\n },\r\n\r\n async send(content: string, options?: ChatOptions) {\r\n messages.push({ role: 'user', content });\r\n const result = await service.chat(messages, options);\r\n messages.push({ role: 'assistant', content: result.content });\r\n return result.content;\r\n },\r\n\r\n reset() {\r\n messages.length = systemPrompt ? 
1 : 0;\r\n },\r\n };\r\n },\r\n };\r\n\r\n return service;\r\n}\r\n\r\n// ============================================================================\r\n// Utilities\r\n// ============================================================================\r\n\r\n/** Count approximate tokens (rough estimation) */\r\nexport function estimateTokens(text: string): number {\r\n // Rough estimation: ~4 chars per token for English\r\n return Math.ceil(text.length / 4);\r\n}\r\n\r\n/** Truncate text to fit token limit */\r\nexport function truncateToTokens(text: string, maxTokens: number): string {\r\n const estimatedChars = maxTokens * 4;\r\n if (text.length <= estimatedChars) return text;\r\n return text.slice(0, estimatedChars - 3) + '...';\r\n}\r\n"]}
|
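Two behaviors in the hunk above are easy to miss: `mergeOptions` spreads the caller's options last, so per-call fields win over the service-level defaults, and `reset()` trims the history back to just the system message when one was given. The pure token helpers can be sketched stand-alone; the values below follow directly from the ~4-characters-per-token heuristic in the source:

```typescript
import { estimateTokens, truncateToTokens } from '@flightdev/ai';

// estimateTokens: Math.ceil(text.length / 4)
console.log(estimateTokens('abcdefgh')); // 2  (8 chars / 4)
console.log(estimateTokens(''));         // 0

// truncateToTokens: budget of maxTokens * 4 characters, '...' suffix when cut
console.log(truncateToTokens('short', 4));         // 'short' (5 chars fit the 16-char budget)
console.log(truncateToTokens('a'.repeat(100), 4)); // 13 'a's + '...' (exactly 16 chars)
```
|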
package/package.json
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@flightdev/ai",
|
|
3
|
+
"version": "0.0.2",
|
|
4
|
+
"description": "Agnostic AI/LLM integration for Flight Framework. Choose your provider: OpenAI, Anthropic, Ollama, or custom.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"types": "./dist/index.d.ts",
|
|
9
|
+
"import": "./dist/index.js"
|
|
10
|
+
},
|
|
11
|
+
"./openai": {
|
|
12
|
+
"types": "./dist/adapters/openai.d.ts",
|
|
13
|
+
"import": "./dist/adapters/openai.js"
|
|
14
|
+
},
|
|
15
|
+
"./anthropic": {
|
|
16
|
+
"types": "./dist/adapters/anthropic.d.ts",
|
|
17
|
+
"import": "./dist/adapters/anthropic.js"
|
|
18
|
+
},
|
|
19
|
+
"./ollama": {
|
|
20
|
+
"types": "./dist/adapters/ollama.d.ts",
|
|
21
|
+
"import": "./dist/adapters/ollama.js"
|
|
22
|
+
}
|
|
23
|
+
},
|
|
24
|
+
"files": [
|
|
25
|
+
"dist"
|
|
26
|
+
],
|
|
27
|
+
"dependencies": {},
|
|
28
|
+
"peerDependencies": {
|
|
29
|
+
"openai": ">=4.0.0",
|
|
30
|
+
"ai": ">=3.0.0"
|
|
31
|
+
},
|
|
32
|
+
"peerDependenciesMeta": {
|
|
33
|
+
"openai": {
|
|
34
|
+
"optional": true
|
|
35
|
+
},
|
|
36
|
+
"ai": {
|
|
37
|
+
"optional": true
|
|
38
|
+
}
|
|
39
|
+
},
|
|
40
|
+
"devDependencies": {
|
|
41
|
+
"@types/node": "^22.0.0",
|
|
42
|
+
"tsup": "^8.0.0",
|
|
43
|
+
"typescript": "^5.7.0",
|
|
44
|
+
"vitest": "^2.0.0"
|
|
45
|
+
},
|
|
46
|
+
"keywords": [
|
|
47
|
+
"flight",
|
|
48
|
+
"ai",
|
|
49
|
+
"llm",
|
|
50
|
+
"openai",
|
|
51
|
+
"anthropic",
|
|
52
|
+
"ollama"
|
|
53
|
+
],
|
|
54
|
+
"license": "MIT",
|
|
55
|
+
"scripts": {
|
|
56
|
+
"build": "tsup",
|
|
57
|
+
"dev": "tsup --watch",
|
|
58
|
+
"test": "vitest run",
|
|
59
|
+
"typecheck": "tsc --noEmit"
|
|
60
|
+
}
|
|
61
|
+
}
|
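Each entry in the `exports` map above becomes an importable subpath, and `peerDependenciesMeta` marks both peers optional, so a consumer only installs the SDK behind the adapter they actually use. A hedged sketch of how the subpaths resolve (the `ollama` export name is assumed by symmetry with the other adapters):

```typescript
// Resolved through the "exports" field of package.json:
import { createAI } from '@flightdev/ai';            // "."           -> ./dist/index.js
import { openai } from '@flightdev/ai/openai';       // "./openai"    -> ./dist/adapters/openai.js
import { anthropic } from '@flightdev/ai/anthropic'; // "./anthropic" -> ./dist/adapters/anthropic.js
import { ollama } from '@flightdev/ai/ollama';       // "./ollama"    -> ./dist/adapters/ollama.js
```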