@xenterprises/fastify-x-ai 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.gitlab-ci.yml +45 -0
- package/README.md +357 -0
- package/package.json +59 -0
- package/src/xAI.js +377 -0
- package/test/xAI.test.js +280 -0
package/.gitlab-ci.yml
ADDED
@@ -0,0 +1,45 @@
+# ============================================================================
+# GitLab CI/CD Pipeline - xAI
+# ============================================================================
+# Runs tests on merge requests and commits to main/master
+
+stages:
+  - test
+
+variables:
+  NODE_ENV: test
+
+# ============================================================================
+# Shared Configuration
+# ============================================================================
+.shared_rules: &shared_rules
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+    - if: '$CI_COMMIT_BRANCH == "main"'
+    - if: '$CI_COMMIT_BRANCH == "master"'
+    - if: '$CI_COMMIT_TAG'
+
+# ============================================================================
+# STAGE: TEST
+# ============================================================================
+test:
+  stage: test
+  image: node:20-alpine
+  <<: *shared_rules
+
+  cache:
+    key: ${CI_COMMIT_REF_SLUG}
+    paths:
+      - node_modules/
+
+  before_script:
+    - npm ci
+
+  script:
+    - echo "Running xAI tests..."
+    - npm test
+    - npm audit --audit-level=high || true
+
+  retry:
+    max: 2
+    when: runner_system_failure
package/README.md
ADDED
@@ -0,0 +1,357 @@
+# xAI
+
+A Fastify plugin for the [Vercel AI SDK](https://ai-sdk.dev/) providing unified access to AI providers (OpenAI, Anthropic, Google) with text generation, streaming, embeddings, and structured output.
+
+## Features
+
+- **Unified Provider Access** - Single API for OpenAI, Anthropic, Google, and more
+- **Text Generation** - Generate text with `generate()` and `complete()`
+- **Streaming** - Real-time streaming with `stream()`
+- **Chat** - Conversation handling with `chat()`
+- **Embeddings** - Create embeddings for semantic search and RAG
+- **Structured Output** - Generate typed objects with schemas
+- **Tool Calling** - Function/tool execution support
+
+## Installation
+
+```bash
+npm install @xenterprises/fastify-x-ai ai
+
+# Install provider SDKs as needed
+npm install @ai-sdk/openai     # For OpenAI/GPT models
+npm install @ai-sdk/anthropic  # For Anthropic/Claude models
+npm install @ai-sdk/google     # For Google/Gemini models
+```
+
+## Quick Start
+
+```javascript
+import Fastify from "fastify";
+import xAI from "@xenterprises/fastify-x-ai";
+
+const fastify = Fastify();
+
+await fastify.register(xAI, {
+  defaultProvider: "openai",
+  providers: {
+    openai: { apiKey: process.env.OPENAI_API_KEY },
+  },
+});
+
+// Simple text completion
+const text = await fastify.xai.complete("Write a haiku about coding");
+console.log(text);
+
+// Chat with conversation history
+const result = await fastify.xai.chat({
+  messages: [
+    { role: "system", content: "You are a helpful assistant." },
+    { role: "user", content: "What is the capital of France?" },
+  ],
+});
+console.log(result.text);
+```
+
+## Configuration Options
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `active` | boolean | `true` | Enable/disable the plugin |
+| `defaultProvider` | string | `"openai"` | Default AI provider |
+| `defaultModel` | string | Provider default | Default model to use |
+| `defaultMaxTokens` | number | `4096` | Default max tokens |
+| `defaultTemperature` | number | `0.7` | Default temperature (0-1) |
+| `providers` | object | `{}` | Provider configurations |
+
+### Provider Configuration
+
+```javascript
+await fastify.register(xAI, {
+  providers: {
+    openai: {
+      apiKey: process.env.OPENAI_API_KEY,
+      baseURL: "https://custom-endpoint.com", // Optional
+    },
+    anthropic: {
+      apiKey: process.env.ANTHROPIC_API_KEY,
+    },
+    google: {
+      apiKey: process.env.GOOGLE_API_KEY,
+    },
+  },
+});
+```
+
+### Environment Variables
+
+You can also configure providers via environment variables:
+- `OPENAI_API_KEY`
+- `ANTHROPIC_API_KEY`
+- `GOOGLE_API_KEY`
+
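Since provider setup in `src/xAI.js` falls back to these variables, the `providers` block can be omitted entirely when the keys are already exported. A minimal sketch, assuming `OPENAI_API_KEY` is set in the environment:

```javascript
// The plugin reads process.env.OPENAI_API_KEY when no explicit key is given,
// so registration needs no providers block at all.
await fastify.register(xAI, { defaultProvider: "openai" });

const haiku = await fastify.xai.complete("Write a haiku about coding");
```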
+## API Reference
+
+### Decorators
+
+| Method | Description |
+|--------|-------------|
+| `fastify.xai.generate(params)` | Generate text with full control |
+| `fastify.xai.stream(params)` | Stream text generation |
+| `fastify.xai.chat(params)` | Chat with conversation history |
+| `fastify.xai.complete(prompt, options)` | Simple text completion |
+| `fastify.xai.createEmbedding(params)` | Create embeddings |
+| `fastify.xai.similarity(a, b)` | Calculate cosine similarity |
+| `fastify.xai.generateStructured(params)` | Generate structured output |
+| `fastify.xai.getModel(provider, model)` | Get model instance |
+| `fastify.xai.raw` | Raw AI SDK functions |
+
+### `generate(params)`
+
+Generate text with full control over parameters.
+
+```javascript
+const result = await fastify.xai.generate({
+  prompt: "Explain quantum computing",
+  // ...or pass messages instead of prompt for chat-style input:
+  // messages: [
+  //   { role: "user", content: "Hello!" },
+  // ],
+  system: "You are a physics professor.",
+  provider: "openai", // Optional: override default
+  model: "gpt-4o",    // Optional: specific model
+  maxTokens: 1000,    // Optional: max tokens
+  temperature: 0.7,   // Optional: creativity (0-1)
+  tools: { ... },     // Optional: tool definitions
+  maxSteps: 5,        // Optional: max tool execution steps
+});
+
+console.log(result.text);
+console.log(result.usage);     // Token usage
+console.log(result.toolCalls); // Tool calls made
+```
+
+### `stream(params)`
+
+Stream text generation for real-time applications.
+
+```javascript
+const result = await fastify.xai.stream({
+  prompt: "Tell me a long story",
+  onChunk: ({ chunk }) => {
+    // Handle each chunk
+    console.log(chunk);
+  },
+  onFinish: ({ text, usage }) => {
+    console.log("Done:", text);
+  },
+  onError: ({ error }) => {
+    console.error(error);
+  },
+});
+
+// Iterate over the stream
+for await (const text of result.textStream) {
+  process.stdout.write(text);
+}
+```
+
+### `chat(params)`
+
+Handle conversations with message history.
+
+```javascript
+// Non-streaming chat
+const result = await fastify.xai.chat({
+  messages: [
+    { role: "system", content: "You are a helpful assistant." },
+    { role: "user", content: "What is 2+2?" },
+    { role: "assistant", content: "2+2 equals 4." },
+    { role: "user", content: "What about 3+3?" },
+  ],
+});
+
+// Streaming chat
+const streamResult = await fastify.xai.chat({
+  messages: [...],
+  stream: true,
+});
+```
+
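With `stream: true`, `chat()` delegates to `stream()` (see `src/xAI.js`), so the streaming result is consumed exactly like a `stream()` result. A short sketch continuing the example above:

```javascript
// streamResult came from chat({ messages, stream: true }); because chat()
// forwards to stream(), the textStream async iterable is available here too.
for await (const text of streamResult.textStream) {
  process.stdout.write(text);
}
```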
+### `complete(prompt, options)`
+
+Simple text completion helper.
+
+```javascript
+const text = await fastify.xai.complete("Write a poem about the sea");
+
+// With options
+const summary = await fastify.xai.complete("Summarize this article", {
+  provider: "anthropic",
+  model: "claude-sonnet-4-20250514",
+  maxTokens: 500,
+});
+```
+
+### `createEmbedding(params)`
+
+Create embeddings for semantic search and RAG.
+
+```javascript
+// Single embedding
+const { embedding } = await fastify.xai.createEmbedding({
+  text: "Hello world",
+});
+
+// Multiple embeddings
+const { embeddings } = await fastify.xai.createEmbedding({
+  texts: ["Hello", "World", "Foo", "Bar"],
+});
+
+// Calculate similarity
+const similarity = fastify.xai.similarity(embedding1, embedding2);
+console.log(`Similarity: ${similarity}`); // -1 to 1
+```
+
+### `generateStructured(params)`
+
+Generate structured output with a Zod schema.
+
+```javascript
+import { z } from "zod";
+
+const result = await fastify.xai.generateStructured({
+  prompt: "Generate a recipe for chocolate cake",
+  schema: z.object({
+    name: z.string(),
+    ingredients: z.array(z.object({
+      item: z.string(),
+      amount: z.string(),
+    })),
+    instructions: z.array(z.string()),
+    prepTime: z.number(),
+    cookTime: z.number(),
+  }),
+  schemaName: "Recipe",
+  schemaDescription: "A cooking recipe",
+});
+
+console.log(result.output); // Typed recipe object
+```
+
+### Tool Calling
+
+Use tools/functions with AI models.
+
+```javascript
+const result = await fastify.xai.generate({
+  prompt: "What's the weather in San Francisco?",
+  tools: {
+    getWeather: {
+      description: "Get weather for a location",
+      parameters: z.object({
+        location: z.string(),
+      }),
+      execute: async ({ location }) => {
+        // Call weather API
+        return { temperature: 72, condition: "sunny" };
+      },
+    },
+  },
+  maxSteps: 3, // Allow up to 3 tool execution steps
+});
+
+console.log(result.text);
+console.log(result.toolCalls);
+console.log(result.toolResults);
+```
+
+### Raw SDK Access
+
+Access the underlying Vercel AI SDK functions directly.
+
+```javascript
+const { generateText, streamText, embed } = fastify.xai.raw;
+
+// Use raw functions for advanced scenarios
+const result = await generateText({
+  model: fastify.xai.getModel("openai", "gpt-4o"),
+  prompt: "Hello",
+});
+```
+
+## Default Models
+
+| Provider | Default Model |
+|----------|---------------|
+| OpenAI | `gpt-4o` |
+| Anthropic | `claude-sonnet-4-20250514` |
+| Google | `gemini-2.0-flash` |
+
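These defaults are the `DEFAULT_MODELS` map exported by `src/xAI.js`; `getModel()` falls back to them when neither `defaultModel` nor an explicit model argument is given. A sketch of the fallback:

```javascript
import { DEFAULT_MODELS } from "@xenterprises/fastify-x-ai";

// getModel() resolves: explicit model > options.defaultModel > DEFAULT_MODELS.
const model = fastify.xai.getModel("openai"); // resolves to "gpt-4o"
console.log(DEFAULT_MODELS.openai);           // "gpt-4o"
```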
+## Usage in Routes
+
+```javascript
+fastify.post("/chat", async (request, reply) => {
+  const { messages } = request.body;
+
+  const result = await fastify.xai.chat({
+    messages,
+    system: "You are a helpful assistant.",
+  });
+
+  return { response: result.text };
+});
+
+fastify.post("/stream", async (request, reply) => {
+  const { prompt } = request.body;
+
+  const result = await fastify.xai.stream({ prompt });
+
+  reply.header("Content-Type", "text/event-stream");
+  reply.header("Cache-Control", "no-cache");
+
+  for await (const chunk of result.textStream) {
+    reply.raw.write(`data: ${JSON.stringify({ text: chunk })}\n\n`);
+  }
+
+  reply.raw.end();
+});
+
+fastify.post("/search", async (request, reply) => {
+  const { query, documents } = request.body;
+
+  // Create embeddings
+  const { embedding: queryEmbed } = await fastify.xai.createEmbedding({
+    text: query,
+  });
+
+  const { embeddings: docEmbeds } = await fastify.xai.createEmbedding({
+    texts: documents,
+  });
+
+  // Find most similar
+  const similarities = docEmbeds.map((embed, i) => ({
+    document: documents[i],
+    score: fastify.xai.similarity(queryEmbed, embed),
+  }));
+
+  similarities.sort((a, b) => b.score - a.score);
+
+  return { results: similarities.slice(0, 5) };
+});
+```
+
+## Testing
+
+```bash
+npm test
+```
+
+## Resources
+
+- [Vercel AI SDK Documentation](https://ai-sdk.dev/docs/introduction)
+- [AI SDK Core: Generating Text](https://ai-sdk.dev/docs/ai-sdk-core/generating-text)
+- [AI SDK GitHub](https://github.com/vercel/ai)
+
+## License
+
+MIT
package/package.json
ADDED
@@ -0,0 +1,59 @@
+{
+  "name": "@xenterprises/fastify-x-ai",
+  "version": "1.0.1",
+  "description": "Fastify plugin for Vercel AI SDK - unified AI provider access with text generation, streaming, embeddings, and structured output",
+  "type": "module",
+  "main": "src/xAI.js",
+  "exports": {
+    ".": "./src/xAI.js"
+  },
+  "engines": {
+    "node": ">=20.0.0",
+    "npm": ">=10.0.0"
+  },
+  "scripts": {
+    "test": "node --test test/xAI.test.js",
+    "lint": "eslint src/ test/"
+  },
+  "keywords": [
+    "fastify",
+    "ai",
+    "vercel",
+    "openai",
+    "anthropic",
+    "google",
+    "llm",
+    "chatgpt",
+    "claude",
+    "gemini",
+    "embeddings",
+    "streaming",
+    "text-generation"
+  ],
+  "author": "X Enterprises",
+  "license": "MIT",
+  "dependencies": {
+    "fastify-plugin": "^5.0.1"
+  },
+  "peerDependencies": {
+    "ai": "^4.0.0 || ^5.0.0 || ^6.0.0",
+    "fastify": "^5.0.0"
+  },
+  "peerDependenciesMeta": {
+    "@ai-sdk/openai": {
+      "optional": true
+    },
+    "@ai-sdk/anthropic": {
+      "optional": true
+    },
+    "@ai-sdk/google": {
+      "optional": true
+    }
+  },
+  "devDependencies": {
+    "@ai-sdk/openai": "^1.0.0",
+    "ai": "^4.0.0",
+    "eslint": "^9.0.0",
+    "fastify": "^5.0.0"
+  }
+}
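The `@ai-sdk/*` packages appear only under `peerDependenciesMeta` because `src/xAI.js` imports each one lazily and degrades gracefully when it is missing; roughly this pattern:

```javascript
// Sketch of the lazy-provider pattern from src/xAI.js: the import runs only
// when a key is present, and a missing SDK just logs a warning.
if (process.env.OPENAI_API_KEY) {
  try {
    const { createOpenAI } = await import("@ai-sdk/openai");
    providers.openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
  } catch {
    fastify.log.warn("OpenAI provider not available. Install @ai-sdk/openai");
  }
}
```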
package/src/xAI.js
ADDED
@@ -0,0 +1,377 @@
+/**
+ * xAI - Vercel AI SDK Integration for Fastify
+ *
+ * A Fastify plugin that wraps the Vercel AI SDK providing:
+ * - Unified model provider access (OpenAI, Anthropic, Google, etc.)
+ * - Text generation (generateText, streamText)
+ * - Structured output generation
+ * - Embeddings for semantic search and RAG
+ * - Tool/function calling support
+ * - Streaming responses for real-time applications
+ *
+ * @see https://ai-sdk.dev/docs/introduction
+ */
+
+import fp from "fastify-plugin";
+
+/**
+ * Default model configurations
+ */
+const DEFAULT_MODELS = {
+  openai: "gpt-4o",
+  anthropic: "claude-sonnet-4-20250514",
+  google: "gemini-2.0-flash",
+};
+
+/**
+ * xAI Plugin
+ *
+ * @param {import('fastify').FastifyInstance} fastify - Fastify instance
+ * @param {Object} options - Plugin options
+ * @param {boolean} [options.active] - Enable/disable the plugin (default: true)
+ * @param {string} [options.defaultProvider] - Default AI provider (openai, anthropic, google)
+ * @param {string} [options.defaultModel] - Default model to use
+ * @param {Object} [options.providers] - Provider configurations
+ * @param {Object} [options.providers.openai] - OpenAI configuration
+ * @param {string} [options.providers.openai.apiKey] - OpenAI API key
+ * @param {string} [options.providers.openai.baseURL] - Custom base URL
+ * @param {Object} [options.providers.anthropic] - Anthropic configuration
+ * @param {string} [options.providers.anthropic.apiKey] - Anthropic API key
+ * @param {Object} [options.providers.google] - Google configuration
+ * @param {string} [options.providers.google.apiKey] - Google API key
+ * @param {number} [options.defaultMaxTokens] - Default max tokens (default: 4096)
+ * @param {number} [options.defaultTemperature] - Default temperature (default: 0.7)
+ */
+async function xAI(fastify, options) {
+  // Check if plugin is disabled
+  if (options.active === false) {
+    console.info("  ⏸️ xAI Disabled");
+    return;
+  }
+
+  // Dynamically import AI SDK
+  let ai;
+  try {
+    ai = await import("ai");
+  } catch {
+    throw new Error("xAI: 'ai' package is required. Install with: npm install ai");
+  }
+
+  const { generateText, streamText, embed, embedMany, cosineSimilarity } = ai;
+
+  // Initialize providers
+  const providers = {};
+  const providerConfigs = options.providers || {};
+
+  // Setup OpenAI provider
+  if (providerConfigs.openai?.apiKey || process.env.OPENAI_API_KEY) {
+    try {
+      const { createOpenAI } = await import("@ai-sdk/openai");
+      providers.openai = createOpenAI({
+        apiKey: providerConfigs.openai?.apiKey || process.env.OPENAI_API_KEY,
+        baseURL: providerConfigs.openai?.baseURL,
+      });
+    } catch {
+      fastify.log.warn("OpenAI provider not available. Install @ai-sdk/openai");
+    }
+  }
+
+  // Setup Anthropic provider
+  if (providerConfigs.anthropic?.apiKey || process.env.ANTHROPIC_API_KEY) {
+    try {
+      const { createAnthropic } = await import("@ai-sdk/anthropic");
+      providers.anthropic = createAnthropic({
+        apiKey: providerConfigs.anthropic?.apiKey || process.env.ANTHROPIC_API_KEY,
+      });
+    } catch {
+      fastify.log.warn("Anthropic provider not available. Install @ai-sdk/anthropic");
+    }
+  }
+
+  // Setup Google provider
+  if (providerConfigs.google?.apiKey || process.env.GOOGLE_API_KEY) {
+    try {
+      const { createGoogleGenerativeAI } = await import("@ai-sdk/google");
+      providers.google = createGoogleGenerativeAI({
+        apiKey: providerConfigs.google?.apiKey || process.env.GOOGLE_API_KEY,
+      });
+    } catch {
+      fastify.log.warn("Google provider not available. Install @ai-sdk/google");
+    }
+  }
+
+  const config = {
+    defaultProvider: options.defaultProvider || "openai",
+    defaultModel: options.defaultModel,
+    defaultMaxTokens: options.defaultMaxTokens || 4096,
+    defaultTemperature: options.defaultTemperature ?? 0.7,
+    providers: Object.keys(providers),
+  };
+
+  /**
+   * Get model instance for a provider
+   * @param {string} [providerName] - Provider name (openai, anthropic, google)
+   * @param {string} [modelName] - Model name
+   * @returns {Object} Model instance
+   */
+  function getModel(providerName, modelName) {
+    const provider = providerName || config.defaultProvider;
+    const model = modelName || config.defaultModel || DEFAULT_MODELS[provider];
+
+    if (!providers[provider]) {
+      throw new Error(`xAI: Provider '${provider}' not configured. Available: ${config.providers.join(", ")}`);
+    }
+
+    return providers[provider](model);
+  }
+
+  /**
+   * Generate text using AI
+   * @param {Object} params - Generation parameters
+   * @param {string} [params.prompt] - The prompt to generate from (or pass messages)
+   * @param {string} [params.system] - System message
+   * @param {Array} [params.messages] - Chat messages array
+   * @param {string} [params.provider] - Provider to use
+   * @param {string} [params.model] - Model to use
+   * @param {number} [params.maxTokens] - Maximum tokens
+   * @param {number} [params.temperature] - Temperature (0-1)
+   * @param {Object} [params.tools] - Tools/functions for the model to use
+   * @param {number} [params.maxSteps] - Max tool execution steps
+   * @param {Object} [params.output] - Structured output schema
+   * @returns {Promise<Object>} Generation result
+   */
+  async function generate(params) {
+    const {
+      prompt,
+      system,
+      messages,
+      provider,
+      model: modelName,
+      maxTokens = config.defaultMaxTokens,
+      temperature = config.defaultTemperature,
+      tools,
+      maxSteps,
+      output,
+      ...rest
+    } = params;
+
+    const model = getModel(provider, modelName);
+
+    const generateParams = {
+      model,
+      maxTokens,
+      temperature,
+      ...rest,
+    };
+
+    if (prompt) generateParams.prompt = prompt;
+    if (system) generateParams.system = system;
+    if (messages) generateParams.messages = messages;
+    if (tools) generateParams.tools = tools;
+    if (maxSteps) generateParams.maxSteps = maxSteps;
+    if (output) generateParams.output = output;
+
+    const result = await generateText(generateParams);
+
+    return {
+      text: result.text,
+      content: result.content,
+      toolCalls: result.toolCalls,
+      toolResults: result.toolResults,
+      finishReason: result.finishReason,
+      usage: result.usage,
+      totalUsage: result.totalUsage,
+      steps: result.steps,
+      response: result.response,
+      warnings: result.warnings,
+    };
+  }
+
+  /**
+   * Stream text using AI
+   * @param {Object} params - Generation parameters (same as generate)
+   * @returns {Promise<Object>} Stream result with textStream and helpers
+   */
+  async function stream(params) {
+    const {
+      prompt,
+      system,
+      messages,
+      provider,
+      model: modelName,
+      maxTokens = config.defaultMaxTokens,
+      temperature = config.defaultTemperature,
+      tools,
+      maxSteps,
+      onChunk,
+      onFinish,
+      onError,
+      ...rest
+    } = params;
+
+    const model = getModel(provider, modelName);
+
+    const streamParams = {
+      model,
+      maxTokens,
+      temperature,
+      ...rest,
+    };
+
+    if (prompt) streamParams.prompt = prompt;
+    if (system) streamParams.system = system;
+    if (messages) streamParams.messages = messages;
+    if (tools) streamParams.tools = tools;
+    if (maxSteps) streamParams.maxSteps = maxSteps;
+    if (onChunk) streamParams.onChunk = onChunk;
+    if (onFinish) streamParams.onFinish = onFinish;
+    if (onError) streamParams.onError = onError;
+
+    return streamText(streamParams);
+  }
+
+  /**
+   * Generate embeddings for text
+   * @param {Object} params - Embedding parameters
+   * @param {string} [params.text] - Single text to embed
+   * @param {string[]} [params.texts] - Multiple texts to embed
+   * @param {string} [params.provider] - Provider to use
+   * @param {string} [params.model] - Embedding model
+   * @returns {Promise<Object>} Embedding result
+   */
+  async function createEmbedding(params) {
+    const { text, texts, provider, model: modelName } = params;
+
+    // Default embedding models
+    const embeddingModels = {
+      openai: "text-embedding-3-small",
+      google: "text-embedding-004",
+    };
+
+    const providerName = provider || config.defaultProvider;
+    if (!providers[providerName]) {
+      throw new Error(`xAI: Provider '${providerName}' not configured. Available: ${config.providers.join(", ")}`);
+    }
+
+    // Embedding models must come from the provider's embedding accessor;
+    // calling the provider directly (as getModel does) returns a language
+    // model, which embed()/embedMany() cannot use.
+    const modelId = modelName || embeddingModels[providerName];
+    const providerInstance = providers[providerName];
+    const model = providerInstance.textEmbeddingModel
+      ? providerInstance.textEmbeddingModel(modelId)
+      : providerInstance.embedding(modelId);
+
+    if (texts && texts.length > 0) {
+      const result = await embedMany({
+        model,
+        values: texts,
+      });
+      return {
+        embeddings: result.embeddings,
+        usage: result.usage,
+      };
+    }
+
+    const result = await embed({
+      model,
+      value: text,
+    });
+
+    return {
+      embedding: result.embedding,
+      usage: result.usage,
+    };
+  }
+
+  /**
+   * Calculate cosine similarity between two embeddings
+   * @param {number[]} a - First embedding
+   * @param {number[]} b - Second embedding
+   * @returns {number} Similarity score (-1 to 1)
+   */
+  function similarity(a, b) {
+    return cosineSimilarity(a, b);
+  }
+
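For reference, `cosineSimilarity` is the dot product of the two vectors divided by the product of their magnitudes, which is why the score is bounded to [-1, 1]. A hand-rolled equivalent (a sketch, not the SDK's implementation):

```javascript
// Cosine similarity for two equal-length numeric vectors.
function cosineSim(a, b) {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}
```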
+  /**
+   * Chat completion with conversation history
+   * @param {Object} params - Chat parameters
+   * @param {Array<{role: string, content: string}>} params.messages - Conversation messages
+   * @param {string} [params.system] - System message
+   * @param {string} [params.provider] - Provider to use
+   * @param {string} [params.model] - Model to use
+   * @param {boolean} [params.stream] - Whether to stream the response
+   * @returns {Promise<Object>} Chat result
+   */
+  async function chat(params) {
+    const { messages, system, stream: shouldStream = false, ...rest } = params;
+
+    if (shouldStream) {
+      return stream({ messages, system, ...rest });
+    }
+
+    return generate({ messages, system, ...rest });
+  }
+
+  /**
+   * Simple text completion
+   * @param {string} prompt - The prompt
+   * @param {Object} [options] - Additional options
+   * @returns {Promise<string>} Generated text
+   */
+  async function complete(prompt, options = {}) {
+    const result = await generate({ prompt, ...options });
+    return result.text;
+  }
+
+  /**
+   * Generate structured output using a schema
+   * @param {Object} params - Generation parameters
+   * @param {string} params.prompt - The prompt
+   * @param {Object} params.schema - Zod schema for output
+   * @param {string} [params.schemaName] - Name for the schema
+   * @param {string} [params.schemaDescription] - Description for the schema
+   * @returns {Promise<Object>} Structured output
+   */
+  async function generateStructured(params) {
+    const { prompt, schema, schemaName, schemaDescription, ...rest } = params;
+
+    // Import Output helper from ai package
+    const { Output } = await import("ai");
+
+    const result = await generate({
+      prompt,
+      output: Output.object({
+        schema,
+        name: schemaName,
+        description: schemaDescription,
+      }),
+      ...rest,
+    });
+
+    return result;
+  }
+
+  // Store configuration and methods
+  fastify.decorate("xai", {
+    config,
+    providers,
+    getModel,
+    generate,
+    stream,
+    chat,
+    complete,
+    createEmbedding,
+    similarity,
+    generateStructured,
+    // Re-export raw AI SDK functions for advanced use
+    raw: {
+      generateText,
+      streamText,
+      embed,
+      embedMany,
+      cosineSimilarity,
+    },
+  });
+
+  const enabledProviders = config.providers.length > 0 ? config.providers.join(", ") : "none";
+  console.info("  ✅ xAI Vercel AI SDK Enabled");
+  console.info(`     • Default Provider: ${config.defaultProvider}`);
+  console.info(`     • Enabled Providers: ${enabledProviders}`);
+}
+
+export default fp(xAI, {
+  name: "xAI",
+});
+
+export { DEFAULT_MODELS };
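The tests below register the plugin with placeholder keys such as "test-key". That works offline because provider construction only stores options; the key is first used when an actual generation request runs (an assumption about the AI SDK provider factories' lazy-request design, consistent with the suite's own note that no API calls are required):

```javascript
import { createOpenAI } from "@ai-sdk/openai";

// No network I/O here (assumption: provider factories are lazy); the key is
// only transmitted once a generate/embed call actually runs.
const openai = createOpenAI({ apiKey: "test-key" });
const model = openai("gpt-4o"); // still local: just a model handle
```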
package/test/xAI.test.js
ADDED
@@ -0,0 +1,280 @@
+/**
+ * xAI Tests
+ *
+ * Tests for the xAI Fastify plugin (Vercel AI SDK integration)
+ * Note: These are unit tests that don't require actual API calls
+ */
+
+import { describe, test, beforeEach, afterEach } from "node:test";
+import assert from "node:assert";
+import Fastify from "fastify";
+import xAI, { DEFAULT_MODELS } from "../src/xAI.js";
+
+describe("xAI Plugin", () => {
+  let fastify;
+
+  beforeEach(() => {
+    fastify = Fastify({ logger: { level: "silent" } });
+  });
+
+  afterEach(async () => {
+    await fastify.close();
+  });
+
+  describe("Plugin Registration", () => {
+    test("should skip registration when active is false", async () => {
+      await fastify.register(xAI, { active: false });
+      await fastify.ready();
+
+      assert.ok(!fastify.xai, "xai should not exist");
+    });
+
+    test("should register with OpenAI provider from env", async () => {
+      // Set env var temporarily
+      const originalKey = process.env.OPENAI_API_KEY;
+      process.env.OPENAI_API_KEY = "test-key-12345";
+
+      try {
+        await fastify.register(xAI, {});
+        await fastify.ready();
+
+        assert.ok(fastify.xai, "xai should exist");
+        assert.ok(fastify.xai.config, "xai.config should exist");
+        assert.ok(fastify.xai.generate, "xai.generate should exist");
+        assert.ok(fastify.xai.stream, "xai.stream should exist");
+        assert.ok(fastify.xai.chat, "xai.chat should exist");
+        assert.ok(fastify.xai.complete, "xai.complete should exist");
+        assert.ok(fastify.xai.createEmbedding, "xai.createEmbedding should exist");
+        assert.ok(fastify.xai.similarity, "xai.similarity should exist");
+        assert.ok(fastify.xai.generateStructured, "xai.generateStructured should exist");
+        assert.ok(fastify.xai.getModel, "xai.getModel should exist");
+        assert.ok(fastify.xai.raw, "xai.raw should exist");
+      } finally {
+        if (originalKey) {
+          process.env.OPENAI_API_KEY = originalKey;
+        } else {
+          delete process.env.OPENAI_API_KEY;
+        }
+      }
+    });
+
+    test("should register with explicit provider config", async () => {
+      await fastify.register(xAI, {
+        providers: {
+          openai: { apiKey: "test-api-key" },
+        },
+      });
+      await fastify.ready();
+
+      assert.ok(fastify.xai, "xai should exist");
+      assert.ok(fastify.xai.config.providers.includes("openai"), "openai should be in providers");
+    });
+
+    test("should store default configuration", async () => {
+      await fastify.register(xAI, {
+        defaultProvider: "openai",
+        defaultMaxTokens: 2048,
+        defaultTemperature: 0.5,
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      assert.strictEqual(fastify.xai.config.defaultProvider, "openai");
+      assert.strictEqual(fastify.xai.config.defaultMaxTokens, 2048);
+      assert.strictEqual(fastify.xai.config.defaultTemperature, 0.5);
+    });
+
+    test("should use default values when not specified", async () => {
+      await fastify.register(xAI, {
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      assert.strictEqual(fastify.xai.config.defaultProvider, "openai");
+      assert.strictEqual(fastify.xai.config.defaultMaxTokens, 4096);
+      assert.strictEqual(fastify.xai.config.defaultTemperature, 0.7);
+    });
+  });
+
+  describe("Model Selection", () => {
+    test("should get model for configured provider", async () => {
+      await fastify.register(xAI, {
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      const model = fastify.xai.getModel("openai", "gpt-4o");
+      assert.ok(model, "model should exist");
+    });
+
+    test("should throw for unconfigured provider", async () => {
+      await fastify.register(xAI, {
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      assert.throws(() => {
+        fastify.xai.getModel("anthropic", "claude-3");
+      }, /Provider 'anthropic' not configured/);
+    });
+
+    test("should use default provider when not specified", async () => {
+      await fastify.register(xAI, {
+        defaultProvider: "openai",
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      // Should not throw when provider not specified
+      const model = fastify.xai.getModel();
+      assert.ok(model, "model should exist");
+    });
+  });
+
+  describe("Raw SDK Access", () => {
+    test("should expose raw AI SDK functions", async () => {
+      await fastify.register(xAI, {
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      assert.ok(fastify.xai.raw, "raw should exist");
+      assert.ok(fastify.xai.raw.generateText, "generateText should exist");
+      assert.ok(fastify.xai.raw.streamText, "streamText should exist");
+      assert.ok(fastify.xai.raw.embed, "embed should exist");
+      assert.ok(fastify.xai.raw.embedMany, "embedMany should exist");
+      assert.ok(fastify.xai.raw.cosineSimilarity, "cosineSimilarity should exist");
+    });
+  });
+
+  describe("Similarity Function", () => {
+    test("should calculate cosine similarity", async () => {
+      await fastify.register(xAI, {
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      // Test with identical vectors - should be 1
+      const sim1 = fastify.xai.similarity([1, 0, 0], [1, 0, 0]);
+      assert.ok(Math.abs(sim1 - 1) < 0.001, "identical vectors should have similarity ~1");
+
+      // Test with orthogonal vectors - should be 0
+      const sim2 = fastify.xai.similarity([1, 0], [0, 1]);
+      assert.ok(Math.abs(sim2) < 0.001, "orthogonal vectors should have similarity ~0");
+
+      // Test with opposite vectors - should be -1
+      const sim3 = fastify.xai.similarity([1, 0], [-1, 0]);
+      assert.ok(Math.abs(sim3 + 1) < 0.001, "opposite vectors should have similarity ~-1");
+    });
+  });
+
+  describe("Provider Configuration", () => {
+    test("should list enabled providers", async () => {
+      await fastify.register(xAI, {
+        providers: {
+          openai: { apiKey: "test-key" },
+        },
+      });
+      await fastify.ready();
+
+      assert.ok(Array.isArray(fastify.xai.config.providers));
+      assert.ok(fastify.xai.config.providers.includes("openai"));
+    });
+
+    test("should have empty providers when none configured", async () => {
+      // Clear any env vars
+      const originalOpenAI = process.env.OPENAI_API_KEY;
+      const originalAnthropic = process.env.ANTHROPIC_API_KEY;
+      const originalGoogle = process.env.GOOGLE_API_KEY;
+
+      delete process.env.OPENAI_API_KEY;
+      delete process.env.ANTHROPIC_API_KEY;
+      delete process.env.GOOGLE_API_KEY;
+
+      try {
+        await fastify.register(xAI, {});
+        await fastify.ready();
+
+        assert.ok(Array.isArray(fastify.xai.config.providers));
+        assert.strictEqual(fastify.xai.config.providers.length, 0);
+      } finally {
+        if (originalOpenAI) process.env.OPENAI_API_KEY = originalOpenAI;
+        if (originalAnthropic) process.env.ANTHROPIC_API_KEY = originalAnthropic;
+        if (originalGoogle) process.env.GOOGLE_API_KEY = originalGoogle;
+      }
+    });
+  });
+});
+
+describe("Exports", () => {
+  test("should export DEFAULT_MODELS", () => {
+    assert.ok(DEFAULT_MODELS, "DEFAULT_MODELS should exist");
+    assert.strictEqual(DEFAULT_MODELS.openai, "gpt-4o");
+    assert.strictEqual(DEFAULT_MODELS.anthropic, "claude-sonnet-4-20250514");
+    assert.strictEqual(DEFAULT_MODELS.google, "gemini-2.0-flash");
+  });
+});
+
+describe("Method Signatures", () => {
+  let fastify;
+
+  beforeEach(async () => {
+    fastify = Fastify({ logger: { level: "silent" } });
+    await fastify.register(xAI, {
+      providers: {
+        openai: { apiKey: "test-key" },
+      },
+    });
+    await fastify.ready();
+  });
+
+  afterEach(async () => {
+    await fastify.close();
+  });
+
+  test("generate should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.generate, "function");
+  });
+
+  test("stream should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.stream, "function");
+  });
+
+  test("chat should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.chat, "function");
+  });
+
+  test("complete should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.complete, "function");
+  });
+
+  test("createEmbedding should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.createEmbedding, "function");
+  });
+
+  test("similarity should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.similarity, "function");
+  });
+
+  test("generateStructured should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.generateStructured, "function");
+  });
+
+  test("getModel should be a function", () => {
+    assert.strictEqual(typeof fastify.xai.getModel, "function");
+  });
+});