cencori 0.3.2 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +88 -124
- package/dist/ai/index.d.mts +67 -0
- package/dist/ai/index.d.ts +67 -0
- package/dist/ai/index.js +188 -0
- package/dist/ai/index.js.map +1 -0
- package/dist/ai/index.mjs +163 -0
- package/dist/ai/index.mjs.map +1 -0
- package/dist/compute/index.d.mts +38 -0
- package/dist/compute/index.d.ts +38 -0
- package/dist/compute/index.js +62 -0
- package/dist/compute/index.js.map +1 -0
- package/dist/compute/index.mjs +37 -0
- package/dist/compute/index.mjs.map +1 -0
- package/dist/index.d.mts +119 -53
- package/dist/index.d.ts +119 -53
- package/dist/index.js +706 -46
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +695 -40
- package/dist/index.mjs.map +1 -1
- package/dist/storage/index.d.mts +82 -0
- package/dist/storage/index.d.ts +82 -0
- package/dist/storage/index.js +122 -0
- package/dist/storage/index.js.map +1 -0
- package/dist/storage/index.mjs +97 -0
- package/dist/storage/index.mjs.map +1 -0
- package/dist/tanstack/index.d.mts +95 -0
- package/dist/tanstack/index.d.ts +95 -0
- package/dist/tanstack/index.js +290 -0
- package/dist/tanstack/index.js.map +1 -0
- package/dist/tanstack/index.mjs +262 -0
- package/dist/tanstack/index.mjs.map +1 -0
- package/dist/types-DIuz6XWN.d.mts +78 -0
- package/dist/types-DIuz6XWN.d.ts +78 -0
- package/dist/vercel/index.d.mts +126 -0
- package/dist/vercel/index.d.ts +126 -0
- package/dist/vercel/index.js +373 -0
- package/dist/vercel/index.js.map +1 -0
- package/dist/vercel/index.mjs +344 -0
- package/dist/vercel/index.mjs.map +1 -0
- package/dist/workflow/index.d.mts +44 -0
- package/dist/workflow/index.d.ts +44 -0
- package/dist/workflow/index.js +72 -0
- package/dist/workflow/index.js.map +1 -0
- package/dist/workflow/index.mjs +47 -0
- package/dist/workflow/index.mjs.map +1 -0
- package/package.json +98 -41
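
The new `dist/` subdirectories (`ai`, `compute`, `storage`, `tanstack`, `vercel`, `workflow`) together with the reworked `package.json` (+98 -41) suggest that 1.x splits the SDK into subpath entry points. A minimal sketch of what that implies for imports; only `cencori` and `cencori/vercel` are confirmed by the README diff below, the `cencori/ai` subpath is an inferred assumption:

```typescript
// Assumption: package.json "exports" maps each dist/ folder to a subpath.
import { Cencori } from 'cencori';                 // root client (confirmed below)
import { cencori } from 'cencori/vercel';          // Vercel AI SDK provider (confirmed below)
import type { StreamChunk } from 'cencori/ai';     // inferred from dist/ai/index.d.ts
```
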
package/README.md
CHANGED

````diff
@@ -1,15 +1,11 @@
 # Cencori

-
+**The unified infrastructure layer for AI applications.**

-
+One SDK. Every AI primitive. Always secure. Always logged.

 ```bash
 npm install cencori
-# or
-yarn add cencori
-# or
-pnpm add cencori
 ```

 ## Quick Start
@@ -17,176 +13,144 @@ pnpm add cencori
 ```typescript
 import { Cencori } from 'cencori';

-const cencori = new Cencori({
-  apiKey: process.env.CENCORI_API_KEY
+const cencori = new Cencori({
+  apiKey: process.env.CENCORI_API_KEY
 });

+// AI Gateway - Chat with any model
 const response = await cencori.ai.chat({
-
-
-  ]
+  model: 'gpt-4o',
+  messages: [{ role: 'user', content: 'Hello!' }]
 });

 console.log(response.content);
 ```

-##
-
-Get your API key from the [Cencori Dashboard](https://cencori.com/dashboard):
-
-1. Create a project
-2. Navigate to Settings → API tab
-3. Generate a new key:
-   - **Secret key (`csk_`)** - For server-side use only
-   - **Publishable key (`cpk_`)** - Safe for browser use (requires domain whitelisting)
-4. Copy and store it securely
-
-## API Reference
-
-### Cencori
-
-Initialize the SDK client.
-
-```typescript
-import { Cencori } from 'cencori';
-
-const cencori = new Cencori({
-  apiKey: 'csk_xxx', // Secret key for server-side
-  baseUrl: 'https://cencori.com' // Optional, defaults to production
-});
-```
-
-### AI Module
+## Products

-
+| Product | Status | Description |
+|---------|--------|-------------|
+| **AI Gateway** | ✅ Available | Multi-provider routing, security, observability |
+| **Compute** | 🚧 Coming Soon | Serverless functions, GPU access |
+| **Workflow** | 🚧 Coming Soon | Visual AI pipelines, orchestration |
+| **Storage** | 🚧 Coming Soon | Vector database, knowledge base, RAG |
+| **Integration** | ✅ Available | SDKs, Vercel AI, TanStack |

-
+## AI Gateway

-
-- `messages`: Array of message objects with `role` ('system' | 'user' | 'assistant') and `content`
-- `model`: Optional AI model (defaults to 'gemini-2.5-flash')
-- `temperature`: Optional temperature (0-1)
-- `maxTokens`: Optional max tokens for response
-- `userId`: Optional user ID for rate limiting
-
-**Example:**
+### Chat Completions

 ```typescript
 const response = await cencori.ai.chat({
+  model: 'gpt-4o', // or 'claude-3-opus', 'gemini-1.5-pro', etc.
   messages: [
-    { role: '
+    { role: 'system', content: 'You are a helpful assistant.' },
+    { role: 'user', content: 'What is the capital of France?' }
   ],
-
-
+  temperature: 0.7,
+  maxTokens: 1000
 });

 console.log(response.content);
-console.log(response.usage); //
-console.log(response.cost_usd); // Cost in USD
+console.log(response.usage); // { promptTokens, completionTokens, totalTokens }
 ```

-
-
-Stream a chat response token-by-token.
-
-**Example:**
+### Embeddings

 ```typescript
-const
-
-
-  ],
-  model: 'gpt-4o'
+const response = await cencori.ai.embeddings({
+  model: 'text-embedding-3-small',
+  input: 'Hello world'
+});

-
-  process.stdout.write(chunk.delta);
-}
+console.log(response.embeddings[0]); // [0.1, 0.2, ...]
 ```

-##
+## Framework Integrations

-
+### Vercel AI SDK

 ```typescript
-import {
-
-
-
-
-
-
-
-
-  if (error instanceof AuthenticationError) {
-    console.error('Invalid API key');
-  } else if (error instanceof RateLimitError) {
-    console.error('Too many requests, please slow down');
-  } else if (error instanceof SafetyError) {
-    console.error('Content blocked:', error.reasons);
-  }
+import { cencori } from 'cencori/vercel';
+import { streamText } from 'ai';
+
+const result = await streamText({
+  model: cencori('gpt-4o'),
+  messages: [{ role: 'user', content: 'Hello!' }]
+});
+
+for await (const chunk of result.textStream) {
+  console.log(chunk);
 }
 ```

-
-
-The SDK is written in TypeScript and includes full type definitions.
+### With React/Next.js

 ```typescript
-import
+import { cencori } from 'cencori/vercel';
+import { useChat } from 'ai/react';
+
+export default function Chat() {
+  const { messages, input, handleInputChange, handleSubmit } = useChat({
+    api: '/api/chat'
+  });
+
+  return (
+    <div>
+      {messages.map(m => <div key={m.id}>{m.content}</div>)}
+      <form onSubmit={handleSubmit}>
+        <input value={input} onChange={handleInputChange} />
+      </form>
+    </div>
+  );
+}
 ```

-##
-
-- ✅ Full TypeScript support with type definitions
-- ✅ Built-in authentication
-- ✅ Automatic retry logic with exponential backoff
-- ✅ Custom error classes
-- ✅ Content safety filtering (PII, prompt injection, harmful content)
-- ✅ Rate limiting protection
-- ✅ Streaming support with `chatStream()`
-
-## Supported Models
+## Coming Soon

-
-|----------|--------|
-| OpenAI | `gpt-4o`, `gpt-4-turbo`, `gpt-3.5-turbo` |
-| Anthropic | `claude-3-opus`, `claude-3-sonnet`, `claude-3-haiku` |
-| Google | `gemini-2.5-flash`, `gemini-2.0-flash` |
+### Compute

-
+```typescript
+// 🚧 Coming Soon
+await cencori.compute.run('my-function', {
+  input: { data: 'hello' }
+});
+```

-
+### Workflow

 ```typescript
-
-
-
+// 🚧 Coming Soon
+await cencori.workflow.trigger('data-enrichment', {
+  data: { userId: '123' }
 });
 ```

-
-
-For browser/client-side usage, use publishable keys:
+### Storage

 ```typescript
-//
-const
-
+// 🚧 Coming Soon
+const results = await cencori.storage.vectors.search('query', {
+  limit: 5
 });

-
-  messages: [{ role: 'user', content: 'Hello!' }]
-});
+await cencori.storage.knowledge.query('What is our refund policy?');
 ```

-##
+## Why Cencori?
+
+- **🛡️ Security Built-in**: PII detection, content filtering, jailbreak protection
+- **📊 Observability**: Every request logged, every token tracked
+- **💰 Cost Control**: Budget alerts, spend caps, per-request costing
+- **🔄 Multi-Provider**: Switch between OpenAI, Anthropic, Google, etc.
+- **⚡ One SDK**: AI, compute, storage, workflows - unified
+
+## Links

--
--
--
+- [Documentation](https://cencori.com/docs)
+- [Dashboard](https://cencori.com/dashboard)
+- [GitHub](https://github.com/cencori/cencori)

 ## License

-MIT
+MIT
````
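
The README's `useChat` example posts to `/api/chat` but does not show that route. A minimal sketch of a matching Next.js App Router handler, assuming the `cencori/vercel` provider plugs into `streamText` exactly as in the README example; `toDataStreamResponse()` is a Vercel AI SDK helper, not part of cencori:

```typescript
// app/api/chat/route.ts -- hypothetical companion to the useChat example above.
import { streamText } from 'ai';
import { cencori } from 'cencori/vercel';

export async function POST(req: Request) {
  const { messages } = await req.json();
  const result = await streamText({
    model: cencori('gpt-4o'),   // provider call mirrors the README example
    messages,
  });
  return result.toDataStreamResponse(); // streams tokens back to useChat
}
```
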
package/dist/ai/index.d.mts
ADDED

```diff
@@ -0,0 +1,67 @@
+import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, c as CompletionRequest, E as EmbeddingRequest, d as EmbeddingResponse } from '../types-DIuz6XWN.mjs';
+
+/**
+ * AI Gateway - Chat, Completions, Embeddings, and Streaming
+ *
+ * @example
+ * const response = await cencori.ai.chat({
+ *   model: 'gpt-4o',
+ *   messages: [{ role: 'user', content: 'Hello!' }]
+ * });
+ */
+
+/**
+ * Stream chunk from chat stream
+ */
+interface StreamChunk {
+    delta: string;
+    finish_reason?: 'stop' | 'length' | 'content_filter' | 'error';
+    /** Error message if the stream encountered an error */
+    error?: string;
+}
+declare class AINamespace {
+    private config;
+    constructor(config: Required<CencoriConfig>);
+    /**
+     * Create a chat completion
+     *
+     * @example
+     * const response = await cencori.ai.chat({
+     *   model: 'gpt-4o',
+     *   messages: [{ role: 'user', content: 'Hello!' }]
+     * });
+     */
+    chat(request: ChatRequest): Promise<ChatResponse>;
+    /**
+     * Stream chat completions
+     * Returns an async generator that yields chunks as they arrive
+     *
+     * @example
+     * for await (const chunk of cencori.ai.chatStream({ model: 'gpt-4o', messages })) {
+     *   process.stdout.write(chunk.delta);
+     * }
+     */
+    chatStream(request: ChatRequest): AsyncGenerator<StreamChunk, void, unknown>;
+    /**
+     * Create a text completion
+     *
+     * @example
+     * const response = await cencori.ai.completions({
+     *   model: 'gpt-4o',
+     *   prompt: 'Write a haiku about coding'
+     * });
+     */
+    completions(request: CompletionRequest): Promise<ChatResponse>;
+    /**
+     * Create embeddings
+     *
+     * @example
+     * const response = await cencori.ai.embeddings({
+     *   model: 'text-embedding-3-small',
+     *   input: 'Hello world'
+     * });
+     */
+    embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
+}
+
+export { AINamespace, type StreamChunk };
```
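
The declaration above pins down the streaming contract: `chatStream()` yields `StreamChunk` objects whose optional `finish_reason` and `error` fields arrive alongside each `delta`. A consumption sketch based purely on these declared types:

```typescript
import { Cencori } from 'cencori';

const client = new Cencori({ apiKey: process.env.CENCORI_API_KEY! });

for await (const chunk of client.ai.chatStream({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hello!' }],
})) {
  if (chunk.error) throw new Error(chunk.error);  // mid-stream failure surfaced in-band
  process.stdout.write(chunk.delta);              // incremental tokens
  if (chunk.finish_reason === 'content_filter') {
    console.warn('\n[stream stopped by content filter]');
  }
}
```
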
package/dist/ai/index.d.ts
ADDED

```diff
@@ -0,0 +1,67 @@
+import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, c as CompletionRequest, E as EmbeddingRequest, d as EmbeddingResponse } from '../types-DIuz6XWN.js';
+
+/**
+ * AI Gateway - Chat, Completions, Embeddings, and Streaming
+ *
+ * @example
+ * const response = await cencori.ai.chat({
+ *   model: 'gpt-4o',
+ *   messages: [{ role: 'user', content: 'Hello!' }]
+ * });
+ */
+
+/**
+ * Stream chunk from chat stream
+ */
+interface StreamChunk {
+    delta: string;
+    finish_reason?: 'stop' | 'length' | 'content_filter' | 'error';
+    /** Error message if the stream encountered an error */
+    error?: string;
+}
+declare class AINamespace {
+    private config;
+    constructor(config: Required<CencoriConfig>);
+    /**
+     * Create a chat completion
+     *
+     * @example
+     * const response = await cencori.ai.chat({
+     *   model: 'gpt-4o',
+     *   messages: [{ role: 'user', content: 'Hello!' }]
+     * });
+     */
+    chat(request: ChatRequest): Promise<ChatResponse>;
+    /**
+     * Stream chat completions
+     * Returns an async generator that yields chunks as they arrive
+     *
+     * @example
+     * for await (const chunk of cencori.ai.chatStream({ model: 'gpt-4o', messages })) {
+     *   process.stdout.write(chunk.delta);
+     * }
+     */
+    chatStream(request: ChatRequest): AsyncGenerator<StreamChunk, void, unknown>;
+    /**
+     * Create a text completion
+     *
+     * @example
+     * const response = await cencori.ai.completions({
+     *   model: 'gpt-4o',
+     *   prompt: 'Write a haiku about coding'
+     * });
+     */
+    completions(request: CompletionRequest): Promise<ChatResponse>;
+    /**
+     * Create embeddings
+     *
+     * @example
+     * const response = await cencori.ai.embeddings({
+     *   model: 'text-embedding-3-small',
+     *   input: 'Hello world'
+     * });
+     */
+    embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
+}
+
+export { AINamespace, type StreamChunk };
```
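
`EmbeddingResponse` exposes raw vectors (`embeddings: number[][]`), so similarity scoring is left to the caller. A sketch under that contract; the `cosine` helper is local, not part of the SDK, and per the declared examples each call embeds a single `input` string:

```typescript
import { Cencori } from 'cencori';

const cencori = new Cencori({ apiKey: process.env.CENCORI_API_KEY! });

// Local helper, not part of the SDK.
function cosine(a: number[], b: number[]): number {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na * nb) || 1);
}

// One vector per request, read back from embeddings[0].
const [query, doc] = await Promise.all(
  ['What is our refund policy?', 'Refunds are issued within 30 days.'].map(
    async (input) => {
      const res = await cencori.ai.embeddings({
        model: 'text-embedding-3-small',
        input,
      });
      return res.embeddings[0];
    },
  ),
);

console.log(cosine(query, doc)); // similarity score in [-1, 1]
```
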
package/dist/ai/index.js
ADDED

```diff
@@ -0,0 +1,188 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+// src/ai/index.ts
+var ai_exports = {};
+__export(ai_exports, {
+  AINamespace: () => AINamespace
+});
+module.exports = __toCommonJS(ai_exports);
+var AINamespace = class {
+  constructor(config) {
+    this.config = config;
+  }
+  /**
+   * Create a chat completion
+   *
+   * @example
+   * const response = await cencori.ai.chat({
+   *   model: 'gpt-4o',
+   *   messages: [{ role: 'user', content: 'Hello!' }]
+   * });
+   */
+  async chat(request) {
+    const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
+      method: "POST",
+      headers: {
+        "CENCORI_API_KEY": this.config.apiKey,
+        "Content-Type": "application/json",
+        ...this.config.headers
+      },
+      body: JSON.stringify({
+        model: request.model,
+        messages: request.messages,
+        temperature: request.temperature,
+        maxTokens: request.maxTokens,
+        stream: false
+      })
+    });
+    if (!response.ok) {
+      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+    }
+    const data = await response.json();
+    return {
+      id: data.id,
+      model: data.model,
+      content: data.choices?.[0]?.message?.content ?? "",
+      usage: {
+        promptTokens: data.usage?.prompt_tokens ?? 0,
+        completionTokens: data.usage?.completion_tokens ?? 0,
+        totalTokens: data.usage?.total_tokens ?? 0
+      }
+    };
+  }
+  /**
+   * Stream chat completions
+   * Returns an async generator that yields chunks as they arrive
+   *
+   * @example
+   * for await (const chunk of cencori.ai.chatStream({ model: 'gpt-4o', messages })) {
+   *   process.stdout.write(chunk.delta);
+   * }
+   */
+  async *chatStream(request) {
+    const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
+      method: "POST",
+      headers: {
+        "CENCORI_API_KEY": this.config.apiKey,
+        "Content-Type": "application/json",
+        ...this.config.headers
+      },
+      body: JSON.stringify({
+        model: request.model,
+        messages: request.messages,
+        temperature: request.temperature,
+        maxTokens: request.maxTokens,
+        stream: true
+      })
+    });
+    if (!response.ok) {
+      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+    }
+    if (!response.body) {
+      throw new Error("Response body is null");
+    }
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() || "";
+        for (const line of lines) {
+          if (line.trim() === "") continue;
+          if (!line.startsWith("data: ")) continue;
+          const data = line.slice(6);
+          if (data === "[DONE]") {
+            return;
+          }
+          try {
+            const chunk = JSON.parse(data);
+            yield chunk;
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+  /**
+   * Create a text completion
+   *
+   * @example
+   * const response = await cencori.ai.completions({
+   *   model: 'gpt-4o',
+   *   prompt: 'Write a haiku about coding'
+   * });
+   */
+  async completions(request) {
+    return this.chat({
+      model: request.model,
+      messages: [{ role: "user", content: request.prompt }],
+      temperature: request.temperature,
+      maxTokens: request.maxTokens
+    });
+  }
+  /**
+   * Create embeddings
+   *
+   * @example
+   * const response = await cencori.ai.embeddings({
+   *   model: 'text-embedding-3-small',
+   *   input: 'Hello world'
+   * });
+   */
+  async embeddings(request) {
+    const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {
+      method: "POST",
+      headers: {
+        "CENCORI_API_KEY": this.config.apiKey,
+        "Content-Type": "application/json",
+        ...this.config.headers
+      },
+      body: JSON.stringify({
+        model: request.model,
+        input: request.input
+      })
+    });
+    if (!response.ok) {
+      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+    }
+    const data = await response.json();
+    return {
+      model: data.model,
+      embeddings: data.data?.map((d) => d.embedding) ?? [],
+      usage: {
+        totalTokens: data.usage?.total_tokens ?? 0
+      }
+    };
+  }
+};
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  AINamespace
+});
+//# sourceMappingURL=index.js.map
```
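
The `chatStream()` implementation above parses a newline-delimited SSE body: every non-empty line must start with `data: ` and carry a JSON `StreamChunk`, and the literal `data: [DONE]` ends the generator. A mock of the wire format that parser accepts, with illustrative payloads only:

```typescript
// What /api/ai/chat is expected to send when stream: true.
const encoder = new TextEncoder();
const body = new ReadableStream<Uint8Array>({
  start(controller) {
    controller.enqueue(encoder.encode('data: {"delta":"Hel"}\n'));
    controller.enqueue(encoder.encode('data: {"delta":"lo!","finish_reason":"stop"}\n'));
    controller.enqueue(encoder.encode('data: [DONE]\n')); // terminates chatStream()
    controller.close();
  },
});
```
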
package/dist/ai/index.js.map
ADDED

```diff
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/ai/index.ts"],"sourcesContent":["…full src/ai/index.ts source inlined…"],"mappings":"…","names":[]}
```