cencori 0.3.1 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +97 -81
- package/dist/ai/index.d.mts +48 -0
- package/dist/ai/index.d.ts +48 -0
- package/dist/ai/index.js +128 -0
- package/dist/ai/index.js.map +1 -0
- package/dist/ai/index.mjs +103 -0
- package/dist/ai/index.mjs.map +1 -0
- package/dist/compute/index.d.mts +38 -0
- package/dist/compute/index.d.ts +38 -0
- package/dist/compute/index.js +62 -0
- package/dist/compute/index.js.map +1 -0
- package/dist/compute/index.mjs +37 -0
- package/dist/compute/index.mjs.map +1 -0
- package/dist/index.d.mts +89 -72
- package/dist/index.d.ts +89 -72
- package/dist/index.js +632 -165
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +623 -156
- package/dist/index.mjs.map +1 -1
- package/dist/storage/index.d.mts +82 -0
- package/dist/storage/index.d.ts +82 -0
- package/dist/storage/index.js +122 -0
- package/dist/storage/index.js.map +1 -0
- package/dist/storage/index.mjs +97 -0
- package/dist/storage/index.mjs.map +1 -0
- package/dist/tanstack/index.d.mts +95 -0
- package/dist/tanstack/index.d.ts +95 -0
- package/dist/tanstack/index.js +290 -0
- package/dist/tanstack/index.js.map +1 -0
- package/dist/tanstack/index.mjs +262 -0
- package/dist/tanstack/index.mjs.map +1 -0
- package/dist/types-Be_rWV2h.d.mts +70 -0
- package/dist/types-Be_rWV2h.d.ts +70 -0
- package/dist/vercel/index.d.mts +126 -0
- package/dist/vercel/index.d.ts +126 -0
- package/dist/vercel/index.js +373 -0
- package/dist/vercel/index.js.map +1 -0
- package/dist/vercel/index.mjs +344 -0
- package/dist/vercel/index.mjs.map +1 -0
- package/dist/workflow/index.d.mts +44 -0
- package/dist/workflow/index.d.ts +44 -0
- package/dist/workflow/index.js +72 -0
- package/dist/workflow/index.js.map +1 -0
- package/dist/workflow/index.mjs +47 -0
- package/dist/workflow/index.mjs.map +1 -0
- package/package.json +98 -41
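
The new `dist/ai`, `dist/compute`, `dist/storage`, `dist/tanstack`, `dist/vercel`, and `dist/workflow` bundles line up with per-product subpath entry points. Only `cencori/vercel` is confirmed by the README diff below; the other specifiers are assumptions inferred from the build layout (the expanded package.json `exports` map itself is not shown in this diff). A sketch of what importing them would look like:

```typescript
// Entry points implied by the 1.0.0 dist/ layout.
import { Cencori } from 'cencori';                   // dist/index (confirmed by the README below)
import { cencori } from 'cencori/vercel';            // dist/vercel (confirmed by the README below)
import { AINamespace } from 'cencori/ai';            // dist/ai (assumed specifier)
import { ComputeNamespace } from 'cencori/compute';  // dist/compute (assumed specifier)
```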
package/README.md
CHANGED
````diff
@@ -1,140 +1,156 @@
 # Cencori
 
-
+**The unified infrastructure layer for AI applications.**
 
-
+One SDK. Every AI primitive. Always secure. Always logged.
 
 ```bash
 npm install cencori
-# or
-yarn add cencori
 ```
 
 ## Quick Start
 
 ```typescript
-import {
+import { Cencori } from 'cencori';
 
-const cencori = new
-apiKey: process.env.CENCORI_API_KEY
+const cencori = new Cencori({
+  apiKey: process.env.CENCORI_API_KEY
 });
 
+// AI Gateway - Chat with any model
 const response = await cencori.ai.chat({
-
-
-]
+  model: 'gpt-4o',
+  messages: [{ role: 'user', content: 'Hello!' }]
 });
 
 console.log(response.content);
 ```
 
-##
-
-Get your API key from the [Cencori Dashboard](https://cencori.com/dashboard):
+## Products
 
-
-
-
-
+| Product | Status | Description |
+|---------|--------|-------------|
+| **AI Gateway** | ✅ Available | Multi-provider routing, security, observability |
+| **Compute** | 🚧 Coming Soon | Serverless functions, GPU access |
+| **Workflow** | 🚧 Coming Soon | Visual AI pipelines, orchestration |
+| **Storage** | 🚧 Coming Soon | Vector database, knowledge base, RAG |
+| **Integration** | ✅ Available | SDKs, Vercel AI, TanStack |
 
-##
+## AI Gateway
 
-###
-
-Initialize the SDK client.
+### Chat Completions
 
 ```typescript
-const
-
-
+const response = await cencori.ai.chat({
+  model: 'gpt-4o', // or 'claude-3-opus', 'gemini-1.5-pro', etc.
+  messages: [
+    { role: 'system', content: 'You are a helpful assistant.' },
+    { role: 'user', content: 'What is the capital of France?' }
+  ],
+  temperature: 0.7,
+  maxTokens: 1000
 });
+
+console.log(response.content);
+console.log(response.usage); // { promptTokens, completionTokens, totalTokens }
 ```
 
-###
+### Embeddings
 
-
+```typescript
+const response = await cencori.ai.embeddings({
+  model: 'text-embedding-3-small',
+  input: 'Hello world'
+});
 
-
+console.log(response.embeddings[0]); // [0.1, 0.2, ...]
+```
 
-
-- `messages`: Array of message objects with `role` ('user' | 'assistant') and `content`
-- `model`: Optional AI model (defaults to 'gemini-1.5-flash')
-- `temperature`: Optional temperature (0-1)
-- `maxOutputTokens`: Optional max tokens for response
+## Framework Integrations
 
-
+### Vercel AI SDK
 
 ```typescript
-
-
-
-
-
+import { cencori } from 'cencori/vercel';
+import { streamText } from 'ai';
+
+const result = await streamText({
+  model: cencori('gpt-4o'),
+  messages: [{ role: 'user', content: 'Hello!' }]
 });
 
-
-console.log(
+for await (const chunk of result.textStream) {
+  console.log(chunk);
+}
 ```
 
-
-
-The SDK includes custom error classes for common scenarios:
+### With React/Next.js
 
 ```typescript
-import {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
+import { cencori } from 'cencori/vercel';
+import { useChat } from 'ai/react';
+
+export default function Chat() {
+  const { messages, input, handleInputChange, handleSubmit } = useChat({
+    api: '/api/chat'
+  });
+
+  return (
+    <div>
+      {messages.map(m => <div key={m.id}>{m.content}</div>)}
+      <form onSubmit={handleSubmit}>
+        <input value={input} onChange={handleInputChange} />
+      </form>
+    </div>
+  );
 }
 ```
 
-##
+## Coming Soon
 
-
+### Compute
 
 ```typescript
-
+// 🚧 Coming Soon
+await cencori.compute.run('my-function', {
+  input: { data: 'hello' }
+});
 ```
 
-
-
-- Full TypeScript support with type definitions
-- Built-in authentication
-- Automatic retry logic with exponential backoff
-- Custom error classes
-- Content safety filtering
-- Rate limiting protection
+### Workflow
 
-
+```typescript
+// 🚧 Coming Soon
+await cencori.workflow.trigger('data-enrichment', {
+  data: { userId: '123' }
+});
+```
 
-
+### Storage
 
 ```typescript
-
-
-
+// 🚧 Coming Soon
+const results = await cencori.storage.vectors.search('query', {
+  limit: 5
 });
+
+await cencori.storage.knowledge.query('What is our refund policy?');
 ```
 
-##
+## Why Cencori?
+
+- **🛡️ Security Built-in**: PII detection, content filtering, jailbreak protection
+- **📊 Observability**: Every request logged, every token tracked
+- **💰 Cost Control**: Budget alerts, spend caps, per-request costing
+- **🔄 Multi-Provider**: Switch between OpenAI, Anthropic, Google, etc.
+- **⚡ One SDK**: AI, compute, storage, workflows - unified
+
+## Links
 
--
--
--
+- [Documentation](https://cencori.com/docs)
+- [Dashboard](https://cencori.com/dashboard)
+- [GitHub](https://github.com/cencori/cencori)
 
 ## License
 
-MIT
+MIT
````
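The removed option docs above describe the 0.3.1 chat API (`maxOutputTokens`, default model `'gemini-1.5-flash'`); 1.0.0 documents `maxTokens` and an explicit `model`. A hedged migration sketch based solely on those README changes, not verified against the 0.3.1 code:

```typescript
// 0.3.1 (per the removed docs): model defaulted to 'gemini-1.5-flash'
// and the response cap was named maxOutputTokens.
// 1.0.0 equivalent: pass the model explicitly and rename the option.
const response = await cencori.ai.chat({
  model: 'gemini-1.5-flash',                       // the old implicit default, now explicit
  messages: [{ role: 'user', content: 'Hello!' }],
  temperature: 0.7,
  maxTokens: 1000                                  // formerly maxOutputTokens
});
```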
package/dist/ai/index.d.mts
ADDED
```typescript
import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from '../types-Be_rWV2h.mjs';

/**
 * AI Gateway - Chat, Completions, and Embeddings
 *
 * @example
 * const response = await cencori.ai.chat({
 *   model: 'gpt-4o',
 *   messages: [{ role: 'user', content: 'Hello!' }]
 * });
 */

declare class AINamespace {
    private config;
    constructor(config: Required<CencoriConfig>);
    /**
     * Create a chat completion
     *
     * @example
     * const response = await cencori.ai.chat({
     *   model: 'gpt-4o',
     *   messages: [{ role: 'user', content: 'Hello!' }]
     * });
     */
    chat(request: ChatRequest): Promise<ChatResponse>;
    /**
     * Create a text completion
     *
     * @example
     * const response = await cencori.ai.completions({
     *   model: 'gpt-4o',
     *   prompt: 'Write a haiku about coding'
     * });
     */
    completions(request: CompletionRequest): Promise<ChatResponse>;
    /**
     * Create embeddings
     *
     * @example
     * const response = await cencori.ai.embeddings({
     *   model: 'text-embedding-3-small',
     *   input: 'Hello world'
     * });
     */
    embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
}

export { AINamespace };
```
package/dist/ai/index.d.ts
ADDED
```typescript
import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from '../types-Be_rWV2h.js';

/**
 * AI Gateway - Chat, Completions, and Embeddings
 *
 * @example
 * const response = await cencori.ai.chat({
 *   model: 'gpt-4o',
 *   messages: [{ role: 'user', content: 'Hello!' }]
 * });
 */

declare class AINamespace {
    private config;
    constructor(config: Required<CencoriConfig>);
    /**
     * Create a chat completion
     *
     * @example
     * const response = await cencori.ai.chat({
     *   model: 'gpt-4o',
     *   messages: [{ role: 'user', content: 'Hello!' }]
     * });
     */
    chat(request: ChatRequest): Promise<ChatResponse>;
    /**
     * Create a text completion
     *
     * @example
     * const response = await cencori.ai.completions({
     *   model: 'gpt-4o',
     *   prompt: 'Write a haiku about coding'
     * });
     */
    completions(request: CompletionRequest): Promise<ChatResponse>;
    /**
     * Create embeddings
     *
     * @example
     * const response = await cencori.ai.embeddings({
     *   model: 'text-embedding-3-small',
     *   input: 'Hello world'
     * });
     */
    embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
}

export { AINamespace };
```
package/dist/ai/index.js
ADDED
```javascript
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/ai/index.ts
var ai_exports = {};
__export(ai_exports, {
  AINamespace: () => AINamespace
});
module.exports = __toCommonJS(ai_exports);
var AINamespace = class {
  constructor(config) {
    this.config = config;
  }
  /**
   * Create a chat completion
   *
   * @example
   * const response = await cencori.ai.chat({
   *   model: 'gpt-4o',
   *   messages: [{ role: 'user', content: 'Hello!' }]
   * });
   */
  async chat(request) {
    const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${this.config.apiKey}`,
        "Content-Type": "application/json",
        ...this.config.headers
      },
      body: JSON.stringify({
        model: request.model,
        messages: request.messages,
        temperature: request.temperature,
        max_tokens: request.maxTokens,
        stream: request.stream ?? false
      })
    });
    if (!response.ok) {
      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
    }
    const data = await response.json();
    return {
      id: data.id,
      model: data.model,
      content: data.choices?.[0]?.message?.content ?? "",
      usage: {
        promptTokens: data.usage?.prompt_tokens ?? 0,
        completionTokens: data.usage?.completion_tokens ?? 0,
        totalTokens: data.usage?.total_tokens ?? 0
      }
    };
  }
  /**
   * Create a text completion
   *
   * @example
   * const response = await cencori.ai.completions({
   *   model: 'gpt-4o',
   *   prompt: 'Write a haiku about coding'
   * });
   */
  async completions(request) {
    return this.chat({
      model: request.model,
      messages: [{ role: "user", content: request.prompt }],
      temperature: request.temperature,
      maxTokens: request.maxTokens
    });
  }
  /**
   * Create embeddings
   *
   * @example
   * const response = await cencori.ai.embeddings({
   *   model: 'text-embedding-3-small',
   *   input: 'Hello world'
   * });
   */
  async embeddings(request) {
    const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${this.config.apiKey}`,
        "Content-Type": "application/json",
        ...this.config.headers
      },
      body: JSON.stringify({
        model: request.model,
        input: request.input
      })
    });
    if (!response.ok) {
      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
    }
    const data = await response.json();
    return {
      model: data.model,
      embeddings: data.data?.map((d) => d.embedding) ?? [],
      usage: {
        totalTokens: data.usage?.total_tokens ?? 0
      }
    };
  }
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  AINamespace
});
//# sourceMappingURL=index.js.map
```
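Note the error path: a non-OK response surfaces as a plain `Error` whose message starts with `Cencori API error:`; no custom error class appears in this module (the 0.3.1 README advertised custom error classes, but none is exported here). A sketch of how a caller might distinguish it, with message-prefix matching as just one option:

```typescript
try {
  const res = await cencori.ai.chat({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Hello!' }]
  });
  console.log(res.usage.totalTokens);
} catch (err) {
  // Matching on the message prefix is the only hook this build provides.
  if (err instanceof Error && err.message.startsWith('Cencori API error:')) {
    console.error('Gateway rejected the request:', err.message);
  } else {
    throw err; // network failures etc. propagate unchanged
  }
}
```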
package/dist/ai/index.js.map
ADDED
```json
{"version":3,"sources":["../../src/ai/index.ts"],"sourcesContent":["/**\n * AI Gateway - Chat, Completions, and Embeddings\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n\nimport type {\n CencoriConfig,\n ChatRequest,\n ChatResponse,\n CompletionRequest,\n EmbeddingRequest,\n EmbeddingResponse\n} from '../types';\n\n// API Response types\ninterface OpenAIChatResponse {\n id: string;\n model: string;\n choices?: Array<{\n message?: {\n content?: string;\n };\n }>;\n usage?: {\n prompt_tokens?: number;\n completion_tokens?: number;\n total_tokens?: number;\n };\n}\n\ninterface OpenAIEmbeddingResponse {\n model: string;\n data?: Array<{\n embedding: number[];\n }>;\n usage?: {\n total_tokens?: number;\n };\n}\n\nexport class AINamespace {\n private config: Required<CencoriConfig>;\n\n constructor(config: Required<CencoriConfig>) {\n this.config = config;\n }\n\n /**\n * Create a chat completion\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n async chat(request: ChatRequest): Promise<ChatResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n messages: request.messages,\n temperature: request.temperature,\n max_tokens: request.maxTokens,\n stream: request.stream ?? false,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIChatResponse;\n\n return {\n id: data.id,\n model: data.model,\n content: data.choices?.[0]?.message?.content ?? '',\n usage: {\n promptTokens: data.usage?.prompt_tokens ?? 0,\n completionTokens: data.usage?.completion_tokens ?? 0,\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n\n /**\n * Create a text completion\n * \n * @example\n * const response = await cencori.ai.completions({\n * model: 'gpt-4o',\n * prompt: 'Write a haiku about coding'\n * });\n */\n async completions(request: CompletionRequest): Promise<ChatResponse> {\n // Convert to chat format internally\n return this.chat({\n model: request.model,\n messages: [{ role: 'user', content: request.prompt }],\n temperature: request.temperature,\n maxTokens: request.maxTokens,\n });\n }\n\n /**\n * Create embeddings\n * \n * @example\n * const response = await cencori.ai.embeddings({\n * model: 'text-embedding-3-small',\n * input: 'Hello world'\n * });\n */\n async embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n input: request.input,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIEmbeddingResponse;\n\n return {\n model: data.model,\n embeddings: data.data?.map((d) => d.embedding) ?? [],\n usage: {\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AA6CO,IAAM,cAAN,MAAkB;AAAA,EAGrB,YAAY,QAAiC;AACzC,SAAK,SAAS;AAAA,EAClB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAK,SAA6C;AACpD,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,4BAA4B;AAAA,MAC3E,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,UAAU,QAAQ;AAAA,QAClB,aAAa,QAAQ;AAAA,QACrB,YAAY,QAAQ;AAAA,QACpB,QAAQ,QAAQ,UAAU;AAAA,MAC9B,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,IAAI,KAAK;AAAA,MACT,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,UAAU,CAAC,GAAG,SAAS,WAAW;AAAA,MAChD,OAAO;AAAA,QACH,cAAc,KAAK,OAAO,iBAAiB;AAAA,QAC3C,kBAAkB,KAAK,OAAO,qBAAqB;AAAA,QACnD,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,YAAY,SAAmD;AAEjE,WAAO,KAAK,KAAK;AAAA,MACb,OAAO,QAAQ;AAAA,MACf,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,QAAQ,OAAO,CAAC;AAAA,MACpD,aAAa,QAAQ;AAAA,MACrB,WAAW,QAAQ;AAAA,IACvB,CAAC;AAAA,EACL;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,WAAW,SAAuD;AACpE,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,sBAAsB;AAAA,MACrE,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,OAAO,QAAQ;AAAA,MACnB,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,OAAO,KAAK;AAAA,MACZ,YAAY,KAAK,MAAM,IAAI,CAAC,MAAM,EAAE,SAAS,KAAK,CAAC;AAAA,MACnD,OAAO;AAAA,QACH,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AACJ;","names":[]}
```
package/dist/ai/index.mjs
ADDED
```javascript
// src/ai/index.ts
var AINamespace = class {
  constructor(config) {
    this.config = config;
  }
  /**
   * Create a chat completion
   *
   * @example
   * const response = await cencori.ai.chat({
   *   model: 'gpt-4o',
   *   messages: [{ role: 'user', content: 'Hello!' }]
   * });
   */
  async chat(request) {
    const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${this.config.apiKey}`,
        "Content-Type": "application/json",
        ...this.config.headers
      },
      body: JSON.stringify({
        model: request.model,
        messages: request.messages,
        temperature: request.temperature,
        max_tokens: request.maxTokens,
        stream: request.stream ?? false
      })
    });
    if (!response.ok) {
      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
    }
    const data = await response.json();
    return {
      id: data.id,
      model: data.model,
      content: data.choices?.[0]?.message?.content ?? "",
      usage: {
        promptTokens: data.usage?.prompt_tokens ?? 0,
        completionTokens: data.usage?.completion_tokens ?? 0,
        totalTokens: data.usage?.total_tokens ?? 0
      }
    };
  }
  /**
   * Create a text completion
   *
   * @example
   * const response = await cencori.ai.completions({
   *   model: 'gpt-4o',
   *   prompt: 'Write a haiku about coding'
   * });
   */
  async completions(request) {
    return this.chat({
      model: request.model,
      messages: [{ role: "user", content: request.prompt }],
      temperature: request.temperature,
      maxTokens: request.maxTokens
    });
  }
  /**
   * Create embeddings
   *
   * @example
   * const response = await cencori.ai.embeddings({
   *   model: 'text-embedding-3-small',
   *   input: 'Hello world'
   * });
   */
  async embeddings(request) {
    const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${this.config.apiKey}`,
        "Content-Type": "application/json",
        ...this.config.headers
      },
      body: JSON.stringify({
        model: request.model,
        input: request.input
      })
    });
    if (!response.ok) {
      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
    }
    const data = await response.json();
    return {
      model: data.model,
      embeddings: data.data?.map((d) => d.embedding) ?? [],
      usage: {
        totalTokens: data.usage?.total_tokens ?? 0
      }
    };
  }
};
export {
  AINamespace
};
//# sourceMappingURL=index.mjs.map
```
package/dist/ai/index.mjs.map
ADDED
```json
{"version":3,"sources":["../../src/ai/index.ts"],"sourcesContent":["/**\n * AI Gateway - Chat, Completions, and Embeddings\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n\nimport type {\n CencoriConfig,\n ChatRequest,\n ChatResponse,\n CompletionRequest,\n EmbeddingRequest,\n EmbeddingResponse\n} from '../types';\n\n// API Response types\ninterface OpenAIChatResponse {\n id: string;\n model: string;\n choices?: Array<{\n message?: {\n content?: string;\n };\n }>;\n usage?: {\n prompt_tokens?: number;\n completion_tokens?: number;\n total_tokens?: number;\n };\n}\n\ninterface OpenAIEmbeddingResponse {\n model: string;\n data?: Array<{\n embedding: number[];\n }>;\n usage?: {\n total_tokens?: number;\n };\n}\n\nexport class AINamespace {\n private config: Required<CencoriConfig>;\n\n constructor(config: Required<CencoriConfig>) {\n this.config = config;\n }\n\n /**\n * Create a chat completion\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n async chat(request: ChatRequest): Promise<ChatResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n messages: request.messages,\n temperature: request.temperature,\n max_tokens: request.maxTokens,\n stream: request.stream ?? false,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIChatResponse;\n\n return {\n id: data.id,\n model: data.model,\n content: data.choices?.[0]?.message?.content ?? '',\n usage: {\n promptTokens: data.usage?.prompt_tokens ?? 0,\n completionTokens: data.usage?.completion_tokens ?? 0,\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n\n /**\n * Create a text completion\n * \n * @example\n * const response = await cencori.ai.completions({\n * model: 'gpt-4o',\n * prompt: 'Write a haiku about coding'\n * });\n */\n async completions(request: CompletionRequest): Promise<ChatResponse> {\n // Convert to chat format internally\n return this.chat({\n model: request.model,\n messages: [{ role: 'user', content: request.prompt }],\n temperature: request.temperature,\n maxTokens: request.maxTokens,\n });\n }\n\n /**\n * Create embeddings\n * \n * @example\n * const response = await cencori.ai.embeddings({\n * model: 'text-embedding-3-small',\n * input: 'Hello world'\n * });\n */\n async embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n input: request.input,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIEmbeddingResponse;\n\n return {\n model: data.model,\n embeddings: data.data?.map((d) => d.embedding) ?? [],\n usage: {\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n}\n"],"mappings":";AA6CO,IAAM,cAAN,MAAkB;AAAA,EAGrB,YAAY,QAAiC;AACzC,SAAK,SAAS;AAAA,EAClB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAK,SAA6C;AACpD,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,4BAA4B;AAAA,MAC3E,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,UAAU,QAAQ;AAAA,QAClB,aAAa,QAAQ;AAAA,QACrB,YAAY,QAAQ;AAAA,QACpB,QAAQ,QAAQ,UAAU;AAAA,MAC9B,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,IAAI,KAAK;AAAA,MACT,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,UAAU,CAAC,GAAG,SAAS,WAAW;AAAA,MAChD,OAAO;AAAA,QACH,cAAc,KAAK,OAAO,iBAAiB;AAAA,QAC3C,kBAAkB,KAAK,OAAO,qBAAqB;AAAA,QACnD,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,YAAY,SAAmD;AAEjE,WAAO,KAAK,KAAK;AAAA,MACb,OAAO,QAAQ;AAAA,MACf,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,QAAQ,OAAO,CAAC;AAAA,MACpD,aAAa,QAAQ;AAAA,MACrB,WAAW,QAAQ;AAAA,IACvB,CAAC;AAAA,EACL;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,WAAW,SAAuD;AACpE,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,sBAAsB;AAAA,MACrE,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,OAAO,QAAQ;AAAA,MACnB,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,OAAO,KAAK;AAAA,MACZ,YAAY,KAAK,MAAM,IAAI,CAAC,MAAM,EAAE,SAAS,KAAK,CAAC;AAAA,MACnD,OAAO;AAAA,QACH,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AACJ;","names":[]}
```
package/dist/compute/index.d.mts
ADDED
```typescript
import { f as ComputeRunOptions } from '../types-Be_rWV2h.mjs';

/**
 * Compute Namespace - Serverless Functions & GPU Access
 *
 * 🚧 Coming Soon
 *
 * @example
 * const result = await cencori.compute.run('my-function', {
 *   input: { data: 'hello' }
 * });
 */

declare class ComputeNamespace {
    /**
     * Run a serverless function
     *
     * 🚧 Coming Soon - This feature is not yet available.
     */
    run(functionId: string, options?: ComputeRunOptions): Promise<never>;
    /**
     * Deploy a function
     *
     * 🚧 Coming Soon - This feature is not yet available.
     */
    deploy(config: {
        name: string;
        code: string;
    }): Promise<never>;
    /**
     * List deployed functions
     *
     * 🚧 Coming Soon - This feature is not yet available.
     */
    list(): Promise<never>;
}

export { ComputeNamespace };
```
package/dist/compute/index.d.ts
ADDED
```typescript
import { f as ComputeRunOptions } from '../types-Be_rWV2h.js';

/**
 * Compute Namespace - Serverless Functions & GPU Access
 *
 * 🚧 Coming Soon
 *
 * @example
 * const result = await cencori.compute.run('my-function', {
 *   input: { data: 'hello' }
 * });
 */

declare class ComputeNamespace {
    /**
     * Run a serverless function
     *
     * 🚧 Coming Soon - This feature is not yet available.
     */
    run(functionId: string, options?: ComputeRunOptions): Promise<never>;
    /**
     * Deploy a function
     *
     * 🚧 Coming Soon - This feature is not yet available.
     */
    deploy(config: {
        name: string;
        code: string;
    }): Promise<never>;
    /**
     * List deployed functions
     *
     * 🚧 Coming Soon - This feature is not yet available.
     */
    list(): Promise<never>;
}

export { ComputeNamespace };
```
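
Every method on `ComputeNamespace` is typed `Promise<never>`, meaning a call can never resolve with a value; the natural reading is that these 🚧 Coming Soon stubs reject (or throw) until Compute ships. A defensive sketch under that assumption:

```typescript
// Assumption: the Coming Soon stubs reject at runtime until the feature launches.
try {
  await cencori.compute.run('my-function', { input: { data: 'hello' } });
} catch (err) {
  console.warn('Compute is not available yet:', err);
}
```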