@reverbia/sdk 1.0.0-next.20251113193348 → 1.0.0-next.20251114165311
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{index.cjs → client/index.cjs} +3 -3
- package/dist/{index.d.mts → client/index.d.mts} +3 -234
- package/dist/{index.d.ts → client/index.d.ts} +3 -234
- package/dist/types.gen-DENXHZhp.d.mts +235 -0
- package/dist/types.gen-DENXHZhp.d.ts +235 -0
- package/dist/vercel/index.cjs +86 -0
- package/dist/vercel/index.d.mts +53 -0
- package/dist/vercel/index.d.ts +53 -0
- package/dist/vercel/index.mjs +57 -0
- package/package.json +16 -9
- package/dist/{index.mjs → client/index.mjs} +0 -0

package/dist/{index.cjs → client/index.cjs}
CHANGED
@@ -18,13 +18,13 @@ var __copyProps = (to, from, except, desc) => {
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
 // src/client/index.ts
-var
-__export(
+var client_exports = {};
+__export(client_exports, {
   getHealth: () => getHealth,
   postApiV1ChatCompletions: () => postApiV1ChatCompletions,
   postApiV1Embeddings: () => postApiV1Embeddings
 });
-module.exports = __toCommonJS(
+module.exports = __toCommonJS(client_exports);
 
 // src/client/core/bodySerializer.gen.ts
 var jsonBodySerializer = {
package/dist/{index.d.mts → client/index.d.mts}
CHANGED
@@ -1,236 +1,5 @@
-[lines 1-233 removed: the inline ClientOptions, HandlersHealthResponse, Llmapi*, ResponseErrorResponse, PostApiV1ChatCompletions*, PostApiV1Embeddings*, and GetHealth* declarations, identical to the declarations added in dist/types.gen-DENXHZhp.d.mts, listed in full below]
+import { P as PostApiV1ChatCompletionsData, a as PostApiV1ChatCompletionsResponses, b as PostApiV1ChatCompletionsErrors, c as PostApiV1EmbeddingsData, d as PostApiV1EmbeddingsResponses, e as PostApiV1EmbeddingsErrors, G as GetHealthData, f as GetHealthResponses, g as GetHealthErrors } from '../types.gen-DENXHZhp.mjs';
+export { C as ClientOptions, u as GetHealthError, v as GetHealthResponse, H as HandlersHealthResponse, L as LlmapiChatCompletionRequest, h as LlmapiChatCompletionResponse, i as LlmapiChoice, j as LlmapiEmbeddingData, k as LlmapiEmbeddingExtraFields, l as LlmapiEmbeddingRequest, m as LlmapiEmbeddingResponse, n as LlmapiEmbeddingUsage, o as LlmapiMessage, p as LlmapiRole, q as PostApiV1ChatCompletionsError, r as PostApiV1ChatCompletionsResponse, s as PostApiV1EmbeddingsError, t as PostApiV1EmbeddingsResponse, R as ResponseErrorResponse } from '../types.gen-DENXHZhp.mjs';
 
 type AuthToken = string | undefined;
 interface Auth {
@@ -553,4 +322,4 @@ declare const postApiV1Embeddings: <ThrowOnError extends boolean = false>(option
  */
 declare const getHealth: <ThrowOnError extends boolean = false>(options?: Options<GetHealthData, ThrowOnError>) => RequestResult<GetHealthResponses, GetHealthErrors, ThrowOnError>;
 
-export {
+export { GetHealthData, GetHealthErrors, GetHealthResponses, type Options, PostApiV1ChatCompletionsData, PostApiV1ChatCompletionsErrors, PostApiV1ChatCompletionsResponses, PostApiV1EmbeddingsData, PostApiV1EmbeddingsErrors, PostApiV1EmbeddingsResponses, getHealth, postApiV1ChatCompletions, postApiV1Embeddings };
package/dist/{index.d.ts → client/index.d.ts}
CHANGED
Same diff as index.d.mts above, except that the two added lines import and re-export from '../types.gen-DENXHZhp.js' rather than '../types.gen-DENXHZhp.mjs'.
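
The reshuffle above is layout-only for consumers: the root entry still exposes getHealth, postApiV1ChatCompletions, and postApiV1Embeddings with the same generated types. A minimal sketch of a typed call, assuming the hey-api style result object ({ data, error }) implied by the RequestResult return type; how baseUrl is supplied is not shown in this diff, and the model id is illustrative:

import { postApiV1ChatCompletions } from "@reverbia/sdk";
import type { LlmapiChatCompletionRequest } from "@reverbia/sdk";

// Every field of LlmapiChatCompletionRequest is optional; the 'provider/model'
// format is only documented for embeddings, so this id is an assumption.
const body: LlmapiChatCompletionRequest = {
  model: "openai/gpt-4o",
  messages: [{ role: "user", content: "Hello" }],
  stream: false,
};

const { data, error } = await postApiV1ChatCompletions({ body });
if (error) throw new Error(error.error ?? "chat completion failed");
console.log(data?.choices?.[0]?.message?.content);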
package/dist/types.gen-DENXHZhp.d.mts
ADDED
@@ -0,0 +1,235 @@
+type ClientOptions = {
+  baseUrl: `${string}://${string}` | (string & {});
+};
+type HandlersHealthResponse = {
+  /**
+   * Status indicates the service health status
+   */
+  status?: string;
+  /**
+   * Timestamp is the Unix timestamp of the response
+   */
+  timestamp?: number;
+  /**
+   * Version is the current API version
+   */
+  version?: string;
+};
+type LlmapiChatCompletionRequest = {
+  /**
+   * Messages is the conversation history
+   */
+  messages?: Array<LlmapiMessage>;
+  /**
+   * Model is the model identifier
+   */
+  model?: string;
+  /**
+   * Stream indicates if response should be streamed
+   */
+  stream?: boolean;
+};
+type LlmapiChatCompletionResponse = {
+  /**
+   * Choices contains the completion choices
+   */
+  choices?: Array<LlmapiChoice>;
+  /**
+   * ID is the completion ID
+   */
+  id?: string;
+  /**
+   * Model is the model used
+   */
+  model?: string;
+};
+type LlmapiChoice = {
+  /**
+   * FinishReason indicates why the completion stopped
+   */
+  finish_reason?: string;
+  /**
+   * Index is the choice index
+   */
+  index?: number;
+  message?: LlmapiMessage;
+};
+type LlmapiEmbeddingData = {
+  /**
+   * Embedding vector
+   */
+  embedding?: Array<number>;
+  /**
+   * Index of the embedding
+   */
+  index?: number;
+  /**
+   * Object type identifier
+   */
+  object?: string;
+};
+/**
+ * ExtraFields contains additional metadata
+ */
+type LlmapiEmbeddingExtraFields = {
+  /**
+   * ChunkIndex is the chunk index (0 for single requests)
+   */
+  chunk_index?: number;
+  /**
+   * Latency is the request latency in milliseconds
+   */
+  latency?: number;
+  /**
+   * ModelRequested is the model that was requested
+   */
+  model_requested?: string;
+  /**
+   * Provider is the LLM provider used (e.g., "openai", "anthropic")
+   */
+  provider?: string;
+  /**
+   * RequestType is always "embedding"
+   */
+  request_type?: string;
+};
+type LlmapiEmbeddingRequest = {
+  /**
+   * Dimensions is the number of dimensions the resulting output embeddings should have (optional)
+   */
+  dimensions?: number;
+  /**
+   * EncodingFormat is the format to return the embeddings in (optional: "float" or "base64")
+   */
+  encoding_format?: string;
+  /**
+   * Input text or tokens to embed (can be string, []string, []int, or [][]int)
+   */
+  input?: unknown;
+  /**
+   * Model identifier in 'provider/model' format
+   */
+  model?: string;
+};
+type LlmapiEmbeddingResponse = {
+  /**
+   * Data contains the embeddings
+   */
+  data?: Array<LlmapiEmbeddingData>;
+  extra_fields?: LlmapiEmbeddingExtraFields;
+  /**
+   * Model is the model used
+   */
+  model?: string;
+  /**
+   * Object is always "list"
+   */
+  object?: string;
+  usage?: LlmapiEmbeddingUsage;
+};
+/**
+ * Usage contains token usage information
+ */
+type LlmapiEmbeddingUsage = {
+  /**
+   * PromptTokens is the number of tokens in the prompt
+   */
+  prompt_tokens?: number;
+  /**
+   * TotalTokens is the total number of tokens used
+   */
+  total_tokens?: number;
+};
+/**
+ * Message is the generated message
+ */
+type LlmapiMessage = {
+  /**
+   * Content is the message content
+   */
+  content?: string;
+  role?: LlmapiRole;
+};
+/**
+ * Role is the message role (system, user, assistant)
+ */
+type LlmapiRole = string;
+type ResponseErrorResponse = {
+  error?: string;
+};
+type PostApiV1ChatCompletionsData = {
+  /**
+   * Chat completion request
+   */
+  body: LlmapiChatCompletionRequest;
+  path?: never;
+  query?: never;
+  url: '/api/v1/chat/completions';
+};
+type PostApiV1ChatCompletionsErrors = {
+  /**
+   * Bad Request
+   */
+  400: ResponseErrorResponse;
+  /**
+   * Internal Server Error
+   */
+  500: ResponseErrorResponse;
+};
+type PostApiV1ChatCompletionsError = PostApiV1ChatCompletionsErrors[keyof PostApiV1ChatCompletionsErrors];
+type PostApiV1ChatCompletionsResponses = {
+  /**
+   * OK
+   */
+  200: LlmapiChatCompletionResponse;
+};
+type PostApiV1ChatCompletionsResponse = PostApiV1ChatCompletionsResponses[keyof PostApiV1ChatCompletionsResponses];
+type PostApiV1EmbeddingsData = {
+  /**
+   * Embedding request
+   */
+  body: LlmapiEmbeddingRequest;
+  path?: never;
+  query?: never;
+  url: '/api/v1/embeddings';
+};
+type PostApiV1EmbeddingsErrors = {
+  /**
+   * Bad Request
+   */
+  400: ResponseErrorResponse;
+  /**
+   * Internal Server Error
+   */
+  500: ResponseErrorResponse;
+};
+type PostApiV1EmbeddingsError = PostApiV1EmbeddingsErrors[keyof PostApiV1EmbeddingsErrors];
+type PostApiV1EmbeddingsResponses = {
+  /**
+   * OK
+   */
+  200: LlmapiEmbeddingResponse;
+};
+type PostApiV1EmbeddingsResponse = PostApiV1EmbeddingsResponses[keyof PostApiV1EmbeddingsResponses];
+type GetHealthData = {
+  body?: never;
+  path?: never;
+  query?: never;
+  url: '/health';
+};
+type GetHealthErrors = {
+  /**
+   * Internal Server Error
+   */
+  500: ResponseErrorResponse;
+};
+type GetHealthError = GetHealthErrors[keyof GetHealthErrors];
+type GetHealthResponses = {
+  /**
+   * OK
+   */
+  200: HandlersHealthResponse;
+};
+type GetHealthResponse = GetHealthResponses[keyof GetHealthResponses];
+
+export type { ClientOptions as C, GetHealthData as G, HandlersHealthResponse as H, LlmapiChatCompletionRequest as L, PostApiV1ChatCompletionsData as P, ResponseErrorResponse as R, PostApiV1ChatCompletionsResponses as a, PostApiV1ChatCompletionsErrors as b, PostApiV1EmbeddingsData as c, PostApiV1EmbeddingsResponses as d, PostApiV1EmbeddingsErrors as e, GetHealthResponses as f, GetHealthErrors as g, LlmapiChatCompletionResponse as h, LlmapiChoice as i, LlmapiEmbeddingData as j, LlmapiEmbeddingExtraFields as k, LlmapiEmbeddingRequest as l, LlmapiEmbeddingResponse as m, LlmapiEmbeddingUsage as n, LlmapiMessage as o, LlmapiRole as p, PostApiV1ChatCompletionsError as q, PostApiV1ChatCompletionsResponse as r, PostApiV1EmbeddingsError as s, PostApiV1EmbeddingsResponse as t, GetHealthError as u, GetHealthResponse as v };
package/dist/types.gen-DENXHZhp.d.ts
ADDED
@@ -0,0 +1,235 @@
Identical in content to dist/types.gen-DENXHZhp.d.mts above.
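
The single-letter bindings (C, G, H, through v) in the export line above are chunk-level mangled names produced by the bundler so that both declaration flavors can share one generated file; the client entry re-exports them under their public names, so consumer code never sees them. A small sketch using the public names (the embedding model id is illustrative):

import type {
  LlmapiEmbeddingRequest,
  LlmapiEmbeddingUsage,
} from "@reverbia/sdk";

// 'provider/model' format per the generated JSDoc; the exact id is an example.
const request: LlmapiEmbeddingRequest = {
  model: "openai/text-embedding-3-small",
  input: "hello world",
  encoding_format: "float",
};

const usage: LlmapiEmbeddingUsage = { prompt_tokens: 2, total_tokens: 2 };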
package/dist/vercel/index.cjs
ADDED
@@ -0,0 +1,86 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+// src/vercel/index.ts
+var vercel_exports = {};
+__export(vercel_exports, {
+  createAssistantStream: () => createAssistantStream,
+  createErrorStream: () => createErrorStream,
+  mapMessagesToCompletionPayload: () => mapMessagesToCompletionPayload
+});
+module.exports = __toCommonJS(vercel_exports);
+
+// src/vercel/messages.ts
+function mapMessagesToCompletionPayload(messages) {
+  return messages.map((message) => {
+    if (message.role !== "user" && message.role !== "assistant" && message.role !== "system") {
+      return null;
+    }
+    const textParts = message.parts.map((part) => part.type === "text" ? part.text : void 0).filter((part) => Boolean(part && part.trim()));
+    const content = textParts.join("\n\n").trim();
+    if (!content.length) return null;
+    const llmMessage = {
+      role: message.role,
+      content
+    };
+    return llmMessage;
+  }).filter((m) => m !== null);
+}
+
+// src/vercel/streams.ts
+function createAssistantStream(text) {
+  const messageId = crypto.randomUUID();
+  return new ReadableStream({
+    start(controller) {
+      controller.enqueue({
+        type: "text-start",
+        id: messageId
+      });
+      if (text.length > 0) {
+        controller.enqueue({
+          type: "text-delta",
+          id: messageId,
+          delta: text
+        });
+      }
+      controller.enqueue({
+        type: "text-end",
+        id: messageId
+      });
+      controller.close();
+    }
+  });
+}
+function createErrorStream(errorText) {
+  return new ReadableStream({
+    start(controller) {
+      controller.enqueue({
+        type: "error",
+        errorText
+      });
+      controller.close();
+    }
+  });
+}
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  createAssistantStream,
+  createErrorStream,
+  mapMessagesToCompletionPayload
+});
package/dist/vercel/index.d.mts
ADDED
@@ -0,0 +1,53 @@
+import { UIMessage } from 'ai';
+import { o as LlmapiMessage } from '../types.gen-DENXHZhp.mjs';
+
+/**
+ * Converts an array of Vercel AI {@link UIMessage} objects into the
+ * `LlmapiMessage` format that the Portal API expects.
+ *
+ * - Non text-only parts and unsupported roles are ignored.
+ * - Text parts are merged with double newlines, matching the structure that
+ *   `postApiV1ChatCompletions` accepts.
+ *
+ * @param messages The UI layer conversation history received from `createUIMessageStreamResponse`.
+ * @returns A clean array of Portal-ready messages, filtered to user, assistant, and system roles.
+ */
+declare function mapMessagesToCompletionPayload(messages: UIMessage[]): LlmapiMessage[];
+
+type AssistantStreamEvent = {
+  type: "text-start";
+  id: string;
+} | {
+  type: "text-delta";
+  id: string;
+  delta: string;
+} | {
+  type: "text-end";
+  id: string;
+} | {
+  type: "error";
+  errorText: string;
+};
+/**
+ * Creates a `ReadableStream` that emits the sequence of events expected by
+ * Vercel's `createUIMessageStreamResponse` helper for a successful assistant reply.
+ *
+ * The stream emits `text-start`, an optional `text-delta` containing the
+ * provided `text`, and finally `text-end`, allowing Portal completions to be
+ * piped directly into UI components that consume the AI SDK stream contract.
+ *
+ * @param text The assistant response text returned by the Portal API.
+ * @returns A stream ready to be passed to `createUIMessageStreamResponse`.
+ */
+declare function createAssistantStream(text: string): ReadableStream<AssistantStreamEvent>;
+/**
+ * Creates a `ReadableStream` that emits a single `error` event compatible
+ * with the Vercel AI stream contract. This allows Portal API errors to be
+ * surfaced directly in UI components that expect streamed assistant output.
+ *
+ * @param errorText A human-readable error message to display in the UI.
+ * @returns A stream that, when consumed, immediately emits the error event.
+ */
+declare function createErrorStream(errorText: string): ReadableStream<AssistantStreamEvent>;
+
+export { createAssistantStream, createErrorStream, mapMessagesToCompletionPayload };
package/dist/vercel/index.d.ts
ADDED
@@ -0,0 +1,53 @@
Identical in content to dist/vercel/index.d.mts above, except that LlmapiMessage is imported from '../types.gen-DENXHZhp.js' rather than '../types.gen-DENXHZhp.mjs'.
package/dist/vercel/index.mjs
ADDED
@@ -0,0 +1,57 @@
The ESM build of the same source: mapMessagesToCompletionPayload, createAssistantStream, and createErrorStream are identical to the function bodies in dist/vercel/index.cjs above, minus the CommonJS interop preamble, and the file ends with a native export:
+export {
+  createAssistantStream,
+  createErrorStream,
+  mapMessagesToCompletionPayload
+};
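
Taken together, the new vercel entry point is a thin bridge between the Portal chat API and the AI SDK's UI message stream contract. A route-handler sketch, assuming AI SDK 5's createUIMessageStreamResponse accepts the event stream these helpers produce and the hey-api result shape ({ data, error }); the handler path and error handling are illustrative, not part of the package:

import { createUIMessageStreamResponse, type UIMessage } from "ai";
import { postApiV1ChatCompletions } from "@reverbia/sdk";
import {
  createAssistantStream,
  createErrorStream,
  mapMessagesToCompletionPayload,
} from "@reverbia/sdk/vercel";

// Hypothetical Next.js route handler.
export async function POST(request: Request) {
  const { messages }: { messages: UIMessage[] } = await request.json();

  // Drop unsupported roles and non-text parts, merging text parts with "\n\n".
  const payload = mapMessagesToCompletionPayload(messages);

  const { data, error } = await postApiV1ChatCompletions({
    body: { messages: payload, stream: false },
  });

  const text = data?.choices?.[0]?.message?.content;
  const stream =
    text !== undefined
      ? createAssistantStream(text) // emits text-start / text-delta / text-end
      : createErrorStream(error?.error ?? "Portal request failed");

  return createUIMessageStreamResponse({ stream });
}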
package/package.json
CHANGED
@@ -1,16 +1,22 @@
 {
   "name": "@reverbia/sdk",
-  "version": "1.0.0-next.20251113193348",
+  "version": "1.0.0-next.20251114165311",
   "description": "",
-  "main": "./dist/index.cjs",
-  "module": "./dist/index.mjs",
-  "types": "./dist/index.d.ts",
+  "main": "./dist/client/index.cjs",
+  "module": "./dist/client/index.mjs",
+  "types": "./dist/client/index.d.ts",
   "exports": {
     ".": {
-      "types": "./dist/index.d.ts",
-      "import": "./dist/index.mjs",
-      "require": "./dist/index.cjs",
-      "default": "./dist/index.cjs"
+      "types": "./dist/client/index.d.ts",
+      "import": "./dist/client/index.mjs",
+      "require": "./dist/client/index.cjs",
+      "default": "./dist/client/index.cjs"
+    },
+    "./vercel": {
+      "types": "./dist/vercel/index.d.ts",
+      "import": "./dist/vercel/index.mjs",
+      "require": "./dist/vercel/index.cjs",
+      "default": "./dist/vercel/index.cjs"
     }
   },
   "files": [
@@ -35,7 +41,8 @@
   },
   "homepage": "https://github.com/zeta-chain/ai-sdk#readme",
   "dependencies": {
-    "@reverbia/portal": "^1.0.0-next.20251113192414"
+    "@reverbia/portal": "^1.0.0-next.20251113192414",
+    "ai": "5.0.93"
   },
   "devDependencies": {
     "@hey-api/openapi-ts": "0.87.2",

package/dist/{index.mjs → client/index.mjs}
File without changes
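
Net effect of the manifest change: the root entry now resolves to the dist/client build, a new "./vercel" subpath resolves to the dist/vercel build, and "ai" is pinned as a runtime dependency because the vercel declarations import UIMessage from it. Resolution in consumer code:

// Root entry → dist/client/* (same public API, new location on disk).
import { getHealth, postApiV1Embeddings } from "@reverbia/sdk";

// New "./vercel" subpath → dist/vercel/*.
import { createAssistantStream } from "@reverbia/sdk/vercel";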