@gammatech/aijsx 0.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +21 -0
- package/README.md +224 -0
- package/dist/createElement-Q_LxUYf8.d.ts +159 -0
- package/dist/index.d.ts +72 -0
- package/dist/index.js +859 -0
- package/dist/jsx-runtime.d.ts +29 -0
- package/dist/jsx-runtime.js +63 -0
- package/package.json +75 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,859 @@
|
|
|
1
|
+
// esbuild CommonJS/ESM interop helpers (bundler-generated); do not edit by hand.
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines lazy, enumerable getters on `target` for every key in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except`
// and keys already present on `to`; preserves source enumerability.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps a required CommonJS module so it can be consumed with ESM semantics.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
  // If the importer is in node compatibility mode or this is not an ESM
  // file that has been converted to a CommonJS file using a Babel-
  // compatible transform (i.e. "__esModule" has not been set), then set
  // "default" to the CommonJS "module.exports" for node compatibility.
  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
  mod
));
// Marks an exports object as an ES module and copies the module's bindings.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
28
|
+
|
|
29
|
+
// src/index.ts
|
|
30
|
+
var src_exports = {};
|
|
31
|
+
__export(src_exports, {
|
|
32
|
+
AIFragment: () => AIFragment,
|
|
33
|
+
AnthropicChatCompletion: () => AnthropicChatCompletion,
|
|
34
|
+
AnthropicClient: () => import_sdk2.default,
|
|
35
|
+
AnthropicClientContext: () => AnthropicClientContext,
|
|
36
|
+
AssistantMessage: () => AssistantMessage,
|
|
37
|
+
BoundLogger: () => BoundLogger,
|
|
38
|
+
ChatCompletionError: () => ChatCompletionError,
|
|
39
|
+
CombinedLogger: () => CombinedLogger,
|
|
40
|
+
ConsoleLogger: () => ConsoleLogger,
|
|
41
|
+
LogImplementation: () => LogImplementation,
|
|
42
|
+
LoggerContext: () => LoggerContext,
|
|
43
|
+
NoopLogImplementation: () => NoopLogImplementation,
|
|
44
|
+
OpenAIChatCompletion: () => OpenAIChatCompletion,
|
|
45
|
+
OpenAIClient: () => import_openai2.OpenAI,
|
|
46
|
+
OpenAIClientContext: () => OpenAIClientContext,
|
|
47
|
+
SystemMessage: () => SystemMessage,
|
|
48
|
+
UserMessage: () => UserMessage,
|
|
49
|
+
attachedContextSymbol: () => attachedContextSymbol,
|
|
50
|
+
childrenToConversationMessage: () => childrenToConversationMessage,
|
|
51
|
+
computeUsage: () => computeUsage,
|
|
52
|
+
countAnthropicTokens: () => import_tokenizer3.countTokens,
|
|
53
|
+
createAIElement: () => createAIElement,
|
|
54
|
+
createContext: () => createContext,
|
|
55
|
+
createRenderContext: () => createRenderContext,
|
|
56
|
+
defaultMaxTokens: () => defaultMaxTokens,
|
|
57
|
+
tokenCountForConversationMessage: () => tokenCountForConversationMessage,
|
|
58
|
+
tokenLimitForChatModel: () => tokenLimitForChatModel,
|
|
59
|
+
tokenizer: () => tokenizer
|
|
60
|
+
});
|
|
61
|
+
module.exports = __toCommonJS(src_exports);
|
|
62
|
+
|
|
63
|
+
// src/chat.tsx
|
|
64
|
+
// System-role chat message; rendering it simply yields its children.
var SystemMessage = ({ children }) => children;
|
|
67
|
+
// User-role chat message; rendering it simply yields its children.
var UserMessage = ({ children }) => children;
|
|
70
|
+
// Assistant-role chat message; rendering it simply yields its children.
var AssistantMessage = ({ children }) => children;
|
|
73
|
+
// Normalizes an element (or array of elements) into conversation-message
// records tagged with their chat role, dispatching on the component's
// function name. Throws for any tag that is not one of the three message
// components.
var childrenToConversationMessage = (c) => {
  const list = Array.isArray(c) ? c : [c];
  return list.map((child) => {
    switch (child.tag.name) {
      case "UserMessage":
        return { type: "user", element: child };
      case "SystemMessage":
        return { type: "system", element: child };
      case "AssistantMessage":
        return { type: "assistant", element: child };
      default:
        throw new Error("OpenAI: unknown message type");
    }
  });
};
|
|
96
|
+
// Sums token counts across messages: user+system tokens count as prompt,
// assistant tokens as completion. Returns the usage breakdown.
var computeUsage = (messages) => {
  let prompt = 0;
  let completion = 0;
  for (const message of messages) {
    if (message.type === "assistant") {
      completion += message.tokens;
    } else if (message.type === "user" || message.type === "system") {
      prompt += message.tokens;
    }
  }
  return { prompt, completion, total: prompt + completion };
};
|
|
105
|
+
// Error thrown when a chat-completion request fails; carries the request
// metadata that was being logged so callers can inspect what was sent.
var ChatCompletionError = class extends Error {
  /**
   * @param {string} message - human-readable failure description
   * @param {object} chatCompletionRequest - the log/request payload in flight
   */
  constructor(message, chatCompletionRequest) {
    super(message);
    // Set the standard Error name so logs and toString() identify this
    // error class (previously left as the generic "Error").
    this.name = "ChatCompletionError";
    this.chatCompletionRequest = chatCompletionRequest;
  }
};
|
|
111
|
+
|
|
112
|
+
// src/createElement.ts
|
|
113
|
+
// Builds an AI element record from a component function, its props, and any
// positional children. A single positional child is stored bare; multiple
// children are stored as an array; zero children leave any `children` from
// `props` untouched.
function createAIElement(tag, props, ...children) {
  const mergedProps = { ...(props ?? {}) };
  if (children.length === 1) {
    mergedProps.children = children[0];
  } else if (children.length > 1) {
    mergedProps.children = children;
  }
  return {
    tag,
    props: mergedProps,
    // Deferred render: invokes the component with the merged props.
    render: (ctx) => tag(mergedProps, ctx)
  };
}
|
|
127
|
+
// True when `value` looks like an AI element: a non-null object carrying a
// `tag` property (as produced by createAIElement).
function isAIElement(value) {
  if (value === null || typeof value !== "object") {
    return false;
  }
  return "tag" in value;
}
|
|
130
|
+
// True for values that render directly to text: strings, numbers, booleans,
// and the empty values null/undefined.
// The original chained `typeof value === "undefined"` AND `value == null`;
// the former is fully subsumed by the loose null check, so the redundant
// branch is removed (behavior unchanged).
function isLiteral(value) {
  return (
    value == null || // captures both null and undefined
    typeof value === "string" ||
    typeof value === "number" ||
    typeof value === "boolean"
  );
}
|
|
134
|
+
// Fragment component: renders straight to its children without a wrapper.
var AIFragment = (props) => {
  return props.children;
};
|
|
137
|
+
|
|
138
|
+
// src/EventEmitter.ts
|
|
139
|
+
// Minimal event emitter with per-key listener lists and "piping": every
// event emitted here is also forwarded to any piped emitters.
var EventEmitter = class {
  // Map of event key -> array of handler functions.
  listeners = {};
  // Emitters that receive a copy of every event emitted here.
  piped = [];
  /**
   * Registers `handler` for `key`.
   * @returns an unsubscribe function removing this specific handler.
   */
  on(key, handler) {
    this.listeners[key] = this.listeners[key] || [];
    this.listeners[key].push(handler);
    return () => {
      const ind = this.listeners[key].indexOf(handler);
      if (ind > -1) {
        this.listeners[key].splice(ind, 1);
      }
    };
  }
  /**
   * Removes one handler for `key`, or all handlers when no function is
   * given. Fix: previously, calling off(key, fn) for a key that never had
   * listeners threw a TypeError (indexOf on undefined); now it is a no-op.
   */
  off(key, handler) {
    if (typeof handler === "function") {
      const fns = this.listeners[key];
      if (!fns) {
        return;
      }
      const ind = fns.indexOf(handler);
      if (ind > -1) {
        fns.splice(ind, 1);
      }
    } else {
      this.listeners[key] = [];
    }
  }
  /** Invokes all handlers for `key`, then forwards to piped emitters. */
  emit(key, payload) {
    const fns = this.listeners[key] || [];
    fns.forEach((fn) => fn(payload));
    this.piped.forEach((emitter) => {
      emitter.emit(key, payload);
    });
  }
  /** Forwards all future events to `emitter`; returns an un-pipe function. */
  pipe(emitter) {
    this.piped.push(emitter);
    return () => {
      const ind = this.piped.indexOf(emitter);
      if (ind > -1) {
        this.piped.splice(ind, 1);
      }
    };
  }
};
|
|
179
|
+
|
|
180
|
+
// src/log.ts
|
|
181
|
+
// Base class for log backends. Subclasses implement `log`; the two chat
// hooks default to no-ops. Exception objects are de-duplicated with a
// WeakMap so the same error propagating through several render layers is
// only reported once.
var LogImplementation = class {
  // Exception objects already reported (WeakMap so entries can be GC'd).
  loggedExceptions = /* @__PURE__ */ new WeakMap();
  /**
   * Logs exceptions thrown during an element's render.
   */
  logException(ctx, exception) {
    const isObjectException = typeof exception === "object" && exception !== null;
    if (isObjectException) {
      if (this.loggedExceptions.has(exception)) {
        return;
      }
      this.loggedExceptions.set(exception, true);
    }
    const message = `Rendering element <${ctx.element.tag.name}> failed with exception: ${exception}`;
    this.log(ctx, "error", message);
  }
  // Hook invoked before a provider chat-completion request; no-op default.
  chatCompletionRequest(_ctx, _provider, _payload) {
  }
  // Hook invoked after a provider chat-completion response; no-op default.
  chatCompletionResponse(_ctx, _provider, _payload) {
  }
};
|
|
205
|
+
// Convenience wrapper that pins a LogImplementation to a single render
// context, exposing level-specific methods plus the exception and
// chat-completion hooks.
var BoundLogger = class {
  constructor(impl, ctx) {
    this.impl = impl;
    this.ctx = ctx;
  }
  // Joins mixed arguments into one space-separated string: primitives are
  // stringified directly, everything else via JSON.
  formatMessage = (...msgs) => msgs.map((m) => {
    switch (typeof m) {
      case "string":
        return m;
      case "number":
        return m.toString();
      case "boolean":
        return m ? "true" : "false";
      case "undefined":
        return "undefined";
      default:
        return m === null ? "null" : JSON.stringify(m);
    }
  }).join(" ");
  error = (...msgs) => this.impl.log(this.ctx, "error", this.formatMessage(...msgs));
  warn = (...msgs) => this.impl.log(this.ctx, "warn", this.formatMessage(...msgs));
  info = (...msgs) => this.impl.log(this.ctx, "info", this.formatMessage(...msgs));
  debug = (...msgs) => this.impl.log(this.ctx, "debug", this.formatMessage(...msgs));
  logException = (exception) => this.impl.logException(this.ctx, exception);
  chatCompletionRequest = (provider, payload) => this.impl.chatCompletionRequest(this.ctx, provider, payload);
  chatCompletionResponse = (provider, payload) => this.impl.chatCompletionResponse(this.ctx, provider, payload);
};
|
|
237
|
+
// Log backend that discards every message; used as the default logger for
// render contexts and LoggerContext.
var NoopLogImplementation = class extends LogImplementation {
  // Intentionally a no-op.
  log(_ctx, _level, _message) {
  }
};
|
|
241
|
+
// Log backend that writes each message to the console, prefixed with the
// level, the element tag, and the render id of the originating context.
var ConsoleLogger = class extends LogImplementation {
  log(ctx, level, message) {
    const prefix = `[${level}] <${ctx.element.tag.name}> id=${ctx.renderId}`;
    console.log(`${prefix} ${message}`);
  }
};
|
|
248
|
+
// Log backend that fans every call out to a list of other
// LogImplementations, in order.
var CombinedLogger = class extends LogImplementation {
  /** @param loggers - backends that each receive every call */
  constructor(loggers) {
    super();
    this.loggers = loggers;
  }
  log(...args) {
    for (const logger of this.loggers) {
      logger.log(...args);
    }
  }
  chatCompletionRequest(...args) {
    for (const logger of this.loggers) {
      logger.chatCompletionRequest(...args);
    }
  }
  chatCompletionResponse(...args) {
    for (const logger of this.loggers) {
      logger.chatCompletionResponse(...args);
    }
  }
};
|
|
263
|
+
|
|
264
|
+
// src/utils.ts
|
|
265
|
+
var import_nanoid = require("nanoid");
|
|
266
|
+
// Generates a unique id for render contexts. NOTE: despite the name this
// delegates to nanoid, so the result is a nanoid string, not an RFC 4122
// UUID.
function uuidv4() {
  const { nanoid } = import_nanoid;
  return nanoid();
}
|
|
269
|
+
// Reads an environment variable, tolerating runtimes without a global
// `process`. Throws when the variable is missing unless `shouldThrow` is
// false, in which case undefined is returned.
function getEnvVar(name, shouldThrow = true) {
  let env = globalThis.process?.env;
  if (env === void 0) {
    // Fall back to a bare `process` reference (some bundlers rewrite it
    // statically); swallow the ReferenceError in runtimes without it.
    try {
      env = process.env;
    } catch {
    }
  }
  const value = env?.[name];
  if (value !== void 0) {
    return value;
  }
  if (shouldThrow) {
    throw new Error(`Please specify env var '${name}'`);
  }
  return value;
}
|
|
283
|
+
|
|
284
|
+
// src/render.ts
|
|
285
|
+
// Converts a literal renderable to its text form: strings pass through,
// numbers are stringified, and everything else (undefined, booleans, null,
// or unexpected non-literals) renders as the empty string.
// The original had an explicit undefined/boolean/null branch returning ""
// immediately followed by an unconditional `return ""` — dead duplication;
// collapsed here with identical behavior.
function renderLiteral(renderable) {
  if (typeof renderable === "string") {
    return renderable;
  }
  if (typeof renderable === "number") {
    return renderable.toString();
  }
  return "";
}
|
|
297
|
+
// Root of every render tree: a placeholder component that renders nothing.
function Root() {
  return null;
}
|
|
300
|
+
// Builds the root StreamRenderContext for a render pass, seeding it with a
// synthetic Root element and the chosen logger.
// Note: the `logger || LoggerContext.defaultValue` fallback only matters
// when a caller explicitly passes a falsy logger (e.g. `logger: null`) —
// the default parameter already covers the undefined case.
function createRenderContext({
  logger = new NoopLogImplementation(),
  rootRenderId = uuidv4()
} = {}) {
  return new StreamRenderContext(
    null,
    createAIElement(Root, {}),
    rootRenderId,
    {
      [LoggerContext.key]: logger || LoggerContext.defaultValue
    }
  );
}
|
|
313
|
+
|
|
314
|
+
// src/types.ts
|
|
315
|
+
// Symbol under which context values are attached to an element; see
// withContextValues / attachedContextValues.
var attachedContextSymbol = Symbol("AI.attachedContext");
|
|
316
|
+
|
|
317
|
+
// src/context.ts
|
|
318
|
+
// Context carrying the active LogImplementation; defaults to a no-op
// logger so rendering works without any logging configuration.
var LoggerContext = createContext(
  new NoopLogImplementation()
);
|
|
321
|
+
// Drains an async-iterable of string chunks and concatenates them into a
// single string.
var accumResults = async (result) => {
  let text = "";
  for await (const chunk of result) {
    text += chunk;
  }
  return text;
};
|
|
329
|
+
// Re-serializes the output of N concurrent streams: readers receive all
// chunks of stream 0 first, then stream 1, and so on, while every stream
// keeps buffering as it produces data. Readers advance a
// [streamInd, valInd] cursor; internally, a promise rejection with the
// sentinel string "next" means "this stream is exhausted, move to the next
// one" and is not an error.
var ParallelStreamIterator = class extends EventEmitter {
  // values[i] buffers every chunk stream i has produced so far.
  values = [];
  // completedStreams[i] flips to true once stream i finishes.
  completedStreams = [];
  // Read position: [stream index, chunk index within that stream].
  cursor = [0, 0];
  constructor(size) {
    super();
    for (let i = 0; i < size; i++) {
      this.values[i] = [];
      this.completedStreams[i] = false;
    }
  }
  // Records a chunk and wakes any reader waiting for it via "data".
  push(streamInd, value) {
    const valInd = this.values[streamInd].length;
    this.values[streamInd].push(value);
    this.emit("data", {
      streamInd,
      valInd,
      value
    });
  }
  // Marks a stream done and wakes waiting readers via "complete".
  complete(streamInd) {
    this.completedStreams[streamInd] = true;
    this.emit("complete", {
      streamInd
    });
  }
  // Advances the cursor within the current stream, or to the start of the
  // next stream when `complete` is true.
  nextCursor(complete) {
    const [streamInd, valInd] = this.cursor;
    if (!complete) {
      this.cursor = [streamInd, valInd + 1];
    } else {
      this.cursor = [streamInd + 1, 0];
    }
  }
  // returns a promise that resolves when this.values[streamInd][valInd] is available
  // Resolves { done: true } once the last stream has completed; rejects with
  // the "next" sentinel when this stream is exhausted but later ones remain.
  // NOTE(review): when the last stream completes, the code can resolve
  // { done: true } and then still call reject("next"); a settled promise
  // ignores the later call, so this is harmless but subtle.
  resolveAt(streamInd, valInd) {
    return new Promise((resolve, reject) => {
      // Fast path: the chunk is already buffered.
      const value = this.values[streamInd][valInd];
      if (value !== void 0) {
        resolve({ done: false, value });
        return;
      }
      // Stream already finished: either the whole merge is done (it was
      // the last stream) or the reader should hop to the next stream.
      if (this.completedStreams[streamInd]) {
        if (streamInd === this.completedStreams.length - 1) {
          resolve({ done: true, value: void 0 });
        }
        reject("next");
        return;
      }
      // Otherwise wait for either the chunk to arrive or the stream to end.
      const unsub = this.on("data", (data) => {
        if (streamInd === data.streamInd && data.valInd === valInd) {
          resolve({ done: false, value: data.value });
          unsub();
          onCompleteUnsub();
        }
      });
      const onCompleteUnsub = this.on("complete", (data) => {
        if (streamInd !== data.streamInd) {
          return;
        }
        if (streamInd === this.completedStreams.length - 1) {
          resolve({ done: true, value: void 0 });
        }
        if (this.values[streamInd].length === valInd) {
          reject("next");
        }
        unsub();
        onCompleteUnsub();
      });
    });
  }
  // Async-iterator protocol: read at the cursor, treating a "next"
  // rejection as the signal to advance to the following stream.
  async next() {
    try {
      const val = await this.resolveAt(...this.cursor);
      this.nextCursor(false);
      return val;
    } catch (e) {
      if (e !== "next") {
        throw e;
      }
      this.nextCursor(true);
      const nextStreamVal = await this.resolveAt(...this.cursor);
      this.nextCursor(false);
      return nextStreamVal;
    }
  }
  [Symbol.asyncIterator]() {
    return this;
  }
};
|
|
419
|
+
// Runs each input stream to completion concurrently, feeding all chunks
// into a ParallelStreamIterator that replays them in stream order.
// NOTE(review): if a source stream throws, the rejection of the async
// forEach callback is not handled here — the error surfaces as an
// unhandled promise rejection rather than through the returned iterator.
function coalesceParallelStreams(streams) {
  const iter = new ParallelStreamIterator(streams.length);
  streams.forEach(async (s, streamInd) => {
    for await (const value of s) {
      iter.push(streamInd, value);
    }
    iter.complete(streamInd);
  });
  return iter;
}
|
|
429
|
+
// The render engine. Each instance represents one element in the tree and
// carries the inherited context values plus a logger bound to that
// element. `render` returns a hybrid object that is both awaitable (the
// accumulated string) and async-iterable (the streamed chunks);
// `renderStream` is the generator that actually walks a renderable.
var StreamRenderContext = class _StreamRenderContext {
  constructor(parentContext, element, renderId, contextValues) {
    this.parentContext = parentContext;
    this.element = element;
    this.renderId = renderId;
    this.contextValues = contextValues;
    const logImpl = this.getContext(LoggerContext);
    this.logger = new BoundLogger(logImpl, this);
    // Hybrid result: `await ctx.render(x)` accumulates the whole stream
    // into one string; `for await (...)` iterates chunks as they arrive.
    this.render = (renderable) => {
      const generator = this.renderStream(renderable);
      const result = {
        then: (onFulfilled, onRejected) => accumResults(generator).then(onFulfilled, onRejected),
        [Symbol.asyncIterator]: () => generator
      };
      return result;
    };
    const self = this;
    this.renderStream = async function* (renderable) {
      // Literals render to text in a single chunk.
      if (isLiteral(renderable)) {
        yield renderLiteral(renderable);
        return;
      }
      // Elements: enter a child context (folding in any attached context
      // values), render the component, and report exceptions through the
      // child's logger before rethrowing.
      if (isAIElement(renderable)) {
        const ctxValues = attachedContextValues(renderable) || {};
        const childRenderId = uuidv4();
        const newCtx = self.enter(renderable, childRenderId, ctxValues);
        const logger = newCtx.logger;
        try {
          return yield* newCtx.render(renderable.render(newCtx));
        } catch (ex) {
          logger.logException(ex);
          throw ex;
        }
      }
      // Arrays: an all-literal array collapses to one joined chunk;
      // otherwise each truthy item is rendered concurrently and the chunks
      // are re-serialized in array order.
      if (Array.isArray(renderable)) {
        if (renderable.every((r) => isLiteral(r))) {
          yield renderable.map((r) => renderLiteral(r)).join("");
          return;
        }
        const streams = renderable.filter((a) => !!a).map((r) => self.renderStream(r));
        const result = coalesceParallelStreams(streams);
        while (true) {
          const { value, done } = await result.next();
          if (done) {
            return;
          }
          yield value;
        }
      }
      // Async iterables (e.g. generator components) are delegated to
      // directly.
      if (Symbol.asyncIterator in renderable) {
        return yield* renderable[Symbol.asyncIterator]();
      }
      if (!("then" in renderable)) {
        throw new Error(
          `Unexpected renderable type: ${JSON.stringify(renderable)}`
        );
      }
      // Thenables: resolve them, then render whatever they produced.
      const next = await renderable.then(
        (r) => r
      );
      return yield* self.render(next);
    };
  }
  render;
  renderStream;
  logger;
  // Reads a context value, falling back to the context's declared default.
  getContext = (context) => {
    return this.contextValues[context.key] ?? context.defaultValue;
  };
  // @internal
  // Creates the child context for `element`, layering `newCtx` values over
  // the inherited ones.
  enter(element, renderId, newCtx) {
    return new _StreamRenderContext(this, element, renderId, {
      ...this.contextValues,
      ...newCtx
    });
  }
};
|
|
506
|
+
// Internal pass-through component that context Providers wrap their
// children in; carries the attached context values.
function ContextValueProvider({ children }) {
  return children;
}
|
|
509
|
+
// Creates a typed context: a unique key, a default value, and a Provider
// component that attaches the given value to everything it wraps.
function createContext(defaultValue) {
  const key = Symbol();
  function ContextProvider(props, _compContext) {
    const additionalContext = { [key]: props.value };
    const wrapped = createAIElement(ContextValueProvider, null, props.children);
    return withContextValues(wrapped, additionalContext);
  }
  return {
    Provider: ContextProvider,
    defaultValue,
    key
  };
}
|
|
525
|
+
// Internal pass-through wrapper used to attach context values to
// non-element renderables (promises, async generators).
function BoundContextValues({ children }) {
  return children;
}
|
|
528
|
+
// Attaches `additionalContext` to a renderable so the values are folded
// into the render context when it is rendered. Literals pass through;
// arrays are mapped element-wise; elements that already carry attached
// context are left untouched (first attachment wins); any other renderable
// (promise, generator) is wrapped in a BoundContextValues element first.
function withContextValues(renderable, additionalContext) {
  if (isLiteral(renderable)) {
    return renderable;
  }
  if (Array.isArray(renderable)) {
    return renderable.map((item) => withContextValues(item, additionalContext));
  }
  if (!isAIElement(renderable)) {
    // Wrap non-element renderables so the context has an element to ride on.
    return withContextValues(
      createAIElement(BoundContextValues, null, renderable),
      additionalContext
    );
  }
  if (renderable[attachedContextSymbol]) {
    return renderable;
  }
  return {
    ...renderable,
    [attachedContextSymbol]: additionalContext
  };
}
|
|
550
|
+
// Reads the context values previously attached to an element by
// withContextValues; undefined when none were attached.
function attachedContextValues(element) {
  return element[attachedContextSymbol];
}
|
|
553
|
+
|
|
554
|
+
// src/lib/openai/OpenAI.tsx
|
|
555
|
+
var import_openai = require("openai");
|
|
556
|
+
|
|
557
|
+
// src/lib/openai/tokenizer.ts
|
|
558
|
+
var import_js_tiktoken = require("js-tiktoken");
|
|
559
|
+
// Shared cl100k_base encoding (the tokenizer used by the supported OpenAI
// chat models), instantiated once at module load.
var cl100kTokenizer = (0, import_js_tiktoken.getEncoding)("cl100k_base");
// Minimal encode/decode facade over the tiktoken encoding.
var tokenizer = {
  encode: (text) => cl100kTokenizer.encode(text),
  decode: (tokens) => cl100kTokenizer.decode(tokens)
};
|
|
564
|
+
// Maximum prompt tokens for each supported OpenAI chat model, after
// reserving the 3 tokens every reply is primed with. Returns undefined for
// unknown models.
function tokenLimitForChatModel(model) {
  const TOKENS_CONSUMED_BY_REPLY_PREFIX = 3;
  // Raw context-window sizes per model id.
  const contextWindow = {
    "gpt-4": 8192,
    "gpt-4-0314": 8192,
    "gpt-4-0613": 8192,
    "gpt-4-32k": 32768,
    "gpt-4-32k-0314": 32768,
    "gpt-4-32k-0613": 32768,
    "gpt-4-1106-preview": 128e3,
    "gpt-3.5-turbo": 4096,
    "gpt-3.5-turbo-0301": 4096,
    "gpt-3.5-turbo-0613": 4096,
    "gpt-3.5-turbo-16k": 16384,
    "gpt-3.5-turbo-16k-0613": 16384,
    "gpt-3.5-turbo-1106": 16384
  };
  // Object.hasOwn guards against prototype keys (e.g. "toString").
  if (!Object.hasOwn(contextWindow, model)) {
    return void 0;
  }
  return contextWindow[model] - TOKENS_CONSUMED_BY_REPLY_PREFIX;
}
|
|
591
|
+
// Approximates the OpenAI token cost of a conversation message: a fixed
// per-message framing overhead plus the encoded content length.
function tokenCountForConversationMessage(message) {
  // Every chat message carries ~3 tokens of framing overhead.
  const TOKENS_PER_MESSAGE = 3;
  const knownTypes = ["assistant", "system", "user"];
  if (knownTypes.includes(message.type)) {
    return TOKENS_PER_MESSAGE + tokenizer.encode(message.content).length;
  }
  // Unknown message types yield undefined, matching the original switch
  // without a default clause.
  return void 0;
}
|
|
600
|
+
|
|
601
|
+
// src/jsx-runtime.ts
|
|
602
|
+
// Automatic JSX runtime entry point: adapts the (type, config, maybeKey)
// calling convention to createAIElement. Array children are re-spread so
// createAIElement applies its single-vs-multiple children normalization; a
// non-array `children` is left in the config props untouched.
function jsx(type, config, maybeKey) {
  const props = maybeKey === void 0 ? config : { ...config, key: maybeKey };
  const childArray = Array.isArray(config?.children) ? config.children : [];
  return createAIElement(type, props, ...childArray);
}
// The runtime calls jsxs for static multiple children; behavior is the same.
var jsxs = jsx;
|
|
608
|
+
|
|
609
|
+
// src/lib/openai/OpenAI.tsx
|
|
610
|
+
// Lazily-constructed module-level default OpenAI client, shared across
// renders.
var defaultClient = null;
// Context supplying the OpenAI client factory. The default factory builds
// a singleton client from the OPENAI_API_KEY env var (throwing when it is
// unset, per getEnvVar's shouldThrow=true).
var OpenAIClientContext = createContext(() => {
  if (defaultClient) {
    return defaultClient;
  }
  const apiKey = getEnvVar("OPENAI_API_KEY", true);
  defaultClient = new import_openai.OpenAI({ apiKey });
  return defaultClient;
});
|
|
619
|
+
// OpenAI chat-completion component: renders its message children to text,
// issues a streaming chat.completions request, and yields each content
// delta as it arrives. Request and response metadata (including token
// usage) are reported through the bound logger's chat hooks.
async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
  const startTime = performance.now();
  // The client factory comes from context; the default lazily builds a
  // singleton from OPENAI_API_KEY.
  const client = getContext(OpenAIClientContext)();
  if (!client) {
    throw new Error("[OpenAI] must supply OpenAI model via context");
  }
  // Render every child message to plain text and attach its token count.
  const renderedMessages = await Promise.all(
    childrenToConversationMessage(props.children).map(async (message) => {
      const partiallyRendered = {
        ...message,
        content: await render(message.element)
      };
      return {
        ...partiallyRendered,
        tokens: tokenCountForConversationMessage(partiallyRendered)
      };
    })
  );
  // Shape the messages for the OpenAI API ({ role, content }).
  const chatMessages = renderedMessages.map((m) => {
    return {
      content: m.content,
      role: m.type
    };
  });
  const chatCompletionRequest = {
    model: props.model,
    max_tokens: props.maxTokens,
    temperature: props.temperature,
    messages: chatMessages,
    stream: true
  };
  const logRequestData = {
    startTime,
    model: props.model,
    provider: props.provider,
    providerRegion: props.providerRegion,
    inputMessages: renderedMessages,
    request: chatCompletionRequest
  };
  logger.chatCompletionRequest("openai", logRequestData);
  let chatResponse;
  try {
    chatResponse = await client.chat.completions.create(chatCompletionRequest);
  } catch (ex) {
    // Wrap known error shapes in ChatCompletionError so callers get the
    // request payload alongside the failure; rethrow anything else as-is.
    if (ex instanceof import_openai.OpenAI.APIError) {
      throw new ChatCompletionError(
        `OpenAIClient.APIError: ${ex.message}`,
        logRequestData
      );
    } else if (ex instanceof Error) {
      throw new ChatCompletionError(ex.message, logRequestData);
    }
    throw ex;
  }
  let finishReason = void 0;
  let content = "";
  // Stream the response: yield each content delta and accumulate the full
  // text for the response log; remember the last finish_reason seen.
  for await (const message of chatResponse) {
    if (!message.choices || !message.choices[0]) {
      continue;
    }
    const delta = message.choices[0].delta;
    if (message.choices[0].finish_reason) {
      finishReason = message.choices[0].finish_reason;
    }
    if (delta.content) {
      content += delta.content;
      yield delta.content;
    }
  }
  // Assemble the assistant message for usage accounting and logging.
  const outputMessage = {
    type: "assistant",
    element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
    content,
    tokens: tokenCountForConversationMessage({
      type: "assistant",
      content
    })
  };
  const responseData = {
    ...logRequestData,
    finishReason,
    latency: performance.now() - startTime,
    outputMessage,
    tokensUsed: computeUsage([...renderedMessages, outputMessage])
  };
  logger.chatCompletionResponse("openai", responseData);
}
|
|
706
|
+
|
|
707
|
+
// src/lib/openai/index.ts
|
|
708
|
+
var import_openai2 = require("openai");
|
|
709
|
+
|
|
710
|
+
// src/lib/anthropic/Anthropic.tsx
|
|
711
|
+
var import_sdk = __toESM(require("@anthropic-ai/sdk"));
|
|
712
|
+
var import_tokenizer2 = require("@anthropic-ai/tokenizer");
|
|
713
|
+
// Lazily-constructed module-level default Anthropic client, shared across
// renders.
var defaultClient2 = null;
// Context supplying the Anthropic client factory. The default factory
// builds a singleton from ANTHROPIC_API_KEY; getEnvVar is called with
// shouldThrow=false, so a missing key yields apiKey: undefined and the SDK
// fails at request time instead of at construction.
var AnthropicClientContext = createContext(
  () => {
    if (defaultClient2) {
      return defaultClient2;
    }
    defaultClient2 = new import_sdk.default({
      apiKey: getEnvVar("ANTHROPIC_API_KEY", false)
    });
    return defaultClient2;
  }
);
// Completion budget used when props.maxTokens is not supplied.
var defaultMaxTokens = 4096;
|
|
726
|
+
// Anthropic chat component targeting the legacy text-completions API.
// Because that API has no system role, each system message is emulated as
// a user instruction followed by a canned assistant acknowledgement.
// Yields streamed text chunks and reports request/response metadata via
// the bound logger.
async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
  const startTime = performance.now();
  const client = getContext(AnthropicClientContext)();
  if (!client) {
    throw new Error(
      "[AnthropicChatCompletion] must supply AnthropicClient via context"
    );
  }
  const renderedMessages = await Promise.all(
    childrenToConversationMessage(props.children).flatMap((message) => {
      // System-message shim: instruction as a human turn plus a fixed
      // assistant acknowledgement.
      if (message.type === "system") {
        return [
          {
            type: "user",
            element: /* @__PURE__ */ jsxs(UserMessage, { children: [
              "For subsequent replies you will adhere to the following instructions: ",
              message.element
            ] })
          },
          {
            type: "assistant",
            element: /* @__PURE__ */ jsx(AssistantMessage, { children: "Okay, I will do that." })
          }
        ];
      }
      return [message];
    }).map(async (message) => {
      // Prefix each turn with the SDK's role marker and count its tokens.
      const prefix = message.type === "user" ? import_sdk.default.HUMAN_PROMPT : import_sdk.default.AI_PROMPT;
      const rendered = await render(message.element);
      const content2 = `${prefix} ${rendered.trim()}`;
      return {
        ...message,
        content: content2,
        tokens: (0, import_tokenizer2.countTokens)(content2)
      };
    })
  );
  const chatMessages = renderedMessages.map((m) => {
    return m.content;
  });
  // Trailing AI_PROMPT signals the model that it is its turn to speak.
  chatMessages.push(import_sdk.default.AI_PROMPT);
  const anthropicCompletionRequest = {
    prompt: chatMessages.join("\n\n"),
    max_tokens_to_sample: props.maxTokens ?? defaultMaxTokens,
    temperature: props.temperature,
    model: props.model,
    stream: true
  };
  const logRequestData = {
    startTime,
    model: props.model,
    provider: props.provider,
    providerRegion: props.providerRegion,
    inputMessages: renderedMessages,
    request: anthropicCompletionRequest
  };
  logger.chatCompletionRequest("anthropic", logRequestData);
  let response;
  try {
    response = await client.completions.create(anthropicCompletionRequest);
  } catch (err) {
    // Wrap known error shapes in ChatCompletionError carrying the request
    // payload; rethrow anything unrecognized as-is.
    if (err instanceof import_sdk.default.APIError) {
      throw new ChatCompletionError(
        `AnthropicClient.APIError: ${err.message}`,
        logRequestData
      );
    } else if (err instanceof Error) {
      throw new ChatCompletionError(err.message, logRequestData);
    }
    throw err;
  }
  let content = "";
  let isFirstResponse = true;
  for await (const completion of response) {
    let text = completion.completion;
    // Strip the single leading space the API emits on the first chunk.
    if (isFirstResponse && text.length > 0) {
      isFirstResponse = false;
      if (text.startsWith(" ")) {
        text = text.slice(1);
      }
    }
    content += text;
    yield text;
  }
  const outputMessage = {
    type: "assistant",
    element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
    content,
    tokens: (0, import_tokenizer2.countTokens)(content)
  };
  const responseData = {
    ...logRequestData,
    // NOTE(review): finishReason is hard-coded to "stop"; the stream's
    // actual stop reason is not inspected here.
    finishReason: "stop",
    latency: performance.now() - startTime,
    outputMessage,
    tokensUsed: computeUsage([...renderedMessages, outputMessage])
  };
  logger.chatCompletionResponse("anthropic", responseData);
}
|
|
825
|
+
|
|
826
|
+
// src/lib/anthropic/index.ts
|
|
827
|
+
var import_sdk2 = __toESM(require("@anthropic-ai/sdk"));
|
|
828
|
+
var import_tokenizer3 = require("@anthropic-ai/tokenizer");
|
|
829
|
+
// Annotate the CommonJS export names for ESM import in node:
// (dead code by design: the `0 &&` guard means this never executes; Node's
// cjs-module-lexer parses it statically so ESM importers can use named
// imports from this CommonJS bundle.)
0 && (module.exports = {
  AIFragment,
  AnthropicChatCompletion,
  AnthropicClient,
  AnthropicClientContext,
  AssistantMessage,
  BoundLogger,
  ChatCompletionError,
  CombinedLogger,
  ConsoleLogger,
  LogImplementation,
  LoggerContext,
  NoopLogImplementation,
  OpenAIChatCompletion,
  OpenAIClient,
  OpenAIClientContext,
  SystemMessage,
  UserMessage,
  attachedContextSymbol,
  childrenToConversationMessage,
  computeUsage,
  countAnthropicTokens,
  createAIElement,
  createContext,
  createRenderContext,
  defaultMaxTokens,
  tokenCountForConversationMessage,
  tokenLimitForChatModel,
  tokenizer
});
|