@gammatech/aijsx 0.1.3 → 0.2.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{chunk-7GA5BUUP.mjs → chunk-UMN5F5A5.mjs} +2 -16
- package/dist/chunk-WDM2VIHQ.mjs +21 -0
- package/dist/{createElement-ms1wdmoH.d.mts → createElement-YEuZ7P4l.d.mts} +12 -9
- package/dist/{createElement-ms1wdmoH.d.ts → createElement-YEuZ7P4l.d.ts} +12 -9
- package/dist/index.d.mts +24 -6
- package/dist/index.d.ts +24 -6
- package/dist/index.js +446 -108
- package/dist/index.mjs +440 -98
- package/dist/jsx-dev-runtime.d.mts +1 -1
- package/dist/jsx-dev-runtime.d.ts +1 -1
- package/dist/jsx-dev-runtime.mjs +2 -1
- package/dist/jsx-runtime.d.mts +1 -1
- package/dist/jsx-runtime.d.ts +1 -1
- package/dist/jsx-runtime.mjs +2 -1
- package/package.json +2 -1
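The headline change in 0.2.0-beta.2 is GPT-4 vision support plus an XML-based message-rendering pipeline. A sketch of the changed import surface, inferred from the `__export` hunks in the `index.js` diff below (the names are real exports; the import style is an assumption):

```ts
// New exports visible in this diff (sketch; inferred from the diff, not from package docs).
import {
  ContentTypeImage,                 // image content-part component (renders null)
  OpenAIVisionChatCompletion,       // streaming GPT-4 vision chat completion
  tokenCountForOpenAIMessage,       // per-message token estimate
  tokenCountForOpenAIVisionMessage, // text + image token estimate
} from "@gammatech/aijsx";
// Removed since 0.1.3: childrenToConversationMessage.
```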
package/dist/index.js
CHANGED
@@ -38,23 +38,25 @@ __export(src_exports, {
   ChatCompletionError: () => ChatCompletionError,
   CombinedLogger: () => CombinedLogger,
   ConsoleLogger: () => ConsoleLogger,
+  ContentTypeImage: () => ContentTypeImage,
   LogImplementation: () => LogImplementation,
   LoggerContext: () => LoggerContext,
   NoopLogImplementation: () => NoopLogImplementation,
   OpenAIChatCompletion: () => OpenAIChatCompletion,
-  OpenAIClient: () =>
+  OpenAIClient: () => import_openai3.OpenAI,
   OpenAIClientContext: () => OpenAIClientContext,
+  OpenAIVisionChatCompletion: () => OpenAIVisionChatCompletion,
   SystemMessage: () => SystemMessage,
   UserMessage: () => UserMessage,
   attachedContextSymbol: () => attachedContextSymbol,
-  childrenToConversationMessage: () => childrenToConversationMessage,
   computeUsage: () => computeUsage,
-  countAnthropicTokens: () =>
+  countAnthropicTokens: () => import_tokenizer4.countTokens,
   createAIElement: () => createAIElement,
   createContext: () => createContext,
   createRenderContext: () => createRenderContext,
   defaultMaxTokens: () => defaultMaxTokens,
-
+  tokenCountForOpenAIMessage: () => tokenCountForOpenAIMessage,
+  tokenCountForOpenAIVisionMessage: () => tokenCountForOpenAIVisionMessage,
   tokenLimitForChatModel: () => tokenLimitForChatModel,
   tokenizer: () => tokenizer
 });
@@ -70,32 +72,9 @@ var UserMessage = (props) => {
 var AssistantMessage = (props) => {
   return props.children;
 };
-var childrenToConversationMessage = (c) => {
-  const children = Array.isArray(c) ? c : [c];
-  return children.map((child) => {
-    if (child.tag.name === "UserMessage") {
-      return {
-        type: "user",
-        element: child
-      };
-    } else if (child.tag.name === "SystemMessage") {
-      return {
-        type: "system",
-        element: child
-      };
-    } else if (child.tag.name === "AssistantMessage") {
-      return {
-        type: "assistant",
-        element: child
-      };
-    } else {
-      throw new Error("OpenAI: unknown message type");
-    }
-  });
-};
 var computeUsage = (messages) => {
-  const prompt = messages.filter((m) => m.
-  const completion = messages.filter((m) => m.
+  const prompt = messages.filter((m) => m.role === "user" || m.role === "system").reduce((acc, m) => acc + m.tokens, 0);
+  const completion = messages.filter((m) => m.role === "assistant").reduce((acc, m) => acc + m.tokens, 0);
   return {
     prompt,
     completion,
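`computeUsage` now operates on plain role-tagged messages that carry a precomputed `tokens` count, rather than on element wrappers. A minimal sketch of the arithmetic, with made-up token counts:

```ts
// Hypothetical inputs; computeUsage sums `tokens` by role (per the hunk above).
const usage = computeUsage([
  { role: "system", content: "Be terse.", tokens: 12 },
  { role: "user", content: "Hi", tokens: 8 },
  { role: "assistant", content: "Hello!", tokens: 5 },
]);
// usage.prompt === 20     (system + user)
// usage.completion === 5  (assistant)
```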
@@ -125,7 +104,7 @@ function createAIElement(tag, props, ...children) {
   return result;
 }
 function isAIElement(value) {
-  return value !== null && typeof value === "object" && "tag" in value;
+  return value !== null && typeof value === "object" && "tag" in value && "render" in value;
 }
 function isLiteral(value) {
   return typeof value === "string" || typeof value === "number" || typeof value === "undefined" || typeof value === "boolean" || // capture null + undefined
@@ -314,6 +293,108 @@ function createRenderContext({
 // src/types.ts
 var attachedContextSymbol = Symbol("AI.attachedContext");
 
+// src/xml.ts
+var import_fast_xml_parser = require("fast-xml-parser");
+var XmlNode = class {
+  constructor(parent, nodeName, attributes, value, childNodes) {
+    this.parent = parent;
+    this.nodeName = nodeName;
+    this.attributes = attributes;
+    this.value = value;
+    this.childNodes = childNodes;
+    this.childNodes = childNodes;
+    if (childNodes) {
+      childNodes.forEach((n) => n.parent = this);
+    }
+  }
+  toObject() {
+    if (this.value) {
+      return {
+        nodeName: this.nodeName,
+        value: this.value
+      };
+    }
+    if (this.childNodes) {
+      return {
+        nodeName: this.nodeName,
+        attributes: this.attributes,
+        childNodes: this.childNodes.map((n) => n.toObject())
+      };
+    }
+  }
+  get textContent() {
+    return this.value || this.childNodes?.map((n) => n.textContent).join("");
+  }
+  collapse(nodes) {
+    if (this.value !== void 0) {
+      return this;
+    }
+    if (this.childNodes) {
+      this.childNodes.forEach((n) => n.collapse(nodes));
+    }
+    if (
+      // is the root node
+      !this.parent || // is a preserved node tag
+      nodes.includes(this.nodeName)
+    ) {
+      return this;
+    }
+    const parent = this.parent;
+    const index = parent.childNodes.indexOf(this);
+    parent.childNodes.splice(index, 1, ...this.childNodes);
+    return this;
+  }
+};
+var XmlFragment = class extends XmlNode {
+  // initialize with value here because ts isn't smart enough
+  // to understand that the 4th argument to super initializes
+  // childNodes
+  childNodes = [];
+  constructor(childNodes) {
+    super(null, "#fragment", {}, void 0, childNodes);
+  }
+};
+function parseXml(input) {
+  const parser = new import_fast_xml_parser.XMLParser({
+    isArray: (_tag, path) => {
+      if (path.indexOf(".") === -1) {
+        return false;
+      }
+      return true;
+    },
+    ignoreAttributes: false,
+    attributeNamePrefix: "",
+    preserveOrder: true,
+    trimValues: false
+  });
+  const constructNode = (parent, nodeObject) => {
+    if (nodeObject.hasOwnProperty("#text")) {
+      return new XmlNode(parent, "#text", {}, nodeObject["#text"]);
+    }
+    const nodeName = Object.keys(nodeObject)[0];
+    const attributeName = Object.keys(nodeObject)[1];
+    const childObjects = nodeObject[nodeName];
+    const attributes = Object.entries(nodeObject[attributeName] || {}).reduce((acc, [key, value]) => {
+      try {
+        acc[key] = JSON.parse(value);
+      } catch (e) {
+        console.error(
+          `Error parsing attribute value (attr=${key} value=${value}) for element ${nodeName}`
+        );
+      }
+      return acc;
+    }, {});
+    const node = parent === null ? new XmlFragment([]) : new XmlNode(parent, nodeName, attributes, void 0, []);
+    node.childNodes = childObjects.map((child) => constructNode(node, child));
+    return node;
+  };
+  const parsed = parser.parse(`<#fragment>${input}</#fragment>`);
+  return constructNode(null, parsed[0]);
+}
+function escape(html) {
+  return html.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;").replace(/"/g, "&quot;").replace(/'/g, "&#39;");
+}
+
 // src/context.ts
 var LoggerContext = createContext(
   new NoopLogImplementation()
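The new `src/xml.ts` module underpins the message handling later in this diff: `parseXml` wraps the input in a synthetic `#fragment` root and parses it with `fast-xml-parser`, and `collapse(tags)` splices every node whose tag is not in the preserved list into its parent, so only the listed tags (and text) remain at the top level. A behavior sketch, inferred from how `buildOpenAIMessages` uses it below:

```ts
// Inferred behavior sketch, not a documented API.
const frag = parseXml("<UserMessage>Hi <Bold>there</Bold></UserMessage>");
frag.collapse(["UserMessage"]);
// <Bold> is not in the preserved list, so it is replaced by its children:
// frag.childNodes[0].nodeName === "UserMessage"
// frag.childNodes[0].textContent === "Hi there"
```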
@@ -469,8 +550,11 @@ var StreamRenderContext = class _StreamRenderContext {
     this.contextValues = contextValues;
     const logImpl = this.getContext(LoggerContext);
     this.logger = new BoundLogger(logImpl, this);
-    this.render = (renderable) => {
-      const generator = this.renderStream(
+    this.render = (renderable, opts) => {
+      const generator = this.renderStream(
+        renderable,
+        opts || { preserveTags: false }
+      );
       const result = {
         then: (onFulfilled, onRejected) => accumResults(generator).then(onFulfilled, onRejected),
         [Symbol.asyncIterator]: () => generator
@@ -478,9 +562,14 @@ var StreamRenderContext = class _StreamRenderContext {
       return result;
     };
     const self = this;
-    this.renderStream = async function* (renderable) {
+    this.renderStream = async function* (renderable, opts) {
+      const preserveTags = opts.preserveTags ?? false;
+      const renderedProps = opts.renderedProps || {};
+      const renderEscaped = (val) => {
+        return preserveTags ? escape(val) : val;
+      };
       if (isLiteral(renderable)) {
-        yield renderLiteral(renderable);
+        yield renderEscaped(renderLiteral(renderable));
         return;
       }
       if (isAIElement(renderable)) {
@@ -488,8 +577,29 @@ var StreamRenderContext = class _StreamRenderContext {
         const childRenderId = uuidv4();
         const newCtx = self.enter(renderable, childRenderId, ctxValues);
         const logger = newCtx.logger;
+        const isFragment = renderable.tag.name === "AIFragment";
         try {
-
+          if (preserveTags && !isFragment) {
+            const propsToRender = Object.entries(
+              renderable.props
+            ).reduce(
+              // create an array of escaped + json serialized key="{value}"s for attributes
+              (acc, [name, value]) => {
+                if (renderedProps[renderable.tag.name]?.[name]) {
+                  acc.push(`${name}="${escape(JSON.stringify(value))}"`);
+                }
+                return acc;
+              },
+              []
+            ).join(" ");
+            const attrs = propsToRender ? ` ${propsToRender}` : "";
+            yield `<${renderable.tag.name}${attrs}>`;
+          }
+          yield* newCtx.renderStream(renderable.render(newCtx), opts);
+          if (preserveTags && !isFragment) {
+            yield `</${renderable.tag.name}>`;
+          }
+          return;
         } catch (ex) {
           logger.logException(ex);
           throw ex;
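The net effect of the `preserveTags` path above: literal text is XML-escaped, every non-fragment element is wrapped in `<TagName>…</TagName>`, and only props whitelisted via `renderedProps` are serialized as attributes (JSON-stringified, then escaped). Roughly, under those assumptions:

```ts
// Sketch of a preserveTags render (ctx is an assumed render context).
const xml = await ctx.render(element, {
  preserveTags: true,
  renderedProps: { ContentTypeImage: { url: true, detail: true } },
});
// e.g. '<UserMessage>Look: <ContentTypeImage url="&quot;https://example.com/cat.png&quot;"
//       detail="&quot;low&quot;"></ContentTypeImage></UserMessage>'
```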
@@ -497,10 +607,10 @@ var StreamRenderContext = class _StreamRenderContext {
       }
       if (Array.isArray(renderable)) {
         if (renderable.every((r) => isLiteral(r))) {
-          yield renderable.map((r) => renderLiteral(r)).join("");
+          yield renderable.map((r) => renderEscaped(renderLiteral(r))).join("");
           return;
         }
-        const streams = renderable.filter((a) => !!a).map((r) => self.renderStream(r));
+        const streams = renderable.filter((a) => !!a).map((r) => self.renderStream(r, opts));
         const result = coalesceParallelStreams(streams);
         while (true) {
           const { value, done } = await result.next();
@@ -521,7 +631,7 @@ var StreamRenderContext = class _StreamRenderContext {
       const next = await renderable.then(
         (r) => r
       );
-      return yield* self.
+      return yield* self.renderStream(next, opts);
     };
   }
   render;
@@ -623,23 +733,44 @@ function tokenLimitForChatModel(model) {
     }
   }
 }
-function
+function tokenCountForOpenAIMessage(message) {
   const TOKENS_PER_MESSAGE = 3;
-  switch (message.
+  switch (message.role) {
     case "assistant":
     case "system":
     case "user":
-      return
+      return (
+        // TODO this isn't working for vision
+        TOKENS_PER_MESSAGE + tokenizer.encode(message.content).length
+      );
   }
 }
-
-
-
-
-
-
+function tokenCountForOpenAIVisionMessage(message) {
+  const TOKENS_PER_MESSAGE = 3;
+  const textCost = (content) => {
+    return TOKENS_PER_MESSAGE + tokenizer.encode(content).length;
+  };
+  switch (message.role) {
+    case "assistant":
+    case "system":
+      return textCost(message.content || "");
+    case "user":
+      if (typeof message.content === "string") {
+        return textCost(message.content);
+      }
+      return message.content.reduce((acc, part) => {
+        if (part.type === "text") {
+          return acc + textCost(part.text);
+        } else {
+          if (!part.image_url.detail || part.image_url.detail === "low") {
+            return acc + 85;
+          } else {
+            return acc + (170 * 4 + 85);
+          }
+        }
+      }, 0);
+  }
 }
-var jsxs = jsx;
 
 // src/lib/openai/OpenAI.tsx
 var defaultClient = null;
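The image costs hard-coded above are worth spelling out: each message costs a flat 3 tokens plus its tokenized text, and an image part costs 85 tokens when `detail` is missing or `"low"`, but a fixed 170 * 4 + 85 = 765 tokens for any other detail setting (including `"auto"`), i.e. a four-tile estimate rather than a size-based tile count. For example:

```ts
// Worked example against the constants in the hunk above.
tokenCountForOpenAIVisionMessage({
  role: "user",
  content: [
    { type: "text", text: "Describe this:" }, // 3 + encode("Describe this:").length
    { type: "image_url", image_url: { url: "https://example.com/cat.png", detail: "high" } }, // + 765
  ],
});
```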
@@ -651,35 +782,60 @@ var OpenAIClientContext = createContext(() => {
   defaultClient = new import_openai.OpenAI({ apiKey });
   return defaultClient;
 });
+function buildOpenAIMessages(childrenXml) {
+  const messages = [];
+  const chatMessageTags2 = ["UserMessage", "AssistantMessage", "SystemMessage"];
+  const parsed = parseXml(childrenXml).collapse(chatMessageTags2);
+  const topLevelValid = parsed.childNodes.every(
+    (node) => chatMessageTags2.includes(node.nodeName)
+  );
+  if (!topLevelValid) {
+    throw new Error("Invalid top level chat message tags");
+  }
+  for (const node of parsed.childNodes) {
+    if (node.nodeName === "UserMessage") {
+      messages.push({
+        content: node.textContent,
+        role: "user"
+      });
+    } else if (node.nodeName === "AssistantMessage") {
+      messages.push({
+        content: node.textContent,
+        role: "assistant"
+      });
+    } else if (node.nodeName === "SystemMessage") {
+      messages.push({
+        content: node.textContent,
+        role: "system"
+      });
+    }
+  }
+  return messages;
+}
 async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
   const startTime = performance.now();
   const client = getContext(OpenAIClientContext)();
   if (!client) {
     throw new Error("[OpenAI] must supply OpenAI model via context");
   }
-  const
-
-
-    ...message,
-    content: await render(message.element)
-  };
-  return {
-    ...partiallyRendered,
-    tokens: tokenCountForConversationMessage(partiallyRendered)
-  };
+  const openAIMessages = buildOpenAIMessages(
+    await render(props.children, {
+      preserveTags: true
     })
   );
-  const
+  const renderedMessages = openAIMessages.map((message) => {
     return {
-
-
+      role: message.role,
+      // TODO support gpt4 vision
+      content: message.content,
+      tokens: tokenCountForOpenAIMessage(message)
     };
   });
   const chatCompletionRequest = {
     model: props.model,
     max_tokens: props.maxTokens,
     temperature: props.temperature,
-    messages:
+    messages: openAIMessages,
     stream: true
   };
   const logRequestData = {
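`buildOpenAIMessages` thus completes a round trip: JSX children are rendered to an XML string with `preserveTags: true`, then parsed back into role-tagged chat messages. An inferred example:

```ts
// Inferred from the function body above.
buildOpenAIMessages(
  "<SystemMessage>Be terse.</SystemMessage><UserMessage>Hi</UserMessage>"
);
// → [
//     { content: "Be terse.", role: "system" },
//     { content: "Hi", role: "user" },
//   ]
```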
@@ -721,11 +877,10 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
     }
   }
   const outputMessage = {
-
-    element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
+    role: "assistant",
     content,
-    tokens:
-
+    tokens: tokenCountForOpenAIMessage({
+      role: "assistant",
       content
     })
   };
@@ -739,12 +894,176 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
   logger.chatCompletionResponse("openai", responseData);
 }
 
-// src/lib/openai/
+// src/lib/openai/OpenAIVision.tsx
 var import_openai2 = require("openai");
+var DEFAULT_MODEL = "gpt-4-vision-preview";
+var ContentTypeImage = (_props) => {
+  return null;
+};
+function buildOpenAIVisionChatMessages(childrenXml) {
+  const messages = [];
+  const chatMessageTags2 = [
+    "UserMessage",
+    "AssistantMessage",
+    "SystemMessage",
+    "ContentTypeImage"
+  ];
+  const parsed = parseXml(childrenXml).collapse(chatMessageTags2);
+  const topLevelValid = parsed.childNodes.every(
+    (node) => chatMessageTags2.includes(node.nodeName)
+  );
+  if (!topLevelValid) {
+    throw new Error("Invalid top level chat message tags");
+  }
+  for (const node of parsed.childNodes) {
+    if (node.nodeName === "UserMessage") {
+      const parts = node.childNodes.map((n) => {
+        if (n.nodeName === "#text") {
+          return {
+            type: "text",
+            text: n.value
+          };
+        } else if (n.nodeName === "ContentTypeImage") {
+          return {
+            type: "image_url",
+            image_url: {
+              url: n.attributes.url,
+              detail: n.attributes.detail || "auto"
+            }
+          };
+        }
+        throw new Error(
+          "Invalid ChatCompletionContentPart, expecting text or ContentTypeImage"
+        );
+      });
+      messages.push({
+        content: parts,
+        role: "user"
+      });
+    } else if (node.nodeName === "AssistantMessage") {
+      messages.push({
+        content: node.textContent,
+        role: "assistant"
+      });
+    } else if (node.nodeName === "SystemMessage") {
+      messages.push({
+        content: node.textContent,
+        role: "system"
+      });
+    }
+  }
+  return messages;
+}
+async function* OpenAIVisionChatCompletion(props, { logger, render, getContext }) {
+  const startTime = performance.now();
+  const model = props.model || DEFAULT_MODEL;
+  const client = getContext(OpenAIClientContext)();
+  if (!client) {
+    throw new Error("[OpenAI] must supply OpenAI model via context");
+  }
+  const openAIMessages = buildOpenAIVisionChatMessages(
+    await render(props.children, {
+      preserveTags: true,
+      renderedProps: {
+        ContentTypeImage: {
+          url: true,
+          detail: true
+        }
+      }
+    })
+  );
+  const renderedMessages = openAIMessages.map((message) => {
+    const renderContent = (content2) => {
+      if (content2 == null) {
+        return "";
+      }
+      if (typeof content2 === "string") {
+        return content2;
+      }
+      return content2.map((part) => {
+        if (part.type === "text") {
+          return part.text;
+        } else if (part.type === "image_url") {
+          return `<ContentTypeImage url="${part.image_url.url}" detail="${part.image_url.detail || "auto"}" />`;
+        }
+        throw new Error("Invalid ChatCompletionContentPart type");
+      }).join(" ");
+    };
+    return {
+      role: message.role,
+      content: renderContent(message.content),
+      tokens: tokenCountForOpenAIVisionMessage(message)
+    };
+  });
+  const chatCompletionRequest = {
+    model,
+    max_tokens: props.maxTokens,
+    temperature: props.temperature,
+    messages: openAIMessages,
+    stream: true
+  };
+  const logRequestData = {
+    startTime,
+    model,
+    provider: props.provider,
+    providerRegion: props.providerRegion,
+    inputMessages: renderedMessages,
+    request: chatCompletionRequest
+  };
+  logger.chatCompletionRequest("openai", logRequestData);
+  let chatResponse;
+  try {
+    chatResponse = await client.chat.completions.create(chatCompletionRequest);
+  } catch (ex) {
+    if (ex instanceof import_openai2.OpenAI.APIError) {
+      throw new ChatCompletionError(
+        `OpenAIClient.APIError: ${ex.message}`,
+        logRequestData
+      );
+    } else if (ex instanceof Error) {
+      throw new ChatCompletionError(ex.message, logRequestData);
+    }
+    throw ex;
+  }
+  let finishReason = void 0;
+  let content = "";
+  for await (const message of chatResponse) {
+    if (!message.choices || !message.choices[0]) {
+      continue;
+    }
+    const delta = message.choices[0].delta;
+    if (message.choices[0].finish_reason) {
+      finishReason = message.choices[0].finish_reason;
+    }
+    if (delta.content) {
+      content += delta.content;
+      yield delta.content;
+    }
+  }
+  const outputMessage = {
+    role: "assistant",
+    content,
+    tokens: tokenCountForOpenAIMessage({
+      role: "assistant",
+      content
+    })
+  };
+  const responseData = {
+    ...logRequestData,
+    finishReason,
+    latency: performance.now() - startTime,
+    outputMessage,
+    tokensUsed: computeUsage([...renderedMessages, outputMessage])
+  };
+  logger.chatCompletionResponse("openai", responseData);
+}
+
+// src/lib/openai/index.ts
+var import_openai3 = require("openai");
 
 // src/lib/anthropic/Anthropic.tsx
 var import_sdk = __toESM(require("@anthropic-ai/sdk"));
-var
+var import_tokenizer3 = require("@anthropic-ai/tokenizer");
 var defaultClient2 = null;
 var AnthropicClientContext = createContext(
   () => {
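Putting the new module together, a plausible usage sketch (component names, props, and the `gpt-4-vision-preview` default are from this diff; the JSX wiring is an assumption based on the package's existing runtime):

```tsx
// Hypothetical usage; model defaults to DEFAULT_MODEL ("gpt-4-vision-preview").
const completion = (
  <OpenAIVisionChatCompletion maxTokens={256}>
    <SystemMessage>You describe images.</SystemMessage>
    <UserMessage>
      What is in this picture?
      <ContentTypeImage url="https://example.com/cat.png" detail="low" />
    </UserMessage>
  </OpenAIVisionChatCompletion>
);
```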
@@ -758,6 +1077,48 @@ var AnthropicClientContext = createContext(
   }
 );
 var defaultMaxTokens = 4096;
+var chatMessageTags = ["UserMessage", "AssistantMessage", "SystemMessage"];
+function buildChatMessages(childrenXml) {
+  const messages = [];
+  const parsed = parseXml(childrenXml).collapse(chatMessageTags);
+  const topLevelValid = parsed.childNodes.every(
+    (node) => chatMessageTags.includes(node.nodeName)
+  );
+  if (!topLevelValid) {
+    throw new Error("Invalid top level chat message tags");
+  }
+  for (const node of parsed.childNodes) {
+    if (node.nodeName === "UserMessage") {
+      const content = `${import_sdk.default.HUMAN_PROMPT} ${node.textContent}`;
+      messages.push({
+        role: "user",
+        content,
+        tokens: (0, import_tokenizer3.countTokens)(content)
+      });
+    } else if (node.nodeName === "AssistantMessage") {
+      const content = `${import_sdk.default.AI_PROMPT} ${node.textContent}`;
+      messages.push({
+        role: "assistant",
+        content,
+        tokens: (0, import_tokenizer3.countTokens)(content)
+      });
+    } else if (node.nodeName === "SystemMessage") {
+      const userContent = `${import_sdk.default.HUMAN_PROMPT} For subsequent replies you will adhere to the following instructions: ${node.textContent}`;
+      messages.push({
+        role: "user",
+        content: userContent,
+        tokens: (0, import_tokenizer3.countTokens)(userContent)
+      });
+      const assistantContent = `${import_sdk.default.AI_PROMPT} Okay, I will do that.`;
+      messages.push({
+        role: "assistant",
+        content: assistantContent,
+        tokens: (0, import_tokenizer3.countTokens)(assistantContent)
+      });
+    }
+  }
+  return messages;
+}
 async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
   const startTime = performance.now();
   const client = getContext(AnthropicClientContext)();
@@ -766,41 +1127,17 @@ async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
       "[AnthropicChatCompletion] must supply AnthropicClient via context"
     );
   }
-  const
-
-
-      return [
-        {
-          type: "user",
-          element: /* @__PURE__ */ jsxs(UserMessage, { children: [
-            "For subsequent replies you will adhere to the following instructions: ",
-            message.element
-          ] })
-        },
-        {
-          type: "assistant",
-          element: /* @__PURE__ */ jsx(AssistantMessage, { children: "Okay, I will do that." })
-        }
-      ];
-    }
-    return [message];
-  }).map(async (message) => {
-    const prefix = message.type === "user" ? import_sdk.default.HUMAN_PROMPT : import_sdk.default.AI_PROMPT;
-    const rendered = await render(message.element);
-    const content2 = `${prefix} ${rendered.trim()}`;
-    return {
-      ...message,
-      content: content2,
-      tokens: (0, import_tokenizer2.countTokens)(content2)
-    };
+  const inputMessages = buildChatMessages(
+    await render(props.children, {
+      preserveTags: true
     })
   );
-  const
-
-
-
+  const prompt = [
+    ...inputMessages.map((message) => message.content),
+    import_sdk.default.AI_PROMPT
+  ].join("");
   const anthropicCompletionRequest = {
-    prompt
+    prompt,
     max_tokens_to_sample: props.maxTokens ?? defaultMaxTokens,
     temperature: props.temperature,
     model: props.model,
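Note how `SystemMessage` is emulated for Anthropic above: it becomes a Human turn carrying the instructions plus a canned Assistant acknowledgement, and the final prompt is simply the concatenated message contents with a trailing `AI_PROMPT`. Assuming the standard SDK constants (`HUMAN_PROMPT` = `"\n\nHuman:"`, `AI_PROMPT` = `"\n\nAssistant:"`), a system message "Be terse." followed by a user message "Hi" renders to roughly:

```ts
// Inferred prompt string for the request above.
const prompt =
  "\n\nHuman: For subsequent replies you will adhere to the following instructions: Be terse." +
  "\n\nAssistant: Okay, I will do that." +
  "\n\nHuman: Hi" +
  "\n\nAssistant:";
```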
@@ -811,7 +1148,7 @@ async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
     model: props.model,
     provider: props.provider,
     providerRegion: props.providerRegion,
-    inputMessages
+    inputMessages,
     request: anthropicCompletionRequest
   };
   logger.chatCompletionRequest("anthropic", logRequestData);
@@ -843,24 +1180,23 @@ async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
     yield text;
   }
   const outputMessage = {
-
-    element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
+    role: "assistant",
     content,
-    tokens: (0,
+    tokens: (0, import_tokenizer3.countTokens)(content)
   };
   const responseData = {
     ...logRequestData,
     finishReason: "stop",
     latency: performance.now() - startTime,
     outputMessage,
-    tokensUsed: computeUsage([...
+    tokensUsed: computeUsage([...inputMessages, outputMessage])
   };
   logger.chatCompletionResponse("anthropic", responseData);
 }
 
 // src/lib/anthropic/index.ts
 var import_sdk2 = __toESM(require("@anthropic-ai/sdk"));
-var
+var import_tokenizer4 = require("@anthropic-ai/tokenizer");
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   AIFragment,
@@ -872,23 +1208,25 @@ var import_tokenizer3 = require("@anthropic-ai/tokenizer");
   ChatCompletionError,
   CombinedLogger,
   ConsoleLogger,
+  ContentTypeImage,
   LogImplementation,
   LoggerContext,
   NoopLogImplementation,
   OpenAIChatCompletion,
   OpenAIClient,
   OpenAIClientContext,
+  OpenAIVisionChatCompletion,
   SystemMessage,
   UserMessage,
   attachedContextSymbol,
-  childrenToConversationMessage,
   computeUsage,
   countAnthropicTokens,
   createAIElement,
   createContext,
   createRenderContext,
   defaultMaxTokens,
-
+  tokenCountForOpenAIMessage,
+  tokenCountForOpenAIVisionMessage,
   tokenLimitForChatModel,
   tokenizer
 });