@gammatech/aijsx 0.5.0-dev.2024-03-13.2 → 0.5.0-dev.2024-03-14

This diff represents the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -135,7 +135,7 @@ declare module '@gammatech/aijsx' {
135
135
  openai: OpenAIChatCompletionRequest;
136
136
  }
137
137
  }
138
- type ValidOpenAIChatModel = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
138
+ type ValidOpenAIChatModel = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
139
139
  declare const OpenAIClientContext: Context<() => ChatCompletionClientAndProvider<OpenAI>>;
140
140
  type OpenAIChatCompletionProps = {
141
141
  model: ValidOpenAIChatModel;
package/dist/index.d.ts CHANGED
@@ -135,7 +135,7 @@ declare module '@gammatech/aijsx' {
135
135
  openai: OpenAIChatCompletionRequest;
136
136
  }
137
137
  }
138
- type ValidOpenAIChatModel = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
138
+ type ValidOpenAIChatModel = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
139
139
  declare const OpenAIClientContext: Context<() => ChatCompletionClientAndProvider<OpenAI>>;
140
140
  type OpenAIChatCompletionProps = {
141
141
  model: ValidOpenAIChatModel;
package/dist/index.js CHANGED
@@ -1053,7 +1053,10 @@ var XmlNode = class {
1053
1053
  return this;
1054
1054
  }
1055
1055
  if (this.childNodes) {
1056
- this.childNodes.forEach((n) => n.collapse(nodes));
1056
+ const originalNodes = [...this.childNodes];
1057
+ originalNodes.forEach((n) => {
1058
+ return n.collapse(nodes);
1059
+ });
1057
1060
  }
1058
1061
  if (
1059
1062
  // is the root node
@@ -1065,6 +1068,7 @@ var XmlNode = class {
1065
1068
  const parent = this.parent;
1066
1069
  const index = parent.childNodes.indexOf(this);
1067
1070
  parent.childNodes.splice(index, 1, ...this.childNodes);
1071
+ this.childNodes.forEach((n) => n.parent = parent);
1068
1072
  return this;
1069
1073
  }
1070
1074
  };
@@ -1649,6 +1653,7 @@ function tokenLimitForChatModel(model) {
1649
1653
  case "gpt-4-32k-0613":
1650
1654
  return 32768 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
1651
1655
  case "gpt-4-1106-preview":
1656
+ case "gpt-4-0125-preview":
1652
1657
  return 128e3 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
1653
1658
  case "gpt-3.5-turbo":
1654
1659
  case "gpt-3.5-turbo-0301":
@@ -2142,6 +2147,7 @@ function buildAnthropicMessages(childrenXml) {
2142
2147
  }
2143
2148
  for (const node of parsed.childNodes) {
2144
2149
  if (node.nodeName === "UserMessage") {
2150
+ console.log(node.childNodes);
2145
2151
  if (node.childNodes?.length === 1 && node.childNodes[0].nodeName === "#text") {
2146
2152
  messages.push({
2147
2153
  content: node.childNodes[0].value,
package/dist/index.mjs CHANGED
@@ -962,7 +962,10 @@ var XmlNode = class {
962
962
  return this;
963
963
  }
964
964
  if (this.childNodes) {
965
- this.childNodes.forEach((n) => n.collapse(nodes));
965
+ const originalNodes = [...this.childNodes];
966
+ originalNodes.forEach((n) => {
967
+ return n.collapse(nodes);
968
+ });
966
969
  }
967
970
  if (
968
971
  // is the root node
@@ -974,6 +977,7 @@ var XmlNode = class {
974
977
  const parent = this.parent;
975
978
  const index = parent.childNodes.indexOf(this);
976
979
  parent.childNodes.splice(index, 1, ...this.childNodes);
980
+ this.childNodes.forEach((n) => n.parent = parent);
977
981
  return this;
978
982
  }
979
983
  };
@@ -1551,6 +1555,7 @@ function tokenLimitForChatModel(model) {
1551
1555
  case "gpt-4-32k-0613":
1552
1556
  return 32768 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
1553
1557
  case "gpt-4-1106-preview":
1558
+ case "gpt-4-0125-preview":
1554
1559
  return 128e3 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
1555
1560
  case "gpt-3.5-turbo":
1556
1561
  case "gpt-3.5-turbo-0301":
@@ -2044,6 +2049,7 @@ function buildAnthropicMessages(childrenXml) {
2044
2049
  }
2045
2050
  for (const node of parsed.childNodes) {
2046
2051
  if (node.nodeName === "UserMessage") {
2052
+ console.log(node.childNodes);
2047
2053
  if (node.childNodes?.length === 1 && node.childNodes[0].nodeName === "#text") {
2048
2054
  messages.push({
2049
2055
  content: node.childNodes[0].value,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@gammatech/aijsx",
3
- "version": "0.5.0-dev.2024-03-13.2",
3
+ "version": "0.5.0-dev.2024-03-14",
4
4
  "description": "Rewrite of aijsx",
5
5
  "author": "Jordan Garcia",
6
6
  "license": "MIT",