@gammatech/aijsx 0.2.0-beta.1 → 0.2.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -31,6 +31,21 @@ type OpenAIChatCompletionProps = {
  };
  declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, { logger, render, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;

+ type ValidOpenAIVisionModel = 'gpt-4-vision-preview';
+ declare const ContentTypeImage: (_props: {
+ url: string;
+ detail?: 'auto' | 'high' | 'low';
+ }) => null;
+ type OpenAIVisionChatCompletionProps = {
+ model?: ValidOpenAIVisionModel;
+ maxTokens?: number;
+ temperature?: number;
+ children: AINode;
+ provider?: string;
+ providerRegion?: string;
+ };
+ declare function OpenAIVisionChatCompletion(props: OpenAIVisionChatCompletionProps, { logger, render, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;
+
  declare const tokenizer: {
  encode: (text: string) => number[];
  decode: (tokens: number[]) => string;
@@ -72,4 +87,4 @@ type AnthropicChatCompletionProps = {
  */
  declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, { render, logger, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;

- export { AINode, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, RenderContext, type ValidAnthropicChatModel, type ValidOpenAIChatModel, createRenderContext, defaultMaxTokens, tokenCountForOpenAIMessage, tokenCountForOpenAIVisionMessage, tokenLimitForChatModel, tokenizer };
+ export { AINode, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, ContentTypeImage, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, OpenAIVisionChatCompletion, RenderContext, type ValidAnthropicChatModel, type ValidOpenAIChatModel, type ValidOpenAIVisionModel, createRenderContext, defaultMaxTokens, tokenCountForOpenAIMessage, tokenCountForOpenAIVisionMessage, tokenLimitForChatModel, tokenizer };
package/dist/index.d.ts CHANGED
@@ -31,6 +31,21 @@ type OpenAIChatCompletionProps = {
  };
  declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, { logger, render, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;

+ type ValidOpenAIVisionModel = 'gpt-4-vision-preview';
+ declare const ContentTypeImage: (_props: {
+ url: string;
+ detail?: 'auto' | 'high' | 'low';
+ }) => null;
+ type OpenAIVisionChatCompletionProps = {
+ model?: ValidOpenAIVisionModel;
+ maxTokens?: number;
+ temperature?: number;
+ children: AINode;
+ provider?: string;
+ providerRegion?: string;
+ };
+ declare function OpenAIVisionChatCompletion(props: OpenAIVisionChatCompletionProps, { logger, render, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;
+
  declare const tokenizer: {
  encode: (text: string) => number[];
  decode: (tokens: number[]) => string;
@@ -72,4 +87,4 @@ type AnthropicChatCompletionProps = {
  */
  declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, { render, logger, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;

- export { AINode, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, RenderContext, type ValidAnthropicChatModel, type ValidOpenAIChatModel, createRenderContext, defaultMaxTokens, tokenCountForOpenAIMessage, tokenCountForOpenAIVisionMessage, tokenLimitForChatModel, tokenizer };
+ export { AINode, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, ContentTypeImage, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, OpenAIVisionChatCompletion, RenderContext, type ValidAnthropicChatModel, type ValidOpenAIChatModel, type ValidOpenAIVisionModel, createRenderContext, defaultMaxTokens, tokenCountForOpenAIMessage, tokenCountForOpenAIVisionMessage, tokenLimitForChatModel, tokenizer };
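The declaration changes above add an image-capable completion component next to the existing chat completion. The following is a rough usage sketch, not taken from the package: it assumes aijsx's JSX runtime is configured, uses the UserMessage/SystemMessage components that dist/index.js exports, and guesses at the render entry point behind createRenderContext, which may differ in practice.

// Illustrative sketch only — JSX wiring and the render call are assumptions.
import {
  ContentTypeImage,
  OpenAIVisionChatCompletion,
  SystemMessage,
  UserMessage,
  createRenderContext,
} from '@gammatech/aijsx';

const app = (
  <OpenAIVisionChatCompletion model="gpt-4-vision-preview" maxTokens={512} temperature={0}>
    <SystemMessage>Describe images concisely.</SystemMessage>
    <UserMessage>
      What is shown in this picture?
      <ContentTypeImage url="https://example.com/photo.jpg" detail="low" />
    </UserMessage>
  </OpenAIVisionChatCompletion>
);

// The exact signature of the render entry point is assumed here.
const output = await createRenderContext().render(app);
console.log(output);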
package/dist/index.js CHANGED
@@ -38,17 +38,19 @@ __export(src_exports, {
  ChatCompletionError: () => ChatCompletionError,
  CombinedLogger: () => CombinedLogger,
  ConsoleLogger: () => ConsoleLogger,
+ ContentTypeImage: () => ContentTypeImage,
  LogImplementation: () => LogImplementation,
  LoggerContext: () => LoggerContext,
  NoopLogImplementation: () => NoopLogImplementation,
  OpenAIChatCompletion: () => OpenAIChatCompletion,
- OpenAIClient: () => import_openai2.OpenAI,
+ OpenAIClient: () => import_openai3.OpenAI,
  OpenAIClientContext: () => OpenAIClientContext,
+ OpenAIVisionChatCompletion: () => OpenAIVisionChatCompletion,
  SystemMessage: () => SystemMessage,
  UserMessage: () => UserMessage,
  attachedContextSymbol: () => attachedContextSymbol,
  computeUsage: () => computeUsage,
- countAnthropicTokens: () => import_tokenizer3.countTokens,
+ countAnthropicTokens: () => import_tokenizer4.countTokens,
  createAIElement: () => createAIElement,
  createContext: () => createContext,
  createRenderContext: () => createRenderContext,
@@ -390,7 +392,7 @@ function parseXml(input) {
  return constructNode(null, parsed[0]);
  }
  function escape(html) {
- return html.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;").replace(/"/g, "&quot;").replace(/'/g, "&#039;");
+ return html.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;").replace(/"/g, "&quot;");
  }

  // src/context.ts
@@ -892,12 +894,176 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
  logger.chatCompletionResponse("openai", responseData);
  }

- // src/lib/openai/index.ts
+ // src/lib/openai/OpenAIVision.tsx
  var import_openai2 = require("openai");
+ var DEFAULT_MODEL = "gpt-4-vision-preview";
+ var ContentTypeImage = (_props) => {
+ return null;
+ };
+ function buildOpenAIVisionChatMessages(childrenXml) {
+ const messages = [];
+ const chatMessageTags2 = [
+ "UserMessage",
+ "AssistantMessage",
+ "SystemMessage",
+ "ContentTypeImage"
+ ];
+ const parsed = parseXml(childrenXml).collapse(chatMessageTags2);
+ const topLevelValid = parsed.childNodes.every(
+ (node) => chatMessageTags2.includes(node.nodeName)
+ );
+ if (!topLevelValid) {
+ throw new Error("Invalid top level chat message tags");
+ }
+ for (const node of parsed.childNodes) {
+ if (node.nodeName === "UserMessage") {
+ const parts = node.childNodes.map((n) => {
+ if (n.nodeName === "#text") {
+ return {
+ type: "text",
+ text: n.value
+ };
+ } else if (n.nodeName === "ContentTypeImage") {
+ return {
+ type: "image_url",
+ image_url: {
+ url: n.attributes.url,
+ detail: n.attributes.detail || "auto"
+ }
+ };
+ }
+ throw new Error(
+ "Invalid ChatCompletionContentPart, expecting text or ContentTypeImage"
+ );
+ });
+ messages.push({
+ content: parts,
+ role: "user"
+ });
+ } else if (node.nodeName === "AssistantMessage") {
+ messages.push({
+ content: node.textContent,
+ role: "assistant"
+ });
+ } else if (node.nodeName === "SystemMessage") {
+ messages.push({
+ content: node.textContent,
+ role: "system"
+ });
+ }
+ }
+ return messages;
+ }
+ async function* OpenAIVisionChatCompletion(props, { logger, render, getContext }) {
+ const startTime = performance.now();
+ const model = props.model || DEFAULT_MODEL;
+ const client = getContext(OpenAIClientContext)();
+ if (!client) {
+ throw new Error("[OpenAI] must supply OpenAI model via context");
+ }
+ const openAIMessages = buildOpenAIVisionChatMessages(
+ await render(props.children, {
+ preserveTags: true,
+ renderedProps: {
+ ContentTypeImage: {
+ url: true,
+ detail: true
+ }
+ }
+ })
+ );
+ const renderedMessages = openAIMessages.map((message) => {
+ const renderContent = (content2) => {
+ if (content2 == null) {
+ return "";
+ }
+ if (typeof content2 === "string") {
+ return content2;
+ }
+ return content2.map((part) => {
+ if (part.type === "text") {
+ return part.text;
+ } else if (part.type === "image_url") {
+ return `<ContentTypeImage url="${part.image_url.url}" detail="${part.image_url.detail || "auto"}" />`;
+ }
+ throw new Error("Invalid ChatCompletionContentPart type");
+ }).join(" ");
+ };
+ return {
+ role: message.role,
+ content: renderContent(message.content),
+ tokens: tokenCountForOpenAIVisionMessage(message)
+ };
+ });
+ const chatCompletionRequest = {
+ model,
+ max_tokens: props.maxTokens,
+ temperature: props.temperature,
+ messages: openAIMessages,
+ stream: true
+ };
+ const logRequestData = {
+ startTime,
+ model,
+ provider: props.provider,
+ providerRegion: props.providerRegion,
+ inputMessages: renderedMessages,
+ request: chatCompletionRequest
+ };
+ logger.chatCompletionRequest("openai", logRequestData);
+ let chatResponse;
+ try {
+ chatResponse = await client.chat.completions.create(chatCompletionRequest);
+ } catch (ex) {
+ if (ex instanceof import_openai2.OpenAI.APIError) {
+ throw new ChatCompletionError(
+ `OpenAIClient.APIError: ${ex.message}`,
+ logRequestData
+ );
+ } else if (ex instanceof Error) {
+ throw new ChatCompletionError(ex.message, logRequestData);
+ }
+ throw ex;
+ }
+ let finishReason = void 0;
+ let content = "";
+ for await (const message of chatResponse) {
+ if (!message.choices || !message.choices[0]) {
+ continue;
+ }
+ const delta = message.choices[0].delta;
+ if (message.choices[0].finish_reason) {
+ finishReason = message.choices[0].finish_reason;
+ }
+ if (delta.content) {
+ content += delta.content;
+ yield delta.content;
+ }
+ }
+ const outputMessage = {
+ role: "assistant",
+ content,
+ tokens: tokenCountForOpenAIMessage({
+ role: "assistant",
+ content
+ })
+ };
+ const responseData = {
+ ...logRequestData,
+ finishReason,
+ latency: performance.now() - startTime,
+ outputMessage,
+ tokensUsed: computeUsage([...renderedMessages, outputMessage])
+ };
+ logger.chatCompletionResponse("openai", responseData);
+ }
+
+ // src/lib/openai/index.ts
+ var import_openai3 = require("openai");

  // src/lib/anthropic/Anthropic.tsx
  var import_sdk = __toESM(require("@anthropic-ai/sdk"));
- var import_tokenizer2 = require("@anthropic-ai/tokenizer");
+ var import_tokenizer3 = require("@anthropic-ai/tokenizer");
  var defaultClient2 = null;
  var AnthropicClientContext = createContext(
  () => {
@@ -927,27 +1093,27 @@ function buildChatMessages(childrenXml) {
  messages.push({
  role: "user",
  content,
- tokens: (0, import_tokenizer2.countTokens)(content)
+ tokens: (0, import_tokenizer3.countTokens)(content)
  });
  } else if (node.nodeName === "AssistantMessage") {
  const content = `${import_sdk.default.AI_PROMPT} ${node.textContent}`;
  messages.push({
  role: "assistant",
  content,
- tokens: (0, import_tokenizer2.countTokens)(content)
+ tokens: (0, import_tokenizer3.countTokens)(content)
  });
  } else if (node.nodeName === "SystemMessage") {
  const userContent = `${import_sdk.default.HUMAN_PROMPT} For subsequent replies you will adhere to the following instructions: ${node.textContent}`;
  messages.push({
  role: "user",
  content: userContent,
- tokens: (0, import_tokenizer2.countTokens)(userContent)
+ tokens: (0, import_tokenizer3.countTokens)(userContent)
  });
  const assistantContent = `${import_sdk.default.AI_PROMPT} Okay, I will do that.`;
  messages.push({
  role: "assistant",
  content: assistantContent,
- tokens: (0, import_tokenizer2.countTokens)(assistantContent)
+ tokens: (0, import_tokenizer3.countTokens)(assistantContent)
  });
  }
  }
@@ -1016,7 +1182,7 @@ async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
  const outputMessage = {
  role: "assistant",
  content,
- tokens: (0, import_tokenizer2.countTokens)(content)
+ tokens: (0, import_tokenizer3.countTokens)(content)
  };
  const responseData = {
  ...logRequestData,
@@ -1030,7 +1196,7 @@ async function* AnthropicChatCompletion(props, { render, logger, getContext }) {

  // src/lib/anthropic/index.ts
  var import_sdk2 = __toESM(require("@anthropic-ai/sdk"));
- var import_tokenizer3 = require("@anthropic-ai/tokenizer");
+ var import_tokenizer4 = require("@anthropic-ai/tokenizer");
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  AIFragment,
@@ -1042,12 +1208,14 @@ var import_tokenizer3 = require("@anthropic-ai/tokenizer");
  ChatCompletionError,
  CombinedLogger,
  ConsoleLogger,
+ ContentTypeImage,
  LogImplementation,
  LoggerContext,
  NoopLogImplementation,
  OpenAIChatCompletion,
  OpenAIClient,
  OpenAIClientContext,
+ OpenAIVisionChatCompletion,
  SystemMessage,
  UserMessage,
  attachedContextSymbol,
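For orientation, the buildOpenAIVisionChatMessages logic added above keeps text nodes inside a UserMessage as text parts and turns ContentTypeImage tags into image_url parts. A sketch of the message array it would produce for the usage example earlier (values illustrative, shape mirrored from the code in this diff):

// Illustrative shape only, mirroring the mapping in buildOpenAIVisionChatMessages.
const messages = [
  { role: "system", content: "Describe images concisely." },
  {
    role: "user",
    content: [
      { type: "text", text: "What is shown in this picture?" },
      // detail falls back to "auto" when the ContentTypeImage prop is omitted
      { type: "image_url", image_url: { url: "https://example.com/photo.jpg", detail: "low" } },
    ],
  },
];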
package/dist/index.mjs CHANGED
@@ -309,7 +309,7 @@ function parseXml(input) {
  return constructNode(null, parsed[0]);
  }
  function escape(html) {
- return html.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;").replace(/"/g, "&quot;").replace(/'/g, "&#039;");
+ return html.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;").replace(/"/g, "&quot;");
  }

  // src/context.ts
@@ -811,8 +811,172 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
  logger.chatCompletionResponse("openai", responseData);
  }

- // src/lib/openai/index.ts
+ // src/lib/openai/OpenAIVision.tsx
  import { OpenAI as OpenAIClient2 } from "openai";
+ var DEFAULT_MODEL = "gpt-4-vision-preview";
+ var ContentTypeImage = (_props) => {
+ return null;
+ };
+ function buildOpenAIVisionChatMessages(childrenXml) {
+ const messages = [];
+ const chatMessageTags2 = [
+ "UserMessage",
+ "AssistantMessage",
+ "SystemMessage",
+ "ContentTypeImage"
+ ];
+ const parsed = parseXml(childrenXml).collapse(chatMessageTags2);
+ const topLevelValid = parsed.childNodes.every(
+ (node) => chatMessageTags2.includes(node.nodeName)
+ );
+ if (!topLevelValid) {
+ throw new Error("Invalid top level chat message tags");
+ }
+ for (const node of parsed.childNodes) {
+ if (node.nodeName === "UserMessage") {
+ const parts = node.childNodes.map((n) => {
+ if (n.nodeName === "#text") {
+ return {
+ type: "text",
+ text: n.value
+ };
+ } else if (n.nodeName === "ContentTypeImage") {
+ return {
+ type: "image_url",
+ image_url: {
+ url: n.attributes.url,
+ detail: n.attributes.detail || "auto"
+ }
+ };
+ }
+ throw new Error(
+ "Invalid ChatCompletionContentPart, expecting text or ContentTypeImage"
+ );
+ });
+ messages.push({
+ content: parts,
+ role: "user"
+ });
+ } else if (node.nodeName === "AssistantMessage") {
+ messages.push({
+ content: node.textContent,
+ role: "assistant"
+ });
+ } else if (node.nodeName === "SystemMessage") {
+ messages.push({
+ content: node.textContent,
+ role: "system"
+ });
+ }
+ }
+ return messages;
+ }
+ async function* OpenAIVisionChatCompletion(props, { logger, render, getContext }) {
+ const startTime = performance.now();
+ const model = props.model || DEFAULT_MODEL;
+ const client = getContext(OpenAIClientContext)();
+ if (!client) {
+ throw new Error("[OpenAI] must supply OpenAI model via context");
+ }
+ const openAIMessages = buildOpenAIVisionChatMessages(
+ await render(props.children, {
+ preserveTags: true,
+ renderedProps: {
+ ContentTypeImage: {
+ url: true,
+ detail: true
+ }
+ }
+ })
+ );
+ const renderedMessages = openAIMessages.map((message) => {
+ const renderContent = (content2) => {
+ if (content2 == null) {
+ return "";
+ }
+ if (typeof content2 === "string") {
+ return content2;
+ }
+ return content2.map((part) => {
+ if (part.type === "text") {
+ return part.text;
+ } else if (part.type === "image_url") {
+ return `<ContentTypeImage url="${part.image_url.url}" detail="${part.image_url.detail || "auto"}" />`;
+ }
+ throw new Error("Invalid ChatCompletionContentPart type");
+ }).join(" ");
+ };
+ return {
+ role: message.role,
+ content: renderContent(message.content),
+ tokens: tokenCountForOpenAIVisionMessage(message)
+ };
+ });
+ const chatCompletionRequest = {
+ model,
+ max_tokens: props.maxTokens,
+ temperature: props.temperature,
+ messages: openAIMessages,
+ stream: true
+ };
+ const logRequestData = {
+ startTime,
+ model,
+ provider: props.provider,
+ providerRegion: props.providerRegion,
+ inputMessages: renderedMessages,
+ request: chatCompletionRequest
+ };
+ logger.chatCompletionRequest("openai", logRequestData);
+ let chatResponse;
+ try {
+ chatResponse = await client.chat.completions.create(chatCompletionRequest);
+ } catch (ex) {
+ if (ex instanceof OpenAIClient2.APIError) {
+ throw new ChatCompletionError(
+ `OpenAIClient.APIError: ${ex.message}`,
+ logRequestData
+ );
+ } else if (ex instanceof Error) {
+ throw new ChatCompletionError(ex.message, logRequestData);
+ }
+ throw ex;
+ }
+ let finishReason = void 0;
+ let content = "";
+ for await (const message of chatResponse) {
+ if (!message.choices || !message.choices[0]) {
+ continue;
+ }
+ const delta = message.choices[0].delta;
+ if (message.choices[0].finish_reason) {
+ finishReason = message.choices[0].finish_reason;
+ }
+ if (delta.content) {
+ content += delta.content;
+ yield delta.content;
+ }
+ }
+ const outputMessage = {
+ role: "assistant",
+ content,
+ tokens: tokenCountForOpenAIMessage({
+ role: "assistant",
+ content
+ })
+ };
+ const responseData = {
+ ...logRequestData,
+ finishReason,
+ latency: performance.now() - startTime,
+ outputMessage,
+ tokensUsed: computeUsage([...renderedMessages, outputMessage])
+ };
+ logger.chatCompletionResponse("openai", responseData);
+ }
+
+ // src/lib/openai/index.ts
+ import { OpenAI as OpenAIClient3 } from "openai";

  // src/lib/anthropic/Anthropic.tsx
  import AnthropicClient from "@anthropic-ai/sdk";
@@ -960,12 +1124,14 @@ export {
  ChatCompletionError,
  CombinedLogger,
  ConsoleLogger,
+ ContentTypeImage,
  LogImplementation,
  LoggerContext,
  NoopLogImplementation,
  OpenAIChatCompletion,
- OpenAIClient2 as OpenAIClient,
+ OpenAIClient3 as OpenAIClient,
  OpenAIClientContext,
+ OpenAIVisionChatCompletion,
  SystemMessage,
  UserMessage,
  attachedContextSymbol,
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@gammatech/aijsx",
- "version": "0.2.0-beta.1",
+ "version": "0.2.0-beta.3",
  "description": "Rewrite of aijsx",
  "author": "Jordan Garcia",
  "license": "MIT",