@gammatech/aijsx 0.6.1-dev.2024-04-17 → 0.6.3-dev.2024-05-14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,5 +1,5 @@
1
- import { R as RenderContext, L as LogImplementation, S as SpanProcessor, C as ContextValues, a as Context, A as AINode, T as Tracer, b as ReadableSpan, c as SpanExporter, d as AIComponent, e as SpanAttributes, E as EvaluatorFn, P as PromptParsed, f as Prompt, N as NotAsyncGenerator, F as FunctionChain, g as StreamChain, h as EvaluatorResult, i as ChatCompletionClientAndProvider, J as JSX } from './jsx-dev-runtime-n4ojN2eR.mjs';
2
- export { I as AIElement, k as AIFragment, o as AssistantMessage, B as BoundLogger, r as ChatCompletionError, s as ChatCompletionRequestPayloads, m as ChatCompletionRole, z as CombinedLogger, y as ConsoleLogger, D as Literal, t as LogChatCompletionRequest, u as LogChatCompletionResponse, v as LogLevel, w as Logger, x as NoopLogImplementation, _ as OutputParser, M as PropsOfAIComponent, G as RenderResult, K as Renderable, p as RenderedConversationMessage, V as Span, O as SpanContext, W as SpanEvent, Q as SpanStatus, n as SystemMessage, Y as TracingContext, X as TracingContextKey, Z as TracingContextManager, U as UserMessage, H as attachedContextSymbol, q as computeUsage, j as createAIElement, l as createContext } from './jsx-dev-runtime-n4ojN2eR.mjs';
1
+ import { R as RenderContext, L as LogImplementation, S as SpanProcessor, C as ContextValues, a as Context, A as AINode, T as Tracer, b as ReadableSpan, c as SpanExporter, d as AIComponent, e as SpanAttributes, E as EvaluatorFn, P as PromptParsed, f as Prompt, N as NotAsyncGenerator, F as FunctionChain, g as StreamChain, h as EvaluatorResult, i as ChatCompletionClientAndProvider, J as JSX } from './jsx-dev-runtime-6M25UAsb.mjs';
2
+ export { I as AIElement, k as AIFragment, o as AssistantMessage, B as BoundLogger, r as ChatCompletionError, s as ChatCompletionRequestPayloads, m as ChatCompletionRole, z as CombinedLogger, y as ConsoleLogger, D as Literal, t as LogChatCompletionRequest, u as LogChatCompletionResponse, v as LogLevel, w as Logger, x as NoopLogImplementation, _ as OutputParser, M as PropsOfAIComponent, G as RenderResult, K as Renderable, p as RenderedConversationMessage, V as Span, O as SpanContext, W as SpanEvent, Q as SpanStatus, n as SystemMessage, Y as TracingContext, X as TracingContextKey, Z as TracingContextManager, U as UserMessage, H as attachedContextSymbol, q as computeUsage, j as createAIElement, l as createContext } from './jsx-dev-runtime-6M25UAsb.mjs';
3
3
  import { ZodObject, ZodRawShape, ZodTypeAny, ZodString, z } from 'zod';
4
4
  import { OpenAI } from 'openai';
5
5
  export { OpenAI as OpenAIClient } from 'openai';
@@ -145,7 +145,7 @@ declare module '@gammatech/aijsx' {
145
145
  openai: OpenAIChatCompletionRequest;
146
146
  }
147
147
  }
148
- type ValidOpenAIChatModel = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
148
+ type ValidOpenAIChatModel = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
149
149
  declare const OpenAIClientContext: Context<() => ChatCompletionClientAndProvider<OpenAI>>;
150
150
  type OpenAIChatCompletionProps = {
151
151
  model: ValidOpenAIChatModel;
@@ -158,7 +158,7 @@ type OpenAIChatCompletionProps = {
158
158
  };
159
159
  declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, ctx: RenderContext): AINode;
160
160
 
161
- type ValidOpenAIVisionModel = 'gpt-4-vision-preview';
161
+ type ValidOpenAIVisionModel = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4-vision-preview';
162
162
  declare const ContentTypeImage: (_props: {
163
163
  url: string;
164
164
  dimensions?: {
package/dist/index.d.ts CHANGED
@@ -1,5 +1,5 @@
1
- import { R as RenderContext, L as LogImplementation, S as SpanProcessor, C as ContextValues, a as Context, A as AINode, T as Tracer, b as ReadableSpan, c as SpanExporter, d as AIComponent, e as SpanAttributes, E as EvaluatorFn, P as PromptParsed, f as Prompt, N as NotAsyncGenerator, F as FunctionChain, g as StreamChain, h as EvaluatorResult, i as ChatCompletionClientAndProvider, J as JSX } from './jsx-dev-runtime-n4ojN2eR.js';
2
- export { I as AIElement, k as AIFragment, o as AssistantMessage, B as BoundLogger, r as ChatCompletionError, s as ChatCompletionRequestPayloads, m as ChatCompletionRole, z as CombinedLogger, y as ConsoleLogger, D as Literal, t as LogChatCompletionRequest, u as LogChatCompletionResponse, v as LogLevel, w as Logger, x as NoopLogImplementation, _ as OutputParser, M as PropsOfAIComponent, G as RenderResult, K as Renderable, p as RenderedConversationMessage, V as Span, O as SpanContext, W as SpanEvent, Q as SpanStatus, n as SystemMessage, Y as TracingContext, X as TracingContextKey, Z as TracingContextManager, U as UserMessage, H as attachedContextSymbol, q as computeUsage, j as createAIElement, l as createContext } from './jsx-dev-runtime-n4ojN2eR.js';
1
+ import { R as RenderContext, L as LogImplementation, S as SpanProcessor, C as ContextValues, a as Context, A as AINode, T as Tracer, b as ReadableSpan, c as SpanExporter, d as AIComponent, e as SpanAttributes, E as EvaluatorFn, P as PromptParsed, f as Prompt, N as NotAsyncGenerator, F as FunctionChain, g as StreamChain, h as EvaluatorResult, i as ChatCompletionClientAndProvider, J as JSX } from './jsx-dev-runtime-6M25UAsb.js';
2
+ export { I as AIElement, k as AIFragment, o as AssistantMessage, B as BoundLogger, r as ChatCompletionError, s as ChatCompletionRequestPayloads, m as ChatCompletionRole, z as CombinedLogger, y as ConsoleLogger, D as Literal, t as LogChatCompletionRequest, u as LogChatCompletionResponse, v as LogLevel, w as Logger, x as NoopLogImplementation, _ as OutputParser, M as PropsOfAIComponent, G as RenderResult, K as Renderable, p as RenderedConversationMessage, V as Span, O as SpanContext, W as SpanEvent, Q as SpanStatus, n as SystemMessage, Y as TracingContext, X as TracingContextKey, Z as TracingContextManager, U as UserMessage, H as attachedContextSymbol, q as computeUsage, j as createAIElement, l as createContext } from './jsx-dev-runtime-6M25UAsb.js';
3
3
  import { ZodObject, ZodRawShape, ZodTypeAny, ZodString, z } from 'zod';
4
4
  import { OpenAI } from 'openai';
5
5
  export { OpenAI as OpenAIClient } from 'openai';
@@ -145,7 +145,7 @@ declare module '@gammatech/aijsx' {
145
145
  openai: OpenAIChatCompletionRequest;
146
146
  }
147
147
  }
148
- type ValidOpenAIChatModel = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
148
+ type ValidOpenAIChatModel = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125';
149
149
  declare const OpenAIClientContext: Context<() => ChatCompletionClientAndProvider<OpenAI>>;
150
150
  type OpenAIChatCompletionProps = {
151
151
  model: ValidOpenAIChatModel;
@@ -158,7 +158,7 @@ type OpenAIChatCompletionProps = {
158
158
  };
159
159
  declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, ctx: RenderContext): AINode;
160
160
 
161
- type ValidOpenAIVisionModel = 'gpt-4-vision-preview';
161
+ type ValidOpenAIVisionModel = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4-vision-preview';
162
162
  declare const ContentTypeImage: (_props: {
163
163
  url: string;
164
164
  dimensions?: {
package/dist/index.js CHANGED
@@ -96,9 +96,10 @@ var computeUsage = (messages) => {
96
96
  };
97
97
  };
98
98
  var ChatCompletionError = class extends Error {
99
- constructor(message, chatCompletionRequest, shouldRetry4 = false) {
99
+ constructor(message, chatCompletionRequest, status, shouldRetry4 = false) {
100
100
  super(message);
101
101
  this.chatCompletionRequest = chatCompletionRequest;
102
+ this.status = status;
102
103
  this.shouldRetry = shouldRetry4;
103
104
  }
104
105
  name = "ChatCompletionError";
@@ -1722,6 +1723,10 @@ function tokenLimitForChatModel(model) {
1722
1723
  case "gpt-4-32k-0314":
1723
1724
  case "gpt-4-32k-0613":
1724
1725
  return 32768 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
1726
+ case "gpt-4o":
1727
+ case "gpt-4o-2024-05-13":
1728
+ case "gpt-4-turbo-2024-04-09":
1729
+ case "gpt-4-turbo":
1725
1730
  case "gpt-4-1106-preview":
1726
1731
  case "gpt-4-0125-preview":
1727
1732
  return 128e3 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
@@ -1929,10 +1934,16 @@ async function* OpenAIChatCompletionInner(props, { logger, render, tracer, getCo
1929
1934
  throw new ChatCompletionError(
1930
1935
  `OpenAIClient.APIError: ${ex.message}`,
1931
1936
  logRequestData,
1937
+ ex.status,
1932
1938
  retry
1933
1939
  );
1934
1940
  } else if (ex instanceof Error) {
1935
- throw new ChatCompletionError(ex.message, logRequestData, retry);
1941
+ throw new ChatCompletionError(
1942
+ ex.message,
1943
+ logRequestData,
1944
+ void 0,
1945
+ retry
1946
+ );
1936
1947
  }
1937
1948
  throw ex;
1938
1949
  }
@@ -2157,10 +2168,16 @@ async function* OpenAIVisionChatCompletionInner(props, { logger, render, tracer,
2157
2168
  throw new ChatCompletionError(
2158
2169
  `OpenAIClient.APIError: ${ex.message}`,
2159
2170
  logRequestData,
2171
+ ex.status,
2160
2172
  retry
2161
2173
  );
2162
2174
  } else if (ex instanceof Error) {
2163
- throw new ChatCompletionError(ex.message, logRequestData, retry);
2175
+ throw new ChatCompletionError(
2176
+ ex.message,
2177
+ logRequestData,
2178
+ void 0,
2179
+ retry
2180
+ );
2164
2181
  }
2165
2182
  throw ex;
2166
2183
  }
@@ -2329,6 +2346,33 @@ function buildAnthropicMessages(childrenXml) {
2329
2346
  var shouldRetry3 = (error) => {
2330
2347
  return error instanceof ChatCompletionError && error.shouldRetry;
2331
2348
  };
2349
+ var RE_INTERNAL_SERVER_MESSAGE = /The system encountered an unexpected error during processing/i;
2350
+ var RE_RATE_LIMIT_MESSAGE = /Too many requests, please wait before trying again/;
2351
+ var extractStatusFromError = (error) => {
2352
+ if (typeof error !== "object" || !(error instanceof Error)) {
2353
+ return;
2354
+ }
2355
+ if (error instanceof import_sdk.default.APIError && typeof error.status === "number") {
2356
+ return error.status;
2357
+ }
2358
+ const causeName = error.cause?.name;
2359
+ if (causeName === "ValidationException") {
2360
+ return 400;
2361
+ } else if (causeName === "InternalServerException") {
2362
+ return 500;
2363
+ } else if (causeName === "ModelStreamErrorException") {
2364
+ return 424;
2365
+ } else if (causeName === "ThrottlingException") {
2366
+ return 429;
2367
+ }
2368
+ if (RE_INTERNAL_SERVER_MESSAGE.test(error.message)) {
2369
+ return 500;
2370
+ }
2371
+ if (RE_RATE_LIMIT_MESSAGE.test(error.message)) {
2372
+ return 429;
2373
+ }
2374
+ return void 0;
2375
+ };
2332
2376
  function AnthropicChatCompletion(props, ctx) {
2333
2377
  const defaultMaxRetries = ctx.getContext(DefaultMaxRetriesContext);
2334
2378
  return /* @__PURE__ */ jsx(
@@ -2407,10 +2451,15 @@ async function* AnthropicChatCompletionInner(props, { render, logger, tracer, ge
2407
2451
  throw new ChatCompletionError(
2408
2452
  `AnthropicClient.APIError: ${err.message}`,
2409
2453
  logRequestData,
2454
+ extractStatusFromError(err),
2410
2455
  retry
2411
2456
  );
2412
2457
  } else if (err instanceof Error) {
2413
- throw new ChatCompletionError(err.message, logRequestData);
2458
+ throw new ChatCompletionError(
2459
+ err.message,
2460
+ logRequestData,
2461
+ extractStatusFromError(err)
2462
+ );
2414
2463
  }
2415
2464
  throw err;
2416
2465
  }
@@ -2434,15 +2483,9 @@ async function* AnthropicChatCompletionInner(props, { render, logger, tracer, ge
2434
2483
  }
2435
2484
  }
2436
2485
  } catch (e) {
2437
- let retry = false;
2438
- try {
2439
- const isValidationExceptionAWS = e?.cause?.name === "ValidationException";
2440
- const isValidationExceptionAnthropic = e?.error?.error?.type === "invalid_request_error";
2441
- retry = !isValidationExceptionAWS && !isValidationExceptionAnthropic;
2442
- } catch (inner) {
2443
- console.error(inner);
2444
- }
2445
- throw new ChatCompletionError(e.message, logRequestData, retry);
2486
+ const status = extractStatusFromError(e);
2487
+ const retry = status === 429 || status === 500;
2488
+ throw new ChatCompletionError(e.message, logRequestData, status, retry);
2446
2489
  }
2447
2490
  const outputMessage = {
2448
2491
  role: "assistant",
package/dist/index.mjs CHANGED
@@ -26,9 +26,10 @@ var computeUsage = (messages) => {
26
26
  };
27
27
  };
28
28
  var ChatCompletionError = class extends Error {
29
- constructor(message, chatCompletionRequest, shouldRetry4 = false) {
29
+ constructor(message, chatCompletionRequest, status, shouldRetry4 = false) {
30
30
  super(message);
31
31
  this.chatCompletionRequest = chatCompletionRequest;
32
+ this.status = status;
32
33
  this.shouldRetry = shouldRetry4;
33
34
  }
34
35
  name = "ChatCompletionError";
@@ -1619,6 +1620,10 @@ function tokenLimitForChatModel(model) {
1619
1620
  case "gpt-4-32k-0314":
1620
1621
  case "gpt-4-32k-0613":
1621
1622
  return 32768 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
1623
+ case "gpt-4o":
1624
+ case "gpt-4o-2024-05-13":
1625
+ case "gpt-4-turbo-2024-04-09":
1626
+ case "gpt-4-turbo":
1622
1627
  case "gpt-4-1106-preview":
1623
1628
  case "gpt-4-0125-preview":
1624
1629
  return 128e3 - TOKENS_CONSUMED_BY_REPLY_PREFIX;
@@ -1826,10 +1831,16 @@ async function* OpenAIChatCompletionInner(props, { logger, render, tracer, getCo
1826
1831
  throw new ChatCompletionError(
1827
1832
  `OpenAIClient.APIError: ${ex.message}`,
1828
1833
  logRequestData,
1834
+ ex.status,
1829
1835
  retry
1830
1836
  );
1831
1837
  } else if (ex instanceof Error) {
1832
- throw new ChatCompletionError(ex.message, logRequestData, retry);
1838
+ throw new ChatCompletionError(
1839
+ ex.message,
1840
+ logRequestData,
1841
+ void 0,
1842
+ retry
1843
+ );
1833
1844
  }
1834
1845
  throw ex;
1835
1846
  }
@@ -2054,10 +2065,16 @@ async function* OpenAIVisionChatCompletionInner(props, { logger, render, tracer,
2054
2065
  throw new ChatCompletionError(
2055
2066
  `OpenAIClient.APIError: ${ex.message}`,
2056
2067
  logRequestData,
2068
+ ex.status,
2057
2069
  retry
2058
2070
  );
2059
2071
  } else if (ex instanceof Error) {
2060
- throw new ChatCompletionError(ex.message, logRequestData, retry);
2072
+ throw new ChatCompletionError(
2073
+ ex.message,
2074
+ logRequestData,
2075
+ void 0,
2076
+ retry
2077
+ );
2061
2078
  }
2062
2079
  throw ex;
2063
2080
  }
@@ -2226,6 +2243,33 @@ function buildAnthropicMessages(childrenXml) {
2226
2243
  var shouldRetry3 = (error) => {
2227
2244
  return error instanceof ChatCompletionError && error.shouldRetry;
2228
2245
  };
2246
+ var RE_INTERNAL_SERVER_MESSAGE = /The system encountered an unexpected error during processing/i;
2247
+ var RE_RATE_LIMIT_MESSAGE = /Too many requests, please wait before trying again/;
2248
+ var extractStatusFromError = (error) => {
2249
+ if (typeof error !== "object" || !(error instanceof Error)) {
2250
+ return;
2251
+ }
2252
+ if (error instanceof AnthropicClient.APIError && typeof error.status === "number") {
2253
+ return error.status;
2254
+ }
2255
+ const causeName = error.cause?.name;
2256
+ if (causeName === "ValidationException") {
2257
+ return 400;
2258
+ } else if (causeName === "InternalServerException") {
2259
+ return 500;
2260
+ } else if (causeName === "ModelStreamErrorException") {
2261
+ return 424;
2262
+ } else if (causeName === "ThrottlingException") {
2263
+ return 429;
2264
+ }
2265
+ if (RE_INTERNAL_SERVER_MESSAGE.test(error.message)) {
2266
+ return 500;
2267
+ }
2268
+ if (RE_RATE_LIMIT_MESSAGE.test(error.message)) {
2269
+ return 429;
2270
+ }
2271
+ return void 0;
2272
+ };
2229
2273
  function AnthropicChatCompletion(props, ctx) {
2230
2274
  const defaultMaxRetries = ctx.getContext(DefaultMaxRetriesContext);
2231
2275
  return /* @__PURE__ */ jsx(
@@ -2304,10 +2348,15 @@ async function* AnthropicChatCompletionInner(props, { render, logger, tracer, ge
2304
2348
  throw new ChatCompletionError(
2305
2349
  `AnthropicClient.APIError: ${err.message}`,
2306
2350
  logRequestData,
2351
+ extractStatusFromError(err),
2307
2352
  retry
2308
2353
  );
2309
2354
  } else if (err instanceof Error) {
2310
- throw new ChatCompletionError(err.message, logRequestData);
2355
+ throw new ChatCompletionError(
2356
+ err.message,
2357
+ logRequestData,
2358
+ extractStatusFromError(err)
2359
+ );
2311
2360
  }
2312
2361
  throw err;
2313
2362
  }
@@ -2331,15 +2380,9 @@ async function* AnthropicChatCompletionInner(props, { render, logger, tracer, ge
2331
2380
  }
2332
2381
  }
2333
2382
  } catch (e) {
2334
- let retry = false;
2335
- try {
2336
- const isValidationExceptionAWS = e?.cause?.name === "ValidationException";
2337
- const isValidationExceptionAnthropic = e?.error?.error?.type === "invalid_request_error";
2338
- retry = !isValidationExceptionAWS && !isValidationExceptionAnthropic;
2339
- } catch (inner) {
2340
- console.error(inner);
2341
- }
2342
- throw new ChatCompletionError(e.message, logRequestData, retry);
2383
+ const status = extractStatusFromError(e);
2384
+ const retry = status === 429 || status === 500;
2385
+ throw new ChatCompletionError(e.message, logRequestData, status, retry);
2343
2386
  }
2344
2387
  const outputMessage = {
2345
2388
  role: "assistant",
@@ -314,9 +314,10 @@ declare const computeUsage: (messages: RenderedConversationMessage[]) => {
314
314
  };
315
315
  declare class ChatCompletionError extends Error {
316
316
  readonly chatCompletionRequest: LogChatCompletionRequest;
317
+ readonly status: number | undefined;
317
318
  readonly shouldRetry: boolean;
318
319
  readonly name = "ChatCompletionError";
319
- constructor(message: string, chatCompletionRequest: LogChatCompletionRequest, shouldRetry?: boolean);
320
+ constructor(message: string, chatCompletionRequest: LogChatCompletionRequest, status: number | undefined, shouldRetry?: boolean);
320
321
  }
321
322
 
322
323
  declare function createAIElement<P extends {
@@ -314,9 +314,10 @@ declare const computeUsage: (messages: RenderedConversationMessage[]) => {
314
314
  };
315
315
  declare class ChatCompletionError extends Error {
316
316
  readonly chatCompletionRequest: LogChatCompletionRequest;
317
+ readonly status: number | undefined;
317
318
  readonly shouldRetry: boolean;
318
319
  readonly name = "ChatCompletionError";
319
- constructor(message: string, chatCompletionRequest: LogChatCompletionRequest, shouldRetry?: boolean);
320
+ constructor(message: string, chatCompletionRequest: LogChatCompletionRequest, status: number | undefined, shouldRetry?: boolean);
320
321
  }
321
322
 
322
323
  declare function createAIElement<P extends {
@@ -1,2 +1,2 @@
1
- export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-n4ojN2eR.mjs';
1
+ export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-6M25UAsb.mjs';
2
2
  import 'zod';
@@ -1,2 +1,2 @@
1
- export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-n4ojN2eR.js';
1
+ export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-6M25UAsb.js';
2
2
  import 'zod';
@@ -1,2 +1,2 @@
1
- export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-n4ojN2eR.mjs';
1
+ export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-6M25UAsb.mjs';
2
2
  import 'zod';
@@ -1,2 +1,2 @@
1
- export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-n4ojN2eR.js';
1
+ export { a2 as Fragment, J as JSX, $ as jsx, a0 as jsxDEV, a1 as jsxs } from './jsx-dev-runtime-6M25UAsb.js';
2
2
  import 'zod';
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@gammatech/aijsx",
3
- "version": "0.6.1-dev.2024-04-17",
3
+ "version": "0.6.3-dev.2024-05-14",
4
4
  "description": "Rewrite of aijsx",
5
5
  "author": "Jordan Garcia",
6
6
  "license": "MIT",