@gammatech/aijsx 0.1.2 → 0.1.3-asa.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -99,8 +99,8 @@ declare class BoundLogger implements Logger {
  info: (...msgs: Loggable[]) => void;
  debug: (...msgs: Loggable[]) => void;
  logException: (exception: unknown) => void;
- chatCompletionRequest: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>) => void;
- chatCompletionResponse: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>) => void;
+ chatCompletionRequest: <K extends "openai">(provider: K, payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>) => void;
+ chatCompletionResponse: <K extends "openai">(provider: K, payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>) => void;
  }
  declare class NoopLogImplementation extends LogImplementation {
  log(_ctx: RenderContext, _level: LogLevel, _message: string): void;
@@ -112,8 +112,8 @@ declare class CombinedLogger extends LogImplementation {
  private readonly loggers;
  constructor(loggers: LogImplementation[]);
  log(...args: Parameters<LogImplementation['log']>): void;
- chatCompletionRequest<K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionRequest']>): void;
- chatCompletionResponse<K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionResponse']>): void;
+ chatCompletionRequest<_K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionRequest']>): void;
+ chatCompletionResponse<_K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionResponse']>): void;
  }

  type ChatCompletionRole = 'user' | 'system' | 'assistant';
@@ -99,8 +99,8 @@ declare class BoundLogger implements Logger {
  info: (...msgs: Loggable[]) => void;
  debug: (...msgs: Loggable[]) => void;
  logException: (exception: unknown) => void;
- chatCompletionRequest: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>) => void;
- chatCompletionResponse: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>) => void;
+ chatCompletionRequest: <K extends "openai">(provider: K, payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>) => void;
+ chatCompletionResponse: <K extends "openai">(provider: K, payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>) => void;
  }
  declare class NoopLogImplementation extends LogImplementation {
  log(_ctx: RenderContext, _level: LogLevel, _message: string): void;
@@ -112,8 +112,8 @@ declare class CombinedLogger extends LogImplementation {
  private readonly loggers;
  constructor(loggers: LogImplementation[]);
  log(...args: Parameters<LogImplementation['log']>): void;
- chatCompletionRequest<K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionRequest']>): void;
- chatCompletionResponse<K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionResponse']>): void;
+ chatCompletionRequest<_K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionRequest']>): void;
+ chatCompletionResponse<_K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionResponse']>): void;
  }

  type ChatCompletionRole = 'user' | 'system' | 'assistant';
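
Note on the hunks above: BoundLogger's provider parameter narrows from keyof ChatCompletionRequestPayloads to the literal "openai", a consequence of the "anthropic" payload registration being deleted later in this diff, while CombinedLogger keeps the keyof constraint and only renames its type parameter to _K to flag it as unused. A minimal TypeScript sketch of the compile-time effect (the logger binding and payload values are hypothetical stand-ins):

declare const logger: BoundLogger;
declare const openAIPayload: LogChatCompletionRequest<ChatCompletionRequestPayloads['openai']>;

// Still type-checks: "openai" remains a valid key of ChatCompletionRequestPayloads.
logger.chatCompletionRequest('openai', openAIPayload);

// No longer compiles against 0.1.3-asa.2: the module augmentation that
// registered an "anthropic" key is removed, so the literal is rejected.
// logger.chatCompletionRequest('anthropic', anthropicPayload);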
package/dist/index.d.mts CHANGED
@@ -1,10 +1,7 @@
- import { L as LogImplementation, R as RenderContext, C as Context, A as AIElement, a as RenderedConversationMessage } from './createElement-Q_LxUYf8.mjs';
- export { u as AIComponent, b as AIFragment, w as AINode, f as AssistantMessage, B as BoundLogger, j as ChatCompletionError, k as ChatCompletionRequestPayloads, q as CombinedLogger, p as ConsoleLogger, g as ConversationMessage, r as Literal, l as LogChatCompletionRequest, m as LogChatCompletionResponse, n as LogLevel, o as Logger, d as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, t as RenderResult, x as Renderable, s as RenderableStream, S as SystemMessage, U as UserMessage, v as attachedContextSymbol, h as childrenToConversationMessage, i as computeUsage, c as createAIElement, e as createContext } from './createElement-Q_LxUYf8.mjs';
+ import { L as LogImplementation, R as RenderContext, C as Context, A as AIElement, a as RenderedConversationMessage } from './createElement-sO2rY6Ly.mjs';
+ export { u as AIComponent, b as AIFragment, w as AINode, f as AssistantMessage, B as BoundLogger, j as ChatCompletionError, k as ChatCompletionRequestPayloads, q as CombinedLogger, p as ConsoleLogger, g as ConversationMessage, r as Literal, l as LogChatCompletionRequest, m as LogChatCompletionResponse, n as LogLevel, o as Logger, d as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, t as RenderResult, x as Renderable, s as RenderableStream, S as SystemMessage, U as UserMessage, v as attachedContextSymbol, h as childrenToConversationMessage, i as computeUsage, c as createAIElement, e as createContext } from './createElement-sO2rY6Ly.mjs';
  import { OpenAI } from 'openai';
  export { OpenAI as OpenAIClient } from 'openai';
- import AnthropicClient from '@anthropic-ai/sdk';
- export { default as AnthropicClient } from '@anthropic-ai/sdk';
- export { countTokens as countAnthropicTokens } from '@anthropic-ai/tokenizer';

  declare function createRenderContext({ logger, rootRenderId, }?: {
  logger?: LogImplementation;
@@ -36,37 +33,4 @@ declare const tokenizer: {
  declare function tokenLimitForChatModel(model: ValidOpenAIChatModel): number | undefined;
  declare function tokenCountForConversationMessage(message: Pick<RenderedConversationMessage, 'type' | 'content'>): number;

- type AnthropicChatCompletionRequest = AnthropicClient.CompletionCreateParams;
- declare module '@gammatech/aijsx' {
- interface ChatCompletionRequestPayloads {
- anthropic: AnthropicChatCompletionRequest;
- }
- }
- /**
- * The set of valid Claude models.
- * @see https://docs.anthropic.com/claude/reference/selecting-a-model
- */
- type ValidAnthropicChatModel = 'claude-instant-1.2' | 'claude-2.1';
- declare const AnthropicClientContext: Context<() => AnthropicClient>;
- /**
- * If you use an Anthropic model without specifying the max tokens for the completion, this value will be used as the default.
- */
- declare const defaultMaxTokens = 4096;
- type AnthropicChatCompletionProps = {
- model: ValidAnthropicChatModel;
- maxTokens?: number;
- temperature?: number;
- children: AIElement<any> | AIElement<any>[];
- provider?: string;
- providerRegion?: string;
- };
- /**
- * An AI.JSX component that invokes an Anthropic Large Language Model.
- * @param children The children to render.
- * @param chatModel The chat model to use.
- * @param completionModel The completion model to use.
- * @param client The Anthropic client.
- */
- declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, { render, logger, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;
-
- export { AIElement, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, OpenAIClientContext, RenderContext, RenderedConversationMessage, type ValidAnthropicChatModel, type ValidOpenAIChatModel, createRenderContext, defaultMaxTokens, tokenCountForConversationMessage, tokenLimitForChatModel, tokenizer };
+ export { AIElement, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, OpenAIClientContext, RenderContext, RenderedConversationMessage, type ValidOpenAIChatModel, createRenderContext, tokenCountForConversationMessage, tokenLimitForChatModel, tokenizer };
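
In short, the typings drop the whole Anthropic surface: AnthropicChatCompletion, AnthropicClientContext, ValidAnthropicChatModel, defaultMaxTokens, the module augmentation registering the "anthropic" request payload, and the re-exported AnthropicClient and countAnthropicTokens. A hedged sketch of what this means for imports after upgrading:

// Resolved against 0.1.2, but fails to compile against 0.1.3-asa.2:
// import { AnthropicChatCompletion, AnthropicClientContext } from '@gammatech/aijsx';

// Only the OpenAI surface survives:
import { OpenAIChatCompletion, OpenAIClientContext, createRenderContext } from '@gammatech/aijsx';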
package/dist/index.d.ts CHANGED
@@ -1,10 +1,7 @@
- import { L as LogImplementation, R as RenderContext, C as Context, A as AIElement, a as RenderedConversationMessage } from './createElement-Q_LxUYf8.js';
- export { u as AIComponent, b as AIFragment, w as AINode, f as AssistantMessage, B as BoundLogger, j as ChatCompletionError, k as ChatCompletionRequestPayloads, q as CombinedLogger, p as ConsoleLogger, g as ConversationMessage, r as Literal, l as LogChatCompletionRequest, m as LogChatCompletionResponse, n as LogLevel, o as Logger, d as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, t as RenderResult, x as Renderable, s as RenderableStream, S as SystemMessage, U as UserMessage, v as attachedContextSymbol, h as childrenToConversationMessage, i as computeUsage, c as createAIElement, e as createContext } from './createElement-Q_LxUYf8.js';
+ import { L as LogImplementation, R as RenderContext, C as Context, A as AIElement, a as RenderedConversationMessage } from './createElement-sO2rY6Ly.js';
+ export { u as AIComponent, b as AIFragment, w as AINode, f as AssistantMessage, B as BoundLogger, j as ChatCompletionError, k as ChatCompletionRequestPayloads, q as CombinedLogger, p as ConsoleLogger, g as ConversationMessage, r as Literal, l as LogChatCompletionRequest, m as LogChatCompletionResponse, n as LogLevel, o as Logger, d as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, t as RenderResult, x as Renderable, s as RenderableStream, S as SystemMessage, U as UserMessage, v as attachedContextSymbol, h as childrenToConversationMessage, i as computeUsage, c as createAIElement, e as createContext } from './createElement-sO2rY6Ly.js';
  import { OpenAI } from 'openai';
  export { OpenAI as OpenAIClient } from 'openai';
- import AnthropicClient from '@anthropic-ai/sdk';
- export { default as AnthropicClient } from '@anthropic-ai/sdk';
- export { countTokens as countAnthropicTokens } from '@anthropic-ai/tokenizer';

  declare function createRenderContext({ logger, rootRenderId, }?: {
  logger?: LogImplementation;
@@ -36,37 +33,4 @@ declare const tokenizer: {
  declare function tokenLimitForChatModel(model: ValidOpenAIChatModel): number | undefined;
  declare function tokenCountForConversationMessage(message: Pick<RenderedConversationMessage, 'type' | 'content'>): number;

- type AnthropicChatCompletionRequest = AnthropicClient.CompletionCreateParams;
- declare module '@gammatech/aijsx' {
- interface ChatCompletionRequestPayloads {
- anthropic: AnthropicChatCompletionRequest;
- }
- }
- /**
- * The set of valid Claude models.
- * @see https://docs.anthropic.com/claude/reference/selecting-a-model
- */
- type ValidAnthropicChatModel = 'claude-instant-1.2' | 'claude-2.1';
- declare const AnthropicClientContext: Context<() => AnthropicClient>;
- /**
- * If you use an Anthropic model without specifying the max tokens for the completion, this value will be used as the default.
- */
- declare const defaultMaxTokens = 4096;
- type AnthropicChatCompletionProps = {
- model: ValidAnthropicChatModel;
- maxTokens?: number;
- temperature?: number;
- children: AIElement<any> | AIElement<any>[];
- provider?: string;
- providerRegion?: string;
- };
- /**
- * An AI.JSX component that invokes an Anthropic Large Language Model.
- * @param children The children to render.
- * @param chatModel The chat model to use.
- * @param completionModel The completion model to use.
- * @param client The Anthropic client.
- */
- declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, { render, logger, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;
-
- export { AIElement, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, OpenAIClientContext, RenderContext, RenderedConversationMessage, type ValidAnthropicChatModel, type ValidOpenAIChatModel, createRenderContext, defaultMaxTokens, tokenCountForConversationMessage, tokenLimitForChatModel, tokenizer };
+ export { AIElement, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, OpenAIClientContext, RenderContext, RenderedConversationMessage, type ValidOpenAIChatModel, createRenderContext, tokenCountForConversationMessage, tokenLimitForChatModel, tokenizer };
package/dist/index.js CHANGED
@@ -1,8 +1,6 @@
- var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
- var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
  for (var name in all)
@@ -16,23 +14,12 @@ var __copyProps = (to, from, except, desc) => {
  }
  return to;
  };
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
- // If the importer is in node compatibility mode or this is not an ESM
- // file that has been converted to a CommonJS file using a Babel-
- // compatible transform (i.e. "__esModule" has not been set), then set
- // "default" to the CommonJS "module.exports" for node compatibility.
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
- mod
- ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // src/index.ts
  var src_exports = {};
  __export(src_exports, {
  AIFragment: () => AIFragment,
- AnthropicChatCompletion: () => AnthropicChatCompletion,
- AnthropicClient: () => import_sdk2.default,
- AnthropicClientContext: () => AnthropicClientContext,
  AssistantMessage: () => AssistantMessage,
  BoundLogger: () => BoundLogger,
  ChatCompletionError: () => ChatCompletionError,
@@ -49,11 +36,9 @@ __export(src_exports, {
  attachedContextSymbol: () => attachedContextSymbol,
  childrenToConversationMessage: () => childrenToConversationMessage,
  computeUsage: () => computeUsage,
- countAnthropicTokens: () => import_tokenizer3.countTokens,
  createAIElement: () => createAIElement,
  createContext: () => createContext,
  createRenderContext: () => createRenderContext,
- defaultMaxTokens: () => defaultMaxTokens,
  tokenCountForConversationMessage: () => tokenCountForConversationMessage,
  tokenLimitForChatModel: () => tokenLimitForChatModel,
  tokenizer: () => tokenizer
@@ -346,6 +331,15 @@ var ParallelStreamIterator = class extends EventEmitter {
  value
  });
  }
+ error(streamInd, error) {
+ const valInd = this.values[streamInd].length;
+ this.values[streamInd].push(error);
+ this.emit("error", {
+ streamInd,
+ valInd,
+ error
+ });
+ }
  complete(streamInd) {
  this.completedStreams[streamInd] = true;
  this.emit("complete", {
@@ -364,6 +358,10 @@ var ParallelStreamIterator = class extends EventEmitter {
  resolveAt(streamInd, valInd) {
  return new Promise((resolve, reject) => {
  const value = this.values[streamInd][valInd];
+ if (value instanceof Error) {
+ reject(value);
+ return;
+ }
  if (value !== void 0) {
  resolve({ done: false, value });
  return;
@@ -375,14 +373,27 @@ var ParallelStreamIterator = class extends EventEmitter {
  reject("next");
  return;
  }
- const unsub = this.on("data", (data) => {
- if (streamInd === data.streamInd && data.valInd === valInd) {
- resolve({ done: false, value: data.value });
- unsub();
- onCompleteUnsub();
+ const unsubData = this.on("data", (data) => {
+ const atCursor = data.streamInd === streamInd && data.valInd === valInd;
+ if (!atCursor) {
+ return;
  }
+ resolve({ done: false, value: data.value });
+ unsubData();
+ unsubError();
+ unsubComplete();
  });
- const onCompleteUnsub = this.on("complete", (data) => {
+ const unsubError = this.on("error", (data) => {
+ const atCursor = data.streamInd === streamInd && data.valInd === valInd;
+ if (!atCursor) {
+ return;
+ }
+ reject(data.error);
+ unsubData();
+ unsubError();
+ unsubComplete();
+ });
+ const unsubComplete = this.on("complete", (data) => {
  if (streamInd !== data.streamInd) {
  return;
  }
@@ -392,8 +403,9 @@ var ParallelStreamIterator = class extends EventEmitter {
  if (this.values[streamInd].length === valInd) {
  reject("next");
  }
- unsub();
- onCompleteUnsub();
+ unsubData();
+ unsubError();
+ unsubComplete();
  });
  });
  }
@@ -419,10 +431,18 @@ var ParallelStreamIterator = class extends EventEmitter {
  function coalesceParallelStreams(streams) {
  const iter = new ParallelStreamIterator(streams.length);
  streams.forEach(async (s, streamInd) => {
- for await (const value of s) {
- iter.push(streamInd, value);
+ try {
+ for await (const value of s) {
+ iter.push(streamInd, value);
+ }
+ iter.complete(streamInd);
+ } catch (e) {
+ if (e instanceof Error) {
+ iter.error(streamInd, e);
+ } else {
+ iter.error(streamInd, new Error(e));
+ }
  }
- iter.complete(streamInd);
  });
  return iter;
  }
@@ -604,7 +624,6 @@ function jsx(type, config, maybeKey) {
  const children = config && Array.isArray(config.children) ? config.children : [];
  return createAIElement(type, configWithKey, ...children);
  }
- var jsxs = jsx;

  // src/lib/openai/OpenAI.tsx
  var defaultClient = null;
@@ -706,132 +725,9 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {

  // src/lib/openai/index.ts
  var import_openai2 = require("openai");
-
- // src/lib/anthropic/Anthropic.tsx
- var import_sdk = __toESM(require("@anthropic-ai/sdk"));
- var import_tokenizer2 = require("@anthropic-ai/tokenizer");
- var defaultClient2 = null;
- var AnthropicClientContext = createContext(
- () => {
- if (defaultClient2) {
- return defaultClient2;
- }
- defaultClient2 = new import_sdk.default({
- apiKey: getEnvVar("ANTHROPIC_API_KEY", false)
- });
- return defaultClient2;
- }
- );
- var defaultMaxTokens = 4096;
- async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
- const startTime = performance.now();
- const client = getContext(AnthropicClientContext)();
- if (!client) {
- throw new Error(
- "[AnthropicChatCompletion] must supply AnthropicClient via context"
- );
- }
- const renderedMessages = await Promise.all(
- childrenToConversationMessage(props.children).flatMap((message) => {
- if (message.type === "system") {
- return [
- {
- type: "user",
- element: /* @__PURE__ */ jsxs(UserMessage, { children: [
- "For subsequent replies you will adhere to the following instructions: ",
- message.element
- ] })
- },
- {
- type: "assistant",
- element: /* @__PURE__ */ jsx(AssistantMessage, { children: "Okay, I will do that." })
- }
- ];
- }
- return [message];
- }).map(async (message) => {
- const prefix = message.type === "user" ? import_sdk.default.HUMAN_PROMPT : import_sdk.default.AI_PROMPT;
- const rendered = await render(message.element);
- const content2 = `${prefix} ${rendered.trim()}`;
- return {
- ...message,
- content: content2,
- tokens: (0, import_tokenizer2.countTokens)(content2)
- };
- })
- );
- const chatMessages = renderedMessages.map((m) => {
- return m.content;
- });
- chatMessages.push(import_sdk.default.AI_PROMPT);
- const anthropicCompletionRequest = {
- prompt: chatMessages.join("\n\n"),
- max_tokens_to_sample: props.maxTokens ?? defaultMaxTokens,
- temperature: props.temperature,
- model: props.model,
- stream: true
- };
- const logRequestData = {
- startTime,
- model: props.model,
- provider: props.provider,
- providerRegion: props.providerRegion,
- inputMessages: renderedMessages,
- request: anthropicCompletionRequest
- };
- logger.chatCompletionRequest("anthropic", logRequestData);
- let response;
- try {
- response = await client.completions.create(anthropicCompletionRequest);
- } catch (err) {
- if (err instanceof import_sdk.default.APIError) {
- throw new ChatCompletionError(
- `AnthropicClient.APIError: ${err.message}`,
- logRequestData
- );
- } else if (err instanceof Error) {
- throw new ChatCompletionError(err.message, logRequestData);
- }
- throw err;
- }
- let content = "";
- let isFirstResponse = true;
- for await (const completion of response) {
- let text = completion.completion;
- if (isFirstResponse && text.length > 0) {
- isFirstResponse = false;
- if (text.startsWith(" ")) {
- text = text.slice(1);
- }
- }
- content += text;
- yield text;
- }
- const outputMessage = {
- type: "assistant",
- element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
- content,
- tokens: (0, import_tokenizer2.countTokens)(content)
- };
- const responseData = {
- ...logRequestData,
- finishReason: "stop",
- latency: performance.now() - startTime,
- outputMessage,
- tokensUsed: computeUsage([...renderedMessages, outputMessage])
- };
- logger.chatCompletionResponse("anthropic", responseData);
- }
-
- // src/lib/anthropic/index.ts
- var import_sdk2 = __toESM(require("@anthropic-ai/sdk"));
- var import_tokenizer3 = require("@anthropic-ai/tokenizer");
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  AIFragment,
- AnthropicChatCompletion,
- AnthropicClient,
- AnthropicClientContext,
  AssistantMessage,
  BoundLogger,
  ChatCompletionError,
@@ -848,11 +744,9 @@ var import_tokenizer3 = require("@anthropic-ai/tokenizer");
  attachedContextSymbol,
  childrenToConversationMessage,
  computeUsage,
- countAnthropicTokens,
  createAIElement,
  createContext,
  createRenderContext,
- defaultMaxTokens,
  tokenCountForConversationMessage,
  tokenLimitForChatModel,
  tokenizer
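
Besides deleting the Anthropic provider, the one behavioral change in this bundle is error propagation for merged streams: ParallelStreamIterator.error() records a thrown Error in the slot a value would have occupied, resolveAt() rejects when the cursor reaches an Error, and coalesceParallelStreams() wraps each source loop in try/catch (coercing non-Error throws via new Error(e)) instead of letting the rejection escape the detached forEach callback. A small sketch of the observable difference, assuming the merged iterator is consumed as an async iterable (the failing generator is invented for illustration):

async function* failing() {
  yield "partial result";
  throw new Error("upstream failure");
}

const merged = coalesceParallelStreams([failing()]);
try {
  for await (const value of merged) {
    console.log(value); // logs "partial result"
  }
} catch (err) {
  // In 0.1.3-asa.2 the source stream's error surfaces here; in 0.1.2 it
  // escaped as an unhandled rejection and the stream never completed.
  console.error(err);
}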
package/dist/index.mjs CHANGED
@@ -3,8 +3,7 @@ import {
  createAIElement,
  isAIElement,
  isLiteral,
- jsx,
- jsxs
+ jsx
  } from "./chunk-7GA5BUUP.mjs";

  // src/chat.tsx
@@ -267,6 +266,15 @@ var ParallelStreamIterator = class extends EventEmitter {
  value
  });
  }
+ error(streamInd, error) {
+ const valInd = this.values[streamInd].length;
+ this.values[streamInd].push(error);
+ this.emit("error", {
+ streamInd,
+ valInd,
+ error
+ });
+ }
  complete(streamInd) {
  this.completedStreams[streamInd] = true;
  this.emit("complete", {
@@ -285,6 +293,10 @@ var ParallelStreamIterator = class extends EventEmitter {
  resolveAt(streamInd, valInd) {
  return new Promise((resolve, reject) => {
  const value = this.values[streamInd][valInd];
+ if (value instanceof Error) {
+ reject(value);
+ return;
+ }
  if (value !== void 0) {
  resolve({ done: false, value });
  return;
@@ -296,14 +308,27 @@ var ParallelStreamIterator = class extends EventEmitter {
  reject("next");
  return;
  }
- const unsub = this.on("data", (data) => {
- if (streamInd === data.streamInd && data.valInd === valInd) {
- resolve({ done: false, value: data.value });
- unsub();
- onCompleteUnsub();
+ const unsubData = this.on("data", (data) => {
+ const atCursor = data.streamInd === streamInd && data.valInd === valInd;
+ if (!atCursor) {
+ return;
  }
+ resolve({ done: false, value: data.value });
+ unsubData();
+ unsubError();
+ unsubComplete();
  });
- const onCompleteUnsub = this.on("complete", (data) => {
+ const unsubError = this.on("error", (data) => {
+ const atCursor = data.streamInd === streamInd && data.valInd === valInd;
+ if (!atCursor) {
+ return;
+ }
+ reject(data.error);
+ unsubData();
+ unsubError();
+ unsubComplete();
+ });
+ const unsubComplete = this.on("complete", (data) => {
  if (streamInd !== data.streamInd) {
  return;
  }
@@ -313,8 +338,9 @@ var ParallelStreamIterator = class extends EventEmitter {
  if (this.values[streamInd].length === valInd) {
  reject("next");
  }
- unsub();
- onCompleteUnsub();
+ unsubData();
+ unsubError();
+ unsubComplete();
  });
  });
  }
@@ -340,10 +366,18 @@ var ParallelStreamIterator = class extends EventEmitter {
  function coalesceParallelStreams(streams) {
  const iter = new ParallelStreamIterator(streams.length);
  streams.forEach(async (s, streamInd) => {
- for await (const value of s) {
- iter.push(streamInd, value);
+ try {
+ for await (const value of s) {
+ iter.push(streamInd, value);
+ }
+ iter.complete(streamInd);
+ } catch (e) {
+ if (e instanceof Error) {
+ iter.error(streamInd, e);
+ } else {
+ iter.error(streamInd, new Error(e));
+ }
  }
- iter.complete(streamInd);
  });
  return iter;
  }
@@ -619,131 +653,8 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {

  // src/lib/openai/index.ts
  import { OpenAI as OpenAIClient2 } from "openai";
-
- // src/lib/anthropic/Anthropic.tsx
- import AnthropicClient from "@anthropic-ai/sdk";
- import { countTokens } from "@anthropic-ai/tokenizer";
- var defaultClient2 = null;
- var AnthropicClientContext = createContext(
- () => {
- if (defaultClient2) {
- return defaultClient2;
- }
- defaultClient2 = new AnthropicClient({
- apiKey: getEnvVar("ANTHROPIC_API_KEY", false)
- });
- return defaultClient2;
- }
- );
- var defaultMaxTokens = 4096;
- async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
- const startTime = performance.now();
- const client = getContext(AnthropicClientContext)();
- if (!client) {
- throw new Error(
- "[AnthropicChatCompletion] must supply AnthropicClient via context"
- );
- }
- const renderedMessages = await Promise.all(
- childrenToConversationMessage(props.children).flatMap((message) => {
- if (message.type === "system") {
- return [
- {
- type: "user",
- element: /* @__PURE__ */ jsxs(UserMessage, { children: [
- "For subsequent replies you will adhere to the following instructions: ",
- message.element
- ] })
- },
- {
- type: "assistant",
- element: /* @__PURE__ */ jsx(AssistantMessage, { children: "Okay, I will do that." })
- }
- ];
- }
- return [message];
- }).map(async (message) => {
- const prefix = message.type === "user" ? AnthropicClient.HUMAN_PROMPT : AnthropicClient.AI_PROMPT;
- const rendered = await render(message.element);
- const content2 = `${prefix} ${rendered.trim()}`;
- return {
- ...message,
- content: content2,
- tokens: countTokens(content2)
- };
- })
- );
- const chatMessages = renderedMessages.map((m) => {
- return m.content;
- });
- chatMessages.push(AnthropicClient.AI_PROMPT);
- const anthropicCompletionRequest = {
- prompt: chatMessages.join("\n\n"),
- max_tokens_to_sample: props.maxTokens ?? defaultMaxTokens,
- temperature: props.temperature,
- model: props.model,
- stream: true
- };
- const logRequestData = {
- startTime,
- model: props.model,
- provider: props.provider,
- providerRegion: props.providerRegion,
- inputMessages: renderedMessages,
- request: anthropicCompletionRequest
- };
- logger.chatCompletionRequest("anthropic", logRequestData);
- let response;
- try {
- response = await client.completions.create(anthropicCompletionRequest);
- } catch (err) {
- if (err instanceof AnthropicClient.APIError) {
- throw new ChatCompletionError(
- `AnthropicClient.APIError: ${err.message}`,
- logRequestData
- );
- } else if (err instanceof Error) {
- throw new ChatCompletionError(err.message, logRequestData);
- }
- throw err;
- }
- let content = "";
- let isFirstResponse = true;
- for await (const completion of response) {
- let text = completion.completion;
- if (isFirstResponse && text.length > 0) {
- isFirstResponse = false;
- if (text.startsWith(" ")) {
- text = text.slice(1);
- }
- }
- content += text;
- yield text;
- }
- const outputMessage = {
- type: "assistant",
- element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
- content,
- tokens: countTokens(content)
- };
- const responseData = {
- ...logRequestData,
- finishReason: "stop",
- latency: performance.now() - startTime,
- outputMessage,
- tokensUsed: computeUsage([...renderedMessages, outputMessage])
- };
- logger.chatCompletionResponse("anthropic", responseData);
- }
-
- // src/lib/anthropic/index.ts
- import AnthropicClient2 from "@anthropic-ai/sdk";
- import { countTokens as countAnthropicTokens } from "@anthropic-ai/tokenizer";
  export {
  AIFragment,
- AnthropicChatCompletion,
- AnthropicClient2 as AnthropicClient,
- AnthropicClientContext,
  AssistantMessage,
  BoundLogger,
  ChatCompletionError,
@@ -760,11 +671,9 @@ export {
  attachedContextSymbol,
  childrenToConversationMessage,
  computeUsage,
- countAnthropicTokens,
  createAIElement,
  createContext,
  createRenderContext,
- defaultMaxTokens,
  tokenCountForConversationMessage,
  tokenLimitForChatModel,
  tokenizer
@@ -1,2 +1,2 @@
  export { Fragment, JSX, jsx, jsxDEV, jsxs } from './jsx-runtime.mjs';
- import './createElement-Q_LxUYf8.mjs';
+ import './createElement-sO2rY6Ly.mjs';
@@ -1,2 +1,2 @@
  export { Fragment, JSX, jsx, jsxDEV, jsxs } from './jsx-runtime.js';
- import './createElement-Q_LxUYf8.js';
+ import './createElement-sO2rY6Ly.js';
@@ -1,4 +1,4 @@
- import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-Q_LxUYf8.mjs';
+ import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-sO2rY6Ly.mjs';

  /**
  * The is used as an import source for ts/js files as the JSX transpile functinos
@@ -1,4 +1,4 @@
- import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-Q_LxUYf8.js';
+ import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-sO2rY6Ly.js';

  /**
  * The is used as an import source for ts/js files as the JSX transpile functinos
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@gammatech/aijsx",
- "version": "0.1.2",
+ "version": "0.1.3-asa.2",
  "description": "Rewrite of aijsx",
  "author": "Jordan Garcia",
  "license": "MIT",
@@ -13,14 +13,11 @@
  "test:watch": "jest --watch --verbose",
  "build": "yarn check-types && yarn clean-symlinks && tsup",
  "clean-symlinks": "rm ./jsx-* || true",
- "symlink": "ln -s ./dist/jsx-runtime.js . && ln -s ./dist/jsx-runtime.d.ts && ln -s ./dist/jsx-runtime.js ./jsx-dev-runtime.js && ln -s ./dist/jsx-runtime.d.ts ./jsx-dev-runtime.d.ts",
  "prepublishOnly": "yarn build",
  "lint": "eslint \"{src,test}/**/*.ts\" && yarn check-types",
  "check-types": "tsc --skipLibCheck --noEmit"
  },
  "dependencies": {
- "@anthropic-ai/sdk": "^0.12.0",
- "@anthropic-ai/tokenizer": "^0.0.4",
  "js-tiktoken": "^1.0.8",
  "nanoid": "^3.1.23",
  "openai": "^4.23.0"