@gammatech/aijsx 0.12.1-dev.2024-07-09 → 0.13.0-dev.2024-07-15

This diff shows the changes between two publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -260,7 +260,9 @@ declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, ctx: Ren
260
260
 
261
261
  declare const openaiTokenizer: TokenizerFn;
262
262
 
263
- type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams;
263
+ type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams & {
264
+ extraHeaders?: Record<string, string>;
265
+ };
264
266
  declare module '@gammatech/aijsx' {
265
267
  interface ChatCompletionRequestPayloads {
266
268
  anthropic: AnthropicChatCompletionRequest;
@@ -278,6 +280,7 @@ type AnthropicChatCompletionProps = {
278
280
  temperature?: number;
279
281
  stop?: string | string[];
280
282
  maxRetries?: number;
283
+ extraHeaders?: Record<string, string>;
281
284
  children: AINode;
282
285
  };
283
286
  declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, ctx: RenderContext): JSX.Element;
package/dist/index.d.ts CHANGED
@@ -260,7 +260,9 @@ declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, ctx: Ren
260
260
 
261
261
  declare const openaiTokenizer: TokenizerFn;
262
262
 
263
- type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams;
263
+ type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams & {
264
+ extraHeaders?: Record<string, string>;
265
+ };
264
266
  declare module '@gammatech/aijsx' {
265
267
  interface ChatCompletionRequestPayloads {
266
268
  anthropic: AnthropicChatCompletionRequest;
@@ -278,6 +280,7 @@ type AnthropicChatCompletionProps = {
278
280
  temperature?: number;
279
281
  stop?: string | string[];
280
282
  maxRetries?: number;
283
+ extraHeaders?: Record<string, string>;
281
284
  children: AINode;
282
285
  };
283
286
  declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, ctx: RenderContext): JSX.Element;
package/dist/index.js CHANGED
@@ -2492,9 +2492,15 @@ async function* AnthropicChatCompletionInner(props, ctx) {
2492
2492
  stop_sequences: stopSequences,
2493
2493
  model: props.model
2494
2494
  };
2495
+ const requestOptions = props.extraHeaders ? {
2496
+ headers: props.extraHeaders
2497
+ } : void 0;
2495
2498
  const chatCompletionRequestToLog = cleanChatCompletionRequest2(
2496
2499
  anthropicCompletionRequest
2497
2500
  );
2501
+ if (props.extraHeaders) {
2502
+ chatCompletionRequestToLog.extraHeaders = props.extraHeaders;
2503
+ }
2498
2504
  const logRequestData = {
2499
2505
  startTime,
2500
2506
  model: props.model,
@@ -2515,7 +2521,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
2515
2521
  });
2516
2522
  let response;
2517
2523
  try {
2518
- response = client.messages.stream(anthropicCompletionRequest);
2524
+ response = requestOptions ? client.messages.stream(anthropicCompletionRequest, requestOptions) : client.messages.stream(anthropicCompletionRequest);
2519
2525
  } catch (err) {
2520
2526
  if (err instanceof import_sdk.default.APIError) {
2521
2527
  const status = extractStatusFromError2(err);
@@ -2542,7 +2548,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
2542
2548
  if (event.type === "message_start") {
2543
2549
  inputUsage = event.message.usage?.input_tokens || 0;
2544
2550
  }
2545
- if (event.type === "content_block_delta") {
2551
+ if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
2546
2552
  const chunk = event.delta.text;
2547
2553
  content += chunk;
2548
2554
  yield chunk;
package/dist/index.mjs CHANGED
@@ -2390,9 +2390,15 @@ async function* AnthropicChatCompletionInner(props, ctx) {
2390
2390
  stop_sequences: stopSequences,
2391
2391
  model: props.model
2392
2392
  };
2393
+ const requestOptions = props.extraHeaders ? {
2394
+ headers: props.extraHeaders
2395
+ } : void 0;
2393
2396
  const chatCompletionRequestToLog = cleanChatCompletionRequest2(
2394
2397
  anthropicCompletionRequest
2395
2398
  );
2399
+ if (props.extraHeaders) {
2400
+ chatCompletionRequestToLog.extraHeaders = props.extraHeaders;
2401
+ }
2396
2402
  const logRequestData = {
2397
2403
  startTime,
2398
2404
  model: props.model,
@@ -2413,7 +2419,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
2413
2419
  });
2414
2420
  let response;
2415
2421
  try {
2416
- response = client.messages.stream(anthropicCompletionRequest);
2422
+ response = requestOptions ? client.messages.stream(anthropicCompletionRequest, requestOptions) : client.messages.stream(anthropicCompletionRequest);
2417
2423
  } catch (err) {
2418
2424
  if (err instanceof AnthropicClient.APIError) {
2419
2425
  const status = extractStatusFromError2(err);
@@ -2440,7 +2446,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
2440
2446
  if (event.type === "message_start") {
2441
2447
  inputUsage = event.message.usage?.input_tokens || 0;
2442
2448
  }
2443
- if (event.type === "content_block_delta") {
2449
+ if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
2444
2450
  const chunk = event.delta.text;
2445
2451
  content += chunk;
2446
2452
  yield chunk;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@gammatech/aijsx",
3
- "version": "0.12.1-dev.2024-07-09",
3
+ "version": "0.13.0-dev.2024-07-15",
4
4
  "description": "Rewrite of aijsx",
5
5
  "author": "Jordan Garcia",
6
6
  "license": "MIT",
@@ -34,7 +34,7 @@
34
34
  "check-types": "tsc --skipLibCheck --noEmit"
35
35
  },
36
36
  "dependencies": {
37
- "@anthropic-ai/sdk": "0.19.1",
37
+ "@anthropic-ai/sdk": "^0.24.3",
38
38
  "@anthropic-ai/tokenizer": "^0.0.4",
39
39
  "@google-cloud/vertexai": "^1.3.0",
40
40
  "fast-xml-parser": "^4.3.4",