@gammatech/aijsx 0.2.0-beta.5 → 0.2.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -2,7 +2,7 @@ import { L as LogImplementation, R as RenderContext, C as Context, A as AINode }
2
2
  export { t as AIComponent, v as AIElement, a as AIFragment, f as AssistantMessage, B as BoundLogger, i as ChatCompletionError, j as ChatCompletionRequestPayloads, e as ChatCompletionRole, p as CombinedLogger, o as ConsoleLogger, q as Literal, k as LogChatCompletionRequest, l as LogChatCompletionResponse, m as LogLevel, n as Logger, b as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, s as RenderResult, w as Renderable, r as RenderableStream, g as RenderedConversationMessage, S as SystemMessage, U as UserMessage, u as attachedContextSymbol, h as computeUsage, c as createAIElement, d as createContext } from './createElement-YEuZ7P4l.mjs';
3
3
  import { OpenAI } from 'openai';
4
4
  export { OpenAI as OpenAIClient } from 'openai';
5
- import { ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam } from 'openai/resources';
5
+ import { ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionCreateParams } from 'openai/resources';
6
6
  import AnthropicClient from '@anthropic-ai/sdk';
7
7
  export { default as AnthropicClient } from '@anthropic-ai/sdk';
8
8
  export { countTokens as countAnthropicTokens } from '@anthropic-ai/tokenizer';
@@ -25,6 +25,7 @@ type OpenAIChatCompletionProps = {
25
25
  model: ValidOpenAIChatModel;
26
26
  maxTokens?: number;
27
27
  temperature?: number;
28
+ responseFormat?: ChatCompletionCreateParams.ResponseFormat['type'];
28
29
  children: AINode;
29
30
  provider?: string;
30
31
  providerRegion?: string;
package/dist/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import { L as LogImplementation, R as RenderContext, C as Context, A as AINode }
2
2
  export { t as AIComponent, v as AIElement, a as AIFragment, f as AssistantMessage, B as BoundLogger, i as ChatCompletionError, j as ChatCompletionRequestPayloads, e as ChatCompletionRole, p as CombinedLogger, o as ConsoleLogger, q as Literal, k as LogChatCompletionRequest, l as LogChatCompletionResponse, m as LogLevel, n as Logger, b as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, s as RenderResult, w as Renderable, r as RenderableStream, g as RenderedConversationMessage, S as SystemMessage, U as UserMessage, u as attachedContextSymbol, h as computeUsage, c as createAIElement, d as createContext } from './createElement-YEuZ7P4l.js';
3
3
  import { OpenAI } from 'openai';
4
4
  export { OpenAI as OpenAIClient } from 'openai';
5
- import { ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam } from 'openai/resources';
5
+ import { ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionCreateParams } from 'openai/resources';
6
6
  import AnthropicClient from '@anthropic-ai/sdk';
7
7
  export { default as AnthropicClient } from '@anthropic-ai/sdk';
8
8
  export { countTokens as countAnthropicTokens } from '@anthropic-ai/tokenizer';
@@ -25,6 +25,7 @@ type OpenAIChatCompletionProps = {
25
25
  model: ValidOpenAIChatModel;
26
26
  maxTokens?: number;
27
27
  temperature?: number;
28
+ responseFormat?: ChatCompletionCreateParams.ResponseFormat['type'];
28
29
  children: AINode;
29
30
  provider?: string;
30
31
  providerRegion?: string;
package/dist/index.js CHANGED
@@ -854,6 +854,9 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
854
854
  max_tokens: props.maxTokens,
855
855
  temperature: props.temperature,
856
856
  messages: openAIMessages,
857
+ response_format: props.responseFormat ? {
858
+ type: props.responseFormat
859
+ } : void 0,
857
860
  stream: true
858
861
  };
859
862
  const logRequestData = {
package/dist/index.mjs CHANGED
@@ -771,6 +771,9 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
771
771
  max_tokens: props.maxTokens,
772
772
  temperature: props.temperature,
773
773
  messages: openAIMessages,
774
+ response_format: props.responseFormat ? {
775
+ type: props.responseFormat
776
+ } : void 0,
774
777
  stream: true
775
778
  };
776
779
  const logRequestData = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@gammatech/aijsx",
3
- "version": "0.2.0-beta.5",
3
+ "version": "0.2.0-beta.6",
4
4
  "description": "Rewrite of aijsx",
5
5
  "author": "Jordan Garcia",
6
6
  "license": "MIT",