@gammatech/aijsx 0.12.1-dev.2024-07-09 → 0.14.0-dev.2024-07-15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +16 -2
- package/dist/index.d.ts +16 -2
- package/dist/index.js +56 -35
- package/dist/index.mjs +55 -35
- package/package.json +2 -2
package/dist/index.d.mts
CHANGED
@@ -76,6 +76,14 @@ type RetryProps = {
     children: AINode;
 };
 declare function Retry({ shouldRetry, retries, maxRetries, lastError, children }: RetryProps, ctx: RenderContext): AsyncGenerator<string, void, unknown>;
+type AccumulatorProps = {
+    enabled?: boolean;
+    children: AINode;
+};
+/**
+ * If enabled, renders its children and accumulates the output instead of passing the stream through
+ */
+declare function Accumulate({ enabled, children }: AccumulatorProps, ctx: RenderContext): Promise<string | AINode>;

 type FallbackProps = {
     fallback: AINode;
@@ -254,13 +262,16 @@ type OpenAIChatCompletionProps = {
     stop?: string | string[];
     responseFormat?: ChatCompletionCreateParams.ResponseFormat['type'];
     maxRetries?: number;
+    stream?: boolean;
     children: AINode;
 };
 declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, ctx: RenderContext): AINode;

 declare const openaiTokenizer: TokenizerFn;

-type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams
+type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams & {
+    extraHeaders?: Record<string, string>;
+};
 declare module '@gammatech/aijsx' {
     interface ChatCompletionRequestPayloads {
         anthropic: AnthropicChatCompletionRequest;
@@ -278,6 +289,8 @@ type AnthropicChatCompletionProps = {
     temperature?: number;
     stop?: string | string[];
     maxRetries?: number;
+    extraHeaders?: Record<string, string>;
+    stream?: boolean;
     children: AINode;
 };
 declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, ctx: RenderContext): JSX.Element;
@@ -302,6 +315,7 @@ type GoogleChatCompletionProps = {
     maxTokens?: number;
     temperature?: number;
     stop?: string | string[];
+    stream?: boolean;
     maxRetries?: number;
     children: AINode;
     safetySettings?: {
@@ -311,4 +325,4 @@ type GoogleChatCompletionProps = {
 };
 declare function GoogleChatCompletion(props: GoogleChatCompletionProps, ctx: RenderContext): JSX.Element;

-export { AIComponent, AINode, AISpanAttributes, AISpanProcessor, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, AssistantMessage, type ChatCompletionClientAndProvider, ChatCompletionError, type ChatCompletionRequestPayloads, ChatMessage, Context, type CostFn, DebugMessage, DefaultMaxRetriesContext, EvaluatorFn, EvaluatorResult, Fallback, type GetChatCompletionClientAndProvider, GoogleChatCompletion, type GoogleChatCompletionRequest, GoogleClientContext, LogChatCompletionRequest, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, ParseVariablesError, ProcessedAISpanAttributes, type Prompt, PromptInvalidOutputError, PromptParsed, ReadableSpan, RenderContext, Retry, RetryCountContext, RetryLastErrorContext, SpanAttributes, SpanExporter, SpanProcessor, SystemMessage, type TokenizerFn, Trace, Tracer, UserMessage, type ValidAnthropicChatModel, type ValidGoogleChatModel, type ValidOpenAIChatModel, type ValidOpenAIVisionModel, anthropicTokenizer, computeUsage, createPrompt, createRenderContext, evaluatePrompt, isPromptParsed, openaiTokenizer, tracing };
+export { AIComponent, AINode, AISpanAttributes, AISpanProcessor, Accumulate, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, AssistantMessage, type ChatCompletionClientAndProvider, ChatCompletionError, type ChatCompletionRequestPayloads, ChatMessage, Context, type CostFn, DebugMessage, DefaultMaxRetriesContext, EvaluatorFn, EvaluatorResult, Fallback, type GetChatCompletionClientAndProvider, GoogleChatCompletion, type GoogleChatCompletionRequest, GoogleClientContext, LogChatCompletionRequest, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, ParseVariablesError, ProcessedAISpanAttributes, type Prompt, PromptInvalidOutputError, PromptParsed, ReadableSpan, RenderContext, Retry, RetryCountContext, RetryLastErrorContext, SpanAttributes, SpanExporter, SpanProcessor, SystemMessage, type TokenizerFn, Trace, Tracer, UserMessage, type ValidAnthropicChatModel, type ValidGoogleChatModel, type ValidOpenAIChatModel, type ValidOpenAIVisionModel, anthropicTokenizer, computeUsage, createPrompt, createRenderContext, evaluatePrompt, isPromptParsed, openaiTokenizer, tracing };
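Taken together, the declarations above add three things to the public surface: an Accumulate component, an optional stream prop on the chat-completion components, and an extraHeaders passthrough for Anthropic. A minimal usage sketch (not package code — the model id, prompt, and header value are placeholder assumptions; the component and prop names come from the declarations above):

import { AnthropicChatCompletion, UserMessage } from '@gammatech/aijsx';

const completion = (
  // stream={false} buffers the whole completion via the new Accumulate
  // wrapper instead of streaming chunks through to the consumer; the model
  // id and header value here are placeholders, not values from this diff.
  <AnthropicChatCompletion
    model="claude-3-opus-20240229"
    stream={false}
    extraHeaders={{ 'anthropic-beta': 'placeholder-value' }}
  >
    <UserMessage>Summarize the release notes.</UserMessage>
  </AnthropicChatCompletion>
);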
package/dist/index.d.ts
CHANGED
@@ -76,6 +76,14 @@ type RetryProps = {
     children: AINode;
 };
 declare function Retry({ shouldRetry, retries, maxRetries, lastError, children }: RetryProps, ctx: RenderContext): AsyncGenerator<string, void, unknown>;
+type AccumulatorProps = {
+    enabled?: boolean;
+    children: AINode;
+};
+/**
+ * If enabled, renders its children and accumulates the output instead of passing the stream through
+ */
+declare function Accumulate({ enabled, children }: AccumulatorProps, ctx: RenderContext): Promise<string | AINode>;

 type FallbackProps = {
     fallback: AINode;
@@ -254,13 +262,16 @@ type OpenAIChatCompletionProps = {
     stop?: string | string[];
     responseFormat?: ChatCompletionCreateParams.ResponseFormat['type'];
     maxRetries?: number;
+    stream?: boolean;
     children: AINode;
 };
 declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, ctx: RenderContext): AINode;

 declare const openaiTokenizer: TokenizerFn;

-type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams
+type AnthropicChatCompletionRequest = AnthropicClient.Messages.MessageStreamParams & {
+    extraHeaders?: Record<string, string>;
+};
 declare module '@gammatech/aijsx' {
     interface ChatCompletionRequestPayloads {
         anthropic: AnthropicChatCompletionRequest;
@@ -278,6 +289,8 @@ type AnthropicChatCompletionProps = {
     temperature?: number;
     stop?: string | string[];
     maxRetries?: number;
+    extraHeaders?: Record<string, string>;
+    stream?: boolean;
     children: AINode;
 };
 declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, ctx: RenderContext): JSX.Element;
@@ -302,6 +315,7 @@ type GoogleChatCompletionProps = {
     maxTokens?: number;
     temperature?: number;
     stop?: string | string[];
+    stream?: boolean;
     maxRetries?: number;
     children: AINode;
     safetySettings?: {
@@ -311,4 +325,4 @@ type GoogleChatCompletionProps = {
 };
 declare function GoogleChatCompletion(props: GoogleChatCompletionProps, ctx: RenderContext): JSX.Element;

-export { AIComponent, AINode, AISpanAttributes, AISpanProcessor, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, AssistantMessage, type ChatCompletionClientAndProvider, ChatCompletionError, type ChatCompletionRequestPayloads, ChatMessage, Context, type CostFn, DebugMessage, DefaultMaxRetriesContext, EvaluatorFn, EvaluatorResult, Fallback, type GetChatCompletionClientAndProvider, GoogleChatCompletion, type GoogleChatCompletionRequest, GoogleClientContext, LogChatCompletionRequest, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, ParseVariablesError, ProcessedAISpanAttributes, type Prompt, PromptInvalidOutputError, PromptParsed, ReadableSpan, RenderContext, Retry, RetryCountContext, RetryLastErrorContext, SpanAttributes, SpanExporter, SpanProcessor, SystemMessage, type TokenizerFn, Trace, Tracer, UserMessage, type ValidAnthropicChatModel, type ValidGoogleChatModel, type ValidOpenAIChatModel, type ValidOpenAIVisionModel, anthropicTokenizer, computeUsage, createPrompt, createRenderContext, evaluatePrompt, isPromptParsed, openaiTokenizer, tracing };
+export { AIComponent, AINode, AISpanAttributes, AISpanProcessor, Accumulate, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, AssistantMessage, type ChatCompletionClientAndProvider, ChatCompletionError, type ChatCompletionRequestPayloads, ChatMessage, Context, type CostFn, DebugMessage, DefaultMaxRetriesContext, EvaluatorFn, EvaluatorResult, Fallback, type GetChatCompletionClientAndProvider, GoogleChatCompletion, type GoogleChatCompletionRequest, GoogleClientContext, LogChatCompletionRequest, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, type OpenAIChatMessage, OpenAIClientContext, ParseVariablesError, ProcessedAISpanAttributes, type Prompt, PromptInvalidOutputError, PromptParsed, ReadableSpan, RenderContext, Retry, RetryCountContext, RetryLastErrorContext, SpanAttributes, SpanExporter, SpanProcessor, SystemMessage, type TokenizerFn, Trace, Tracer, UserMessage, type ValidAnthropicChatModel, type ValidGoogleChatModel, type ValidOpenAIChatModel, type ValidOpenAIVisionModel, anthropicTokenizer, computeUsage, createPrompt, createRenderContext, evaluatePrompt, isPromptParsed, openaiTokenizer, tracing };
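The .d.ts mirror confirms Accumulate is exported for direct use, not just wired in internally. A sketch of gating buffering on a flag (hypothetical wrapper; the enabled prop and the Promise<string | AINode> return type come from the declaration above, while the model id and prompt are placeholders):

import { Accumulate, OpenAIChatCompletion, UserMessage } from '@gammatech/aijsx';

// enabled={false} returns the children untouched, so the subtree still
// streams; enabled={true} joins the rendered output into a single string.
const maybeBuffered = (bufferWholeOutput: boolean) => (
  <Accumulate enabled={bufferWholeOutput}>
    <OpenAIChatCompletion model="gpt-4o">
      <UserMessage>Write a haiku about diffs.</UserMessage>
    </OpenAIChatCompletion>
  </Accumulate>
);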
package/dist/index.js
CHANGED
@@ -31,6 +31,7 @@ var src_exports = {};
 __export(src_exports, {
   AIFragment: () => AIFragment,
   AISpanProcessor: () => AISpanProcessor,
+  Accumulate: () => Accumulate,
   AnthropicChatCompletion: () => AnthropicChatCompletion,
   AnthropicClient: () => import_sdk2.default,
   AnthropicClientContext: () => AnthropicClientContext,
@@ -1603,6 +1604,17 @@ async function* Retry({ shouldRetry: shouldRetry4, retries = 0, maxRetries = 3,
     );
   }
 }
+async function Accumulate({ enabled = true, children }, ctx) {
+  if (!enabled) {
+    return children;
+  }
+  let accum = "";
+  const stream = ctx.render(children);
+  for await (const value of stream) {
+    accum += value;
+  }
+  return accum;
+}
 var BASE_BACKOFF = 100;
 var backoff = (retries) => {
   const waitTime = BASE_BACKOFF * Math.pow(4, retries);
@@ -1952,6 +1964,36 @@ function isPromptParsed2(prompt) {
 // src/lib/openai/OpenAI.tsx
 var import_openai2 = require("openai");

+// src/lib/openai/errors.ts
+var import_openai = require("openai");
+var extractStatusFromError = (error) => {
+  if (error instanceof import_openai.OpenAI.APIError) {
+    return error.status;
+  } else if (error instanceof import_openai.OpenAI.APIConnectionError) {
+    return void 0;
+  } else {
+    return void 0;
+  }
+};
+var errorToChatCompletionError = (error, requestData) => {
+  const castError = castToError(error);
+  const status = extractStatusFromError(castError);
+  let messagePrefix = "";
+  if (error instanceof import_openai.OpenAI.APIError) {
+    messagePrefix = "OpenAIClient.APIError: ";
+  } else if (error instanceof import_openai.OpenAI.APIConnectionError) {
+    messagePrefix = "OpenAIClient.APIConnectionError: ";
+  }
+  const shouldRetry4 = status !== 400;
+  return new ChatCompletionError(
+    `${messagePrefix}${castError.message}`,
+    requestData,
+    status,
+    shouldRetry4,
+    error instanceof Error ? error : void 0
+  );
+};
+
 // src/lib/openai/tokenizer.ts
 var import_js_tiktoken = require("js-tiktoken");
 var cl100kTokenizer = (0, import_js_tiktoken.getEncoding)("cl100k_base");
@@ -2063,36 +2105,6 @@ async function buildChatMessages(ctx, children, opts) {
   });
 }

-// src/lib/openai/errors.ts
-var import_openai = require("openai");
-var extractStatusFromError = (error) => {
-  if (error instanceof import_openai.OpenAI.APIError) {
-    return error.status;
-  } else if (error instanceof import_openai.OpenAI.APIConnectionError) {
-    return void 0;
-  } else {
-    return void 0;
-  }
-};
-var errorToChatCompletionError = (error, requestData) => {
-  const castError = castToError(error);
-  const status = extractStatusFromError(castError);
-  let messagePrefix = "";
-  if (error instanceof import_openai.OpenAI.APIError) {
-    messagePrefix = "OpenAIClient.APIError: ";
-  } else if (error instanceof import_openai.OpenAI.APIConnectionError) {
-    messagePrefix = "OpenAIClient.APIConnectionError: ";
-  }
-  const shouldRetry4 = status !== 400;
-  return new ChatCompletionError(
-    `${messagePrefix}${castError.message}`,
-    requestData,
-    status,
-    shouldRetry4,
-    error instanceof Error ? error : void 0
-  );
-};
-
 // src/lib/openai/OpenAI.tsx
 var defaultClient = null;
 var OpenAIClientContext = createContext(async () => {
@@ -2155,12 +2167,13 @@ var shouldRetry = (error) => {
 };
 function OpenAIChatCompletion(props, ctx) {
   const defaultMaxRetries = ctx.getContext(DefaultMaxRetriesContext);
+  const shouldAccumulate = props.stream === false;
   return /* @__PURE__ */ jsx(
     Retry,
     {
       maxRetries: props.maxRetries || defaultMaxRetries,
       shouldRetry,
-      children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(OpenAIChatCompletionInner, { ...props }) })
+      children: /* @__PURE__ */ jsx(Accumulate, { enabled: shouldAccumulate, children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(OpenAIChatCompletionInner, { ...props }) }) })
     }
   );
 }
@@ -2446,12 +2459,13 @@ var extractStatusFromError2 = (error) => {
 };
 function AnthropicChatCompletion(props, ctx) {
   const defaultMaxRetries = ctx.getContext(DefaultMaxRetriesContext);
+  const shouldAccumulate = props.stream === false;
   return /* @__PURE__ */ jsx(
     Retry,
     {
       maxRetries: props.maxRetries || defaultMaxRetries,
       shouldRetry: shouldRetry2,
-      children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(AnthropicChatCompletionInner, { ...props }) })
+      children: /* @__PURE__ */ jsx(Accumulate, { enabled: shouldAccumulate, children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(AnthropicChatCompletionInner, { ...props }) }) })
     }
   );
 }
@@ -2492,9 +2506,15 @@ async function* AnthropicChatCompletionInner(props, ctx) {
     stop_sequences: stopSequences,
     model: props.model
   };
+  const requestOptions = props.extraHeaders ? {
+    headers: props.extraHeaders
+  } : void 0;
   const chatCompletionRequestToLog = cleanChatCompletionRequest2(
     anthropicCompletionRequest
   );
+  if (props.extraHeaders) {
+    chatCompletionRequestToLog.extraHeaders = props.extraHeaders;
+  }
   const logRequestData = {
     startTime,
     model: props.model,
@@ -2515,7 +2535,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
   });
   let response;
   try {
-    response = client.messages.stream(anthropicCompletionRequest);
+    response = requestOptions ? client.messages.stream(anthropicCompletionRequest, requestOptions) : client.messages.stream(anthropicCompletionRequest);
   } catch (err) {
     if (err instanceof import_sdk.default.APIError) {
       const status = extractStatusFromError2(err);
@@ -2542,7 +2562,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
     if (event.type === "message_start") {
       inputUsage = event.message.usage?.input_tokens || 0;
     }
-    if (event.type === "content_block_delta") {
+    if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
       const chunk = event.delta.text;
       content += chunk;
       yield chunk;
@@ -2755,7 +2775,7 @@ function GoogleChatCompletion(props, ctx) {
     {
       maxRetries: props.maxRetries || defaultMaxRetries,
       shouldRetry: shouldRetry3,
-      children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(GoogleChatCompletionInner, { ...props }) })
+      children: /* @__PURE__ */ jsx(Accumulate, { enabled: props.stream === false, children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(GoogleChatCompletionInner, { ...props }) }) })
     }
   );
 }
@@ -2940,6 +2960,7 @@ var import_vertexai2 = require("@google-cloud/vertexai");
 0 && (module.exports = {
   AIFragment,
   AISpanProcessor,
+  Accumulate,
   AnthropicChatCompletion,
   AnthropicClient,
   AnthropicClientContext,
package/dist/index.mjs
CHANGED
@@ -1501,6 +1501,17 @@ async function* Retry({ shouldRetry: shouldRetry4, retries = 0, maxRetries = 3,
     );
   }
 }
+async function Accumulate({ enabled = true, children }, ctx) {
+  if (!enabled) {
+    return children;
+  }
+  let accum = "";
+  const stream = ctx.render(children);
+  for await (const value of stream) {
+    accum += value;
+  }
+  return accum;
+}
 var BASE_BACKOFF = 100;
 var backoff = (retries) => {
   const waitTime = BASE_BACKOFF * Math.pow(4, retries);
@@ -1850,6 +1861,36 @@ function isPromptParsed2(prompt) {
 // src/lib/openai/OpenAI.tsx
 import { OpenAI as OpenAIClient2 } from "openai";

+// src/lib/openai/errors.ts
+import { OpenAI as OpenAIClient } from "openai";
+var extractStatusFromError = (error) => {
+  if (error instanceof OpenAIClient.APIError) {
+    return error.status;
+  } else if (error instanceof OpenAIClient.APIConnectionError) {
+    return void 0;
+  } else {
+    return void 0;
+  }
+};
+var errorToChatCompletionError = (error, requestData) => {
+  const castError = castToError(error);
+  const status = extractStatusFromError(castError);
+  let messagePrefix = "";
+  if (error instanceof OpenAIClient.APIError) {
+    messagePrefix = "OpenAIClient.APIError: ";
+  } else if (error instanceof OpenAIClient.APIConnectionError) {
+    messagePrefix = "OpenAIClient.APIConnectionError: ";
+  }
+  const shouldRetry4 = status !== 400;
+  return new ChatCompletionError(
+    `${messagePrefix}${castError.message}`,
+    requestData,
+    status,
+    shouldRetry4,
+    error instanceof Error ? error : void 0
+  );
+};
+
 // src/lib/openai/tokenizer.ts
 import { getEncoding } from "js-tiktoken";
 var cl100kTokenizer = getEncoding("cl100k_base");
@@ -1961,36 +2002,6 @@ async function buildChatMessages(ctx, children, opts) {
   });
 }

-// src/lib/openai/errors.ts
-import { OpenAI as OpenAIClient } from "openai";
-var extractStatusFromError = (error) => {
-  if (error instanceof OpenAIClient.APIError) {
-    return error.status;
-  } else if (error instanceof OpenAIClient.APIConnectionError) {
-    return void 0;
-  } else {
-    return void 0;
-  }
-};
-var errorToChatCompletionError = (error, requestData) => {
-  const castError = castToError(error);
-  const status = extractStatusFromError(castError);
-  let messagePrefix = "";
-  if (error instanceof OpenAIClient.APIError) {
-    messagePrefix = "OpenAIClient.APIError: ";
-  } else if (error instanceof OpenAIClient.APIConnectionError) {
-    messagePrefix = "OpenAIClient.APIConnectionError: ";
-  }
-  const shouldRetry4 = status !== 400;
-  return new ChatCompletionError(
-    `${messagePrefix}${castError.message}`,
-    requestData,
-    status,
-    shouldRetry4,
-    error instanceof Error ? error : void 0
-  );
-};
-
 // src/lib/openai/OpenAI.tsx
 var defaultClient = null;
 var OpenAIClientContext = createContext(async () => {
@@ -2053,12 +2064,13 @@ var shouldRetry = (error) => {
 };
 function OpenAIChatCompletion(props, ctx) {
   const defaultMaxRetries = ctx.getContext(DefaultMaxRetriesContext);
+  const shouldAccumulate = props.stream === false;
   return /* @__PURE__ */ jsx(
     Retry,
     {
       maxRetries: props.maxRetries || defaultMaxRetries,
       shouldRetry,
-      children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(OpenAIChatCompletionInner, { ...props }) })
+      children: /* @__PURE__ */ jsx(Accumulate, { enabled: shouldAccumulate, children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(OpenAIChatCompletionInner, { ...props }) }) })
     }
   );
 }
@@ -2344,12 +2356,13 @@ var extractStatusFromError2 = (error) => {
 };
 function AnthropicChatCompletion(props, ctx) {
   const defaultMaxRetries = ctx.getContext(DefaultMaxRetriesContext);
+  const shouldAccumulate = props.stream === false;
   return /* @__PURE__ */ jsx(
     Retry,
     {
       maxRetries: props.maxRetries || defaultMaxRetries,
       shouldRetry: shouldRetry2,
-      children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(AnthropicChatCompletionInner, { ...props }) })
+      children: /* @__PURE__ */ jsx(Accumulate, { enabled: shouldAccumulate, children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(AnthropicChatCompletionInner, { ...props }) }) })
     }
   );
 }
@@ -2390,9 +2403,15 @@ async function* AnthropicChatCompletionInner(props, ctx) {
     stop_sequences: stopSequences,
     model: props.model
   };
+  const requestOptions = props.extraHeaders ? {
+    headers: props.extraHeaders
+  } : void 0;
   const chatCompletionRequestToLog = cleanChatCompletionRequest2(
     anthropicCompletionRequest
   );
+  if (props.extraHeaders) {
+    chatCompletionRequestToLog.extraHeaders = props.extraHeaders;
+  }
   const logRequestData = {
     startTime,
     model: props.model,
@@ -2413,7 +2432,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
   });
   let response;
   try {
-    response = client.messages.stream(anthropicCompletionRequest);
+    response = requestOptions ? client.messages.stream(anthropicCompletionRequest, requestOptions) : client.messages.stream(anthropicCompletionRequest);
   } catch (err) {
     if (err instanceof AnthropicClient.APIError) {
       const status = extractStatusFromError2(err);
@@ -2440,7 +2459,7 @@ async function* AnthropicChatCompletionInner(props, ctx) {
     if (event.type === "message_start") {
       inputUsage = event.message.usage?.input_tokens || 0;
     }
-    if (event.type === "content_block_delta") {
+    if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
      const chunk = event.delta.text;
       content += chunk;
       yield chunk;
@@ -2657,7 +2676,7 @@ function GoogleChatCompletion(props, ctx) {
     {
       maxRetries: props.maxRetries || defaultMaxRetries,
       shouldRetry: shouldRetry3,
-      children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(GoogleChatCompletionInner, { ...props }) })
+      children: /* @__PURE__ */ jsx(Accumulate, { enabled: props.stream === false, children: /* @__PURE__ */ jsx(Trace, { name: "ai.chatCompletion", children: /* @__PURE__ */ jsx(GoogleChatCompletionInner, { ...props }) }) })
     }
   );
 }
@@ -2845,6 +2864,7 @@ import {
 export {
   AIFragment,
   AISpanProcessor,
+  Accumulate,
   AnthropicChatCompletion,
   AnthropicClient2 as AnthropicClient,
   AnthropicClientContext,
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@gammatech/aijsx",
-  "version": "0.12.1-dev.2024-07-09",
+  "version": "0.14.0-dev.2024-07-15",
   "description": "Rewrite of aijsx",
   "author": "Jordan Garcia",
   "license": "MIT",
@@ -34,7 +34,7 @@
     "check-types": "tsc --skipLibCheck --noEmit"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "0.
+    "@anthropic-ai/sdk": "^0.24.3",
     "@anthropic-ai/tokenizer": "^0.0.4",
     "@google-cloud/vertexai": "^1.3.0",
     "fast-xml-parser": "^4.3.4",