@gammatech/aijsx 0.1.2 → 0.1.3-asa.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{createElement-Q_LxUYf8.d.mts → createElement-sO2rY6Ly.d.mts} +4 -4
- package/dist/{createElement-Q_LxUYf8.d.ts → createElement-sO2rY6Ly.d.ts} +4 -4
- package/dist/index.d.mts +3 -39
- package/dist/index.d.ts +3 -39
- package/dist/index.js +55 -154
- package/dist/index.mjs +56 -140
- package/dist/jsx-dev-runtime.d.mts +1 -1
- package/dist/jsx-dev-runtime.d.ts +1 -1
- package/dist/jsx-runtime.d.mts +1 -1
- package/dist/jsx-runtime.d.ts +1 -1
- package/package.json +1 -4
package/dist/{createElement-Q_LxUYf8.d.mts → createElement-sO2rY6Ly.d.mts}
RENAMED

@@ -99,8 +99,8 @@ declare class BoundLogger implements Logger {
     info: (...msgs: Loggable[]) => void;
     debug: (...msgs: Loggable[]) => void;
     logException: (exception: unknown) => void;
-    chatCompletionRequest: <K extends
-    chatCompletionResponse: <K extends
+    chatCompletionRequest: <K extends "openai">(provider: K, payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>) => void;
+    chatCompletionResponse: <K extends "openai">(provider: K, payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>) => void;
 }
 declare class NoopLogImplementation extends LogImplementation {
     log(_ctx: RenderContext, _level: LogLevel, _message: string): void;
@@ -112,8 +112,8 @@ declare class CombinedLogger extends LogImplementation {
     private readonly loggers;
     constructor(loggers: LogImplementation[]);
     log(...args: Parameters<LogImplementation['log']>): void;
-    chatCompletionRequest<
-    chatCompletionResponse<
+    chatCompletionRequest<_K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionRequest']>): void;
+    chatCompletionResponse<_K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionResponse']>): void;
 }

 type ChatCompletionRole = 'user' | 'system' | 'assistant';

package/dist/{createElement-Q_LxUYf8.d.ts → createElement-sO2rY6Ly.d.ts}
RENAMED

Same two hunks as the .d.mts file above.
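The net effect of these two hunks: the logger's provider parameter, truncated in the old lines of this view, is now emitted as the literal union of registered providers, which after the Anthropic removal below is just `"openai"`. A minimal sketch of how the narrowed signature type-checks; the payload shape is a stand-in, since this diff shows only the signatures and the `"openai"` key:

```ts
// Sketch only: the members of ChatCompletionRequestPayloads beyond "openai"
// are not visible in this diff, so the request payload type is assumed.
import type { OpenAI } from 'openai';

interface ChatCompletionRequestPayloads {
  openai: OpenAI.ChatCompletionCreateParams;
}

declare const logger: {
  chatCompletionRequest: <K extends 'openai'>(
    provider: K,
    payload: { request: ChatCompletionRequestPayloads[K] } // stand-in for LogChatCompletionRequest<T>
  ) => void;
};

logger.chatCompletionRequest('openai', {
  request: { model: 'gpt-4', messages: [{ role: 'user', content: 'hi' }] },
});
// logger.chatCompletionRequest('anthropic', ...) is now a type error.
```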
package/dist/index.d.mts
CHANGED

@@ -1,10 +1,7 @@
-import { L as LogImplementation, R as RenderContext, C as Context, A as AIElement, a as RenderedConversationMessage } from './createElement-Q_LxUYf8.mjs';
-export { u as AIComponent, b as AIFragment, w as AINode, f as AssistantMessage, B as BoundLogger, j as ChatCompletionError, k as ChatCompletionRequestPayloads, q as CombinedLogger, p as ConsoleLogger, g as ConversationMessage, r as Literal, l as LogChatCompletionRequest, m as LogChatCompletionResponse, n as LogLevel, o as Logger, d as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, t as RenderResult, x as Renderable, s as RenderableStream, S as SystemMessage, U as UserMessage, v as attachedContextSymbol, h as childrenToConversationMessage, i as computeUsage, c as createAIElement, e as createContext } from './createElement-Q_LxUYf8.mjs';
+import { L as LogImplementation, R as RenderContext, C as Context, A as AIElement, a as RenderedConversationMessage } from './createElement-sO2rY6Ly.mjs';
+export { u as AIComponent, b as AIFragment, w as AINode, f as AssistantMessage, B as BoundLogger, j as ChatCompletionError, k as ChatCompletionRequestPayloads, q as CombinedLogger, p as ConsoleLogger, g as ConversationMessage, r as Literal, l as LogChatCompletionRequest, m as LogChatCompletionResponse, n as LogLevel, o as Logger, d as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, t as RenderResult, x as Renderable, s as RenderableStream, S as SystemMessage, U as UserMessage, v as attachedContextSymbol, h as childrenToConversationMessage, i as computeUsage, c as createAIElement, e as createContext } from './createElement-sO2rY6Ly.mjs';
 import { OpenAI } from 'openai';
 export { OpenAI as OpenAIClient } from 'openai';
-import AnthropicClient from '@anthropic-ai/sdk';
-export { default as AnthropicClient } from '@anthropic-ai/sdk';
-export { countTokens as countAnthropicTokens } from '@anthropic-ai/tokenizer';

 declare function createRenderContext({ logger, rootRenderId, }?: {
     logger?: LogImplementation;
@@ -36,37 +33,4 @@ declare const tokenizer: {
 declare function tokenLimitForChatModel(model: ValidOpenAIChatModel): number | undefined;
 declare function tokenCountForConversationMessage(message: Pick<RenderedConversationMessage, 'type' | 'content'>): number;

-type
-declare module '@gammatech/aijsx' {
-    interface ChatCompletionRequestPayloads {
-        anthropic: AnthropicChatCompletionRequest;
-    }
-}
-/**
- * The set of valid Claude models.
- * @see https://docs.anthropic.com/claude/reference/selecting-a-model
- */
-type ValidAnthropicChatModel = 'claude-instant-1.2' | 'claude-2.1';
-declare const AnthropicClientContext: Context<() => AnthropicClient>;
-/**
- * If you use an Anthropic model without specifying the max tokens for the completion, this value will be used as the default.
- */
-declare const defaultMaxTokens = 4096;
-type AnthropicChatCompletionProps = {
-    model: ValidAnthropicChatModel;
-    maxTokens?: number;
-    temperature?: number;
-    children: AIElement<any> | AIElement<any>[];
-    provider?: string;
-    providerRegion?: string;
-};
-/**
- * An AI.JSX component that invokes an Anthropic Large Language Model.
- * @param children The children to render.
- * @param chatModel The chat model to use.
- * @param completionModel The completion model to use.
- * @param client The Anthropic client.
- */
-declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, { render, logger, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;
-
-export { AIElement, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, OpenAIClientContext, RenderContext, RenderedConversationMessage, type ValidAnthropicChatModel, type ValidOpenAIChatModel, createRenderContext, defaultMaxTokens, tokenCountForConversationMessage, tokenLimitForChatModel, tokenizer };
+export { AIElement, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, OpenAIClientContext, RenderContext, RenderedConversationMessage, type ValidOpenAIChatModel, createRenderContext, tokenCountForConversationMessage, tokenLimitForChatModel, tokenizer };
package/dist/index.d.ts
CHANGED

Same two hunks as index.d.mts above, with './createElement-sO2rY6Ly.js' (previously './createElement-Q_LxUYf8.js') as the chunk import specifier.
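The removed declarations used TypeScript module augmentation to register the `anthropic` payload under the package's own `ChatCompletionRequestPayloads` map. Since that interface is still exported, the same pattern should remain open to downstream packages; a hypothetical sketch follows. Note, though, that `BoundLogger`'s emitted signature is now inlined to the `"openai"` literal, so an augmentation widens `CombinedLogger`'s `keyof`-based generics but not that literal:

```ts
// Hypothetical provider registration mirroring the removed Anthropic code.
// MyProviderRequest and the 'myProvider' key are illustrative, not part of
// the package.
interface MyProviderRequest {
  model: string;
  prompt: string;
}

declare module '@gammatech/aijsx' {
  interface ChatCompletionRequestPayloads {
    myProvider: MyProviderRequest;
  }
}

export {}; // keep this file a module so the augmentation applies
```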
package/dist/index.js
CHANGED

@@ -1,8 +1,6 @@
-var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
-var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
   for (var name in all)
@@ -16,23 +14,12 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
-var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
-  // If the importer is in node compatibility mode or this is not an ESM
-  // file that has been converted to a CommonJS file using a Babel-
-  // compatible transform (i.e. "__esModule" has not been set), then set
-  // "default" to the CommonJS "module.exports" for node compatibility.
-  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
-  mod
-));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

 // src/index.ts
 var src_exports = {};
 __export(src_exports, {
   AIFragment: () => AIFragment,
-  AnthropicChatCompletion: () => AnthropicChatCompletion,
-  AnthropicClient: () => import_sdk2.default,
-  AnthropicClientContext: () => AnthropicClientContext,
   AssistantMessage: () => AssistantMessage,
   BoundLogger: () => BoundLogger,
   ChatCompletionError: () => ChatCompletionError,
@@ -49,11 +36,9 @@ __export(src_exports, {
   attachedContextSymbol: () => attachedContextSymbol,
   childrenToConversationMessage: () => childrenToConversationMessage,
   computeUsage: () => computeUsage,
-  countAnthropicTokens: () => import_tokenizer3.countTokens,
   createAIElement: () => createAIElement,
   createContext: () => createContext,
   createRenderContext: () => createRenderContext,
-  defaultMaxTokens: () => defaultMaxTokens,
   tokenCountForConversationMessage: () => tokenCountForConversationMessage,
   tokenLimitForChatModel: () => tokenLimitForChatModel,
   tokenizer: () => tokenizer
@@ -280,6 +265,13 @@ function getEnvVar(name, shouldThrow = true) {
   }
   return result;
 }
+function performanceNow() {
+  if (typeof performance !== "undefined" && performance.now) {
+    return performance.now();
+  } else {
+    return Date.now();
+  }
+}

 // src/render.ts
 function renderLiteral(renderable) {
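The new `performanceNow` helper is the portability shim behind the `OpenAIChatCompletion` timing changes further down: it prefers the monotonic, sub-millisecond `performance.now()` where that global exists and falls back to wall-clock `Date.now()` elsewhere. A small usage sketch; the surrounding calls are paraphrased from the later hunks:

```ts
// Both samples come from the same clock source, so the subtraction is valid
// whichever branch the runtime takes; only the resolution differs.
function performanceNow(): number {
  if (typeof performance !== 'undefined' && performance.now) {
    return performance.now();
  }
  return Date.now();
}

const startTime = performanceNow();
// ... await the chat completion stream ...
const latency = performanceNow() - startTime; // milliseconds
```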
@@ -346,6 +338,15 @@ var ParallelStreamIterator = class extends EventEmitter {
       value
     });
   }
+  error(streamInd, error) {
+    const valInd = this.values[streamInd].length;
+    this.values[streamInd].push(error);
+    this.emit("error", {
+      streamInd,
+      valInd,
+      error
+    });
+  }
   complete(streamInd) {
     this.completedStreams[streamInd] = true;
     this.emit("complete", {
@@ -364,6 +365,10 @@ var ParallelStreamIterator = class extends EventEmitter {
   resolveAt(streamInd, valInd) {
     return new Promise((resolve, reject) => {
       const value = this.values[streamInd][valInd];
+      if (value instanceof Error) {
+        reject(value);
+        return;
+      }
       if (value !== void 0) {
         resolve({ done: false, value });
         return;
@@ -375,14 +380,27 @@ var ParallelStreamIterator = class extends EventEmitter {
         reject("next");
         return;
       }
-      const
-
-
-
-
+      const unsubData = this.on("data", (data) => {
+        const atCursor = data.streamInd === streamInd && data.valInd === valInd;
+        if (!atCursor) {
+          return;
+        }
+        resolve({ done: false, value: data.value });
+        unsubData();
+        unsubError();
+        unsubComplete();
+      });
+      const unsubError = this.on("error", (data) => {
+        const atCursor = data.streamInd === streamInd && data.valInd === valInd;
+        if (!atCursor) {
+          return;
         }
+        reject(data.error);
+        unsubData();
+        unsubError();
+        unsubComplete();
       });
-      const
+      const unsubComplete = this.on("complete", (data) => {
         if (streamInd !== data.streamInd) {
           return;
         }
@@ -392,8 +410,9 @@ var ParallelStreamIterator = class extends EventEmitter {
         if (this.values[streamInd].length === valInd) {
           reject("next");
         }
-
-
+        unsubData();
+        unsubError();
+        unsubComplete();
       });
     });
   }
@@ -419,10 +438,18 @@ var ParallelStreamIterator = class extends EventEmitter {
 function coalesceParallelStreams(streams) {
   const iter = new ParallelStreamIterator(streams.length);
   streams.forEach(async (s, streamInd) => {
-
-
+    try {
+      for await (const value of s) {
+        iter.push(streamInd, value);
+      }
+      iter.complete(streamInd);
+    } catch (e) {
+      if (e instanceof Error) {
+        iter.error(streamInd, e);
+      } else {
+        iter.error(streamInd, new Error(e));
+      }
     }
-    iter.complete(streamInd);
   });
   return iter;
 }
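Together with the new `error` channel on `ParallelStreamIterator`, this try/catch means a rejection from any source generator is recorded at its position in the value buffer and re-raised when the consumer's cursor reaches it, instead of escaping the detached `forEach` callback as an unhandled rejection. A consumer-side sketch of the resulting behavior; the failing generator is illustrative, and `coalesceParallelStreams` is passed in as a parameter because it is module-internal rather than exported:

```ts
async function* ok(): AsyncGenerator<string> {
  yield 'a';
  yield 'b';
}
async function* boom(): AsyncGenerator<string> {
  yield 'c';
  throw new Error('upstream failure');
}

async function demo(
  coalesce: (streams: AsyncGenerator<string>[]) => AsyncIterable<string>
) {
  try {
    for await (const chunk of coalesce([ok(), boom()])) {
      console.log(chunk); // 'a', 'b', then 'c': streams are coalesced in order
    }
  } catch (err) {
    // With this change the rejection from `boom` surfaces here instead of
    // being lost inside the detached forEach callback.
    console.error(err);
  }
}
```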
@@ -604,7 +631,6 @@ function jsx(type, config, maybeKey) {
   const children = config && Array.isArray(config.children) ? config.children : [];
   return createAIElement(type, configWithKey, ...children);
 }
-var jsxs = jsx;

 // src/lib/openai/OpenAI.tsx
 var defaultClient = null;
@@ -617,7 +643,7 @@ var OpenAIClientContext = createContext(() => {
   return defaultClient;
 });
 async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
-  const startTime = performance.now();
+  const startTime = performanceNow();
   const client = getContext(OpenAIClientContext)();
   if (!client) {
     throw new Error("[OpenAI] must supply OpenAI model via context");
@@ -697,7 +723,7 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {
   const responseData = {
     ...logRequestData,
     finishReason,
-    latency: performance.now() - startTime,
+    latency: performanceNow() - startTime,
     outputMessage,
     tokensUsed: computeUsage([...renderedMessages, outputMessage])
   };
@@ -706,132 +732,9 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {

 // src/lib/openai/index.ts
 var import_openai2 = require("openai");
-
-// src/lib/anthropic/Anthropic.tsx
-var import_sdk = __toESM(require("@anthropic-ai/sdk"));
-var import_tokenizer2 = require("@anthropic-ai/tokenizer");
-var defaultClient2 = null;
-var AnthropicClientContext = createContext(
-  () => {
-    if (defaultClient2) {
-      return defaultClient2;
-    }
-    defaultClient2 = new import_sdk.default({
-      apiKey: getEnvVar("ANTHROPIC_API_KEY", false)
-    });
-    return defaultClient2;
-  }
-);
-var defaultMaxTokens = 4096;
-async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
-  const startTime = performance.now();
-  const client = getContext(AnthropicClientContext)();
-  if (!client) {
-    throw new Error(
-      "[AnthropicChatCompletion] must supply AnthropicClient via context"
-    );
-  }
-  const renderedMessages = await Promise.all(
-    childrenToConversationMessage(props.children).flatMap((message) => {
-      if (message.type === "system") {
-        return [
-          {
-            type: "user",
-            element: /* @__PURE__ */ jsxs(UserMessage, { children: [
-              "For subsequent replies you will adhere to the following instructions: ",
-              message.element
-            ] })
-          },
-          {
-            type: "assistant",
-            element: /* @__PURE__ */ jsx(AssistantMessage, { children: "Okay, I will do that." })
-          }
-        ];
-      }
-      return [message];
-    }).map(async (message) => {
-      const prefix = message.type === "user" ? import_sdk.default.HUMAN_PROMPT : import_sdk.default.AI_PROMPT;
-      const rendered = await render(message.element);
-      const content2 = `${prefix} ${rendered.trim()}`;
-      return {
-        ...message,
-        content: content2,
-        tokens: (0, import_tokenizer2.countTokens)(content2)
-      };
-    })
-  );
-  const chatMessages = renderedMessages.map((m) => {
-    return m.content;
-  });
-  chatMessages.push(import_sdk.default.AI_PROMPT);
-  const anthropicCompletionRequest = {
-    prompt: chatMessages.join("\n\n"),
-    max_tokens_to_sample: props.maxTokens ?? defaultMaxTokens,
-    temperature: props.temperature,
-    model: props.model,
-    stream: true
-  };
-  const logRequestData = {
-    startTime,
-    model: props.model,
-    provider: props.provider,
-    providerRegion: props.providerRegion,
-    inputMessages: renderedMessages,
-    request: anthropicCompletionRequest
-  };
-  logger.chatCompletionRequest("anthropic", logRequestData);
-  let response;
-  try {
-    response = await client.completions.create(anthropicCompletionRequest);
-  } catch (err) {
-    if (err instanceof import_sdk.default.APIError) {
-      throw new ChatCompletionError(
-        `AnthropicClient.APIError: ${err.message}`,
-        logRequestData
-      );
-    } else if (err instanceof Error) {
-      throw new ChatCompletionError(err.message, logRequestData);
-    }
-    throw err;
-  }
-  let content = "";
-  let isFirstResponse = true;
-  for await (const completion of response) {
-    let text = completion.completion;
-    if (isFirstResponse && text.length > 0) {
-      isFirstResponse = false;
-      if (text.startsWith(" ")) {
-        text = text.slice(1);
-      }
-    }
-    content += text;
-    yield text;
-  }
-  const outputMessage = {
-    type: "assistant",
-    element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
-    content,
-    tokens: (0, import_tokenizer2.countTokens)(content)
-  };
-  const responseData = {
-    ...logRequestData,
-    finishReason: "stop",
-    latency: performance.now() - startTime,
-    outputMessage,
-    tokensUsed: computeUsage([...renderedMessages, outputMessage])
-  };
-  logger.chatCompletionResponse("anthropic", responseData);
-}
-
-// src/lib/anthropic/index.ts
-var import_sdk2 = __toESM(require("@anthropic-ai/sdk"));
-var import_tokenizer3 = require("@anthropic-ai/tokenizer");
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   AIFragment,
-  AnthropicChatCompletion,
-  AnthropicClient,
-  AnthropicClientContext,
   AssistantMessage,
   BoundLogger,
   ChatCompletionError,
@@ -848,11 +751,9 @@ var import_tokenizer3 = require("@anthropic-ai/tokenizer");
   attachedContextSymbol,
   childrenToConversationMessage,
   computeUsage,
-  countAnthropicTokens,
   createAIElement,
   createContext,
   createRenderContext,
-  defaultMaxTokens,
   tokenCountForConversationMessage,
   tokenLimitForChatModel,
   tokenizer
package/dist/index.mjs
CHANGED

@@ -3,8 +3,7 @@ import {
   createAIElement,
   isAIElement,
   isLiteral,
-  jsx,
-  jsxs
+  jsx
 } from "./chunk-7GA5BUUP.mjs";

 // src/chat.tsx

The hunks @@ -201,6 +200,13 @@, @@ -267,6 +273,15 @@, @@ -285,6 +300,10 @@, @@ -296,14 +315,27 @@, @@ -313,8 +345,9 @@, @@ -340,10 +373,18 @@, @@ -530,7 +571,7 @@, and @@ -610,7 +651,7 @@ repeat the index.js changes above at the ESM bundle's offsets: the performanceNow helper, the ParallelStreamIterator error channel with listener cleanup, the try/catch in coalesceParallelStreams, and the OpenAIChatCompletion timing switch.

@@ -619,131 +660,8 @@ async function* OpenAIChatCompletion(props, { logger, render, getContext }) {

 // src/lib/openai/index.ts
 import { OpenAI as OpenAIClient2 } from "openai";
-
-// src/lib/anthropic/Anthropic.tsx
-import AnthropicClient from "@anthropic-ai/sdk";
-import { countTokens } from "@anthropic-ai/tokenizer";
-var defaultClient2 = null;
-var AnthropicClientContext = createContext(
-  () => {
-    if (defaultClient2) {
-      return defaultClient2;
-    }
-    defaultClient2 = new AnthropicClient({
-      apiKey: getEnvVar("ANTHROPIC_API_KEY", false)
-    });
-    return defaultClient2;
-  }
-);
-var defaultMaxTokens = 4096;
-async function* AnthropicChatCompletion(props, { render, logger, getContext }) {
-  const startTime = performance.now();
-  const client = getContext(AnthropicClientContext)();
-  if (!client) {
-    throw new Error(
-      "[AnthropicChatCompletion] must supply AnthropicClient via context"
-    );
-  }
-  const renderedMessages = await Promise.all(
-    childrenToConversationMessage(props.children).flatMap((message) => {
-      if (message.type === "system") {
-        return [
-          {
-            type: "user",
-            element: /* @__PURE__ */ jsxs(UserMessage, { children: [
-              "For subsequent replies you will adhere to the following instructions: ",
-              message.element
-            ] })
-          },
-          {
-            type: "assistant",
-            element: /* @__PURE__ */ jsx(AssistantMessage, { children: "Okay, I will do that." })
-          }
-        ];
-      }
-      return [message];
-    }).map(async (message) => {
-      const prefix = message.type === "user" ? AnthropicClient.HUMAN_PROMPT : AnthropicClient.AI_PROMPT;
-      const rendered = await render(message.element);
-      const content2 = `${prefix} ${rendered.trim()}`;
-      return {
-        ...message,
-        content: content2,
-        tokens: countTokens(content2)
-      };
-    })
-  );
-  const chatMessages = renderedMessages.map((m) => {
-    return m.content;
-  });
-  chatMessages.push(AnthropicClient.AI_PROMPT);
-  const anthropicCompletionRequest = {
-    prompt: chatMessages.join("\n\n"),
-    max_tokens_to_sample: props.maxTokens ?? defaultMaxTokens,
-    temperature: props.temperature,
-    model: props.model,
-    stream: true
-  };
-  const logRequestData = {
-    startTime,
-    model: props.model,
-    provider: props.provider,
-    providerRegion: props.providerRegion,
-    inputMessages: renderedMessages,
-    request: anthropicCompletionRequest
-  };
-  logger.chatCompletionRequest("anthropic", logRequestData);
-  let response;
-  try {
-    response = await client.completions.create(anthropicCompletionRequest);
-  } catch (err) {
-    if (err instanceof AnthropicClient.APIError) {
-      throw new ChatCompletionError(
-        `AnthropicClient.APIError: ${err.message}`,
-        logRequestData
-      );
-    } else if (err instanceof Error) {
-      throw new ChatCompletionError(err.message, logRequestData);
-    }
-    throw err;
-  }
-  let content = "";
-  let isFirstResponse = true;
-  for await (const completion of response) {
-    let text = completion.completion;
-    if (isFirstResponse && text.length > 0) {
-      isFirstResponse = false;
-      if (text.startsWith(" ")) {
-        text = text.slice(1);
-      }
-    }
-    content += text;
-    yield text;
-  }
-  const outputMessage = {
-    type: "assistant",
-    element: /* @__PURE__ */ jsx(AssistantMessage, { children: content }),
-    content,
-    tokens: countTokens(content)
-  };
-  const responseData = {
-    ...logRequestData,
-    finishReason: "stop",
-    latency: performance.now() - startTime,
-    outputMessage,
-    tokensUsed: computeUsage([...renderedMessages, outputMessage])
-  };
-  logger.chatCompletionResponse("anthropic", responseData);
-}
-
-// src/lib/anthropic/index.ts
-import AnthropicClient2 from "@anthropic-ai/sdk";
-import { countTokens as countAnthropicTokens } from "@anthropic-ai/tokenizer";
 export {
   AIFragment,
-  AnthropicChatCompletion,
-  AnthropicClient2 as AnthropicClient,
-  AnthropicClientContext,
   AssistantMessage,
   BoundLogger,
   ChatCompletionError,
@@ -760,11 +678,9 @@ export {
   attachedContextSymbol,
   childrenToConversationMessage,
   computeUsage,
-  countAnthropicTokens,
   createAIElement,
   createContext,
   createRenderContext,
-  defaultMaxTokens,
   tokenCountForConversationMessage,
   tokenLimitForChatModel,
   tokenizer
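index.mjs mirrors index.js because tsup emits the same source as both an ESM and a CJS bundle, so the exported surface is now identical in both formats. A sketch of the two consumption styles, assuming the usual main/module resolution for this package (the export names themselves are confirmed by the lists above):

```ts
// ESM: resolves to dist/index.mjs.
import { createRenderContext, OpenAIChatCompletion, tokenizer } from '@gammatech/aijsx';

// CJS from an ESM file, resolving to dist/index.js; createRequire keeps this
// snippet valid ESM TypeScript.
import { createRequire } from 'node:module';
const require = createRequire(import.meta.url);
const aijsx = require('@gammatech/aijsx');

console.log('AnthropicChatCompletion' in aijsx); // false as of 0.1.3-asa.3
```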
package/dist/jsx-dev-runtime.d.mts
CHANGED

@@ -1,2 +1,2 @@
 export { Fragment, JSX, jsx, jsxDEV, jsxs } from './jsx-runtime.mjs';
-import './createElement-Q_LxUYf8.mjs';
+import './createElement-sO2rY6Ly.mjs';

package/dist/jsx-dev-runtime.d.ts
CHANGED

@@ -1,2 +1,2 @@
 export { Fragment, JSX, jsx, jsxDEV, jsxs } from './jsx-runtime.js';
-import './createElement-Q_LxUYf8.js';
+import './createElement-sO2rY6Ly.js';
package/dist/jsx-runtime.d.mts
CHANGED

@@ -1,4 +1,4 @@
-import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-Q_LxUYf8.mjs';
+import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-sO2rY6Ly.mjs';

 /**
  * The is used as an import source for ts/js files as the JSX transpile functinos

package/dist/jsx-runtime.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-Q_LxUYf8.js';
+import { u as AIComponent, A as AIElement, b as AIFragment } from './createElement-sO2rY6Ly.js';

 /**
  * The is used as an import source for ts/js files as the JSX transpile functinos
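Only the chunk-hash import changed in the runtime declaration files, but these entry points are what the automatic JSX transform resolves, so the rename flows through to consumers transparently. A minimal consumer sketch; the compiler setting is an assumption about intended usage, since this diff shows only the runtime files:

```tsx
/** @jsxImportSource @gammatech/aijsx */
// With "jsx": "react-jsx" in tsconfig (or this per-file pragma), TypeScript
// compiles JSX into jsx/jsxs calls against @gammatech/aijsx/jsx-runtime,
// which re-exports from the renamed createElement chunk.
import { UserMessage } from '@gammatech/aijsx';

const prompt = <UserMessage>Summarize this diff.</UserMessage>;
```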
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@gammatech/aijsx",
-  "version": "0.1.2",
+  "version": "0.1.3-asa.3",
   "description": "Rewrite of aijsx",
   "author": "Jordan Garcia",
   "license": "MIT",
@@ -13,14 +13,11 @@
     "test:watch": "jest --watch --verbose",
     "build": "yarn check-types && yarn clean-symlinks && tsup",
     "clean-symlinks": "rm ./jsx-* || true",
-    "symlink": "ln -s ./dist/jsx-runtime.js . && ln -s ./dist/jsx-runtime.d.ts && ln -s ./dist/jsx-runtime.js ./jsx-dev-runtime.js && ln -s ./dist/jsx-runtime.d.ts ./jsx-dev-runtime.d.ts",
     "prepublishOnly": "yarn build",
     "lint": "eslint \"{src,test}/**/*.ts\" && yarn check-types",
     "check-types": "tsc --skipLibCheck --noEmit"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.12.0",
-    "@anthropic-ai/tokenizer": "^0.0.4",
     "js-tiktoken": "^1.0.8",
     "nanoid": "^3.1.23",
     "openai": "^4.23.0"